author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
commit    ace9429bb58fd418f0c81d4c2835699bddf6bde6
tree      b2d64bc10158fdd5497876388cd68142ca374ed3 /lib
parent    Initial commit.
Adding upstream version 6.6.15.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'lib')
-rw-r--r--  lib/.gitignore | 9
-rw-r--r--  lib/842/842.h | 130
-rw-r--r--  lib/842/842_compress.c | 630
-rw-r--r--  lib/842/842_debugfs.h | 50
-rw-r--r--  lib/842/842_decompress.c | 417
-rw-r--r--  lib/842/Makefile | 3
-rw-r--r--  lib/Kconfig | 766
-rw-r--r--  lib/Kconfig.debug | 3027
-rw-r--r--  lib/Kconfig.kasan | 210
-rw-r--r--  lib/Kconfig.kcsan | 257
-rw-r--r--  lib/Kconfig.kfence | 99
-rw-r--r--  lib/Kconfig.kgdb | 165
-rw-r--r--  lib/Kconfig.kmsan | 63
-rw-r--r--  lib/Kconfig.ubsan | 163
-rw-r--r--  lib/Makefile | 440
-rw-r--r--  lib/argv_split.c | 95
-rw-r--r--  lib/ashldi3.c | 32
-rw-r--r--  lib/ashrdi3.c | 34
-rw-r--r--  lib/asn1_decoder.c | 521
-rw-r--r--  lib/asn1_encoder.c | 452
-rw-r--r--  lib/assoc_array.c | 1726
-rw-r--r--  lib/atomic64.c | 189
-rw-r--r--  lib/atomic64_test.c | 276
-rw-r--r--  lib/audit.c | 89
-rw-r--r--  lib/base64.c | 103
-rw-r--r--  lib/bcd.c | 15
-rw-r--r--  lib/bch.c | 1412
-rw-r--r--  lib/bitfield_kunit.c | 154
-rw-r--r--  lib/bitmap.c | 1551
-rw-r--r--  lib/bitrev.c | 47
-rw-r--r--  lib/bootconfig-data.S | 10
-rw-r--r--  lib/bootconfig.c | 985
-rw-r--r--  lib/bsearch.c | 36
-rw-r--r--  lib/btree.c | 796
-rw-r--r--  lib/bucket_locks.c | 54
-rw-r--r--  lib/bug.c | 246
-rwxr-xr-x  lib/build_OID_registry | 203
-rw-r--r--  lib/buildid.c | 191
-rw-r--r--  lib/bust_spinlocks.c | 29
-rw-r--r--  lib/check_signature.c | 27
-rw-r--r--  lib/checksum.c | 173
-rw-r--r--  lib/checksum_kunit.c | 356
-rw-r--r--  lib/clz_ctz.c | 43
-rw-r--r--  lib/clz_tab.c | 19
-rw-r--r--  lib/cmdline.c | 275
-rw-r--r--  lib/cmdline_kunit.c | 156
-rw-r--r--  lib/cmpdi2.c | 30
-rw-r--r--  lib/compat_audit.c | 56
-rw-r--r--  lib/cpu_rmap.c | 339
-rw-r--r--  lib/cpumask.c | 199
-rw-r--r--  lib/cpumask_kunit.c | 155
-rw-r--r--  lib/crc-ccitt.c | 123
-rw-r--r--  lib/crc-itu-t.c | 67
-rw-r--r--  lib/crc-t10dif.c | 131
-rw-r--r--  lib/crc16.c | 65
-rw-r--r--  lib/crc32.c | 344
-rw-r--r--  lib/crc32defs.h | 59
-rw-r--r--  lib/crc32test.c | 852
-rw-r--r--  lib/crc4.c | 44
-rw-r--r--  lib/crc64-rocksoft.c | 126
-rw-r--r--  lib/crc64.c | 85
-rw-r--r--  lib/crc7.c | 74
-rw-r--r--  lib/crc8.c | 86
-rw-r--r--  lib/crypto/Kconfig | 141
-rw-r--r--  lib/crypto/Makefile | 57
-rw-r--r--  lib/crypto/aes.c | 356
-rw-r--r--  lib/crypto/aesgcm.c | 727
-rw-r--r--  lib/crypto/arc4.c | 74
-rw-r--r--  lib/crypto/blake2s-generic.c | 110
-rw-r--r--  lib/crypto/blake2s-selftest.c | 651
-rw-r--r--  lib/crypto/blake2s.c | 71
-rw-r--r--  lib/crypto/chacha.c | 114
-rw-r--r--  lib/crypto/chacha20poly1305-selftest.c | 9082
-rw-r--r--  lib/crypto/chacha20poly1305.c | 373
-rw-r--r--  lib/crypto/curve25519-fiat32.c | 864
-rw-r--r--  lib/crypto/curve25519-generic.c | 24
-rw-r--r--  lib/crypto/curve25519-hacl64.c | 786
-rw-r--r--  lib/crypto/curve25519-selftest.c | 1321
-rw-r--r--  lib/crypto/curve25519.c | 33
-rw-r--r--  lib/crypto/des.c | 902
-rw-r--r--  lib/crypto/gf128mul.c | 436
-rw-r--r--  lib/crypto/libchacha.c | 35
-rw-r--r--  lib/crypto/memneq.c | 173
-rw-r--r--  lib/crypto/mpi/Makefile | 30
-rw-r--r--  lib/crypto/mpi/ec.c | 1509
-rw-r--r--  lib/crypto/mpi/generic_mpih-add1.c | 48
-rw-r--r--  lib/crypto/mpi/generic_mpih-lshift.c | 50
-rw-r--r--  lib/crypto/mpi/generic_mpih-mul1.c | 44
-rw-r--r--  lib/crypto/mpi/generic_mpih-mul2.c | 47
-rw-r--r--  lib/crypto/mpi/generic_mpih-mul3.c | 48
-rw-r--r--  lib/crypto/mpi/generic_mpih-rshift.c | 50
-rw-r--r--  lib/crypto/mpi/generic_mpih-sub1.c | 47
-rw-r--r--  lib/crypto/mpi/longlong.h | 1361
-rw-r--r--  lib/crypto/mpi/mpi-add.c | 155
-rw-r--r--  lib/crypto/mpi/mpi-bit.c | 308
-rw-r--r--  lib/crypto/mpi/mpi-cmp.c | 98
-rw-r--r--  lib/crypto/mpi/mpi-div.c | 234
-rw-r--r--  lib/crypto/mpi/mpi-inline.h | 109
-rw-r--r--  lib/crypto/mpi/mpi-internal.h | 232
-rw-r--r--  lib/crypto/mpi/mpi-inv.c | 143
-rw-r--r--  lib/crypto/mpi/mpi-mod.c | 157
-rw-r--r--  lib/crypto/mpi/mpi-mul.c | 92
-rw-r--r--  lib/crypto/mpi/mpi-pow.c | 314
-rw-r--r--  lib/crypto/mpi/mpi-sub-ui.c | 78
-rw-r--r--  lib/crypto/mpi/mpicoder.c | 752
-rw-r--r--  lib/crypto/mpi/mpih-cmp.c | 43
-rw-r--r--  lib/crypto/mpi/mpih-div.c | 517
-rw-r--r--  lib/crypto/mpi/mpih-mul.c | 509
-rw-r--r--  lib/crypto/mpi/mpiutil.c | 330
-rw-r--r--  lib/crypto/poly1305-donna32.c | 205
-rw-r--r--  lib/crypto/poly1305-donna64.c | 184
-rw-r--r--  lib/crypto/poly1305.c | 78
-rw-r--r--  lib/crypto/sha1.c | 140
-rw-r--r--  lib/crypto/sha256.c | 168
-rw-r--r--  lib/crypto/utils.c | 88
-rw-r--r--  lib/ctype.c | 38
-rw-r--r--  lib/debug_info.c | 27
-rw-r--r--  lib/debug_locks.c | 49
-rw-r--r--  lib/debugobjects.c | 1450
-rw-r--r--  lib/dec_and_lock.c | 82
-rw-r--r--  lib/decompress.c | 84
-rw-r--r--  lib/decompress_bunzip2.c | 756
-rw-r--r--  lib/decompress_inflate.c | 219
-rw-r--r--  lib/decompress_unlz4.c | 217
-rw-r--r--  lib/decompress_unlzma.c | 679
-rw-r--r--  lib/decompress_unlzo.c | 286
-rw-r--r--  lib/decompress_unxz.c | 409
-rw-r--r--  lib/decompress_unzstd.c | 352
-rw-r--r--  lib/devmem_is_allowed.c | 28
-rw-r--r--  lib/devres.c | 602
-rw-r--r--  lib/dhry.h | 358
-rw-r--r--  lib/dhry_1.c | 290
-rw-r--r--  lib/dhry_2.c | 175
-rw-r--r--  lib/dhry_run.c | 87
-rw-r--r--  lib/digsig.c | 279
-rw-r--r--  lib/dim/Makefile | 7
-rw-r--r--  lib/dim/dim.c | 84
-rw-r--r--  lib/dim/net_dim.c | 247
-rw-r--r--  lib/dim/rdma_dim.c | 109
-rw-r--r--  lib/dump_stack.c | 115
-rw-r--r--  lib/dynamic_debug.c | 1491
-rw-r--r--  lib/dynamic_queue_limits.c | 138
-rw-r--r--  lib/earlycpio.c | 141
-rw-r--r--  lib/errname.c | 224
-rw-r--r--  lib/error-inject.c | 239
-rw-r--r--  lib/errseq.c | 207
-rw-r--r--  lib/extable.c | 118
-rw-r--r--  lib/fault-inject-usercopy.c | 39
-rw-r--r--  lib/fault-inject.c | 437
-rw-r--r--  lib/fdt.c | 2
-rw-r--r--  lib/fdt_addresses.c | 2
-rw-r--r--  lib/fdt_empty_tree.c | 2
-rw-r--r--  lib/fdt_ro.c | 2
-rw-r--r--  lib/fdt_rw.c | 2
-rw-r--r--  lib/fdt_strerror.c | 2
-rw-r--r--  lib/fdt_sw.c | 2
-rw-r--r--  lib/fdt_wip.c | 2
-rw-r--r--  lib/find_bit.c | 270
-rw-r--r--  lib/find_bit_benchmark.c | 197
-rw-r--r--  lib/flex_proportions.c | 278
-rw-r--r--  lib/fonts/Kconfig | 144
-rw-r--r--  lib/fonts/Makefile | 22
-rw-r--r--  lib/fonts/font_10x18.c | 5147
-rw-r--r--  lib/fonts/font_6x10.c | 3089
-rw-r--r--  lib/fonts/font_6x11.c | 3353
-rw-r--r--  lib/fonts/font_6x8.c | 2577
-rw-r--r--  lib/fonts/font_7x14.c | 4119
-rw-r--r--  lib/fonts/font_8x16.c | 4634
-rw-r--r--  lib/fonts/font_8x8.c | 2584
-rw-r--r--  lib/fonts/font_acorn_8x8.c | 280
-rw-r--r--  lib/fonts/font_mini_4x6.c | 2159
-rw-r--r--  lib/fonts/font_pearl_8x8.c | 2588
-rw-r--r--  lib/fonts/font_sun12x22.c | 6166
-rw-r--r--  lib/fonts/font_sun8x16.c | 278
-rw-r--r--  lib/fonts/font_ter16x32.c | 2072
-rw-r--r--  lib/fonts/fonts.c | 153
-rw-r--r--  lib/fortify_kunit.c | 331
-rw-r--r--  lib/gen_crc32table.c | 142
-rw-r--r--  lib/gen_crc64table.c | 95
-rw-r--r--  lib/genalloc.c | 909
-rw-r--r--  lib/generic-radix-tree.c | 248
-rw-r--r--  lib/glob.c | 123
-rw-r--r--  lib/globtest.c | 167
-rw-r--r--  lib/group_cpus.c | 439
-rw-r--r--  lib/hashtable_test.c | 317
-rw-r--r--  lib/hexdump.c | 295
-rw-r--r--  lib/hweight.c | 68
-rw-r--r--  lib/idr.c | 599
-rw-r--r--  lib/inflate.c | 1310
-rw-r--r--  lib/interval_tree.c | 149
-rw-r--r--  lib/interval_tree_test.c | 139
-rw-r--r--  lib/iomap.c | 429
-rw-r--r--  lib/iomap_copy.c | 79
-rw-r--r--  lib/iommu-helper.c | 29
-rw-r--r--  lib/iov_iter.c | 1862
-rw-r--r--  lib/irq_poll.c | 219
-rw-r--r--  lib/irq_regs.c | 14
-rw-r--r--  lib/is_signed_type_kunit.c | 49
-rw-r--r--  lib/is_single_threaded.c | 54
-rw-r--r--  lib/kasprintf.c | 64
-rw-r--r--  lib/kfifo.c | 592
-rw-r--r--  lib/klist.c | 407
-rw-r--r--  lib/kobject.c | 1127
-rw-r--r--  lib/kobject_uevent.c | 815
-rw-r--r--  lib/kstrtox.c | 431
-rw-r--r--  lib/kstrtox.h | 11
-rw-r--r--  lib/kunit/.kunitconfig | 3
-rw-r--r--  lib/kunit/Kconfig | 73
-rw-r--r--  lib/kunit/Makefile | 26
-rw-r--r--  lib/kunit/assert.c | 274
-rw-r--r--  lib/kunit/attributes.c | 414
-rw-r--r--  lib/kunit/debugfs.c | 129
-rw-r--r--  lib/kunit/debugfs.h | 30
-rw-r--r--  lib/kunit/executor.c | 356
-rw-r--r--  lib/kunit/executor_test.c | 293
-rw-r--r--  lib/kunit/hooks-impl.h | 31
-rw-r--r--  lib/kunit/hooks.c | 21
-rw-r--r--  lib/kunit/kunit-example-test.c | 289
-rw-r--r--  lib/kunit/kunit-test.c | 664
-rw-r--r--  lib/kunit/resource.c | 178
-rw-r--r--  lib/kunit/static_stub.c | 123
-rw-r--r--  lib/kunit/string-stream-test.c | 53
-rw-r--r--  lib/kunit/string-stream.c | 155
-rw-r--r--  lib/kunit/string-stream.h | 50
-rw-r--r--  lib/kunit/test.c | 929
-rw-r--r--  lib/kunit/try-catch-impl.h | 27
-rw-r--r--  lib/kunit/try-catch.c | 98
-rw-r--r--  lib/kunit_iov_iter.c | 777
-rw-r--r--  lib/libcrc32c.c | 74
-rw-r--r--  lib/linear_ranges.c | 276
-rw-r--r--  lib/list-test.c | 1502
-rw-r--r--  lib/list_debug.c | 72
-rw-r--r--  lib/list_sort.c | 253
-rw-r--r--  lib/livepatch/Makefile | 14
-rw-r--r--  lib/livepatch/test_klp_atomic_replace.c | 57
-rw-r--r--  lib/livepatch/test_klp_callbacks_busy.c | 70
-rw-r--r--  lib/livepatch/test_klp_callbacks_demo.c | 121
-rw-r--r--  lib/livepatch/test_klp_callbacks_demo2.c | 93
-rw-r--r--  lib/livepatch/test_klp_callbacks_mod.c | 24
-rw-r--r--  lib/livepatch/test_klp_livepatch.c | 51
-rw-r--r--  lib/livepatch/test_klp_shadow_vars.c | 301
-rw-r--r--  lib/livepatch/test_klp_state.c | 162
-rw-r--r--  lib/livepatch/test_klp_state2.c | 191
-rw-r--r--  lib/livepatch/test_klp_state3.c | 5
-rw-r--r--  lib/llist.c | 88
-rw-r--r--  lib/locking-selftest-hardirq.h | 10
-rw-r--r--  lib/locking-selftest-mutex.h | 12
-rw-r--r--  lib/locking-selftest-rlock-hardirq.h | 2
-rw-r--r--  lib/locking-selftest-rlock-softirq.h | 2
-rw-r--r--  lib/locking-selftest-rlock.h | 15
-rw-r--r--  lib/locking-selftest-rsem.h | 15
-rw-r--r--  lib/locking-selftest-rtmutex.h | 12
-rw-r--r--  lib/locking-selftest-softirq.h | 10
-rw-r--r--  lib/locking-selftest-spin-hardirq.h | 2
-rw-r--r--  lib/locking-selftest-spin-softirq.h | 2
-rw-r--r--  lib/locking-selftest-spin.h | 12
-rw-r--r--  lib/locking-selftest-wlock-hardirq.h | 2
-rw-r--r--  lib/locking-selftest-wlock-softirq.h | 2
-rw-r--r--  lib/locking-selftest-wlock.h | 15
-rw-r--r--  lib/locking-selftest-wsem.h | 15
-rw-r--r--  lib/locking-selftest.c | 2950
-rw-r--r--  lib/lockref.c | 193
-rw-r--r--  lib/logic_iomem.c | 321
-rw-r--r--  lib/logic_pio.c | 317
-rw-r--r--  lib/lru_cache.c | 622
-rw-r--r--  lib/lshrdi3.c | 32
-rw-r--r--  lib/lz4/Makefile | 6
-rw-r--r--  lib/lz4/lz4_compress.c | 940
-rw-r--r--  lib/lz4/lz4_decompress.c | 720
-rw-r--r--  lib/lz4/lz4defs.h | 247
-rw-r--r--  lib/lz4/lz4hc_compress.c | 768
-rw-r--r--  lib/lzo/Makefile | 6
-rw-r--r--  lib/lzo/lzo1x_compress.c | 398
-rw-r--r--  lib/lzo/lzo1x_decompress_safe.c | 296
-rw-r--r--  lib/lzo/lzodefs.h | 71
-rw-r--r--  lib/maple_tree.c | 7268
-rw-r--r--  lib/math/Kconfig | 17
-rw-r--r--  lib/math/Makefile | 9
-rw-r--r--  lib/math/cordic.c | 92
-rw-r--r--  lib/math/div64.c | 225
-rw-r--r--  lib/math/gcd.c | 85
-rw-r--r--  lib/math/int_log.c | 133
-rw-r--r--  lib/math/int_pow.c | 32
-rw-r--r--  lib/math/int_sqrt.c | 71
-rw-r--r--  lib/math/lcm.c | 26
-rw-r--r--  lib/math/prime_numbers.c | 316
-rw-r--r--  lib/math/rational-test.c | 56
-rw-r--r--  lib/math/rational.c | 111
-rw-r--r--  lib/math/reciprocal_div.c | 73
-rw-r--r--  lib/math/test_div64.c | 249
-rw-r--r--  lib/memcat_p.c | 34
-rw-r--r--  lib/memcpy_kunit.c | 569
-rw-r--r--  lib/memory-notifier-error-inject.c | 49
-rw-r--r--  lib/memregion.c | 19
-rw-r--r--  lib/memweight.c | 39
-rw-r--r--  lib/muldi3.c | 60
-rw-r--r--  lib/net_utils.c | 29
-rw-r--r--  lib/netdev-notifier-error-inject.c | 56
-rw-r--r--  lib/nlattr.c | 1143
-rw-r--r--  lib/nmi_backtrace.c | 123
-rw-r--r--  lib/notifier-error-inject.c | 99
-rw-r--r--  lib/notifier-error-inject.h | 25
-rw-r--r--  lib/objagg.c | 1051
-rw-r--r--  lib/of-reconfig-notifier-error-inject.c | 52
-rw-r--r--  lib/oid_registry.c | 198
-rw-r--r--  lib/once.c | 98
-rw-r--r--  lib/overflow_kunit.c | 1149
-rw-r--r--  lib/packing.c | 201
-rw-r--r--  lib/parman.c | 375
-rw-r--r--  lib/parser.c | 363
-rw-r--r--  lib/pci_iomap.c | 180
-rw-r--r--  lib/percpu-refcount.c | 479
-rw-r--r--  lib/percpu_counter.c | 294
-rw-r--r--  lib/percpu_test.c | 139
-rw-r--r--  lib/pldmfw/Makefile | 2
-rw-r--r--  lib/pldmfw/pldmfw.c | 878
-rw-r--r--  lib/pldmfw/pldmfw_private.h | 238
-rw-r--r--  lib/plist.c | 263
-rw-r--r--  lib/pm-notifier-error-inject.c | 50
-rw-r--r--  lib/polynomial.c | 108
-rw-r--r--  lib/radix-tree.c | 1608
-rw-r--r--  lib/radix-tree.h | 8
-rw-r--r--  lib/raid6/.gitignore | 8
-rw-r--r--  lib/raid6/Makefile | 95
-rw-r--r--  lib/raid6/algos.c | 295
-rw-r--r--  lib/raid6/altivec.uc | 132
-rw-r--r--  lib/raid6/avx2.c | 470
-rw-r--r--  lib/raid6/avx512.c | 564
-rw-r--r--  lib/raid6/int.uc | 156
-rw-r--r--  lib/raid6/loongarch.h | 38
-rw-r--r--  lib/raid6/loongarch_simd.c | 422
-rw-r--r--  lib/raid6/mktables.c | 177
-rw-r--r--  lib/raid6/mmx.c | 139
-rw-r--r--  lib/raid6/neon.c | 67
-rw-r--r--  lib/raid6/neon.h | 22
-rw-r--r--  lib/raid6/neon.uc | 153
-rw-r--r--  lib/raid6/recov.c | 135
-rw-r--r--  lib/raid6/recov_avx2.c | 313
-rw-r--r--  lib/raid6/recov_avx512.c | 383
-rw-r--r--  lib/raid6/recov_loongarch_simd.c | 513
-rw-r--r--  lib/raid6/recov_neon.c | 100
-rw-r--r--  lib/raid6/recov_neon_inner.c | 111
-rw-r--r--  lib/raid6/recov_s390xc.c | 117
-rw-r--r--  lib/raid6/recov_ssse3.c | 328
-rw-r--r--  lib/raid6/s390vx.uc | 170
-rw-r--r--  lib/raid6/sse1.c | 159
-rw-r--r--  lib/raid6/sse2.c | 480
-rw-r--r--  lib/raid6/test/.gitignore | 3
-rw-r--r--  lib/raid6/test/Makefile | 151
-rw-r--r--  lib/raid6/test/test.c | 152
-rw-r--r--  lib/raid6/unroll.awk | 20
-rw-r--r--  lib/raid6/vpermxor.uc | 105
-rw-r--r--  lib/raid6/x86.h | 75
-rw-r--r--  lib/random32.c | 300
-rw-r--r--  lib/ratelimit.c | 76
-rw-r--r--  lib/rbtree.c | 630
-rw-r--r--  lib/rbtree_test.c | 408
-rw-r--r--  lib/rcuref.c | 281
-rw-r--r--  lib/reed_solomon/Makefile | 7
-rw-r--r--  lib/reed_solomon/decode_rs.c | 326
-rw-r--r--  lib/reed_solomon/encode_rs.c | 47
-rw-r--r--  lib/reed_solomon/reed_solomon.c | 424
-rw-r--r--  lib/reed_solomon/test_rslib.c | 518
-rw-r--r--  lib/ref_tracker.c | 275
-rw-r--r--  lib/refcount.c | 186
-rw-r--r--  lib/rhashtable.c | 1245
-rw-r--r--  lib/sbitmap.c | 769
-rw-r--r--  lib/scatterlist.c | 1368
-rw-r--r--  lib/seq_buf.c | 429
-rw-r--r--  lib/sg_pool.c | 180
-rw-r--r--  lib/sg_split.c | 202
-rw-r--r--  lib/siphash.c | 538
-rw-r--r--  lib/siphash_kunit.c | 197
-rw-r--r--  lib/slub_kunit.c | 190
-rw-r--r--  lib/smp_processor_id.c | 68
-rw-r--r--  lib/sort.c | 292
-rw-r--r--  lib/stackdepot.c | 541
-rw-r--r--  lib/stackinit_kunit.c | 461
-rw-r--r--  lib/stmp_device.c | 77
-rw-r--r--  lib/strcat_kunit.c | 104
-rw-r--r--  lib/string.c | 880
-rw-r--r--  lib/string_helpers.c | 1049
-rw-r--r--  lib/strncpy_from_user.c | 146
-rw-r--r--  lib/strnlen_user.c | 120
-rw-r--r--  lib/strscpy_kunit.c | 142
-rw-r--r--  lib/syscall.c | 88
-rw-r--r--  lib/test-kstrtox.c | 735
-rw-r--r--  lib/test-string_helpers.c | 609
-rw-r--r--  lib/test_bitmap.c | 1256
-rw-r--r--  lib/test_bitops.c | 111
-rw-r--r--  lib/test_bits.c | 75
-rw-r--r--  lib/test_blackhole_dev.c | 100
-rw-r--r--  lib/test_bpf.c | 15356
-rw-r--r--  lib/test_debug_virtual.c | 51
-rw-r--r--  lib/test_dynamic_debug.c | 165
-rw-r--r--  lib/test_firmware.c | 1569
-rw-r--r--  lib/test_fortify/read_overflow-memchr.c | 5
-rw-r--r--  lib/test_fortify/read_overflow-memchr_inv.c | 5
-rw-r--r--  lib/test_fortify/read_overflow-memcmp.c | 5
-rw-r--r--  lib/test_fortify/read_overflow-memscan.c | 5
-rw-r--r--  lib/test_fortify/read_overflow2-memcmp.c | 5
-rw-r--r--  lib/test_fortify/read_overflow2-memcpy.c | 5
-rw-r--r--  lib/test_fortify/read_overflow2-memmove.c | 5
-rw-r--r--  lib/test_fortify/read_overflow2_field-memcpy.c | 5
-rw-r--r--  lib/test_fortify/read_overflow2_field-memmove.c | 5
-rw-r--r--  lib/test_fortify/test_fortify.h | 35
-rw-r--r--  lib/test_fortify/write_overflow-memcpy.c | 5
-rw-r--r--  lib/test_fortify/write_overflow-memmove.c | 5
-rw-r--r--  lib/test_fortify/write_overflow-memset.c | 5
-rw-r--r--  lib/test_fortify/write_overflow-strcpy-lit.c | 5
-rw-r--r--  lib/test_fortify/write_overflow-strcpy.c | 5
-rw-r--r--  lib/test_fortify/write_overflow-strlcpy-src.c | 5
-rw-r--r--  lib/test_fortify/write_overflow-strlcpy.c | 5
-rw-r--r--  lib/test_fortify/write_overflow-strncpy-src.c | 5
-rw-r--r--  lib/test_fortify/write_overflow-strncpy.c | 5
-rw-r--r--  lib/test_fortify/write_overflow-strscpy.c | 5
-rw-r--r--  lib/test_fortify/write_overflow_field-memcpy.c | 5
-rw-r--r--  lib/test_fortify/write_overflow_field-memmove.c | 5
-rw-r--r--  lib/test_fortify/write_overflow_field-memset.c | 5
-rw-r--r--  lib/test_fprobe.c | 275
-rw-r--r--  lib/test_fpu.c | 89
-rw-r--r--  lib/test_free_pages.c | 47
-rw-r--r--  lib/test_hash.c | 238
-rw-r--r--  lib/test_hexdump.c | 256
-rw-r--r--  lib/test_hmm.c | 1553
-rw-r--r--  lib/test_hmm_uapi.h | 77
-rw-r--r--  lib/test_ida.c | 217
-rw-r--r--  lib/test_kmod.c | 1222
-rw-r--r--  lib/test_kprobes.c | 403
-rw-r--r--  lib/test_linear_ranges.c | 219
-rw-r--r--  lib/test_list_sort.c | 122
-rw-r--r--  lib/test_lockup.c | 620
-rw-r--r--  lib/test_maple_tree.c | 3878
-rw-r--r--  lib/test_memcat_p.c | 115
-rw-r--r--  lib/test_meminit.c | 439
-rw-r--r--  lib/test_min_heap.c | 194
-rw-r--r--  lib/test_module.c | 34
-rw-r--r--  lib/test_objagg.c | 1021
-rw-r--r--  lib/test_parman.c | 395
-rw-r--r--  lib/test_printf.c | 827
-rw-r--r--  lib/test_ref_tracker.c | 115
-rw-r--r--  lib/test_rhashtable.c | 813
-rw-r--r--  lib/test_scanf.c | 813
-rw-r--r--  lib/test_sort.c | 48
-rw-r--r--  lib/test_static_key_base.c | 60
-rw-r--r--  lib/test_static_keys.c | 239
-rw-r--r--  lib/test_string.c | 257
-rw-r--r--  lib/test_sysctl.c | 256
-rw-r--r--  lib/test_ubsan.c | 122
-rw-r--r--  lib/test_user_copy.c | 331
-rw-r--r--  lib/test_uuid.c | 133
-rw-r--r--  lib/test_vmalloc.c | 612
-rw-r--r--  lib/test_xarray.c | 1836
-rw-r--r--  lib/textsearch.c | 323
-rw-r--r--  lib/timerqueue.c | 84
-rw-r--r--  lib/trace_readwrite.c | 47
-rw-r--r--  lib/ts_bm.c | 222
-rw-r--r--  lib/ts_fsm.c | 337
-rw-r--r--  lib/ts_kmp.c | 153
-rw-r--r--  lib/ubsan.c | 454
-rw-r--r--  lib/ubsan.h | 138
-rw-r--r--  lib/ucmpdi2.c | 23
-rw-r--r--  lib/ucs2_string.c | 116
-rw-r--r--  lib/usercopy.c | 100
-rw-r--r--  lib/uuid.c | 133
-rw-r--r--  lib/vdso/Kconfig | 33
-rw-r--r--  lib/vdso/Makefile | 17
-rw-r--r--  lib/vdso/gettimeofday.c | 441
-rw-r--r--  lib/vsprintf.c | 3727
-rw-r--r--  lib/win_minmax.c | 99
-rw-r--r--  lib/xarray.c | 2309
-rw-r--r--  lib/xxhash.c | 500
-rw-r--r--  lib/xz/Kconfig | 71
-rw-r--r--  lib/xz/Makefile | 6
-rw-r--r--  lib/xz/xz_crc32.c | 59
-rw-r--r--  lib/xz/xz_dec_bcj.c | 574
-rw-r--r--  lib/xz/xz_dec_lzma2.c | 1344
-rw-r--r--  lib/xz/xz_dec_stream.c | 837
-rw-r--r--  lib/xz/xz_dec_syms.c | 33
-rw-r--r--  lib/xz/xz_dec_test.c | 220
-rw-r--r--  lib/xz/xz_lzma2.h | 204
-rw-r--r--  lib/xz/xz_private.h | 163
-rw-r--r--  lib/xz/xz_stream.h | 62
-rw-r--r--  lib/zlib_deflate/Makefile | 12
-rw-r--r--  lib/zlib_deflate/deflate.c | 1153
-rw-r--r--  lib/zlib_deflate/deflate_syms.c | 20
-rw-r--r--  lib/zlib_deflate/deftree.c | 1059
-rw-r--r--  lib/zlib_deflate/defutil.h | 443
-rw-r--r--  lib/zlib_dfltcc/Makefile | 11
-rw-r--r--  lib/zlib_dfltcc/dfltcc.c | 40
-rw-r--r--  lib/zlib_dfltcc/dfltcc.h | 124
-rw-r--r--  lib/zlib_dfltcc/dfltcc_deflate.c | 313
-rw-r--r--  lib/zlib_dfltcc/dfltcc_deflate.h | 21
-rw-r--r--  lib/zlib_dfltcc/dfltcc_inflate.c | 154
-rw-r--r--  lib/zlib_dfltcc/dfltcc_inflate.h | 37
-rw-r--r--  lib/zlib_dfltcc/dfltcc_util.h | 103
-rw-r--r--  lib/zlib_inflate/Makefile | 20
-rw-r--r--  lib/zlib_inflate/inffast.c | 341
-rw-r--r--  lib/zlib_inflate/inffast.h | 11
-rw-r--r--  lib/zlib_inflate/inffixed.h | 94
-rw-r--r--  lib/zlib_inflate/inflate.c | 814
-rw-r--r--  lib/zlib_inflate/inflate.h | 119
-rw-r--r--  lib/zlib_inflate/inflate_syms.c | 21
-rw-r--r--  lib/zlib_inflate/inftrees.c | 315
-rw-r--r--  lib/zlib_inflate/inftrees.h | 59
-rw-r--r--  lib/zlib_inflate/infutil.c | 49
-rw-r--r--  lib/zlib_inflate/infutil.h | 39
-rw-r--r--  lib/zstd/Makefile | 43
-rw-r--r--  lib/zstd/common/bitstream.h | 446
-rw-r--r--  lib/zstd/common/compiler.h | 184
-rw-r--r--  lib/zstd/common/cpu.h | 194
-rw-r--r--  lib/zstd/common/debug.c | 24
-rw-r--r--  lib/zstd/common/debug.h | 101
-rw-r--r--  lib/zstd/common/entropy_common.c | 357
-rw-r--r--  lib/zstd/common/error_private.c | 56
-rw-r--r--  lib/zstd/common/error_private.h | 145
-rw-r--r--  lib/zstd/common/fse.h | 711
-rw-r--r--  lib/zstd/common/fse_decompress.c | 390
-rw-r--r--  lib/zstd/common/huf.h | 358
-rw-r--r--  lib/zstd/common/mem.h | 261
-rw-r--r--  lib/zstd/common/portability_macros.h | 93
-rw-r--r--  lib/zstd/common/zstd_common.c | 83
-rw-r--r--  lib/zstd/common/zstd_deps.h | 107
-rw-r--r--  lib/zstd/common/zstd_internal.h | 443
-rw-r--r--  lib/zstd/compress/clevels.h | 132
-rw-r--r--  lib/zstd/compress/fse_compress.c | 668
-rw-r--r--  lib/zstd/compress/hist.c | 165
-rw-r--r--  lib/zstd/compress/hist.h | 75
-rw-r--r--  lib/zstd/compress/huf_compress.c | 1335
-rw-r--r--  lib/zstd/compress/zstd_compress.c | 6127
-rw-r--r--  lib/zstd/compress/zstd_compress_internal.h | 1399
-rw-r--r--  lib/zstd/compress/zstd_compress_literals.c | 159
-rw-r--r--  lib/zstd/compress/zstd_compress_literals.h | 31
-rw-r--r--  lib/zstd/compress/zstd_compress_sequences.c | 442
-rw-r--r--  lib/zstd/compress/zstd_compress_sequences.h | 54
-rw-r--r--  lib/zstd/compress/zstd_compress_superblock.c | 573
-rw-r--r--  lib/zstd/compress/zstd_compress_superblock.h | 32
-rw-r--r--  lib/zstd/compress/zstd_cwksp.h | 595
-rw-r--r--  lib/zstd/compress/zstd_double_fast.c | 696
-rw-r--r--  lib/zstd/compress/zstd_double_fast.h | 32
-rw-r--r--  lib/zstd/compress/zstd_fast.c | 675
-rw-r--r--  lib/zstd/compress/zstd_fast.h | 31
-rw-r--r--  lib/zstd/compress/zstd_lazy.c | 2102
-rw-r--r--  lib/zstd/compress/zstd_lazy.h | 119
-rw-r--r--  lib/zstd/compress/zstd_ldm.c | 724
-rw-r--r--  lib/zstd/compress/zstd_ldm.h | 111
-rw-r--r--  lib/zstd/compress/zstd_ldm_geartab.h | 106
-rw-r--r--  lib/zstd/compress/zstd_opt.c | 1446
-rw-r--r--  lib/zstd/compress/zstd_opt.h | 50
-rw-r--r--  lib/zstd/decompress/huf_decompress.c | 1740
-rw-r--r--  lib/zstd/decompress/zstd_ddict.c | 241
-rw-r--r--  lib/zstd/decompress/zstd_ddict.h | 44
-rw-r--r--  lib/zstd/decompress/zstd_decompress.c | 2150
-rw-r--r--  lib/zstd/decompress/zstd_decompress_block.c | 2072
-rw-r--r--  lib/zstd/decompress/zstd_decompress_block.h | 68
-rw-r--r--  lib/zstd/decompress/zstd_decompress_internal.h | 228
-rw-r--r--  lib/zstd/decompress_sources.h | 34
-rw-r--r--  lib/zstd/zstd_common_module.c | 32
-rw-r--r--  lib/zstd/zstd_compress_module.c | 164
-rw-r--r--  lib/zstd/zstd_decompress_module.c | 105
560 files changed, 244482 insertions(+), 0 deletions(-)
diff --git a/lib/.gitignore b/lib/.gitignore
new file mode 100644
index 000000000..54596b634
--- /dev/null
+++ b/lib/.gitignore
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+/crc32table.h
+/crc64table.h
+/default.bconf
+/gen_crc32table
+/gen_crc64table
+/oid_registry_data.c
+/test_fortify.log
+/test_fortify/*.log
diff --git a/lib/842/842.h b/lib/842/842.h
new file mode 100644
index 000000000..7b1f581a2
--- /dev/null
+++ b/lib/842/842.h
@@ -0,0 +1,130 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __842_H__
+#define __842_H__
+
+/* The 842 compressed format is made up of multiple blocks, each of
+ * which has the format:
+ *
+ * <template>[arg1][arg2][arg3][arg4]
+ *
+ * where there are between 0 and 4 template args, depending on the specific
+ * template operation. For normal operations, each arg is either a specific
+ * number of data bytes to add to the output buffer, or an index pointing
+ * to a previously-written number of data bytes to copy to the output buffer.
+ *
+ * The template code is a 5-bit value. This code indicates what to do with
+ * the following data. Template codes from 0 to 0x19 should use the template
+ * table, the static "decomp_ops" table used in decompress. For each template
+ * (table row), there are between 1 and 4 actions; each action corresponds to
+ * an arg following the template code bits. Each action is either a "data"
+ * type action or an "index" type action, and each action results in 2, 4, or 8
+ * bytes being written to the output buffer. Each template (i.e. all actions
+ * in the table row) adds up to 8 bytes written to the output buffer.
+ * Any row with fewer than 4 actions is padded with noop actions, indicated by
+ * N0 (for which there is no corresponding arg in the compressed data buffer).
+ *
+ * "Data" actions, indicated in the table by D2, D4, and D8, mean that the
+ * corresponding arg is 2, 4, or 8 bytes, respectively, in the compressed data
+ * buffer should be copied directly to the output buffer.
+ *
+ * "Index" actions, indicated in the table by I2, I4, and I8, mean the
+ * corresponding arg is an index parameter that points to, respectively, a 2,
+ * 4, or 8 byte value already in the output buffer, that should be copied to
+ * the end of the output buffer. Essentially, the index points to a position
+ * in a ring buffer that contains the last N bytes of output buffer data.
+ * The number of bits in each index's arg is: 8 bits for I2, 9 bits for I4,
+ * and 8 bits for I8. Since each index points to a 2, 4, or 8 byte section,
+ * this means that I2 can reference 512 bytes ((2^8 bits = 256) * 2 bytes), I4
+ * can reference 2048 bytes ((2^9 = 512) * 4 bytes), and I8 can reference 2048
+ * bytes ((2^8 = 256) * 8 bytes). Think of it as a kind of ring buffer for
+ * each of I2, I4, and I8 that are updated for each byte written to the output
+ * buffer. In this implementation, the output buffer is directly used for each
+ * index; there is no additional memory required. Note that the index is into
+ * a ring buffer, not a sliding window; for example, if there have been 260
+ * bytes written to the output buffer, an I2 index of 0 would index to byte 256
+ * in the output buffer, while an I2 index of 16 would index to byte 16 in the
+ * output buffer.
+ *
+ * There are also 3 special template codes: 0x1b for "repeat", 0x1c for
+ * "zeros", and 0x1e for "end". The "repeat" operation is followed by a 6 bit
+ * arg N indicating how many times to repeat. The last 8 bytes written to the
+ * output buffer are written again to the output buffer, N + 1 times. The
+ * "zeros" operation, which has no arg bits, writes 8 zeros to the output
+ * buffer. The "end" operation, which also has no arg bits, signals the end
+ * of the compressed data. There may be some number of padding (don't care,
+ * but usually 0) bits after the "end" operation bits, to fill the buffer
+ * length to a specific byte multiple (usually a multiple of 8, 16, or 32
+ * bytes).
+ *
+ * This software implementation also uses one of the undefined template values,
+ * 0x1d as a special "short data" template code, to represent less than 8 bytes
+ * of uncompressed data. It is followed by a 3 bit arg N indicating how many
+ * data bytes will follow, and then N bytes of data, which should be copied to
+ * the output buffer. This allows the software 842 compressor to accept input
+ * buffers that are not an exact multiple of 8 bytes long. However, those
+ * compressed buffers containing this sw-only template will be rejected by
+ * the 842 hardware decompressor, and must be decompressed with this software
+ * library. The 842 software compression module includes a parameter to
+ * disable using this sw-only "short data" template, and instead simply
+ * reject any input buffer that is not a multiple of 8 bytes long.
+ *
+ * After all actions for each operation code are processed, another template
+ * code is in the next 5 bits. The decompression ends once the "end" template
+ * code is detected.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/crc32.h>
+#include <asm/unaligned.h>
+
+#include <linux/sw842.h>
+
+/* special templates */
+#define OP_REPEAT (0x1B)
+#define OP_ZEROS (0x1C)
+#define OP_END (0x1E)
+
+/* sw only template - this is not in the hw design; it's used only by this
+ * software compressor and decompressor, to allow input buffers that aren't
+ * a multiple of 8.
+ */
+#define OP_SHORT_DATA (0x1D)
+
+/* additional bits of each op param */
+#define OP_BITS (5)
+#define REPEAT_BITS (6)
+#define SHORT_DATA_BITS (3)
+#define I2_BITS (8)
+#define I4_BITS (9)
+#define I8_BITS (8)
+#define CRC_BITS (32)
+
+#define REPEAT_BITS_MAX (0x3f)
+#define SHORT_DATA_BITS_MAX (0x7)
+
+/* Arbitrary values used to indicate action */
+#define OP_ACTION (0x70)
+#define OP_ACTION_INDEX (0x10)
+#define OP_ACTION_DATA (0x20)
+#define OP_ACTION_NOOP (0x40)
+#define OP_AMOUNT (0x0f)
+#define OP_AMOUNT_0 (0x00)
+#define OP_AMOUNT_2 (0x02)
+#define OP_AMOUNT_4 (0x04)
+#define OP_AMOUNT_8 (0x08)
+
+#define D2 (OP_ACTION_DATA | OP_AMOUNT_2)
+#define D4 (OP_ACTION_DATA | OP_AMOUNT_4)
+#define D8 (OP_ACTION_DATA | OP_AMOUNT_8)
+#define I2 (OP_ACTION_INDEX | OP_AMOUNT_2)
+#define I4 (OP_ACTION_INDEX | OP_AMOUNT_4)
+#define I8 (OP_ACTION_INDEX | OP_AMOUNT_8)
+#define N0 (OP_ACTION_NOOP | OP_AMOUNT_0)
+
+/* the max of the regular templates - not including the special templates */
+#define OPS_MAX (0x1a)
+
+#endif
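
To make the template/action encoding above concrete, here is a minimal standalone C sketch (an illustration only, not part of this patch) that decodes one action byte using the OP_ACTION and OP_AMOUNT masks from 842.h; describe_action() is a hypothetical helper name, and recall that the actions in any one template row always total 8 output bytes.

#include <stdio.h>

/* masks copied from 842.h; describe_action() is invented for illustration */
#define OP_ACTION       (0x70)
#define OP_ACTION_INDEX (0x10)
#define OP_ACTION_DATA  (0x20)
#define OP_ACTION_NOOP  (0x40)
#define OP_AMOUNT       (0x0f)

static void describe_action(unsigned char op)
{
	unsigned int amount = op & OP_AMOUNT;	/* 0, 2, 4, or 8 output bytes */

	switch (op & OP_ACTION) {
	case OP_ACTION_DATA:
		printf("copy %u literal bytes from the compressed stream\n", amount);
		break;
	case OP_ACTION_INDEX:
		printf("copy %u bytes back-referenced from the ring buffer\n", amount);
		break;
	case OP_ACTION_NOOP:
		printf("noop (row padding, no arg in the stream)\n");
		break;
	}
}

int main(void)
{
	describe_action(OP_ACTION_DATA | 0x04);		/* D4 */
	describe_action(OP_ACTION_INDEX | 0x02);	/* I2 */
	describe_action(OP_ACTION_NOOP);		/* N0 */
	return 0;
}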
diff --git a/lib/842/842_compress.c b/lib/842/842_compress.c
new file mode 100644
index 000000000..c02baa416
--- /dev/null
+++ b/lib/842/842_compress.c
@@ -0,0 +1,630 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * 842 Software Compression
+ *
+ * Copyright (C) 2015 Dan Streetman, IBM Corp
+ *
+ * See 842.h for details of the 842 compressed format.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define MODULE_NAME "842_compress"
+
+#include <linux/hashtable.h>
+
+#include "842.h"
+#include "842_debugfs.h"
+
+#define SW842_HASHTABLE8_BITS (10)
+#define SW842_HASHTABLE4_BITS (11)
+#define SW842_HASHTABLE2_BITS (10)
+
+/* By default, we allow compressing input buffers of any length, but we must
+ * use the non-standard "short data" template so the decompressor can correctly
+ * reproduce the uncompressed data buffer at the right length. However, the
+ * hardware 842 decompressor will not recognize the "short data" template, and
+ * will fail to decompress any compressed buffer containing it (I have no idea
+ * why anyone would want to use software to compress and hardware to decompress,
+ * but that's beside the point). This parameter forces the compression
+ * function to simply reject any input buffer that isn't a multiple of 8 bytes
+ * long, instead of using the "short data" template, so that all compressed
+ * buffers produced by this function will be decompressible by the 842 hardware
+ * decompressor. Unless you have a specific need for that, leave this disabled
+ * so that any length buffer can be compressed.
+ */
+static bool sw842_strict;
+module_param_named(strict, sw842_strict, bool, 0644);
+
+static u8 comp_ops[OPS_MAX][5] = { /* params size in bits */
+ { I8, N0, N0, N0, 0x19 }, /* 8 */
+ { I4, I4, N0, N0, 0x18 }, /* 18 */
+ { I4, I2, I2, N0, 0x17 }, /* 25 */
+ { I2, I2, I4, N0, 0x13 }, /* 25 */
+ { I2, I2, I2, I2, 0x12 }, /* 32 */
+ { I4, I2, D2, N0, 0x16 }, /* 33 */
+ { I4, D2, I2, N0, 0x15 }, /* 33 */
+ { I2, D2, I4, N0, 0x0e }, /* 33 */
+ { D2, I2, I4, N0, 0x09 }, /* 33 */
+ { I2, I2, I2, D2, 0x11 }, /* 40 */
+ { I2, I2, D2, I2, 0x10 }, /* 40 */
+ { I2, D2, I2, I2, 0x0d }, /* 40 */
+ { D2, I2, I2, I2, 0x08 }, /* 40 */
+ { I4, D4, N0, N0, 0x14 }, /* 41 */
+ { D4, I4, N0, N0, 0x04 }, /* 41 */
+ { I2, I2, D4, N0, 0x0f }, /* 48 */
+ { I2, D2, I2, D2, 0x0c }, /* 48 */
+ { I2, D4, I2, N0, 0x0b }, /* 48 */
+ { D2, I2, I2, D2, 0x07 }, /* 48 */
+ { D2, I2, D2, I2, 0x06 }, /* 48 */
+ { D4, I2, I2, N0, 0x03 }, /* 48 */
+ { I2, D2, D4, N0, 0x0a }, /* 56 */
+ { D2, I2, D4, N0, 0x05 }, /* 56 */
+ { D4, I2, D2, N0, 0x02 }, /* 56 */
+ { D4, D2, I2, N0, 0x01 }, /* 56 */
+ { D8, N0, N0, N0, 0x00 }, /* 64 */
+};
+
+struct sw842_hlist_node8 {
+ struct hlist_node node;
+ u64 data;
+ u8 index;
+};
+
+struct sw842_hlist_node4 {
+ struct hlist_node node;
+ u32 data;
+ u16 index;
+};
+
+struct sw842_hlist_node2 {
+ struct hlist_node node;
+ u16 data;
+ u8 index;
+};
+
+#define INDEX_NOT_FOUND (-1)
+#define INDEX_NOT_CHECKED (-2)
+
+struct sw842_param {
+ u8 *in;
+ u8 *instart;
+ u64 ilen;
+ u8 *out;
+ u64 olen;
+ u8 bit;
+ u64 data8[1];
+ u32 data4[2];
+ u16 data2[4];
+ int index8[1];
+ int index4[2];
+ int index2[4];
+ DECLARE_HASHTABLE(htable8, SW842_HASHTABLE8_BITS);
+ DECLARE_HASHTABLE(htable4, SW842_HASHTABLE4_BITS);
+ DECLARE_HASHTABLE(htable2, SW842_HASHTABLE2_BITS);
+ struct sw842_hlist_node8 node8[1 << I8_BITS];
+ struct sw842_hlist_node4 node4[1 << I4_BITS];
+ struct sw842_hlist_node2 node2[1 << I2_BITS];
+};
+
+#define get_input_data(p, o, b) \
+ be##b##_to_cpu(get_unaligned((__be##b *)((p)->in + (o))))
+
+#define init_hashtable_nodes(p, b) do { \
+ int _i; \
+ hash_init((p)->htable##b); \
+ for (_i = 0; _i < ARRAY_SIZE((p)->node##b); _i++) { \
+ (p)->node##b[_i].index = _i; \
+ (p)->node##b[_i].data = 0; \
+ INIT_HLIST_NODE(&(p)->node##b[_i].node); \
+ } \
+} while (0)
+
+#define find_index(p, b, n) ({ \
+ struct sw842_hlist_node##b *_n; \
+ p->index##b[n] = INDEX_NOT_FOUND; \
+ hash_for_each_possible(p->htable##b, _n, node, p->data##b[n]) { \
+ if (p->data##b[n] == _n->data) { \
+ p->index##b[n] = _n->index; \
+ break; \
+ } \
+ } \
+ p->index##b[n] >= 0; \
+})
+
+#define check_index(p, b, n) \
+ ((p)->index##b[n] == INDEX_NOT_CHECKED \
+ ? find_index(p, b, n) \
+ : (p)->index##b[n] >= 0)
+
+#define replace_hash(p, b, i, d) do { \
+ struct sw842_hlist_node##b *_n = &(p)->node##b[(i)+(d)]; \
+ hash_del(&_n->node); \
+ _n->data = (p)->data##b[d]; \
+ pr_debug("add hash index%x %x pos %x data %lx\n", b, \
+ (unsigned int)_n->index, \
+ (unsigned int)((p)->in - (p)->instart), \
+ (unsigned long)_n->data); \
+ hash_add((p)->htable##b, &_n->node, _n->data); \
+} while (0)
+
+static u8 bmask[8] = { 0x00, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe };
+
+static int add_bits(struct sw842_param *p, u64 d, u8 n);
+
+static int __split_add_bits(struct sw842_param *p, u64 d, u8 n, u8 s)
+{
+ int ret;
+
+ if (n <= s)
+ return -EINVAL;
+
+ ret = add_bits(p, d >> s, n - s);
+ if (ret)
+ return ret;
+ return add_bits(p, d & GENMASK_ULL(s - 1, 0), s);
+}
+
+static int add_bits(struct sw842_param *p, u64 d, u8 n)
+{
+ int b = p->bit, bits = b + n, s = round_up(bits, 8) - bits;
+ u64 o;
+ u8 *out = p->out;
+
+ pr_debug("add %u bits %lx\n", (unsigned char)n, (unsigned long)d);
+
+ if (n > 64)
+ return -EINVAL;
+
+ /* split this up if writing > 8 bytes (i.e. n == 64 && p->bit > 0),
+ * or if we're at the end of the output buffer and would write past the end
+ */
+ if (bits > 64)
+ return __split_add_bits(p, d, n, 32);
+ else if (p->olen < 8 && bits > 32 && bits <= 56)
+ return __split_add_bits(p, d, n, 16);
+ else if (p->olen < 4 && bits > 16 && bits <= 24)
+ return __split_add_bits(p, d, n, 8);
+
+ if (DIV_ROUND_UP(bits, 8) > p->olen)
+ return -ENOSPC;
+
+ o = *out & bmask[b];
+ d <<= s;
+
+ if (bits <= 8)
+ *out = o | d;
+ else if (bits <= 16)
+ put_unaligned(cpu_to_be16(o << 8 | d), (__be16 *)out);
+ else if (bits <= 24)
+ put_unaligned(cpu_to_be32(o << 24 | d << 8), (__be32 *)out);
+ else if (bits <= 32)
+ put_unaligned(cpu_to_be32(o << 24 | d), (__be32 *)out);
+ else if (bits <= 40)
+ put_unaligned(cpu_to_be64(o << 56 | d << 24), (__be64 *)out);
+ else if (bits <= 48)
+ put_unaligned(cpu_to_be64(o << 56 | d << 16), (__be64 *)out);
+ else if (bits <= 56)
+ put_unaligned(cpu_to_be64(o << 56 | d << 8), (__be64 *)out);
+ else
+ put_unaligned(cpu_to_be64(o << 56 | d), (__be64 *)out);
+
+ p->bit += n;
+
+ if (p->bit > 7) {
+ p->out += p->bit / 8;
+ p->olen -= p->bit / 8;
+ p->bit %= 8;
+ }
+
+ return 0;
+}
+
+static int add_template(struct sw842_param *p, u8 c)
+{
+ int ret, i, b = 0;
+ u8 *t = comp_ops[c];
+ bool inv = false;
+
+ if (c >= OPS_MAX)
+ return -EINVAL;
+
+ pr_debug("template %x\n", t[4]);
+
+ ret = add_bits(p, t[4], OP_BITS);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < 4; i++) {
+ pr_debug("op %x\n", t[i]);
+
+ switch (t[i] & OP_AMOUNT) {
+ case OP_AMOUNT_8:
+ if (b)
+ inv = true;
+ else if (t[i] & OP_ACTION_INDEX)
+ ret = add_bits(p, p->index8[0], I8_BITS);
+ else if (t[i] & OP_ACTION_DATA)
+ ret = add_bits(p, p->data8[0], 64);
+ else
+ inv = true;
+ break;
+ case OP_AMOUNT_4:
+ if (b == 2 && t[i] & OP_ACTION_DATA)
+ ret = add_bits(p, get_input_data(p, 2, 32), 32);
+ else if (b != 0 && b != 4)
+ inv = true;
+ else if (t[i] & OP_ACTION_INDEX)
+ ret = add_bits(p, p->index4[b >> 2], I4_BITS);
+ else if (t[i] & OP_ACTION_DATA)
+ ret = add_bits(p, p->data4[b >> 2], 32);
+ else
+ inv = true;
+ break;
+ case OP_AMOUNT_2:
+ if (b != 0 && b != 2 && b != 4 && b != 6)
+ inv = true;
+ if (t[i] & OP_ACTION_INDEX)
+ ret = add_bits(p, p->index2[b >> 1], I2_BITS);
+ else if (t[i] & OP_ACTION_DATA)
+ ret = add_bits(p, p->data2[b >> 1], 16);
+ else
+ inv = true;
+ break;
+ case OP_AMOUNT_0:
+ inv = (b != 8) || !(t[i] & OP_ACTION_NOOP);
+ break;
+ default:
+ inv = true;
+ break;
+ }
+
+ if (ret)
+ return ret;
+
+ if (inv) {
+ pr_err("Invalid templ %x op %d : %x %x %x %x\n",
+ c, i, t[0], t[1], t[2], t[3]);
+ return -EINVAL;
+ }
+
+ b += t[i] & OP_AMOUNT;
+ }
+
+ if (b != 8) {
+ pr_err("Invalid template %x len %x : %x %x %x %x\n",
+ c, b, t[0], t[1], t[2], t[3]);
+ return -EINVAL;
+ }
+
+ if (sw842_template_counts)
+ atomic_inc(&template_count[t[4]]);
+
+ return 0;
+}
+
+static int add_repeat_template(struct sw842_param *p, u8 r)
+{
+ int ret;
+
+ /* repeat param is 0-based */
+ if (!r || --r > REPEAT_BITS_MAX)
+ return -EINVAL;
+
+ ret = add_bits(p, OP_REPEAT, OP_BITS);
+ if (ret)
+ return ret;
+
+ ret = add_bits(p, r, REPEAT_BITS);
+ if (ret)
+ return ret;
+
+ if (sw842_template_counts)
+ atomic_inc(&template_repeat_count);
+
+ return 0;
+}
+
+static int add_short_data_template(struct sw842_param *p, u8 b)
+{
+ int ret, i;
+
+ if (!b || b > SHORT_DATA_BITS_MAX)
+ return -EINVAL;
+
+ ret = add_bits(p, OP_SHORT_DATA, OP_BITS);
+ if (ret)
+ return ret;
+
+ ret = add_bits(p, b, SHORT_DATA_BITS);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < b; i++) {
+ ret = add_bits(p, p->in[i], 8);
+ if (ret)
+ return ret;
+ }
+
+ if (sw842_template_counts)
+ atomic_inc(&template_short_data_count);
+
+ return 0;
+}
+
+static int add_zeros_template(struct sw842_param *p)
+{
+ int ret = add_bits(p, OP_ZEROS, OP_BITS);
+
+ if (ret)
+ return ret;
+
+ if (sw842_template_counts)
+ atomic_inc(&template_zeros_count);
+
+ return 0;
+}
+
+static int add_end_template(struct sw842_param *p)
+{
+ int ret = add_bits(p, OP_END, OP_BITS);
+
+ if (ret)
+ return ret;
+
+ if (sw842_template_counts)
+ atomic_inc(&template_end_count);
+
+ return 0;
+}
+
+static bool check_template(struct sw842_param *p, u8 c)
+{
+ u8 *t = comp_ops[c];
+ int i, match, b = 0;
+
+ if (c >= OPS_MAX)
+ return false;
+
+ for (i = 0; i < 4; i++) {
+ if (t[i] & OP_ACTION_INDEX) {
+ if (t[i] & OP_AMOUNT_2)
+ match = check_index(p, 2, b >> 1);
+ else if (t[i] & OP_AMOUNT_4)
+ match = check_index(p, 4, b >> 2);
+ else if (t[i] & OP_AMOUNT_8)
+ match = check_index(p, 8, 0);
+ else
+ return false;
+ if (!match)
+ return false;
+ }
+
+ b += t[i] & OP_AMOUNT;
+ }
+
+ return true;
+}
+
+static void get_next_data(struct sw842_param *p)
+{
+ p->data8[0] = get_input_data(p, 0, 64);
+ p->data4[0] = get_input_data(p, 0, 32);
+ p->data4[1] = get_input_data(p, 4, 32);
+ p->data2[0] = get_input_data(p, 0, 16);
+ p->data2[1] = get_input_data(p, 2, 16);
+ p->data2[2] = get_input_data(p, 4, 16);
+ p->data2[3] = get_input_data(p, 6, 16);
+}
+
+/* Update the hashtable entries.
+ * Only call this after finding/adding the current template;
+ * the dataN fields for the current 8-byte block must already be updated.
+ */
+static void update_hashtables(struct sw842_param *p)
+{
+ u64 pos = p->in - p->instart;
+ u64 n8 = (pos >> 3) % (1 << I8_BITS);
+ u64 n4 = (pos >> 2) % (1 << I4_BITS);
+ u64 n2 = (pos >> 1) % (1 << I2_BITS);
+
+ replace_hash(p, 8, n8, 0);
+ replace_hash(p, 4, n4, 0);
+ replace_hash(p, 4, n4, 1);
+ replace_hash(p, 2, n2, 0);
+ replace_hash(p, 2, n2, 1);
+ replace_hash(p, 2, n2, 2);
+ replace_hash(p, 2, n2, 3);
+}
+
+/* Find the next template to use, and add it;
+ * the p->dataN fields must already be set for the current 8-byte block.
+ */
+static int process_next(struct sw842_param *p)
+{
+ int ret, i;
+
+ p->index8[0] = INDEX_NOT_CHECKED;
+ p->index4[0] = INDEX_NOT_CHECKED;
+ p->index4[1] = INDEX_NOT_CHECKED;
+ p->index2[0] = INDEX_NOT_CHECKED;
+ p->index2[1] = INDEX_NOT_CHECKED;
+ p->index2[2] = INDEX_NOT_CHECKED;
+ p->index2[3] = INDEX_NOT_CHECKED;
+
+ /* check up to OPS_MAX - 1; last op is our fallback */
+ for (i = 0; i < OPS_MAX - 1; i++) {
+ if (check_template(p, i))
+ break;
+ }
+
+ ret = add_template(p, i);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/**
+ * sw842_compress
+ *
+ * Compress the uncompressed buffer of length @ilen at @in to the output buffer
+ * @out, using no more than @olen bytes, in the 842 compression format.
+ *
+ * Returns: 0 on success, error on failure. The @olen parameter
+ * will contain the number of output bytes written on success, or
+ * 0 on error.
+ */
+int sw842_compress(const u8 *in, unsigned int ilen,
+ u8 *out, unsigned int *olen, void *wmem)
+{
+ struct sw842_param *p = (struct sw842_param *)wmem;
+ int ret;
+ u64 last, next, pad, total;
+ u8 repeat_count = 0;
+ u32 crc;
+
+ BUILD_BUG_ON(sizeof(*p) > SW842_MEM_COMPRESS);
+
+ init_hashtable_nodes(p, 8);
+ init_hashtable_nodes(p, 4);
+ init_hashtable_nodes(p, 2);
+
+ p->in = (u8 *)in;
+ p->instart = p->in;
+ p->ilen = ilen;
+ p->out = out;
+ p->olen = *olen;
+ p->bit = 0;
+
+ total = p->olen;
+
+ *olen = 0;
+
+ /* if using strict mode, we can only compress a multiple of 8 */
+ if (sw842_strict && (ilen % 8)) {
+ pr_err("Using strict mode, can't compress len %d\n", ilen);
+ return -EINVAL;
+ }
+
+ /* let's compress at least 8 bytes, mkay? */
+ if (unlikely(ilen < 8))
+ goto skip_comp;
+
+ /* make initial 'last' different so we don't match the first time */
+ last = ~get_unaligned((u64 *)p->in);
+
+ while (p->ilen > 7) {
+ next = get_unaligned((u64 *)p->in);
+
+ /* must get the next data, as we need to update the hashtable
+ * entries with the new data every time
+ */
+ get_next_data(p);
+
+ /* we don't care about endianness in last or next;
+ * we're just comparing 8 bytes to another 8 bytes,
+ * they're both the same endianness
+ */
+ if (next == last) {
+ /* repeat count bits are 0-based, so we stop at +1 */
+ if (++repeat_count <= REPEAT_BITS_MAX)
+ goto repeat;
+ }
+ if (repeat_count) {
+ ret = add_repeat_template(p, repeat_count);
+ repeat_count = 0;
+ if (next == last) /* reached max repeat bits */
+ goto repeat;
+ }
+
+ if (next == 0)
+ ret = add_zeros_template(p);
+ else
+ ret = process_next(p);
+
+ if (ret)
+ return ret;
+
+repeat:
+ last = next;
+ update_hashtables(p);
+ p->in += 8;
+ p->ilen -= 8;
+ }
+
+ if (repeat_count) {
+ ret = add_repeat_template(p, repeat_count);
+ if (ret)
+ return ret;
+ }
+
+skip_comp:
+ if (p->ilen > 0) {
+ ret = add_short_data_template(p, p->ilen);
+ if (ret)
+ return ret;
+
+ p->in += p->ilen;
+ p->ilen = 0;
+ }
+
+ ret = add_end_template(p);
+ if (ret)
+ return ret;
+
+ /*
+ * crc(0:31) is appended to target data starting with the next
+ * bit after End of stream template.
+ * nx842 calculates the CRC over the data in big-endian format, so we
+ * do the same here so that sw842 decompression can be used for both
+ * kinds of compressed data.
+ */
+ crc = crc32_be(0, in, ilen);
+ ret = add_bits(p, crc, CRC_BITS);
+ if (ret)
+ return ret;
+
+ if (p->bit) {
+ p->out++;
+ p->olen--;
+ p->bit = 0;
+ }
+
+ /* pad compressed length to multiple of 8 */
+ pad = (8 - ((total - p->olen) % 8)) % 8;
+ if (pad) {
+ if (pad > p->olen) /* we were so close! */
+ return -ENOSPC;
+ memset(p->out, 0, pad);
+ p->out += pad;
+ p->olen -= pad;
+ }
+
+ if (unlikely((total - p->olen) > UINT_MAX))
+ return -ENOSPC;
+
+ *olen = total - p->olen;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sw842_compress);
+
+static int __init sw842_init(void)
+{
+ if (sw842_template_counts)
+ sw842_debugfs_create();
+
+ return 0;
+}
+module_init(sw842_init);
+
+static void __exit sw842_exit(void)
+{
+ if (sw842_template_counts)
+ sw842_debugfs_remove();
+}
+module_exit(sw842_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Software 842 Compressor");
+MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
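
The add_bits() routine above packs values MSB-first into the output stream, using single be16/be32/be64 stores for speed. Below is a simplified byte-at-a-time sketch of the same scheme, assuming a zero-initialized output buffer; struct bitwriter and put_bits() are invented names for illustration, not this patch's API.

#include <stdint.h>

struct bitwriter {
	uint8_t *out;		/* current output byte */
	unsigned int bit;	/* bits already used in *out, 0..7 */
};

static void put_bits(struct bitwriter *w, uint64_t d, unsigned int n)
{
	while (n > 0) {
		unsigned int space = 8 - w->bit;		/* free bits in this byte */
		unsigned int take = n < space ? n : space;	/* bits written this round */
		uint8_t piece = (d >> (n - take)) & ((1u << take) - 1);

		w->out[0] |= piece << (space - take);	/* buffer must start zeroed */
		w->bit += take;
		n -= take;
		if (w->bit == 8) {
			w->out++;
			w->bit = 0;
		}
	}
}

int main(void)
{
	uint8_t buf[2] = { 0, 0 };
	struct bitwriter w = { buf, 0 };

	put_bits(&w, 0x19, 5);	/* a 5-bit template code, MSB first */
	put_bits(&w, 0xab, 8);	/* one 8-bit arg */
	/* buf now holds 11001101 01011000, i.e. 0xcd 0x58 */
	return 0;
}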
diff --git a/lib/842/842_debugfs.h b/lib/842/842_debugfs.h
new file mode 100644
index 000000000..4469407c3
--- /dev/null
+++ b/lib/842/842_debugfs.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __842_DEBUGFS_H__
+#define __842_DEBUGFS_H__
+
+#include <linux/debugfs.h>
+
+static bool sw842_template_counts;
+module_param_named(template_counts, sw842_template_counts, bool, 0444);
+
+static atomic_t template_count[OPS_MAX], template_repeat_count,
+ template_zeros_count, template_short_data_count, template_end_count;
+
+static struct dentry *sw842_debugfs_root;
+
+static int __init sw842_debugfs_create(void)
+{
+ umode_t m = S_IRUGO | S_IWUSR;
+ int i;
+
+ if (!debugfs_initialized())
+ return -ENODEV;
+
+ sw842_debugfs_root = debugfs_create_dir(MODULE_NAME, NULL);
+
+ for (i = 0; i < ARRAY_SIZE(template_count); i++) {
+ char name[32];
+
+ snprintf(name, 32, "template_%02x", i);
+ debugfs_create_atomic_t(name, m, sw842_debugfs_root,
+ &template_count[i]);
+ }
+ debugfs_create_atomic_t("template_repeat", m, sw842_debugfs_root,
+ &template_repeat_count);
+ debugfs_create_atomic_t("template_zeros", m, sw842_debugfs_root,
+ &template_zeros_count);
+ debugfs_create_atomic_t("template_short_data", m, sw842_debugfs_root,
+ &template_short_data_count);
+ debugfs_create_atomic_t("template_end", m, sw842_debugfs_root,
+ &template_end_count);
+
+ return 0;
+}
+
+static void __exit sw842_debugfs_remove(void)
+{
+ debugfs_remove_recursive(sw842_debugfs_root);
+}
+
+#endif
diff --git a/lib/842/842_decompress.c b/lib/842/842_decompress.c
new file mode 100644
index 000000000..582085ef8
--- /dev/null
+++ b/lib/842/842_decompress.c
@@ -0,0 +1,417 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * 842 Software Decompression
+ *
+ * Copyright (C) 2015 Dan Streetman, IBM Corp
+ *
+ * See 842.h for details of the 842 compressed format.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define MODULE_NAME "842_decompress"
+
+#include "842.h"
+#include "842_debugfs.h"
+
+/* rolling fifo sizes */
+#define I2_FIFO_SIZE (2 * (1 << I2_BITS))
+#define I4_FIFO_SIZE (4 * (1 << I4_BITS))
+#define I8_FIFO_SIZE (8 * (1 << I8_BITS))
+
+static u8 decomp_ops[OPS_MAX][4] = {
+ { D8, N0, N0, N0 },
+ { D4, D2, I2, N0 },
+ { D4, I2, D2, N0 },
+ { D4, I2, I2, N0 },
+ { D4, I4, N0, N0 },
+ { D2, I2, D4, N0 },
+ { D2, I2, D2, I2 },
+ { D2, I2, I2, D2 },
+ { D2, I2, I2, I2 },
+ { D2, I2, I4, N0 },
+ { I2, D2, D4, N0 },
+ { I2, D4, I2, N0 },
+ { I2, D2, I2, D2 },
+ { I2, D2, I2, I2 },
+ { I2, D2, I4, N0 },
+ { I2, I2, D4, N0 },
+ { I2, I2, D2, I2 },
+ { I2, I2, I2, D2 },
+ { I2, I2, I2, I2 },
+ { I2, I2, I4, N0 },
+ { I4, D4, N0, N0 },
+ { I4, D2, I2, N0 },
+ { I4, I2, D2, N0 },
+ { I4, I2, I2, N0 },
+ { I4, I4, N0, N0 },
+ { I8, N0, N0, N0 }
+};
+
+struct sw842_param {
+ u8 *in;
+ u8 bit;
+ u64 ilen;
+ u8 *out;
+ u8 *ostart;
+ u64 olen;
+};
+
+#define beN_to_cpu(d, s) \
+ ((s) == 2 ? be16_to_cpu(get_unaligned((__be16 *)d)) : \
+ (s) == 4 ? be32_to_cpu(get_unaligned((__be32 *)d)) : \
+ (s) == 8 ? be64_to_cpu(get_unaligned((__be64 *)d)) : \
+ 0)
+
+static int next_bits(struct sw842_param *p, u64 *d, u8 n);
+
+static int __split_next_bits(struct sw842_param *p, u64 *d, u8 n, u8 s)
+{
+ u64 tmp = 0;
+ int ret;
+
+ if (n <= s) {
+ pr_debug("split_next_bits invalid n %u s %u\n", n, s);
+ return -EINVAL;
+ }
+
+ ret = next_bits(p, &tmp, n - s);
+ if (ret)
+ return ret;
+ ret = next_bits(p, d, s);
+ if (ret)
+ return ret;
+ *d |= tmp << s;
+ return 0;
+}
+
+static int next_bits(struct sw842_param *p, u64 *d, u8 n)
+{
+ u8 *in = p->in, b = p->bit, bits = b + n;
+
+ if (n > 64) {
+ pr_debug("next_bits invalid n %u\n", n);
+ return -EINVAL;
+ }
+
+ /* split this up if reading > 8 bytes, or if we're at the end of
+ * the input buffer and would read past the end
+ */
+ if (bits > 64)
+ return __split_next_bits(p, d, n, 32);
+ else if (p->ilen < 8 && bits > 32 && bits <= 56)
+ return __split_next_bits(p, d, n, 16);
+ else if (p->ilen < 4 && bits > 16 && bits <= 24)
+ return __split_next_bits(p, d, n, 8);
+
+ if (DIV_ROUND_UP(bits, 8) > p->ilen)
+ return -EOVERFLOW;
+
+ if (bits <= 8)
+ *d = *in >> (8 - bits);
+ else if (bits <= 16)
+ *d = be16_to_cpu(get_unaligned((__be16 *)in)) >> (16 - bits);
+ else if (bits <= 32)
+ *d = be32_to_cpu(get_unaligned((__be32 *)in)) >> (32 - bits);
+ else
+ *d = be64_to_cpu(get_unaligned((__be64 *)in)) >> (64 - bits);
+
+ *d &= GENMASK_ULL(n - 1, 0);
+
+ p->bit += n;
+
+ if (p->bit > 7) {
+ p->in += p->bit / 8;
+ p->ilen -= p->bit / 8;
+ p->bit %= 8;
+ }
+
+ return 0;
+}
+
+static int do_data(struct sw842_param *p, u8 n)
+{
+ u64 v;
+ int ret;
+
+ if (n > p->olen)
+ return -ENOSPC;
+
+ ret = next_bits(p, &v, n * 8);
+ if (ret)
+ return ret;
+
+ switch (n) {
+ case 2:
+ put_unaligned(cpu_to_be16((u16)v), (__be16 *)p->out);
+ break;
+ case 4:
+ put_unaligned(cpu_to_be32((u32)v), (__be32 *)p->out);
+ break;
+ case 8:
+ put_unaligned(cpu_to_be64((u64)v), (__be64 *)p->out);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ p->out += n;
+ p->olen -= n;
+
+ return 0;
+}
+
+static int __do_index(struct sw842_param *p, u8 size, u8 bits, u64 fsize)
+{
+ u64 index, offset, total = round_down(p->out - p->ostart, 8);
+ int ret;
+
+ ret = next_bits(p, &index, bits);
+ if (ret)
+ return ret;
+
+ offset = index * size;
+
+ /* a ring buffer of fsize is used; correct the offset */
+ if (total > fsize) {
+ /* this is where the current fifo is */
+ u64 section = round_down(total, fsize);
+ /* the current pos in the fifo */
+ u64 pos = total - section;
+
+ /* if the offset is past/at the pos, we need to
+ * go back to the last fifo section
+ */
+ if (offset >= pos)
+ section -= fsize;
+
+ offset += section;
+ }
+
+ if (offset + size > total) {
+ pr_debug("index%x %lx points past end %lx\n", size,
+ (unsigned long)offset, (unsigned long)total);
+ return -EINVAL;
+ }
+
+ if (size != 2 && size != 4 && size != 8)
+ WARN(1, "__do_index invalid size %x\n", size);
+ else
+ pr_debug("index%x to %lx off %lx adjoff %lx tot %lx data %lx\n",
+ size, (unsigned long)index,
+ (unsigned long)(index * size), (unsigned long)offset,
+ (unsigned long)total,
+ (unsigned long)beN_to_cpu(&p->ostart[offset], size));
+
+ memcpy(p->out, &p->ostart[offset], size);
+ p->out += size;
+ p->olen -= size;
+
+ return 0;
+}
+
+static int do_index(struct sw842_param *p, u8 n)
+{
+ switch (n) {
+ case 2:
+ return __do_index(p, 2, I2_BITS, I2_FIFO_SIZE);
+ case 4:
+ return __do_index(p, 4, I4_BITS, I4_FIFO_SIZE);
+ case 8:
+ return __do_index(p, 8, I8_BITS, I8_FIFO_SIZE);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int do_op(struct sw842_param *p, u8 o)
+{
+ int i, ret = 0;
+
+ if (o >= OPS_MAX)
+ return -EINVAL;
+
+ for (i = 0; i < 4; i++) {
+ u8 op = decomp_ops[o][i];
+
+ pr_debug("op is %x\n", op);
+
+ switch (op & OP_ACTION) {
+ case OP_ACTION_DATA:
+ ret = do_data(p, op & OP_AMOUNT);
+ break;
+ case OP_ACTION_INDEX:
+ ret = do_index(p, op & OP_AMOUNT);
+ break;
+ case OP_ACTION_NOOP:
+ break;
+ default:
+ pr_err("Internal error, invalid op %x\n", op);
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+ }
+
+ if (sw842_template_counts)
+ atomic_inc(&template_count[o]);
+
+ return 0;
+}
+
+/**
+ * sw842_decompress
+ *
+ * Decompress the 842-compressed buffer of length @ilen at @in
+ * to the output buffer @out, using no more than @olen bytes.
+ *
+ * The compressed buffer must be only a single 842-compressed buffer,
+ * with the standard format described in the comments in 842.h.
+ * Processing will stop when the 842 "END" template is detected,
+ * not the end of the buffer.
+ *
+ * Returns: 0 on success, error on failure. The @olen parameter
+ * will contain the number of output bytes written on success, or
+ * 0 on error.
+ */
+int sw842_decompress(const u8 *in, unsigned int ilen,
+ u8 *out, unsigned int *olen)
+{
+ struct sw842_param p;
+ int ret;
+ u64 op, rep, tmp, bytes, total;
+ u64 crc;
+
+ p.in = (u8 *)in;
+ p.bit = 0;
+ p.ilen = ilen;
+ p.out = out;
+ p.ostart = out;
+ p.olen = *olen;
+
+ total = p.olen;
+
+ *olen = 0;
+
+ do {
+ ret = next_bits(&p, &op, OP_BITS);
+ if (ret)
+ return ret;
+
+ pr_debug("template is %lx\n", (unsigned long)op);
+
+ switch (op) {
+ case OP_REPEAT:
+ ret = next_bits(&p, &rep, REPEAT_BITS);
+ if (ret)
+ return ret;
+
+ if (p.out == out) /* no previous bytes */
+ return -EINVAL;
+
+ /* copy rep + 1 */
+ rep++;
+
+ if (rep * 8 > p.olen)
+ return -ENOSPC;
+
+ while (rep-- > 0) {
+ memcpy(p.out, p.out - 8, 8);
+ p.out += 8;
+ p.olen -= 8;
+ }
+
+ if (sw842_template_counts)
+ atomic_inc(&template_repeat_count);
+
+ break;
+ case OP_ZEROS:
+ if (8 > p.olen)
+ return -ENOSPC;
+
+ memset(p.out, 0, 8);
+ p.out += 8;
+ p.olen -= 8;
+
+ if (sw842_template_counts)
+ atomic_inc(&template_zeros_count);
+
+ break;
+ case OP_SHORT_DATA:
+ ret = next_bits(&p, &bytes, SHORT_DATA_BITS);
+ if (ret)
+ return ret;
+
+ if (!bytes || bytes > SHORT_DATA_BITS_MAX)
+ return -EINVAL;
+
+ while (bytes-- > 0) {
+ ret = next_bits(&p, &tmp, 8);
+ if (ret)
+ return ret;
+ *p.out = (u8)tmp;
+ p.out++;
+ p.olen--;
+ }
+
+ if (sw842_template_counts)
+ atomic_inc(&template_short_data_count);
+
+ break;
+ case OP_END:
+ if (sw842_template_counts)
+ atomic_inc(&template_end_count);
+
+ break;
+ default: /* use template */
+ ret = do_op(&p, op);
+ if (ret)
+ return ret;
+ break;
+ }
+ } while (op != OP_END);
+
+ /*
+ * crc(0:31) is saved in compressed data starting with the
+ * next bit after End of stream template.
+ */
+ ret = next_bits(&p, &crc, CRC_BITS);
+ if (ret)
+ return ret;
+
+ /*
+ * Validate CRC saved in compressed data.
+ */
+ if (crc != (u64)crc32_be(0, out, total - p.olen)) {
+ pr_debug("CRC mismatch for decompression\n");
+ return -EINVAL;
+ }
+
+ if (unlikely((total - p.olen) > UINT_MAX))
+ return -ENOSPC;
+
+ *olen = total - p.olen;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sw842_decompress);
+
+static int __init sw842_init(void)
+{
+ if (sw842_template_counts)
+ sw842_debugfs_create();
+
+ return 0;
+}
+module_init(sw842_init);
+
+static void __exit sw842_exit(void)
+{
+ if (sw842_template_counts)
+ sw842_debugfs_remove();
+}
+module_exit(sw842_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Software 842 Decompressor");
+MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
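
As a hedged usage sketch of the two exported entry points, sw842_compress() and sw842_decompress(), the following shows a kernel caller round-tripping a buffer. SW842_MEM_COMPRESS is the working-memory size from <linux/sw842.h> that the BUILD_BUG_ON in sw842_compress() checks against; example_roundtrip() and the doubled output-buffer guess are illustrative assumptions (842 output can be larger than its input), not part of the patch.

#include <linux/slab.h>
#include <linux/sw842.h>
#include <linux/types.h>

static int example_roundtrip(const u8 *src, unsigned int src_len)
{
	unsigned int clen = src_len * 2;	/* worst-case guess, sketch only */
	unsigned int dlen = src_len;
	void *wmem;
	u8 *cbuf, *dbuf;
	int ret;

	wmem = kmalloc(SW842_MEM_COMPRESS, GFP_KERNEL);
	cbuf = kmalloc(clen, GFP_KERNEL);
	dbuf = kmalloc(dlen, GFP_KERNEL);
	if (!wmem || !cbuf || !dbuf) {
		ret = -ENOMEM;
		goto out;
	}

	ret = sw842_compress(src, src_len, cbuf, &clen, wmem);
	if (ret)
		goto out;

	ret = sw842_decompress(cbuf, clen, dbuf, &dlen);
	/* on success, dlen == src_len and dbuf matches src */
out:
	kfree(dbuf);
	kfree(cbuf);
	kfree(wmem);
	return ret;
}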
diff --git a/lib/842/Makefile b/lib/842/Makefile
new file mode 100644
index 000000000..6f7aad269
--- /dev/null
+++ b/lib/842/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_842_COMPRESS) += 842_compress.o
+obj-$(CONFIG_842_DECOMPRESS) += 842_decompress.o
diff --git a/lib/Kconfig b/lib/Kconfig
new file mode 100644
index 000000000..c686f4adc
--- /dev/null
+++ b/lib/Kconfig
@@ -0,0 +1,766 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Library configuration
+#
+
+config BINARY_PRINTF
+ def_bool n
+
+menu "Library routines"
+
+config RAID6_PQ
+ tristate
+
+config RAID6_PQ_BENCHMARK
+ bool "Automatically choose fastest RAID6 PQ functions"
+ depends on RAID6_PQ
+ default y
+ help
+ Benchmark all available RAID6 PQ functions on init and choose the
+ fastest one.
+
+config LINEAR_RANGES
+ tristate
+
+config PACKING
+ bool "Generic bitfield packing and unpacking"
+ select BITREVERSE
+ default n
+ help
+ This option provides the packing() helper function, which permits
+ converting bitfields between a CPU-usable representation and a
+ memory representation that can have any combination of these quirks:
+ - Is little endian (bytes are reversed within a 32-bit group)
+ - The least-significant 32-bit word comes first (within a 64-bit
+ group)
+ - The most significant bit of a byte is at its right (bit 0 of a
+ register description is numerically 2^7).
+ Drivers may use these helpers to match the bit indices as described
+ in the data sheets of the peripherals they are in control of.
+
+ When in doubt, say N.
+
+config BITREVERSE
+ tristate
+
+config HAVE_ARCH_BITREVERSE
+ bool
+ default n
+ help
+ This option enables the use of hardware bit-reversal instructions on
+ architectures which support such operations.
+
+config ARCH_HAS_STRNCPY_FROM_USER
+ bool
+
+config ARCH_HAS_STRNLEN_USER
+ bool
+
+config GENERIC_STRNCPY_FROM_USER
+ def_bool !ARCH_HAS_STRNCPY_FROM_USER
+
+config GENERIC_STRNLEN_USER
+ def_bool !ARCH_HAS_STRNLEN_USER
+
+config GENERIC_NET_UTILS
+ bool
+
+source "lib/math/Kconfig"
+
+config NO_GENERIC_PCI_IOPORT_MAP
+ bool
+
+config GENERIC_PCI_IOMAP
+ bool
+
+config GENERIC_IOMAP
+ bool
+ select GENERIC_PCI_IOMAP
+
+config STMP_DEVICE
+ bool
+
+config ARCH_USE_CMPXCHG_LOCKREF
+ bool
+
+config ARCH_HAS_FAST_MULTIPLIER
+ bool
+
+config ARCH_USE_SYM_ANNOTATIONS
+ bool
+
+config INDIRECT_PIO
+ bool "Access I/O in non-MMIO mode"
+ depends on ARM64
+ depends on HAS_IOPORT
+ help
+ On some platforms where no separate I/O space exists, there are I/O
+ hosts which cannot be accessed in MMIO mode. Using the logical PIO
+ mechanism, the host-local I/O resource can be mapped into the system's
+ logical PIO space shared with MMIO hosts, such as PCI/PCIe, so the
+ system can access the I/O devices with the mapped logical PIO through
+ I/O accessors.
+
+ This approach has relatively little I/O performance cost. Please
+ make sure your devices really need this config item enabled.
+
+ When in doubt, say N.
+
+config INDIRECT_IOMEM
+ bool
+ help
+ This is selected by other options/architectures to provide the
+ emulated iomem accessors.
+
+config INDIRECT_IOMEM_FALLBACK
+ bool
+ depends on INDIRECT_IOMEM
+ help
+ If INDIRECT_IOMEM is selected, this enables falling back to plain
+ mmio accesses when the IO memory address is not a registered
+ emulated region.
+
+config TRACE_MMIO_ACCESS
+ bool "Register read/write tracing"
+ depends on TRACING && ARCH_HAVE_TRACE_MMIO_ACCESS
+ help
+ Create tracepoints for MMIO read/write operations. These trace events
+ can be used for logging all MMIO read/write operations.
+
+source "lib/crypto/Kconfig"
+
+config CRC_CCITT
+ tristate "CRC-CCITT functions"
+ help
+ This option is provided for the case where no in-kernel-tree
+ modules require CRC-CCITT functions, but a module built outside
+ the kernel tree does. Such modules that use library CRC-CCITT
+ functions require M here.
+
+config CRC16
+ tristate "CRC16 functions"
+ help
+ This option is provided for the case where no in-kernel-tree
+ modules require CRC16 functions, but a module built outside
+ the kernel tree does. Such modules that use library CRC16
+ functions require M here.
+
+config CRC_T10DIF
+ tristate "CRC calculation for the T10 Data Integrity Field"
+ select CRYPTO
+ select CRYPTO_CRCT10DIF
+ help
+ This option is only needed if a module that's not in the
+ kernel tree needs to calculate CRC checks for use with the
+ SCSI data integrity subsystem.
+
+config CRC64_ROCKSOFT
+ tristate "CRC calculation for the Rocksoft model CRC64"
+ select CRC64
+ select CRYPTO
+ select CRYPTO_CRC64_ROCKSOFT
+ help
+ This option provides a CRC64 API to a registered crypto driver.
+ This is used with the block layer's data integrity subsystem.
+
+config CRC_ITU_T
+ tristate "CRC ITU-T V.41 functions"
+ help
+ This option is provided for the case where no in-kernel-tree
+ modules require CRC ITU-T V.41 functions, but a module built outside
+ the kernel tree does. Such modules that use library CRC ITU-T V.41
+ functions require M here.
+
+config CRC32
+ tristate "CRC32/CRC32c functions"
+ default y
+ select BITREVERSE
+ help
+ This option is provided for the case where no in-kernel-tree
+ modules require CRC32/CRC32c functions, but a module built outside
+ the kernel tree does. Such modules that use library CRC32/CRC32c
+ functions require M here.
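+
+ A typical in-kernel call looks like the sketch below (the all-ones
+ seed and final inversion follow the common Ethernet/zlib convention;
+ 'buf' and 'len' are hypothetical):
+
+     #include <linux/crc32.h>
+
+     u32 crc = crc32_le(~0, buf, len) ^ ~0;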
+
+config CRC32_SELFTEST
+ tristate "CRC32 perform self test on init"
+ depends on CRC32
+ help
+ This option enables the CRC32 library functions to perform a
+ self test on initialization. The self test computes crc32_le
+ and crc32_be over byte strings with random alignment and length
+ and computes the total elapsed time and number of bytes processed.
+
+choice
+ prompt "CRC32 implementation"
+ depends on CRC32
+ default CRC32_SLICEBY8
+ help
+ This option allows a kernel builder to override the default choice
+ of CRC32 algorithm. Choose the default ("slice by 8") unless you
+ know that you need one of the others.
+
+config CRC32_SLICEBY8
+ bool "Slice by 8 bytes"
+ help
+ Calculate checksum 8 bytes at a time with a clever slicing algorithm.
+ This is the fastest algorithm, but comes with an 8KiB lookup table
+ (eight 256-entry tables of 32-bit words).
+ Most modern processors have enough cache to hold this table without
+ thrashing the cache.
+
+ This is the default implementation choice. Choose this one unless
+ you have a good reason not to.
+
+config CRC32_SLICEBY4
+ bool "Slice by 4 bytes"
+ help
+ Calculate checksum 4 bytes at a time with a clever slicing algorithm.
+ This is a bit slower than slice by 8, but has a smaller 4KiB lookup
+ table.
+
+ Only choose this option if you know what you are doing.
+
+config CRC32_SARWATE
+ bool "Sarwate's Algorithm (one byte at a time)"
+ help
+ Calculate checksum a byte at a time using Sarwate's algorithm. This
+ is not particularly fast, but has a small 256-entry (1KiB) lookup table.
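+
+ The update step is the classic table-driven loop (a sketch for the
+ reflected polynomial; 'table' is the 256-entry lookup table):
+
+     while (len--)
+             crc = table[(crc ^ *p++) & 0xff] ^ (crc >> 8);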
+
+ Only choose this option if you know what you are doing.
+
+config CRC32_BIT
+ bool "Classic Algorithm (one bit at a time)"
+ help
+ Calculate checksum one bit at a time. This is VERY slow, but has
+ no lookup table. This is provided as a debugging option.
+
+ Only choose this option if you are debugging crc32.
+
+endchoice
+
+config CRC64
+ tristate "CRC64 functions"
+ help
+ This option is provided for the case where no in-kernel-tree
+ modules require CRC64 functions, but a module built outside
+ the kernel tree does. Such modules that use library CRC64
+ functions require M here.
+
+config CRC4
+ tristate "CRC4 functions"
+ help
+ This option is provided for the case where no in-kernel-tree
+ modules require CRC4 functions, but a module built outside
+ the kernel tree does. Such modules that use library CRC4
+ functions require M here.
+
+config CRC7
+ tristate "CRC7 functions"
+ help
+ This option is provided for the case where no in-kernel-tree
+ modules require CRC7 functions, but a module built outside
+ the kernel tree does. Such modules that use library CRC7
+ functions require M here.
+
+config LIBCRC32C
+ tristate "CRC32c (Castagnoli, et al) Cyclic Redundancy-Check"
+ select CRYPTO
+ select CRYPTO_CRC32C
+ help
+ This option is provided for the case where no in-kernel-tree
+ modules require CRC32c functions, but a module built outside the
+ kernel tree does. Such modules that use library CRC32c functions
+ require M here. See Castagnoli93.
+ Module will be libcrc32c.
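+
+ Sketch of a typical call (seed and final-XOR conventions vary
+ between users of CRC32c; 'data' and 'len' are hypothetical):
+
+     #include <linux/crc32c.h>
+
+     u32 csum = crc32c(~0, data, len);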
+
+config CRC8
+ tristate "CRC8 function"
+ help
+ This option provides the CRC8 function. Drivers may select this
+ when they need to perform a cyclic redundancy check using the
+ CRC8 algorithm. The module will be called crc8.
+
+config XXHASH
+ tristate
+
+config AUDIT_GENERIC
+ bool
+ depends on AUDIT && !AUDIT_ARCH
+ default y
+
+config AUDIT_ARCH_COMPAT_GENERIC
+ bool
+ default n
+
+config AUDIT_COMPAT_GENERIC
+ bool
+ depends on AUDIT_GENERIC && AUDIT_ARCH_COMPAT_GENERIC && COMPAT
+ default y
+
+config RANDOM32_SELFTEST
+ bool "PRNG perform self test on init"
+ help
+ This option enables the 32-bit PRNG library functions to perform a
+ self test on initialization.
+
+#
+# compression support is select'ed if needed
+#
+config 842_COMPRESS
+ select CRC32
+ tristate
+
+config 842_DECOMPRESS
+ select CRC32
+ tristate
+
+config ZLIB_INFLATE
+ tristate
+
+config ZLIB_DEFLATE
+ tristate
+ select BITREVERSE
+
+config ZLIB_DFLTCC
+ def_bool y
+ depends on S390
+ prompt "Enable s390x DEFLATE CONVERSION CALL support for kernel zlib"
+ help
+ Enable s390x hardware support for zlib in the kernel.
+
+config LZO_COMPRESS
+ tristate
+
+config LZO_DECOMPRESS
+ tristate
+
+config LZ4_COMPRESS
+ tristate
+
+config LZ4HC_COMPRESS
+ tristate
+
+config LZ4_DECOMPRESS
+ tristate
+
+config ZSTD_COMMON
+ select XXHASH
+ tristate
+
+config ZSTD_COMPRESS
+ select ZSTD_COMMON
+ tristate
+
+config ZSTD_DECOMPRESS
+ select ZSTD_COMMON
+ tristate
+
+source "lib/xz/Kconfig"
+
+#
+# These all provide a common interface (hence the apparent duplication with
+# ZLIB_INFLATE; DECOMPRESS_GZIP is just a wrapper.)
+#
+config DECOMPRESS_GZIP
+ select ZLIB_INFLATE
+ tristate
+
+config DECOMPRESS_BZIP2
+ tristate
+
+config DECOMPRESS_LZMA
+ tristate
+
+config DECOMPRESS_XZ
+ select XZ_DEC
+ tristate
+
+config DECOMPRESS_LZO
+ select LZO_DECOMPRESS
+ tristate
+
+config DECOMPRESS_LZ4
+ select LZ4_DECOMPRESS
+ tristate
+
+config DECOMPRESS_ZSTD
+ select ZSTD_DECOMPRESS
+ tristate
+
+#
+# Generic allocator support is selected if needed
+#
+config GENERIC_ALLOCATOR
+ bool
+
+#
+# reed solomon support is select'ed if needed
+#
+config REED_SOLOMON
+ tristate
+
+config REED_SOLOMON_ENC8
+ bool
+
+config REED_SOLOMON_DEC8
+ bool
+
+config REED_SOLOMON_ENC16
+ bool
+
+config REED_SOLOMON_DEC16
+ bool
+
+#
+# BCH support is selected if needed
+#
+config BCH
+ tristate
+ select BITREVERSE
+
+config BCH_CONST_PARAMS
+ bool
+ help
+ Drivers may select this option to force specific constant
+ values for parameters 'm' (Galois field order) and 't'
+ (error correction capability). Those specific values must
+ be set by declaring default values for symbols BCH_CONST_M
+ and BCH_CONST_T.
+ Doing so will enable extra compiler optimizations,
+ improving encoding and decoding performance up to 2x for
+ usual (m,t) values (typically such that m*t < 200).
+ When this option is selected, the BCH library supports
+ only a single (m,t) configuration. This is mainly useful
+ for NAND flash board drivers requiring known, fixed BCH
+ parameters.
+
+config BCH_CONST_M
+ int
+ range 5 15
+ help
+ Constant value for Galois field order 'm'. If 'k' is the
+ number of data bits to protect, 'm' should be chosen such
+ that (k + m*t) <= 2**m - 1.
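+
+ For example (hypothetical NAND layout): protecting k = 4096 data
+ bits (a 512-byte sector) with t = 4 needs m*t = 52 parity bits,
+ and m = 13 satisfies 4096 + 52 <= 2**13 - 1 = 8191.
+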
+ Drivers should declare a default value for this symbol if
+ they select option BCH_CONST_PARAMS.
+
+config BCH_CONST_T
+ int
+ help
+ Constant value for error correction capability in bits 't'.
+ Drivers should declare a default value for this symbol if
+ they select option BCH_CONST_PARAMS.
+
+#
+# Textsearch support is select'ed if needed
+#
+config TEXTSEARCH
+ bool
+
+config TEXTSEARCH_KMP
+ tristate
+
+config TEXTSEARCH_BM
+ tristate
+
+config TEXTSEARCH_FSM
+ tristate
+
+config BTREE
+ bool
+
+config INTERVAL_TREE
+ bool
+ help
+ Simple, embeddable interval tree. Can find the start of an
+ overlapping range in log(n) time and then iterate over all
+ overlapping nodes. The algorithm is implemented as an
+ augmented rbtree.
+
+ See:
+
+ Documentation/core-api/rbtree.rst
+
+ for more information.
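+
+ A minimal sketch using the generic flavour from
+ include/linux/interval_tree.h:
+
+     struct rb_root_cached root = RB_ROOT_CACHED;
+     struct interval_tree_node node = { .start = 100, .last = 199 };
+     struct interval_tree_node *it;
+
+     interval_tree_insert(&node, &root);
+
+     /* Visit every node overlapping [150, 300]. */
+     for (it = interval_tree_iter_first(&root, 150, 300); it;
+          it = interval_tree_iter_next(it, 150, 300))
+             ; /* use it->start, it->last */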
+
+config INTERVAL_TREE_SPAN_ITER
+ bool
+ depends on INTERVAL_TREE
+
+config XARRAY_MULTI
+ bool
+ help
+ Support entries which occupy multiple consecutive indices in the
+ XArray.
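+
+ A sketch of what this enables ('entry' is an assumed pointer;
+ xa_store_range() is only built with XARRAY_MULTI):
+
+     DEFINE_XARRAY(xa);
+
+     /* One entry covers indices 64..127; a lookup anywhere in
+      * that range returns the same pointer. */
+     xa_store_range(&xa, 64, 127, entry, GFP_KERNEL);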
+
+config ASSOCIATIVE_ARRAY
+ bool
+ help
+ Generic associative array. Can be searched and iterated over whilst
+ it is being modified. It is also reasonably quick to search and
+ modify. The algorithms are non-recursive, and the trees are highly
+ capacious.
+
+ See:
+
+ Documentation/core-api/assoc_array.rst
+
+ for more information.
+
+config HAS_IOMEM
+ bool
+ depends on !NO_IOMEM
+ default y
+
+config HAS_IOPORT
+ bool
+
+config HAS_IOPORT_MAP
+ bool
+ depends on HAS_IOMEM && !NO_IOPORT_MAP
+ default y
+
+source "kernel/dma/Kconfig"
+
+config SGL_ALLOC
+ bool
+ default n
+
+config IOMMU_HELPER
+ bool
+
+config CHECK_SIGNATURE
+ bool
+
+config CPUMASK_OFFSTACK
+ bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS
+ help
+ Use dynamic allocation for cpumask_var_t, instead of putting
+ them on the stack. This is a bit more expensive, but avoids
+ stack overflow.
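+
+ A sketch of the cpumask_var_t pattern this option affects (with
+ CPUMASK_OFFSTACK=y the mask becomes a heap allocation):
+
+     cpumask_var_t mask;
+
+     if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+             return -ENOMEM;
+     cpumask_copy(mask, cpu_online_mask);
+     /* ... */
+     free_cpumask_var(mask);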
+
+config FORCE_NR_CPUS
+ bool "Set number of CPUs at compile time"
+ depends on SMP && EXPERT && !COMPILE_TEST
+ help
+ Say Yes if you have NR_CPUS set to an actual number of possible
+ CPUs in your system, not to a default value. This forces the core
+ code to rely on compile-time value and optimize kernel routines
+ better.
+
+config CPU_RMAP
+ bool
+ depends on SMP
+
+config DQL
+ bool
+
+config GLOB
+ bool
+# This actually supports modular compilation, but the module overhead
+# is ridiculous for the amount of code involved. Until an out-of-tree
+# driver asks for it, we'll just link it directly into the kernel
+# when required. Since we're ignoring out-of-tree users, there's also
+# no need to bother prompting for a manual decision:
+# prompt "glob_match() function"
+ help
+ This option provides a glob_match function for performing
+ simple text pattern matching. It originated in the ATA code
+ to blacklist particular drive models, but other device drivers
+ may need similar functionality.
+
+ All drivers in the Linux kernel tree that require this function
+ should automatically select this option. Say N unless you
+ are compiling an out-of-tree driver which tells you that it
+ depends on this.
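+
+ Sketch of a call (reject_device() is a made-up helper):
+
+     #include <linux/glob.h>
+
+     if (glob_match("Foo*Bar?", model))
+             reject_device();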
+
+config GLOB_SELFTEST
+ tristate "glob self-test on init"
+ depends on GLOB
+ help
+ This option enables a simple self-test of the glob_match
+ function on startup. It is primarily useful for people
+ working on the code to ensure they haven't introduced any
+ regressions.
+
+ It only adds a little bit of code and slows kernel boot (or
+ module load) by a small amount, so you're welcome to play with
+ it, but you probably don't need it.
+
+#
+# Netlink attribute parsing support is select'ed if needed
+#
+config NLATTR
+ bool
+
+#
+# Generic 64-bit atomic support is selected if needed
+#
+config GENERIC_ATOMIC64
+ bool
+
+config LRU_CACHE
+ tristate
+
+config CLZ_TAB
+ bool
+
+config IRQ_POLL
+ bool "IRQ polling library"
+ help
+ Helper library for implementing interrupt mitigation using polling.
+
+config MPILIB
+ tristate
+ select CLZ_TAB
+ help
+ Multiprecision maths library from GnuPG.
+ It is used to implement RSA digital signature verification,
+ which is used by the IMA/EVM digital signature extensions.
+
+config SIGNATURE
+ tristate
+ depends on KEYS
+ select CRYPTO
+ select CRYPTO_SHA1
+ select MPILIB
+ help
+ Digital signature verification. Currently only RSA is supported.
+ The implementation uses the GnuPG MPI library.
+
+config DIMLIB
+ bool
+ help
+ Dynamic Interrupt Moderation library.
+ Implements an algorithm for dynamically changing CQ moderation values
+ according to run time performance.
+
+#
+# libfdt files, only selected if needed.
+#
+config LIBFDT
+ bool
+
+config OID_REGISTRY
+ tristate
+ help
+ Enable the fast-lookup object identifier (OID) registry.
+
+config UCS2_STRING
+ tristate
+
+#
+# generic vdso
+#
+source "lib/vdso/Kconfig"
+
+source "lib/fonts/Kconfig"
+
+config SG_SPLIT
+ def_bool n
+ help
+ Provides a helper to split scatterlists into chunks, each chunk being
+ a scatterlist. This should be selected by a driver or an API which
+ wishes to split a scatterlist amongst multiple DMA channels.
+
+config SG_POOL
+ def_bool n
+ help
+ Provides a helper to allocate chained scatterlists. This should be
+ selected by a driver or an API which wishes to allocate chained
+ scatterlists.
+
+#
+# sg chaining option
+#
+
+config ARCH_NO_SG_CHAIN
+ def_bool n
+
+config ARCH_HAS_PMEM_API
+ bool
+
+config MEMREGION
+ bool
+
+config ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION
+ bool
+
+config ARCH_HAS_MEMREMAP_COMPAT_ALIGN
+ bool
+
+# use memcpy to implement user copies for nommu architectures
+config UACCESS_MEMCPY
+ bool
+
+config ARCH_HAS_UACCESS_FLUSHCACHE
+ bool
+
+# arch has a concept of a recoverable synchronous exception due to a
+# memory-read error like x86 machine-check or ARM data-abort, and
+# implements copy_mc_to_{user,kernel} to abort and report
+# 'bytes-transferred' if that exception fires when accessing the source
+# buffer.
+config ARCH_HAS_COPY_MC
+ bool
+
+# Temporary. Goes away when all archs are cleaned up
+config ARCH_STACKWALK
+ bool
+
+config STACKDEPOT
+ bool
+ select STACKTRACE
+
+config STACKDEPOT_ALWAYS_INIT
+ bool
+ select STACKDEPOT
+
+config REF_TRACKER
+ bool
+ depends on STACKTRACE_SUPPORT
+ select STACKDEPOT
+
+config SBITMAP
+ bool
+
+config PARMAN
+ tristate "parman" if COMPILE_TEST
+
+config OBJAGG
+ tristate "objagg" if COMPILE_TEST
+
+endmenu
+
+config GENERIC_IOREMAP
+ bool
+
+config GENERIC_LIB_ASHLDI3
+ bool
+
+config GENERIC_LIB_ASHRDI3
+ bool
+
+config GENERIC_LIB_LSHRDI3
+ bool
+
+config GENERIC_LIB_MULDI3
+ bool
+
+config GENERIC_LIB_CMPDI2
+ bool
+
+config GENERIC_LIB_UCMPDI2
+ bool
+
+config GENERIC_LIB_DEVMEM_IS_ALLOWED
+ bool
+
+config PLDMFW
+ bool
+ default n
+
+config ASN1_ENCODER
+ tristate
+
+config POLYNOMIAL
+ tristate
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
new file mode 100644
index 000000000..1331f3186
--- /dev/null
+++ b/lib/Kconfig.debug
@@ -0,0 +1,3027 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menu "Kernel hacking"
+
+menu "printk and dmesg options"
+
+config PRINTK_TIME
+ bool "Show timing information on printks"
+ depends on PRINTK
+ help
+ Selecting this option causes time stamps of the printk()
+ messages to be added to the output of the syslog() system
+ call and at the console.
+
+ The timestamp is always recorded internally, and exported
+ to /dev/kmsg. This flag just specifies if the timestamp should
+ be included, not that the timestamp is recorded.
+
+ The behavior is also controlled by the kernel command line
+ parameter printk.time=1. See Documentation/admin-guide/kernel-parameters.rst
+
+config PRINTK_CALLER
+ bool "Show caller information on printks"
+ depends on PRINTK
+ help
+ Selecting this option causes printk() to add a caller "thread id" (if
+ in task context) or a caller "processor id" (if not in task context)
+ to every message.
+
+ This option is intended for environments where multiple threads
+ call printk() concurrently, since the output is difficult to
+ interpret without knowing where each line (or an individual line
+ that was split into multiple lines by a race) came from.
+
+ Since toggling after boot makes the code racy, there is currently
+ no way to enable/disable this via a kernel command line parameter
+ or a sysfs interface.
+
+config STACKTRACE_BUILD_ID
+ bool "Show build ID information in stacktraces"
+ depends on PRINTK
+ help
+ Selecting this option adds build ID information for symbols in
+ stacktraces printed with the printk format '%p[SR]b'.
+
+ This option is intended for distros where debuginfo is not easily
+ accessible but can be downloaded given the build ID of the vmlinux or
+ kernel module where the function is located.
+
+config CONSOLE_LOGLEVEL_DEFAULT
+ int "Default console loglevel (1-15)"
+ range 1 15
+ default "7"
+ help
+ Default loglevel to determine what will be printed on the console.
+
+ Setting a default here is equivalent to passing in loglevel=<x> in
+ the kernel bootargs. loglevel=<x> continues to override whatever
+ value is specified here as well.
+
+ Note: This does not affect the log level of un-prefixed printk()
+ usage in the kernel. That is controlled by the MESSAGE_LOGLEVEL_DEFAULT
+ option.
+
+config CONSOLE_LOGLEVEL_QUIET
+ int "quiet console loglevel (1-15)"
+ range 1 15
+ default "4"
+ help
+ loglevel to use when "quiet" is passed on the kernel commandline.
+
+ When "quiet" is passed on the kernel commandline this loglevel
+ will be used as the loglevel. IOW passing "quiet" will be the
+ equivalent of passing "loglevel=<CONSOLE_LOGLEVEL_QUIET>"
+
+config MESSAGE_LOGLEVEL_DEFAULT
+ int "Default message log level (1-7)"
+ range 1 7
+ default "4"
+ help
+ Default log level for printk statements with no specified priority.
+
+ This was hard-coded to KERN_WARNING since at least 2.6.10 but folks
+ that are auditing their logs closely may want to set it to a lower
+ priority.
+
+ Note: This does not affect what message level gets printed on the console
+ by default. To change that, use loglevel=<x> in the kernel bootargs,
+ or pick a different CONSOLE_LOGLEVEL_DEFAULT configuration value.
+
+config BOOT_PRINTK_DELAY
+ bool "Delay each boot printk message by N milliseconds"
+ depends on DEBUG_KERNEL && PRINTK && GENERIC_CALIBRATE_DELAY
+ help
+ This build option allows you to read kernel boot messages
+ by inserting a short delay after each one. The delay is
+ specified in milliseconds on the kernel command line,
+ using "boot_delay=N".
+
+ It is likely that you would also need to use "lpj=M" to preset
+ the "loops per jiffie" value.
+ See a previous boot log for the "lpj" value to use for your
+ system, and then set "lpj=M" before setting "boot_delay=N".
+ NOTE: Using this option may adversely affect SMP systems.
+ I.e., processors other than the first one may not boot up.
+ BOOT_PRINTK_DELAY also may cause LOCKUP_DETECTOR to detect
+ what it believes to be lockup conditions.
+
+config DYNAMIC_DEBUG
+ bool "Enable dynamic printk() support"
+ default n
+ depends on PRINTK
+ depends on (DEBUG_FS || PROC_FS)
+ select DYNAMIC_DEBUG_CORE
+ help
+ Compiles debug level messages into the kernel, which would not
+ otherwise be available at runtime. These messages can then be
+ enabled/disabled based on various levels of scope - per source file,
+ function, module, format string, and line number. This mechanism
+ implicitly compiles in all pr_debug() and dev_dbg() calls, which
+ enlarges the kernel text size by about 2%.
+
+ If a source file is compiled with DEBUG flag set, any
+ pr_debug() calls in it are enabled by default, but can be
+ disabled at runtime as below. Note that DEBUG flag is
+ turned on by many CONFIG_*DEBUG* options.
+
+ Usage:
+
+ Dynamic debugging is controlled via the 'dynamic_debug/control' file,
+ which is contained in the 'debugfs' filesystem or procfs.
+ Thus, the debugfs or procfs filesystem must first be mounted before
+ making use of this feature.
+ We refer to the control file as: <debugfs>/dynamic_debug/control. This
+ file contains a list of the debug statements that can be enabled. The
+ format for each line of the file is:
+
+ filename:lineno [module]function flags format
+
+ filename : source file of the debug statement
+ lineno : line number of the debug statement
+ module : module that contains the debug statement
+ function : function that contains the debug statement
+ flags : '=p' means the line is turned 'on' for printing
+ format : the format used for the debug statement
+
+ From a live system:
+
+ nullarbor:~ # cat <debugfs>/dynamic_debug/control
+ # filename:lineno [module]function flags format
+ fs/aio.c:222 [aio]__put_ioctx =_ "__put_ioctx:\040freeing\040%p\012"
+ fs/aio.c:248 [aio]ioctx_alloc =_ "ENOMEM:\040nr_events\040too\040high\012"
+ fs/aio.c:1770 [aio]sys_io_cancel =_ "calling\040cancel\012"
+
+ Example usage:
+
+ // enable the message at line 1603 of file svcsock.c
+ nullarbor:~ # echo -n 'file svcsock.c line 1603 +p' >
+ <debugfs>/dynamic_debug/control
+
+ // enable all the messages in file svcsock.c
+ nullarbor:~ # echo -n 'file svcsock.c +p' >
+ <debugfs>/dynamic_debug/control
+
+ // enable all the messages in the NFS server module
+ nullarbor:~ # echo -n 'module nfsd +p' >
+ <debugfs>/dynamic_debug/control
+
+ // enable all 12 messages in the function svc_process()
+ nullarbor:~ # echo -n 'func svc_process +p' >
+ <debugfs>/dynamic_debug/control
+
+ // disable all 12 messages in the function svc_process()
+ nullarbor:~ # echo -n 'func svc_process -p' >
+ <debugfs>/dynamic_debug/control
+
+ See Documentation/admin-guide/dynamic-debug-howto.rst for additional
+ information.
+
+config DYNAMIC_DEBUG_CORE
+ bool "Enable core function of dynamic debug support"
+ depends on PRINTK
+ depends on (DEBUG_FS || PROC_FS)
+ help
+ Enable the core functionality of dynamic debug. This is useful
+ when you want to tie dynamic debug to your kernel modules with
+ DYNAMIC_DEBUG_MODULE defined for each of them, especially on
+ embedded systems where kernel image size is a concern.
+
+config SYMBOLIC_ERRNAME
+ bool "Support symbolic error names in printf"
+ default y if PRINTK
+ help
+ If you say Y here, the kernel's printf implementation will
+ be able to print symbolic error names such as ENOSPC instead
+ of the number 28. It makes the kernel image slightly larger
+ (about 3KB), but can make the kernel logs easier to read.
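+
+ For example, with this option enabled a printk like
+
+     pr_err("write failed: %pe\n", ERR_PTR(-ENOSPC));
+
+ prints "write failed: -ENOSPC"; without it, the numeric value is
+ printed instead.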
+
+config DEBUG_BUGVERBOSE
+ bool "Verbose BUG() reporting (adds 70K)" if DEBUG_KERNEL && EXPERT
+ depends on BUG && (GENERIC_BUG || HAVE_DEBUG_BUGVERBOSE)
+ default y
+ help
+ Say Y here to make BUG() panics output the file name and line number
+ of the BUG call as well as the EIP and oops trace. This aids
+ debugging but costs about 70-100K of memory.
+
+endmenu # "printk and dmesg options"
+
+config DEBUG_KERNEL
+ bool "Kernel debugging"
+ help
+ Say Y here if you are developing drivers or trying to debug and
+ identify kernel problems.
+
+config DEBUG_MISC
+ bool "Miscellaneous debug code"
+ default DEBUG_KERNEL
+ depends on DEBUG_KERNEL
+ help
+ Say Y here if you need to enable miscellaneous debug code that should
+ be under a more specific debug option but isn't.
+
+menu "Compile-time checks and compiler options"
+
+config DEBUG_INFO
+ bool
+ help
+ A kernel debug info option other than "None" has been selected
+ in the "Debug information" choice below, indicating that debug
+ information will be generated for build targets.
+
+# Clang is known to generate .{s,u}leb128 with symbol deltas with DWARF5, which
+# some targets may not support: https://sourceware.org/bugzilla/show_bug.cgi?id=27215
+config AS_HAS_NON_CONST_LEB128
+ def_bool $(as-instr,.uleb128 .Lexpr_end4 - .Lexpr_start3\n.Lexpr_start3:\n.Lexpr_end4:)
+
+choice
+ prompt "Debug information"
+ depends on DEBUG_KERNEL
+ help
+ Selecting something other than "None" results in a kernel image
+ that will include debugging info resulting in a larger kernel image.
+ This adds debug symbols to the kernel and modules (gcc -g), and
+ is needed if you intend to use kernel crashdump or binary object
+ tools like crash, kgdb, LKCD, gdb, etc on the kernel.
+
+ Choose which version of DWARF debug info to emit. If unsure,
+ select "Toolchain default".
+
+config DEBUG_INFO_NONE
+ bool "Disable debug information"
+ help
+ Do not build the kernel with debugging information, which will
+ result in a faster and smaller build.
+
+config DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT
+ bool "Rely on the toolchain's implicit default DWARF version"
+ select DEBUG_INFO
+ depends on !CC_IS_CLANG || AS_IS_LLVM || CLANG_VERSION < 140000 || (AS_IS_GNU && AS_VERSION >= 23502 && AS_HAS_NON_CONST_LEB128)
+ help
+ The implicit default version of DWARF debug info produced by a
+ toolchain changes over time.
+
+ This can break consumers of the debug info that haven't upgraded to
+ support newer revisions, and prevent testing newer versions, but
+ those should be less common scenarios.
+
+config DEBUG_INFO_DWARF4
+ bool "Generate DWARF Version 4 debuginfo"
+ select DEBUG_INFO
+ depends on !CC_IS_CLANG || AS_IS_LLVM || (AS_IS_GNU && AS_VERSION >= 23502)
+ help
+ Generate DWARF v4 debug info. This requires gcc 4.5+, binutils 2.35.2
+ if using clang without clang's integrated assembler, and gdb 7.0+.
+
+ If you have consumers of DWARF debug info that are not ready for
+ newer revisions of DWARF, you may wish to choose this or have your
+ config select this.
+
+config DEBUG_INFO_DWARF5
+ bool "Generate DWARF Version 5 debuginfo"
+ select DEBUG_INFO
+ depends on !CC_IS_CLANG || AS_IS_LLVM || (AS_IS_GNU && AS_VERSION >= 23502 && AS_HAS_NON_CONST_LEB128)
+ help
+ Generate DWARF v5 debug info. Requires binutils 2.35.2, gcc 5.0+ (gcc
+ 5.0+ accepts the -gdwarf-5 flag but only had partial support for some
+ draft features until 7.0), and gdb 8.0+.
+
+ Changes to the structure of debug info in Version 5 allow for around
+ 15-18% savings in resulting image and debug info section sizes as
+ compared to DWARF Version 4. DWARF Version 5 standardizes previous
+ extensions such as accelerators for symbol indexing and the format
+ for fission (.dwo/.dwp) files. Users may not want to select this
+ config if they rely on tooling that has not yet been updated to
+ support DWARF Version 5.
+
+endchoice # "Debug information"
+
+if DEBUG_INFO
+
+config DEBUG_INFO_REDUCED
+ bool "Reduce debugging information"
+ help
+ If you say Y here gcc is instructed to generate less debugging
+ information for structure types. This means that tools that
+ need full debugging information (like kgdb or systemtap) won't
+ be happy. But if you merely need debugging information to
+ resolve line numbers there is no loss. Advantage is that
+ build directory object sizes shrink dramatically over a full
+ DEBUG_INFO build and compile times are reduced too.
+ Only works with newer gcc versions.
+
+choice
+ prompt "Compressed Debug information"
+ help
+ Compress the resulting debug info. Results in smaller debug info sections,
+ but requires that consumers are able to decompress the results.
+
+ If unsure, choose DEBUG_INFO_COMPRESSED_NONE.
+
+config DEBUG_INFO_COMPRESSED_NONE
+ bool "Don't compress debug information"
+ help
+ Don't compress debug info sections.
+
+config DEBUG_INFO_COMPRESSED_ZLIB
+ bool "Compress debugging information with zlib"
+ depends on $(cc-option,-gz=zlib)
+ depends on $(ld-option,--compress-debug-sections=zlib)
+ help
+ Compress the debug information using zlib. Requires GCC 5.0+ or Clang
+ 5.0+, binutils 2.26+, and zlib.
+
+ Users of dpkg-deb via scripts/package/builddeb may find an increase in
+ size of their debug .deb packages with this config set, due to the
+ debug info being compressed with zlib, then the object files being
+ recompressed with a different compression scheme. But this is still
+ preferable to setting $KDEB_COMPRESS to "none" which would be even
+ larger.
+
+config DEBUG_INFO_COMPRESSED_ZSTD
+ bool "Compress debugging information with zstd"
+ depends on $(cc-option,-gz=zstd)
+ depends on $(ld-option,--compress-debug-sections=zstd)
+ help
+ Compress the debug information using zstd. This may provide better
+ compression than zlib, for about the same time costs, but requires newer
+ toolchain support. Requires GCC 13.0+ or Clang 16.0+, binutils 2.40+, and
+ zstd.
+
+endchoice # "Compressed Debug information"
+
+config DEBUG_INFO_SPLIT
+ bool "Produce split debuginfo in .dwo files"
+ depends on $(cc-option,-gsplit-dwarf)
+ # RISC-V linker relaxation + -gsplit-dwarf has issues with LLVM and GCC
+ # prior to 12.x:
+ # https://github.com/llvm/llvm-project/issues/56642
+ # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=99090
+ depends on !RISCV || GCC_VERSION >= 120000
+ help
+ Generate debug info into separate .dwo files. This significantly
+ reduces the build directory size for builds with DEBUG_INFO,
+ because it stores the information only once on disk in .dwo
+ files instead of multiple times in object files and executables.
+ In addition the debug information is also compressed.
+
+ Requires recent gcc (4.7+) and recent gdb/binutils.
+ Any tool that packages or reads debug information would need
+ to know about the .dwo files and include them.
+ Incompatible with older versions of ccache.
+
+config DEBUG_INFO_BTF
+ bool "Generate BTF typeinfo"
+ depends on !DEBUG_INFO_SPLIT && !DEBUG_INFO_REDUCED
+ depends on !GCC_PLUGIN_RANDSTRUCT || COMPILE_TEST
+ depends on BPF_SYSCALL
+ depends on !DEBUG_INFO_DWARF5 || PAHOLE_VERSION >= 121
+ help
+ Generate deduplicated BTF type information from DWARF debug info.
+ Turning this on requires the pahole tool, which will convert
+ DWARF type info into equivalent deduplicated BTF type info.
+
+config PAHOLE_HAS_SPLIT_BTF
+ def_bool PAHOLE_VERSION >= 119
+
+config PAHOLE_HAS_BTF_TAG
+ def_bool PAHOLE_VERSION >= 123
+ depends on CC_IS_CLANG
+ help
+ Decide whether pahole emits btf_tag attributes (btf_type_tag and
+ btf_decl_tag) or not. Currently only clang compiler implements
+ these attributes, so make the config depend on CC_IS_CLANG.
+
+config PAHOLE_HAS_LANG_EXCLUDE
+ def_bool PAHOLE_VERSION >= 124
+ help
+ Support for the --lang_exclude flag which makes pahole exclude
+ compilation units from the supplied language. Used in Kbuild to
+ omit Rust CUs which are not supported in version 1.24 of pahole,
+ otherwise it would emit malformed kernel and module binaries when
+ using DEBUG_INFO_BTF_MODULES.
+
+config DEBUG_INFO_BTF_MODULES
+ def_bool y
+ depends on DEBUG_INFO_BTF && MODULES && PAHOLE_HAS_SPLIT_BTF
+ help
+ Generate compact split BTF type information for kernel modules.
+
+config MODULE_ALLOW_BTF_MISMATCH
+ bool "Allow loading modules with non-matching BTF type info"
+ depends on DEBUG_INFO_BTF_MODULES
+ help
+ For modules whose split BTF does not match vmlinux, load without
+ BTF rather than refusing to load. The default behavior with
+ module BTF enabled is to reject modules with such mismatches;
+ this option will still load module BTF where possible but ignore
+ it when a mismatch is found.
+
+config GDB_SCRIPTS
+ bool "Provide GDB scripts for kernel debugging"
+ help
+ This creates the required links to GDB helper scripts in the
+ build directory. If you load vmlinux into gdb, the helper
+ scripts will be automatically imported by gdb as well, and
+ additional functions are available to analyze a Linux kernel
+ instance. See Documentation/dev-tools/gdb-kernel-debugging.rst
+ for further details.
+
+endif # DEBUG_INFO
+
+config FRAME_WARN
+ int "Warn for stack frames larger than"
+ range 0 8192
+ default 0 if KMSAN
+ default 2048 if GCC_PLUGIN_LATENT_ENTROPY
+ default 2048 if PARISC
+ default 1536 if (!64BIT && XTENSA)
+ default 1280 if KASAN && !64BIT
+ default 1024 if !64BIT
+ default 2048 if 64BIT
+ help
+ Tell the compiler to warn at build time for stack frames larger than this.
+ Setting this too low will cause a lot of warnings.
+ Setting it to 0 disables the warning.
+
+config STRIP_ASM_SYMS
+ bool "Strip assembler-generated symbols during link"
+ default n
+ help
+ Strip internal assembler-generated symbols during a link (symbols
+ that look like '.Lxxx') so they don't pollute the output of
+ get_wchan() and suchlike.
+
+config READABLE_ASM
+ bool "Generate readable assembler code"
+ depends on DEBUG_KERNEL
+ depends on CC_IS_GCC
+ help
+ Disable some compiler optimizations that tend to generate assembler
+ output that is unreadable to humans. This may make the kernel
+ slightly slower, but it helps
+ to keep kernel developers who have to stare a lot at assembler listings
+ sane.
+
+config HEADERS_INSTALL
+ bool "Install uapi headers to usr/include"
+ depends on !UML
+ help
+ This option will install uapi headers (headers exported to user-space)
+ into the usr/include directory for use during the kernel build.
+ This is unneeded for building the kernel itself, but needed for some
+ user-space program samples. It is also needed by some features such
+ as uapi header sanity checks.
+
+config DEBUG_SECTION_MISMATCH
+ bool "Enable full Section mismatch analysis"
+ depends on CC_IS_GCC
+ help
+ The section mismatch analysis checks if there are illegal
+ references from one section to another section.
+ During linktime or runtime, some sections are dropped;
+ any use of code/data previously in these sections would
+ most likely result in an oops.
+ In the code, functions and variables are annotated with
+ __init, etc. (see the full list in include/linux/init.h),
+ which results in the code/data being placed in specific sections.
+ The section mismatch analysis is always performed after a full
+ kernel build, and enabling this option causes the following
+ additional step to occur:
+ - Add the option -fno-inline-functions-called-once to gcc commands.
+ When inlining a function annotated with __init in a non-init
+ function, we would lose the section information and thus
+ the analysis would not catch the illegal reference.
+ This option tells gcc to inline less (but it does result in
+ a larger kernel).
+
+config SECTION_MISMATCH_WARN_ONLY
+ bool "Make section mismatch errors non-fatal"
+ default y
+ help
+ If you say N here, the build process will fail if there are any
+ section mismatches, instead of just emitting warnings.
+
+ If unsure, say Y.
+
+config DEBUG_FORCE_FUNCTION_ALIGN_64B
+ bool "Force all function address 64B aligned"
+ depends on EXPERT && (X86_64 || ARM64 || PPC32 || PPC64 || ARC || RISCV || S390)
+ select FUNCTION_ALIGNMENT_64B
+ help
+ There are cases where a commit in one area changes the function
+ address alignment of other areas, causing a mysterious performance
+ bump (regression or improvement). Enabling this option helps verify
+ whether such a bump is caused by function alignment changes, at the
+ cost of a slightly larger kernel and altered icache usage.
+
+ It is mainly for debug and performance tuning use.
+
+#
+# Select this config option from the architecture Kconfig, if it
+# is preferred to always offer frame pointers as a config
+# option on the architecture (regardless of KERNEL_DEBUG):
+#
+config ARCH_WANT_FRAME_POINTERS
+ bool
+
+config FRAME_POINTER
+ bool "Compile the kernel with frame pointers"
+ depends on DEBUG_KERNEL && (M68K || UML || SUPERH) || ARCH_WANT_FRAME_POINTERS
+ default y if (DEBUG_INFO && UML) || ARCH_WANT_FRAME_POINTERS
+ help
+ If you say Y here the resulting kernel image will be slightly
+ larger and slower, but it gives very useful debugging information
+ in case of kernel bugs. (precise oopses/stacktraces/warnings)
+
+config OBJTOOL
+ bool
+
+config STACK_VALIDATION
+ bool "Compile-time stack metadata validation"
+ depends on HAVE_STACK_VALIDATION && UNWINDER_FRAME_POINTER
+ select OBJTOOL
+ default n
+ help
+ Validate frame pointer rules at compile-time. This helps ensure that
+ runtime stack traces are more reliable.
+
+ For more information, see
+ tools/objtool/Documentation/objtool.txt.
+
+config NOINSTR_VALIDATION
+ bool
+ depends on HAVE_NOINSTR_VALIDATION && DEBUG_ENTRY
+ select OBJTOOL
+ default y
+
+config VMLINUX_MAP
+ bool "Generate vmlinux.map file when linking"
+ depends on EXPERT
+ help
+ Selecting this option will pass "-Map=vmlinux.map" to ld
+ when linking vmlinux. That file can be useful for verifying
+ and debugging magic section games, and for seeing which
+ pieces of code get eliminated with
+ CONFIG_LD_DEAD_CODE_DATA_ELIMINATION.
+
+config DEBUG_FORCE_WEAK_PER_CPU
+ bool "Force weak per-cpu definitions"
+ depends on DEBUG_KERNEL
+ help
+ s390 and alpha require percpu variables in modules to be
+ defined weak to work around an addressing range issue which
+ puts the following two restrictions on percpu variable
+ definitions.
+
+ 1. percpu symbols must be unique whether static or not
+ 2. percpu variables can't be defined inside a function
+
+ To ensure that generic code follows the above rules, this
+ option forces all percpu variables to be defined as weak.
+
+endmenu # "Compiler options"
+
+menu "Generic Kernel Debugging Instruments"
+
+config MAGIC_SYSRQ
+ bool "Magic SysRq key"
+ depends on !UML
+ help
+ If you say Y here, you will have some control over the system even
+ if the system crashes for example during kernel debugging (e.g., you
+ will be able to flush the buffer cache to disk, reboot the system
+ immediately or dump some status information). This is accomplished
+ by pressing various keys while holding SysRq (Alt+PrintScreen). It
+ also works on a serial console (on PC hardware at least), if you
+ send a BREAK and then within 5 seconds a command keypress. The
+ keys are documented in <file:Documentation/admin-guide/sysrq.rst>.
+ Don't say Y unless you really know what this hack does.
+
+config MAGIC_SYSRQ_DEFAULT_ENABLE
+ hex "Enable magic SysRq key functions by default"
+ depends on MAGIC_SYSRQ
+ default 0x1
+ help
+ Specifies which SysRq key functions are enabled by default.
+ This may be set to 1 or 0 to enable or disable them all, or
+ to a bitmask as described in Documentation/admin-guide/sysrq.rst.
+
+config MAGIC_SYSRQ_SERIAL
+ bool "Enable magic SysRq key over serial"
+ depends on MAGIC_SYSRQ
+ default y
+ help
+ Many embedded boards have a disconnected TTL level serial which can
+ generate some garbage that can lead to spurious SysRq detections.
+ This option lets you decide whether the magic SysRq key should be
+ enabled over a serial console.
+
+config MAGIC_SYSRQ_SERIAL_SEQUENCE
+ string "Char sequence that enables magic SysRq over serial"
+ depends on MAGIC_SYSRQ_SERIAL
+ default ""
+ help
+ Specifies a sequence of characters that can follow BREAK to enable
+ SysRq on a serial console.
+
+ If unsure, leave an empty string and the option will not be enabled.
+
+config DEBUG_FS
+ bool "Debug Filesystem"
+ help
+ debugfs is a virtual file system that kernel developers use to put
+ debugging files into. Enable this option to be able to read and
+ write to these files.
+
+ For detailed documentation on the debugfs API, see
+ Documentation/filesystems/.
+
+ If unsure, say N.
+
+choice
+ prompt "Debugfs default access"
+ depends on DEBUG_FS
+ default DEBUG_FS_ALLOW_ALL
+ help
+ This selects the default access restrictions for debugfs.
+ It can be overridden with kernel command line option
+ debugfs=[on,no-mount,off]. The restrictions apply for API access
+ and filesystem registration.
+
+config DEBUG_FS_ALLOW_ALL
+ bool "Access normal"
+ help
+ No restrictions apply. Both the API and filesystem registration
+ are enabled. This is the normal default operation.
+
+config DEBUG_FS_DISALLOW_MOUNT
+ bool "Do not register debugfs as filesystem"
+ help
+ The API is open but the filesystem is not registered. Clients can
+ still do their work and read their data with debug tools that do
+ not need the debugfs filesystem.
+
+config DEBUG_FS_ALLOW_NONE
+ bool "No access"
+ help
+ Access is off. Clients get -EPERM when trying to create nodes in
+ the debugfs tree and debugfs is not registered as a filesystem.
+ Clients can then back off or continue without debugfs access.
+
+endchoice
+
+source "lib/Kconfig.kgdb"
+source "lib/Kconfig.ubsan"
+source "lib/Kconfig.kcsan"
+
+endmenu
+
+menu "Networking Debugging"
+
+source "net/Kconfig.debug"
+
+endmenu # "Networking Debugging"
+
+menu "Memory Debugging"
+
+source "mm/Kconfig.debug"
+
+config DEBUG_OBJECTS
+ bool "Debug object operations"
+ depends on DEBUG_KERNEL
+ help
+ If you say Y here, additional code will be inserted into the
+ kernel to track the life time of various objects and validate
+ the operations on those objects.
+
+config DEBUG_OBJECTS_SELFTEST
+ bool "Debug objects selftest"
+ depends on DEBUG_OBJECTS
+ help
+ This enables the selftest of the object debug code.
+
+config DEBUG_OBJECTS_FREE
+ bool "Debug objects in freed memory"
+ depends on DEBUG_OBJECTS
+ help
+ This enables checking whether a kfree()/vfree() operation frees an
+ area which contains an object that has not been properly
+ deactivated. This can make kmalloc/kfree-intensive workloads
+ much slower.
+
+config DEBUG_OBJECTS_TIMERS
+ bool "Debug timer objects"
+ depends on DEBUG_OBJECTS
+ help
+ If you say Y here, additional code will be inserted into the
+ timer routines to track the life time of timer objects and
+ validate the timer operations.
+
+config DEBUG_OBJECTS_WORK
+ bool "Debug work objects"
+ depends on DEBUG_OBJECTS
+ help
+ If you say Y here, additional code will be inserted into the
+ work queue routines to track the life time of work objects and
+ validate the work operations.
+
+config DEBUG_OBJECTS_RCU_HEAD
+ bool "Debug RCU callbacks objects"
+ depends on DEBUG_OBJECTS
+ help
+ Enable this to turn on debugging of RCU list heads (call_rcu() usage).
+
+config DEBUG_OBJECTS_PERCPU_COUNTER
+ bool "Debug percpu counter objects"
+ depends on DEBUG_OBJECTS
+ help
+ If you say Y here, additional code will be inserted into the
+ percpu counter routines to track the life time of percpu counter
+ objects and validate the percpu counter operations.
+
+config DEBUG_OBJECTS_ENABLE_DEFAULT
+ int "debug_objects bootup default value (0-1)"
+ range 0 1
+ default "1"
+ depends on DEBUG_OBJECTS
+ help
+ Default value of the debug_objects boot parameter.
+
+config SHRINKER_DEBUG
+ bool "Enable shrinker debugging support"
+ depends on DEBUG_FS
+ help
+ Say Y to enable the shrinker debugfs interface which provides
+ visibility into the kernel memory shrinkers subsystem.
+ Disable it to avoid an extra memory footprint.
+
+config DEBUG_STACK_USAGE
+ bool "Stack utilization instrumentation"
+ depends on DEBUG_KERNEL && !IA64
+ help
+ Enables the display of the minimum amount of free stack which each
+ task has ever had available in the sysrq-T and sysrq-P debug output.
+
+ This option will slow down process creation somewhat.
+
+config SCHED_STACK_END_CHECK
+ bool "Detect stack corruption on calls to schedule()"
+ depends on DEBUG_KERNEL
+ default n
+ help
+ This option checks for a stack overrun on calls to schedule().
+ If the stack end location is found to be overwritten, the kernel
+ panics immediately, as the content of the corrupted region can no
+ longer be trusted. This ensures that no erroneous behaviour occurs
+ which could result in data corruption or a sporadic crash at a
+ later stage, once the region is examined. The runtime overhead
+ introduced is minimal.
+
+config ARCH_HAS_DEBUG_VM_PGTABLE
+ bool
+ help
+ An architecture should select this when it can successfully
+ build and run DEBUG_VM_PGTABLE.
+
+config DEBUG_VM_IRQSOFF
+ def_bool DEBUG_VM && !PREEMPT_RT
+
+config DEBUG_VM
+ bool "Debug VM"
+ depends on DEBUG_KERNEL
+ help
+ Enable this to turn on extended checks in the virtual-memory system
+ that may impact performance.
+
+ If unsure, say N.
+
+config DEBUG_VM_SHOOT_LAZIES
+ bool "Debug MMU_LAZY_TLB_SHOOTDOWN implementation"
+ depends on DEBUG_VM
+ depends on MMU_LAZY_TLB_SHOOTDOWN
+ help
+ Enable additional IPIs that ensure lazy tlb mm references are removed
+ before the mm is freed.
+
+ If unsure, say N.
+
+config DEBUG_VM_MAPLE_TREE
+ bool "Debug VM maple trees"
+ depends on DEBUG_VM
+ select DEBUG_MAPLE_TREE
+ help
+ Enable VM maple tree debugging information and extra validations.
+
+ If unsure, say N.
+
+config DEBUG_VM_RB
+ bool "Debug VM red-black trees"
+ depends on DEBUG_VM
+ help
+ Enable VM red-black tree debugging information and extra validations.
+
+ If unsure, say N.
+
+config DEBUG_VM_PGFLAGS
+ bool "Debug page-flags operations"
+ depends on DEBUG_VM
+ help
+ Enables extra validation on page flags operations.
+
+ If unsure, say N.
+
+config DEBUG_VM_PGTABLE
+ bool "Debug arch page table for semantics compliance"
+ depends on MMU
+ depends on ARCH_HAS_DEBUG_VM_PGTABLE
+ default y if DEBUG_VM
+ help
+ This option provides a debug method which can be used to test
+ architecture page table helper functions on various platforms in
+ verifying if they comply with expected generic MM semantics. This
+ will help architecture code in making sure that any changes or
+ new additions of these helpers still conform to expected
+ semantics of the generic MM. Platforms will have to opt in for
+ this through ARCH_HAS_DEBUG_VM_PGTABLE.
+
+ If unsure, say N.
+
+config ARCH_HAS_DEBUG_VIRTUAL
+ bool
+
+config DEBUG_VIRTUAL
+ bool "Debug VM translations"
+ depends on DEBUG_KERNEL && ARCH_HAS_DEBUG_VIRTUAL
+ help
+ Enable some costly sanity checks in virtual to page code. This can
+ catch mistakes with virt_to_page() and friends.
+
+ If unsure, say N.
+
+config DEBUG_NOMMU_REGIONS
+ bool "Debug the global anon/private NOMMU mapping region tree"
+ depends on DEBUG_KERNEL && !MMU
+ help
+ This option causes the global tree of anonymous and private mapping
+ regions to be regularly checked for invalid topology.
+
+config DEBUG_MEMORY_INIT
+ bool "Debug memory initialisation" if EXPERT
+ default !EXPERT
+ help
+ Enable this for additional checks during memory initialisation.
+ The sanity checks verify aspects of the VM such as the memory model
+ and other information provided by the architecture. Verbose
+ information will be printed at KERN_DEBUG loglevel depending
+ on the mminit_loglevel= command-line option.
+
+ If unsure, say Y.
+
+config MEMORY_NOTIFIER_ERROR_INJECT
+ tristate "Memory hotplug notifier error injection module"
+ depends on MEMORY_HOTPLUG && NOTIFIER_ERROR_INJECTION
+ help
+ This option provides the ability to inject artificial errors to
+ memory hotplug notifier chain callbacks. It is controlled through
+ debugfs interface under /sys/kernel/debug/notifier-error-inject/memory
+
+ To make the notifier call chain fail for a given event, write
+ the error code to "actions/<notifier event>/error".
+
+ Example: Inject memory hotplug offline error (-12 == -ENOMEM)
+
+ # cd /sys/kernel/debug/notifier-error-inject/memory
+ # echo -12 > actions/MEM_GOING_OFFLINE/error
+ # echo offline > /sys/devices/system/memory/memoryXXX/state
+ bash: echo: write error: Cannot allocate memory
+
+ To compile this code as a module, choose M here: the module will
+ be called memory-notifier-error-inject.
+
+ If unsure, say N.
+
+config DEBUG_PER_CPU_MAPS
+ bool "Debug access to per_cpu maps"
+ depends on DEBUG_KERNEL
+ depends on SMP
+ help
+ Say Y to verify that the per_cpu map being accessed has
+ been set up. This adds a fair amount of code to kernel memory
+ and decreases performance.
+
+ Say N if unsure.
+
+config DEBUG_KMAP_LOCAL
+ bool "Debug kmap_local temporary mappings"
+ depends on DEBUG_KERNEL && KMAP_LOCAL
+ help
+ This option enables additional error checking for the kmap_local
+ infrastructure. Disable for production use.
+
+config ARCH_SUPPORTS_KMAP_LOCAL_FORCE_MAP
+ bool
+
+config DEBUG_KMAP_LOCAL_FORCE_MAP
+ bool "Enforce kmap_local temporary mappings"
+ depends on DEBUG_KERNEL && ARCH_SUPPORTS_KMAP_LOCAL_FORCE_MAP
+ select KMAP_LOCAL
+ select DEBUG_KMAP_LOCAL
+ help
+ This option enforces temporary mappings through the kmap_local
+ mechanism for non-highmem pages and on non-highmem systems.
+ Disable this for production systems!
+
+config DEBUG_HIGHMEM
+ bool "Highmem debugging"
+ depends on DEBUG_KERNEL && HIGHMEM
+ select DEBUG_KMAP_LOCAL_FORCE_MAP if ARCH_SUPPORTS_KMAP_LOCAL_FORCE_MAP
+ select DEBUG_KMAP_LOCAL
+ help
+ This option enables additional error checking for high memory
+ systems. Disable for production systems.
+
+config HAVE_DEBUG_STACKOVERFLOW
+ bool
+
+config DEBUG_STACKOVERFLOW
+ bool "Check for stack overflows"
+ depends on DEBUG_KERNEL && HAVE_DEBUG_STACKOVERFLOW
+ help
+ Say Y here if you want to check for overflows of kernel, IRQ
+ and exception stacks (if your architecture uses them). This
+ option will show detailed messages if free stack space drops
+ below a certain limit.
+
+ These kinds of bugs usually occur when call-chains in the
+ kernel get too deep, especially when interrupts are
+ involved.
+
+ Use this in cases where you see apparently random memory
+ corruption, especially if it appears in 'struct thread_info'
+
+ If in doubt, say "N".
+
+source "lib/Kconfig.kasan"
+source "lib/Kconfig.kfence"
+source "lib/Kconfig.kmsan"
+
+endmenu # "Memory Debugging"
+
+config DEBUG_SHIRQ
+ bool "Debug shared IRQ handlers"
+ depends on DEBUG_KERNEL
+ help
+ Enable this to generate a spurious interrupt just before a shared
+ interrupt handler is deregistered (generating one when registering
+ is currently disabled). Drivers need to handle this correctly. Some
+ don't and need to be caught.
+
+menu "Debug Oops, Lockups and Hangs"
+
+config PANIC_ON_OOPS
+ bool "Panic on Oops"
+ help
+ Say Y here to enable the kernel to panic when it oopses. This
+ has the same effect as setting oops=panic on the kernel command
+ line.
+
+ This feature is useful to ensure that the kernel does not do
+ anything erroneous after an oops which could result in data
+ corruption or other issues.
+
+ Say N if unsure.
+
+config PANIC_ON_OOPS_VALUE
+ int
+ range 0 1
+ default 0 if !PANIC_ON_OOPS
+ default 1 if PANIC_ON_OOPS
+
+config PANIC_TIMEOUT
+ int "panic timeout"
+ default 0
+ help
+ Set the timeout value (in seconds) until a reboot occurs when
+ the kernel panics. If n = 0, then we wait forever. A timeout
+ value n > 0 will wait n seconds before rebooting, while a timeout
+ value n < 0 will reboot immediately.
+
+config LOCKUP_DETECTOR
+ bool
+
+config SOFTLOCKUP_DETECTOR
+ bool "Detect Soft Lockups"
+ depends on DEBUG_KERNEL && !S390
+ select LOCKUP_DETECTOR
+ help
+ Say Y here to enable the kernel to act as a watchdog to detect
+ soft lockups.
+
+ Softlockups are bugs that cause the kernel to loop in kernel
+ mode for more than 20 seconds, without giving other tasks a
+ chance to run. The current stack trace is displayed upon
+ detection and the system will stay locked up.
+
+config BOOTPARAM_SOFTLOCKUP_PANIC
+ bool "Panic (Reboot) On Soft Lockups"
+ depends on SOFTLOCKUP_DETECTOR
+ help
+ Say Y here to enable the kernel to panic on "soft lockups",
+ which are bugs that cause the kernel to loop in kernel
+ mode for more than 20 seconds (configurable using the watchdog_thresh
+ sysctl), without giving other tasks a chance to run.
+
+ The panic can be used in combination with panic_timeout,
+ to cause the system to reboot automatically after a
+ lockup has been detected. This feature is useful for
+ high-availability systems that have uptime guarantees and
+ where a lockup must be resolved ASAP.
+
+ Say N if unsure.
+
+config HAVE_HARDLOCKUP_DETECTOR_BUDDY
+ bool
+ depends on SMP
+ default y
+
+#
+# Global switch whether to build a hardlockup detector at all. It is available
+# only when the architecture supports at least one implementation. There are
+# two exceptions. The hardlockup detector is never enabled on:
+#
+# s390: it reported many false positives there
+#
+# sparc64: has a custom implementation which is not using the common
+# hardlockup command line options and sysctl interface.
+#
+config HARDLOCKUP_DETECTOR
+ bool "Detect Hard Lockups"
+ depends on DEBUG_KERNEL && !S390 && !HARDLOCKUP_DETECTOR_SPARC64
+ depends on HAVE_HARDLOCKUP_DETECTOR_PERF || HAVE_HARDLOCKUP_DETECTOR_BUDDY || HAVE_HARDLOCKUP_DETECTOR_ARCH
+ imply HARDLOCKUP_DETECTOR_PERF
+ imply HARDLOCKUP_DETECTOR_BUDDY
+ imply HARDLOCKUP_DETECTOR_ARCH
+ select LOCKUP_DETECTOR
+ help
+ Say Y here to enable the kernel to act as a watchdog to detect
+ hard lockups.
+
+ Hardlockups are bugs that cause the CPU to loop in kernel mode
+ for more than 10 seconds, without letting other interrupts have a
+ chance to run. The current stack trace is displayed upon detection
+ and the system will stay locked up.
+
+#
+# Note that arch-specific variants are always preferred.
+#
+config HARDLOCKUP_DETECTOR_PREFER_BUDDY
+ bool "Prefer the buddy CPU hardlockup detector"
+ depends on HARDLOCKUP_DETECTOR
+ depends on HAVE_HARDLOCKUP_DETECTOR_PERF && HAVE_HARDLOCKUP_DETECTOR_BUDDY
+ depends on !HAVE_HARDLOCKUP_DETECTOR_ARCH
+ help
+ Say Y here to prefer the buddy hardlockup detector over the perf one.
+
+ With the buddy detector, each CPU uses its softlockup hrtimer
+ to check that the next CPU is processing hrtimer interrupts by
+ verifying that a counter is increasing.
+
+ This hardlockup detector is useful on systems that don't have
+ an arch-specific hardlockup detector or if resources needed
+ for the hardlockup detector are better used for other things.
+
+config HARDLOCKUP_DETECTOR_PERF
+ bool
+ depends on HARDLOCKUP_DETECTOR
+ depends on HAVE_HARDLOCKUP_DETECTOR_PERF && !HARDLOCKUP_DETECTOR_PREFER_BUDDY
+ depends on !HAVE_HARDLOCKUP_DETECTOR_ARCH
+ select HARDLOCKUP_DETECTOR_COUNTS_HRTIMER
+
+config HARDLOCKUP_DETECTOR_BUDDY
+ bool
+ depends on HARDLOCKUP_DETECTOR
+ depends on HAVE_HARDLOCKUP_DETECTOR_BUDDY
+ depends on !HAVE_HARDLOCKUP_DETECTOR_PERF || HARDLOCKUP_DETECTOR_PREFER_BUDDY
+ depends on !HAVE_HARDLOCKUP_DETECTOR_ARCH
+ select HARDLOCKUP_DETECTOR_COUNTS_HRTIMER
+
+config HARDLOCKUP_DETECTOR_ARCH
+ bool
+ depends on HARDLOCKUP_DETECTOR
+ depends on HAVE_HARDLOCKUP_DETECTOR_ARCH
+ help
+ The arch-specific implementation of the hardlockup detector will
+ be used.
+
+#
+# Both the "perf" and "buddy" hardlockup detectors count hrtimer
+# interrupts. This config enables functions managing this common code.
+#
+config HARDLOCKUP_DETECTOR_COUNTS_HRTIMER
+ bool
+ select SOFTLOCKUP_DETECTOR
+
+#
+# Enables a timestamp based low pass filter to compensate for perf based
+# hard lockup detection which runs too fast due to turbo modes.
+#
+config HARDLOCKUP_CHECK_TIMESTAMP
+ bool
+
+config BOOTPARAM_HARDLOCKUP_PANIC
+ bool "Panic (Reboot) On Hard Lockups"
+ depends on HARDLOCKUP_DETECTOR
+ help
+ Say Y here to enable the kernel to panic on "hard lockups",
+ which are bugs that cause the kernel to loop in kernel
+ mode with interrupts disabled for more than 10 seconds (configurable
+ using the watchdog_thresh sysctl).
+
+ Say N if unsure.
+
+config DETECT_HUNG_TASK
+ bool "Detect Hung Tasks"
+ depends on DEBUG_KERNEL
+ default SOFTLOCKUP_DETECTOR
+ help
+ Say Y here to enable the kernel to detect "hung tasks",
+ which are bugs that cause the task to be stuck in
+ uninterruptible "D" state indefinitely.
+
+ When a hung task is detected, the kernel will print the
+ current stack trace (which you should report), but the
+ task will stay in uninterruptible state. If lockdep is
+ enabled then all held locks will also be reported. This
+ feature has negligible overhead.
+
+config DEFAULT_HUNG_TASK_TIMEOUT
+ int "Default timeout for hung task detection (in seconds)"
+ depends on DETECT_HUNG_TASK
+ default 120
+ help
+ This option controls the default timeout (in seconds) used
+ to determine when a task has become non-responsive and should
+ be considered hung.
+
+ It can be adjusted at runtime via the kernel.hung_task_timeout_secs
+ sysctl, i.e. by writing a value to
+ /proc/sys/kernel/hung_task_timeout_secs.
+
+ A timeout of 0 disables the check. The default is two minutes.
+ Keeping the default should be fine in most cases.
+
+config BOOTPARAM_HUNG_TASK_PANIC
+ bool "Panic (Reboot) On Hung Tasks"
+ depends on DETECT_HUNG_TASK
+ help
+ Say Y here to enable the kernel to panic on "hung tasks",
+ which are bugs that cause the kernel to leave a task stuck
+ in uninterruptible "D" state.
+
+ The panic can be used in combination with panic_timeout,
+ to cause the system to reboot automatically after a
+ hung task has been detected. This feature is useful for
+ high-availability systems that have uptime guarantees and
+ where a hung task must be resolved ASAP.
+
+ Say N if unsure.
+
+config WQ_WATCHDOG
+ bool "Detect Workqueue Stalls"
+ depends on DEBUG_KERNEL
+ help
+ Say Y here to enable stall detection on workqueues. If a
+ worker pool doesn't make forward progress on a pending work
+ item for over a given amount of time, 30s by default, a
+ warning message is printed along with a dump of workqueue
+ state. This can be configured through kernel parameter
+ "workqueue.watchdog_thresh" and its sysfs counterpart.
+
+config WQ_CPU_INTENSIVE_REPORT
+ bool "Report per-cpu work items which hog CPU for too long"
+ depends on DEBUG_KERNEL
+ help
+ Say Y here to enable reporting of concurrency-managed per-cpu work
+ items that hog CPUs for longer than
+ workqueue.cpu_intensive_thresh_us. Workqueue automatically
+ detects and excludes them from concurrency management to prevent
+ them from stalling other per-cpu work items. Occasional
+ triggering may not necessarily indicate a problem. Repeated
+ triggering likely indicates that the work item should be switched
+ to use an unbound workqueue.
+
+config TEST_LOCKUP
+ tristate "Test module to generate lockups"
+ depends on m
+ help
+ This builds the "test_lockup" module that helps to make sure
+ that watchdogs and lockup detectors are working properly.
+
+ Depending on module parameters, it can emulate a soft or hard
+ lockup, a "hung task", or hold an arbitrary lock for a long time.
+ It can also generate a series of lockups with cooling-down periods.
+
+ If unsure, say N.
+
+endmenu # "Debug lockups and hangs"
+
+menu "Scheduler Debugging"
+
+config SCHED_DEBUG
+ bool "Collect scheduler debugging info"
+ depends on DEBUG_KERNEL && DEBUG_FS
+ default y
+ help
+ If you say Y here, the /sys/kernel/debug/sched file will be
+ provided, which can help debug the scheduler. The runtime
+ overhead of this option is minimal.
+
+config SCHED_INFO
+ bool
+ default n
+
+config SCHEDSTATS
+ bool "Collect scheduler statistics"
+ depends on DEBUG_KERNEL && PROC_FS
+ select SCHED_INFO
+ help
+ If you say Y here, additional code will be inserted into the
+ scheduler and related routines to collect statistics about
+ scheduler behavior and provide them in /proc/schedstat. These
+ stats may be useful for both tuning and debugging the scheduler
+ If you aren't debugging the scheduler or trying to tune a specific
+ application, you can say N to avoid the very slight overhead
+ this adds.
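+
+ For example, the collected statistics can be inspected with:
+
+ # cat /proc/schedstat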
+
+endmenu
+
+config DEBUG_TIMEKEEPING
+ bool "Enable extra timekeeping sanity checking"
+ help
+ This option will enable additional timekeeping sanity checks
+ which may be helpful when diagnosing issues where timekeeping
+ problems are suspected.
+
+ This may include checks in the timekeeping hotpaths, so this
+ option may have a (very small) performance impact on some
+ workloads.
+
+ If unsure, say N.
+
+config DEBUG_PREEMPT
+ bool "Debug preemptible kernel"
+ depends on DEBUG_KERNEL && PREEMPTION && TRACE_IRQFLAGS_SUPPORT
+ help
+ If you say Y here then the kernel will use a debug variant of the
+ commonly used smp_processor_id() function and will print warnings
+ if kernel code uses it in a preemption-unsafe way. Also, the kernel
+ will detect preemption count underflows.
+
+ This option has potential to introduce high runtime overhead,
+ depending on workload as it triggers debugging routines for each
+ this_cpu operation. It should only be used for debugging purposes.
+
+menu "Lock Debugging (spinlocks, mutexes, etc...)"
+
+config LOCK_DEBUGGING_SUPPORT
+ bool
+ depends on TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
+ default y
+
+config PROVE_LOCKING
+ bool "Lock debugging: prove locking correctness"
+ depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
+ select LOCKDEP
+ select DEBUG_SPINLOCK
+ select DEBUG_MUTEXES if !PREEMPT_RT
+ select DEBUG_RT_MUTEXES if RT_MUTEXES
+ select DEBUG_RWSEMS
+ select DEBUG_WW_MUTEX_SLOWPATH
+ select DEBUG_LOCK_ALLOC
+ select PREEMPT_COUNT if !ARCH_NO_PREEMPT
+ select TRACE_IRQFLAGS
+ default n
+ help
+ This feature enables the kernel to prove that all locking
+ that occurs in the kernel runtime is mathematically
+ correct: that under no circumstance could an arbitrary (and
+ not yet triggered) combination of observed locking
+ sequences (on an arbitrary number of CPUs, running an
+ arbitrary number of tasks and interrupt contexts) cause a
+ deadlock.
+
+ In short, this feature enables the kernel to report locking
+ related deadlocks before they actually occur.
+
+ The proof does not depend on how hard and complex a
+ deadlock scenario would be to trigger: how many
+ participant CPUs, tasks and irq-contexts would be needed
+ for it to trigger. The proof also does not depend on
+ timing: if a race and a resulting deadlock is possible
+ theoretically (no matter how unlikely the race scenario
+ is), it will be proven so and will immediately be
+ reported by the kernel (once the event is observed that
+ makes the deadlock theoretically possible).
+
+ If a deadlock is impossible (i.e. the locking rules, as
+ observed by the kernel, are mathematically correct), the
+ kernel reports nothing.
+
+ NOTE: this feature can also be enabled for rwlocks, mutexes
+ and rwsems - in which case all dependencies between these
+ different locking variants are observed and mapped too, and
+ the proof of observed correctness is also maintained for an
+ arbitrary combination of these separate locking variants.
+
+ For more details, see Documentation/locking/lockdep-design.rst.
+
+config PROVE_RAW_LOCK_NESTING
+ bool "Enable raw_spinlock - spinlock nesting checks"
+ depends on PROVE_LOCKING
+ default n
+ help
+ Enable the raw_spinlock vs. spinlock nesting checks which ensure
+ that the lock nesting rules for PREEMPT_RT enabled kernels are
+ not violated.
+
+ NOTE: There are known nesting problems. So if you enable this
+ option expect lockdep splats until these problems have been fully
+ addressed, which is work in progress. This config switch allows
+ these problems to be identified and analyzed. It will be removed
+ and the check permanently enabled once the main issues have been
+ fixed.
+
+ If unsure, select N.
+
+config LOCK_STAT
+ bool "Lock usage statistics"
+ depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
+ select LOCKDEP
+ select DEBUG_SPINLOCK
+ select DEBUG_MUTEXES if !PREEMPT_RT
+ select DEBUG_RT_MUTEXES if RT_MUTEXES
+ select DEBUG_LOCK_ALLOC
+ default n
+ help
+ This feature enables tracking lock contention points.
+
+ For more details, see Documentation/locking/lockstat.rst
+
+ This also enables the lock events required by "perf lock",
+ a subcommand of perf.
+ If you want to use "perf lock", you also need to turn on
+ CONFIG_EVENT_TRACING.
+
+ CONFIG_LOCK_STAT defines "contended" and "acquired" lock events.
+ (CONFIG_LOCKDEP defines "acquire" and "release" events.)
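+
+ Example usage, following Documentation/locking/lockstat.rst:
+
+ # echo 1 > /proc/sys/kernel/lock_stat
+ ... run your workload ...
+ # cat /proc/lock_stat
+ # echo 0 > /proc/sys/kernel/lock_stat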
+
+config DEBUG_RT_MUTEXES
+ bool "RT Mutex debugging, deadlock detection"
+ depends on DEBUG_KERNEL && RT_MUTEXES
+ help
+ This allows rt mutex semantics violations and rt mutex related
+ deadlocks (lockups) to be detected and reported automatically.
+
+config DEBUG_SPINLOCK
+ bool "Spinlock and rw-lock debugging: basic checks"
+ depends on DEBUG_KERNEL
+ select UNINLINE_SPIN_UNLOCK
+ help
+ Say Y here and build an SMP kernel to catch missing spinlock
+ initialization and certain other common kinds of spinlock errors.
+ This is best used in conjunction with the NMI watchdog so that
+ spinlock deadlocks are also debuggable.
+
+config DEBUG_MUTEXES
+ bool "Mutex debugging: basic checks"
+ depends on DEBUG_KERNEL && !PREEMPT_RT
+ help
+ This feature allows mutex semantics violations to be detected and
+ reported.
+
+config DEBUG_WW_MUTEX_SLOWPATH
+ bool "Wait/wound mutex debugging: Slowpath testing"
+ depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
+ select DEBUG_LOCK_ALLOC
+ select DEBUG_SPINLOCK
+ select DEBUG_MUTEXES if !PREEMPT_RT
+ select DEBUG_RT_MUTEXES if PREEMPT_RT
+ help
+ This feature enables slowpath testing for w/w mutex users by
+ injecting additional -EDEADLK wound/backoff cases. Together with
+ the full mutex checks enabled with (CONFIG_PROVE_LOCKING) this
+ will test all possible w/w mutex interface abuse with the
+ exception of simply not acquiring all the required locks.
+ Note that this feature can introduce significant overhead, so
+ it really should not be enabled in a production or distro kernel,
+ even a debug kernel. If you are a driver writer, enable it. If
+ you are a distro, do not.
+
+config DEBUG_RWSEMS
+ bool "RW Semaphore debugging: basic checks"
+ depends on DEBUG_KERNEL
+ help
+ This debugging feature allows mismatched rw semaphore locks
+ and unlocks to be detected and reported.
+
+config DEBUG_LOCK_ALLOC
+ bool "Lock debugging: detect incorrect freeing of live locks"
+ depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
+ select DEBUG_SPINLOCK
+ select DEBUG_MUTEXES if !PREEMPT_RT
+ select DEBUG_RT_MUTEXES if RT_MUTEXES
+ select LOCKDEP
+ help
+ This feature will check whether any held lock (spinlock, rwlock,
+ mutex or rwsem) is incorrectly freed by the kernel, via any of the
+ memory-freeing routines (kfree(), kmem_cache_free(), free_pages(),
+ vfree(), etc.), whether a live lock is incorrectly reinitialized via
+ spin_lock_init()/mutex_init()/etc., or whether there is any lock
+ held during task exit.
+
+config LOCKDEP
+ bool
+ depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
+ select STACKTRACE
+ select KALLSYMS
+ select KALLSYMS_ALL
+
+config LOCKDEP_SMALL
+ bool
+
+config LOCKDEP_BITS
+ int "Bitsize for MAX_LOCKDEP_ENTRIES"
+ depends on LOCKDEP && !LOCKDEP_SMALL
+ range 10 30
+ default 15
+ help
+ Try increasing this value if you hit "BUG: MAX_LOCKDEP_ENTRIES too low!" message.
+
+config LOCKDEP_CHAINS_BITS
+ int "Bitsize for MAX_LOCKDEP_CHAINS"
+ depends on LOCKDEP && !LOCKDEP_SMALL
+ range 10 30
+ default 16
+ help
+ Try increasing this value if you hit "BUG: MAX_LOCKDEP_CHAINS too low!" message.
+
+config LOCKDEP_STACK_TRACE_BITS
+ int "Bitsize for MAX_STACK_TRACE_ENTRIES"
+ depends on LOCKDEP && !LOCKDEP_SMALL
+ range 10 30
+ default 19
+ help
+ Try increasing this value if you hit "BUG: MAX_STACK_TRACE_ENTRIES too low!" message.
+
+config LOCKDEP_STACK_TRACE_HASH_BITS
+ int "Bitsize for STACK_TRACE_HASH_SIZE"
+ depends on LOCKDEP && !LOCKDEP_SMALL
+ range 10 30
+ default 14
+ help
+ Try increasing this value if you need a large STACK_TRACE_HASH_SIZE.
+
+config LOCKDEP_CIRCULAR_QUEUE_BITS
+ int "Bitsize for elements in circular_queue struct"
+ depends on LOCKDEP
+ range 10 30
+ default 12
+ help
+ Try increasing this value if you hit "lockdep bfs error:-1" warning due to __cq_enqueue() failure.
+
+config DEBUG_LOCKDEP
+ bool "Lock dependency engine debugging"
+ depends on DEBUG_KERNEL && LOCKDEP
+ select DEBUG_IRQFLAGS
+ help
+ If you say Y here, the lock dependency engine will do
+ additional runtime checks to debug itself, at the price
+ of more runtime overhead.
+
+config DEBUG_ATOMIC_SLEEP
+ bool "Sleep inside atomic section checking"
+ select PREEMPT_COUNT
+ depends on DEBUG_KERNEL
+ depends on !ARCH_NO_PREEMPT
+ help
+ If you say Y here, various routines which may sleep will become very
+ noisy if they are called inside atomic sections: when a spinlock is
+ held, inside an rcu read side critical section, inside preempt disabled
+ sections, inside an interrupt, etc...
+
+config DEBUG_LOCKING_API_SELFTESTS
+ bool "Locking API boot-time self-tests"
+ depends on DEBUG_KERNEL
+ help
+ Say Y here if you want the kernel to run a short self-test during
+ bootup. The self-test checks whether common types of locking bugs
+ are detected by debugging mechanisms or not. (If you disable
+ lock debugging, those bugs won't be detected, of course.)
+ The following locking APIs are covered: spinlocks, rwlocks,
+ mutexes and rwsems.
+
+config LOCK_TORTURE_TEST
+ tristate "torture tests for locking"
+ depends on DEBUG_KERNEL
+ select TORTURE_TEST
+ help
+ This option provides a kernel module that runs torture tests
+ on kernel locking primitives. The kernel module may be built
+ after the fact on the running kernel to be tested, if desired.
+
+ Say Y here if you want kernel locking-primitive torture tests
+ to be built into the kernel.
+ Say M if you want these torture tests to build as a module.
+ Say N if you are unsure.
+
+config WW_MUTEX_SELFTEST
+ tristate "Wait/wound mutex selftests"
+ help
+ This option provides a kernel module that runs tests on
+ the struct ww_mutex locking API.
+
+ It is recommended to enable DEBUG_WW_MUTEX_SLOWPATH in conjunction
+ with this test harness.
+
+ Say M if you want these self tests to build as a module.
+ Say N if you are unsure.
+
+config SCF_TORTURE_TEST
+ tristate "torture tests for smp_call_function*()"
+ depends on DEBUG_KERNEL
+ select TORTURE_TEST
+ help
+ This option provides a kernel module that runs torture tests
+ on the smp_call_function() family of primitives. The kernel
+ module may be built after the fact on the running kernel to
+ be tested, if desired.
+
+config CSD_LOCK_WAIT_DEBUG
+ bool "Debugging for csd_lock_wait(), called from smp_call_function*()"
+ depends on DEBUG_KERNEL
+ depends on 64BIT
+ default n
+ help
+ This option enables debug prints when CPUs are slow to respond
+ to the smp_call_function*() IPI wrappers. These debug prints
+ include the IPI handler function currently executing (if any)
+ and relevant stack traces.
+
+config CSD_LOCK_WAIT_DEBUG_DEFAULT
+ bool "Default csd_lock_wait() debugging on at boot time"
+ depends on CSD_LOCK_WAIT_DEBUG
+ depends on 64BIT
+ default n
+ help
+ This option causes the csdlock_debug= kernel boot parameter to
+ default to 1 (basic debugging) instead of 0 (no debugging).
+
+endmenu # lock debugging
+
+config TRACE_IRQFLAGS
+ depends on TRACE_IRQFLAGS_SUPPORT
+ bool
+ help
+ Enables hooks to interrupt enabling and disabling for
+ either tracing or lock debugging.
+
+config TRACE_IRQFLAGS_NMI
+ def_bool y
+ depends on TRACE_IRQFLAGS
+ depends on TRACE_IRQFLAGS_NMI_SUPPORT
+
+config NMI_CHECK_CPU
+ bool "Debugging for CPUs failing to respond to backtrace requests"
+ depends on DEBUG_KERNEL
+ depends on X86
+ default n
+ help
+ Enables debug prints when a CPU fails to respond to a given
+ backtrace NMI. These prints provide some reasons why a CPU
+ might legitimately be failing to respond, for example, if it
+ is offline or if ignore_nmis is set.
+
+config DEBUG_IRQFLAGS
+ bool "Debug IRQ flag manipulation"
+ help
+ Enables checks for potentially unsafe enabling or disabling of
+ interrupts, such as calling raw_local_irq_restore() when interrupts
+ are enabled.
+
+config STACKTRACE
+ bool "Stack backtrace support"
+ depends on STACKTRACE_SUPPORT
+ help
+ This option causes the kernel to create a /proc/pid/stack for
+ every process, showing its current stack trace.
+ It is also used by various kernel debugging features that require
+ stack trace generation.
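+
+ For example, the kernel stack of init can be inspected with
+ (requires root):
+
+ # cat /proc/1/stack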
+
+config WARN_ALL_UNSEEDED_RANDOM
+ bool "Warn for all uses of unseeded randomness"
+ default n
+ help
+ Some parts of the kernel contain bugs relating to their use of
+ cryptographically secure random numbers before it's actually possible
+ to generate those numbers securely. This setting ensures that these
+ flaws don't go unnoticed, by enabling a message, should this ever
+ occur. This will allow people with obscure setups to know when things
+ are going wrong, so that they might contact developers about fixing
+ it.
+
+ Unfortunately, on some models of some architectures getting
+ a fully seeded CRNG is extremely difficult, and so this can
+ result in dmesg getting spammed for a surprisingly long
+ time. This is really bad from a security perspective, and
+ so architecture maintainers really need to do what they can
+ to get the CRNG seeded sooner after the system is booted.
+ However, since users cannot do anything actionable to
+ address this, by default this option is disabled.
+
+ Say Y here if you want to receive warnings for all uses of
+ unseeded randomness. This will be of use primarily for
+ those developers interested in improving the security of
+ Linux kernels running on their architecture (or
+ subarchitecture).
+
+config DEBUG_KOBJECT
+ bool "kobject debugging"
+ depends on DEBUG_KERNEL
+ help
+ If you say Y here, some extra kobject debugging messages will be sent
+ to the syslog.
+
+config DEBUG_KOBJECT_RELEASE
+ bool "kobject release debugging"
+ depends on DEBUG_OBJECTS_TIMERS
+ help
+ kobjects are reference counted objects. This means that their
+ last reference count put is not predictable, and the kobject can
+ live on past the point at which a driver decides to drop its
+ initial reference to the kobject gained on allocation. An
+ example of this would be a struct device which has just been
+ unregistered.
+
+ However, some buggy drivers assume that after such an operation,
+ the memory backing the kobject can be immediately freed. This
+ goes completely against the principles of a refcounted object.
+
+ If you say Y here, the kernel will delay the release of kobjects
+ on the last reference count to improve the visibility of this
+ kind of kobject release bug.
+
+config HAVE_DEBUG_BUGVERBOSE
+ bool
+
+menu "Debug kernel data structures"
+
+config DEBUG_LIST
+ bool "Debug linked list manipulation"
+ depends on DEBUG_KERNEL
+ select LIST_HARDENED
+ help
+ Enable this to turn on extended checks in the linked-list walking
+ routines.
+
+ This option trades better quality error reports for performance, and
+ is more suitable for kernel debugging. If you care about performance,
+ you should only enable CONFIG_LIST_HARDENED instead.
+
+ If unsure, say N.
+
+config DEBUG_PLIST
+ bool "Debug priority linked list manipulation"
+ depends on DEBUG_KERNEL
+ help
+ Enable this to turn on extended checks in the priority-ordered
+ linked-list (plist) walking routines. This checks the entire
+ list multiple times during each manipulation.
+
+ If unsure, say N.
+
+config DEBUG_SG
+ bool "Debug SG table operations"
+ depends on DEBUG_KERNEL
+ help
+ Enable this to turn on checks on scatter-gather tables. This can
+ help find problems with drivers that do not properly initialize
+ their sg tables.
+
+ If unsure, say N.
+
+config DEBUG_NOTIFIERS
+ bool "Debug notifier call chains"
+ depends on DEBUG_KERNEL
+ help
+ Enable this to turn on sanity checking for notifier call chains.
+ This is most useful for kernel developers to make sure that
+ modules properly unregister themselves from notifier chains.
+ This is a relatively cheap check but if you care about maximum
+ performance, say N.
+
+config DEBUG_MAPLE_TREE
+ bool "Debug maple trees"
+ depends on DEBUG_KERNEL
+ help
+ Enable maple tree debugging information and extra validations.
+
+ If unsure, say N.
+
+endmenu
+
+source "kernel/rcu/Kconfig.debug"
+
+config DEBUG_WQ_FORCE_RR_CPU
+ bool "Force round-robin CPU selection for unbound work items"
+ depends on DEBUG_KERNEL
+ default n
+ help
+ Workqueue used to implicitly guarantee that work items queued
+ without an explicit CPU specified are put on the local CPU. This
+ guarantee is no longer true and, while the local CPU is still
+ preferred, work items may be put on foreign CPUs. The kernel
+ parameter "workqueue.debug_force_rr_cpu" forces
+ round-robin CPU selection to flush out usages which depend on the
+ now broken guarantee. This config option enables the debug
+ feature by default. When enabled, memory and cache locality will
+ be impacted.
+
+config CPU_HOTPLUG_STATE_CONTROL
+ bool "Enable CPU hotplug state control"
+ depends on DEBUG_KERNEL
+ depends on HOTPLUG_CPU
+ default n
+ help
+ Allows writing steps between "offline" and "online" to the CPUs'
+ sysfs target file so states can be stepped through granularly. This
+ is a debug option for now as the hotplug machinery cannot be
+ stopped and restarted at arbitrary points yet.
+
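+ Example (the paths assume the standard CPU hotplug sysfs
+ layout; the target state number is illustrative):
+
+ # cat /sys/devices/system/cpu/hotplug/states
+ # echo 140 > /sys/devices/system/cpu/cpu1/hotplug/target
+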
+ Say N if you are unsure.
+
+config LATENCYTOP
+ bool "Latency measuring infrastructure"
+ depends on DEBUG_KERNEL
+ depends on STACKTRACE_SUPPORT
+ depends on PROC_FS
+ depends on FRAME_POINTER || MIPS || PPC || S390 || MICROBLAZE || ARM || ARC || X86
+ select KALLSYMS
+ select KALLSYMS_ALL
+ select STACKTRACE
+ select SCHEDSTATS
+ help
+ Enable this option if you want to use the LatencyTOP tool
+ to find out which userspace tasks are blocking on what kernel operations.
+
+config DEBUG_CGROUP_REF
+ bool "Disable inlining of cgroup css reference count functions"
+ depends on DEBUG_KERNEL
+ depends on CGROUPS
+ depends on KPROBES
+ default n
+ help
+ Force cgroup css reference count functions to not be inlined so
+ that they can be kprobed for debugging.
+
+source "kernel/trace/Kconfig"
+
+config PROVIDE_OHCI1394_DMA_INIT
+ bool "Remote debugging over FireWire early on boot"
+ depends on PCI && X86
+ help
+ If you want to debug problems which hang or crash the kernel early
+ on boot and the crashing machine has a FireWire port, you can use
+ this feature to remotely access the memory of the crashed machine
+ over FireWire. This employs remote DMA as part of the OHCI1394
+ specification which is now the standard for FireWire controllers.
+
+ With remote DMA, you can monitor the printk buffer remotely using
+ firescope and access all memory below 4GB using fireproxy from gdb.
+ Even controlling a kernel debugger is possible using remote DMA.
+
+ Usage:
+
+ If ohci1394_dma=early is used as boot parameter, it will initialize
+ all OHCI1394 controllers which are found in the PCI config space.
+
+ As all changes to the FireWire bus such as enabling and disabling
+ devices cause a bus reset and thereby disable remote DMA for all
+ devices, be sure to have the cable plugged and FireWire enabled on
+ the debugging host before booting the debug target for debugging.
+
+ This code (~1k) is freed after boot. By then, the firewire stack
+ in charge of the OHCI-1394 controllers should be used instead.
+
+ See Documentation/core-api/debugging-via-ohci1394.rst for more information.
+
+source "samples/Kconfig"
+
+config ARCH_HAS_DEVMEM_IS_ALLOWED
+ bool
+
+config STRICT_DEVMEM
+ bool "Filter access to /dev/mem"
+ depends on MMU && DEVMEM
+ depends on ARCH_HAS_DEVMEM_IS_ALLOWED || GENERIC_LIB_DEVMEM_IS_ALLOWED
+ default y if PPC || X86 || ARM64
+ help
+ If this option is disabled, you allow userspace (root) access to all
+ of memory, including kernel and userspace memory. Accidental
+ access to this is obviously disastrous, but specific access can
+ be used by people debugging the kernel. Note that with PAT support
+ enabled, even in this case there are restrictions on /dev/mem
+ use due to the cache aliasing requirements.
+
+ If this option is switched on, and IO_STRICT_DEVMEM=n, the /dev/mem
+ file only allows userspace access to PCI space and the BIOS code and
+ data regions. This is sufficient for dosemu and X and all common
+ users of /dev/mem.
+
+ If in doubt, say Y.
+
+config IO_STRICT_DEVMEM
+ bool "Filter I/O access to /dev/mem"
+ depends on STRICT_DEVMEM
+ help
+ If this option is disabled, you allow userspace (root) access to all
+ io-memory regardless of whether a driver is actively using that
+ range. Accidental access to this is obviously disastrous, but
+ specific access can be used by people debugging kernel drivers.
+
+ If this option is switched on, the /dev/mem file only allows
+ userspace access to *idle* io-memory ranges (see /proc/iomem). This
+ may break traditional users of /dev/mem (dosemu, legacy X, etc...)
+ if the driver using a given range cannot be disabled.
+
+ If in doubt, say Y.
+
+menu "$(SRCARCH) Debugging"
+
+source "arch/$(SRCARCH)/Kconfig.debug"
+
+endmenu
+
+menu "Kernel Testing and Coverage"
+
+source "lib/kunit/Kconfig"
+
+config NOTIFIER_ERROR_INJECTION
+ tristate "Notifier error injection"
+ depends on DEBUG_KERNEL
+ select DEBUG_FS
+ help
+ This option provides the ability to inject artificial errors to
+ specified notifier chain callbacks. It is useful to test the error
+ handling of notifier call chain failures.
+
+ Say N if unsure.
+
+config PM_NOTIFIER_ERROR_INJECT
+ tristate "PM notifier error injection module"
+ depends on PM && NOTIFIER_ERROR_INJECTION
+ default m if PM_DEBUG
+ help
+ This option provides the ability to inject artificial errors to
+ PM notifier chain callbacks. It is controlled through debugfs
+ interface /sys/kernel/debug/notifier-error-inject/pm
+
+ If the notifier call chain should be failed with some events
+ notified, write the error code to "actions/<notifier event>/error".
+
+ Example: Inject PM suspend error (-12 = -ENOMEM)
+
+ # cd /sys/kernel/debug/notifier-error-inject/pm/
+ # echo -12 > actions/PM_SUSPEND_PREPARE/error
+ # echo mem > /sys/power/state
+ bash: echo: write error: Cannot allocate memory
+
+ To compile this code as a module, choose M here: the module will
+ be called pm-notifier-error-inject.
+
+ If unsure, say N.
+
+config OF_RECONFIG_NOTIFIER_ERROR_INJECT
+ tristate "OF reconfig notifier error injection module"
+ depends on OF_DYNAMIC && NOTIFIER_ERROR_INJECTION
+ help
+ This option provides the ability to inject artificial errors to
+ OF reconfig notifier chain callbacks. It is controlled
+ through debugfs interface under
+ /sys/kernel/debug/notifier-error-inject/OF-reconfig/
+
+ If the notifier call chain should be failed with some events
+ notified, write the error code to "actions/<notifier event>/error".
+
+ To compile this code as a module, choose M here: the module will
+ be called of-reconfig-notifier-error-inject.
+
+ If unsure, say N.
+
+config NETDEV_NOTIFIER_ERROR_INJECT
+ tristate "Netdev notifier error injection module"
+ depends on NET && NOTIFIER_ERROR_INJECTION
+ help
+ This option provides the ability to inject artificial errors to
+ netdevice notifier chain callbacks. It is controlled through debugfs
+ interface /sys/kernel/debug/notifier-error-inject/netdev
+
+ If the notifier call chain should be failed with some events
+ notified, write the error code to "actions/<notifier event>/error".
+
+ Example: Inject netdevice mtu change error (-22 = -EINVAL)
+
+ # cd /sys/kernel/debug/notifier-error-inject/netdev
+ # echo -22 > actions/NETDEV_CHANGEMTU/error
+ # ip link set eth0 mtu 1024
+ RTNETLINK answers: Invalid argument
+
+ To compile this code as a module, choose M here: the module will
+ be called netdev-notifier-error-inject.
+
+ If unsure, say N.
+
+config FUNCTION_ERROR_INJECTION
+ bool "Fault-injections of functions"
+ depends on HAVE_FUNCTION_ERROR_INJECTION && KPROBES
+ help
+ Add fault injections into various functions that are annotated with
+ ALLOW_ERROR_INJECTION() in the kernel. BPF may also modify the return
+ value of these functions. This is useful to test error paths of code.
+
+ If unsure, say N.
+
+config FAULT_INJECTION
+ bool "Fault-injection framework"
+ depends on DEBUG_KERNEL
+ help
+ Provide fault-injection framework.
+ For more details, see Documentation/fault-injection/.
+
+config FAILSLAB
+ bool "Fault-injection capability for kmalloc"
+ depends on FAULT_INJECTION
+ depends on SLAB || SLUB
+ help
+ Provide fault-injection capability for kmalloc.
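+
+ With FAULT_INJECTION_DEBUG_FS also enabled, it can be tuned at
+ runtime via debugfs (the values shown are illustrative):
+
+ # echo 10 > /sys/kernel/debug/failslab/probability
+ # echo 100 > /sys/kernel/debug/failslab/interval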
+
+config FAIL_PAGE_ALLOC
+ bool "Fault-injection capability for alloc_pages()"
+ depends on FAULT_INJECTION
+ help
+ Provide fault-injection capability for alloc_pages().
+
+config FAULT_INJECTION_USERCOPY
+ bool "Fault injection capability for usercopy functions"
+ depends on FAULT_INJECTION
+ help
+ Provides fault-injection capability to inject failures
+ in usercopy functions (copy_from_user(), get_user(), ...).
+
+config FAIL_MAKE_REQUEST
+ bool "Fault-injection capability for disk IO"
+ depends on FAULT_INJECTION && BLOCK
+ help
+ Provide fault-injection capability for disk IO.
+
+config FAIL_IO_TIMEOUT
+ bool "Fault-injection capability for faking disk interrupts"
+ depends on FAULT_INJECTION && BLOCK
+ help
+ Provide fault-injection capability on end IO handling. This
+ will make the block layer "forget" an interrupt as configured,
+ thus exercising the error handling.
+
+ Only works with drivers that use the generic timeout handling,
+ for others it won't do anything.
+
+config FAIL_FUTEX
+ bool "Fault-injection capability for futexes"
+ select DEBUG_FS
+ depends on FAULT_INJECTION && FUTEX
+ help
+ Provide fault-injection capability for futexes.
+
+config FAULT_INJECTION_DEBUG_FS
+ bool "Debugfs entries for fault-injection capabilities"
+ depends on FAULT_INJECTION && SYSFS && DEBUG_FS
+ help
+ Enable configuration of fault-injection capabilities via debugfs.
+
+config FAIL_FUNCTION
+ bool "Fault-injection capability for functions"
+ depends on FAULT_INJECTION_DEBUG_FS && FUNCTION_ERROR_INJECTION
+ help
+ Provide function-based fault-injection capability.
+ This will allow you to override a specific function so that it
+ immediately returns a given value. As a result, the function's
+ caller will see an error value and have to handle it. This is
+ useful to test the error handling in various subsystems.
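+
+ Example, following Documentation/fault-injection/ (the function
+ name and return value are illustrative):
+
+ # echo open_ctree > /sys/kernel/debug/fail_function/inject
+ # echo -12 > /sys/kernel/debug/fail_function/open_ctree/retval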
+
+config FAIL_MMC_REQUEST
+ bool "Fault-injection capability for MMC IO"
+ depends on FAULT_INJECTION_DEBUG_FS && MMC
+ help
+ Provide fault-injection capability for MMC IO.
+ This will make the mmc core return data errors. This is
+ useful to test the error handling in the mmc block device
+ and to test how the mmc host driver handles retries from
+ the block device.
+
+config FAIL_SUNRPC
+ bool "Fault-injection capability for SunRPC"
+ depends on FAULT_INJECTION_DEBUG_FS && SUNRPC_DEBUG
+ help
+ Provide fault-injection capability for SunRPC and
+ its consumers.
+
+config FAULT_INJECTION_CONFIGFS
+ bool "Configfs interface for fault-injection capabilities"
+ depends on FAULT_INJECTION
+ select CONFIGFS_FS
+ help
+ This option allows configfs-based drivers to dynamically configure
+ fault-injection via configfs. Each parameter for driver-specific
+ fault-injection can be made visible as a configfs attribute in a
+ configfs group.
+
+config FAULT_INJECTION_STACKTRACE_FILTER
+ bool "stacktrace filter for fault-injection capabilities"
+ depends on FAULT_INJECTION
+ depends on (FAULT_INJECTION_DEBUG_FS || FAULT_INJECTION_CONFIGFS) && STACKTRACE_SUPPORT
+ select STACKTRACE
+ depends on FRAME_POINTER || MIPS || PPC || S390 || MICROBLAZE || ARM || ARC || X86
+ help
+ Provide a stacktrace filter for fault-injection capabilities.
+
+config ARCH_HAS_KCOV
+ bool
+ help
+ An architecture should select this when it can successfully
+ build and run with CONFIG_KCOV. This typically requires
+ disabling instrumentation for some early boot code.
+
+config CC_HAS_SANCOV_TRACE_PC
+ def_bool $(cc-option,-fsanitize-coverage=trace-pc)
+
+config KCOV
+ bool "Code coverage for fuzzing"
+ depends on ARCH_HAS_KCOV
+ depends on CC_HAS_SANCOV_TRACE_PC || GCC_PLUGINS
+ depends on !ARCH_WANTS_NO_INSTR || HAVE_NOINSTR_HACK || \
+ GCC_VERSION >= 120000 || CLANG_VERSION >= 130000
+ select DEBUG_FS
+ select GCC_PLUGIN_SANCOV if !CC_HAS_SANCOV_TRACE_PC
+ select OBJTOOL if HAVE_NOINSTR_HACK
+ help
+ KCOV exposes kernel code coverage information in a form suitable
+ for coverage-guided fuzzing (randomized testing).
+
+ If RANDOMIZE_BASE is enabled, PC values will not be stable across
+ different machines and across reboots. If you need stable PC values,
+ disable RANDOMIZE_BASE.
+
+ For more details, see Documentation/dev-tools/kcov.rst.
+
+config KCOV_ENABLE_COMPARISONS
+ bool "Enable comparison operands collection by KCOV"
+ depends on KCOV
+ depends on $(cc-option,-fsanitize-coverage=trace-cmp)
+ help
+ KCOV also exposes operands of every comparison in the instrumented
+ code along with operand sizes and PCs of the comparison instructions.
+ These operands can be used by fuzzing engines to improve the quality
+ of fuzzing coverage.
+
+config KCOV_INSTRUMENT_ALL
+ bool "Instrument all code by default"
+ depends on KCOV
+ default y
+ help
+ If you are doing generic system call fuzzing (like e.g. syzkaller),
+ then you will want to instrument the whole kernel and you should
+ say y here. If you are doing more targeted fuzzing (like e.g.
+ filesystem fuzzing with AFL) then you will want to enable coverage
+ for more specific subsets of files, and should say n here.
+
+config KCOV_IRQ_AREA_SIZE
+ hex "Size of interrupt coverage collection area in words"
+ depends on KCOV
+ default 0x40000
+ help
+ KCOV uses preallocated per-cpu areas to collect coverage from
+ soft interrupts. This specifies the size of those areas in the
+ number of unsigned long words.
+
+menuconfig RUNTIME_TESTING_MENU
+ bool "Runtime Testing"
+ def_bool y
+
+if RUNTIME_TESTING_MENU
+
+config TEST_DHRY
+ tristate "Dhrystone benchmark test"
+ help
+ Enable this to include the Dhrystone 2.1 benchmark. This test
+ calculates the number of Dhrystones per second, and the number of
+ DMIPS (Dhrystone MIPS) obtained when the Dhrystone score is divided
+ by 1757 (the number of Dhrystones per second obtained on the VAX
+ 11/780, nominally a 1 MIPS machine).
+
+ To run the benchmark, it needs to be enabled explicitly, either from
+ the kernel command line (when built-in), or from userspace (when
+ built-in or modular).
+
+ Run once during kernel boot:
+
+ test_dhry.run
+
+ Set number of iterations from kernel command line:
+
+ test_dhry.iterations=<n>
+
+ Set number of iterations from userspace:
+
+ echo <n> > /sys/module/test_dhry/parameters/iterations
+
+ Trigger manual run from userspace:
+
+ echo y > /sys/module/test_dhry/parameters/run
+
+ If the number of iterations is <= 0, the test will devise a suitable
+ number of iterations (test runs for at least 2s) automatically.
+ This process takes ca. 4s.
+
+ If unsure, say N.
+
+config LKDTM
+ tristate "Linux Kernel Dump Test Tool Module"
+ depends on DEBUG_FS
+ help
+ This module enables testing of the different dumping mechanisms by
+ inducing system failures at predefined crash points.
+ If you don't need it: say N
+ Choose M here to compile this code as a module. The module will be
+ called lkdtm.
+
+ Documentation on how to use the module can be found in
+ Documentation/fault-injection/provoke-crashes.rst
+
+config CPUMASK_KUNIT_TEST
+ tristate "KUnit test for cpumask" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ Enable to turn on cpumask tests, running at boot or module load time.
+
+ For more information on KUnit and unit tests in general, please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
+config TEST_LIST_SORT
+ tristate "Linked list sorting test" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ Enable this to turn on 'list_sort()' function test. This test is
+ executed only once during system boot (so affects only boot time),
+ or at module load time.
+
+ If unsure, say N.
+
+config TEST_MIN_HEAP
+ tristate "Min heap test"
+ depends on DEBUG_KERNEL || m
+ help
+ Enable this to turn on min heap function tests. This test is
+ executed only once during system boot (so affects only boot time),
+ or at module load time.
+
+ If unsure, say N.
+
+config TEST_SORT
+ tristate "Array-based sort test" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ This option enables the self-test function of 'sort()' at boot,
+ or at module load time.
+
+ If unsure, say N.
+
+config TEST_DIV64
+ tristate "64bit/32bit division and modulo test"
+ depends on DEBUG_KERNEL || m
+ help
+ Enable this to turn on 'do_div()' function test. This test is
+ executed only once during system boot (so affects only boot time),
+ or at module load time.
+
+ If unsure, say N.
+
+config TEST_IOV_ITER
+ tristate "Test iov_iter operation" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ Enable this to turn on testing of the operation of the I/O iterator
+ (iov_iter). This test is executed only once during system boot (so
+ affects only boot time), or at module load time.
+
+ If unsure, say N.
+
+config KPROBES_SANITY_TEST
+ tristate "Kprobes sanity tests" if !KUNIT_ALL_TESTS
+ depends on DEBUG_KERNEL
+ depends on KPROBES
+ depends on KUNIT
+ select STACKTRACE if ARCH_CORRECT_STACKTRACE_ON_KRETPROBE
+ default KUNIT_ALL_TESTS
+ help
+ This option provides for testing basic kprobes functionality on
+ boot. Samples of kprobe and kretprobe are inserted and
+ verified for functionality.
+
+ Say N if you are unsure.
+
+config FPROBE_SANITY_TEST
+ bool "Self test for fprobe"
+ depends on DEBUG_KERNEL
+ depends on FPROBE
+ depends on KUNIT=y
+ help
+ This option will enable testing the fprobe when the system boots.
+ A series of tests is run to verify that the fprobe is functioning
+ properly.
+
+ Say N if you are unsure.
+
+config BACKTRACE_SELF_TEST
+ tristate "Self test for the backtrace code"
+ depends on DEBUG_KERNEL
+ help
+ This option provides a kernel module that can be used to test
+ the kernel stack backtrace code. This option is not useful
+ for distributions or general kernels, but only for kernel
+ developers working on architecture code.
+
+ Note that if you want to also test saved backtraces, you will
+ have to enable STACKTRACE as well.
+
+ Say N if you are unsure.
+
+config TEST_REF_TRACKER
+ tristate "Self test for reference tracker"
+ depends on DEBUG_KERNEL && STACKTRACE_SUPPORT
+ select REF_TRACKER
+ help
+ This option provides a kernel module performing tests
+ using reference tracker infrastructure.
+
+ Say N if you are unsure.
+
+config RBTREE_TEST
+ tristate "Red-Black tree test"
+ depends on DEBUG_KERNEL
+ help
+ A benchmark measuring the performance of the rbtree library.
+ Also includes rbtree invariant checks.
+
+config REED_SOLOMON_TEST
+ tristate "Reed-Solomon library test"
+ depends on DEBUG_KERNEL || m
+ select REED_SOLOMON
+ select REED_SOLOMON_ENC16
+ select REED_SOLOMON_DEC16
+ help
+ This option enables the self-test function of rslib at boot,
+ or at module load time.
+
+ If unsure, say N.
+
+config INTERVAL_TREE_TEST
+ tristate "Interval tree test"
+ depends on DEBUG_KERNEL
+ select INTERVAL_TREE
+ help
+ A benchmark measuring the performance of the interval tree library.
+
+config PERCPU_TEST
+ tristate "Per cpu operations test"
+ depends on m && DEBUG_KERNEL
+ help
+ Enable this option to build a test module which validates per-cpu
+ operations.
+
+ If unsure, say N.
+
+config ATOMIC64_SELFTEST
+ tristate "Perform an atomic64_t self-test"
+ help
+ Enable this option to test the atomic64_t functions at boot or
+ at module load time.
+
+ If unsure, say N.
+
+config ASYNC_RAID6_TEST
+ tristate "Self test for hardware accelerated raid6 recovery"
+ depends on ASYNC_RAID6_RECOV
+ select ASYNC_MEMCPY
+ help
+ This is a one-shot self test that permutes through the
+ recovery of all the possible two disk failure scenarios for an
+ N-disk array. Recovery is performed with the asynchronous
+ raid6 recovery routines, and will optionally use an offload
+ engine if one is available.
+
+ If unsure, say N.
+
+config TEST_HEXDUMP
+ tristate "Test functions located in the hexdump module at runtime"
+
+config STRING_SELFTEST
+ tristate "Test string functions at runtime"
+
+config TEST_STRING_HELPERS
+ tristate "Test functions located in the string_helpers module at runtime"
+
+config TEST_KSTRTOX
+ tristate "Test kstrto*() family of functions at runtime"
+
+config TEST_PRINTF
+ tristate "Test printf() family of functions at runtime"
+
+config TEST_SCANF
+ tristate "Test scanf() family of functions at runtime"
+
+config TEST_BITMAP
+ tristate "Test bitmap_*() family of functions at runtime"
+ help
+ Enable this option to test the bitmap functions at boot.
+
+ If unsure, say N.
+
+config TEST_UUID
+ tristate "Test functions located in the uuid module at runtime"
+
+config TEST_XARRAY
+ tristate "Test the XArray code at runtime"
+
+config TEST_MAPLE_TREE
+ tristate "Test the Maple Tree code at runtime or module load"
+ help
+ Enable this option to test the maple tree code functions at boot, or
+ when the module is loaded. Enabling "Debug Maple Trees" will enable
+ more verbose output on failures.
+
+ If unsure, say N.
+
+config TEST_RHASHTABLE
+ tristate "Perform selftest on resizable hash table"
+ help
+ Enable this option to test the rhashtable functions at boot.
+
+ If unsure, say N.
+
+config TEST_IDA
+ tristate "Perform selftest on IDA functions"
+
+config TEST_PARMAN
+ tristate "Perform selftest on priority array manager"
+ depends on PARMAN
+ help
+ Enable this option to test priority array manager on boot
+ (or module load).
+
+ If unsure, say N.
+
+config TEST_IRQ_TIMINGS
+ bool "IRQ timings selftest"
+ depends on IRQ_TIMINGS
+ help
+ Enable this option to test the irq timings code on boot.
+
+ If unsure, say N.
+
+config TEST_LKM
+ tristate "Test module loading with 'hello world' module"
+ depends on m
+ help
+ This builds the "test_module" module that emits "Hello, world"
+ on printk when loaded. It is designed to be used for basic
+ evaluation of the module loading subsystem (for example when
+ validating module verification). It lacks any extra dependencies,
+ and will not normally be loaded by the system unless explicitly
+ requested by name.
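+
+ For example:
+
+ # modprobe test_module
+ # dmesg | grep "Hello, world"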
+
+ If unsure, say N.
+
+config TEST_BITOPS
+ tristate "Test module for compilation of bitops operations"
+ depends on m
+ help
+ This builds the "test_bitops" module that is much like the
+ TEST_LKM module except that it does a basic exercise of the
+ set/clear_bit macros and get_count_order/long to make sure there are
+ no compiler warnings from C=1 sparse checker or -Wextra
+ compilations. It has no dependencies and doesn't run or load unless
+ explicitly requested by name. For example: modprobe test_bitops.
+
+ If unsure, say N.
+
+config TEST_VMALLOC
+ tristate "Test module for stress/performance analysis of vmalloc allocator"
+ default n
+ depends on MMU
+ depends on m
+ help
+ This builds the "test_vmalloc" module that should be used for
+ stress and performance analysis. So, any new change for vmalloc
+ subsystem can be evaluated from performance and stability point
+ of view.
+
+ If unsure, say N.
+
+config TEST_USER_COPY
+ tristate "Test user/kernel boundary protections"
+ depends on m
+ help
+ This builds the "test_user_copy" module that runs sanity checks
+ on the copy_to/from_user infrastructure, making sure basic
+ user/kernel boundary testing is working. If it fails to load,
+ a regression has been detected in the user/kernel memory boundary
+ protections.
+
+ If unsure, say N.
+
+config TEST_BPF
+ tristate "Test BPF filter functionality"
+ depends on m && NET
+ help
+ This builds the "test_bpf" module that runs various test vectors
+ against the BPF interpreter or BPF JIT compiler depending on the
+ current setting. This is in particular useful for BPF JIT compiler
+ development, but also to run regression tests against changes in
+ the interpreter code. It also enables test stubs for eBPF maps and
+ verifier used by user space verifier testsuite.
+
+ If unsure, say N.
+
+config TEST_BLACKHOLE_DEV
+ tristate "Test blackhole netdev functionality"
+ depends on m && NET
+ help
+ This builds the "test_blackhole_dev" module that validates the
+ data path through this blackhole netdev.
+
+ If unsure, say N.
+
+config FIND_BIT_BENCHMARK
+ tristate "Test find_bit functions"
+ help
+ This builds the "test_find_bit" module that measure find_*_bit()
+ functions performance.
+
+ If unsure, say N.
+
+config TEST_FIRMWARE
+ tristate "Test firmware loading via userspace interface"
+ depends on FW_LOADER
+ help
+ This builds the "test_firmware" module that creates a userspace
+ interface for testing firmware loading. This can be used to
+ control the triggering of firmware loading without needing an
+ actual firmware-using device. The contents can be rechecked by
+ userspace.
+
+ If unsure, say N.
+
+config TEST_SYSCTL
+ tristate "sysctl test driver"
+ depends on PROC_SYSCTL
+ help
+ This builds the "test_sysctl" module. This driver enables to test the
+ proc sysctl interfaces available to drivers safely without affecting
+ production knobs which might alter system functionality.
+
+ If unsure, say N.
+
+config BITFIELD_KUNIT
+ tristate "KUnit test bitfield functions at runtime" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ Enable this option to test the bitfield functions at boot.
+
+ KUnit tests run during boot and output the results to the debug log
+ in TAP format (http://testanything.org/). Only useful for kernel devs
+ running the KUnit test harness, and not intended for inclusion into a
+ production build.
+
+ For more information on KUnit and unit tests in general please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
+config CHECKSUM_KUNIT
+ tristate "KUnit test checksum functions at runtime" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ Enable this option to test the checksum functions at boot.
+
+ KUnit tests run during boot and output the results to the debug log
+ in TAP format (http://testanything.org/). Only useful for kernel devs
+ running the KUnit test harness, and not intended for inclusion into a
+ production build.
+
+ For more information on KUnit and unit tests in general please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
+config HASH_KUNIT_TEST
+ tristate "KUnit Test for integer hash functions" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ Enable this option to test the kernel's string (<linux/stringhash.h>), and
+ integer (<linux/hash.h>) hash functions on boot.
+
+ KUnit tests run during boot and output the results to the debug log
+ in TAP format (https://testanything.org/). Only useful for kernel devs
+ running the KUnit test harness, and not intended for inclusion into a
+ production build.
+
+ For more information on KUnit and unit tests in general please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ This is intended to help people writing architecture-specific
+ optimized versions. If unsure, say N.
+
+config RESOURCE_KUNIT_TEST
+ tristate "KUnit test for resource API" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ This builds the resource API unit test.
+ Tests the logic of the API provided by resource.c and ioport.h.
+ For more information on KUnit and unit tests in general please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
+config SYSCTL_KUNIT_TEST
+ tristate "KUnit test for sysctl" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ This builds the proc sysctl unit test, which runs on boot.
+ Tests the API contract and implementation correctness of sysctl.
+ For more information on KUnit and unit tests in general please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
+config LIST_KUNIT_TEST
+ tristate "KUnit Test for Kernel Linked-list structures" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ This builds the linked list KUnit test suite.
+ It tests the API and basic functionality of the list_head type
+ and associated macros.
+
+ KUnit tests run during boot and output the results to the debug log
+ in TAP format (https://testanything.org/). Only useful for kernel devs
+ running the KUnit test harness, and not intended for inclusion into a
+ production build.
+
+ For more information on KUnit and unit tests in general please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
+config HASHTABLE_KUNIT_TEST
+ tristate "KUnit Test for Kernel Hashtable structures" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ This builds the hashtable KUnit test suite.
+ It tests the basic functionality of the API defined in
+ include/linux/hashtable.h. For more information on KUnit and
+ unit tests in general please refer to the KUnit documentation
+ in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
+config LINEAR_RANGES_TEST
+ tristate "KUnit test for linear_ranges"
+ depends on KUNIT
+ select LINEAR_RANGES
+ help
+ This builds the linear_ranges unit test, which runs on boot.
+ Tests the linear_ranges logic correctness.
+ For more information on KUnit and unit tests in general please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
+config CMDLINE_KUNIT_TEST
+ tristate "KUnit test for cmdline API" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ This builds the cmdline API unit test.
+ Tests the logic of the API provided by cmdline.c.
+ For more information on KUnit and unit tests in general please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
+config BITS_TEST
+ tristate "KUnit test for bits.h" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ This builds the bits unit test.
+ Tests the logic of macros defined in bits.h.
+ For more information on KUnit and unit tests in general please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
+config SLUB_KUNIT_TEST
+ tristate "KUnit test for SLUB cache error detection" if !KUNIT_ALL_TESTS
+ depends on SLUB_DEBUG && KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ This builds the SLUB allocator unit test.
+ Tests SLUB cache debugging functionality.
+ For more information on KUnit and unit tests in general please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
+config RATIONAL_KUNIT_TEST
+ tristate "KUnit test for rational.c" if !KUNIT_ALL_TESTS
+ depends on KUNIT && RATIONAL
+ default KUNIT_ALL_TESTS
+ help
+ This builds the rational math unit test.
+ For more information on KUnit and unit tests in general please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
+config MEMCPY_KUNIT_TEST
+ tristate "Test memcpy(), memmove(), and memset() functions at runtime" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ Builds unit tests for memcpy(), memmove(), and memset() functions.
+ For more information on KUnit and unit tests in general please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
+config MEMCPY_SLOW_KUNIT_TEST
+ bool "Include exhaustive memcpy tests"
+ depends on MEMCPY_KUNIT_TEST
+ default y
+ help
+ Some memcpy tests are quite exhaustive in checking for overlaps
+ and bit ranges. These can be very slow, so they are split out
+ as a separate config, in case they need to be disabled.
+
+ Note this config option will be replaced by the use of KUnit test
+ attributes.
+
+config IS_SIGNED_TYPE_KUNIT_TEST
+ tristate "Test is_signed_type() macro" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ Builds unit tests for the is_signed_type() macro.
+
+ For more information on KUnit and unit tests in general please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
+config OVERFLOW_KUNIT_TEST
+ tristate "Test check_*_overflow() functions at runtime" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ Builds unit tests for the check_*_overflow(), size_*(), allocation, and
+ related functions.
+
+ For more information on KUnit and unit tests in general please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
+config STACKINIT_KUNIT_TEST
+ tristate "Test level of stack variable initialization" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ Test if the kernel is zero-initializing stack variables and
+ padding. Coverage is controlled by compiler flags,
+ CONFIG_INIT_STACK_ALL_PATTERN, CONFIG_INIT_STACK_ALL_ZERO,
+ CONFIG_GCC_PLUGIN_STRUCTLEAK, CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF,
+ or CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL.
+
+config FORTIFY_KUNIT_TEST
+ tristate "Test fortified str*() and mem*() function internals at runtime" if !KUNIT_ALL_TESTS
+ depends on KUNIT && FORTIFY_SOURCE
+ default KUNIT_ALL_TESTS
+ help
+ Builds unit tests for checking internals of FORTIFY_SOURCE as used
+ by the str*() and mem*() family of functions. For testing runtime
+ traps of FORTIFY_SOURCE, see LKDTM's "FORTIFY_*" tests.
+
+config HW_BREAKPOINT_KUNIT_TEST
+ bool "Test hw_breakpoint constraints accounting" if !KUNIT_ALL_TESTS
+ depends on HAVE_HW_BREAKPOINT
+ depends on KUNIT=y
+ default KUNIT_ALL_TESTS
+ help
+ Tests for hw_breakpoint constraints accounting.
+
+ If unsure, say N.
+
+config STRCAT_KUNIT_TEST
+ tristate "Test strcat() family of functions at runtime" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+
+config STRSCPY_KUNIT_TEST
+ tristate "Test strscpy*() family of functions at runtime" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+
+config SIPHASH_KUNIT_TEST
+ tristate "Perform selftest on siphash functions" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ Enable this option to test the kernel's siphash (<linux/siphash.h>) hash
+ functions on boot (or module load).
+
+ This is intended to help people writing architecture-specific
+ optimized versions. If unsure, say N.
+
+config TEST_UDELAY
+ tristate "udelay test driver"
+ help
+ This builds the "udelay_test" module that helps to make sure
+ that udelay() is working properly.
+
+ If unsure, say N.
+
+config TEST_STATIC_KEYS
+ tristate "Test static keys"
+ depends on m
+ help
+ Test the static key interfaces.
+
+ If unsure, say N.
+
+config TEST_DYNAMIC_DEBUG
+ tristate "Test DYNAMIC_DEBUG"
+ depends on DYNAMIC_DEBUG
+ help
+ This module registers a tracer callback to count enabled
+ pr_debugs in a 'do_debugging' function, then alters their
+ enablements, calls the function, and compares counts.
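+
+ For example, its pr_debug() sites can be enabled through the
+ dynamic debug control file (the module name is assumed from
+ this config; the path assumes debugfs is mounted):
+
+ # echo "module test_dynamic_debug +p" > \
+ /sys/kernel/debug/dynamic_debug/control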
+
+ If unsure, say N.
+
+config TEST_KMOD
+ tristate "kmod stress tester"
+ depends on m
+ depends on NETDEVICES && NET_CORE && INET # for TUN
+ depends on BLOCK
+ depends on PAGE_SIZE_LESS_THAN_256KB # for BTRFS
+ select TEST_LKM
+ select XFS_FS
+ select TUN
+ select BTRFS_FS
+ help
+ Test the kernel's module loading mechanism: kmod. kmod implements
+ support to load modules using the Linux kernel's usermode helper.
+ This test provides a series of tests against kmod.
+
+ Although technically you can either build test_kmod as a module or
+ into the kernel, we disallow building it into the kernel since
+ it stress tests request_module() and this will very likely cause
+ some issues by taking over precious threads available from other
+ module load requests; ultimately this could be fatal.
+
+ To run tests run:
+
+ tools/testing/selftests/kmod/kmod.sh --help
+
+ If unsure, say N.
+
+config TEST_DEBUG_VIRTUAL
+ tristate "Test CONFIG_DEBUG_VIRTUAL feature"
+ depends on DEBUG_VIRTUAL
+ help
+ Test the kernel's ability to detect incorrect calls to
+ virt_to_phys() done against the non-linear part of the
+ kernel's virtual address map.
+
+ If unsure, say N.
+
+config TEST_MEMCAT_P
+ tristate "Test memcat_p() helper function"
+ help
+ Test the memcat_p() helper for correctly merging two
+ pointer arrays together.
+
+ If unsure, say N.
+
+config TEST_LIVEPATCH
+ tristate "Test livepatching"
+ default n
+ depends on DYNAMIC_DEBUG
+ depends on LIVEPATCH
+ depends on m
+ help
+ Test kernel livepatching features for correctness. The tests will
+ load test modules that will be livepatched in various scenarios.
+
+ To run all the livepatching tests:
+
+ make -C tools/testing/selftests TARGETS=livepatch run_tests
+
+ Alternatively, individual tests may be invoked:
+
+ tools/testing/selftests/livepatch/test-callbacks.sh
+ tools/testing/selftests/livepatch/test-livepatch.sh
+ tools/testing/selftests/livepatch/test-shadow-vars.sh
+
+ If unsure, say N.
+
+config TEST_OBJAGG
+ tristate "Perform selftest on object aggreration manager"
+ default n
+ depends on OBJAGG
+ help
+ Enable this option to test object aggregation manager on boot
+ (or module load).
+
+config TEST_MEMINIT
+ tristate "Test heap/page initialization"
+ help
+ Test if the kernel is zero-initializing heap and page allocations.
+ This can be useful to test init_on_alloc and init_on_free features.
+
+ If unsure, say N.
+
+config TEST_HMM
+ tristate "Test HMM (Heterogeneous Memory Management)"
+ depends on TRANSPARENT_HUGEPAGE
+ depends on DEVICE_PRIVATE
+ select HMM_MIRROR
+ select MMU_NOTIFIER
+ help
+ This is a pseudo device driver solely for testing HMM.
+ Say M here if you want to build the HMM test module.
+ Doing so will allow you to run tools/testing/selftests/vm/hmm-tests.
+
+ If unsure, say N.
+
+config TEST_FREE_PAGES
+ tristate "Test freeing pages"
+ help
+ Test that a memory leak does not occur due to a race between
+ freeing a block of pages and a speculative page reference.
+ Loading this module is safe if your kernel has the bug fixed.
+ If the bug is not fixed, it will leak gigabytes of memory and
+ probably OOM your system.
+
+config TEST_FPU
+ tristate "Test floating point operations in kernel space"
+ depends on X86 && !KCOV_INSTRUMENT_ALL
+ help
+ Enable this option to add /sys/kernel/debug/selftest_helpers/test_fpu
+ which will trigger a sequence of floating point operations. This is used
+ for self-testing floating point control register setting in
+ kernel_fpu_begin().
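+
+ For example, a test run can be triggered by reading the file:
+
+ # cat /sys/kernel/debug/selftest_helpers/test_fpu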
+
+ If unsure, say N.
+
+config TEST_CLOCKSOURCE_WATCHDOG
+ tristate "Test clocksource watchdog in kernel space"
+ depends on CLOCKSOURCE_WATCHDOG
+ help
+ Enable this option to create a kernel module that will trigger
+ a test of the clocksource watchdog. This module may be loaded
+ via modprobe or insmod, in which case it will run upon being
+ loaded, or it may be built in, in which case it will run
+ shortly after boot.
+
+ If unsure, say N.
+
+endif # RUNTIME_TESTING_MENU
+
+config ARCH_USE_MEMTEST
+ bool
+ help
+ An architecture should select this when it uses early_memtest()
+ during boot process.
+
+config MEMTEST
+ bool "Memtest"
+ depends on ARCH_USE_MEMTEST
+ help
+ This option adds a kernel parameter 'memtest', which allows memtest
+ to be set and executed.
+ memtest=0 means disabled (default);
+ memtest=1 means do 1 test pattern;
+ ...
+ memtest=17 means do 17 test patterns.
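+
+ For example, to run 4 test patterns at early boot (an
+ illustrative value), add to the kernel command line:
+
+ memtest=4
+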
+ If you are unsure how to answer this question, answer N.
+
+
+config HYPERV_TESTING
+ bool "Microsoft Hyper-V driver testing"
+ default n
+ depends on HYPERV && DEBUG_FS
+ help
+ Select this option to enable Hyper-V vmbus testing.
+
+endmenu # "Kernel Testing and Coverage"
+
+menu "Rust hacking"
+
+config RUST_DEBUG_ASSERTIONS
+ bool "Debug assertions"
+ depends on RUST
+ help
+ Enables rustc's `-Cdebug-assertions` codegen option.
+
+ This flag lets you turn `cfg(debug_assertions)` conditional
+ compilation on or off. This can be used to enable extra debugging
+ code in development but not in production. For example, it controls
+ the behavior of the standard library's `debug_assert!` macro.
+
+ Note that this will apply to all Rust code, including `core`.
+
+ If unsure, say N.
+
+config RUST_OVERFLOW_CHECKS
+ bool "Overflow checks"
+ default y
+ depends on RUST
+ help
+ Enables rustc's `-Coverflow-checks` codegen option.
+
+ This flag allows you to control the behavior of runtime integer
+ overflow. When overflow-checks are enabled, a Rust panic will occur
+ on overflow.
+
+ Note that this will apply to all Rust code, including `core`.
+
+ If unsure, say Y.
+
+config RUST_BUILD_ASSERT_ALLOW
+ bool "Allow unoptimized build-time assertions"
+ depends on RUST
+ help
+ Controls how `build_error!` and `build_assert!` are handled during build.
+
+ If calls to them exist in the binary, it may indicate a violated invariant
+ or that the optimizer failed to verify the invariant during compilation.
+
+ This should not happen, thus by default the build is aborted. However,
+ as an escape hatch, you can choose Y here to ignore them during build
+ and let the check be carried out at runtime (with `panic!` being called if
+ the check fails).
+
+ If unsure, say N.
+
+config RUST_KERNEL_DOCTESTS
+ bool "Doctests for the `kernel` crate" if !KUNIT_ALL_TESTS
+ depends on RUST && KUNIT=y
+ default KUNIT_ALL_TESTS
+ help
+ This builds the documentation tests of the `kernel` crate
+ as KUnit tests.
+
+ For more information on KUnit and unit tests in general,
+ please refer to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
+endmenu # "Rust"
+
+endmenu # Kernel hacking
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
new file mode 100644
index 000000000..fdca89c05
--- /dev/null
+++ b/lib/Kconfig.kasan
@@ -0,0 +1,210 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+# This config refers to the generic KASAN mode.
+config HAVE_ARCH_KASAN
+ bool
+
+config HAVE_ARCH_KASAN_SW_TAGS
+ bool
+
+config HAVE_ARCH_KASAN_HW_TAGS
+ bool
+
+config HAVE_ARCH_KASAN_VMALLOC
+ bool
+
+config ARCH_DISABLE_KASAN_INLINE
+ bool
+ help
+ Disables both inline and stack instrumentation. Selected by
+ architectures that do not support these instrumentation types.
+
+config CC_HAS_KASAN_GENERIC
+ def_bool $(cc-option, -fsanitize=kernel-address)
+
+config CC_HAS_KASAN_SW_TAGS
+ def_bool $(cc-option, -fsanitize=kernel-hwaddress)
+
+# This option is only required for software KASAN modes.
+# Old GCC versions do not have proper support for no_sanitize_address.
+# See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=89124 for details.
+config CC_HAS_WORKING_NOSANITIZE_ADDRESS
+ def_bool !CC_IS_GCC || GCC_VERSION >= 80300
+
+menuconfig KASAN
+ bool "KASAN: dynamic memory safety error detector"
+ depends on (((HAVE_ARCH_KASAN && CC_HAS_KASAN_GENERIC) || \
+ (HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS)) && \
+ CC_HAS_WORKING_NOSANITIZE_ADDRESS) || \
+ HAVE_ARCH_KASAN_HW_TAGS
+ depends on (SLUB && SYSFS && !SLUB_TINY) || (SLAB && !DEBUG_SLAB)
+ select STACKDEPOT_ALWAYS_INIT
+ help
+ Enables KASAN (Kernel Address Sanitizer) - a dynamic memory safety
+ error detector designed to find out-of-bounds and use-after-free bugs.
+
+ See Documentation/dev-tools/kasan.rst for details.
+
+ For better error reports, also enable CONFIG_STACKTRACE.
+
+if KASAN
+
+config CC_HAS_KASAN_MEMINTRINSIC_PREFIX
+ def_bool (CC_IS_CLANG && $(cc-option,-fsanitize=kernel-address -mllvm -asan-kernel-mem-intrinsic-prefix=1)) || \
+ (CC_IS_GCC && $(cc-option,-fsanitize=kernel-address --param asan-kernel-mem-intrinsic-prefix=1))
+ # Don't define it if we don't need it: compilation of the test uses
+ # this variable to decide how the compiler should treat builtins.
+ depends on !KASAN_HW_TAGS
+ help
+ The compiler is able to prefix memintrinsics with __asan or __hwasan.
+
+choice
+ prompt "KASAN mode"
+ default KASAN_GENERIC
+ help
+ KASAN has three modes:
+
+ 1. Generic KASAN (supported by many architectures, enabled with
+ CONFIG_KASAN_GENERIC, similar to userspace ASan),
+ 2. Software Tag-Based KASAN (arm64 only, based on software memory
+ tagging, enabled with CONFIG_KASAN_SW_TAGS, similar to userspace
+ HWASan), and
+ 3. Hardware Tag-Based KASAN (arm64 only, based on hardware memory
+ tagging, enabled with CONFIG_KASAN_HW_TAGS).
+
+ See Documentation/dev-tools/kasan.rst for details about each mode.
+
+config KASAN_GENERIC
+ bool "Generic KASAN"
+ depends on HAVE_ARCH_KASAN && CC_HAS_KASAN_GENERIC
+ depends on CC_HAS_WORKING_NOSANITIZE_ADDRESS
+ select SLUB_DEBUG if SLUB
+ select CONSTRUCTORS
+ help
+ Enables Generic KASAN.
+
+ Requires GCC 8.3.0+ or Clang.
+
+ Consumes about 1/8th of available memory at kernel start and adds an
+ overhead of ~50% for dynamic allocations.
+ The performance slowdown is ~x3.
+
+ (Incompatible with CONFIG_DEBUG_SLAB: the kernel does not boot.)
+
+config KASAN_SW_TAGS
+ bool "Software Tag-Based KASAN"
+ depends on HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS
+ depends on CC_HAS_WORKING_NOSANITIZE_ADDRESS
+ select SLUB_DEBUG if SLUB
+ select CONSTRUCTORS
+ help
+ Enables Software Tag-Based KASAN.
+
+ Requires GCC 11+ or Clang.
+
+ Supported only on arm64 CPUs and relies on Top Byte Ignore.
+
+ Consumes about 1/16th of available memory at kernel start and
+ adds an overhead of ~20% for dynamic allocations.
+
+ May potentially introduce problems related to pointer casting and
+ comparison, as it embeds a tag into the top byte of each pointer.
+
+ (Incompatible with CONFIG_DEBUG_SLAB: the kernel does not boot.)
+
+config KASAN_HW_TAGS
+ bool "Hardware Tag-Based KASAN"
+ depends on HAVE_ARCH_KASAN_HW_TAGS
+ depends on SLUB
+ help
+ Enables Hardware Tag-Based KASAN.
+
+ Requires GCC 10+ or Clang 12+.
+
+ Supported only on arm64 CPUs starting from ARMv8.5 and relies on
+ Memory Tagging Extension and Top Byte Ignore.
+
+ Consumes about 1/32nd of available memory.
+
+ May potentially introduce problems related to pointer casting and
+ comparison, as it embeds a tag into the top byte of each pointer.
+
+endchoice
+
+choice
+ prompt "Instrumentation type"
+ depends on KASAN_GENERIC || KASAN_SW_TAGS
+ default KASAN_OUTLINE
+
+config KASAN_OUTLINE
+ bool "Outline instrumentation"
+ help
+ Makes the compiler insert function calls that check whether the memory
+ is accessible before each memory access. Slower than KASAN_INLINE, but
+ does not bloat the size of the kernel's .text section so much.
+
+config KASAN_INLINE
+ bool "Inline instrumentation"
+ depends on !ARCH_DISABLE_KASAN_INLINE
+ help
+ Makes the compiler directly insert memory accessibility checks before
+ each memory access. Faster than KASAN_OUTLINE (gives ~x2 boost for
+ some workloads), but makes the kernel's .text size much bigger.
+
+endchoice
+
+config KASAN_STACK
+ bool "Stack instrumentation (unsafe)" if CC_IS_CLANG && !COMPILE_TEST
+ depends on KASAN_GENERIC || KASAN_SW_TAGS
+ depends on !ARCH_DISABLE_KASAN_INLINE
+ default y if CC_IS_GCC
+ help
+ Disables stack instrumentation and thus KASAN's ability to detect
+ out-of-bounds bugs in stack variables.
+
+ With Clang, stack instrumentation has a problem that causes excessive
+ stack usage, see https://bugs.llvm.org/show_bug.cgi?id=38809. Thus,
+ with Clang, this option is deemed unsafe.
+
+ This option is always disabled when compile-testing with Clang to
+ avoid cluttering the log with stack overflow warnings.
+
+ With GCC, enabling stack instrumentation is assumed to be safe.
+
+ If the architecture disables inline instrumentation via
+ ARCH_DISABLE_KASAN_INLINE, stack instrumentation gets disabled
+ as well, as it adds inline-style instrumentation that is run
+ unconditionally.
+
+config KASAN_VMALLOC
+ bool "Check accesses to vmalloc allocations"
+ depends on HAVE_ARCH_KASAN_VMALLOC
+ help
+ Makes KASAN check the validity of accesses to vmalloc allocations.
+
+ With software KASAN modes, all types of vmalloc allocations are
+ checked. Enabling this option leads to higher memory usage.
+
+ With Hardware Tag-Based KASAN, only non-executable VM_ALLOC mappings
+ are checked. There is no additional memory usage.
+
+config KASAN_KUNIT_TEST
+ tristate "KUnit-compatible tests of KASAN bug detection capabilities" if !KUNIT_ALL_TESTS
+ depends on KASAN && KUNIT && TRACEPOINTS
+ default KUNIT_ALL_TESTS
+ help
+ A KUnit-based KASAN test suite. Triggers different kinds of
+ out-of-bounds and use-after-free accesses. Useful for testing whether
+ KASAN can detect certain bug types.
+
+ For more information on KUnit and unit tests in general, please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+config KASAN_MODULE_TEST
+ tristate "KUnit-incompatible tests of KASAN bug detection capabilities"
+ depends on m && KASAN && !KASAN_HW_TAGS
+ help
+ A part of the KASAN test suite that is not integrated with KUnit.
+ Incompatible with Hardware Tag-Based KASAN.
+
+endif # KASAN
diff --git a/lib/Kconfig.kcsan b/lib/Kconfig.kcsan
new file mode 100644
index 000000000..609ddfc73
--- /dev/null
+++ b/lib/Kconfig.kcsan
@@ -0,0 +1,257 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config HAVE_ARCH_KCSAN
+ bool
+
+config HAVE_KCSAN_COMPILER
+ def_bool (CC_IS_CLANG && $(cc-option,-fsanitize=thread -mllvm -tsan-distinguish-volatile=1)) || \
+ (CC_IS_GCC && $(cc-option,-fsanitize=thread --param tsan-distinguish-volatile=1))
+ help
+ For the list of compilers that support KCSAN, please see
+ <file:Documentation/dev-tools/kcsan.rst>.
+
+menuconfig KCSAN
+ bool "KCSAN: dynamic data race detector"
+ depends on HAVE_ARCH_KCSAN && HAVE_KCSAN_COMPILER
+ depends on DEBUG_KERNEL && !KASAN
+ select CONSTRUCTORS
+ select STACKTRACE
+ help
+ The Kernel Concurrency Sanitizer (KCSAN) is a dynamic
+ data-race detector that relies on compile-time instrumentation.
+ KCSAN uses a watchpoint-based sampling approach to detect races.
+
+ While KCSAN's primary purpose is to detect data races, it
+ also provides assertions to check data access constraints.
+ These assertions can expose bugs that do not manifest as
+ data races.
+
+ See <file:Documentation/dev-tools/kcsan.rst> for more details.
+
+if KCSAN
+
+config CC_HAS_TSAN_COMPOUND_READ_BEFORE_WRITE
+ def_bool (CC_IS_CLANG && $(cc-option,-fsanitize=thread -mllvm -tsan-compound-read-before-write=1)) || \
+ (CC_IS_GCC && $(cc-option,-fsanitize=thread --param tsan-compound-read-before-write=1))
+ help
+ The compiler instruments plain compound read-write operations
+ differently (++, --, +=, -=, |=, &=, etc.), which allows KCSAN to
+ distinguish them from other plain accesses. This is currently
+ supported by Clang 12 or later.
+
+config KCSAN_VERBOSE
+ bool "Show verbose reports with more information about system state"
+ depends on PROVE_LOCKING
+ help
+ If enabled, reports show more information about the system state that
+ may help better analyze and debug races. This includes held locks and
+ IRQ trace events.
+
+ While this option should generally be benign, we call into more
+ external functions on report generation; if a race report is
+ generated from any one of them, system stability may suffer due to
+ deadlocks or recursion. If in doubt, say N.
+
+config KCSAN_SELFTEST
+ bool "Perform short selftests on boot"
+ default y
+ help
+ Run KCSAN selftests on boot. On test failure, causes the kernel to
+ panic. Recommended to be enabled, ensuring critical functionality
+ works as intended.
+
+config KCSAN_KUNIT_TEST
+ tristate "KCSAN test for integrated runtime behaviour" if !KUNIT_ALL_TESTS
+ default KUNIT_ALL_TESTS
+ depends on TRACEPOINTS && KUNIT
+ select TORTURE_TEST
+ help
+ KCSAN test focusing on behaviour of the integrated runtime. Tests
+ various race scenarios, and verifies the reports generated to
+ console. Makes use of KUnit for test organization, and the Torture
+ framework for test thread control.
+
+ Each test case may run for up to KCSAN_REPORT_ONCE_IN_MS
+ milliseconds. Test run duration may be optimized by building the
+ kernel and KCSAN test with KCSAN_REPORT_ONCE_IN_MS set to a value
+ lower than the default.
+
+ Say Y here if you want the test to be built into the kernel and run
+ during boot; say M if you want the test to build as a module; say N
+ if you are unsure.
+
+config KCSAN_EARLY_ENABLE
+ bool "Early enable during boot"
+ default y
+ help
+ If KCSAN should be enabled globally as soon as possible. KCSAN can
+ later be enabled/disabled via debugfs.
+
+config KCSAN_NUM_WATCHPOINTS
+ int "Number of available watchpoints"
+ default 64
+ help
+ Total number of available watchpoints. An address range maps into a
+ specific watchpoint slot as specified in kernel/kcsan/encoding.h.
+ Although a larger number of watchpoints may not be usable due to the
+ limited number of CPUs, a larger value helps to improve performance
+ due to reducing cache-line contention. The chosen default is a
+ conservative value; we should almost never observe "no_capacity"
+ events (see /sys/kernel/debug/kcsan).
+
+config KCSAN_UDELAY_TASK
+ int "Delay in microseconds (for tasks)"
+ default 80
+ help
+ For tasks, the microsecond delay after setting up a watchpoint.
+
+config KCSAN_UDELAY_INTERRUPT
+ int "Delay in microseconds (for interrupts)"
+ default 20
+ help
+ For interrupts, the microsecond delay after setting up a watchpoint.
+ Interrupts have tighter latency requirements, and their delay should
+ be lower than for tasks.
+
+config KCSAN_DELAY_RANDOMIZE
+ bool "Randomize above delays"
+ default y
+ help
+ If delays should be randomized, where the maximum is KCSAN_UDELAY_*.
+ If false, the chosen delays are always the KCSAN_UDELAY_* values
+ as defined above.
+
+config KCSAN_SKIP_WATCH
+ int "Skip instructions before setting up watchpoint"
+ default 4000
+ help
+ The number of per-CPU memory operations to skip, before another
+ watchpoint is set up, i.e. one in every KCSAN_SKIP_WATCH per-CPU
+ memory operations is used to set up a watchpoint. A smaller value
+ results in more aggressive race detection, whereas a larger value
+ improves system performance at the cost of missing some races.
+
+config KCSAN_SKIP_WATCH_RANDOMIZE
+ bool "Randomize watchpoint instruction skip count"
+ default y
+ help
+ If instruction skip count should be randomized, where the maximum is
+ KCSAN_SKIP_WATCH. If false, the chosen value is always
+ KCSAN_SKIP_WATCH.
+
+config KCSAN_INTERRUPT_WATCHER
+ bool "Interruptible watchers" if !KCSAN_STRICT
+ default KCSAN_STRICT
+ help
+ If enabled, a task that set up a watchpoint may be interrupted while
+ delayed. This option will allow KCSAN to detect races between
+ interrupted tasks and other threads of execution on the same CPU.
+
+ Currently disabled by default, because not all safe per-CPU access
+ primitives and patterns may be accounted for, and therefore could
+ result in false positives.
+
+config KCSAN_REPORT_ONCE_IN_MS
+ int "Duration in milliseconds, in which any given race is only reported once"
+ default 3000
+ help
+ Any given race is only reported once in the defined time window.
+ Different races may still generate reports within a duration that is
+ smaller than the duration defined here. This allows rate limiting
+ reporting to avoid flooding the console with reports. Setting this
+ to 0 disables rate limiting.
+
+# The main purpose of the below options is to control reported data races;
+# they are not expected to be switched frequently by non-testers or at runtime.
+# The defaults are chosen to be conservative, and can miss certain bugs.
+
+config KCSAN_REPORT_RACE_UNKNOWN_ORIGIN
+ bool "Report races of unknown origin"
+ default y
+ help
+ If KCSAN should report races where only one access is known, and the
+ conflicting access is of unknown origin. This type of race is
+ reported if it was only possible to infer a race due to a data value
+ change while an access is being delayed on a watchpoint.
+
+config KCSAN_STRICT
+ bool "Strict data-race checking"
+ help
+ KCSAN will report data races with the strictest possible rules, which
+ closely aligns with the rules defined by the Linux-kernel memory
+ consistency model (LKMM).
+
+config KCSAN_WEAK_MEMORY
+ bool "Enable weak memory modeling to detect missing memory barriers"
+ default y
+ depends on KCSAN_STRICT
+ # We can either let objtool nop __tsan_func_{entry,exit}() and builtin
+ # atomics instrumentation in .noinstr.text, or use a compiler that can
+ # implement __no_kcsan to really remove all instrumentation.
+ depends on !ARCH_WANTS_NO_INSTR || HAVE_NOINSTR_HACK || \
+ CC_IS_GCC || CLANG_VERSION >= 140000
+ select OBJTOOL if HAVE_NOINSTR_HACK
+ help
+ Enable support for modeling a subset of weak memory, which allows
+ detecting a subset of data races due to missing memory barriers.
+
+ Depends on KCSAN_STRICT, because the options strengthening certain
+ plain accesses by default (depending on !KCSAN_STRICT) reduce the
+ ability to detect any data races involving reordered accesses, in
+ particular reordered writes.
+
+ Weak memory modeling relies on additional instrumentation and may
+ affect performance.
+
+config KCSAN_REPORT_VALUE_CHANGE_ONLY
+ bool "Only report races where watcher observed a data value change"
+ default y
+ depends on !KCSAN_STRICT
+ help
+ If enabled and a conflicting write is observed via a watchpoint, but
+ the data value of the memory location was observed to remain
+ unchanged, do not report the data race.
+
+config KCSAN_ASSUME_PLAIN_WRITES_ATOMIC
+ bool "Assume that plain aligned writes up to word size are atomic"
+ default y
+ depends on !KCSAN_STRICT
+ help
+ Assume that plain aligned writes up to word size are atomic by
+ default, and also not subject to other unsafe compiler optimizations
+ resulting in data races. This will cause KCSAN to not report data
+ races due to conflicts where the only plain accesses are aligned
+ writes up to word size: conflicts between marked reads and plain
+ aligned writes up to word size will not be reported as data races;
+ notice that data races between two conflicting plain aligned writes
+ will also not be reported.
+
+config KCSAN_IGNORE_ATOMICS
+ bool "Do not instrument marked atomic accesses"
+ depends on !KCSAN_STRICT
+ help
+ Never instrument marked atomic accesses. This option can be used for
+ additional filtering. Conflicting marked atomic reads and plain
+ writes will never be reported as a data race, however, will cause
+ plain reads and marked writes to result in "unknown origin" reports.
+ If combined with CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN=n, data
+ races where at least one access is marked atomic will never be
+ reported.
+
+ Similar to KCSAN_ASSUME_PLAIN_WRITES_ATOMIC, but including unaligned
+ accesses, conflicting marked atomic reads and plain writes will not
+ be reported as data races; however, unlike that option, data races
+ due to two conflicting plain writes will be reported (aligned and
+ unaligned, if CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC=n).
+
+config KCSAN_PERMISSIVE
+ bool "Enable all additional permissive rules"
+ depends on KCSAN_REPORT_VALUE_CHANGE_ONLY
+ help
+ Enable additional permissive rules to ignore certain classes of data
+ races (also see kernel/kcsan/permissive.h). None of the permissive
+ rules imply that such data races are generally safe, but can be used
+ to further reduce reported data races due to data-racy patterns
+ common across the kernel.
+
+endif # KCSAN
diff --git a/lib/Kconfig.kfence b/lib/Kconfig.kfence
new file mode 100644
index 000000000..459dda9ef
--- /dev/null
+++ b/lib/Kconfig.kfence
@@ -0,0 +1,99 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config HAVE_ARCH_KFENCE
+ bool
+
+menuconfig KFENCE
+ bool "KFENCE: low-overhead sampling-based memory safety error detector"
+ depends on HAVE_ARCH_KFENCE && (SLAB || SLUB)
+ select STACKTRACE
+ select IRQ_WORK
+ help
+ KFENCE is a low-overhead sampling-based detector of heap out-of-bounds
+ access, use-after-free, and invalid-free errors. KFENCE is designed
+ to have negligible cost to permit enabling it in production
+ environments.
+
+ See <file:Documentation/dev-tools/kfence.rst> for more details.
+
+ Note that KFENCE is not a substitute for explicit testing with tools
+ such as KASAN. KFENCE can detect a subset of bugs that KASAN can
+ detect, albeit at very different performance profiles. If you can
+ afford to use KASAN, continue using KASAN, for example in test
+ environments. If your kernel targets production use, and cannot
+ enable KASAN due to its cost, consider using KFENCE.
+
+if KFENCE
+
+config KFENCE_SAMPLE_INTERVAL
+ int "Default sample interval in milliseconds"
+ default 100
+ help
+ The KFENCE sample interval determines the frequency with which heap
+ allocations will be guarded by KFENCE. May be overridden via boot
+ parameter "kfence.sample_interval".
+
+ Set this to 0 to disable KFENCE by default, in which case only
+ setting "kfence.sample_interval" to a non-zero value enables KFENCE.
+
+config KFENCE_NUM_OBJECTS
+ int "Number of guarded objects available"
+ range 1 65535
+ default 255
+ help
+ The number of guarded objects available. For each KFENCE object, 2
+ pages are required: one contains the object, and the two adjacent
+ ones serve as guard pages (guard pages are shared between
+ neighbouring objects).
+
+config KFENCE_DEFERRABLE
+ bool "Use a deferrable timer to trigger allocations"
+ help
+ Use a deferrable timer to trigger allocations. This avoids forcing
+ CPU wake-ups if the system is idle, at the risk of a less predictable
+ sample interval.
+
+ Warning: The KUnit test suite fails with this option enabled - due to
+ the unpredictability of the sample interval!
+
+ Say N if you are unsure.
+
+config KFENCE_STATIC_KEYS
+ bool "Use static keys to set up allocations" if EXPERT
+ depends on JUMP_LABEL
+ help
+ Use static keys (static branches) to set up KFENCE allocations. This
+ option is only recommended when using very large sample intervals, or
+ performance has carefully been evaluated with this option.
+
+ Using static keys comes with trade-offs that need to be carefully
+ evaluated given target workloads and system architectures. Notably,
+ enabling and disabling static keys invoke IPI broadcasts, the latency
+ and impact of which is much harder to predict than a dynamic branch.
+
+ Say N if you are unsure.
+
+config KFENCE_STRESS_TEST_FAULTS
+ int "Stress testing of fault handling and error reporting" if EXPERT
+ default 0
+ help
+ The inverse probability with which to randomly protect KFENCE object
+ pages, resulting in spurious use-after-frees. The main purpose of
+ this option is to stress test KFENCE with concurrent error reports
+ and allocations/frees. A value of 0 disables stress testing logic.
+
+ Only for KFENCE testing; set to 0 if you are not a KFENCE developer.
+
+config KFENCE_KUNIT_TEST
+ tristate "KFENCE integration test suite" if !KUNIT_ALL_TESTS
+ default KUNIT_ALL_TESTS
+ depends on TRACEPOINTS && KUNIT
+ help
+ Test suite for KFENCE, testing various error detection scenarios with
+ various allocation types, and checking that reports are correctly
+ output to console.
+
+ Say Y here if you want the test to be built into the kernel and run
+ during boot; say M if you want the test to build as a module; say N
+ if you are unsure.
+
+endif # KFENCE
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb
new file mode 100644
index 000000000..3b9a44008
--- /dev/null
+++ b/lib/Kconfig.kgdb
@@ -0,0 +1,165 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config HAVE_ARCH_KGDB
+ bool
+
+# set if the architecture has its kgdb_arch_handle_qxfer_pkt
+# function to enable the gdb stub to handle XML packets sent from GDB.
+config HAVE_ARCH_KGDB_QXFER_PKT
+ bool
+
+menuconfig KGDB
+ bool "KGDB: kernel debugger"
+ depends on HAVE_ARCH_KGDB
+ depends on DEBUG_KERNEL
+ help
+ If you say Y here, it will be possible to remotely debug the
+ kernel using gdb. It is recommended, but not required, that
+ you also turn on the kernel config option
+ CONFIG_FRAME_POINTER to aid in producing more reliable stack
+ backtraces in the external debugger. Documentation of the
+ kernel debugger is available at http://kgdb.sourceforge.net
+ as well as in Documentation/dev-tools/kgdb.rst. If
+ unsure, say N.
+
+if KGDB
+
+config KGDB_HONOUR_BLOCKLIST
+ bool "KGDB: use kprobe blocklist to prohibit unsafe breakpoints"
+ depends on HAVE_KPROBES
+ depends on MODULES
+ select KPROBES
+ default y
+ help
+ If set to Y the debug core will use the kprobe blocklist to
+ identify symbols where it is unsafe to set breakpoints.
+ In particular this disallows instrumentation of functions
+ called during debug trap handling and thus makes it very
+ difficult to inadvertently provoke recursive trap handling.
+
+ If unsure, say Y.
+
+config KGDB_SERIAL_CONSOLE
+ tristate "KGDB: use kgdb over the serial console"
+ select CONSOLE_POLL
+ select MAGIC_SYSRQ
+ depends on TTY && HW_CONSOLE
+ default y
+ help
+ Share a serial console with kgdb. Sysrq-g must be used
+ to break in initially.
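+
+ For example, with this enabled and a serial console configured, the
+ debugger can be entered via the magic SysRq 'g' key, or
+ (illustratively) from a shell on the target:
+
+ echo g > /proc/sysrq-trigger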
+
+config KGDB_TESTS
+ bool "KGDB: internal test suite"
+ default n
+ help
+ This is a kgdb I/O module specifically designed to test
+ kgdb's internal functions. This kgdb I/O module is
+ intended for the development of new kgdb stubs
+ as well as regression testing of the kgdb internals.
+ See drivers/misc/kgdbts.c for details about
+ the tests. The most basic use of this I/O module is to boot
+ a kernel with the boot arguments "kgdbwait kgdbts=V1F100".
+
+config KGDB_TESTS_ON_BOOT
+ bool "KGDB: Run tests on boot"
+ depends on KGDB_TESTS
+ default n
+ help
+ Run the kgdb tests on boot up automatically without the need
+ to pass in a kernel parameter.
+
+config KGDB_TESTS_BOOT_STRING
+ string "KGDB: which internal kgdb tests to run"
+ depends on KGDB_TESTS_ON_BOOT
+ default "V1F100"
+ help
+ This is the command string to send to the kgdb test suite on
+ boot. See drivers/misc/kgdbts.c for detailed
+ information about other strings you could use beyond the
+ default of V1F100.
+
+config KGDB_LOW_LEVEL_TRAP
+ bool "KGDB: Allow debugging with traps in notifiers"
+ depends on X86 || MIPS
+ default n
+ help
+ This will add an extra call back to kgdb for the breakpoint
+ exception handler which will allow kgdb to step through a
+ notify handler.
+
+config KGDB_KDB
+ bool "KGDB_KDB: include kdb frontend for kgdb"
+ default n
+ help
+ KDB frontend for the kernel.
+
+config KDB_DEFAULT_ENABLE
+ hex "KDB: Select kdb command functions to be enabled by default"
+ depends on KGDB_KDB
+ default 0x1
+ help
+ Specifies which kdb commands are enabled by default. This may
+ be set to 1 or 0 to enable all commands or disable almost all
+ commands.
+
+ Alternatively the following bitmask applies:
+
+ 0x0002 - allow arbitrary reads from memory and symbol lookup
+ 0x0004 - allow arbitrary writes to memory
+ 0x0008 - allow current register state to be inspected
+ 0x0010 - allow current register state to be modified
+ 0x0020 - allow passive inspection (backtrace, process list, lsmod)
+ 0x0040 - allow flow control management (breakpoint, single step)
+ 0x0080 - enable signalling of processes
+ 0x0100 - allow machine to be rebooted
+
+ The config option merely sets the default at boot time. Both
+ issuing 'echo X > /sys/module/kdb/parameters/cmd_enable' or
+ setting with kdb.cmd_enable=X kernel command line option will
+ override the default settings.
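+
+ For example (an illustrative mask), to enable only memory reads and
+ symbol lookup (0x0002) plus passive inspection (0x0020) by default,
+ the two bits combine as 0x0002 | 0x0020 = 0x0022, so one could boot
+ with:
+
+ kdb.cmd_enable=0x22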
+
+config KDB_KEYBOARD
+ bool "KGDB_KDB: keyboard as input device"
+ depends on VT && KGDB_KDB && !PARISC
+ default n
+ help
+ KDB can use a PS/2 type keyboard as an input device.
+
+config KDB_CONTINUE_CATASTROPHIC
+ int "KDB: continue after catastrophic errors"
+ depends on KGDB_KDB
+ default "0"
+ help
+ This integer controls the behaviour of kdb when the kernel gets a
+ catastrophic error, i.e. for a panic or oops.
+ When KDB is active and a catastrophic error occurs, nothing extra
+ will happen until you type 'go'.
+ CONFIG_KDB_CONTINUE_CATASTROPHIC == 0 (default). The first time
+ you type 'go', you will be warned by kdb. The second time you type
+ 'go', KDB tries to continue. No guarantees that the
+ kernel is still usable in this situation.
+ CONFIG_KDB_CONTINUE_CATASTROPHIC == 1. KDB tries to continue.
+ No guarantees that the kernel is still usable in this situation.
+ CONFIG_KDB_CONTINUE_CATASTROPHIC == 2. KDB forces a reboot.
+ If you are not sure, say 0.
+
+config ARCH_HAS_EARLY_DEBUG
+ bool
+ default n
+ help
+ If an architecture can definitely handle entering the debugger
+ when early_param's are parsed then it should select this config.
+ Otherwise, if "kgdbwait" is passed on the kernel command line it
+ won't actually be processed until dbg_late_init() just after the
+ call to kgdb_arch_late() is made.
+
+ NOTE: Even if this isn't selected by an architecture we will
+ still try to register kgdb to handle breakpoints and crashes
+ when early_param's are parsed, we just won't act on the
+ "kgdbwait" parameter until dbg_late_init(). If you get a
+ crash and try to drop into kgdb somewhere between these two
+ places you might or might not end up being able to use kgdb
+ depending on exactly how far along the architecture has initted.
+
+endif # KGDB
diff --git a/lib/Kconfig.kmsan b/lib/Kconfig.kmsan
new file mode 100644
index 000000000..ef2c8f256
--- /dev/null
+++ b/lib/Kconfig.kmsan
@@ -0,0 +1,63 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config HAVE_ARCH_KMSAN
+ bool
+
+config HAVE_KMSAN_COMPILER
+ # Clang versions <14.0.0 also support -fsanitize=kernel-memory, but not
+ # all the features necessary to build the kernel with KMSAN.
+ depends on CC_IS_CLANG && CLANG_VERSION >= 140000
+ def_bool $(cc-option,-fsanitize=kernel-memory -mllvm -msan-disable-checks=1)
+
+config KMSAN
+ bool "KMSAN: detector of uninitialized values use"
+ depends on HAVE_ARCH_KMSAN && HAVE_KMSAN_COMPILER
+ depends on SLUB && DEBUG_KERNEL && !KASAN && !KCSAN
+ depends on !PREEMPT_RT
+ select STACKDEPOT
+ select STACKDEPOT_ALWAYS_INIT
+ help
+ KernelMemorySanitizer (KMSAN) is a dynamic detector of uses of
+ uninitialized values in the kernel. It is based on compiler
+ instrumentation provided by Clang and thus requires Clang to build.
+
+ An important note is that KMSAN is not intended for production use,
+ because it drastically increases kernel memory footprint and slows
+ the whole system down.
+
+ See <file:Documentation/dev-tools/kmsan.rst> for more details.
+
+if KMSAN
+
+config HAVE_KMSAN_PARAM_RETVAL
+ # -fsanitize-memory-param-retval is supported only by Clang >= 14.
+ depends on HAVE_KMSAN_COMPILER
+ def_bool $(cc-option,-fsanitize=kernel-memory -fsanitize-memory-param-retval)
+
+config KMSAN_CHECK_PARAM_RETVAL
+ bool "Check for uninitialized values passed to and returned from functions"
+ default y
+ depends on HAVE_KMSAN_PARAM_RETVAL
+ help
+ If the compiler supports -fsanitize-memory-param-retval, KMSAN will
+ eagerly check every function parameter passed by value and every
+ function return value.
+
+ Disabling KMSAN_CHECK_PARAM_RETVAL will result in tracking shadow for
+ function parameters and return values across function borders. This
+ is a more relaxed mode, but it generates more instrumentation code and
+ may potentially report errors in corner cases when non-instrumented
+ functions call instrumented ones.
+
+config KMSAN_KUNIT_TEST
+ tristate "KMSAN integration test suite" if !KUNIT_ALL_TESTS
+ default KUNIT_ALL_TESTS
+ depends on TRACEPOINTS && KUNIT
+ help
+ Test suite for KMSAN, testing various error detection scenarios,
+ and checking that reports are correctly output to console.
+
+ Say Y here if you want the test to be built into the kernel and run
+ during boot; say M if you want the test to build as a module; say N
+ if you are unsure.
+
+endif
diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan
new file mode 100644
index 000000000..59e21bfec
--- /dev/null
+++ b/lib/Kconfig.ubsan
@@ -0,0 +1,163 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config ARCH_HAS_UBSAN_SANITIZE_ALL
+ bool
+
+menuconfig UBSAN
+ bool "Undefined behaviour sanity checker"
+ help
+ This option enables the Undefined Behaviour sanity checker.
+ Compile-time instrumentation is used to detect various undefined
+ behaviours at runtime. For more details, see:
+ Documentation/dev-tools/ubsan.rst
+
+if UBSAN
+
+config UBSAN_TRAP
+ bool "Abort on Sanitizer warnings (smaller kernel but less verbose)"
+ depends on !COMPILE_TEST
+ help
+ Building kernels with Sanitizer features enabled tends to grow
+ the kernel size by around 5%, due to adding all the debugging
+ text on failure paths. To avoid this, Sanitizer instrumentation
+ can just issue a trap. This reduces the kernel size overhead but
+ turns all warnings (including potentially harmless conditions)
+ into full exceptions that abort the running kernel code
+ (regardless of context, locks held, etc), which may destabilize
+ the system. For some system builders this is an acceptable
+ trade-off.
+
+ Also note that selecting Y will cause your kernel to Oops
+ with an "illegal instruction" error with no further details
+ when a UBSAN violation occurs. (Except on arm64, which will
+ report which Sanitizer failed.) This may make it hard to
+ determine whether an Oops was caused by UBSAN or to figure
+ out the details of a UBSAN violation. It makes the kernel log
+ output less useful for bug reports.
+
+config CC_HAS_UBSAN_BOUNDS_STRICT
+ def_bool $(cc-option,-fsanitize=bounds-strict)
+ help
+ The -fsanitize=bounds-strict option is only available on GCC,
+ but uses the more strict handling of arrays that includes knowledge
+ of flexible arrays, which is comparable to Clang's regular
+ -fsanitize=bounds.
+
+config CC_HAS_UBSAN_ARRAY_BOUNDS
+ def_bool $(cc-option,-fsanitize=array-bounds)
+ help
+ Under Clang, the -fsanitize=bounds option is actually composed
+ of two more specific options, -fsanitize=array-bounds and
+ -fsanitize=local-bounds. However, -fsanitize=local-bounds can
+ only be used when trap mode is enabled. (See also the help for
+ CONFIG_LOCAL_BOUNDS.) Explicitly check for -fsanitize=array-bounds
+ so that we can build up the options needed for UBSAN_BOUNDS
+ with or without UBSAN_TRAP.
+
+config UBSAN_BOUNDS
+ bool "Perform array index bounds checking"
+ default UBSAN
+ depends on CC_HAS_UBSAN_ARRAY_BOUNDS || CC_HAS_UBSAN_BOUNDS_STRICT
+ help
+ This option enables detection of directly indexed out of bounds
+ array accesses, where the array size is known at compile time.
+ Note that this does not protect array overflows via bad calls
+ to the {str,mem}*cpy() family of functions (that is addressed
+ by CONFIG_FORTIFY_SOURCE).
+
+config UBSAN_BOUNDS_STRICT
+ def_bool UBSAN_BOUNDS && CC_HAS_UBSAN_BOUNDS_STRICT
+ help
+ GCC's bounds sanitizer. This option is used to select the
+ correct options in Makefile.ubsan.
+
+config UBSAN_ARRAY_BOUNDS
+ def_bool UBSAN_BOUNDS && CC_HAS_UBSAN_ARRAY_BOUNDS
+ help
+ Clang's array bounds sanitizer. This option is used to select
+ the correct options in Makefile.ubsan.
+
+config UBSAN_LOCAL_BOUNDS
+ def_bool UBSAN_ARRAY_BOUNDS && UBSAN_TRAP
+ help
+ This option enables Clang's -fsanitize=local-bounds which traps
+ when an access through a pointer that is derived from an object
+ of a statically-known size, where an added offset (which may not
+ be known statically) is out-of-bounds. Since this option is
+ trap-only, it depends on CONFIG_UBSAN_TRAP.
+
+config UBSAN_SHIFT
+ bool "Perform checking for bit-shift overflows"
+ default UBSAN
+ depends on $(cc-option,-fsanitize=shift)
+ help
+ This option enables -fsanitize=shift which checks for bit-shift
+ operations that overflow to the left or switch to negative
+ for signed types.
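+
+ For example (illustrative), a runtime shift of a 32-bit int by 32
+ or more bits, or a left shift of a negative signed value, would be
+ reported.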
+
+config UBSAN_DIV_ZERO
+ bool "Perform checking for integer divide-by-zero"
+ depends on $(cc-option,-fsanitize=integer-divide-by-zero)
+ # https://github.com/ClangBuiltLinux/linux/issues/1657
+ # https://github.com/llvm/llvm-project/issues/56289
+ depends on !CC_IS_CLANG
+ help
+ This option enables -fsanitize=integer-divide-by-zero which checks
+ for integer division by zero. This is effectively redundant with the
+ kernel's existing exception handling, though it can provide greater
+ debugging information under CONFIG_UBSAN_REPORT_FULL.
+
+config UBSAN_UNREACHABLE
+ bool "Perform checking for unreachable code"
+ # objtool already handles unreachable checking and gets angry about
+ # seeing UBSan instrumentation located in unreachable places.
+ depends on !(OBJTOOL && (STACK_VALIDATION || UNWINDER_ORC || HAVE_UACCESS_VALIDATION))
+ depends on $(cc-option,-fsanitize=unreachable)
+ help
+ This option enables -fsanitize=unreachable which checks for control
+ flow reaching an expected-to-be-unreachable position.
+
+config UBSAN_BOOL
+ bool "Perform checking for non-boolean values used as boolean"
+ default UBSAN
+ depends on $(cc-option,-fsanitize=bool)
+ help
+ This option enables -fsanitize=bool which checks for boolean values being
+ loaded that are neither 0 nor 1.
+
+config UBSAN_ENUM
+ bool "Perform checking for out of bounds enum values"
+ default UBSAN
+ depends on $(cc-option,-fsanitize=enum)
+ help
+ This option enables -fsanitize=enum which checks for values being loaded
+ into an enum that are outside the range of given values for the given enum.
+
+config UBSAN_ALIGNMENT
+ bool "Perform checking for misaligned pointer usage"
+ default !HAVE_EFFICIENT_UNALIGNED_ACCESS
+ depends on !UBSAN_TRAP && !COMPILE_TEST
+ depends on $(cc-option,-fsanitize=alignment)
+ help
+ This option enables the check of unaligned memory accesses.
+ Enabling this option on architectures that support unaligned
+ accesses may produce a lot of false positives.
+
+config UBSAN_SANITIZE_ALL
+ bool "Enable instrumentation for the entire kernel"
+ depends on ARCH_HAS_UBSAN_SANITIZE_ALL
+ default y
+ help
+ This option activates instrumentation for the entire kernel.
+ If you don't enable this option, you have to explicitly specify
+ UBSAN_SANITIZE := y for the files/directories you want to check for UB.
+ Enabling this option will increase the kernel image size
+ significantly.
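+
+ For example (an illustrative Makefile snippet; "foo.o" is a
+ placeholder name), instrumentation can be enabled for a whole
+ directory or for a single object file:
+
+ UBSAN_SANITIZE := y
+ UBSAN_SANITIZE_foo.o := y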
+
+config TEST_UBSAN
+ tristate "Module for testing for undefined behavior detection"
+ depends on m
+ help
+ This is a test module for UBSAN.
+ It triggers various kinds of undefined behavior for UBSAN to detect.
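+
+ Once built, the module can be loaded and exercised with, e.g.:
+
+ modprobe test_ubsan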
+
+endif # if UBSAN
diff --git a/lib/Makefile b/lib/Makefile
new file mode 100644
index 000000000..740109b6e
--- /dev/null
+++ b/lib/Makefile
@@ -0,0 +1,440 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for some libs needed in the kernel.
+#
+
+ccflags-remove-$(CONFIG_FUNCTION_TRACER) += $(CC_FLAGS_FTRACE)
+
+# These files are disabled because they produce lots of non-interesting and/or
+# flaky coverage that is not a function of syscall inputs. For example,
+# rbtree can be global and individual rotations don't correlate with inputs.
+KCOV_INSTRUMENT_string.o := n
+KCOV_INSTRUMENT_rbtree.o := n
+KCOV_INSTRUMENT_list_debug.o := n
+KCOV_INSTRUMENT_debugobjects.o := n
+KCOV_INSTRUMENT_dynamic_debug.o := n
+KCOV_INSTRUMENT_fault-inject.o := n
+
+# string.o implements standard library functions like memset/memcpy etc.
+# Use -ffreestanding to ensure that the compiler does not try to "optimize"
+# them into calls to themselves.
+CFLAGS_string.o := -ffreestanding
+
+# Early boot use of cmdline, don't instrument it
+ifdef CONFIG_AMD_MEM_ENCRYPT
+KASAN_SANITIZE_string.o := n
+
+CFLAGS_string.o += -fno-stack-protector
+endif
+
+lib-y := ctype.o string.o vsprintf.o cmdline.o \
+ rbtree.o radix-tree.o timerqueue.o xarray.o \
+ maple_tree.o idr.o extable.o irq_regs.o argv_split.o \
+ flex_proportions.o ratelimit.o \
+ is_single_threaded.o plist.o decompress.o kobject_uevent.o \
+ earlycpio.o seq_buf.o siphash.o dec_and_lock.o \
+ nmi_backtrace.o win_minmax.o memcat_p.o \
+ buildid.o
+
+lib-$(CONFIG_PRINTK) += dump_stack.o
+lib-$(CONFIG_SMP) += cpumask.o
+
+lib-y += kobject.o klist.o
+obj-y += lockref.o
+
+obj-y += bcd.o sort.o parser.o debug_locks.o random32.o \
+ bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \
+ list_sort.o uuid.o iov_iter.o clz_ctz.o \
+ bsearch.o find_bit.o llist.o memweight.o kfifo.o \
+ percpu-refcount.o rhashtable.o base64.o \
+ once.o refcount.o rcuref.o usercopy.o errseq.o bucket_locks.o \
+ generic-radix-tree.o
+obj-$(CONFIG_STRING_SELFTEST) += test_string.o
+obj-y += string_helpers.o
+obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
+obj-y += hexdump.o
+obj-$(CONFIG_TEST_HEXDUMP) += test_hexdump.o
+obj-y += kstrtox.o
+obj-$(CONFIG_FIND_BIT_BENCHMARK) += find_bit_benchmark.o
+obj-$(CONFIG_TEST_BPF) += test_bpf.o
+test_dhry-objs := dhry_1.o dhry_2.o dhry_run.o
+obj-$(CONFIG_TEST_DHRY) += test_dhry.o
+obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o
+obj-$(CONFIG_TEST_BITOPS) += test_bitops.o
+CFLAGS_test_bitops.o += -Werror
+obj-$(CONFIG_CPUMASK_KUNIT_TEST) += cpumask_kunit.o
+obj-$(CONFIG_TEST_SYSCTL) += test_sysctl.o
+obj-$(CONFIG_TEST_IOV_ITER) += kunit_iov_iter.o
+obj-$(CONFIG_HASH_KUNIT_TEST) += test_hash.o
+obj-$(CONFIG_TEST_IDA) += test_ida.o
+obj-$(CONFIG_TEST_UBSAN) += test_ubsan.o
+CFLAGS_test_ubsan.o += $(call cc-disable-warning, vla)
+UBSAN_SANITIZE_test_ubsan.o := y
+obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
+obj-$(CONFIG_TEST_LIST_SORT) += test_list_sort.o
+obj-$(CONFIG_TEST_MIN_HEAP) += test_min_heap.o
+obj-$(CONFIG_TEST_LKM) += test_module.o
+obj-$(CONFIG_TEST_VMALLOC) += test_vmalloc.o
+obj-$(CONFIG_TEST_RHASHTABLE) += test_rhashtable.o
+obj-$(CONFIG_TEST_SORT) += test_sort.o
+obj-$(CONFIG_TEST_USER_COPY) += test_user_copy.o
+obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_keys.o
+obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_key_base.o
+obj-$(CONFIG_TEST_DYNAMIC_DEBUG) += test_dynamic_debug.o
+obj-$(CONFIG_TEST_PRINTF) += test_printf.o
+obj-$(CONFIG_TEST_SCANF) += test_scanf.o
+
+obj-$(CONFIG_TEST_BITMAP) += test_bitmap.o
+ifeq ($(CONFIG_CC_IS_CLANG)$(CONFIG_KASAN),yy)
+# FIXME: Clang breaks test_bitmap_const_eval when KASAN and GCOV are enabled
+GCOV_PROFILE_test_bitmap.o := n
+endif
+
+obj-$(CONFIG_TEST_UUID) += test_uuid.o
+obj-$(CONFIG_TEST_XARRAY) += test_xarray.o
+obj-$(CONFIG_TEST_MAPLE_TREE) += test_maple_tree.o
+obj-$(CONFIG_TEST_PARMAN) += test_parman.o
+obj-$(CONFIG_TEST_KMOD) += test_kmod.o
+obj-$(CONFIG_TEST_DEBUG_VIRTUAL) += test_debug_virtual.o
+obj-$(CONFIG_TEST_MEMCAT_P) += test_memcat_p.o
+obj-$(CONFIG_TEST_OBJAGG) += test_objagg.o
+obj-$(CONFIG_TEST_BLACKHOLE_DEV) += test_blackhole_dev.o
+obj-$(CONFIG_TEST_MEMINIT) += test_meminit.o
+obj-$(CONFIG_TEST_LOCKUP) += test_lockup.o
+obj-$(CONFIG_TEST_HMM) += test_hmm.o
+obj-$(CONFIG_TEST_FREE_PAGES) += test_free_pages.o
+obj-$(CONFIG_KPROBES_SANITY_TEST) += test_kprobes.o
+obj-$(CONFIG_TEST_REF_TRACKER) += test_ref_tracker.o
+CFLAGS_test_fprobe.o += $(CC_FLAGS_FTRACE)
+obj-$(CONFIG_FPROBE_SANITY_TEST) += test_fprobe.o
+#
+# CFLAGS for compiling floating point code inside the kernel. x86/Makefile turns
+# off the generation of FPU/SSE* instructions for kernel proper but FPU_FLAGS
+# get appended last to CFLAGS and thus override those previous compiler options.
+#
+FPU_CFLAGS := -msse -msse2
+ifdef CONFIG_CC_IS_GCC
+# Stack alignment mismatch, proceed with caution.
+# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
+# (8B stack alignment).
+# See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=53383
+#
+# The "-msse" in the first argument is there so that the
+# -mpreferred-stack-boundary=3 build error:
+#
+# -mpreferred-stack-boundary=3 is not between 4 and 12
+#
+# can be triggered. Otherwise gcc doesn't complain.
+FPU_CFLAGS += -mhard-float
+FPU_CFLAGS += $(call cc-option,-msse -mpreferred-stack-boundary=3,-mpreferred-stack-boundary=4)
+endif
+
+obj-$(CONFIG_TEST_FPU) += test_fpu.o
+CFLAGS_test_fpu.o += $(FPU_CFLAGS)
+
+obj-$(CONFIG_TEST_LIVEPATCH) += livepatch/
+
+# Some KUnit files (hooks.o) need to be built-in even when KUnit is a module,
+# so we can't just use obj-$(CONFIG_KUNIT).
+ifdef CONFIG_KUNIT
+obj-y += kunit/
+endif
+
+ifeq ($(CONFIG_DEBUG_KOBJECT),y)
+CFLAGS_kobject.o += -DDEBUG
+CFLAGS_kobject_uevent.o += -DDEBUG
+endif
+
+obj-$(CONFIG_DEBUG_INFO_REDUCED) += debug_info.o
+CFLAGS_debug_info.o += $(call cc-option, -femit-struct-debug-detailed=any)
+
+obj-y += math/ crypto/
+
+obj-$(CONFIG_GENERIC_IOMAP) += iomap.o
+obj-$(CONFIG_GENERIC_PCI_IOMAP) += pci_iomap.o
+obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o
+obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o
+obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
+
+lib-y += logic_pio.o
+
+lib-$(CONFIG_INDIRECT_IOMEM) += logic_iomem.o
+
+obj-$(CONFIG_TRACE_MMIO_ACCESS) += trace_readwrite.o
+
+obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
+
+obj-$(CONFIG_BTREE) += btree.o
+obj-$(CONFIG_INTERVAL_TREE) += interval_tree.o
+obj-$(CONFIG_ASSOCIATIVE_ARRAY) += assoc_array.o
+obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
+obj-$(CONFIG_LIST_HARDENED) += list_debug.o
+obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
+
+obj-$(CONFIG_BITREVERSE) += bitrev.o
+obj-$(CONFIG_LINEAR_RANGES) += linear_ranges.o
+obj-$(CONFIG_PACKING) += packing.o
+obj-$(CONFIG_CRC_CCITT) += crc-ccitt.o
+obj-$(CONFIG_CRC16) += crc16.o
+obj-$(CONFIG_CRC_T10DIF)+= crc-t10dif.o
+obj-$(CONFIG_CRC_ITU_T) += crc-itu-t.o
+obj-$(CONFIG_CRC32) += crc32.o
+obj-$(CONFIG_CRC64) += crc64.o
+obj-$(CONFIG_CRC32_SELFTEST) += crc32test.o
+obj-$(CONFIG_CRC4) += crc4.o
+obj-$(CONFIG_CRC7) += crc7.o
+obj-$(CONFIG_LIBCRC32C) += libcrc32c.o
+obj-$(CONFIG_CRC8) += crc8.o
+obj-$(CONFIG_CRC64_ROCKSOFT) += crc64-rocksoft.o
+obj-$(CONFIG_XXHASH) += xxhash.o
+obj-$(CONFIG_GENERIC_ALLOCATOR) += genalloc.o
+
+obj-$(CONFIG_842_COMPRESS) += 842/
+obj-$(CONFIG_842_DECOMPRESS) += 842/
+obj-$(CONFIG_ZLIB_INFLATE) += zlib_inflate/
+obj-$(CONFIG_ZLIB_DEFLATE) += zlib_deflate/
+obj-$(CONFIG_ZLIB_DFLTCC) += zlib_dfltcc/
+obj-$(CONFIG_REED_SOLOMON) += reed_solomon/
+obj-$(CONFIG_BCH) += bch.o
+obj-$(CONFIG_LZO_COMPRESS) += lzo/
+obj-$(CONFIG_LZO_DECOMPRESS) += lzo/
+obj-$(CONFIG_LZ4_COMPRESS) += lz4/
+obj-$(CONFIG_LZ4HC_COMPRESS) += lz4/
+obj-$(CONFIG_LZ4_DECOMPRESS) += lz4/
+obj-$(CONFIG_ZSTD_COMPRESS) += zstd/
+obj-$(CONFIG_ZSTD_DECOMPRESS) += zstd/
+obj-$(CONFIG_XZ_DEC) += xz/
+obj-$(CONFIG_RAID6_PQ) += raid6/
+
+lib-$(CONFIG_DECOMPRESS_GZIP) += decompress_inflate.o
+lib-$(CONFIG_DECOMPRESS_BZIP2) += decompress_bunzip2.o
+lib-$(CONFIG_DECOMPRESS_LZMA) += decompress_unlzma.o
+lib-$(CONFIG_DECOMPRESS_XZ) += decompress_unxz.o
+lib-$(CONFIG_DECOMPRESS_LZO) += decompress_unlzo.o
+lib-$(CONFIG_DECOMPRESS_LZ4) += decompress_unlz4.o
+lib-$(CONFIG_DECOMPRESS_ZSTD) += decompress_unzstd.o
+
+obj-$(CONFIG_TEXTSEARCH) += textsearch.o
+obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o
+obj-$(CONFIG_TEXTSEARCH_BM) += ts_bm.o
+obj-$(CONFIG_TEXTSEARCH_FSM) += ts_fsm.o
+obj-$(CONFIG_SMP) += percpu_counter.o
+obj-$(CONFIG_AUDIT_GENERIC) += audit.o
+obj-$(CONFIG_AUDIT_COMPAT_GENERIC) += compat_audit.o
+
+obj-$(CONFIG_IOMMU_HELPER) += iommu-helper.o
+obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o
+obj-$(CONFIG_FAULT_INJECTION_USERCOPY) += fault-inject-usercopy.o
+obj-$(CONFIG_NOTIFIER_ERROR_INJECTION) += notifier-error-inject.o
+obj-$(CONFIG_PM_NOTIFIER_ERROR_INJECT) += pm-notifier-error-inject.o
+obj-$(CONFIG_NETDEV_NOTIFIER_ERROR_INJECT) += netdev-notifier-error-inject.o
+obj-$(CONFIG_MEMORY_NOTIFIER_ERROR_INJECT) += memory-notifier-error-inject.o
+obj-$(CONFIG_OF_RECONFIG_NOTIFIER_ERROR_INJECT) += \
+ of-reconfig-notifier-error-inject.o
+obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
+
+lib-$(CONFIG_GENERIC_BUG) += bug.o
+
+obj-$(CONFIG_HAVE_ARCH_TRACEHOOK) += syscall.o
+
+obj-$(CONFIG_DYNAMIC_DEBUG_CORE) += dynamic_debug.o
+#ensure exported functions have prototypes
+CFLAGS_dynamic_debug.o := -DDYNAMIC_DEBUG_MODULE
+
+obj-$(CONFIG_SYMBOLIC_ERRNAME) += errname.o
+
+obj-$(CONFIG_NLATTR) += nlattr.o
+
+obj-$(CONFIG_LRU_CACHE) += lru_cache.o
+
+obj-$(CONFIG_GENERIC_CSUM) += checksum.o
+
+obj-$(CONFIG_GENERIC_ATOMIC64) += atomic64.o
+
+obj-$(CONFIG_ATOMIC64_SELFTEST) += atomic64_test.o
+
+obj-$(CONFIG_CPU_RMAP) += cpu_rmap.o
+
+obj-$(CONFIG_DQL) += dynamic_queue_limits.o
+
+obj-$(CONFIG_GLOB) += glob.o
+obj-$(CONFIG_GLOB_SELFTEST) += globtest.o
+
+obj-$(CONFIG_DIMLIB) += dim/
+obj-$(CONFIG_SIGNATURE) += digsig.o
+
+lib-$(CONFIG_CLZ_TAB) += clz_tab.o
+
+obj-$(CONFIG_GENERIC_STRNCPY_FROM_USER) += strncpy_from_user.o
+obj-$(CONFIG_GENERIC_STRNLEN_USER) += strnlen_user.o
+
+obj-$(CONFIG_GENERIC_NET_UTILS) += net_utils.o
+
+obj-$(CONFIG_SG_SPLIT) += sg_split.o
+obj-$(CONFIG_SG_POOL) += sg_pool.o
+obj-$(CONFIG_MEMREGION) += memregion.o
+obj-$(CONFIG_STMP_DEVICE) += stmp_device.o
+obj-$(CONFIG_IRQ_POLL) += irq_poll.o
+
+obj-$(CONFIG_POLYNOMIAL) += polynomial.o
+
+# stackdepot.c should not be instrumented or call instrumented functions.
+# Prevent the compiler from calling builtins like memcmp() or bcmp() from this
+# file.
+CFLAGS_stackdepot.o += -fno-builtin
+obj-$(CONFIG_STACKDEPOT) += stackdepot.o
+KASAN_SANITIZE_stackdepot.o := n
+# In particular, instrumenting stackdepot.c with KMSAN will result in infinite
+# recursion.
+KMSAN_SANITIZE_stackdepot.o := n
+KCOV_INSTRUMENT_stackdepot.o := n
+
+obj-$(CONFIG_REF_TRACKER) += ref_tracker.o
+
+libfdt_files = fdt.o fdt_ro.o fdt_wip.o fdt_rw.o fdt_sw.o fdt_strerror.o \
+ fdt_empty_tree.o fdt_addresses.o
+$(foreach file, $(libfdt_files), \
+ $(eval CFLAGS_$(file) = -I $(srctree)/scripts/dtc/libfdt))
+lib-$(CONFIG_LIBFDT) += $(libfdt_files)
+
+obj-$(CONFIG_BOOT_CONFIG) += bootconfig.o
+obj-$(CONFIG_BOOT_CONFIG_EMBED) += bootconfig-data.o
+
+$(obj)/bootconfig-data.o: $(obj)/default.bconf
+
+targets += default.bconf
+filechk_defbconf = cat $(or $(real-prereqs), /dev/null)
+$(obj)/default.bconf: $(CONFIG_BOOT_CONFIG_EMBED_FILE) FORCE
+ $(call filechk,defbconf)
+
+obj-$(CONFIG_RBTREE_TEST) += rbtree_test.o
+obj-$(CONFIG_INTERVAL_TREE_TEST) += interval_tree_test.o
+
+obj-$(CONFIG_PERCPU_TEST) += percpu_test.o
+
+obj-$(CONFIG_ASN1) += asn1_decoder.o
+obj-$(CONFIG_ASN1_ENCODER) += asn1_encoder.o
+
+obj-$(CONFIG_FONT_SUPPORT) += fonts/
+
+hostprogs := gen_crc32table
+hostprogs += gen_crc64table
+clean-files := crc32table.h
+clean-files += crc64table.h
+
+$(obj)/crc32.o: $(obj)/crc32table.h
+
+quiet_cmd_crc32 = GEN $@
+ cmd_crc32 = $< > $@
+
+$(obj)/crc32table.h: $(obj)/gen_crc32table
+ $(call cmd,crc32)
+
+$(obj)/crc64.o: $(obj)/crc64table.h
+
+quiet_cmd_crc64 = GEN $@
+ cmd_crc64 = $< > $@
+
+$(obj)/crc64table.h: $(obj)/gen_crc64table
+ $(call cmd,crc64)
+
+#
+# Build a fast OID lookup registry from include/linux/oid_registry.h
+#
+obj-$(CONFIG_OID_REGISTRY) += oid_registry.o
+
+$(obj)/oid_registry.o: $(obj)/oid_registry_data.c
+
+$(obj)/oid_registry_data.c: $(srctree)/include/linux/oid_registry.h \
+ $(src)/build_OID_registry
+ $(call cmd,build_OID_registry)
+
+quiet_cmd_build_OID_registry = GEN $@
+ cmd_build_OID_registry = perl $(srctree)/$(src)/build_OID_registry $< $@
+
+clean-files += oid_registry_data.c
+
+obj-$(CONFIG_UCS2_STRING) += ucs2_string.o
+obj-$(CONFIG_UBSAN) += ubsan.o
+
+UBSAN_SANITIZE_ubsan.o := n
+KASAN_SANITIZE_ubsan.o := n
+KCSAN_SANITIZE_ubsan.o := n
+CFLAGS_ubsan.o := -fno-stack-protector $(DISABLE_STACKLEAK_PLUGIN)
+
+obj-$(CONFIG_SBITMAP) += sbitmap.o
+
+obj-$(CONFIG_PARMAN) += parman.o
+
+obj-y += group_cpus.o
+
+# GCC library routines
+obj-$(CONFIG_GENERIC_LIB_ASHLDI3) += ashldi3.o
+obj-$(CONFIG_GENERIC_LIB_ASHRDI3) += ashrdi3.o
+obj-$(CONFIG_GENERIC_LIB_LSHRDI3) += lshrdi3.o
+obj-$(CONFIG_GENERIC_LIB_MULDI3) += muldi3.o
+obj-$(CONFIG_GENERIC_LIB_CMPDI2) += cmpdi2.o
+obj-$(CONFIG_GENERIC_LIB_UCMPDI2) += ucmpdi2.o
+obj-$(CONFIG_OBJAGG) += objagg.o
+
+# pldmfw library
+obj-$(CONFIG_PLDMFW) += pldmfw/
+
+# KUnit tests
+CFLAGS_bitfield_kunit.o := $(DISABLE_STRUCTLEAK_PLUGIN)
+obj-$(CONFIG_BITFIELD_KUNIT) += bitfield_kunit.o
+obj-$(CONFIG_CHECKSUM_KUNIT) += checksum_kunit.o
+obj-$(CONFIG_LIST_KUNIT_TEST) += list-test.o
+obj-$(CONFIG_HASHTABLE_KUNIT_TEST) += hashtable_test.o
+obj-$(CONFIG_LINEAR_RANGES_TEST) += test_linear_ranges.o
+obj-$(CONFIG_BITS_TEST) += test_bits.o
+obj-$(CONFIG_CMDLINE_KUNIT_TEST) += cmdline_kunit.o
+obj-$(CONFIG_SLUB_KUNIT_TEST) += slub_kunit.o
+obj-$(CONFIG_MEMCPY_KUNIT_TEST) += memcpy_kunit.o
+obj-$(CONFIG_IS_SIGNED_TYPE_KUNIT_TEST) += is_signed_type_kunit.o
+CFLAGS_overflow_kunit.o = $(call cc-disable-warning, tautological-constant-out-of-range-compare)
+obj-$(CONFIG_OVERFLOW_KUNIT_TEST) += overflow_kunit.o
+CFLAGS_stackinit_kunit.o += $(call cc-disable-warning, switch-unreachable)
+obj-$(CONFIG_STACKINIT_KUNIT_TEST) += stackinit_kunit.o
+CFLAGS_fortify_kunit.o += $(call cc-disable-warning, unsequenced)
+CFLAGS_fortify_kunit.o += $(DISABLE_STRUCTLEAK_PLUGIN)
+obj-$(CONFIG_FORTIFY_KUNIT_TEST) += fortify_kunit.o
+obj-$(CONFIG_STRCAT_KUNIT_TEST) += strcat_kunit.o
+obj-$(CONFIG_STRSCPY_KUNIT_TEST) += strscpy_kunit.o
+obj-$(CONFIG_SIPHASH_KUNIT_TEST) += siphash_kunit.o
+
+obj-$(CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED) += devmem_is_allowed.o
+
+# FORTIFY_SOURCE compile-time behavior tests
+TEST_FORTIFY_SRCS = $(wildcard $(srctree)/$(src)/test_fortify/*-*.c)
+TEST_FORTIFY_LOGS = $(patsubst $(srctree)/$(src)/%.c, %.log, $(TEST_FORTIFY_SRCS))
+TEST_FORTIFY_LOG = test_fortify.log
+
+quiet_cmd_test_fortify = TEST $@
+ cmd_test_fortify = $(CONFIG_SHELL) $(srctree)/scripts/test_fortify.sh \
+ $< $@ "$(NM)" $(CC) $(c_flags) \
+ $(call cc-disable-warning,fortify-source) \
+ -DKBUILD_EXTRA_WARN1
+
+targets += $(TEST_FORTIFY_LOGS)
+clean-files += $(TEST_FORTIFY_LOGS)
+clean-files += $(addsuffix .o, $(TEST_FORTIFY_LOGS))
+$(obj)/test_fortify/%.log: $(src)/test_fortify/%.c \
+ $(src)/test_fortify/test_fortify.h \
+ $(srctree)/include/linux/fortify-string.h \
+ $(srctree)/scripts/test_fortify.sh \
+ FORCE
+ $(call if_changed,test_fortify)
+
+quiet_cmd_gen_fortify_log = GEN $@
+ cmd_gen_fortify_log = cat </dev/null $(filter-out FORCE,$^) 2>/dev/null > $@ || true
+
+targets += $(TEST_FORTIFY_LOG)
+clean-files += $(TEST_FORTIFY_LOG)
+$(obj)/$(TEST_FORTIFY_LOG): $(addprefix $(obj)/, $(TEST_FORTIFY_LOGS)) FORCE
+ $(call if_changed,gen_fortify_log)
+
+# Fake dependency to trigger the fortify tests.
+ifeq ($(CONFIG_FORTIFY_SOURCE),y)
+$(obj)/string.o: $(obj)/$(TEST_FORTIFY_LOG)
+endif
diff --git a/lib/argv_split.c b/lib/argv_split.c
new file mode 100644
index 000000000..e28db8e3b
--- /dev/null
+++ b/lib/argv_split.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Helper function for splitting a string into an argv-like array.
+ */
+
+#include <linux/kernel.h>
+#include <linux/ctype.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+
+static int count_argc(const char *str)
+{
+ int count = 0;
+ bool was_space;
+
+ for (was_space = true; *str; str++) {
+ if (isspace(*str)) {
+ was_space = true;
+ } else if (was_space) {
+ was_space = false;
+ count++;
+ }
+ }
+
+ return count;
+}
+
+/**
+ * argv_free - free an argv
+ * @argv: the argument vector to be freed
+ *
+ * Frees an argv and the strings it points to.
+ */
+void argv_free(char **argv)
+{
+ argv--;
+ kfree(argv[0]);
+ kfree(argv);
+}
+EXPORT_SYMBOL(argv_free);
+
+/**
+ * argv_split - split a string at whitespace, returning an argv
+ * @gfp: the GFP mask used to allocate memory
+ * @str: the string to be split
+ * @argcp: returned argument count
+ *
+ * Returns: an array of pointers to strings which are split out from
+ * @str. This is performed by strictly splitting on white-space; no
+ * quote processing is performed. Multiple whitespace characters are
+ * considered to be a single argument separator. The returned array
+ * is always NULL-terminated. Returns NULL on memory allocation
+ * failure.
+ *
+ * The source string at `str' may be undergoing concurrent alteration via
+ * userspace sysctl activity (at least). The argv_split() implementation
+ * attempts to handle this gracefully by taking a local copy to work on.
+ */
+char **argv_split(gfp_t gfp, const char *str, int *argcp)
+{
+ char *argv_str;
+ bool was_space;
+ char **argv, **argv_ret;
+ int argc;
+
+ argv_str = kstrndup(str, KMALLOC_MAX_SIZE - 1, gfp);
+ if (!argv_str)
+ return NULL;
+
+ argc = count_argc(argv_str);
+ argv = kmalloc_array(argc + 2, sizeof(*argv), gfp);
+ if (!argv) {
+ kfree(argv_str);
+ return NULL;
+ }
+
+ *argv = argv_str;
+ argv_ret = ++argv;
+ for (was_space = true; *argv_str; argv_str++) {
+ if (isspace(*argv_str)) {
+ was_space = true;
+ *argv_str = 0;
+ } else if (was_space) {
+ was_space = false;
+ *argv++ = argv_str;
+ }
+ }
+ *argv = NULL;
+
+ if (argcp)
+ *argcp = argc;
+ return argv_ret;
+}
+EXPORT_SYMBOL(argv_split);
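+
+/*
+ * Example usage (an illustrative sketch, not part of this file):
+ *
+ *   int argc;
+ *   char **argv = argv_split(GFP_KERNEL, "mount  -o ro /dev/sda1", &argc);
+ *
+ *   if (argv) {
+ *           // argc == 4; argv[0] == "mount", argv[3] == "/dev/sda1",
+ *           // argv[4] == NULL; the run of spaces counts as one separator.
+ *           argv_free(argv);
+ *   }
+ */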
diff --git a/lib/ashldi3.c b/lib/ashldi3.c
new file mode 100644
index 000000000..8e6a69d59
--- /dev/null
+++ b/lib/ashldi3.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ */
+
+#include <linux/export.h>
+
+#include <linux/libgcc.h>
+
+long long notrace __ashldi3(long long u, word_type b)
+{
+ DWunion uu, w;
+ word_type bm;
+
+ if (b == 0)
+ return u;
+
+ uu.ll = u;
+ bm = 32 - b;
+
+ if (bm <= 0) {
+ w.s.low = 0;
+ w.s.high = (unsigned int) uu.s.low << -bm;
+ } else {
+ const unsigned int carries = (unsigned int) uu.s.low >> bm;
+
+ w.s.low = (unsigned int) uu.s.low << b;
+ w.s.high = ((unsigned int) uu.s.high << b) | carries;
+ }
+
+ return w.ll;
+}
+EXPORT_SYMBOL(__ashldi3);
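+
+/*
+ * Illustrative results (the word halves are 32 bits wide):
+ *
+ * __ashldi3(1LL, 8) == 0x100
+ * __ashldi3(1LL, 40) == 0x0000010000000000 (the bm <= 0 branch)
+ */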
diff --git a/lib/ashrdi3.c b/lib/ashrdi3.c
new file mode 100644
index 000000000..fc84244ad
--- /dev/null
+++ b/lib/ashrdi3.c
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ */
+
+#include <linux/export.h>
+
+#include <linux/libgcc.h>
+
+long long notrace __ashrdi3(long long u, word_type b)
+{
+ DWunion uu, w;
+ word_type bm;
+
+ if (b == 0)
+ return u;
+
+ uu.ll = u;
+ bm = 32 - b;
+
+ if (bm <= 0) {
+ /* w.s.high = 1..1 or 0..0 */
+ w.s.high = uu.s.high >> 31;
+ w.s.low = uu.s.high >> -bm;
+ } else {
+ const unsigned int carries = (unsigned int) uu.s.high << bm;
+
+ w.s.high = uu.s.high >> b;
+ w.s.low = ((unsigned int) uu.s.low >> b) | carries;
+ }
+
+ return w.ll;
+}
+EXPORT_SYMBOL(__ashrdi3);
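+
+/*
+ * Illustrative results (the sign bit is replicated into vacated bits):
+ *
+ * __ashrdi3(-256LL, 4) == -16
+ * __ashrdi3(-1LL << 40, 8) == -1LL << 32
+ */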
diff --git a/lib/asn1_decoder.c b/lib/asn1_decoder.c
new file mode 100644
index 000000000..13da529e2
--- /dev/null
+++ b/lib/asn1_decoder.c
@@ -0,0 +1,521 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Decoder for ASN.1 BER/DER/CER encoded bytestream
+ *
+ * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/asn1_decoder.h>
+#include <linux/asn1_ber_bytecode.h>
+
+static const unsigned char asn1_op_lengths[ASN1_OP__NR] = {
+ /* OPC TAG JMP ACT */
+ [ASN1_OP_MATCH] = 1 + 1,
+ [ASN1_OP_MATCH_OR_SKIP] = 1 + 1,
+ [ASN1_OP_MATCH_ACT] = 1 + 1 + 1,
+ [ASN1_OP_MATCH_ACT_OR_SKIP] = 1 + 1 + 1,
+ [ASN1_OP_MATCH_JUMP] = 1 + 1 + 1,
+ [ASN1_OP_MATCH_JUMP_OR_SKIP] = 1 + 1 + 1,
+ [ASN1_OP_MATCH_ANY] = 1,
+ [ASN1_OP_MATCH_ANY_OR_SKIP] = 1,
+ [ASN1_OP_MATCH_ANY_ACT] = 1 + 1,
+ [ASN1_OP_MATCH_ANY_ACT_OR_SKIP] = 1 + 1,
+ [ASN1_OP_COND_MATCH_OR_SKIP] = 1 + 1,
+ [ASN1_OP_COND_MATCH_ACT_OR_SKIP] = 1 + 1 + 1,
+ [ASN1_OP_COND_MATCH_JUMP_OR_SKIP] = 1 + 1 + 1,
+ [ASN1_OP_COND_MATCH_ANY] = 1,
+ [ASN1_OP_COND_MATCH_ANY_OR_SKIP] = 1,
+ [ASN1_OP_COND_MATCH_ANY_ACT] = 1 + 1,
+ [ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP] = 1 + 1,
+ [ASN1_OP_COND_FAIL] = 1,
+ [ASN1_OP_COMPLETE] = 1,
+ [ASN1_OP_ACT] = 1 + 1,
+ [ASN1_OP_MAYBE_ACT] = 1 + 1,
+ [ASN1_OP_RETURN] = 1,
+ [ASN1_OP_END_SEQ] = 1,
+ [ASN1_OP_END_SEQ_OF] = 1 + 1,
+ [ASN1_OP_END_SET] = 1,
+ [ASN1_OP_END_SET_OF] = 1 + 1,
+ [ASN1_OP_END_SEQ_ACT] = 1 + 1,
+ [ASN1_OP_END_SEQ_OF_ACT] = 1 + 1 + 1,
+ [ASN1_OP_END_SET_ACT] = 1 + 1,
+ [ASN1_OP_END_SET_OF_ACT] = 1 + 1 + 1,
+};
+
+/*
+ * Find the length of an indefinite length object
+ * @data: The data buffer
+ * @datalen: The end of the innermost containing element in the buffer
+ * @_dp: The data parse cursor (updated before returning)
+ * @_len: Where to return the size of the element.
+ * @_errmsg: Where to return a pointer to an error message on error
+ */
+static int asn1_find_indefinite_length(const unsigned char *data, size_t datalen,
+ size_t *_dp, size_t *_len,
+ const char **_errmsg)
+{
+ unsigned char tag, tmp;
+ size_t dp = *_dp, len, n;
+ int indef_level = 1;
+
+next_tag:
+ if (unlikely(datalen - dp < 2)) {
+ if (datalen == dp)
+ goto missing_eoc;
+ goto data_overrun_error;
+ }
+
+ /* Extract a tag from the data */
+ tag = data[dp++];
+ if (tag == ASN1_EOC) {
+ /* It appears to be an EOC. */
+ if (data[dp++] != 0)
+ goto invalid_eoc;
+ if (--indef_level <= 0) {
+ *_len = dp - *_dp;
+ *_dp = dp;
+ return 0;
+ }
+ goto next_tag;
+ }
+
+ if (unlikely((tag & 0x1f) == ASN1_LONG_TAG)) {
+ do {
+ if (unlikely(datalen - dp < 2))
+ goto data_overrun_error;
+ tmp = data[dp++];
+ } while (tmp & 0x80);
+ }
+
+ /* Extract the length */
+ len = data[dp++];
+ if (len <= 0x7f)
+ goto check_length;
+
+ if (unlikely(len == ASN1_INDEFINITE_LENGTH)) {
+ /* Indefinite length */
+ if (unlikely((tag & ASN1_CONS_BIT) == ASN1_PRIM << 5))
+ goto indefinite_len_primitive;
+ indef_level++;
+ goto next_tag;
+ }
+
+ n = len - 0x80;
+ if (unlikely(n > sizeof(len) - 1))
+ goto length_too_long;
+ if (unlikely(n > datalen - dp))
+ goto data_overrun_error;
+ len = 0;
+ for (; n > 0; n--) {
+ len <<= 8;
+ len |= data[dp++];
+ }
+check_length:
+ if (len > datalen - dp)
+ goto data_overrun_error;
+ dp += len;
+ goto next_tag;
+
+length_too_long:
+ *_errmsg = "Unsupported length";
+ goto error;
+indefinite_len_primitive:
+ *_errmsg = "Indefinite len primitive not permitted";
+ goto error;
+invalid_eoc:
+ *_errmsg = "Invalid length EOC";
+ goto error;
+data_overrun_error:
+ *_errmsg = "Data overrun error";
+ goto error;
+missing_eoc:
+ *_errmsg = "Missing EOC in indefinite len cons";
+error:
+ *_dp = dp;
+ return -1;
+}
+
+/**
+ * asn1_ber_decoder - Decode BER/DER/CER ASN.1 according to a pattern
+ * @decoder: The decoder definition (produced by asn1_compiler)
+ * @context: The caller's context (to be passed to the action functions)
+ * @data: The encoded data
+ * @datalen: The size of the encoded data
+ *
+ * Decode BER/DER/CER encoded ASN.1 data according to a bytecode pattern
+ * produced by asn1_compiler. Action functions are called on marked tags to
+ * allow the caller to retrieve significant data.
+ *
+ * LIMITATIONS:
+ *
+ * To keep down the amount of stack used by this function, the following limits
+ * have been imposed:
+ *
+ * (1) This won't handle datalen > 65535 without increasing the size of the
+ * cons stack elements and length_too_long checking.
+ *
+ * (2) The stack of constructed types is 10 deep. If the depth of non-leaf
+ * constructed types exceeds this, the decode will fail.
+ *
+ * (3) The SET type (not the SET OF type) isn't really supported as tracking
+ * what members of the set have been seen is a pain.
+ */
+int asn1_ber_decoder(const struct asn1_decoder *decoder,
+ void *context,
+ const unsigned char *data,
+ size_t datalen)
+{
+ const unsigned char *machine = decoder->machine;
+ const asn1_action_t *actions = decoder->actions;
+ size_t machlen = decoder->machlen;
+ enum asn1_opcode op;
+ unsigned char tag = 0, csp = 0, jsp = 0, optag = 0, hdr = 0;
+ const char *errmsg;
+ size_t pc = 0, dp = 0, tdp = 0, len = 0;
+ int ret;
+
+ unsigned char flags = 0;
+#define FLAG_INDEFINITE_LENGTH 0x01
+#define FLAG_MATCHED 0x02
+#define FLAG_LAST_MATCHED 0x04 /* Last tag matched */
+#define FLAG_CONS 0x20 /* Corresponds to CONS bit in the opcode tag
+ * - ie. whether or not we are going to parse
+ * a compound type.
+ */
+
+#define NR_CONS_STACK 10
+ unsigned short cons_dp_stack[NR_CONS_STACK];
+ unsigned short cons_datalen_stack[NR_CONS_STACK];
+ unsigned char cons_hdrlen_stack[NR_CONS_STACK];
+#define NR_JUMP_STACK 10
+ unsigned char jump_stack[NR_JUMP_STACK];
+
+ if (datalen > 65535)
+ return -EMSGSIZE;
+
+next_op:
+ pr_debug("next_op: pc=\e[32m%zu\e[m/%zu dp=\e[33m%zu\e[m/%zu C=%d J=%d\n",
+ pc, machlen, dp, datalen, csp, jsp);
+ if (unlikely(pc >= machlen))
+ goto machine_overrun_error;
+ op = machine[pc];
+ if (unlikely(pc + asn1_op_lengths[op] > machlen))
+ goto machine_overrun_error;
+
+ /* If this command is meant to match a tag, then do that before
+ * evaluating the command.
+ */
+ if (op <= ASN1_OP__MATCHES_TAG) {
+ unsigned char tmp;
+
+ /* Skip conditional matches if possible */
+ if ((op & ASN1_OP_MATCH__COND && flags & FLAG_MATCHED) ||
+ (op & ASN1_OP_MATCH__SKIP && dp == datalen)) {
+ flags &= ~FLAG_LAST_MATCHED;
+ pc += asn1_op_lengths[op];
+ goto next_op;
+ }
+
+ flags = 0;
+ hdr = 2;
+
+ /* Extract a tag from the data */
+ if (unlikely(datalen - dp < 2))
+ goto data_overrun_error;
+ tag = data[dp++];
+ if (unlikely((tag & 0x1f) == ASN1_LONG_TAG))
+ goto long_tag_not_supported;
+
+ if (op & ASN1_OP_MATCH__ANY) {
+ pr_debug("- any %02x\n", tag);
+ } else {
+ /* Extract the tag from the machine
+ * - Either CONS or PRIM are permitted in the data if
+ * CONS is not set in the op stream, otherwise CONS
+ * is mandatory.
+ */
+ optag = machine[pc + 1];
+ flags |= optag & FLAG_CONS;
+
+ /* Determine whether the tag matched */
+ tmp = optag ^ tag;
+ tmp &= ~(optag & ASN1_CONS_BIT);
+ pr_debug("- match? %02x %02x %02x\n", tag, optag, tmp);
+ if (tmp != 0) {
+ /* All odd-numbered tags are MATCH_OR_SKIP. */
+ if (op & ASN1_OP_MATCH__SKIP) {
+ pc += asn1_op_lengths[op];
+ dp--;
+ goto next_op;
+ }
+ goto tag_mismatch;
+ }
+ }
+ flags |= FLAG_MATCHED;
+
+ len = data[dp++];
+ if (len > 0x7f) {
+ if (unlikely(len == ASN1_INDEFINITE_LENGTH)) {
+ /* Indefinite length */
+ if (unlikely(!(tag & ASN1_CONS_BIT)))
+ goto indefinite_len_primitive;
+ flags |= FLAG_INDEFINITE_LENGTH;
+ if (unlikely(2 > datalen - dp))
+ goto data_overrun_error;
+ } else {
+ int n = len - 0x80;
+ if (unlikely(n > 2))
+ goto length_too_long;
+ if (unlikely(n > datalen - dp))
+ goto data_overrun_error;
+ hdr += n;
+ for (len = 0; n > 0; n--) {
+ len <<= 8;
+ len |= data[dp++];
+ }
+ if (unlikely(len > datalen - dp))
+ goto data_overrun_error;
+ }
+ } else {
+ if (unlikely(len > datalen - dp))
+ goto data_overrun_error;
+ }
+
+ if (flags & FLAG_CONS) {
+ /* For expected compound forms, we stack the positions
+ * of the start and end of the data.
+ */
+ if (unlikely(csp >= NR_CONS_STACK))
+ goto cons_stack_overflow;
+ cons_dp_stack[csp] = dp;
+ cons_hdrlen_stack[csp] = hdr;
+ if (!(flags & FLAG_INDEFINITE_LENGTH)) {
+ cons_datalen_stack[csp] = datalen;
+ datalen = dp + len;
+ } else {
+ cons_datalen_stack[csp] = 0;
+ }
+ csp++;
+ }
+
+ pr_debug("- TAG: %02x %zu%s\n",
+ tag, len, flags & FLAG_CONS ? " CONS" : "");
+ tdp = dp;
+ }
+
+ /* Decide how to handle the operation */
+ switch (op) {
+ case ASN1_OP_MATCH:
+ case ASN1_OP_MATCH_OR_SKIP:
+ case ASN1_OP_MATCH_ACT:
+ case ASN1_OP_MATCH_ACT_OR_SKIP:
+ case ASN1_OP_MATCH_ANY:
+ case ASN1_OP_MATCH_ANY_OR_SKIP:
+ case ASN1_OP_MATCH_ANY_ACT:
+ case ASN1_OP_MATCH_ANY_ACT_OR_SKIP:
+ case ASN1_OP_COND_MATCH_OR_SKIP:
+ case ASN1_OP_COND_MATCH_ACT_OR_SKIP:
+ case ASN1_OP_COND_MATCH_ANY:
+ case ASN1_OP_COND_MATCH_ANY_OR_SKIP:
+ case ASN1_OP_COND_MATCH_ANY_ACT:
+ case ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP:
+
+ if (!(flags & FLAG_CONS)) {
+ if (flags & FLAG_INDEFINITE_LENGTH) {
+ size_t tmp = dp;
+
+ ret = asn1_find_indefinite_length(
+ data, datalen, &tmp, &len, &errmsg);
+ if (ret < 0)
+ goto error;
+ }
+ pr_debug("- LEAF: %zu\n", len);
+ }
+
+ if (op & ASN1_OP_MATCH__ACT) {
+ unsigned char act;
+
+ if (op & ASN1_OP_MATCH__ANY)
+ act = machine[pc + 1];
+ else
+ act = machine[pc + 2];
+ ret = actions[act](context, hdr, tag, data + dp, len);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (!(flags & FLAG_CONS))
+ dp += len;
+ pc += asn1_op_lengths[op];
+ goto next_op;
+
+ case ASN1_OP_MATCH_JUMP:
+ case ASN1_OP_MATCH_JUMP_OR_SKIP:
+ case ASN1_OP_COND_MATCH_JUMP_OR_SKIP:
+ pr_debug("- MATCH_JUMP\n");
+ if (unlikely(jsp == NR_JUMP_STACK))
+ goto jump_stack_overflow;
+ jump_stack[jsp++] = pc + asn1_op_lengths[op];
+ pc = machine[pc + 2];
+ goto next_op;
+
+ case ASN1_OP_COND_FAIL:
+ if (unlikely(!(flags & FLAG_MATCHED)))
+ goto tag_mismatch;
+ pc += asn1_op_lengths[op];
+ goto next_op;
+
+ case ASN1_OP_COMPLETE:
+ if (unlikely(jsp != 0 || csp != 0)) {
+ pr_err("ASN.1 decoder error: Stacks not empty at completion (%u, %u)\n",
+ jsp, csp);
+ return -EBADMSG;
+ }
+ return 0;
+
+ case ASN1_OP_END_SET:
+ case ASN1_OP_END_SET_ACT:
+ if (unlikely(!(flags & FLAG_MATCHED)))
+ goto tag_mismatch;
+ fallthrough;
+
+ case ASN1_OP_END_SEQ:
+ case ASN1_OP_END_SET_OF:
+ case ASN1_OP_END_SEQ_OF:
+ case ASN1_OP_END_SEQ_ACT:
+ case ASN1_OP_END_SET_OF_ACT:
+ case ASN1_OP_END_SEQ_OF_ACT:
+ if (unlikely(csp <= 0))
+ goto cons_stack_underflow;
+ csp--;
+ tdp = cons_dp_stack[csp];
+ hdr = cons_hdrlen_stack[csp];
+ len = datalen;
+ datalen = cons_datalen_stack[csp];
+ pr_debug("- end cons t=%zu dp=%zu l=%zu/%zu\n",
+ tdp, dp, len, datalen);
+ if (datalen == 0) {
+ /* Indefinite length - check for the EOC. */
+ datalen = len;
+ if (unlikely(datalen - dp < 2))
+ goto data_overrun_error;
+ if (data[dp++] != 0) {
+ if (op & ASN1_OP_END__OF) {
+ dp--;
+ csp++;
+ pc = machine[pc + 1];
+ pr_debug("- continue\n");
+ goto next_op;
+ }
+ goto missing_eoc;
+ }
+ if (data[dp++] != 0)
+ goto invalid_eoc;
+ len = dp - tdp - 2;
+ } else {
+ if (dp < len && (op & ASN1_OP_END__OF)) {
+ datalen = len;
+ csp++;
+ pc = machine[pc + 1];
+ pr_debug("- continue\n");
+ goto next_op;
+ }
+ if (dp != len)
+ goto cons_length_error;
+ len -= tdp;
+ pr_debug("- cons len l=%zu d=%zu\n", len, dp - tdp);
+ }
+
+ if (op & ASN1_OP_END__ACT) {
+ unsigned char act;
+ if (op & ASN1_OP_END__OF)
+ act = machine[pc + 2];
+ else
+ act = machine[pc + 1];
+ ret = actions[act](context, hdr, 0, data + tdp, len);
+ if (ret < 0)
+ return ret;
+ }
+ pc += asn1_op_lengths[op];
+ goto next_op;
+
+ case ASN1_OP_MAYBE_ACT:
+ if (!(flags & FLAG_LAST_MATCHED)) {
+ pc += asn1_op_lengths[op];
+ goto next_op;
+ }
+ fallthrough;
+
+ case ASN1_OP_ACT:
+ ret = actions[machine[pc + 1]](context, hdr, tag, data + tdp, len);
+ if (ret < 0)
+ return ret;
+ pc += asn1_op_lengths[op];
+ goto next_op;
+
+ case ASN1_OP_RETURN:
+ if (unlikely(jsp <= 0))
+ goto jump_stack_underflow;
+ pc = jump_stack[--jsp];
+ flags |= FLAG_MATCHED | FLAG_LAST_MATCHED;
+ goto next_op;
+
+ default:
+ break;
+ }
+
+ /* Shouldn't reach here */
+ pr_err("ASN.1 decoder error: Found reserved opcode (%u) pc=%zu\n",
+ op, pc);
+ return -EBADMSG;
+
+data_overrun_error:
+ errmsg = "Data overrun error";
+ goto error;
+machine_overrun_error:
+ errmsg = "Machine overrun error";
+ goto error;
+jump_stack_underflow:
+ errmsg = "Jump stack underflow";
+ goto error;
+jump_stack_overflow:
+ errmsg = "Jump stack overflow";
+ goto error;
+cons_stack_underflow:
+ errmsg = "Cons stack underflow";
+ goto error;
+cons_stack_overflow:
+ errmsg = "Cons stack overflow";
+ goto error;
+cons_length_error:
+ errmsg = "Cons length error";
+ goto error;
+missing_eoc:
+ errmsg = "Missing EOC in indefinite len cons";
+ goto error;
+invalid_eoc:
+ errmsg = "Invalid length EOC";
+ goto error;
+length_too_long:
+ errmsg = "Unsupported length";
+ goto error;
+indefinite_len_primitive:
+ errmsg = "Indefinite len primitive not permitted";
+ goto error;
+tag_mismatch:
+ errmsg = "Unexpected tag";
+ goto error;
+long_tag_not_supported:
+ errmsg = "Long tag not supported";
+error:
+ pr_debug("\nASN1: %s [m=%zu d=%zu ot=%02x t=%02x l=%zu]\n",
+ errmsg, pc, dp, optag, tag, len);
+ return -EBADMSG;
+}
+EXPORT_SYMBOL_GPL(asn1_ber_decoder);
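+
+/*
+ * Minimal usage sketch. The decoder table ("foo_decoder", from a
+ * hypothetical foo.asn1 grammar) and its action callbacks would be
+ * generated by asn1_compiler:
+ *
+ * ret = asn1_ber_decoder(&foo_decoder, ctx, data, datalen);
+ * if (ret < 0)
+ * return ret;
+ */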
+
+MODULE_LICENSE("GPL");
diff --git a/lib/asn1_encoder.c b/lib/asn1_encoder.c
new file mode 100644
index 000000000..0fd3c454a
--- /dev/null
+++ b/lib/asn1_encoder.c
@@ -0,0 +1,452 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Simple encoder primitives for ASN.1 BER/DER/CER
+ *
+ * Copyright (C) 2019 James.Bottomley@HansenPartnership.com
+ */
+
+#include <linux/asn1_encoder.h>
+#include <linux/bug.h>
+#include <linux/string.h>
+#include <linux/module.h>
+
+/**
+ * asn1_encode_integer() - encode positive integer to ASN.1
+ * @data: pointer to the pointer to the data
+ * @end_data: end of data pointer, points one beyond last usable byte in @data
+ * @integer: integer to be encoded
+ *
+ * This is a simplified encoder: it currently only does
+ * positive integers, but it should be simple enough to add the
+ * negative case if a use comes along.
+ */
+unsigned char *
+asn1_encode_integer(unsigned char *data, const unsigned char *end_data,
+ s64 integer)
+{
+ int data_len = end_data - data;
+ unsigned char *d = &data[2];
+ bool found = false;
+ int i;
+
+ if (WARN(integer < 0,
+ "BUG: integer encode only supports positive integers"))
+ return ERR_PTR(-EINVAL);
+
+ if (IS_ERR(data))
+ return data;
+
+ /* need at least 3 bytes for tag, length and integer encoding */
+ if (data_len < 3)
+ return ERR_PTR(-EINVAL);
+
+ /* remaining length at d (the start of the integer encoding) */
+ data_len -= 2;
+
+ data[0] = _tag(UNIV, PRIM, INT);
+ if (integer == 0) {
+ *d++ = 0;
+ goto out;
+ }
+
+ for (i = sizeof(integer); i > 0 ; i--) {
+ int byte = integer >> (8 * (i - 1));
+
+ if (!found && byte == 0)
+ continue;
+
+ /*
+ * for a positive number the first byte must have bit
+ * 7 clear in two's complement (otherwise it's a
+ * negative number) so prepend a leading zero if
+ * that's not the case
+ */
+ if (!found && (byte & 0x80)) {
+ /*
+ * no check needed here, we already know we
+ * have len >= 1
+ */
+ *d++ = 0;
+ data_len--;
+ }
+
+ found = true;
+ if (data_len == 0)
+ return ERR_PTR(-EINVAL);
+
+ *d++ = byte;
+ data_len--;
+ }
+
+ out:
+ data[1] = d - data - 2;
+
+ return d;
+}
+EXPORT_SYMBOL_GPL(asn1_encode_integer);
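+
+/*
+ * Illustrative encodings produced by asn1_encode_integer():
+ *
+ * 127 -> 02 01 7f
+ * 128 -> 02 02 00 80 (leading zero keeps the value positive)
+ * 256 -> 02 02 01 00
+ */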
+
+/* calculate the base 128 digit values setting the top bit of the first octet */
+static int asn1_encode_oid_digit(unsigned char **_data, int *data_len, u32 oid)
+{
+ unsigned char *data = *_data;
+ int start = 7 + 7 + 7 + 7;
+ int ret = 0;
+
+ if (*data_len < 1)
+ return -EINVAL;
+
+ /* quick case */
+ if (oid == 0) {
+ *data++ = 0x80;
+ (*data_len)--;
+ goto out;
+ }
+
+ while (oid >> start == 0)
+ start -= 7;
+
+ while (start > 0 && *data_len > 0) {
+ u8 byte;
+
+ byte = oid >> start;
+ oid = oid - (byte << start);
+ start -= 7;
+ byte |= 0x80;
+ *data++ = byte;
+ (*data_len)--;
+ }
+
+ if (*data_len > 0) {
+ *data++ = oid;
+ (*data_len)--;
+ } else {
+ ret = -EINVAL;
+ }
+
+ out:
+ *_data = data;
+ return ret;
+}
+
+/**
+ * asn1_encode_oid() - encode an oid to ASN.1
+ * @data: position to begin encoding at
+ * @end_data: end of data pointer, points one beyond last usable byte in @data
+ * @oid: array of oids
+ * @oid_len: length of oid array
+ *
+ * This encodes an OID into ASN.1 when presented as an array of OID values.
+ */
+unsigned char *
+asn1_encode_oid(unsigned char *data, const unsigned char *end_data,
+ u32 oid[], int oid_len)
+{
+ int data_len = end_data - data;
+ unsigned char *d = data + 2;
+ int i, ret;
+
+ if (WARN(oid_len < 2, "OID must have at least two elements"))
+ return ERR_PTR(-EINVAL);
+
+ if (WARN(oid_len > 32, "OID is too large"))
+ return ERR_PTR(-EINVAL);
+
+ if (IS_ERR(data))
+ return data;
+
+
+ /* need at least 3 bytes for tag, length and OID encoding */
+ if (data_len < 3)
+ return ERR_PTR(-EINVAL);
+
+ data[0] = _tag(UNIV, PRIM, OID);
+ *d++ = oid[0] * 40 + oid[1];
+
+ data_len -= 3;
+
+ for (i = 2; i < oid_len; i++) {
+ ret = asn1_encode_oid_digit(&d, &data_len, oid[i]);
+ if (ret < 0)
+ return ERR_PTR(ret);
+ }
+
+ data[1] = d - data - 2;
+
+ return d;
+}
+EXPORT_SYMBOL_GPL(asn1_encode_oid);
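+
+/*
+ * Illustrative sketch: encoding 2.5.4.3 (id-at-commonName):
+ *
+ * u32 oid[] = { 2, 5, 4, 3 };
+ * unsigned char buf[8], *p;
+ *
+ * p = asn1_encode_oid(buf, buf + sizeof(buf), oid, ARRAY_SIZE(oid));
+ * // on success buf holds 06 03 55 04 03 (0x55 == 2 * 40 + 5)
+ */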
+
+/**
+ * asn1_encode_length() - encode a length to follow an ASN.1 tag
+ * @data: pointer to encode at
+ * @data_len: pointer to remaining length (adjusted by routine)
+ * @len: length to encode
+ *
+ * This routine can encode lengths up to 0xffffff using the ASN.1 rules.
+ * It will accept a negative length and place a zero length instead
+ * (to keep the ASN.1 valid). This convention allows other encoder
+ * primitives to accept negative lengths as signalling that the
+ * sequence will be re-encoded when the length is known.
+ */
+static int asn1_encode_length(unsigned char **data, int *data_len, int len)
+{
+ if (*data_len < 1)
+ return -EINVAL;
+
+ if (len < 0) {
+ *((*data)++) = 0;
+ (*data_len)--;
+ return 0;
+ }
+
+ if (len <= 0x7f) {
+ *((*data)++) = len;
+ (*data_len)--;
+ return 0;
+ }
+
+ if (*data_len < 2)
+ return -EINVAL;
+
+ if (len <= 0xff) {
+ *((*data)++) = 0x81;
+ *((*data)++) = len & 0xff;
+ *data_len -= 2;
+ return 0;
+ }
+
+ if (*data_len < 3)
+ return -EINVAL;
+
+ if (len <= 0xffff) {
+ *((*data)++) = 0x82;
+ *((*data)++) = (len >> 8) & 0xff;
+ *((*data)++) = len & 0xff;
+ *data_len -= 3;
+ return 0;
+ }
+
+ if (WARN(len > 0xffffff, "ASN.1 length can't be > 0xffffff"))
+ return -EINVAL;
+
+ if (*data_len < 4)
+ return -EINVAL;
+ *((*data)++) = 0x83;
+ *((*data)++) = (len >> 16) & 0xff;
+ *((*data)++) = (len >> 8) & 0xff;
+ *((*data)++) = len & 0xff;
+ *data_len -= 4;
+
+ return 0;
+}
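+
+/*
+ * Illustrative length encodings produced above:
+ *
+ * 5 -> 05 (short form)
+ * 200 -> 81 c8
+ * 300 -> 82 01 2c
+ * 70000 -> 83 01 11 70
+ */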
+
+/**
+ * asn1_encode_tag() - add a tag for optional or explicit value
+ * @data: pointer to place tag at
+ * @end_data: end of data pointer, points one beyond last usable byte in @data
+ * @tag: tag to be placed
+ * @string: the data to be tagged
+ * @len: the length of the data to be tagged
+ *
+ * Note this currently only handles short form tags < 31.
+ *
+ * Standard usage is to pass in a @tag, @string and @length and the
+ * @string will be ASN.1 encoded with @tag and placed into @data. If
+ * the encoding would put data past @end_data then an error is
+ * returned, otherwise a pointer to a position one beyond the encoding
+ * is returned.
+ *
+ * To encode in place pass a NULL @string and -1 for @len and the
+ * maximum allowable beginning and end of the data; all this will do
+ * is add a dummy length and return a data pointer to the place where
+ * the tag contents should be placed. The data should be copied in by
+ * the calling routine, which should then
+ * repeat the prior statement but now with the known length. In order
+ * to avoid having to keep both before and after pointers, the repeat
+ * expects to be called with @data pointing to where the first encode
+ * returned it and still NULL for @string but the real length in @len.
+ */
+unsigned char *
+asn1_encode_tag(unsigned char *data, const unsigned char *end_data,
+ u32 tag, const unsigned char *string, int len)
+{
+ int data_len = end_data - data;
+ int ret;
+
+ if (WARN(tag > 30, "ASN.1 tag can't be > 30"))
+ return ERR_PTR(-EINVAL);
+
+ if (!string && WARN(len > 127,
+ "BUG: recode tag is too big (>127)"))
+ return ERR_PTR(-EINVAL);
+
+ if (IS_ERR(data))
+ return data;
+
+ if (!string && len > 0) {
+ /*
+ * we're recoding, so move back to the start of the
+ * tag so that the real length can overwrite the
+ * dummy one installed on the first pass
+ */
+ data -= 2;
+ data_len = 2;
+ }
+
+ if (data_len < 2)
+ return ERR_PTR(-EINVAL);
+
+ *(data++) = _tagn(CONT, CONS, tag);
+ data_len--;
+ ret = asn1_encode_length(&data, &data_len, len);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ if (!string)
+ return data;
+
+ if (data_len < len)
+ return ERR_PTR(-EINVAL);
+
+ memcpy(data, string, len);
+ data += len;
+
+ return data;
+}
+EXPORT_SYMBOL_GPL(asn1_encode_tag);
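+
+/*
+ * Two-pass recode sketch for tag 0 (emit_contents() is a hypothetical
+ * helper that writes the tag contents and returns the end pointer; the
+ * contents must stay <= 127 bytes for the recode, per the WARN above):
+ *
+ * unsigned char *pos, *end;
+ *
+ * pos = asn1_encode_tag(data, end_data, 0, NULL, -1);
+ * end = emit_contents(pos, end_data);
+ * asn1_encode_tag(pos, end_data, 0, NULL, end - pos);
+ */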
+
+/**
+ * asn1_encode_octet_string() - encode an ASN.1 OCTET STRING
+ * @data: pointer to encode at
+ * @end_data: end of data pointer, points one beyond last usable byte in @data
+ * @string: string to be encoded
+ * @len: length of string
+ *
+ * Note ASN.1 octet strings may contain zeros, so the length is obligatory.
+ */
+unsigned char *
+asn1_encode_octet_string(unsigned char *data,
+ const unsigned char *end_data,
+ const unsigned char *string, u32 len)
+{
+ int data_len = end_data - data;
+ int ret;
+
+ if (IS_ERR(data))
+ return data;
+
+ /* need minimum of 2 bytes for tag and length of zero length string */
+ if (data_len < 2)
+ return ERR_PTR(-EINVAL);
+
+ *(data++) = _tag(UNIV, PRIM, OTS);
+ data_len--;
+
+ ret = asn1_encode_length(&data, &data_len, len);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (data_len < len)
+ return ERR_PTR(-EINVAL);
+
+ memcpy(data, string, len);
+ data += len;
+
+ return data;
+}
+EXPORT_SYMBOL_GPL(asn1_encode_octet_string);
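+
+/*
+ * Illustrative encoding: the three bytes "abc" become 04 03 61 62 63.
+ */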
+
+/**
+ * asn1_encode_sequence() - wrap a byte stream in an ASN.1 SEQUENCE
+ * @data: pointer to encode at
+ * @end_data: end of data pointer, points one beyond last usable byte in @data
+ * @seq: data to be encoded as a sequence
+ * @len: length of the data to be encoded as a sequence
+ *
+ * Fill in a sequence. To encode in place, pass NULL for @seq and -1
+ * for @len; then call again once the length is known (still with NULL
+ * for @seq). In order to avoid having to keep both before and after
+ * pointers, the repeat expects to be called with @data pointing to
+ * where the first encode placed it.
+ */
+unsigned char *
+asn1_encode_sequence(unsigned char *data, const unsigned char *end_data,
+ const unsigned char *seq, int len)
+{
+ int data_len = end_data - data;
+ int ret;
+
+ if (!seq && WARN(len > 127,
+ "BUG: recode sequence is too big (>127)"))
+ return ERR_PTR(-EINVAL);
+
+ if (IS_ERR(data))
+ return data;
+
+ if (!seq && len >= 0) {
+ /*
+ * we're recoding, so move back to the start of the
+ * sequence so that the real length can overwrite
+ * the dummy one installed on the first pass
+ */
+ data -= 2;
+ data_len = 2;
+ }
+
+ if (data_len < 2)
+ return ERR_PTR(-EINVAL);
+
+ *(data++) = _tag(UNIV, CONS, SEQ);
+ data_len--;
+
+ ret = asn1_encode_length(&data, &data_len, len);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (!seq)
+ return data;
+
+ if (data_len < len)
+ return ERR_PTR(-EINVAL);
+
+ memcpy(data, seq, len);
+ data += len;
+
+ return data;
+}
+EXPORT_SYMBOL_GPL(asn1_encode_sequence);
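+
+/*
+ * Sketch of the two-pass pattern described above (error checks omitted):
+ *
+ * unsigned char *seq, *p;
+ *
+ * seq = asn1_encode_sequence(data, end_data, NULL, -1);
+ * p = asn1_encode_integer(seq, end_data, 42);
+ * p = asn1_encode_boolean(p, end_data, true);
+ * asn1_encode_sequence(seq, end_data, NULL, p - seq);
+ */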
+
+/**
+ * asn1_encode_boolean() - encode a boolean value to ASN.1
+ * @data: pointer to encode at
+ * @end_data: end of data pointer, points one beyond last usable byte in @data
+ * @val: the boolean true/false value
+ */
+unsigned char *
+asn1_encode_boolean(unsigned char *data, const unsigned char *end_data,
+ bool val)
+{
+ int data_len = end_data - data;
+
+ if (IS_ERR(data))
+ return data;
+
+ /* booleans are 3 bytes: tag, length == 1 and value == 0 or 1 */
+ if (data_len < 3)
+ return ERR_PTR(-EINVAL);
+
+ *(data++) = _tag(UNIV, PRIM, BOOL);
+ data_len--;
+
+ asn1_encode_length(&data, &data_len, 1);
+
+ if (val)
+ *(data++) = 1;
+ else
+ *(data++) = 0;
+
+ return data;
+}
+EXPORT_SYMBOL_GPL(asn1_encode_boolean);
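+
+/*
+ * Illustrative encodings: true -> 01 01 01, false -> 01 01 00.
+ */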
+
+MODULE_LICENSE("GPL");
diff --git a/lib/assoc_array.c b/lib/assoc_array.c
new file mode 100644
index 000000000..ca0b4f360
--- /dev/null
+++ b/lib/assoc_array.c
@@ -0,0 +1,1726 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Generic associative array implementation.
+ *
+ * See Documentation/core-api/assoc_array.rst for information.
+ *
+ * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+//#define DEBUG
+#include <linux/rcupdate.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/assoc_array_priv.h>
+
+/*
+ * Iterate over an associative array. The caller must hold the RCU read lock
+ * or better.
+ */
+static int assoc_array_subtree_iterate(const struct assoc_array_ptr *root,
+ const struct assoc_array_ptr *stop,
+ int (*iterator)(const void *leaf,
+ void *iterator_data),
+ void *iterator_data)
+{
+ const struct assoc_array_shortcut *shortcut;
+ const struct assoc_array_node *node;
+ const struct assoc_array_ptr *cursor, *ptr, *parent;
+ unsigned long has_meta;
+ int slot, ret;
+
+ cursor = root;
+
+begin_node:
+ if (assoc_array_ptr_is_shortcut(cursor)) {
+ /* Descend through a shortcut */
+ shortcut = assoc_array_ptr_to_shortcut(cursor);
+ cursor = READ_ONCE(shortcut->next_node); /* Address dependency. */
+ }
+
+ node = assoc_array_ptr_to_node(cursor);
+ slot = 0;
+
+ /* We perform two passes of each node.
+ *
+ * The first pass does all the leaves in this node. This means we
+ * don't miss any leaves if the node is split up by insertion whilst
+ * we're iterating over the branches rooted here (we may, however, see
+ * some leaves twice).
+ */
+ has_meta = 0;
+ for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
+ ptr = READ_ONCE(node->slots[slot]); /* Address dependency. */
+ has_meta |= (unsigned long)ptr;
+ if (ptr && assoc_array_ptr_is_leaf(ptr)) {
+ /* We need a barrier between the read of the pointer,
+ * which is supplied by the above READ_ONCE().
+ */
+ /* Invoke the callback */
+ ret = iterator(assoc_array_ptr_to_leaf(ptr),
+ iterator_data);
+ if (ret)
+ return ret;
+ }
+ }
+
+ /* The second pass attends to all the metadata pointers. If we follow
+ * one of these we may find that we don't come back here, but rather go
+ * back to a replacement node with the leaves in a different layout.
+ *
+ * We are guaranteed to make progress, however, as the slot number for
+ * a particular portion of the key space cannot change - and we
+ * continue at the back pointer + 1.
+ */
+ if (!(has_meta & ASSOC_ARRAY_PTR_META_TYPE))
+ goto finished_node;
+ slot = 0;
+
+continue_node:
+ node = assoc_array_ptr_to_node(cursor);
+ for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
+ ptr = READ_ONCE(node->slots[slot]); /* Address dependency. */
+ if (assoc_array_ptr_is_meta(ptr)) {
+ cursor = ptr;
+ goto begin_node;
+ }
+ }
+
+finished_node:
+ /* Move up to the parent (may need to skip back over a shortcut) */
+ parent = READ_ONCE(node->back_pointer); /* Address dependency. */
+ slot = node->parent_slot;
+ if (parent == stop)
+ return 0;
+
+ if (assoc_array_ptr_is_shortcut(parent)) {
+ shortcut = assoc_array_ptr_to_shortcut(parent);
+ cursor = parent;
+ parent = READ_ONCE(shortcut->back_pointer); /* Address dependency. */
+ slot = shortcut->parent_slot;
+ if (parent == stop)
+ return 0;
+ }
+
+ /* Ascend to next slot in parent node */
+ cursor = parent;
+ slot++;
+ goto continue_node;
+}
+
+/**
+ * assoc_array_iterate - Pass all objects in the array to a callback
+ * @array: The array to iterate over.
+ * @iterator: The callback function.
+ * @iterator_data: Private data for the callback function.
+ *
+ * Iterate over all the objects in an associative array. Each one will be
+ * presented to the iterator function.
+ *
+ * If the array is being modified concurrently with the iteration then it is
+ * possible that some objects in the array will be passed to the iterator
+ * callback more than once - though every object should be passed at least
+ * once. If this is undesirable then the caller must lock against modification
+ * for the duration of this function.
+ *
+ * The function will return 0 if no objects were in the array or else it will
+ * return the result of the last iterator function called. Iteration stops
+ * immediately if any call to the iteration function results in a non-zero
+ * return.
+ *
+ * The caller should hold the RCU read lock or better if concurrent
+ * modification is possible.
+ */
+int assoc_array_iterate(const struct assoc_array *array,
+ int (*iterator)(const void *object,
+ void *iterator_data),
+ void *iterator_data)
+{
+ struct assoc_array_ptr *root = READ_ONCE(array->root); /* Address dependency. */
+
+ if (!root)
+ return 0;
+ return assoc_array_subtree_iterate(root, NULL, iterator, iterator_data);
+}
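+
+/*
+ * Minimal iteration sketch (hypothetical array "my_array"), counting
+ * the objects under the RCU read lock:
+ *
+ * static int count_one(const void *object, void *data)
+ * {
+ * (*(size_t *)data)++;
+ * return 0;
+ * }
+ *
+ * size_t nr = 0;
+ *
+ * rcu_read_lock();
+ * assoc_array_iterate(&my_array, count_one, &nr);
+ * rcu_read_unlock();
+ */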
+
+enum assoc_array_walk_status {
+ assoc_array_walk_tree_empty,
+ assoc_array_walk_found_terminal_node,
+ assoc_array_walk_found_wrong_shortcut,
+};
+
+struct assoc_array_walk_result {
+ struct {
+ struct assoc_array_node *node; /* Node in which leaf might be found */
+ int level;
+ int slot;
+ } terminal_node;
+ struct {
+ struct assoc_array_shortcut *shortcut;
+ int level;
+ int sc_level;
+ unsigned long sc_segments;
+ unsigned long dissimilarity;
+ } wrong_shortcut;
+};
+
+/*
+ * Navigate through the internal tree looking for the closest node to the key.
+ */
+static enum assoc_array_walk_status
+assoc_array_walk(const struct assoc_array *array,
+ const struct assoc_array_ops *ops,
+ const void *index_key,
+ struct assoc_array_walk_result *result)
+{
+ struct assoc_array_shortcut *shortcut;
+ struct assoc_array_node *node;
+ struct assoc_array_ptr *cursor, *ptr;
+ unsigned long sc_segments, dissimilarity;
+ unsigned long segments;
+ int level, sc_level, next_sc_level;
+ int slot;
+
+ pr_devel("-->%s()\n", __func__);
+
+ cursor = READ_ONCE(array->root); /* Address dependency. */
+ if (!cursor)
+ return assoc_array_walk_tree_empty;
+
+ level = 0;
+
+ /* Use segments from the key for the new leaf to navigate through the
+ * internal tree, skipping through nodes and shortcuts that are on
+ * route to the destination. Eventually we'll come to a slot that is
+ * either empty or contains a leaf at which point we've found a node in
+ * which the leaf we're looking for might be found or into which it
+ * should be inserted.
+ */
+jumped:
+ segments = ops->get_key_chunk(index_key, level);
+ pr_devel("segments[%d]: %lx\n", level, segments);
+
+ if (assoc_array_ptr_is_shortcut(cursor))
+ goto follow_shortcut;
+
+consider_node:
+ node = assoc_array_ptr_to_node(cursor);
+ slot = segments >> (level & ASSOC_ARRAY_KEY_CHUNK_MASK);
+ slot &= ASSOC_ARRAY_FAN_MASK;
+ ptr = READ_ONCE(node->slots[slot]); /* Address dependency. */
+
+ pr_devel("consider slot %x [ix=%d type=%lu]\n",
+ slot, level, (unsigned long)ptr & 3);
+
+ if (!assoc_array_ptr_is_meta(ptr)) {
+ /* The node doesn't have a node/shortcut pointer in the slot
+ * corresponding to the index key that we have to follow.
+ */
+ result->terminal_node.node = node;
+ result->terminal_node.level = level;
+ result->terminal_node.slot = slot;
+ pr_devel("<--%s() = terminal_node\n", __func__);
+ return assoc_array_walk_found_terminal_node;
+ }
+
+ if (assoc_array_ptr_is_node(ptr)) {
+ /* There is a pointer to a node in the slot corresponding to
+ * this index key segment, so we need to follow it.
+ */
+ cursor = ptr;
+ level += ASSOC_ARRAY_LEVEL_STEP;
+ if ((level & ASSOC_ARRAY_KEY_CHUNK_MASK) != 0)
+ goto consider_node;
+ goto jumped;
+ }
+
+ /* There is a shortcut in the slot corresponding to the index key
+ * segment. We follow the shortcut if its partial index key matches
+ * this leaf's. Otherwise we need to split the shortcut.
+ */
+ cursor = ptr;
+follow_shortcut:
+ shortcut = assoc_array_ptr_to_shortcut(cursor);
+ pr_devel("shortcut to %d\n", shortcut->skip_to_level);
+ sc_level = level + ASSOC_ARRAY_LEVEL_STEP;
+ BUG_ON(sc_level > shortcut->skip_to_level);
+
+ do {
+ /* Check the leaf against the shortcut's index key a word at a
+ * time, trimming the final word (the shortcut stores the index
+ * key completely from the root to the shortcut's target).
+ */
+ if ((sc_level & ASSOC_ARRAY_KEY_CHUNK_MASK) == 0)
+ segments = ops->get_key_chunk(index_key, sc_level);
+
+ sc_segments = shortcut->index_key[sc_level >> ASSOC_ARRAY_KEY_CHUNK_SHIFT];
+ dissimilarity = segments ^ sc_segments;
+
+ if (round_up(sc_level, ASSOC_ARRAY_KEY_CHUNK_SIZE) > shortcut->skip_to_level) {
+ /* Trim segments that are beyond the shortcut */
+ int shift = shortcut->skip_to_level & ASSOC_ARRAY_KEY_CHUNK_MASK;
+ dissimilarity &= ~(ULONG_MAX << shift);
+ next_sc_level = shortcut->skip_to_level;
+ } else {
+ next_sc_level = sc_level + ASSOC_ARRAY_KEY_CHUNK_SIZE;
+ next_sc_level = round_down(next_sc_level, ASSOC_ARRAY_KEY_CHUNK_SIZE);
+ }
+
+ if (dissimilarity != 0) {
+ /* This shortcut points elsewhere */
+ result->wrong_shortcut.shortcut = shortcut;
+ result->wrong_shortcut.level = level;
+ result->wrong_shortcut.sc_level = sc_level;
+ result->wrong_shortcut.sc_segments = sc_segments;
+ result->wrong_shortcut.dissimilarity = dissimilarity;
+ return assoc_array_walk_found_wrong_shortcut;
+ }
+
+ sc_level = next_sc_level;
+ } while (sc_level < shortcut->skip_to_level);
+
+ /* The shortcut matches the leaf's index to this point. */
+ cursor = READ_ONCE(shortcut->next_node); /* Address dependency. */
+ if (((level ^ sc_level) & ~ASSOC_ARRAY_KEY_CHUNK_MASK) != 0) {
+ level = sc_level;
+ goto jumped;
+ } else {
+ level = sc_level;
+ goto consider_node;
+ }
+}
+
+/**
+ * assoc_array_find - Find an object by index key
+ * @array: The associative array to search.
+ * @ops: The operations to use.
+ * @index_key: The key to the object.
+ *
+ * Find an object in an associative array by walking through the internal tree
+ * to the node that should contain the object and then searching the leaves
+ * there. NULL is returned if the requested object was not found in the array.
+ *
+ * The caller must hold the RCU read lock or better.
+ */
+void *assoc_array_find(const struct assoc_array *array,
+ const struct assoc_array_ops *ops,
+ const void *index_key)
+{
+ struct assoc_array_walk_result result;
+ const struct assoc_array_node *node;
+ const struct assoc_array_ptr *ptr;
+ const void *leaf;
+ int slot;
+
+ if (assoc_array_walk(array, ops, index_key, &result) !=
+ assoc_array_walk_found_terminal_node)
+ return NULL;
+
+ node = result.terminal_node.node;
+
+ /* If the target key is present in the array, it has to be pointed to by
+ * the terminal node.
+ */
+ for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
+ ptr = READ_ONCE(node->slots[slot]); /* Address dependency. */
+ if (ptr && assoc_array_ptr_is_leaf(ptr)) {
+ /* We need a barrier between the read of the pointer
+ * and dereferencing the pointer - but only if we are
+ * actually going to dereference it.
+ */
+ leaf = assoc_array_ptr_to_leaf(ptr);
+ if (ops->compare_object(leaf, index_key))
+ return (void *)leaf;
+ }
+ }
+
+ return NULL;
+}
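+
+/*
+ * Lookup sketch (hypothetical "my_array", "my_ops" and "my_key"; the
+ * RCU read lock satisfies the requirement above):
+ *
+ * rcu_read_lock();
+ * obj = assoc_array_find(&my_array, &my_ops, my_key);
+ * rcu_read_unlock();
+ */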
+
+/*
+ * Destructively iterate over an associative array. The caller must prevent
+ * other simultaneous accesses.
+ */
+static void assoc_array_destroy_subtree(struct assoc_array_ptr *root,
+ const struct assoc_array_ops *ops)
+{
+ struct assoc_array_shortcut *shortcut;
+ struct assoc_array_node *node;
+ struct assoc_array_ptr *cursor, *parent = NULL;
+ int slot = -1;
+
+ pr_devel("-->%s()\n", __func__);
+
+ cursor = root;
+ if (!cursor) {
+ pr_devel("empty\n");
+ return;
+ }
+
+move_to_meta:
+ if (assoc_array_ptr_is_shortcut(cursor)) {
+ /* Descend through a shortcut */
+ pr_devel("[%d] shortcut\n", slot);
+ BUG_ON(!assoc_array_ptr_is_shortcut(cursor));
+ shortcut = assoc_array_ptr_to_shortcut(cursor);
+ BUG_ON(shortcut->back_pointer != parent);
+ BUG_ON(slot != -1 && shortcut->parent_slot != slot);
+ parent = cursor;
+ cursor = shortcut->next_node;
+ slot = -1;
+ BUG_ON(!assoc_array_ptr_is_node(cursor));
+ }
+
+ pr_devel("[%d] node\n", slot);
+ node = assoc_array_ptr_to_node(cursor);
+ BUG_ON(node->back_pointer != parent);
+ BUG_ON(slot != -1 && node->parent_slot != slot);
+ slot = 0;
+
+continue_node:
+ pr_devel("Node %p [back=%p]\n", node, node->back_pointer);
+ for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
+ struct assoc_array_ptr *ptr = node->slots[slot];
+ if (!ptr)
+ continue;
+ if (assoc_array_ptr_is_meta(ptr)) {
+ parent = cursor;
+ cursor = ptr;
+ goto move_to_meta;
+ }
+
+ if (ops) {
+ pr_devel("[%d] free leaf\n", slot);
+ ops->free_object(assoc_array_ptr_to_leaf(ptr));
+ }
+ }
+
+ parent = node->back_pointer;
+ slot = node->parent_slot;
+ pr_devel("free node\n");
+ kfree(node);
+ if (!parent)
+ return; /* Done */
+
+ /* Move back up to the parent (may need to free a shortcut on
+ * the way up) */
+ if (assoc_array_ptr_is_shortcut(parent)) {
+ shortcut = assoc_array_ptr_to_shortcut(parent);
+ BUG_ON(shortcut->next_node != cursor);
+ cursor = parent;
+ parent = shortcut->back_pointer;
+ slot = shortcut->parent_slot;
+ pr_devel("free shortcut\n");
+ kfree(shortcut);
+ if (!parent)
+ return;
+
+ BUG_ON(!assoc_array_ptr_is_node(parent));
+ }
+
+ /* Ascend to next slot in parent node */
+ pr_devel("ascend to %p[%d]\n", parent, slot);
+ cursor = parent;
+ node = assoc_array_ptr_to_node(cursor);
+ slot++;
+ goto continue_node;
+}
+
+/**
+ * assoc_array_destroy - Destroy an associative array
+ * @array: The array to destroy.
+ * @ops: The operations to use.
+ *
+ * Discard all metadata and free all objects in an associative array. The
+ * array will be empty and ready to use again upon completion. This function
+ * cannot fail.
+ *
+ * The caller must prevent all other accesses whilst this takes place as no
+ * attempt is made to adjust pointers gracefully to permit RCU readlock-holding
+ * accesses to continue. On the other hand, no memory allocation is required.
+ */
+void assoc_array_destroy(struct assoc_array *array,
+ const struct assoc_array_ops *ops)
+{
+ assoc_array_destroy_subtree(array->root, ops);
+ array->root = NULL;
+}
+
+/*
+ * Handle insertion into an empty tree.
+ */
+static bool assoc_array_insert_in_empty_tree(struct assoc_array_edit *edit)
+{
+ struct assoc_array_node *new_n0;
+
+ pr_devel("-->%s()\n", __func__);
+
+ new_n0 = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
+ if (!new_n0)
+ return false;
+
+ edit->new_meta[0] = assoc_array_node_to_ptr(new_n0);
+ edit->leaf_p = &new_n0->slots[0];
+ edit->adjust_count_on = new_n0;
+ edit->set[0].ptr = &edit->array->root;
+ edit->set[0].to = assoc_array_node_to_ptr(new_n0);
+
+ pr_devel("<--%s() = ok [no root]\n", __func__);
+ return true;
+}
+
+/*
+ * Handle insertion into a terminal node.
+ */
+static bool assoc_array_insert_into_terminal_node(struct assoc_array_edit *edit,
+ const struct assoc_array_ops *ops,
+ const void *index_key,
+ struct assoc_array_walk_result *result)
+{
+ struct assoc_array_shortcut *shortcut, *new_s0;
+ struct assoc_array_node *node, *new_n0, *new_n1, *side;
+ struct assoc_array_ptr *ptr;
+ unsigned long dissimilarity, base_seg, blank;
+ size_t keylen;
+ bool have_meta;
+ int level, diff;
+ int slot, next_slot, free_slot, i, j;
+
+ node = result->terminal_node.node;
+ level = result->terminal_node.level;
+ edit->segment_cache[ASSOC_ARRAY_FAN_OUT] = result->terminal_node.slot;
+
+ pr_devel("-->%s()\n", __func__);
+
+ /* We arrived at a node which doesn't have an onward node or shortcut
+ * pointer that we have to follow. This means that (a) the leaf we
+ * want must go here (either by insertion or replacement) or (b) we
+ * need to split this node and insert in one of the fragments.
+ */
+ free_slot = -1;
+
+ /* Firstly, we have to check the leaves in this node to see if there's
+ * a matching one we should replace in place.
+ */
+ for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
+ ptr = node->slots[i];
+ if (!ptr) {
+ free_slot = i;
+ continue;
+ }
+ if (assoc_array_ptr_is_leaf(ptr) &&
+ ops->compare_object(assoc_array_ptr_to_leaf(ptr),
+ index_key)) {
+ pr_devel("replace in slot %d\n", i);
+ edit->leaf_p = &node->slots[i];
+ edit->dead_leaf = node->slots[i];
+ pr_devel("<--%s() = ok [replace]\n", __func__);
+ return true;
+ }
+ }
+
+ /* If there is a free slot in this node then we can just insert the
+ * leaf here.
+ */
+ if (free_slot >= 0) {
+ pr_devel("insert in free slot %d\n", free_slot);
+ edit->leaf_p = &node->slots[free_slot];
+ edit->adjust_count_on = node;
+ pr_devel("<--%s() = ok [insert]\n", __func__);
+ return true;
+ }
+
+ /* The node has no spare slots - so we're either going to have to split
+ * it or insert another node before it.
+ *
+ * Whatever, we're going to need at least two new nodes - so allocate
+ * those now. We may also need a new shortcut, but we deal with that
+ * when we need it.
+ */
+ new_n0 = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
+ if (!new_n0)
+ return false;
+ edit->new_meta[0] = assoc_array_node_to_ptr(new_n0);
+ new_n1 = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
+ if (!new_n1)
+ return false;
+ edit->new_meta[1] = assoc_array_node_to_ptr(new_n1);
+
+ /* We need to find out how similar the leaves are. */
+ pr_devel("no spare slots\n");
+ have_meta = false;
+ for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
+ ptr = node->slots[i];
+ if (assoc_array_ptr_is_meta(ptr)) {
+ edit->segment_cache[i] = 0xff;
+ have_meta = true;
+ continue;
+ }
+ base_seg = ops->get_object_key_chunk(
+ assoc_array_ptr_to_leaf(ptr), level);
+ base_seg >>= level & ASSOC_ARRAY_KEY_CHUNK_MASK;
+ edit->segment_cache[i] = base_seg & ASSOC_ARRAY_FAN_MASK;
+ }
+
+ if (have_meta) {
+ pr_devel("have meta\n");
+ goto split_node;
+ }
+
+ /* The node contains only leaves */
+ dissimilarity = 0;
+ base_seg = edit->segment_cache[0];
+ for (i = 1; i < ASSOC_ARRAY_FAN_OUT; i++)
+ dissimilarity |= edit->segment_cache[i] ^ base_seg;
+
+ pr_devel("only leaves; dissimilarity=%lx\n", dissimilarity);
+
+ if ((dissimilarity & ASSOC_ARRAY_FAN_MASK) == 0) {
+ /* The old leaves all cluster in the same slot. We will need
+ * to insert a shortcut if the new leaf wants to cluster with them.
+ */
+ if ((edit->segment_cache[ASSOC_ARRAY_FAN_OUT] ^ base_seg) == 0)
+ goto all_leaves_cluster_together;
+
+ /* Otherwise all the old leaves cluster in the same slot, but
+ * the new leaf wants to go into a different slot - so we
+ * create a new node (n0) to hold the new leaf and a pointer to
+ * a new node (n1) holding all the old leaves.
+ *
+ * This can be done by falling through to the node splitting
+ * path.
+ */
+ pr_devel("present leaves cluster but not new leaf\n");
+ }
+
+split_node:
+ pr_devel("split node\n");
+
+ /* We need to split the current node. The node may contain anything
+ * from a single leaf (in the one-leaf case, that leaf will cluster
+ * with the new leaf) plus meta-pointers in the rest of the slots, up
+ * to all leaves, some of which may cluster.
+ *
+ * It won't contain the case in which all the current leaves plus the
+ * new leaves want to cluster in the same slot.
+ *
+ * We need to expel at least two leaves out of a set consisting of the
+ * leaves in the node and the new leaf. The current meta pointers can
+ * just be copied as they shouldn't cluster with any of the leaves.
+ *
+ * We need a new node (n0) to replace the current one and a new node to
+ * take the expelled nodes (n1).
+ */
+ edit->set[0].to = assoc_array_node_to_ptr(new_n0);
+ new_n0->back_pointer = node->back_pointer;
+ new_n0->parent_slot = node->parent_slot;
+ new_n1->back_pointer = assoc_array_node_to_ptr(new_n0);
+ new_n1->parent_slot = -1; /* Need to calculate this */
+
+do_split_node:
+ pr_devel("do_split_node\n");
+
+ new_n0->nr_leaves_on_branch = node->nr_leaves_on_branch;
+ new_n1->nr_leaves_on_branch = 0;
+
+ /* Begin by finding two matching leaves. There have to be at least two
+ * that match - even if there are meta pointers - because any leaf that
+ * would match a slot with a meta pointer in it must be somewhere
+ * behind that meta pointer and cannot be here. Further, given N
+ * remaining leaf slots, we now have N+1 leaves to go in them.
+ */
+ for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
+ slot = edit->segment_cache[i];
+ if (slot != 0xff)
+ for (j = i + 1; j < ASSOC_ARRAY_FAN_OUT + 1; j++)
+ if (edit->segment_cache[j] == slot)
+ goto found_slot_for_multiple_occupancy;
+ }
+found_slot_for_multiple_occupancy:
+ pr_devel("same slot: %x %x [%02x]\n", i, j, slot);
+ BUG_ON(i >= ASSOC_ARRAY_FAN_OUT);
+ BUG_ON(j >= ASSOC_ARRAY_FAN_OUT + 1);
+ BUG_ON(slot >= ASSOC_ARRAY_FAN_OUT);
+
+ new_n1->parent_slot = slot;
+
+ /* Metadata pointers cannot change slot */
+ for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++)
+ if (assoc_array_ptr_is_meta(node->slots[i]))
+ new_n0->slots[i] = node->slots[i];
+ else
+ new_n0->slots[i] = NULL;
+ BUG_ON(new_n0->slots[slot] != NULL);
+ new_n0->slots[slot] = assoc_array_node_to_ptr(new_n1);
+
+ /* Filter the leaf pointers between the new nodes */
+ free_slot = -1;
+ next_slot = 0;
+ for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
+ if (assoc_array_ptr_is_meta(node->slots[i]))
+ continue;
+ if (edit->segment_cache[i] == slot) {
+ new_n1->slots[next_slot++] = node->slots[i];
+ new_n1->nr_leaves_on_branch++;
+ } else {
+ do {
+ free_slot++;
+ } while (new_n0->slots[free_slot] != NULL);
+ new_n0->slots[free_slot] = node->slots[i];
+ }
+ }
+
+ pr_devel("filtered: f=%x n=%x\n", free_slot, next_slot);
+
+ if (edit->segment_cache[ASSOC_ARRAY_FAN_OUT] != slot) {
+ do {
+ free_slot++;
+ } while (new_n0->slots[free_slot] != NULL);
+ edit->leaf_p = &new_n0->slots[free_slot];
+ edit->adjust_count_on = new_n0;
+ } else {
+ edit->leaf_p = &new_n1->slots[next_slot++];
+ edit->adjust_count_on = new_n1;
+ }
+
+ BUG_ON(next_slot <= 1);
+
+ edit->set_backpointers_to = assoc_array_node_to_ptr(new_n0);
+ for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
+ if (edit->segment_cache[i] == 0xff) {
+ ptr = node->slots[i];
+ BUG_ON(assoc_array_ptr_is_leaf(ptr));
+ if (assoc_array_ptr_is_node(ptr)) {
+ side = assoc_array_ptr_to_node(ptr);
+ edit->set_backpointers[i] = &side->back_pointer;
+ } else {
+ shortcut = assoc_array_ptr_to_shortcut(ptr);
+ edit->set_backpointers[i] = &shortcut->back_pointer;
+ }
+ }
+ }
+
+ ptr = node->back_pointer;
+ if (!ptr)
+ edit->set[0].ptr = &edit->array->root;
+ else if (assoc_array_ptr_is_node(ptr))
+ edit->set[0].ptr = &assoc_array_ptr_to_node(ptr)->slots[node->parent_slot];
+ else
+ edit->set[0].ptr = &assoc_array_ptr_to_shortcut(ptr)->next_node;
+ edit->excised_meta[0] = assoc_array_node_to_ptr(node);
+ pr_devel("<--%s() = ok [split node]\n", __func__);
+ return true;
+
+all_leaves_cluster_together:
+ /* All the leaves, new and old, want to cluster together in this node
+ * in the same slot, so we have to replace this node with a shortcut to
+ * skip over the identical parts of the key and then place a pair of
+ * nodes, one inside the other, at the end of the shortcut and
+ * distribute the keys between them.
+ *
+ * Firstly we need to work out where the leaves start diverging as a
+ * bit position into their keys so that we know how big the shortcut
+ * needs to be.
+ *
+ * We only need to make a single pass of N of the N+1 leaves because if
+ * any keys differ between themselves at bit X then at least one of
+ * them must also differ from the base key at bit X or before.
+ */
+ pr_devel("all leaves cluster together\n");
+ diff = INT_MAX;
+ for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
+ int x = ops->diff_objects(assoc_array_ptr_to_leaf(node->slots[i]),
+ index_key);
+ if (x < diff) {
+ BUG_ON(x < 0);
+ diff = x;
+ }
+ }
+ BUG_ON(diff == INT_MAX);
+ BUG_ON(diff < level + ASSOC_ARRAY_LEVEL_STEP);
+
+ keylen = round_up(diff, ASSOC_ARRAY_KEY_CHUNK_SIZE);
+ keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT;
+
+ new_s0 = kzalloc(struct_size(new_s0, index_key, keylen), GFP_KERNEL);
+ if (!new_s0)
+ return false;
+ edit->new_meta[2] = assoc_array_shortcut_to_ptr(new_s0);
+
+ edit->set[0].to = assoc_array_shortcut_to_ptr(new_s0);
+ new_s0->back_pointer = node->back_pointer;
+ new_s0->parent_slot = node->parent_slot;
+ new_s0->next_node = assoc_array_node_to_ptr(new_n0);
+ new_n0->back_pointer = assoc_array_shortcut_to_ptr(new_s0);
+ new_n0->parent_slot = 0;
+ new_n1->back_pointer = assoc_array_node_to_ptr(new_n0);
+ new_n1->parent_slot = -1; /* Need to calculate this */
+
+ new_s0->skip_to_level = level = diff & ~ASSOC_ARRAY_LEVEL_STEP_MASK;
+ pr_devel("skip_to_level = %d [diff %d]\n", level, diff);
+ BUG_ON(level <= 0);
+
+ for (i = 0; i < keylen; i++)
+ new_s0->index_key[i] =
+ ops->get_key_chunk(index_key, i * ASSOC_ARRAY_KEY_CHUNK_SIZE);
+
+ if (level & ASSOC_ARRAY_KEY_CHUNK_MASK) {
+ blank = ULONG_MAX << (level & ASSOC_ARRAY_KEY_CHUNK_MASK);
+ pr_devel("blank off [%zu] %d: %lx\n", keylen - 1, level, blank);
+ new_s0->index_key[keylen - 1] &= ~blank;
+ }
+
+ /* This now reduces to a node splitting exercise for which we'll need
+ * to regenerate the disparity table.
+ */
+ for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
+ ptr = node->slots[i];
+ base_seg = ops->get_object_key_chunk(assoc_array_ptr_to_leaf(ptr),
+ level);
+ base_seg >>= level & ASSOC_ARRAY_KEY_CHUNK_MASK;
+ edit->segment_cache[i] = base_seg & ASSOC_ARRAY_FAN_MASK;
+ }
+
+ base_seg = ops->get_key_chunk(index_key, level);
+ base_seg >>= level & ASSOC_ARRAY_KEY_CHUNK_MASK;
+ edit->segment_cache[ASSOC_ARRAY_FAN_OUT] = base_seg & ASSOC_ARRAY_FAN_MASK;
+ goto do_split_node;
+}
+
+/*
+ * Handle insertion into the middle of a shortcut.
+ */
+static bool assoc_array_insert_mid_shortcut(struct assoc_array_edit *edit,
+ const struct assoc_array_ops *ops,
+ struct assoc_array_walk_result *result)
+{
+ struct assoc_array_shortcut *shortcut, *new_s0, *new_s1;
+ struct assoc_array_node *node, *new_n0, *side;
+ unsigned long sc_segments, dissimilarity, blank;
+ size_t keylen;
+ int level, sc_level, diff;
+ int sc_slot;
+
+ shortcut = result->wrong_shortcut.shortcut;
+ level = result->wrong_shortcut.level;
+ sc_level = result->wrong_shortcut.sc_level;
+ sc_segments = result->wrong_shortcut.sc_segments;
+ dissimilarity = result->wrong_shortcut.dissimilarity;
+
+ pr_devel("-->%s(ix=%d dis=%lx scix=%d)\n",
+ __func__, level, dissimilarity, sc_level);
+
+ /* We need to split a shortcut and insert a node between the two
+ * pieces. Zero-length pieces will be dispensed with entirely.
+ *
+ * First of all, we need to find out in which level the first
+ * difference was.
+ */
+ diff = __ffs(dissimilarity);
+ diff &= ~ASSOC_ARRAY_LEVEL_STEP_MASK;
+ diff += sc_level & ~ASSOC_ARRAY_KEY_CHUNK_MASK;
+ pr_devel("diff=%d\n", diff);
+
+ if (!shortcut->back_pointer) {
+ edit->set[0].ptr = &edit->array->root;
+ } else if (assoc_array_ptr_is_node(shortcut->back_pointer)) {
+ node = assoc_array_ptr_to_node(shortcut->back_pointer);
+ edit->set[0].ptr = &node->slots[shortcut->parent_slot];
+ } else {
+ BUG();
+ }
+
+ edit->excised_meta[0] = assoc_array_shortcut_to_ptr(shortcut);
+
+ /* Create a new node now since we're going to need it anyway */
+ new_n0 = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
+ if (!new_n0)
+ return false;
+ edit->new_meta[0] = assoc_array_node_to_ptr(new_n0);
+ edit->adjust_count_on = new_n0;
+
+ /* Insert a new shortcut before the new node if this segment isn't of
+ * zero length - otherwise we just connect the new node directly to the
+ * parent.
+ */
+ level += ASSOC_ARRAY_LEVEL_STEP;
+ if (diff > level) {
+ pr_devel("pre-shortcut %d...%d\n", level, diff);
+ keylen = round_up(diff, ASSOC_ARRAY_KEY_CHUNK_SIZE);
+ keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT;
+
+ new_s0 = kzalloc(struct_size(new_s0, index_key, keylen),
+ GFP_KERNEL);
+ if (!new_s0)
+ return false;
+ edit->new_meta[1] = assoc_array_shortcut_to_ptr(new_s0);
+ edit->set[0].to = assoc_array_shortcut_to_ptr(new_s0);
+ new_s0->back_pointer = shortcut->back_pointer;
+ new_s0->parent_slot = shortcut->parent_slot;
+ new_s0->next_node = assoc_array_node_to_ptr(new_n0);
+ new_s0->skip_to_level = diff;
+
+ new_n0->back_pointer = assoc_array_shortcut_to_ptr(new_s0);
+ new_n0->parent_slot = 0;
+
+ memcpy(new_s0->index_key, shortcut->index_key,
+ flex_array_size(new_s0, index_key, keylen));
+
+ blank = ULONG_MAX << (diff & ASSOC_ARRAY_KEY_CHUNK_MASK);
+ pr_devel("blank off [%zu] %d: %lx\n", keylen - 1, diff, blank);
+ new_s0->index_key[keylen - 1] &= ~blank;
+ } else {
+ pr_devel("no pre-shortcut\n");
+ edit->set[0].to = assoc_array_node_to_ptr(new_n0);
+ new_n0->back_pointer = shortcut->back_pointer;
+ new_n0->parent_slot = shortcut->parent_slot;
+ }
+
+ side = assoc_array_ptr_to_node(shortcut->next_node);
+ new_n0->nr_leaves_on_branch = side->nr_leaves_on_branch;
+
+ /* We need to know which slot in the new node is going to take a
+ * metadata pointer.
+ */
+ sc_slot = sc_segments >> (diff & ASSOC_ARRAY_KEY_CHUNK_MASK);
+ sc_slot &= ASSOC_ARRAY_FAN_MASK;
+
+ pr_devel("new slot %lx >> %d -> %d\n",
+ sc_segments, diff & ASSOC_ARRAY_KEY_CHUNK_MASK, sc_slot);
+
+ /* Determine whether we need to follow the new node with a replacement
+ * for the current shortcut. We could in theory reuse the current
+ * shortcut if its parent slot number doesn't change - but that's a
+ * 1-in-16 chance so not worth expending the code upon.
+ */
+ level = diff + ASSOC_ARRAY_LEVEL_STEP;
+ if (level < shortcut->skip_to_level) {
+ pr_devel("post-shortcut %d...%d\n", level, shortcut->skip_to_level);
+ keylen = round_up(shortcut->skip_to_level, ASSOC_ARRAY_KEY_CHUNK_SIZE);
+ keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT;
+
+ new_s1 = kzalloc(struct_size(new_s1, index_key, keylen),
+ GFP_KERNEL);
+ if (!new_s1)
+ return false;
+ edit->new_meta[2] = assoc_array_shortcut_to_ptr(new_s1);
+
+ new_s1->back_pointer = assoc_array_node_to_ptr(new_n0);
+ new_s1->parent_slot = sc_slot;
+ new_s1->next_node = shortcut->next_node;
+ new_s1->skip_to_level = shortcut->skip_to_level;
+
+ new_n0->slots[sc_slot] = assoc_array_shortcut_to_ptr(new_s1);
+
+ memcpy(new_s1->index_key, shortcut->index_key,
+ flex_array_size(new_s1, index_key, keylen));
+
+ edit->set[1].ptr = &side->back_pointer;
+ edit->set[1].to = assoc_array_shortcut_to_ptr(new_s1);
+ } else {
+ pr_devel("no post-shortcut\n");
+
+ /* We don't have to replace the pointed-to node as long as we
+ * use memory barriers to make sure the parent slot number is
+ * changed before the back pointer (the parent slot number is
+ * irrelevant to the old parent shortcut).
+ */
+ new_n0->slots[sc_slot] = shortcut->next_node;
+ edit->set_parent_slot[0].p = &side->parent_slot;
+ edit->set_parent_slot[0].to = sc_slot;
+ edit->set[1].ptr = &side->back_pointer;
+ edit->set[1].to = assoc_array_node_to_ptr(new_n0);
+ }
+
+ /* Install the new leaf in a spare slot in the new node. */
+ if (sc_slot == 0)
+ edit->leaf_p = &new_n0->slots[1];
+ else
+ edit->leaf_p = &new_n0->slots[0];
+
+ pr_devel("<--%s() = ok [split shortcut]\n", __func__);
+ return true;
+}
+
+/**
+ * assoc_array_insert - Script insertion of an object into an associative array
+ * @array: The array to insert into.
+ * @ops: The operations to use.
+ * @index_key: The key to insert at.
+ * @object: The object to insert.
+ *
+ * Precalculate and preallocate a script for the insertion or replacement of an
+ * object in an associative array. This results in an edit script that can
+ * either be applied or cancelled.
+ *
+ * The function returns a pointer to an edit script or -ENOMEM.
+ *
+ * The caller should lock against other modifications and must continue to hold
+ * the lock until assoc_array_apply_edit() has been called.
+ *
+ * Accesses to the tree may take place concurrently with this function,
+ * provided they hold the RCU read lock.
+ */
+struct assoc_array_edit *assoc_array_insert(struct assoc_array *array,
+ const struct assoc_array_ops *ops,
+ const void *index_key,
+ void *object)
+{
+ struct assoc_array_walk_result result;
+ struct assoc_array_edit *edit;
+
+ pr_devel("-->%s()\n", __func__);
+
+ /* The leaf pointer we're given must not have the bottom bit set as we
+ * use those for type-marking the pointer. NULL pointers are also not
+ * allowed as they indicate an empty slot but we have to allow them
+ * here as they can be updated later.
+ */
+ BUG_ON(assoc_array_ptr_is_meta(object));
+
+ edit = kzalloc(sizeof(struct assoc_array_edit), GFP_KERNEL);
+ if (!edit)
+ return ERR_PTR(-ENOMEM);
+ edit->array = array;
+ edit->ops = ops;
+ edit->leaf = assoc_array_leaf_to_ptr(object);
+ edit->adjust_count_by = 1;
+
+ switch (assoc_array_walk(array, ops, index_key, &result)) {
+ case assoc_array_walk_tree_empty:
+ /* Allocate a root node if there isn't one yet */
+ if (!assoc_array_insert_in_empty_tree(edit))
+ goto enomem;
+ return edit;
+
+ case assoc_array_walk_found_terminal_node:
+ /* We found a node that doesn't have a node/shortcut pointer in
+ * the slot corresponding to the index key that we have to
+ * follow.
+ */
+ if (!assoc_array_insert_into_terminal_node(edit, ops, index_key,
+ &result))
+ goto enomem;
+ return edit;
+
+ case assoc_array_walk_found_wrong_shortcut:
+ /* We found a shortcut that didn't match our key in a slot we
+ * needed to follow.
+ */
+ if (!assoc_array_insert_mid_shortcut(edit, ops, &result))
+ goto enomem;
+ return edit;
+ }
+
+enomem:
+ /* Clean up after an out of memory error */
+ pr_devel("enomem\n");
+ assoc_array_cancel_edit(edit);
+ return ERR_PTR(-ENOMEM);
+}
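+
+/*
+ * Editor's sketch (not part of the original file): the edit-script lifecycle
+ * for an insertion.  "demo_ops" and "demo_lock" are hypothetical stand-ins
+ * for the caller's assoc_array_ops and modification lock.
+ */
+#if 0 /* illustrative only */
+static int demo_insert(struct assoc_array *array, const void *key,
+		       void *object)
+{
+	struct assoc_array_edit *edit;
+
+	mutex_lock(&demo_lock);		/* exclude other modifiers */
+	edit = assoc_array_insert(array, &demo_ops, key, object);
+	if (IS_ERR(edit)) {
+		mutex_unlock(&demo_lock);
+		return PTR_ERR(edit);	/* -ENOMEM */
+	}
+	assoc_array_apply_edit(edit);	/* preallocated; cannot fail */
+	mutex_unlock(&demo_lock);
+	return 0;
+}
+#endif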
+
+/**
+ * assoc_array_insert_set_object - Set the new object pointer in an edit script
+ * @edit: The edit script to modify.
+ * @object: The object pointer to set.
+ *
+ * Change the object to be inserted in an edit script. The object pointed to
+ * by the old object is not freed. This must be done prior to applying the
+ * script.
+ */
+void assoc_array_insert_set_object(struct assoc_array_edit *edit, void *object)
+{
+ BUG_ON(!object);
+ edit->leaf = assoc_array_leaf_to_ptr(object);
+}
+
+struct assoc_array_delete_collapse_context {
+ struct assoc_array_node *node;
+ const void *skip_leaf;
+ int slot;
+};
+
+/*
+ * Subtree collapse to node iterator.
+ */
+static int assoc_array_delete_collapse_iterator(const void *leaf,
+ void *iterator_data)
+{
+ struct assoc_array_delete_collapse_context *collapse = iterator_data;
+
+ if (leaf == collapse->skip_leaf)
+ return 0;
+
+ BUG_ON(collapse->slot >= ASSOC_ARRAY_FAN_OUT);
+
+ collapse->node->slots[collapse->slot++] = assoc_array_leaf_to_ptr(leaf);
+ return 0;
+}
+
+/**
+ * assoc_array_delete - Script deletion of an object from an associative array
+ * @array: The array to search.
+ * @ops: The operations to use.
+ * @index_key: The key to the object.
+ *
+ * Precalculate and preallocate a script for the deletion of an object from an
+ * associative array. This results in an edit script that can either be
+ * applied or cancelled.
+ *
+ * The function returns a pointer to an edit script if the object was found,
+ * NULL if the object was not found, or -ENOMEM on allocation failure.
+ *
+ * The caller should lock against other modifications and must continue to hold
+ * the lock until assoc_array_apply_edit() has been called.
+ *
+ * Accesses to the tree may take place concurrently with this function,
+ * provided they hold the RCU read lock.
+ */
+struct assoc_array_edit *assoc_array_delete(struct assoc_array *array,
+ const struct assoc_array_ops *ops,
+ const void *index_key)
+{
+ struct assoc_array_delete_collapse_context collapse;
+ struct assoc_array_walk_result result;
+ struct assoc_array_node *node, *new_n0;
+ struct assoc_array_edit *edit;
+ struct assoc_array_ptr *ptr;
+ bool has_meta;
+ int slot, i;
+
+ pr_devel("-->%s()\n", __func__);
+
+ edit = kzalloc(sizeof(struct assoc_array_edit), GFP_KERNEL);
+ if (!edit)
+ return ERR_PTR(-ENOMEM);
+ edit->array = array;
+ edit->ops = ops;
+ edit->adjust_count_by = -1;
+
+ switch (assoc_array_walk(array, ops, index_key, &result)) {
+ case assoc_array_walk_found_terminal_node:
+ /* We found a node that should contain the leaf we've been
+ * asked to remove - *if* it's in the tree.
+ */
+ pr_devel("terminal_node\n");
+ node = result.terminal_node.node;
+
+ for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
+ ptr = node->slots[slot];
+ if (ptr &&
+ assoc_array_ptr_is_leaf(ptr) &&
+ ops->compare_object(assoc_array_ptr_to_leaf(ptr),
+ index_key))
+ goto found_leaf;
+ }
+ fallthrough;
+ case assoc_array_walk_tree_empty:
+ case assoc_array_walk_found_wrong_shortcut:
+ default:
+ assoc_array_cancel_edit(edit);
+ pr_devel("not found\n");
+ return NULL;
+ }
+
+found_leaf:
+ BUG_ON(array->nr_leaves_on_tree <= 0);
+
+ /* In the simplest form of deletion we just clear the slot and release
+ * the leaf after a suitable interval.
+ */
+ edit->dead_leaf = node->slots[slot];
+ edit->set[0].ptr = &node->slots[slot];
+ edit->set[0].to = NULL;
+ edit->adjust_count_on = node;
+
+ /* If that concludes erasure of the last leaf, then delete the entire
+ * internal array.
+ */
+ if (array->nr_leaves_on_tree == 1) {
+ edit->set[1].ptr = &array->root;
+ edit->set[1].to = NULL;
+ edit->adjust_count_on = NULL;
+ edit->excised_subtree = array->root;
+ pr_devel("all gone\n");
+ return edit;
+ }
+
+ /* However, we'd also like to clear up some metadata blocks if we
+ * possibly can.
+ *
+ * We go for a simple algorithm of: if this node has FAN_OUT or fewer
+ * leaves in it, then attempt to collapse it - and attempt to
+ * recursively collapse up the tree.
+ *
+ * We could also try and collapse in partially filled subtrees to take
+ * up space in this node.
+ */
+ if (node->nr_leaves_on_branch <= ASSOC_ARRAY_FAN_OUT + 1) {
+ struct assoc_array_node *parent, *grandparent;
+ struct assoc_array_ptr *ptr;
+
+ /* First of all, we need to know if this node has metadata so
+ * that we don't try collapsing if all the leaves are already
+ * here.
+ */
+ has_meta = false;
+ for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
+ ptr = node->slots[i];
+ if (assoc_array_ptr_is_meta(ptr)) {
+ has_meta = true;
+ break;
+ }
+ }
+
+ pr_devel("leaves: %ld [m=%d]\n",
+ node->nr_leaves_on_branch - 1, has_meta);
+
+ /* Look further up the tree to see if we can collapse this node
+ * into a more proximal node too.
+ */
+ parent = node;
+ collapse_up:
+ pr_devel("collapse subtree: %ld\n", parent->nr_leaves_on_branch);
+
+ ptr = parent->back_pointer;
+ if (!ptr)
+ goto do_collapse;
+ if (assoc_array_ptr_is_shortcut(ptr)) {
+ struct assoc_array_shortcut *s = assoc_array_ptr_to_shortcut(ptr);
+ ptr = s->back_pointer;
+ if (!ptr)
+ goto do_collapse;
+ }
+
+ grandparent = assoc_array_ptr_to_node(ptr);
+ if (grandparent->nr_leaves_on_branch <= ASSOC_ARRAY_FAN_OUT + 1) {
+ parent = grandparent;
+ goto collapse_up;
+ }
+
+ do_collapse:
+ /* There's no point collapsing if the original node has no meta
+ * pointers to discard and if we didn't merge into one of that
+ * node's ancestry.
+ */
+ if (has_meta || parent != node) {
+ node = parent;
+
+ /* Create a new node to collapse into */
+ new_n0 = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
+ if (!new_n0)
+ goto enomem;
+ edit->new_meta[0] = assoc_array_node_to_ptr(new_n0);
+
+ new_n0->back_pointer = node->back_pointer;
+ new_n0->parent_slot = node->parent_slot;
+ new_n0->nr_leaves_on_branch = node->nr_leaves_on_branch;
+ edit->adjust_count_on = new_n0;
+
+ collapse.node = new_n0;
+ collapse.skip_leaf = assoc_array_ptr_to_leaf(edit->dead_leaf);
+ collapse.slot = 0;
+ assoc_array_subtree_iterate(assoc_array_node_to_ptr(node),
+ node->back_pointer,
+ assoc_array_delete_collapse_iterator,
+ &collapse);
+ pr_devel("collapsed %d,%lu\n", collapse.slot, new_n0->nr_leaves_on_branch);
+ BUG_ON(collapse.slot != new_n0->nr_leaves_on_branch - 1);
+
+ if (!node->back_pointer) {
+ edit->set[1].ptr = &array->root;
+ } else if (assoc_array_ptr_is_leaf(node->back_pointer)) {
+ BUG();
+ } else if (assoc_array_ptr_is_node(node->back_pointer)) {
+ struct assoc_array_node *p =
+ assoc_array_ptr_to_node(node->back_pointer);
+ edit->set[1].ptr = &p->slots[node->parent_slot];
+ } else if (assoc_array_ptr_is_shortcut(node->back_pointer)) {
+ struct assoc_array_shortcut *s =
+ assoc_array_ptr_to_shortcut(node->back_pointer);
+ edit->set[1].ptr = &s->next_node;
+ }
+ edit->set[1].to = assoc_array_node_to_ptr(new_n0);
+ edit->excised_subtree = assoc_array_node_to_ptr(node);
+ }
+ }
+
+ return edit;
+
+enomem:
+ /* Clean up after an out of memory error */
+ pr_devel("enomem\n");
+ assoc_array_cancel_edit(edit);
+ return ERR_PTR(-ENOMEM);
+}
+
+/**
+ * assoc_array_clear - Script deletion of all objects from an associative array
+ * @array: The array to clear.
+ * @ops: The operations to use.
+ *
+ * Precalculate and preallocate a script for the deletion of all the objects
+ * from an associative array. This results in an edit script that can either
+ * be applied or cancelled.
+ *
+ * The function returns a pointer to an edit script if there are objects to be
+ * deleted, NULL if there are no objects in the array, or -ENOMEM on
+ * allocation failure.
+ *
+ * The caller should lock against other modifications and must continue to hold
+ * the lock until assoc_array_apply_edit() has been called.
+ *
+ * Accesses to the tree may take place concurrently with this function,
+ * provided they hold the RCU read lock.
+ */
+struct assoc_array_edit *assoc_array_clear(struct assoc_array *array,
+ const struct assoc_array_ops *ops)
+{
+ struct assoc_array_edit *edit;
+
+ pr_devel("-->%s()\n", __func__);
+
+ if (!array->root)
+ return NULL;
+
+ edit = kzalloc(sizeof(struct assoc_array_edit), GFP_KERNEL);
+ if (!edit)
+ return ERR_PTR(-ENOMEM);
+ edit->array = array;
+ edit->ops = ops;
+ edit->set[1].ptr = &array->root;
+ edit->set[1].to = NULL;
+ edit->excised_subtree = array->root;
+ edit->ops_for_excised_subtree = ops;
+ pr_devel("all gone\n");
+ return edit;
+}
+
+/*
+ * Handle the deferred destruction after an applied edit.
+ */
+static void assoc_array_rcu_cleanup(struct rcu_head *head)
+{
+ struct assoc_array_edit *edit =
+ container_of(head, struct assoc_array_edit, rcu);
+ int i;
+
+ pr_devel("-->%s()\n", __func__);
+
+ if (edit->dead_leaf)
+ edit->ops->free_object(assoc_array_ptr_to_leaf(edit->dead_leaf));
+ for (i = 0; i < ARRAY_SIZE(edit->excised_meta); i++)
+ if (edit->excised_meta[i])
+ kfree(assoc_array_ptr_to_node(edit->excised_meta[i]));
+
+ if (edit->excised_subtree) {
+ BUG_ON(assoc_array_ptr_is_leaf(edit->excised_subtree));
+ if (assoc_array_ptr_is_node(edit->excised_subtree)) {
+ struct assoc_array_node *n =
+ assoc_array_ptr_to_node(edit->excised_subtree);
+ n->back_pointer = NULL;
+ } else {
+ struct assoc_array_shortcut *s =
+ assoc_array_ptr_to_shortcut(edit->excised_subtree);
+ s->back_pointer = NULL;
+ }
+ assoc_array_destroy_subtree(edit->excised_subtree,
+ edit->ops_for_excised_subtree);
+ }
+
+ kfree(edit);
+}
+
+/**
+ * assoc_array_apply_edit - Apply an edit script to an associative array
+ * @edit: The script to apply.
+ *
+ * Apply an edit script to an associative array to effect an insertion,
+ * deletion or clearance. As the edit script includes preallocated memory,
+ * this is guaranteed not to fail.
+ *
+ * The edit script, dead objects and dead metadata will be scheduled for
+ * destruction after an RCU grace period to permit those doing read-only
+ * accesses on the array to continue to do so under the RCU read lock whilst
+ * the edit is taking place.
+ */
+void assoc_array_apply_edit(struct assoc_array_edit *edit)
+{
+ struct assoc_array_shortcut *shortcut;
+ struct assoc_array_node *node;
+ struct assoc_array_ptr *ptr;
+ int i;
+
+ pr_devel("-->%s()\n", __func__);
+
+ smp_wmb();
+ if (edit->leaf_p)
+ *edit->leaf_p = edit->leaf;
+
+ smp_wmb();
+ for (i = 0; i < ARRAY_SIZE(edit->set_parent_slot); i++)
+ if (edit->set_parent_slot[i].p)
+ *edit->set_parent_slot[i].p = edit->set_parent_slot[i].to;
+
+ smp_wmb();
+ for (i = 0; i < ARRAY_SIZE(edit->set_backpointers); i++)
+ if (edit->set_backpointers[i])
+ *edit->set_backpointers[i] = edit->set_backpointers_to;
+
+ smp_wmb();
+ for (i = 0; i < ARRAY_SIZE(edit->set); i++)
+ if (edit->set[i].ptr)
+ *edit->set[i].ptr = edit->set[i].to;
+
+ if (edit->array->root == NULL) {
+ edit->array->nr_leaves_on_tree = 0;
+ } else if (edit->adjust_count_on) {
+ node = edit->adjust_count_on;
+ for (;;) {
+ node->nr_leaves_on_branch += edit->adjust_count_by;
+
+ ptr = node->back_pointer;
+ if (!ptr)
+ break;
+ if (assoc_array_ptr_is_shortcut(ptr)) {
+ shortcut = assoc_array_ptr_to_shortcut(ptr);
+ ptr = shortcut->back_pointer;
+ if (!ptr)
+ break;
+ }
+ BUG_ON(!assoc_array_ptr_is_node(ptr));
+ node = assoc_array_ptr_to_node(ptr);
+ }
+
+ edit->array->nr_leaves_on_tree += edit->adjust_count_by;
+ }
+
+ call_rcu(&edit->rcu, assoc_array_rcu_cleanup);
+}
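+
+/*
+ * Editor's sketch (not part of the original file): the read side that the
+ * write barriers above pair with.  Lookups may run concurrently with
+ * assoc_array_apply_edit() as long as they hold the RCU read lock;
+ * "demo_ops" is a hypothetical assoc_array_ops.
+ */
+#if 0 /* illustrative only */
+static void *demo_lookup(const struct assoc_array *array, const void *key)
+{
+	void *object;
+
+	rcu_read_lock();
+	object = assoc_array_find(array, &demo_ops, key);
+	/* Pin the object here (e.g. bump a refcount) if it must outlive
+	 * the read-side critical section.
+	 */
+	rcu_read_unlock();
+	return object;
+}
+#endif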
+
+/**
+ * assoc_array_cancel_edit - Discard an edit script.
+ * @edit: The script to discard.
+ *
+ * Free an edit script and all the preallocated data it holds without making
+ * any changes to the associative array it was intended for.
+ *
+ * NOTE! In the case of an insertion script, this does _not_ release the leaf
+ * that was to be inserted. That is left to the caller.
+ */
+void assoc_array_cancel_edit(struct assoc_array_edit *edit)
+{
+ struct assoc_array_ptr *ptr;
+ int i;
+
+ pr_devel("-->%s()\n", __func__);
+
+ /* Clean up after an out of memory error */
+ for (i = 0; i < ARRAY_SIZE(edit->new_meta); i++) {
+ ptr = edit->new_meta[i];
+ if (ptr) {
+ if (assoc_array_ptr_is_node(ptr))
+ kfree(assoc_array_ptr_to_node(ptr));
+ else
+ kfree(assoc_array_ptr_to_shortcut(ptr));
+ }
+ }
+ kfree(edit);
+}
+
+/**
+ * assoc_array_gc - Garbage collect an associative array.
+ * @array: The array to clean.
+ * @ops: The operations to use.
+ * @iterator: A callback function to pass judgement on each object.
+ * @iterator_data: Private data for the callback function.
+ *
+ * Collect garbage from an associative array and pack down the internal tree to
+ * save memory.
+ *
+ * The iterator function is asked to pass judgement upon each object in the
+ * array. If it returns false, the object is discarded and if it returns true,
+ * the object is kept. If it returns true, it must increment the object's
+ * usage count (or whatever it needs to do to retain it) before returning.
+ *
+ * This function returns 0 if successful or -ENOMEM if out of memory. In the
+ * latter case, the array is not changed.
+ *
+ * The caller should lock against other modifications and must continue to hold
+ * the lock until assoc_array_apply_edit() has been called.
+ *
+ * Accesses to the tree may take place concurrently with this function,
+ * provided they hold the RCU read lock.
+ */
+int assoc_array_gc(struct assoc_array *array,
+ const struct assoc_array_ops *ops,
+ bool (*iterator)(void *object, void *iterator_data),
+ void *iterator_data)
+{
+ struct assoc_array_shortcut *shortcut, *new_s;
+ struct assoc_array_node *node, *new_n;
+ struct assoc_array_edit *edit;
+ struct assoc_array_ptr *cursor, *ptr;
+ struct assoc_array_ptr *new_root, *new_parent, **new_ptr_pp;
+ unsigned long nr_leaves_on_tree;
+ bool retained;
+ int keylen, slot, nr_free, next_slot, i;
+
+ pr_devel("-->%s()\n", __func__);
+
+ if (!array->root)
+ return 0;
+
+ edit = kzalloc(sizeof(struct assoc_array_edit), GFP_KERNEL);
+ if (!edit)
+ return -ENOMEM;
+ edit->array = array;
+ edit->ops = ops;
+ edit->ops_for_excised_subtree = ops;
+ edit->set[0].ptr = &array->root;
+ edit->excised_subtree = array->root;
+
+ new_root = new_parent = NULL;
+ new_ptr_pp = &new_root;
+ cursor = array->root;
+
+descend:
+ /* If this point is a shortcut, then we need to duplicate it and
+ * advance the target cursor.
+ */
+ if (assoc_array_ptr_is_shortcut(cursor)) {
+ shortcut = assoc_array_ptr_to_shortcut(cursor);
+ keylen = round_up(shortcut->skip_to_level, ASSOC_ARRAY_KEY_CHUNK_SIZE);
+ keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT;
+ new_s = kmalloc(struct_size(new_s, index_key, keylen),
+ GFP_KERNEL);
+ if (!new_s)
+ goto enomem;
+ pr_devel("dup shortcut %p -> %p\n", shortcut, new_s);
+ memcpy(new_s, shortcut, struct_size(new_s, index_key, keylen));
+ new_s->back_pointer = new_parent;
+ new_s->parent_slot = shortcut->parent_slot;
+ *new_ptr_pp = new_parent = assoc_array_shortcut_to_ptr(new_s);
+ new_ptr_pp = &new_s->next_node;
+ cursor = shortcut->next_node;
+ }
+
+ /* Duplicate the node at this position */
+ node = assoc_array_ptr_to_node(cursor);
+ new_n = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
+ if (!new_n)
+ goto enomem;
+ pr_devel("dup node %p -> %p\n", node, new_n);
+ new_n->back_pointer = new_parent;
+ new_n->parent_slot = node->parent_slot;
+ *new_ptr_pp = new_parent = assoc_array_node_to_ptr(new_n);
+ new_ptr_pp = NULL;
+ slot = 0;
+
+continue_node:
+ /* Filter across any leaves and gc any subtrees */
+ for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
+ ptr = node->slots[slot];
+ if (!ptr)
+ continue;
+
+ if (assoc_array_ptr_is_leaf(ptr)) {
+ if (iterator(assoc_array_ptr_to_leaf(ptr),
+ iterator_data))
+ /* The iterator will have done any reference
+ * counting on the object for us.
+ */
+ new_n->slots[slot] = ptr;
+ continue;
+ }
+
+ new_ptr_pp = &new_n->slots[slot];
+ cursor = ptr;
+ goto descend;
+ }
+
+retry_compress:
+ pr_devel("-- compress node %p --\n", new_n);
+
+ /* Count up the number of empty slots in this node and work out the
+ * subtree leaf count.
+ */
+ new_n->nr_leaves_on_branch = 0;
+ nr_free = 0;
+ for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
+ ptr = new_n->slots[slot];
+ if (!ptr)
+ nr_free++;
+ else if (assoc_array_ptr_is_leaf(ptr))
+ new_n->nr_leaves_on_branch++;
+ }
+ pr_devel("free=%d, leaves=%lu\n", nr_free, new_n->nr_leaves_on_branch);
+
+ /* See what we can fold in */
+ retained = false;
+ next_slot = 0;
+ for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
+ struct assoc_array_shortcut *s;
+ struct assoc_array_node *child;
+
+ ptr = new_n->slots[slot];
+ if (!ptr || assoc_array_ptr_is_leaf(ptr))
+ continue;
+
+ s = NULL;
+ if (assoc_array_ptr_is_shortcut(ptr)) {
+ s = assoc_array_ptr_to_shortcut(ptr);
+ ptr = s->next_node;
+ }
+
+ child = assoc_array_ptr_to_node(ptr);
+ new_n->nr_leaves_on_branch += child->nr_leaves_on_branch;
+
+ if (child->nr_leaves_on_branch <= nr_free + 1) {
+ /* Fold the child node into this one */
+ pr_devel("[%d] fold node %lu/%d [nx %d]\n",
+ slot, child->nr_leaves_on_branch, nr_free + 1,
+ next_slot);
+
+ /* We would already have reaped an intervening shortcut
+ * on the way back up the tree.
+ */
+ BUG_ON(s);
+
+ new_n->slots[slot] = NULL;
+ nr_free++;
+ if (slot < next_slot)
+ next_slot = slot;
+ for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
+ struct assoc_array_ptr *p = child->slots[i];
+ if (!p)
+ continue;
+ BUG_ON(assoc_array_ptr_is_meta(p));
+ while (new_n->slots[next_slot])
+ next_slot++;
+ BUG_ON(next_slot >= ASSOC_ARRAY_FAN_OUT);
+ new_n->slots[next_slot++] = p;
+ nr_free--;
+ }
+ kfree(child);
+ } else {
+ pr_devel("[%d] retain node %lu/%d [nx %d]\n",
+ slot, child->nr_leaves_on_branch, nr_free + 1,
+ next_slot);
+ retained = true;
+ }
+ }
+
+ if (retained && new_n->nr_leaves_on_branch <= ASSOC_ARRAY_FAN_OUT) {
+ pr_devel("internal nodes remain despite enough space, retrying\n");
+ goto retry_compress;
+ }
+ pr_devel("after: %lu\n", new_n->nr_leaves_on_branch);
+
+ nr_leaves_on_tree = new_n->nr_leaves_on_branch;
+
+ /* Excise this node if it is singly occupied by a shortcut */
+ if (nr_free == ASSOC_ARRAY_FAN_OUT - 1) {
+ for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++)
+ if ((ptr = new_n->slots[slot]))
+ break;
+
+ if (assoc_array_ptr_is_meta(ptr) &&
+ assoc_array_ptr_is_shortcut(ptr)) {
+ pr_devel("excise node %p with 1 shortcut\n", new_n);
+ new_s = assoc_array_ptr_to_shortcut(ptr);
+ new_parent = new_n->back_pointer;
+ slot = new_n->parent_slot;
+ kfree(new_n);
+ if (!new_parent) {
+ new_s->back_pointer = NULL;
+ new_s->parent_slot = 0;
+ new_root = ptr;
+ goto gc_complete;
+ }
+
+ if (assoc_array_ptr_is_shortcut(new_parent)) {
+ /* We can discard any preceding shortcut also */
+ struct assoc_array_shortcut *s =
+ assoc_array_ptr_to_shortcut(new_parent);
+
+ pr_devel("excise preceding shortcut\n");
+
+ new_parent = new_s->back_pointer = s->back_pointer;
+ slot = new_s->parent_slot = s->parent_slot;
+ kfree(s);
+ if (!new_parent) {
+ new_s->back_pointer = NULL;
+ new_s->parent_slot = 0;
+ new_root = ptr;
+ goto gc_complete;
+ }
+ }
+
+ new_s->back_pointer = new_parent;
+ new_s->parent_slot = slot;
+ new_n = assoc_array_ptr_to_node(new_parent);
+ new_n->slots[slot] = ptr;
+ goto ascend_old_tree;
+ }
+ }
+
+ /* Excise any shortcuts we might encounter that point to nodes that
+ * only contain leaves.
+ */
+ ptr = new_n->back_pointer;
+ if (!ptr)
+ goto gc_complete;
+
+ if (assoc_array_ptr_is_shortcut(ptr)) {
+ new_s = assoc_array_ptr_to_shortcut(ptr);
+ new_parent = new_s->back_pointer;
+ slot = new_s->parent_slot;
+
+ if (new_n->nr_leaves_on_branch <= ASSOC_ARRAY_FAN_OUT) {
+ struct assoc_array_node *n;
+
+ pr_devel("excise shortcut\n");
+ new_n->back_pointer = new_parent;
+ new_n->parent_slot = slot;
+ kfree(new_s);
+ if (!new_parent) {
+ new_root = assoc_array_node_to_ptr(new_n);
+ goto gc_complete;
+ }
+
+ n = assoc_array_ptr_to_node(new_parent);
+ n->slots[slot] = assoc_array_node_to_ptr(new_n);
+ }
+ } else {
+ new_parent = ptr;
+ }
+ new_n = assoc_array_ptr_to_node(new_parent);
+
+ascend_old_tree:
+ ptr = node->back_pointer;
+ if (assoc_array_ptr_is_shortcut(ptr)) {
+ shortcut = assoc_array_ptr_to_shortcut(ptr);
+ slot = shortcut->parent_slot;
+ cursor = shortcut->back_pointer;
+ if (!cursor)
+ goto gc_complete;
+ } else {
+ slot = node->parent_slot;
+ cursor = ptr;
+ }
+ BUG_ON(!cursor);
+ node = assoc_array_ptr_to_node(cursor);
+ slot++;
+ goto continue_node;
+
+gc_complete:
+ edit->set[0].to = new_root;
+ assoc_array_apply_edit(edit);
+ array->nr_leaves_on_tree = nr_leaves_on_tree;
+ return 0;
+
+enomem:
+ pr_devel("enomem\n");
+ assoc_array_destroy_subtree(new_root, edit->ops);
+ kfree(edit);
+ return -ENOMEM;
+}
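+
+/*
+ * Editor's sketch (not part of the original file): a GC iterator for a
+ * hypothetical refcounted object type.  Returning false discards the object;
+ * returning true keeps it, and the reference must be taken before returning,
+ * as the kernel-doc above requires.
+ */
+#if 0 /* illustrative only */
+static bool demo_gc_iterator(void *object, void *iterator_data)
+{
+	struct demo_object *obj = object;
+
+	if (obj->expired)
+		return false;		/* discard */
+	refcount_inc(&obj->usage);	/* retain: pin before returning */
+	return true;
+}
+#endif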
diff --git a/lib/atomic64.c b/lib/atomic64.c
new file mode 100644
index 000000000..caf895789
--- /dev/null
+++ b/lib/atomic64.c
@@ -0,0 +1,189 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Generic implementation of 64-bit atomics using spinlocks,
+ * useful on processors that don't have 64-bit atomic instructions.
+ *
+ * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
+ */
+#include <linux/types.h>
+#include <linux/cache.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/export.h>
+#include <linux/atomic.h>
+
+/*
+ * We use a hashed array of spinlocks to provide exclusive access
+ * to each atomic64_t variable. Since this is expected to be used on
+ * systems with small numbers of CPUs (<= 4 or so), we use a
+ * relatively small array of 16 spinlocks to avoid wasting too much
+ * memory on the spinlock array.
+ */
+#define NR_LOCKS 16
+
+/*
+ * Ensure each lock is in a separate cacheline.
+ */
+static union {
+ raw_spinlock_t lock;
+ char pad[L1_CACHE_BYTES];
+} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp = {
+ [0 ... (NR_LOCKS - 1)] = {
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock),
+ },
+};
+
+static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
+{
+ unsigned long addr = (unsigned long) v;
+
+ addr >>= L1_CACHE_SHIFT;
+ addr ^= (addr >> 8) ^ (addr >> 16);
+ return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
+}
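+
+/*
+ * Editor's note: two atomic64_t variables that share a cacheline always hash
+ * to the same lock (their addresses differ only below L1_CACHE_SHIFT, which
+ * the first shift discards), while variables on different cachelines are
+ * spread across the 16 locks by folding the upper address bits into the low
+ * four with the two XOR shifts above.
+ */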
+
+s64 generic_atomic64_read(const atomic64_t *v)
+{
+ unsigned long flags;
+ raw_spinlock_t *lock = lock_addr(v);
+ s64 val;
+
+ raw_spin_lock_irqsave(lock, flags);
+ val = v->counter;
+ raw_spin_unlock_irqrestore(lock, flags);
+ return val;
+}
+EXPORT_SYMBOL(generic_atomic64_read);
+
+void generic_atomic64_set(atomic64_t *v, s64 i)
+{
+ unsigned long flags;
+ raw_spinlock_t *lock = lock_addr(v);
+
+ raw_spin_lock_irqsave(lock, flags);
+ v->counter = i;
+ raw_spin_unlock_irqrestore(lock, flags);
+}
+EXPORT_SYMBOL(generic_atomic64_set);
+
+#define ATOMIC64_OP(op, c_op) \
+void generic_atomic64_##op(s64 a, atomic64_t *v) \
+{ \
+ unsigned long flags; \
+ raw_spinlock_t *lock = lock_addr(v); \
+ \
+ raw_spin_lock_irqsave(lock, flags); \
+ v->counter c_op a; \
+ raw_spin_unlock_irqrestore(lock, flags); \
+} \
+EXPORT_SYMBOL(generic_atomic64_##op);
+
+#define ATOMIC64_OP_RETURN(op, c_op) \
+s64 generic_atomic64_##op##_return(s64 a, atomic64_t *v) \
+{ \
+ unsigned long flags; \
+ raw_spinlock_t *lock = lock_addr(v); \
+ s64 val; \
+ \
+ raw_spin_lock_irqsave(lock, flags); \
+ val = (v->counter c_op a); \
+ raw_spin_unlock_irqrestore(lock, flags); \
+ return val; \
+} \
+EXPORT_SYMBOL(generic_atomic64_##op##_return);
+
+#define ATOMIC64_FETCH_OP(op, c_op) \
+s64 generic_atomic64_fetch_##op(s64 a, atomic64_t *v) \
+{ \
+ unsigned long flags; \
+ raw_spinlock_t *lock = lock_addr(v); \
+ s64 val; \
+ \
+ raw_spin_lock_irqsave(lock, flags); \
+ val = v->counter; \
+ v->counter c_op a; \
+ raw_spin_unlock_irqrestore(lock, flags); \
+ return val; \
+} \
+EXPORT_SYMBOL(generic_atomic64_fetch_##op);
+
+#define ATOMIC64_OPS(op, c_op) \
+ ATOMIC64_OP(op, c_op) \
+ ATOMIC64_OP_RETURN(op, c_op) \
+ ATOMIC64_FETCH_OP(op, c_op)
+
+ATOMIC64_OPS(add, +=)
+ATOMIC64_OPS(sub, -=)
+
+#undef ATOMIC64_OPS
+#define ATOMIC64_OPS(op, c_op) \
+ ATOMIC64_OP(op, c_op) \
+ ATOMIC64_FETCH_OP(op, c_op)
+
+ATOMIC64_OPS(and, &=)
+ATOMIC64_OPS(or, |=)
+ATOMIC64_OPS(xor, ^=)
+
+#undef ATOMIC64_OPS
+#undef ATOMIC64_FETCH_OP
+#undef ATOMIC64_OP
+
+s64 generic_atomic64_dec_if_positive(atomic64_t *v)
+{
+ unsigned long flags;
+ raw_spinlock_t *lock = lock_addr(v);
+ s64 val;
+
+ raw_spin_lock_irqsave(lock, flags);
+ val = v->counter - 1;
+ if (val >= 0)
+ v->counter = val;
+ raw_spin_unlock_irqrestore(lock, flags);
+ return val;
+}
+EXPORT_SYMBOL(generic_atomic64_dec_if_positive);
+
+s64 generic_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
+{
+ unsigned long flags;
+ raw_spinlock_t *lock = lock_addr(v);
+ s64 val;
+
+ raw_spin_lock_irqsave(lock, flags);
+ val = v->counter;
+ if (val == o)
+ v->counter = n;
+ raw_spin_unlock_irqrestore(lock, flags);
+ return val;
+}
+EXPORT_SYMBOL(generic_atomic64_cmpxchg);
+
+s64 generic_atomic64_xchg(atomic64_t *v, s64 new)
+{
+ unsigned long flags;
+ raw_spinlock_t *lock = lock_addr(v);
+ s64 val;
+
+ raw_spin_lock_irqsave(lock, flags);
+ val = v->counter;
+ v->counter = new;
+ raw_spin_unlock_irqrestore(lock, flags);
+ return val;
+}
+EXPORT_SYMBOL(generic_atomic64_xchg);
+
+s64 generic_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+{
+ unsigned long flags;
+ raw_spinlock_t *lock = lock_addr(v);
+ s64 val;
+
+ raw_spin_lock_irqsave(lock, flags);
+ val = v->counter;
+ if (val != u)
+ v->counter += a;
+ raw_spin_unlock_irqrestore(lock, flags);
+
+ return val;
+}
+EXPORT_SYMBOL(generic_atomic64_fetch_add_unless);
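+
+/*
+ * Editor's note: on architectures without native 64-bit atomics these
+ * generic_* functions back the regular atomic64_*() API (wired up via
+ * include/asm-generic/atomic64.h), so callers use the usual forms, e.g.
+ * the classic pattern of taking a reference only while the count is
+ * still non-zero:
+ *
+ *	atomic64_t refs = ATOMIC64_INIT(1);
+ *	...
+ *	if (atomic64_fetch_add_unless(&refs, 1, 0) == 0)
+ *		return -ENOENT;
+ */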
diff --git a/lib/atomic64_test.c b/lib/atomic64_test.c
new file mode 100644
index 000000000..d9d170238
--- /dev/null
+++ b/lib/atomic64_test.c
@@ -0,0 +1,276 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Testsuite for atomic64_t functions
+ *
+ * Copyright © 2010 Luca Barbieri
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/bug.h>
+#include <linux/kernel.h>
+#include <linux/atomic.h>
+#include <linux/module.h>
+
+#ifdef CONFIG_X86
+#include <asm/cpufeature.h> /* for boot_cpu_has below */
+#endif
+
+#define TEST(bit, op, c_op, val) \
+do { \
+ atomic##bit##_set(&v, v0); \
+ r = v0; \
+ atomic##bit##_##op(val, &v); \
+ r c_op val; \
+ WARN(atomic##bit##_read(&v) != r, "%Lx != %Lx\n", \
+ (unsigned long long)atomic##bit##_read(&v), \
+ (unsigned long long)r); \
+} while (0)
+
+/*
+ * Test for an atomic operation family;
+ * @test should be a macro accepting parameters (bit, op, ...)
+ */
+
+#define FAMILY_TEST(test, bit, op, args...) \
+do { \
+ test(bit, op, ##args); \
+ test(bit, op##_acquire, ##args); \
+ test(bit, op##_release, ##args); \
+ test(bit, op##_relaxed, ##args); \
+} while (0)
+
+#define TEST_RETURN(bit, op, c_op, val) \
+do { \
+ atomic##bit##_set(&v, v0); \
+ r = v0; \
+ r c_op val; \
+ BUG_ON(atomic##bit##_##op(val, &v) != r); \
+ BUG_ON(atomic##bit##_read(&v) != r); \
+} while (0)
+
+#define TEST_FETCH(bit, op, c_op, val) \
+do { \
+ atomic##bit##_set(&v, v0); \
+ r = v0; \
+ r c_op val; \
+ BUG_ON(atomic##bit##_##op(val, &v) != v0); \
+ BUG_ON(atomic##bit##_read(&v) != r); \
+} while (0)
+
+#define RETURN_FAMILY_TEST(bit, op, c_op, val) \
+do { \
+ FAMILY_TEST(TEST_RETURN, bit, op, c_op, val); \
+} while (0)
+
+#define FETCH_FAMILY_TEST(bit, op, c_op, val) \
+do { \
+ FAMILY_TEST(TEST_FETCH, bit, op, c_op, val); \
+} while (0)
+
+#define TEST_ARGS(bit, op, init, ret, expect, args...) \
+do { \
+ atomic##bit##_set(&v, init); \
+ BUG_ON(atomic##bit##_##op(&v, ##args) != ret); \
+ BUG_ON(atomic##bit##_read(&v) != expect); \
+} while (0)
+
+#define XCHG_FAMILY_TEST(bit, init, new) \
+do { \
+ FAMILY_TEST(TEST_ARGS, bit, xchg, init, init, new, new); \
+} while (0)
+
+#define CMPXCHG_FAMILY_TEST(bit, init, new, wrong) \
+do { \
+ FAMILY_TEST(TEST_ARGS, bit, cmpxchg, \
+ init, init, new, init, new); \
+ FAMILY_TEST(TEST_ARGS, bit, cmpxchg, \
+ init, init, init, wrong, new); \
+} while (0)
+
+#define INC_RETURN_FAMILY_TEST(bit, i) \
+do { \
+ FAMILY_TEST(TEST_ARGS, bit, inc_return, \
+ i, (i) + one, (i) + one); \
+} while (0)
+
+#define DEC_RETURN_FAMILY_TEST(bit, i) \
+do { \
+ FAMILY_TEST(TEST_ARGS, bit, dec_return, \
+ i, (i) - one, (i) - one); \
+} while (0)
+
+static __init void test_atomic(void)
+{
+ int v0 = 0xaaa31337;
+ int v1 = 0xdeadbeef;
+ int onestwos = 0x11112222;
+ int one = 1;
+
+ atomic_t v;
+ int r;
+
+ TEST(, add, +=, onestwos);
+ TEST(, add, +=, -one);
+ TEST(, sub, -=, onestwos);
+ TEST(, sub, -=, -one);
+ TEST(, or, |=, v1);
+ TEST(, and, &=, v1);
+ TEST(, xor, ^=, v1);
+ TEST(, andnot, &= ~, v1);
+
+ RETURN_FAMILY_TEST(, add_return, +=, onestwos);
+ RETURN_FAMILY_TEST(, add_return, +=, -one);
+ RETURN_FAMILY_TEST(, sub_return, -=, onestwos);
+ RETURN_FAMILY_TEST(, sub_return, -=, -one);
+
+ FETCH_FAMILY_TEST(, fetch_add, +=, onestwos);
+ FETCH_FAMILY_TEST(, fetch_add, +=, -one);
+ FETCH_FAMILY_TEST(, fetch_sub, -=, onestwos);
+ FETCH_FAMILY_TEST(, fetch_sub, -=, -one);
+
+ FETCH_FAMILY_TEST(, fetch_or, |=, v1);
+ FETCH_FAMILY_TEST(, fetch_and, &=, v1);
+ FETCH_FAMILY_TEST(, fetch_andnot, &= ~, v1);
+ FETCH_FAMILY_TEST(, fetch_xor, ^=, v1);
+
+ INC_RETURN_FAMILY_TEST(, v0);
+ DEC_RETURN_FAMILY_TEST(, v0);
+
+ XCHG_FAMILY_TEST(, v0, v1);
+ CMPXCHG_FAMILY_TEST(, v0, v1, onestwos);
+
+}
+
+#define INIT(c) do { atomic64_set(&v, c); r = c; } while (0)
+static __init void test_atomic64(void)
+{
+ long long v0 = 0xaaa31337c001d00dLL;
+ long long v1 = 0xdeadbeefdeafcafeLL;
+ long long v2 = 0xfaceabadf00df001LL;
+ long long v3 = 0x8000000000000000LL;
+ long long onestwos = 0x1111111122222222LL;
+ long long one = 1LL;
+ int r_int;
+
+ atomic64_t v = ATOMIC64_INIT(v0);
+ long long r = v0;
+ BUG_ON(v.counter != r);
+
+ atomic64_set(&v, v1);
+ r = v1;
+ BUG_ON(v.counter != r);
+ BUG_ON(atomic64_read(&v) != r);
+
+ TEST(64, add, +=, onestwos);
+ TEST(64, add, +=, -one);
+ TEST(64, sub, -=, onestwos);
+ TEST(64, sub, -=, -one);
+ TEST(64, or, |=, v1);
+ TEST(64, and, &=, v1);
+ TEST(64, xor, ^=, v1);
+ TEST(64, andnot, &= ~, v1);
+
+ RETURN_FAMILY_TEST(64, add_return, +=, onestwos);
+ RETURN_FAMILY_TEST(64, add_return, +=, -one);
+ RETURN_FAMILY_TEST(64, sub_return, -=, onestwos);
+ RETURN_FAMILY_TEST(64, sub_return, -=, -one);
+
+ FETCH_FAMILY_TEST(64, fetch_add, +=, onestwos);
+ FETCH_FAMILY_TEST(64, fetch_add, +=, -one);
+ FETCH_FAMILY_TEST(64, fetch_sub, -=, onestwos);
+ FETCH_FAMILY_TEST(64, fetch_sub, -=, -one);
+
+ FETCH_FAMILY_TEST(64, fetch_or, |=, v1);
+ FETCH_FAMILY_TEST(64, fetch_and, &=, v1);
+ FETCH_FAMILY_TEST(64, fetch_andnot, &= ~, v1);
+ FETCH_FAMILY_TEST(64, fetch_xor, ^=, v1);
+
+ INIT(v0);
+ atomic64_inc(&v);
+ r += one;
+ BUG_ON(v.counter != r);
+
+ INIT(v0);
+ atomic64_dec(&v);
+ r -= one;
+ BUG_ON(v.counter != r);
+
+ INC_RETURN_FAMILY_TEST(64, v0);
+ DEC_RETURN_FAMILY_TEST(64, v0);
+
+ XCHG_FAMILY_TEST(64, v0, v1);
+ CMPXCHG_FAMILY_TEST(64, v0, v1, v2);
+
+ INIT(v0);
+ BUG_ON(atomic64_add_unless(&v, one, v0));
+ BUG_ON(v.counter != r);
+
+ INIT(v0);
+ BUG_ON(!atomic64_add_unless(&v, one, v1));
+ r += one;
+ BUG_ON(v.counter != r);
+
+ INIT(onestwos);
+ BUG_ON(atomic64_dec_if_positive(&v) != (onestwos - 1));
+ r -= one;
+ BUG_ON(v.counter != r);
+
+ INIT(0);
+ BUG_ON(atomic64_dec_if_positive(&v) != -one);
+ BUG_ON(v.counter != r);
+
+ INIT(-one);
+ BUG_ON(atomic64_dec_if_positive(&v) != (-one - one));
+ BUG_ON(v.counter != r);
+
+ INIT(onestwos);
+ BUG_ON(!atomic64_inc_not_zero(&v));
+ r += one;
+ BUG_ON(v.counter != r);
+
+ INIT(0);
+ BUG_ON(atomic64_inc_not_zero(&v));
+ BUG_ON(v.counter != r);
+
+ INIT(-one);
+ BUG_ON(!atomic64_inc_not_zero(&v));
+ r += one;
+ BUG_ON(v.counter != r);
+
+ /* Confirm the return value fits in an int, even if the value doesn't */
+ INIT(v3);
+ r_int = atomic64_inc_not_zero(&v);
+ BUG_ON(!r_int);
+}
+
+static __init int test_atomics_init(void)
+{
+ test_atomic();
+ test_atomic64();
+
+#ifdef CONFIG_X86
+ pr_info("passed for %s platform %s CX8 and %s SSE\n",
+#ifdef CONFIG_X86_64
+ "x86-64",
+#elif defined(CONFIG_X86_CMPXCHG64)
+ "i586+",
+#else
+ "i386+",
+#endif
+ boot_cpu_has(X86_FEATURE_CX8) ? "with" : "without",
+ boot_cpu_has(X86_FEATURE_XMM) ? "with" : "without");
+#else
+ pr_info("passed\n");
+#endif
+
+ return 0;
+}
+
+static __exit void test_atomics_exit(void) {}
+
+module_init(test_atomics_init);
+module_exit(test_atomics_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/lib/audit.c b/lib/audit.c
new file mode 100644
index 000000000..738bda22d
--- /dev/null
+++ b/lib/audit.c
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/audit.h>
+#include <asm/unistd.h>
+
+static unsigned dir_class[] = {
+#include <asm-generic/audit_dir_write.h>
+~0U
+};
+
+static unsigned read_class[] = {
+#include <asm-generic/audit_read.h>
+~0U
+};
+
+static unsigned write_class[] = {
+#include <asm-generic/audit_write.h>
+~0U
+};
+
+static unsigned chattr_class[] = {
+#include <asm-generic/audit_change_attr.h>
+~0U
+};
+
+static unsigned signal_class[] = {
+#include <asm-generic/audit_signal.h>
+~0U
+};
+
+int audit_classify_arch(int arch)
+{
+ if (audit_is_compat(arch))
+ return 1;
+ else
+ return 0;
+}
+
+int audit_classify_syscall(int abi, unsigned syscall)
+{
+ if (audit_is_compat(abi))
+ return audit_classify_compat_syscall(abi, syscall);
+
+ switch(syscall) {
+#ifdef __NR_open
+ case __NR_open:
+ return AUDITSC_OPEN;
+#endif
+#ifdef __NR_openat
+ case __NR_openat:
+ return AUDITSC_OPENAT;
+#endif
+#ifdef __NR_socketcall
+ case __NR_socketcall:
+ return AUDITSC_SOCKETCALL;
+#endif
+#ifdef __NR_execveat
+ case __NR_execveat:
+#endif
+ case __NR_execve:
+ return AUDITSC_EXECVE;
+#ifdef __NR_openat2
+ case __NR_openat2:
+ return AUDITSC_OPENAT2;
+#endif
+ default:
+ return AUDITSC_NATIVE;
+ }
+}
+
+static int __init audit_classes_init(void)
+{
+#ifdef CONFIG_AUDIT_COMPAT_GENERIC
+ audit_register_class(AUDIT_CLASS_WRITE_32, compat_write_class);
+ audit_register_class(AUDIT_CLASS_READ_32, compat_read_class);
+ audit_register_class(AUDIT_CLASS_DIR_WRITE_32, compat_dir_class);
+ audit_register_class(AUDIT_CLASS_CHATTR_32, compat_chattr_class);
+ audit_register_class(AUDIT_CLASS_SIGNAL_32, compat_signal_class);
+#endif
+ audit_register_class(AUDIT_CLASS_WRITE, write_class);
+ audit_register_class(AUDIT_CLASS_READ, read_class);
+ audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class);
+ audit_register_class(AUDIT_CLASS_CHATTR, chattr_class);
+ audit_register_class(AUDIT_CLASS_SIGNAL, signal_class);
+ return 0;
+}
+
+__initcall(audit_classes_init);
diff --git a/lib/base64.c b/lib/base64.c
new file mode 100644
index 000000000..b736a7a43
--- /dev/null
+++ b/lib/base64.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * base64.c - RFC4648-compliant base64 encoding
+ *
+ * Copyright (c) 2020 Hannes Reinecke, SUSE
+ *
+ * Based on the base64url routines from fs/crypto/fname.c
+ * (which are using the URL-safe base64 encoding),
+ * modified to use the standard coding table from RFC4648 section 4.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/export.h>
+#include <linux/string.h>
+#include <linux/base64.h>
+
+static const char base64_table[65] =
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
+
+/**
+ * base64_encode() - base64-encode some binary data
+ * @src: the binary data to encode
+ * @srclen: the length of @src in bytes
+ * @dst: (output) the base64-encoded string. Not NUL-terminated.
+ *
+ * Encodes data using base64 encoding, i.e. the "Base 64 Encoding" specified
+ * by RFC 4648, including the '='-padding.
+ *
+ * Return: the length of the resulting base64-encoded string in bytes.
+ */
+int base64_encode(const u8 *src, int srclen, char *dst)
+{
+ u32 ac = 0;
+ int bits = 0;
+ int i;
+ char *cp = dst;
+
+ for (i = 0; i < srclen; i++) {
+ ac = (ac << 8) | src[i];
+ bits += 8;
+ do {
+ bits -= 6;
+ *cp++ = base64_table[(ac >> bits) & 0x3f];
+ } while (bits >= 6);
+ }
+ if (bits) {
+ *cp++ = base64_table[(ac << (6 - bits)) & 0x3f];
+ bits -= 6;
+ }
+ while (bits < 0) {
+ *cp++ = '=';
+ bits += 2;
+ }
+ return cp - dst;
+}
+EXPORT_SYMBOL_GPL(base64_encode);
+
+/**
+ * base64_decode() - base64-decode a string
+ * @src: the string to decode. Doesn't need to be NUL-terminated.
+ * @srclen: the length of @src in bytes
+ * @dst: (output) the decoded binary data
+ *
+ * Decodes a string using base64 encoding, i.e. the "Base 64 Encoding"
+ * specified by RFC 4648, including the '='-padding.
+ *
+ * This implementation hasn't been optimized for performance.
+ *
+ * Return: the length of the resulting decoded binary data in bytes,
+ * or -1 if the string isn't a valid base64 string.
+ */
+int base64_decode(const char *src, int srclen, u8 *dst)
+{
+ u32 ac = 0;
+ int bits = 0;
+ int i;
+ u8 *bp = dst;
+
+ for (i = 0; i < srclen; i++) {
+ const char *p = strchr(base64_table, src[i]);
+
+ if (src[i] == '=') {
+ ac = (ac << 6);
+ bits += 6;
+ if (bits >= 8)
+ bits -= 8;
+ continue;
+ }
+ if (p == NULL || src[i] == 0)
+ return -1;
+ ac = (ac << 6) | (p - base64_table);
+ bits += 6;
+ if (bits >= 8) {
+ bits -= 8;
+ *bp++ = (u8)(ac >> bits);
+ }
+ }
+ if (ac & ((1 << bits) - 1))
+ return -1;
+ return bp - dst;
+}
+EXPORT_SYMBOL_GPL(base64_decode);
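+
+/*
+ * Editor's sketch (not part of the original file): a round trip.  Padded
+ * base64 output for srclen input bytes is 4 * DIV_ROUND_UP(srclen, 3)
+ * characters, so the 3 bytes "abc" encode to the 4 characters "YWJj".
+ */
+#if 0 /* illustrative only */
+static bool base64_roundtrip_demo(void)
+{
+	static const u8 src[3] = "abc";
+	char enc[4];
+	u8 dec[3];
+	int elen, dlen;
+
+	elen = base64_encode(src, sizeof(src), enc);	/* 4, "YWJj" */
+	dlen = base64_decode(enc, elen, dec);		/* 3, "abc" */
+	return elen == 4 && dlen == 3 && memcmp(dec, src, 3) == 0;
+}
+#endif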
diff --git a/lib/bcd.c b/lib/bcd.c
new file mode 100644
index 000000000..7e4750b6e
--- /dev/null
+++ b/lib/bcd.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bcd.h>
+#include <linux/export.h>
+
+unsigned _bcd2bin(unsigned char val)
+{
+ return (val & 0x0f) + (val >> 4) * 10;
+}
+EXPORT_SYMBOL(_bcd2bin);
+
+unsigned char _bin2bcd(unsigned val)
+{
+ return ((val / 10) << 4) + val % 10;
+}
+EXPORT_SYMBOL(_bin2bcd);
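+
+/*
+ * Worked example (editor's note): BCD stores one decimal digit per nibble,
+ * so an RTC seconds register reading 0x59 means 59 decimal.  bcd2bin(0x59)
+ * computes (0x59 & 0x0f) + (0x59 >> 4) * 10 = 9 + 5 * 10 = 59, and
+ * bin2bcd(59) gives ((59 / 10) << 4) + 59 % 10 = 0x50 + 9 = 0x59 again.
+ */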
diff --git a/lib/bch.c b/lib/bch.c
new file mode 100644
index 000000000..5f71fd76e
--- /dev/null
+++ b/lib/bch.c
@@ -0,0 +1,1412 @@
+/*
+ * Generic binary BCH encoding/decoding library
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 51
+ * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Copyright © 2011 Parrot S.A.
+ *
+ * Author: Ivan Djelic <ivan.djelic@parrot.com>
+ *
+ * Description:
+ *
+ * This library provides runtime configurable encoding/decoding of binary
+ * Bose-Chaudhuri-Hocquenghem (BCH) codes.
+ *
+ * Call bch_init to get a pointer to a newly allocated bch_control structure for
+ * the given m (Galois field order), t (error correction capability) and
+ * (optional) primitive polynomial parameters.
+ *
+ * Call bch_encode to compute and store ecc parity bytes to a given buffer.
+ * Call bch_decode to detect and locate errors in received data.
+ *
+ * On systems supporting hw BCH features, intermediate results may be provided
+ * to bch_decode in order to skip certain steps. See bch_decode() documentation
+ * for details.
+ *
+ * Option CONFIG_BCH_CONST_PARAMS can be used to force fixed values of
+ * parameters m and t; thus allowing extra compiler optimizations and providing
+ * better (up to 2x) encoding performance. Using this option makes sense when
+ * (m,t) are fixed and known in advance, e.g. when using BCH error correction
+ * on a particular NAND flash device.
+ *
+ * Algorithmic details:
+ *
+ * Encoding is performed by processing 32 input bits in parallel, using 4
+ * remainder lookup tables.
+ *
+ * The final stage of decoding involves the following internal steps:
+ * a. Syndrome computation
+ * b. Error locator polynomial computation using Berlekamp-Massey algorithm
+ * c. Error locator root finding (by far the most expensive step)
+ *
+ * In this implementation, step c is not performed using the usual Chien search.
+ * Instead, an alternative approach described in [1] is used. It consists in
+ * factoring the error locator polynomial using the Berlekamp Trace algorithm
+ * (BTA) down to a certain degree (4), after which ad hoc low-degree polynomial
+ * solving techniques [2] are used. The resulting algorithm, called BTZ, yields
+ * much better performance than Chien search for usual (m,t) values (typically
+ * m >= 13, t < 32, see [1]).
+ *
+ * [1] B. Biswas, V. Herbert. Efficient root finding of polynomials over fields
+ * of characteristic 2, in: Western European Workshop on Research in Cryptology
+ * - WEWoRC 2009, Graz, Austria, LNCS, Springer, July 2009, to appear.
+ * [2] [Zin96] V.A. Zinoviev. On the solution of equations of degree 10 over
+ * finite fields GF(2^q). In Rapport de recherche INRIA no 2829, 1996.
+ */
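+
+/*
+ * Editor's sketch (not part of the original file) of the call sequence
+ * described above, for a 512-byte NAND sector with the illustrative
+ * parameters m=13, t=4 (ecc_bytes = DIV_ROUND_UP(13*4, 8) = 7):
+ */
+#if 0 /* illustrative only */
+static int bch_demo(const u8 *sector, u8 ecc[7])
+{
+	struct bch_control *bch;
+
+	bch = bch_init(13, 4, 0, false);	/* 0 = default primitive poly */
+	if (!bch)
+		return -EINVAL;
+	memset(ecc, 0, bch->ecc_bytes);		/* parity must start zeroed */
+	bch_encode(bch, sector, 512, ecc);
+	bch_free(bch);
+	return 0;
+}
+#endif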
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/bitrev.h>
+#include <asm/byteorder.h>
+#include <linux/bch.h>
+
+#if defined(CONFIG_BCH_CONST_PARAMS)
+#define GF_M(_p) (CONFIG_BCH_CONST_M)
+#define GF_T(_p) (CONFIG_BCH_CONST_T)
+#define GF_N(_p) ((1 << (CONFIG_BCH_CONST_M))-1)
+#define BCH_MAX_M (CONFIG_BCH_CONST_M)
+#define BCH_MAX_T (CONFIG_BCH_CONST_T)
+#else
+#define GF_M(_p) ((_p)->m)
+#define GF_T(_p) ((_p)->t)
+#define GF_N(_p) ((_p)->n)
+#define BCH_MAX_M 15 /* 2KB */
+#define BCH_MAX_T 64 /* 64 bit correction */
+#endif
+
+#define BCH_ECC_WORDS(_p) DIV_ROUND_UP(GF_M(_p)*GF_T(_p), 32)
+#define BCH_ECC_BYTES(_p) DIV_ROUND_UP(GF_M(_p)*GF_T(_p), 8)
+
+#define BCH_ECC_MAX_WORDS DIV_ROUND_UP(BCH_MAX_M * BCH_MAX_T, 32)
+
+#ifndef dbg
+#define dbg(_fmt, args...) do {} while (0)
+#endif
+
+/*
+ * represent a polynomial over GF(2^m)
+ */
+struct gf_poly {
+ unsigned int deg; /* polynomial degree */
+ unsigned int c[]; /* polynomial terms */
+};
+
+/* given its degree, compute a polynomial size in bytes */
+#define GF_POLY_SZ(_d) (sizeof(struct gf_poly)+((_d)+1)*sizeof(unsigned int))
+
+/* polynomial of degree 1 */
+struct gf_poly_deg1 {
+ struct gf_poly poly;
+ unsigned int c[2];
+};
+
+static u8 swap_bits(struct bch_control *bch, u8 in)
+{
+ if (!bch->swap_bits)
+ return in;
+
+ return bitrev8(in);
+}
+
+/*
+ * same as bch_encode(), but process input data one byte at a time
+ */
+static void bch_encode_unaligned(struct bch_control *bch,
+ const unsigned char *data, unsigned int len,
+ uint32_t *ecc)
+{
+ int i;
+ const uint32_t *p;
+ const int l = BCH_ECC_WORDS(bch)-1;
+
+ while (len--) {
+ u8 tmp = swap_bits(bch, *data++);
+
+ p = bch->mod8_tab + (l+1)*(((ecc[0] >> 24)^(tmp)) & 0xff);
+
+ for (i = 0; i < l; i++)
+ ecc[i] = ((ecc[i] << 8)|(ecc[i+1] >> 24))^(*p++);
+
+ ecc[l] = (ecc[l] << 8)^(*p);
+ }
+}
+
+/*
+ * convert ecc bytes to aligned, zero-padded 32-bit ecc words
+ */
+static void load_ecc8(struct bch_control *bch, uint32_t *dst,
+ const uint8_t *src)
+{
+ uint8_t pad[4] = {0, 0, 0, 0};
+ unsigned int i, nwords = BCH_ECC_WORDS(bch)-1;
+
+ for (i = 0; i < nwords; i++, src += 4)
+ dst[i] = ((u32)swap_bits(bch, src[0]) << 24) |
+ ((u32)swap_bits(bch, src[1]) << 16) |
+ ((u32)swap_bits(bch, src[2]) << 8) |
+ swap_bits(bch, src[3]);
+
+ memcpy(pad, src, BCH_ECC_BYTES(bch)-4*nwords);
+ dst[nwords] = ((u32)swap_bits(bch, pad[0]) << 24) |
+ ((u32)swap_bits(bch, pad[1]) << 16) |
+ ((u32)swap_bits(bch, pad[2]) << 8) |
+ swap_bits(bch, pad[3]);
+}
+
+/*
+ * convert 32-bit ecc words to ecc bytes
+ */
+static void store_ecc8(struct bch_control *bch, uint8_t *dst,
+ const uint32_t *src)
+{
+ uint8_t pad[4];
+ unsigned int i, nwords = BCH_ECC_WORDS(bch)-1;
+
+ for (i = 0; i < nwords; i++) {
+ *dst++ = swap_bits(bch, src[i] >> 24);
+ *dst++ = swap_bits(bch, src[i] >> 16);
+ *dst++ = swap_bits(bch, src[i] >> 8);
+ *dst++ = swap_bits(bch, src[i]);
+ }
+ pad[0] = swap_bits(bch, src[nwords] >> 24);
+ pad[1] = swap_bits(bch, src[nwords] >> 16);
+ pad[2] = swap_bits(bch, src[nwords] >> 8);
+ pad[3] = swap_bits(bch, src[nwords]);
+ memcpy(dst, pad, BCH_ECC_BYTES(bch)-4*nwords);
+}
+
+/**
+ * bch_encode - calculate BCH ecc parity of data
+ * @bch: BCH control structure
+ * @data: data to encode
+ * @len: data length in bytes
+ * @ecc: ecc parity data, must be initialized by caller
+ *
+ * The @ecc parity array is used both as input and output parameter, in order to
+ * allow incremental computations. It should be of the size indicated by member
+ * @ecc_bytes of @bch, and should be initialized to 0 before the first call.
+ *
+ * The exact number of computed ecc parity bits is given by member @ecc_bits of
+ * @bch; it may be less than m*t for large values of t.
+ */
+void bch_encode(struct bch_control *bch, const uint8_t *data,
+ unsigned int len, uint8_t *ecc)
+{
+ const unsigned int l = BCH_ECC_WORDS(bch)-1;
+ unsigned int i, mlen;
+ unsigned long m;
+ uint32_t w, r[BCH_ECC_MAX_WORDS];
+ const size_t r_bytes = BCH_ECC_WORDS(bch) * sizeof(*r);
+ const uint32_t * const tab0 = bch->mod8_tab;
+ const uint32_t * const tab1 = tab0 + 256*(l+1);
+ const uint32_t * const tab2 = tab1 + 256*(l+1);
+ const uint32_t * const tab3 = tab2 + 256*(l+1);
+ const uint32_t *pdata, *p0, *p1, *p2, *p3;
+
+ if (WARN_ON(r_bytes > sizeof(r)))
+ return;
+
+ if (ecc) {
+ /* load ecc parity bytes into internal 32-bit buffer */
+ load_ecc8(bch, bch->ecc_buf, ecc);
+ } else {
+ memset(bch->ecc_buf, 0, r_bytes);
+ }
+
+ /* process first unaligned data bytes */
+ m = ((unsigned long)data) & 3;
+ if (m) {
+ mlen = (len < (4-m)) ? len : 4-m;
+ bch_encode_unaligned(bch, data, mlen, bch->ecc_buf);
+ data += mlen;
+ len -= mlen;
+ }
+
+ /* process 32-bit aligned data words */
+ pdata = (uint32_t *)data;
+ mlen = len/4;
+ data += 4*mlen;
+ len -= 4*mlen;
+ memcpy(r, bch->ecc_buf, r_bytes);
+
+ /*
+ * split each 32-bit word into 4 polynomials of weight 8 as follows:
+ *
+ * 31 ...24 23 ...16 15 ... 8 7 ... 0
+ * xxxxxxxx yyyyyyyy zzzzzzzz tttttttt
+ * tttttttt mod g = r0 (precomputed)
+ * zzzzzzzz 00000000 mod g = r1 (precomputed)
+ * yyyyyyyy 00000000 00000000 mod g = r2 (precomputed)
+ * xxxxxxxx 00000000 00000000 00000000 mod g = r3 (precomputed)
+ * xxxxxxxx yyyyyyyy zzzzzzzz tttttttt mod g = r0^r1^r2^r3
+ */
+ while (mlen--) {
+ /* input data is read in big-endian format */
+ w = cpu_to_be32(*pdata++);
+ if (bch->swap_bits)
+ w = (u32)swap_bits(bch, w) |
+ ((u32)swap_bits(bch, w >> 8) << 8) |
+ ((u32)swap_bits(bch, w >> 16) << 16) |
+ ((u32)swap_bits(bch, w >> 24) << 24);
+ w ^= r[0];
+ p0 = tab0 + (l+1)*((w >> 0) & 0xff);
+ p1 = tab1 + (l+1)*((w >> 8) & 0xff);
+ p2 = tab2 + (l+1)*((w >> 16) & 0xff);
+ p3 = tab3 + (l+1)*((w >> 24) & 0xff);
+
+ for (i = 0; i < l; i++)
+ r[i] = r[i+1]^p0[i]^p1[i]^p2[i]^p3[i];
+
+ r[l] = p0[l]^p1[l]^p2[l]^p3[l];
+ }
+ memcpy(bch->ecc_buf, r, r_bytes);
+
+ /* process last unaligned bytes */
+ if (len)
+ bch_encode_unaligned(bch, data, len, bch->ecc_buf);
+
+ /* store ecc parity bytes into original parity buffer */
+ if (ecc)
+ store_ecc8(bch, ecc, bch->ecc_buf);
+}
+EXPORT_SYMBOL_GPL(bch_encode);
+
+static inline int modulo(struct bch_control *bch, unsigned int v)
+{
+ const unsigned int n = GF_N(bch);
+ while (v >= n) {
+ v -= n;
+ v = (v & n) + (v >> GF_M(bch));
+ }
+ return v;
+}
+
+/*
+ * shorter and faster modulo function, only works when v < 2N.
+ */
+static inline int mod_s(struct bch_control *bch, unsigned int v)
+{
+ const unsigned int n = GF_N(bch);
+ return (v < n) ? v : v-n;
+}
+
+static inline int deg(unsigned int poly)
+{
+ /* polynomial degree is the most-significant bit index */
+ return fls(poly)-1;
+}
+
+static inline int parity(unsigned int x)
+{
+ /*
+ * public domain code snippet, lifted from
+ * http://www-graphics.stanford.edu/~seander/bithacks.html
+ */
+ x ^= x >> 1;
+ x ^= x >> 2;
+ x = (x & 0x11111111U) * 0x11111111U;
+ return (x >> 28) & 1;
+}
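+
+/*
+ * Editor's note on the folding above: after the two XOR shifts, bit 0 of
+ * each nibble holds that nibble's parity; the mask keeps just those eight
+ * bits and the multiply by 0x11111111 sums them into the top nibble, so
+ * bit 28 is the parity of the whole word.
+ */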
+
+/* Galois field basic operations: multiply, divide, inverse, etc. */
+
+static inline unsigned int gf_mul(struct bch_control *bch, unsigned int a,
+ unsigned int b)
+{
+ return (a && b) ? bch->a_pow_tab[mod_s(bch, bch->a_log_tab[a]+
+ bch->a_log_tab[b])] : 0;
+}
+
+static inline unsigned int gf_sqr(struct bch_control *bch, unsigned int a)
+{
+ return a ? bch->a_pow_tab[mod_s(bch, 2*bch->a_log_tab[a])] : 0;
+}
+
+static inline unsigned int gf_div(struct bch_control *bch, unsigned int a,
+ unsigned int b)
+{
+ return a ? bch->a_pow_tab[mod_s(bch, bch->a_log_tab[a]+
+ GF_N(bch)-bch->a_log_tab[b])] : 0;
+}
+
+static inline unsigned int gf_inv(struct bch_control *bch, unsigned int a)
+{
+ return bch->a_pow_tab[GF_N(bch)-bch->a_log_tab[a]];
+}
+
+static inline unsigned int a_pow(struct bch_control *bch, int i)
+{
+ return bch->a_pow_tab[modulo(bch, i)];
+}
+
+static inline int a_log(struct bch_control *bch, unsigned int x)
+{
+ return bch->a_log_tab[x];
+}
+
+static inline int a_ilog(struct bch_control *bch, unsigned int x)
+{
+ return mod_s(bch, GF_N(bch)-bch->a_log_tab[x]);
+}
+
+/*
+ * compute 2t syndromes of ecc polynomial, i.e. ecc(a^j) for j=1..2t
+ */
+static void compute_syndromes(struct bch_control *bch, uint32_t *ecc,
+ unsigned int *syn)
+{
+ int i, j, s;
+ unsigned int m;
+ uint32_t poly;
+ const int t = GF_T(bch);
+
+ s = bch->ecc_bits;
+
+ /* make sure extra bits in last ecc word are cleared */
+ m = ((unsigned int)s) & 31;
+ if (m)
+ ecc[s/32] &= ~((1u << (32-m))-1);
+ memset(syn, 0, 2*t*sizeof(*syn));
+
+ /* compute v(a^j) for j=1 .. 2t-1 */
+ do {
+ poly = *ecc++;
+ s -= 32;
+ while (poly) {
+ i = deg(poly);
+ for (j = 0; j < 2*t; j += 2)
+ syn[j] ^= a_pow(bch, (j+1)*(i+s));
+
+ poly ^= (1 << i);
+ }
+ } while (s > 0);
+
+ /* v(a^(2j)) = v(a^j)^2 */
+ for (j = 0; j < t; j++)
+ syn[2*j+1] = gf_sqr(bch, syn[j]);
+}
+
+static void gf_poly_copy(struct gf_poly *dst, struct gf_poly *src)
+{
+ memcpy(dst, src, GF_POLY_SZ(src->deg));
+}
+
+static int compute_error_locator_polynomial(struct bch_control *bch,
+ const unsigned int *syn)
+{
+ const unsigned int t = GF_T(bch);
+ const unsigned int n = GF_N(bch);
+ unsigned int i, j, tmp, l, pd = 1, d = syn[0];
+ struct gf_poly *elp = bch->elp;
+ struct gf_poly *pelp = bch->poly_2t[0];
+ struct gf_poly *elp_copy = bch->poly_2t[1];
+ int k, pp = -1;
+
+ memset(pelp, 0, GF_POLY_SZ(2*t));
+ memset(elp, 0, GF_POLY_SZ(2*t));
+
+ pelp->deg = 0;
+ pelp->c[0] = 1;
+ elp->deg = 0;
+ elp->c[0] = 1;
+
+ /* use simplified binary Berlekamp-Massey algorithm */
+ for (i = 0; (i < t) && (elp->deg <= t); i++) {
+ if (d) {
+ k = 2*i-pp;
+ gf_poly_copy(elp_copy, elp);
+ /* e[i+1](X) = e[i](X)+di*dp^-1*X^2(i-p)*e[p](X) */
+ tmp = a_log(bch, d)+n-a_log(bch, pd);
+ for (j = 0; j <= pelp->deg; j++) {
+ if (pelp->c[j]) {
+ l = a_log(bch, pelp->c[j]);
+ elp->c[j+k] ^= a_pow(bch, tmp+l);
+ }
+ }
+ /* compute l[i+1] = max(l[i], l[p] + 2*(i-p)) */
+ tmp = pelp->deg+k;
+ if (tmp > elp->deg) {
+ elp->deg = tmp;
+ gf_poly_copy(pelp, elp_copy);
+ pd = d;
+ pp = 2*i;
+ }
+ }
+ /* di+1 = S(2i+3)+elp[i+1].1*S(2i+2)+...+elp[i+1].lS(2i+3-l) */
+ if (i < t-1) {
+ d = syn[2*i+2];
+ for (j = 1; j <= elp->deg; j++)
+ d ^= gf_mul(bch, elp->c[j], syn[2*i+2-j]);
+ }
+ }
+ dbg("elp=%s\n", gf_poly_str(elp));
+ return (elp->deg > t) ? -1 : (int)elp->deg;
+}
+
+/*
+ * solve a m x m linear system in GF(2) with an expected number of solutions,
+ * and return the number of found solutions
+ */
+static int solve_linear_system(struct bch_control *bch, unsigned int *rows,
+ unsigned int *sol, int nsol)
+{
+ const int m = GF_M(bch);
+ unsigned int tmp, mask;
+ int rem, c, r, p, k, param[BCH_MAX_M];
+
+ k = 0;
+ mask = 1 << m;
+
+ /* Gaussian elimination */
+ for (c = 0; c < m; c++) {
+ rem = 0;
+ p = c-k;
+ /* find suitable row for elimination */
+ for (r = p; r < m; r++) {
+ if (rows[r] & mask) {
+ if (r != p) {
+ tmp = rows[r];
+ rows[r] = rows[p];
+ rows[p] = tmp;
+ }
+ rem = r+1;
+ break;
+ }
+ }
+ if (rem) {
+ /* perform elimination on remaining rows */
+ tmp = rows[p];
+ for (r = rem; r < m; r++) {
+ if (rows[r] & mask)
+ rows[r] ^= tmp;
+ }
+ } else {
+ /* elimination not needed, store defective row index */
+ param[k++] = c;
+ }
+ mask >>= 1;
+ }
+ /* rewrite system, inserting fake parameter rows */
+ if (k > 0) {
+ p = k;
+ for (r = m-1; r >= 0; r--) {
+ if ((r > m-1-k) && rows[r])
+ /* system has no solution */
+ return 0;
+
+ rows[r] = (p && (r == param[p-1])) ?
+ p--, 1u << (m-r) : rows[r-p];
+ }
+ }
+
+ if (nsol != (1 << k))
+ /* unexpected number of solutions */
+ return 0;
+
+ for (p = 0; p < nsol; p++) {
+ /* set parameters for p-th solution */
+ for (c = 0; c < k; c++)
+ rows[param[c]] = (rows[param[c]] & ~1)|((p >> c) & 1);
+
+ /* compute unique solution */
+ tmp = 0;
+ for (r = m-1; r >= 0; r--) {
+ mask = rows[r] & (tmp|1);
+ tmp |= parity(mask) << (m-r);
+ }
+ sol[p] = tmp >> 1;
+ }
+ return nsol;
+}
+
+/*
+ * this function builds and solves a linear system for finding roots of a degree
+ * 4 affine monic polynomial X^4+aX^2+bX+c over GF(2^m).
+ */
+static int find_affine4_roots(struct bch_control *bch, unsigned int a,
+ unsigned int b, unsigned int c,
+ unsigned int *roots)
+{
+ int i, j, k;
+ const int m = GF_M(bch);
+ unsigned int mask = 0xff, t, rows[16] = {0,};
+
+ j = a_log(bch, b);
+ k = a_log(bch, a);
+ rows[0] = c;
+
+ /* build linear system to solve X^4+aX^2+bX+c = 0 */
+ for (i = 0; i < m; i++) {
+ rows[i+1] = bch->a_pow_tab[4*i]^
+ (a ? bch->a_pow_tab[mod_s(bch, k)] : 0)^
+ (b ? bch->a_pow_tab[mod_s(bch, j)] : 0);
+ j++;
+ k += 2;
+ }
+ /*
+ * transpose 16x16 matrix before passing it to linear solver
+ * warning: this code assumes m < 16
+ */
+ for (j = 8; j != 0; j >>= 1, mask ^= (mask << j)) {
+ for (k = 0; k < 16; k = (k+j+1) & ~j) {
+ t = ((rows[k] >> j)^rows[k+j]) & mask;
+ rows[k] ^= (t << j);
+ rows[k+j] ^= t;
+ }
+ }
+ return solve_linear_system(bch, rows, roots, 4);
+}
+
+/*
+ * compute root r of a degree 1 polynomial over GF(2^m) (returned as log(1/r))
+ */
+static int find_poly_deg1_roots(struct bch_control *bch, struct gf_poly *poly,
+ unsigned int *roots)
+{
+ int n = 0;
+
+ if (poly->c[0])
+ /* poly[X] = bX+c with c!=0, root=c/b */
+ roots[n++] = mod_s(bch, GF_N(bch)-bch->a_log_tab[poly->c[0]]+
+ bch->a_log_tab[poly->c[1]]);
+ return n;
+}
+
+/*
+ * compute roots of a degree 2 polynomial over GF(2^m)
+ */
+static int find_poly_deg2_roots(struct bch_control *bch, struct gf_poly *poly,
+ unsigned int *roots)
+{
+ int n = 0, i, l0, l1, l2;
+ unsigned int u, v, r;
+
+ if (poly->c[0] && poly->c[1]) {
+
+ l0 = bch->a_log_tab[poly->c[0]];
+ l1 = bch->a_log_tab[poly->c[1]];
+ l2 = bch->a_log_tab[poly->c[2]];
+
+ /* using z=a/bX, transform aX^2+bX+c into z^2+z+u (u=ac/b^2) */
+ u = a_pow(bch, l0+l2+2*(GF_N(bch)-l1));
+ /*
+ * let u = sum(li.a^i) i=0..m-1; then compute r = sum(li.xi):
+ * r^2+r = sum(li.(xi^2+xi)) = sum(li.(a^i+Tr(a^i).a^k)) =
+ * u + sum(li.Tr(a^i).a^k) = u+a^k.Tr(sum(li.a^i)) = u+a^k.Tr(u)
+ * i.e. r and r+1 are roots iff Tr(u)=0
+ */
+ r = 0;
+ v = u;
+ while (v) {
+ i = deg(v);
+ r ^= bch->xi_tab[i];
+ v ^= (1 << i);
+ }
+ /* verify root */
+ if ((gf_sqr(bch, r)^r) == u) {
+ /* reverse z=a/bX transformation and compute log(1/r) */
+ roots[n++] = modulo(bch, 2*GF_N(bch)-l1-
+ bch->a_log_tab[r]+l2);
+ roots[n++] = modulo(bch, 2*GF_N(bch)-l1-
+ bch->a_log_tab[r^1]+l2);
+ }
+ }
+ return n;
+}
+
+/*
+ * compute roots of a degree 3 polynomial over GF(2^m)
+ */
+static int find_poly_deg3_roots(struct bch_control *bch, struct gf_poly *poly,
+ unsigned int *roots)
+{
+ int i, n = 0;
+ unsigned int a, b, c, a2, b2, c2, e3, tmp[4];
+
+ if (poly->c[0]) {
+ /* transform polynomial into monic X^3 + a2X^2 + b2X + c2 */
+ e3 = poly->c[3];
+ c2 = gf_div(bch, poly->c[0], e3);
+ b2 = gf_div(bch, poly->c[1], e3);
+ a2 = gf_div(bch, poly->c[2], e3);
+
+ /* (X+a2)(X^3+a2X^2+b2X+c2) = X^4+aX^2+bX+c (affine) */
+ c = gf_mul(bch, a2, c2); /* c = a2c2 */
+ b = gf_mul(bch, a2, b2)^c2; /* b = a2b2 + c2 */
+ a = gf_sqr(bch, a2)^b2; /* a = a2^2 + b2 */
+
+ /* find the 4 roots of this affine polynomial */
+ if (find_affine4_roots(bch, a, b, c, tmp) == 4) {
+ /* remove a2 from final list of roots */
+ for (i = 0; i < 4; i++) {
+ if (tmp[i] != a2)
+ roots[n++] = a_ilog(bch, tmp[i]);
+ }
+ }
+ }
+ return n;
+}
+
+/*
+ * compute roots of a degree 4 polynomial over GF(2^m)
+ */
+static int find_poly_deg4_roots(struct bch_control *bch, struct gf_poly *poly,
+ unsigned int *roots)
+{
+ int i, l, n = 0;
+ unsigned int a, b, c, d, e = 0, f, a2, b2, c2, e4;
+
+ if (poly->c[0] == 0)
+ return 0;
+
+ /* transform polynomial into monic X^4 + aX^3 + bX^2 + cX + d */
+ e4 = poly->c[4];
+ d = gf_div(bch, poly->c[0], e4);
+ c = gf_div(bch, poly->c[1], e4);
+ b = gf_div(bch, poly->c[2], e4);
+ a = gf_div(bch, poly->c[3], e4);
+
+ /* use Y=1/X transformation to get an affine polynomial */
+ if (a) {
+ /* first, eliminate cX by using z=X+e with ae^2+c=0 */
+ if (c) {
+ /* compute e such that e^2 = c/a */
+ f = gf_div(bch, c, a);
+ l = a_log(bch, f);
+ l += (l & 1) ? GF_N(bch) : 0;
+ e = a_pow(bch, l/2);
+ /*
+ * use transformation z=X+e:
+ * z^4+e^4 + a(z^3+ez^2+e^2z+e^3) + b(z^2+e^2) +cz+ce+d
+ * z^4 + az^3 + (ae+b)z^2 + (ae^2+c)z+e^4+be^2+ae^3+ce+d
+ * z^4 + az^3 + (ae+b)z^2 + e^4+be^2+d
+ * z^4 + az^3 + b'z^2 + d'
+ */
+ d = a_pow(bch, 2*l)^gf_mul(bch, b, f)^d;
+ b = gf_mul(bch, a, e)^b;
+ }
+ /* now, use Y=1/X to get Y^4 + b/dY^2 + a/dY + 1/d */
+ if (d == 0)
+ /* assume all roots have multiplicity 1 */
+ return 0;
+
+ c2 = gf_inv(bch, d);
+ b2 = gf_div(bch, a, d);
+ a2 = gf_div(bch, b, d);
+ } else {
+ /* polynomial is already affine */
+ c2 = d;
+ b2 = c;
+ a2 = b;
+ }
+ /* find the 4 roots of this affine polynomial */
+ if (find_affine4_roots(bch, a2, b2, c2, roots) == 4) {
+ for (i = 0; i < 4; i++) {
+ /* post-process roots (reverse transformations) */
+ f = a ? gf_inv(bch, roots[i]) : roots[i];
+ roots[i] = a_ilog(bch, f^e);
+ }
+ n = 4;
+ }
+ return n;
+}
+
+/*
+ * build monic, log-based representation of a polynomial
+ */
+static void gf_poly_logrep(struct bch_control *bch,
+ const struct gf_poly *a, int *rep)
+{
+ int i, d = a->deg, l = GF_N(bch)-a_log(bch, a->c[a->deg]);
+
+ /* represent 0 values with -1; warning, rep[d] is not set to 1 */
+ for (i = 0; i < d; i++)
+ rep[i] = a->c[i] ? mod_s(bch, a_log(bch, a->c[i])+l) : -1;
+}
+
+/*
+ * compute polynomial Euclidean division remainder in GF(2^m)[X]
+ */
+static void gf_poly_mod(struct bch_control *bch, struct gf_poly *a,
+ const struct gf_poly *b, int *rep)
+{
+ int la, p, m;
+ unsigned int i, j, *c = a->c;
+ const unsigned int d = b->deg;
+
+ if (a->deg < d)
+ return;
+
+ /* reuse or compute log representation of denominator */
+ if (!rep) {
+ rep = bch->cache;
+ gf_poly_logrep(bch, b, rep);
+ }
+
+ for (j = a->deg; j >= d; j--) {
+ if (c[j]) {
+ la = a_log(bch, c[j]);
+ p = j-d;
+ for (i = 0; i < d; i++, p++) {
+ m = rep[i];
+ if (m >= 0)
+ c[p] ^= bch->a_pow_tab[mod_s(bch,
+ m+la)];
+ }
+ }
+ }
+ a->deg = d-1;
+ while (!c[a->deg] && a->deg)
+ a->deg--;
+}
+
+/*
+ * compute polynomial Euclidean division quotient in GF(2^m)[X]
+ */
+static void gf_poly_div(struct bch_control *bch, struct gf_poly *a,
+ const struct gf_poly *b, struct gf_poly *q)
+{
+ if (a->deg >= b->deg) {
+ q->deg = a->deg-b->deg;
+ /* compute a mod b (modifies a) */
+ gf_poly_mod(bch, a, b, NULL);
+ /* quotient is stored in upper part of polynomial a */
+ memcpy(q->c, &a->c[b->deg], (1+q->deg)*sizeof(unsigned int));
+ } else {
+ q->deg = 0;
+ q->c[0] = 0;
+ }
+}
+
+/*
+ * compute polynomial GCD (Greatest Common Divisor) in GF(2^m)[X]
+ */
+static struct gf_poly *gf_poly_gcd(struct bch_control *bch, struct gf_poly *a,
+ struct gf_poly *b)
+{
+ struct gf_poly *tmp;
+
+ dbg("gcd(%s,%s)=", gf_poly_str(a), gf_poly_str(b));
+
+ if (a->deg < b->deg) {
+ tmp = b;
+ b = a;
+ a = tmp;
+ }
+
+ while (b->deg > 0) {
+ gf_poly_mod(bch, a, b, NULL);
+ tmp = b;
+ b = a;
+ a = tmp;
+ }
+
+ dbg("%s\n", gf_poly_str(a));
+
+ return a;
+}
+
+/*
+ * Given a polynomial f and an integer k, compute Tr(a^kX) mod f
+ * This is used in Berlekamp Trace algorithm for splitting polynomials
+ */
+static void compute_trace_bk_mod(struct bch_control *bch, int k,
+ const struct gf_poly *f, struct gf_poly *z,
+ struct gf_poly *out)
+{
+ const int m = GF_M(bch);
+ int i, j;
+
+	/* z contains z^(2^j) mod f */
+ z->deg = 1;
+ z->c[0] = 0;
+ z->c[1] = bch->a_pow_tab[k];
+
+ out->deg = 0;
+ memset(out, 0, GF_POLY_SZ(f->deg));
+
+ /* compute f log representation only once */
+ gf_poly_logrep(bch, f, bch->cache);
+
+ for (i = 0; i < m; i++) {
+ /* add a^(k*2^i)(z^(2^i) mod f) and compute (z^(2^i) mod f)^2 */
+ for (j = z->deg; j >= 0; j--) {
+ out->c[j] ^= z->c[j];
+ z->c[2*j] = gf_sqr(bch, z->c[j]);
+ z->c[2*j+1] = 0;
+ }
+ if (z->deg > out->deg)
+ out->deg = z->deg;
+
+ if (i < m-1) {
+ z->deg *= 2;
+			/* z^(2^(i+1)) mod f = (z^(2^i) mod f)^2 mod f */
+ gf_poly_mod(bch, z, f, bch->cache);
+ }
+ }
+ while (!out->c[out->deg] && out->deg)
+ out->deg--;
+
+ dbg("Tr(a^%d.X) mod f = %s\n", k, gf_poly_str(out));
+}
+
+/*
+ * factor a polynomial using Berlekamp Trace algorithm (BTA)
+ */
+static void factor_polynomial(struct bch_control *bch, int k, struct gf_poly *f,
+ struct gf_poly **g, struct gf_poly **h)
+{
+ struct gf_poly *f2 = bch->poly_2t[0];
+ struct gf_poly *q = bch->poly_2t[1];
+ struct gf_poly *tk = bch->poly_2t[2];
+ struct gf_poly *z = bch->poly_2t[3];
+ struct gf_poly *gcd;
+
+ dbg("factoring %s...\n", gf_poly_str(f));
+
+ *g = f;
+ *h = NULL;
+
+ /* tk = Tr(a^k.X) mod f */
+ compute_trace_bk_mod(bch, k, f, z, tk);
+
+ if (tk->deg > 0) {
+ /* compute g = gcd(f, tk) (destructive operation) */
+ gf_poly_copy(f2, f);
+ gcd = gf_poly_gcd(bch, f2, tk);
+ if (gcd->deg < f->deg) {
+ /* compute h=f/gcd(f,tk); this will modify f and q */
+ gf_poly_div(bch, f, gcd, q);
+ /* store g and h in-place (clobbering f) */
+ *h = &((struct gf_poly_deg1 *)f)[gcd->deg].poly;
+ gf_poly_copy(*g, gcd);
+ gf_poly_copy(*h, q);
+ }
+ }
+}
+
+/*
+ * find roots of a polynomial, using BTZ algorithm; see the beginning of this
+ * file for details
+ */
+static int find_poly_roots(struct bch_control *bch, unsigned int k,
+ struct gf_poly *poly, unsigned int *roots)
+{
+ int cnt;
+ struct gf_poly *f1, *f2;
+
+ switch (poly->deg) {
+ /* handle low degree polynomials with ad hoc techniques */
+ case 1:
+ cnt = find_poly_deg1_roots(bch, poly, roots);
+ break;
+ case 2:
+ cnt = find_poly_deg2_roots(bch, poly, roots);
+ break;
+ case 3:
+ cnt = find_poly_deg3_roots(bch, poly, roots);
+ break;
+ case 4:
+ cnt = find_poly_deg4_roots(bch, poly, roots);
+ break;
+ default:
+ /* factor polynomial using Berlekamp Trace Algorithm (BTA) */
+ cnt = 0;
+ if (poly->deg && (k <= GF_M(bch))) {
+ factor_polynomial(bch, k, poly, &f1, &f2);
+ if (f1)
+ cnt += find_poly_roots(bch, k+1, f1, roots);
+ if (f2)
+ cnt += find_poly_roots(bch, k+1, f2, roots+cnt);
+ }
+ break;
+ }
+ return cnt;
+}
+
+#if defined(USE_CHIEN_SEARCH)
+/*
+ * exhaustive root search (Chien) implementation - not used, included only for
+ * reference/comparison tests
+ */
+static int chien_search(struct bch_control *bch, unsigned int len,
+ struct gf_poly *p, unsigned int *roots)
+{
+ int m;
+ unsigned int i, j, syn, syn0, count = 0;
+ const unsigned int k = 8*len+bch->ecc_bits;
+
+ /* use a log-based representation of polynomial */
+ gf_poly_logrep(bch, p, bch->cache);
+ bch->cache[p->deg] = 0;
+ syn0 = gf_div(bch, p->c[0], p->c[p->deg]);
+
+ for (i = GF_N(bch)-k+1; i <= GF_N(bch); i++) {
+ /* compute elp(a^i) */
+ for (j = 1, syn = syn0; j <= p->deg; j++) {
+ m = bch->cache[j];
+ if (m >= 0)
+ syn ^= a_pow(bch, m+j*i);
+ }
+ if (syn == 0) {
+ roots[count++] = GF_N(bch)-i;
+ if (count == p->deg)
+ break;
+ }
+ }
+ return (count == p->deg) ? count : 0;
+}
+#define find_poly_roots(_p, _k, _elp, _loc) chien_search(_p, len, _elp, _loc)
+#endif /* USE_CHIEN_SEARCH */
+
+/**
+ * bch_decode - decode received codeword and find bit error locations
+ * @bch: BCH control structure
+ * @data: received data, ignored if @calc_ecc is provided
+ * @len: data length in bytes, must always be provided
+ * @recv_ecc: received ecc, if NULL then assume it was XORed in @calc_ecc
+ * @calc_ecc: calculated ecc, if NULL then calc_ecc is computed from @data
+ * @syn: hw computed syndrome data (if NULL, syndrome is calculated)
+ * @errloc: output array of error locations
+ *
+ * Returns:
+ * The number of errors found, or -EBADMSG if decoding failed, or -EINVAL if
+ * invalid parameters were provided
+ *
+ * Depending on the available hw BCH support and the need to compute @calc_ecc
+ * separately (using bch_encode()), this function should be called with one of
+ * the following parameter configurations -
+ *
+ * by providing @data and @recv_ecc only:
+ * bch_decode(@bch, @data, @len, @recv_ecc, NULL, NULL, @errloc)
+ *
+ * by providing @recv_ecc and @calc_ecc:
+ * bch_decode(@bch, NULL, @len, @recv_ecc, @calc_ecc, NULL, @errloc)
+ *
+ * by providing ecc = recv_ecc XOR calc_ecc:
+ * bch_decode(@bch, NULL, @len, NULL, ecc, NULL, @errloc)
+ *
+ * by providing syndrome results @syn:
+ * bch_decode(@bch, NULL, @len, NULL, NULL, @syn, @errloc)
+ *
+ * Once bch_decode() has successfully returned with a positive value, error
+ * locations returned in array @errloc should be interpreted as follows -
+ *
+ * if (errloc[n] >= 8*len), then n-th error is located in ecc (no need for
+ * data correction)
+ *
+ * if (errloc[n] < 8*len), then n-th error is located in data and can be
+ * corrected with statement data[errloc[n]/8] ^= 1 << (errloc[n] % 8);
+ *
+ * Note that this function does not perform any data correction by itself, it
+ * merely indicates error locations.
+ */
+int bch_decode(struct bch_control *bch, const uint8_t *data, unsigned int len,
+ const uint8_t *recv_ecc, const uint8_t *calc_ecc,
+ const unsigned int *syn, unsigned int *errloc)
+{
+ const unsigned int ecc_words = BCH_ECC_WORDS(bch);
+ unsigned int nbits;
+ int i, err, nroots;
+ uint32_t sum;
+
+ /* sanity check: make sure data length can be handled */
+ if (8*len > (bch->n-bch->ecc_bits))
+ return -EINVAL;
+
+ /* if caller does not provide syndromes, compute them */
+ if (!syn) {
+ if (!calc_ecc) {
+ /* compute received data ecc into an internal buffer */
+ if (!data || !recv_ecc)
+ return -EINVAL;
+ bch_encode(bch, data, len, NULL);
+ } else {
+ /* load provided calculated ecc */
+ load_ecc8(bch, bch->ecc_buf, calc_ecc);
+ }
+ /* load received ecc or assume it was XORed in calc_ecc */
+ if (recv_ecc) {
+ load_ecc8(bch, bch->ecc_buf2, recv_ecc);
+ /* XOR received and calculated ecc */
+ for (i = 0, sum = 0; i < (int)ecc_words; i++) {
+ bch->ecc_buf[i] ^= bch->ecc_buf2[i];
+ sum |= bch->ecc_buf[i];
+ }
+ if (!sum)
+ /* no error found */
+ return 0;
+ }
+ compute_syndromes(bch, bch->ecc_buf, bch->syn);
+ syn = bch->syn;
+ }
+
+ err = compute_error_locator_polynomial(bch, syn);
+ if (err > 0) {
+ nroots = find_poly_roots(bch, 1, bch->elp, errloc);
+ if (err != nroots)
+ err = -1;
+ }
+ if (err > 0) {
+ /* post-process raw error locations for easier correction */
+ nbits = (len*8)+bch->ecc_bits;
+ for (i = 0; i < err; i++) {
+ if (errloc[i] >= nbits) {
+ err = -1;
+ break;
+ }
+ errloc[i] = nbits-1-errloc[i];
+ if (!bch->swap_bits)
+ errloc[i] = (errloc[i] & ~7) |
+ (7-(errloc[i] & 7));
+ }
+ }
+ return (err >= 0) ? err : -EBADMSG;
+}
+EXPORT_SYMBOL_GPL(bch_decode);
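+
+/*
+ * An illustrative decode-and-correct sketch (not taken from in-tree users):
+ * assume "buf" holds LEN data bytes immediately followed by the received
+ * ecc, and T is the correction capability passed to bch_init(); these names
+ * are hypothetical.
+ *
+ *	unsigned int errloc[T];
+ *	int i, n;
+ *
+ *	n = bch_decode(bch, buf, LEN, buf + LEN, NULL, NULL, errloc);
+ *	for (i = 0; i < n; i++)
+ *		if (errloc[i] < 8 * LEN)
+ *			buf[errloc[i] / 8] ^= 1 << (errloc[i] % 8);
+ */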
+
+/*
+ * generate Galois field lookup tables
+ */
+static int build_gf_tables(struct bch_control *bch, unsigned int poly)
+{
+ unsigned int i, x = 1;
+ const unsigned int k = 1 << deg(poly);
+
+ /* primitive polynomial must be of degree m */
+ if (k != (1u << GF_M(bch)))
+ return -1;
+
+ for (i = 0; i < GF_N(bch); i++) {
+ bch->a_pow_tab[i] = x;
+ bch->a_log_tab[x] = i;
+ if (i && (x == 1))
+ /* polynomial is not primitive (a^i=1 with 0<i<2^m-1) */
+ return -1;
+ x <<= 1;
+ if (x & k)
+ x ^= poly;
+ }
+ bch->a_pow_tab[GF_N(bch)] = 1;
+ bch->a_log_tab[0] = 0;
+
+ return 0;
+}
+
+/*
+ * compute generator polynomial remainder tables for fast encoding
+ */
+static void build_mod8_tables(struct bch_control *bch, const uint32_t *g)
+{
+ int i, j, b, d;
+ uint32_t data, hi, lo, *tab;
+ const int l = BCH_ECC_WORDS(bch);
+ const int plen = DIV_ROUND_UP(bch->ecc_bits+1, 32);
+ const int ecclen = DIV_ROUND_UP(bch->ecc_bits, 32);
+
+ memset(bch->mod8_tab, 0, 4*256*l*sizeof(*bch->mod8_tab));
+
+ for (i = 0; i < 256; i++) {
+ /* p(X)=i is a small polynomial of weight <= 8 */
+ for (b = 0; b < 4; b++) {
+ /* we want to compute (p(X).X^(8*b+deg(g))) mod g(X) */
+ tab = bch->mod8_tab + (b*256+i)*l;
+ data = i << (8*b);
+ while (data) {
+ d = deg(data);
+ /* subtract X^d.g(X) from p(X).X^(8*b+deg(g)) */
+ data ^= g[0] >> (31-d);
+ for (j = 0; j < ecclen; j++) {
+ hi = (d < 31) ? g[j] << (d+1) : 0;
+ lo = (j+1 < plen) ?
+ g[j+1] >> (31-d) : 0;
+ tab[j] ^= hi|lo;
+ }
+ }
+ }
+ }
+}
+
+/*
+ * build a base for factoring degree 2 polynomials
+ */
+static int build_deg2_base(struct bch_control *bch)
+{
+ const int m = GF_M(bch);
+ int i, j, r;
+ unsigned int sum, x, y, remaining, ak = 0, xi[BCH_MAX_M];
+
+ /* find k s.t. Tr(a^k) = 1 and 0 <= k < m */
+ for (i = 0; i < m; i++) {
+ for (j = 0, sum = 0; j < m; j++)
+ sum ^= a_pow(bch, i*(1 << j));
+
+ if (sum) {
+ ak = bch->a_pow_tab[i];
+ break;
+ }
+ }
+ /* find xi, i=0..m-1 such that xi^2+xi = a^i+Tr(a^i).a^k */
+ remaining = m;
+ memset(xi, 0, sizeof(xi));
+
+ for (x = 0; (x <= GF_N(bch)) && remaining; x++) {
+ y = gf_sqr(bch, x)^x;
+ for (i = 0; i < 2; i++) {
+ r = a_log(bch, y);
+ if (y && (r < m) && !xi[r]) {
+ bch->xi_tab[r] = x;
+ xi[r] = 1;
+ remaining--;
+ dbg("x%d = %x\n", r, x);
+ break;
+ }
+ y ^= ak;
+ }
+ }
+ /* should not happen but check anyway */
+ return remaining ? -1 : 0;
+}
+
+static void *bch_alloc(size_t size, int *err)
+{
+ void *ptr;
+
+ ptr = kmalloc(size, GFP_KERNEL);
+ if (ptr == NULL)
+ *err = 1;
+ return ptr;
+}
+
+/*
+ * compute generator polynomial for given (m,t) parameters.
+ */
+static uint32_t *compute_generator_polynomial(struct bch_control *bch)
+{
+ const unsigned int m = GF_M(bch);
+ const unsigned int t = GF_T(bch);
+ int n, err = 0;
+ unsigned int i, j, nbits, r, word, *roots;
+ struct gf_poly *g;
+ uint32_t *genpoly;
+
+ g = bch_alloc(GF_POLY_SZ(m*t), &err);
+ roots = bch_alloc((bch->n+1)*sizeof(*roots), &err);
+ genpoly = bch_alloc(DIV_ROUND_UP(m*t+1, 32)*sizeof(*genpoly), &err);
+
+ if (err) {
+ kfree(genpoly);
+ genpoly = NULL;
+ goto finish;
+ }
+
+ /* enumerate all roots of g(X) */
+	memset(roots, 0, (bch->n+1)*sizeof(*roots));
+ for (i = 0; i < t; i++) {
+ for (j = 0, r = 2*i+1; j < m; j++) {
+ roots[r] = 1;
+ r = mod_s(bch, 2*r);
+ }
+ }
+ /* build generator polynomial g(X) */
+ g->deg = 0;
+ g->c[0] = 1;
+ for (i = 0; i < GF_N(bch); i++) {
+ if (roots[i]) {
+ /* multiply g(X) by (X+root) */
+ r = bch->a_pow_tab[i];
+ g->c[g->deg+1] = 1;
+ for (j = g->deg; j > 0; j--)
+ g->c[j] = gf_mul(bch, g->c[j], r)^g->c[j-1];
+
+ g->c[0] = gf_mul(bch, g->c[0], r);
+ g->deg++;
+ }
+ }
+ /* store left-justified binary representation of g(X) */
+ n = g->deg+1;
+ i = 0;
+
+ while (n > 0) {
+ nbits = (n > 32) ? 32 : n;
+ for (j = 0, word = 0; j < nbits; j++) {
+ if (g->c[n-1-j])
+ word |= 1u << (31-j);
+ }
+ genpoly[i++] = word;
+ n -= nbits;
+ }
+ bch->ecc_bits = g->deg;
+
+finish:
+ kfree(g);
+ kfree(roots);
+
+ return genpoly;
+}
+
+/**
+ * bch_init - initialize a BCH encoder/decoder
+ * @m: Galois field order, should be in the range 5-15
+ * @t: maximum error correction capability, in bits
+ * @prim_poly: user-provided primitive polynomial (or 0 to use default)
+ * @swap_bits: swap bits within data and syndrome bytes
+ *
+ * Returns:
+ * a newly allocated BCH control structure if successful, NULL otherwise
+ *
+ * This initialization can take some time, as lookup tables are built for fast
+ * encoding/decoding; make sure not to call this function from a time critical
+ * path. Usually, bch_init() should be called on module/driver init and
+ * bch_free() should be called to release memory on exit.
+ *
+ * You may provide your own primitive polynomial of degree @m in argument
+ * @prim_poly, or let bch_init() use its default polynomial.
+ *
+ * Once bch_init() has successfully returned a pointer to a newly allocated
+ * BCH control structure, ecc length in bytes is given by member @ecc_bytes of
+ * the structure.
+ */
+struct bch_control *bch_init(int m, int t, unsigned int prim_poly,
+ bool swap_bits)
+{
+ int err = 0;
+ unsigned int i, words;
+ uint32_t *genpoly;
+ struct bch_control *bch = NULL;
+
+ const int min_m = 5;
+
+ /* default primitive polynomials */
+ static const unsigned int prim_poly_tab[] = {
+ 0x25, 0x43, 0x83, 0x11d, 0x211, 0x409, 0x805, 0x1053, 0x201b,
+ 0x402b, 0x8003,
+ };
+
+#if defined(CONFIG_BCH_CONST_PARAMS)
+ if ((m != (CONFIG_BCH_CONST_M)) || (t != (CONFIG_BCH_CONST_T))) {
+ printk(KERN_ERR "bch encoder/decoder was configured to support "
+ "parameters m=%d, t=%d only!\n",
+ CONFIG_BCH_CONST_M, CONFIG_BCH_CONST_T);
+ goto fail;
+ }
+#endif
+ if ((m < min_m) || (m > BCH_MAX_M))
+ /*
+ * values of m greater than 15 are not currently supported;
+ * supporting m > 15 would require changing table base type
+ * (uint16_t) and a small patch in matrix transposition
+ */
+ goto fail;
+
+ if (t > BCH_MAX_T)
+ /*
+ * we can support larger than 64 bits if necessary, at the
+ * cost of higher stack usage.
+ */
+ goto fail;
+
+ /* sanity checks */
+ if ((t < 1) || (m*t >= ((1 << m)-1)))
+ /* invalid t value */
+ goto fail;
+
+ /* select a primitive polynomial for generating GF(2^m) */
+ if (prim_poly == 0)
+ prim_poly = prim_poly_tab[m-min_m];
+
+ bch = kzalloc(sizeof(*bch), GFP_KERNEL);
+ if (bch == NULL)
+ goto fail;
+
+ bch->m = m;
+ bch->t = t;
+ bch->n = (1 << m)-1;
+ words = DIV_ROUND_UP(m*t, 32);
+ bch->ecc_bytes = DIV_ROUND_UP(m*t, 8);
+ bch->a_pow_tab = bch_alloc((1+bch->n)*sizeof(*bch->a_pow_tab), &err);
+ bch->a_log_tab = bch_alloc((1+bch->n)*sizeof(*bch->a_log_tab), &err);
+ bch->mod8_tab = bch_alloc(words*1024*sizeof(*bch->mod8_tab), &err);
+ bch->ecc_buf = bch_alloc(words*sizeof(*bch->ecc_buf), &err);
+ bch->ecc_buf2 = bch_alloc(words*sizeof(*bch->ecc_buf2), &err);
+ bch->xi_tab = bch_alloc(m*sizeof(*bch->xi_tab), &err);
+ bch->syn = bch_alloc(2*t*sizeof(*bch->syn), &err);
+ bch->cache = bch_alloc(2*t*sizeof(*bch->cache), &err);
+ bch->elp = bch_alloc((t+1)*sizeof(struct gf_poly_deg1), &err);
+ bch->swap_bits = swap_bits;
+
+ for (i = 0; i < ARRAY_SIZE(bch->poly_2t); i++)
+ bch->poly_2t[i] = bch_alloc(GF_POLY_SZ(2*t), &err);
+
+ if (err)
+ goto fail;
+
+ err = build_gf_tables(bch, prim_poly);
+ if (err)
+ goto fail;
+
+ /* use generator polynomial for computing encoding tables */
+ genpoly = compute_generator_polynomial(bch);
+ if (genpoly == NULL)
+ goto fail;
+
+ build_mod8_tables(bch, genpoly);
+ kfree(genpoly);
+
+ err = build_deg2_base(bch);
+ if (err)
+ goto fail;
+
+ return bch;
+
+fail:
+ bch_free(bch);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(bch_init);
+
+/**
+ * bch_free - free the BCH control structure
+ * @bch: BCH control structure to release
+ */
+void bch_free(struct bch_control *bch)
+{
+ unsigned int i;
+
+ if (bch) {
+ kfree(bch->a_pow_tab);
+ kfree(bch->a_log_tab);
+ kfree(bch->mod8_tab);
+ kfree(bch->ecc_buf);
+ kfree(bch->ecc_buf2);
+ kfree(bch->xi_tab);
+ kfree(bch->syn);
+ kfree(bch->cache);
+ kfree(bch->elp);
+
+ for (i = 0; i < ARRAY_SIZE(bch->poly_2t); i++)
+ kfree(bch->poly_2t[i]);
+
+ kfree(bch);
+ }
+}
+EXPORT_SYMBOL_GPL(bch_free);
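+
+/*
+ * A minimal module lifecycle sketch, following the bch_init() documentation
+ * above; the (m, t) values are arbitrary examples, not recommendations.
+ *
+ *	static struct bch_control *bch;
+ *
+ *	static int __init example_init(void)
+ *	{
+ *		bch = bch_init(13, 4, 0, false);
+ *		return bch ? 0 : -ENOMEM;
+ *	}
+ *
+ *	static void __exit example_exit(void)
+ *	{
+ *		bch_free(bch);
+ *	}
+ */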
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ivan Djelic <ivan.djelic@parrot.com>");
+MODULE_DESCRIPTION("Binary BCH encoder/decoder");
diff --git a/lib/bitfield_kunit.c b/lib/bitfield_kunit.c
new file mode 100644
index 000000000..1473d8b4b
--- /dev/null
+++ b/lib/bitfield_kunit.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Test cases for bitfield helpers.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <kunit/test.h>
+#include <linux/bitfield.h>
+
+#define CHECK_ENC_GET_U(tp, v, field, res) do { \
+ { \
+ u##tp _res; \
+ \
+ _res = u##tp##_encode_bits(v, field); \
+ KUNIT_ASSERT_FALSE_MSG(context, _res != res, \
+ "u" #tp "_encode_bits(" #v ", " #field ") is 0x%llx != " #res "\n", \
+ (u64)_res); \
+ KUNIT_ASSERT_FALSE(context, \
+ u##tp##_get_bits(_res, field) != v); \
+ } \
+ } while (0)
+
+#define CHECK_ENC_GET_LE(tp, v, field, res) do { \
+ { \
+ __le##tp _res; \
+ \
+ _res = le##tp##_encode_bits(v, field); \
+ KUNIT_ASSERT_FALSE_MSG(context, \
+ _res != cpu_to_le##tp(res), \
+ "le" #tp "_encode_bits(" #v ", " #field ") is 0x%llx != 0x%llx",\
+ (u64)le##tp##_to_cpu(_res), \
+ (u64)(res)); \
+ KUNIT_ASSERT_FALSE(context, \
+ le##tp##_get_bits(_res, field) != v);\
+ } \
+ } while (0)
+
+#define CHECK_ENC_GET_BE(tp, v, field, res) do { \
+ { \
+ __be##tp _res; \
+ \
+ _res = be##tp##_encode_bits(v, field); \
+ KUNIT_ASSERT_FALSE_MSG(context, \
+ _res != cpu_to_be##tp(res), \
+ "be" #tp "_encode_bits(" #v ", " #field ") is 0x%llx != 0x%llx", \
+ (u64)be##tp##_to_cpu(_res), \
+ (u64)(res)); \
+ KUNIT_ASSERT_FALSE(context, \
+ be##tp##_get_bits(_res, field) != v);\
+ } \
+ } while (0)
+
+#define CHECK_ENC_GET(tp, v, field, res) do { \
+ CHECK_ENC_GET_U(tp, v, field, res); \
+ CHECK_ENC_GET_LE(tp, v, field, res); \
+ CHECK_ENC_GET_BE(tp, v, field, res); \
+ } while (0)
+
+static void __init test_bitfields_constants(struct kunit *context)
+{
+ /*
+ * NOTE
+ * This whole function compiles (or at least should, if everything
+ * is going according to plan) to nothing after optimisation.
+ */
+
+ CHECK_ENC_GET(16, 1, 0x000f, 0x0001);
+ CHECK_ENC_GET(16, 3, 0x00f0, 0x0030);
+ CHECK_ENC_GET(16, 5, 0x0f00, 0x0500);
+ CHECK_ENC_GET(16, 7, 0xf000, 0x7000);
+ CHECK_ENC_GET(16, 14, 0x000f, 0x000e);
+ CHECK_ENC_GET(16, 15, 0x00f0, 0x00f0);
+
+ CHECK_ENC_GET_U(8, 1, 0x0f, 0x01);
+ CHECK_ENC_GET_U(8, 3, 0xf0, 0x30);
+ CHECK_ENC_GET_U(8, 14, 0x0f, 0x0e);
+ CHECK_ENC_GET_U(8, 15, 0xf0, 0xf0);
+
+ CHECK_ENC_GET(32, 1, 0x00000f00, 0x00000100);
+ CHECK_ENC_GET(32, 3, 0x0000f000, 0x00003000);
+ CHECK_ENC_GET(32, 5, 0x000f0000, 0x00050000);
+ CHECK_ENC_GET(32, 7, 0x00f00000, 0x00700000);
+ CHECK_ENC_GET(32, 14, 0x0f000000, 0x0e000000);
+ CHECK_ENC_GET(32, 15, 0xf0000000, 0xf0000000);
+
+ CHECK_ENC_GET(64, 1, 0x00000f0000000000ull, 0x0000010000000000ull);
+ CHECK_ENC_GET(64, 3, 0x0000f00000000000ull, 0x0000300000000000ull);
+ CHECK_ENC_GET(64, 5, 0x000f000000000000ull, 0x0005000000000000ull);
+ CHECK_ENC_GET(64, 7, 0x00f0000000000000ull, 0x0070000000000000ull);
+ CHECK_ENC_GET(64, 14, 0x0f00000000000000ull, 0x0e00000000000000ull);
+ CHECK_ENC_GET(64, 15, 0xf000000000000000ull, 0xf000000000000000ull);
+}
+
+#define CHECK(tp, mask) do { \
+ u64 v; \
+ \
+ for (v = 0; v < 1 << hweight32(mask); v++) \
+ KUNIT_ASSERT_FALSE(context, \
+ tp##_encode_bits(v, mask) != v << __ffs64(mask));\
+ } while (0)
+
+static void __init test_bitfields_variables(struct kunit *context)
+{
+ CHECK(u8, 0x0f);
+ CHECK(u8, 0xf0);
+ CHECK(u8, 0x38);
+
+ CHECK(u16, 0x0038);
+ CHECK(u16, 0x0380);
+ CHECK(u16, 0x3800);
+ CHECK(u16, 0x8000);
+
+ CHECK(u32, 0x80000000);
+ CHECK(u32, 0x7f000000);
+ CHECK(u32, 0x07e00000);
+ CHECK(u32, 0x00018000);
+
+ CHECK(u64, 0x8000000000000000ull);
+ CHECK(u64, 0x7f00000000000000ull);
+ CHECK(u64, 0x0001800000000000ull);
+ CHECK(u64, 0x0000000080000000ull);
+ CHECK(u64, 0x000000007f000000ull);
+ CHECK(u64, 0x0000000018000000ull);
+ CHECK(u64, 0x0000001f8000000ull);
+}
+
+#ifdef TEST_BITFIELD_COMPILE
+static void __init test_bitfields_compile(struct kunit *context)
+{
+ /* these should fail compilation */
+ CHECK_ENC_GET(16, 16, 0x0f00, 0x1000);
+ u32_encode_bits(7, 0x06000000);
+
+ /* this should at least give a warning */
+ u16_encode_bits(0, 0x60000);
+}
+#endif
+
+static struct kunit_case __refdata bitfields_test_cases[] = {
+ KUNIT_CASE(test_bitfields_constants),
+ KUNIT_CASE(test_bitfields_variables),
+ {}
+};
+
+static struct kunit_suite bitfields_test_suite = {
+ .name = "bitfields",
+ .test_cases = bitfields_test_cases,
+};
+
+kunit_test_suites(&bitfields_test_suite);
+
+MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
+MODULE_LICENSE("GPL");
diff --git a/lib/bitmap.c b/lib/bitmap.c
new file mode 100644
index 000000000..ddb31015e
--- /dev/null
+++ b/lib/bitmap.c
@@ -0,0 +1,1551 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * lib/bitmap.c
+ * Helper functions for bitmap.h.
+ */
+
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/bug.h>
+#include <linux/ctype.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/thread_info.h>
+#include <linux/uaccess.h>
+
+#include <asm/page.h>
+
+#include "kstrtox.h"
+
+/**
+ * DOC: bitmap introduction
+ *
+ * bitmaps provide an array of bits, implemented using an
+ * array of unsigned longs. The number of valid bits in a
+ * given bitmap does _not_ need to be an exact multiple of
+ * BITS_PER_LONG.
+ *
+ * The possible unused bits in the last, partially used word
+ * of a bitmap are 'don't care'. The implementation makes
+ * no particular effort to keep them zero. It ensures that
+ * their value will not affect the results of any operation.
+ * The bitmap operations that return Boolean (bitmap_empty,
+ * for example) or scalar (bitmap_weight, for example) results
+ * carefully filter out these unused bits from impacting their
+ * results.
+ *
+ * The byte ordering of bitmaps is more natural on little
+ * endian architectures. See the big-endian headers
+ * include/asm-ppc64/bitops.h and include/asm-s390/bitops.h
+ * for the best explanations of this ordering.
+ */
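+
+/*
+ * A minimal sketch of the above, using the standard <linux/bitmap.h>
+ * helpers:
+ *
+ *	DECLARE_BITMAP(map, 70);	(70 valid bits; two words on 64-bit)
+ *
+ *	bitmap_zero(map, 70);
+ *	bitmap_set(map, 64, 6);		(sets bits 64..69 in the last word)
+ *
+ * bitmap_weight(map, 70) then returns 6, while bits 70..127 of the last
+ * word stay "don't care" and never leak into results.
+ */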
+
+bool __bitmap_equal(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int bits)
+{
+ unsigned int k, lim = bits/BITS_PER_LONG;
+ for (k = 0; k < lim; ++k)
+ if (bitmap1[k] != bitmap2[k])
+ return false;
+
+ if (bits % BITS_PER_LONG)
+ if ((bitmap1[k] ^ bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits))
+ return false;
+
+ return true;
+}
+EXPORT_SYMBOL(__bitmap_equal);
+
+bool __bitmap_or_equal(const unsigned long *bitmap1,
+ const unsigned long *bitmap2,
+ const unsigned long *bitmap3,
+ unsigned int bits)
+{
+ unsigned int k, lim = bits / BITS_PER_LONG;
+ unsigned long tmp;
+
+ for (k = 0; k < lim; ++k) {
+ if ((bitmap1[k] | bitmap2[k]) != bitmap3[k])
+ return false;
+ }
+
+ if (!(bits % BITS_PER_LONG))
+ return true;
+
+ tmp = (bitmap1[k] | bitmap2[k]) ^ bitmap3[k];
+ return (tmp & BITMAP_LAST_WORD_MASK(bits)) == 0;
+}
+
+void __bitmap_complement(unsigned long *dst, const unsigned long *src, unsigned int bits)
+{
+ unsigned int k, lim = BITS_TO_LONGS(bits);
+ for (k = 0; k < lim; ++k)
+ dst[k] = ~src[k];
+}
+EXPORT_SYMBOL(__bitmap_complement);
+
+/**
+ * __bitmap_shift_right - logical right shift of the bits in a bitmap
+ * @dst : destination bitmap
+ * @src : source bitmap
+ * @shift : shift by this many bits
+ * @nbits : bitmap size, in bits
+ *
+ * Shifting right (dividing) means moving bits in the MS -> LS bit
+ * direction. Zeros are fed into the vacated MS positions and the
+ * LS bits shifted off the bottom are lost.
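+ *
+ * For example (an illustrative 8-bit sketch), with @nbits = 8, shifting
+ * 0b10110100 right by 2 yields 0b00101101.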
+ */
+void __bitmap_shift_right(unsigned long *dst, const unsigned long *src,
+ unsigned shift, unsigned nbits)
+{
+ unsigned k, lim = BITS_TO_LONGS(nbits);
+ unsigned off = shift/BITS_PER_LONG, rem = shift % BITS_PER_LONG;
+ unsigned long mask = BITMAP_LAST_WORD_MASK(nbits);
+ for (k = 0; off + k < lim; ++k) {
+ unsigned long upper, lower;
+
+ /*
+ * If shift is not word aligned, take lower rem bits of
+ * word above and make them the top rem bits of result.
+ */
+ if (!rem || off + k + 1 >= lim)
+ upper = 0;
+ else {
+ upper = src[off + k + 1];
+ if (off + k + 1 == lim - 1)
+ upper &= mask;
+ upper <<= (BITS_PER_LONG - rem);
+ }
+ lower = src[off + k];
+ if (off + k == lim - 1)
+ lower &= mask;
+ lower >>= rem;
+ dst[k] = lower | upper;
+ }
+ if (off)
+ memset(&dst[lim - off], 0, off*sizeof(unsigned long));
+}
+EXPORT_SYMBOL(__bitmap_shift_right);
+
+
+/**
+ * __bitmap_shift_left - logical left shift of the bits in a bitmap
+ * @dst : destination bitmap
+ * @src : source bitmap
+ * @shift : shift by this many bits
+ * @nbits : bitmap size, in bits
+ *
+ * Shifting left (multiplying) means moving bits in the LS -> MS
+ * direction. Zeros are fed into the vacated LS bit positions
+ * and those MS bits shifted off the top are lost.
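+ *
+ * For example (an illustrative 8-bit sketch), with @nbits = 8, shifting
+ * 0b00101101 left by 2 yields 0b10110100.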
+ */
+
+void __bitmap_shift_left(unsigned long *dst, const unsigned long *src,
+ unsigned int shift, unsigned int nbits)
+{
+ int k;
+ unsigned int lim = BITS_TO_LONGS(nbits);
+ unsigned int off = shift/BITS_PER_LONG, rem = shift % BITS_PER_LONG;
+ for (k = lim - off - 1; k >= 0; --k) {
+ unsigned long upper, lower;
+
+ /*
+ * If shift is not word aligned, take upper rem bits of
+ * word below and make them the bottom rem bits of result.
+ */
+ if (rem && k > 0)
+ lower = src[k - 1] >> (BITS_PER_LONG - rem);
+ else
+ lower = 0;
+ upper = src[k] << rem;
+ dst[k + off] = lower | upper;
+ }
+ if (off)
+ memset(dst, 0, off*sizeof(unsigned long));
+}
+EXPORT_SYMBOL(__bitmap_shift_left);
+
+/**
+ * bitmap_cut() - remove bit region from bitmap and right shift remaining bits
+ * @dst: destination bitmap, might overlap with src
+ * @src: source bitmap
+ * @first: start bit of region to be removed
+ * @cut: number of bits to remove
+ * @nbits: bitmap size, in bits
+ *
+ * Set the n-th bit of @dst iff the n-th bit of @src is set and
+ * n is less than @first, or the m-th bit of @src is set for any
+ * m such that @first <= n < nbits, and m = n + @cut.
+ *
+ * In pictures, example for a big-endian 32-bit architecture:
+ *
+ * The @src bitmap is::
+ *
+ * 31 63
+ * | |
+ * 10000000 11000001 11110010 00010101 10000000 11000001 01110010 00010101
+ * | | | |
+ * 16 14 0 32
+ *
+ * if @cut is 3, and @first is 14, bits 14-16 in @src are cut and @dst is::
+ *
+ * 31 63
+ * | |
+ * 10110000 00011000 00110010 00010101 00010000 00011000 00101110 01000010
+ * | | |
+ * 14 (bit 17 0 32
+ * from @src)
+ *
+ * Note that @dst and @src might overlap partially or entirely.
+ *
+ * This is implemented in the obvious way, with a shift and carry
+ * step for each moved bit. Optimisation is left as an exercise
+ * for the compiler.
+ */
+void bitmap_cut(unsigned long *dst, const unsigned long *src,
+ unsigned int first, unsigned int cut, unsigned int nbits)
+{
+ unsigned int len = BITS_TO_LONGS(nbits);
+ unsigned long keep = 0, carry;
+ int i;
+
+ if (first % BITS_PER_LONG) {
+ keep = src[first / BITS_PER_LONG] &
+ (~0UL >> (BITS_PER_LONG - first % BITS_PER_LONG));
+ }
+
+ memmove(dst, src, len * sizeof(*dst));
+
+ while (cut--) {
+ for (i = first / BITS_PER_LONG; i < len; i++) {
+ if (i < len - 1)
+ carry = dst[i + 1] & 1UL;
+ else
+ carry = 0;
+
+ dst[i] = (dst[i] >> 1) | (carry << (BITS_PER_LONG - 1));
+ }
+ }
+
+ dst[first / BITS_PER_LONG] &= ~0UL << (first % BITS_PER_LONG);
+ dst[first / BITS_PER_LONG] |= keep;
+}
+EXPORT_SYMBOL(bitmap_cut);
+
+bool __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int bits)
+{
+ unsigned int k;
+ unsigned int lim = bits/BITS_PER_LONG;
+ unsigned long result = 0;
+
+ for (k = 0; k < lim; k++)
+ result |= (dst[k] = bitmap1[k] & bitmap2[k]);
+ if (bits % BITS_PER_LONG)
+ result |= (dst[k] = bitmap1[k] & bitmap2[k] &
+ BITMAP_LAST_WORD_MASK(bits));
+ return result != 0;
+}
+EXPORT_SYMBOL(__bitmap_and);
+
+void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int bits)
+{
+ unsigned int k;
+ unsigned int nr = BITS_TO_LONGS(bits);
+
+ for (k = 0; k < nr; k++)
+ dst[k] = bitmap1[k] | bitmap2[k];
+}
+EXPORT_SYMBOL(__bitmap_or);
+
+void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int bits)
+{
+ unsigned int k;
+ unsigned int nr = BITS_TO_LONGS(bits);
+
+ for (k = 0; k < nr; k++)
+ dst[k] = bitmap1[k] ^ bitmap2[k];
+}
+EXPORT_SYMBOL(__bitmap_xor);
+
+bool __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int bits)
+{
+ unsigned int k;
+ unsigned int lim = bits/BITS_PER_LONG;
+ unsigned long result = 0;
+
+ for (k = 0; k < lim; k++)
+ result |= (dst[k] = bitmap1[k] & ~bitmap2[k]);
+ if (bits % BITS_PER_LONG)
+ result |= (dst[k] = bitmap1[k] & ~bitmap2[k] &
+ BITMAP_LAST_WORD_MASK(bits));
+ return result != 0;
+}
+EXPORT_SYMBOL(__bitmap_andnot);
+
+void __bitmap_replace(unsigned long *dst,
+ const unsigned long *old, const unsigned long *new,
+ const unsigned long *mask, unsigned int nbits)
+{
+ unsigned int k;
+ unsigned int nr = BITS_TO_LONGS(nbits);
+
+ for (k = 0; k < nr; k++)
+ dst[k] = (old[k] & ~mask[k]) | (new[k] & mask[k]);
+}
+EXPORT_SYMBOL(__bitmap_replace);
+
+bool __bitmap_intersects(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int bits)
+{
+ unsigned int k, lim = bits/BITS_PER_LONG;
+ for (k = 0; k < lim; ++k)
+ if (bitmap1[k] & bitmap2[k])
+ return true;
+
+ if (bits % BITS_PER_LONG)
+ if ((bitmap1[k] & bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits))
+ return true;
+ return false;
+}
+EXPORT_SYMBOL(__bitmap_intersects);
+
+bool __bitmap_subset(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int bits)
+{
+ unsigned int k, lim = bits/BITS_PER_LONG;
+ for (k = 0; k < lim; ++k)
+ if (bitmap1[k] & ~bitmap2[k])
+ return false;
+
+ if (bits % BITS_PER_LONG)
+ if ((bitmap1[k] & ~bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits))
+ return false;
+ return true;
+}
+EXPORT_SYMBOL(__bitmap_subset);
+
+#define BITMAP_WEIGHT(FETCH, bits) \
+({ \
+ unsigned int __bits = (bits), idx, w = 0; \
+ \
+ for (idx = 0; idx < __bits / BITS_PER_LONG; idx++) \
+ w += hweight_long(FETCH); \
+ \
+ if (__bits % BITS_PER_LONG) \
+ w += hweight_long((FETCH) & BITMAP_LAST_WORD_MASK(__bits)); \
+ \
+ w; \
+})
+
+unsigned int __bitmap_weight(const unsigned long *bitmap, unsigned int bits)
+{
+ return BITMAP_WEIGHT(bitmap[idx], bits);
+}
+EXPORT_SYMBOL(__bitmap_weight);
+
+unsigned int __bitmap_weight_and(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int bits)
+{
+ return BITMAP_WEIGHT(bitmap1[idx] & bitmap2[idx], bits);
+}
+EXPORT_SYMBOL(__bitmap_weight_and);
+
+void __bitmap_set(unsigned long *map, unsigned int start, int len)
+{
+ unsigned long *p = map + BIT_WORD(start);
+ const unsigned int size = start + len;
+ int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
+ unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);
+
+ while (len - bits_to_set >= 0) {
+ *p |= mask_to_set;
+ len -= bits_to_set;
+ bits_to_set = BITS_PER_LONG;
+ mask_to_set = ~0UL;
+ p++;
+ }
+ if (len) {
+ mask_to_set &= BITMAP_LAST_WORD_MASK(size);
+ *p |= mask_to_set;
+ }
+}
+EXPORT_SYMBOL(__bitmap_set);
+
+void __bitmap_clear(unsigned long *map, unsigned int start, int len)
+{
+ unsigned long *p = map + BIT_WORD(start);
+ const unsigned int size = start + len;
+ int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
+ unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);
+
+ while (len - bits_to_clear >= 0) {
+ *p &= ~mask_to_clear;
+ len -= bits_to_clear;
+ bits_to_clear = BITS_PER_LONG;
+ mask_to_clear = ~0UL;
+ p++;
+ }
+ if (len) {
+ mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
+ *p &= ~mask_to_clear;
+ }
+}
+EXPORT_SYMBOL(__bitmap_clear);
+
+/**
+ * bitmap_find_next_zero_area_off - find a contiguous aligned zero area
+ * @map: The address to base the search on
+ * @size: The bitmap size in bits
+ * @start: The bitnumber to start searching at
+ * @nr: The number of zeroed bits we're looking for
+ * @align_mask: Alignment mask for zero area
+ * @align_offset: Alignment offset for zero area.
+ *
+ * The @align_mask should be one less than a power of 2; the effect is that
+ * the bit offset of all zero areas this function finds plus @align_offset
+ * is a multiple of that power of 2.
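+ *
+ * For example (illustrative), @align_mask = 3 with @align_offset = 0
+ * restricts the bit offset of any returned zero area to a multiple of 4.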
+ */
+unsigned long bitmap_find_next_zero_area_off(unsigned long *map,
+ unsigned long size,
+ unsigned long start,
+ unsigned int nr,
+ unsigned long align_mask,
+ unsigned long align_offset)
+{
+ unsigned long index, end, i;
+again:
+ index = find_next_zero_bit(map, size, start);
+
+ /* Align allocation */
+ index = __ALIGN_MASK(index + align_offset, align_mask) - align_offset;
+
+ end = index + nr;
+ if (end > size)
+ return end;
+ i = find_next_bit(map, end, index);
+ if (i < end) {
+ start = i + 1;
+ goto again;
+ }
+ return index;
+}
+EXPORT_SYMBOL(bitmap_find_next_zero_area_off);
+
+/*
+ * Bitmap printing & parsing functions: first version by Nadia Yvette Chambers,
+ * second version by Paul Jackson, third by Joe Korty.
+ */
+
+/**
+ * bitmap_parse_user - convert an ASCII hex string in a user buffer into a bitmap
+ *
+ * @ubuf: pointer to user buffer containing string.
+ * @ulen: buffer size in bytes. If string is smaller than this
+ * then it must be terminated with a \0.
+ * @maskp: pointer to bitmap array that will contain result.
+ * @nmaskbits: size of bitmap, in bits.
+ */
+int bitmap_parse_user(const char __user *ubuf,
+ unsigned int ulen, unsigned long *maskp,
+ int nmaskbits)
+{
+ char *buf;
+ int ret;
+
+ buf = memdup_user_nul(ubuf, ulen);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ ret = bitmap_parse(buf, UINT_MAX, maskp, nmaskbits);
+
+ kfree(buf);
+ return ret;
+}
+EXPORT_SYMBOL(bitmap_parse_user);
+
+/**
+ * bitmap_print_to_pagebuf - convert bitmap to list or hex format ASCII string
+ * @list: indicates whether the bitmap must be printed as a list
+ * @buf: page aligned buffer into which string is placed
+ * @maskp: pointer to bitmap to convert
+ * @nmaskbits: size of bitmap, in bits
+ *
+ * Output format is a comma-separated list of decimal numbers and
+ * ranges if list is specified, or hex digits grouped into comma-separated
+ * sets of 8 digits/set otherwise.
+ *
+ * It is assumed that @buf is a pointer into a PAGE_SIZE, page-aligned
+ * area and that sufficient storage remains at @buf to accommodate the
+ * bitmap_print_to_pagebuf() output. Returns the number of characters
+ * actually printed to @buf, excluding terminating '\0'.
+ */
+int bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp,
+ int nmaskbits)
+{
+ ptrdiff_t len = PAGE_SIZE - offset_in_page(buf);
+
+ return list ? scnprintf(buf, len, "%*pbl\n", nmaskbits, maskp) :
+ scnprintf(buf, len, "%*pb\n", nmaskbits, maskp);
+}
+EXPORT_SYMBOL(bitmap_print_to_pagebuf);
+
+/**
+ * bitmap_print_to_buf - convert bitmap to list or hex format ASCII string
+ * @list: indicates whether the bitmap must be list
+ * true: print in decimal list format
+ * false: print in hexadecimal bitmask format
+ * @buf: buffer into which string is placed
+ * @maskp: pointer to bitmap to convert
+ * @nmaskbits: size of bitmap, in bits
+ * @off: offset in the string from which we are copying; we copy to @buf
+ * @count: the maximum number of bytes to print
+ */
+static int bitmap_print_to_buf(bool list, char *buf, const unsigned long *maskp,
+ int nmaskbits, loff_t off, size_t count)
+{
+ const char *fmt = list ? "%*pbl\n" : "%*pb\n";
+ ssize_t size;
+ void *data;
+
+ data = kasprintf(GFP_KERNEL, fmt, nmaskbits, maskp);
+ if (!data)
+ return -ENOMEM;
+
+ size = memory_read_from_buffer(buf, count, &off, data, strlen(data) + 1);
+ kfree(data);
+
+ return size;
+}
+
+/**
+ * bitmap_print_bitmask_to_buf - convert bitmap to hex bitmask format ASCII string
+ * @buf: buffer into which string is placed
+ * @maskp: pointer to bitmap to convert
+ * @nmaskbits: size of bitmap, in bits
+ * @off: offset in the string from which we are copying; we copy to @buf
+ * @count: the maximum number of bytes to print
+ *
+ * bitmap_print_to_pagebuf() is used indirectly via its cpumap wrapper
+ * cpumap_print_to_pagebuf(), or directly by drivers, to export a hexadecimal
+ * bitmask or decimal list to userspace through the sysfs ABI.
+ * Drivers might be using a normal attribute for this kind of ABI. A
+ * normal attribute typically has a show entry as below::
+ *
+ * static ssize_t example_attribute_show(struct device *dev,
+ * struct device_attribute *attr, char *buf)
+ * {
+ * ...
+ * return bitmap_print_to_pagebuf(true, buf, &mask, nr_trig_max);
+ * }
+ *
+ * The show entry of a normal attribute has no offset and count parameters,
+ * which means the file is limited to one page only.
+ * The bitmap_print_to_pagebuf() API works well for this kind of normal
+ * attribute with a buf parameter and without offset and count::
+ *
+ * bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp,
+ * int nmaskbits)
+ * {
+ * }
+ *
+ * The problem is that once we have a large bitmap, the printed bitmask or
+ * list can exceed one page. Especially for a list, it could be as complex
+ * as 0,3,5,7,9,... and we have no simple way to know its exact size.
+ * It turns out bin_attribute is a way to break this limit; it has a show
+ * entry as below::
+ *
+ * static ssize_t
+ * example_bin_attribute_show(struct file *filp, struct kobject *kobj,
+ * struct bin_attribute *attr, char *buf,
+ * loff_t offset, size_t count)
+ * {
+ * ...
+ * }
+ *
+ * With the new offset and count parameters, the sysfs ABI is able to
+ * support file sizes of more than one page. For example, offset could be
+ * >= 4096.
+ * bitmap_print_bitmask_to_buf() and bitmap_print_list_to_buf(), with their
+ * cpumap wrappers cpumap_print_bitmask_to_buf() and cpumap_print_list_to_buf(),
+ * let those drivers support large bitmasks and lists once they move to
+ * bin_attribute. As a result, we have to pass the corresponding parameters,
+ * such as off and count, from the bin_attribute show entry to this API.
+ *
+ * The role of cpumap_print_bitmask_to_buf() and cpumap_print_list_to_buf()
+ * is similar to that of cpumap_print_to_pagebuf(); the difference is that
+ * bitmap_print_to_pagebuf() mainly serves sysfs attributes, with the
+ * assumption that the destination buffer is exactly one page and won't be
+ * more than one page. cpumap_print_bitmask_to_buf() and
+ * cpumap_print_list_to_buf(), on the other hand, mainly serve
+ * bin_attribute, which is not limited to exactly one page, and can break
+ * the size limit of the converted decimal list and hexadecimal bitmask.
+ *
+ * WARNING!
+ *
+ * This function is not a replacement for sprintf() or bitmap_print_to_pagebuf().
+ * It is intended to work around the sysfs limitations discussed above and
+ * should be used carefully in the general case, for the following reasons:
+ *
+ * - Time complexity is O(nbits^2/count), compared to O(nbits) for snprintf().
+ * - Memory complexity is O(nbits), compared to O(1) for snprintf().
+ * - @off and @count are NOT offset and number of bits to print.
+ * - If printing part of a bitmap as a list, the resulting string is not a
+ *   correct list representation of the bitmap. In particular, some bits within
+ *   or outside the related interval may be erroneously set or unset. The format
+ *   of the string may be broken, so a bitmap_parselist-like parser may fail to
+ *   parse it.
+ * - If printing the whole bitmap as a list by parts, the user must order the
+ *   calls so that the offset is incremented linearly.
+ * - If printing the whole bitmap as a list by parts, the user must keep the
+ *   bitmap unchanged between the very first and very last call. Otherwise the
+ *   concatenated result may be incorrect, and the format may be broken.
+ *
+ * Returns the number of characters actually printed to @buf
+ */
+int bitmap_print_bitmask_to_buf(char *buf, const unsigned long *maskp,
+ int nmaskbits, loff_t off, size_t count)
+{
+ return bitmap_print_to_buf(false, buf, maskp, nmaskbits, off, count);
+}
+EXPORT_SYMBOL(bitmap_print_bitmask_to_buf);
+
+/**
+ * bitmap_print_list_to_buf - convert bitmap to decimal list format ASCII string
+ * @buf: buffer into which string is placed
+ * @maskp: pointer to bitmap to convert
+ * @nmaskbits: size of bitmap, in bits
+ * @off: offset in the string from which we are copying; we copy to @buf
+ * @count: the maximum number of bytes to print
+ *
+ * Everything is the same as in bitmap_print_bitmask_to_buf() above, except
+ * the print format.
+ */
+int bitmap_print_list_to_buf(char *buf, const unsigned long *maskp,
+ int nmaskbits, loff_t off, size_t count)
+{
+ return bitmap_print_to_buf(true, buf, maskp, nmaskbits, off, count);
+}
+EXPORT_SYMBOL(bitmap_print_list_to_buf);
+
+/*
+ * Region 9-38:4/10 describes the following bitmap structure:
+ * 0 9 12 18 38 N
+ * .........****......****......****..................
+ * ^ ^ ^ ^ ^
+ * start off group_len end nbits
+ */
+struct region {
+ unsigned int start;
+ unsigned int off;
+ unsigned int group_len;
+ unsigned int end;
+ unsigned int nbits;
+};
+
+static void bitmap_set_region(const struct region *r, unsigned long *bitmap)
+{
+ unsigned int start;
+
+ for (start = r->start; start <= r->end; start += r->group_len)
+ bitmap_set(bitmap, start, min(r->end - start + 1, r->off));
+}
+
+static int bitmap_check_region(const struct region *r)
+{
+ if (r->start > r->end || r->group_len == 0 || r->off > r->group_len)
+ return -EINVAL;
+
+ if (r->end >= r->nbits)
+ return -ERANGE;
+
+ return 0;
+}
+
+static const char *bitmap_getnum(const char *str, unsigned int *num,
+ unsigned int lastbit)
+{
+ unsigned long long n;
+ unsigned int len;
+
+ if (str[0] == 'N') {
+ *num = lastbit;
+ return str + 1;
+ }
+
+ len = _parse_integer(str, 10, &n);
+ if (!len)
+ return ERR_PTR(-EINVAL);
+ if (len & KSTRTOX_OVERFLOW || n != (unsigned int)n)
+ return ERR_PTR(-EOVERFLOW);
+
+ *num = n;
+ return str + len;
+}
+
+static inline bool end_of_str(char c)
+{
+ return c == '\0' || c == '\n';
+}
+
+static inline bool __end_of_region(char c)
+{
+ return isspace(c) || c == ',';
+}
+
+static inline bool end_of_region(char c)
+{
+ return __end_of_region(c) || end_of_str(c);
+}
+
+/*
+ * The format allows commas and whitespaces at the beginning
+ * of the region.
+ */
+static const char *bitmap_find_region(const char *str)
+{
+ while (__end_of_region(*str))
+ str++;
+
+ return end_of_str(*str) ? NULL : str;
+}
+
+static const char *bitmap_find_region_reverse(const char *start, const char *end)
+{
+ while (start <= end && __end_of_region(*end))
+ end--;
+
+ return end;
+}
+
+static const char *bitmap_parse_region(const char *str, struct region *r)
+{
+ unsigned int lastbit = r->nbits - 1;
+
+ if (!strncasecmp(str, "all", 3)) {
+ r->start = 0;
+ r->end = lastbit;
+ str += 3;
+
+ goto check_pattern;
+ }
+
+ str = bitmap_getnum(str, &r->start, lastbit);
+ if (IS_ERR(str))
+ return str;
+
+ if (end_of_region(*str))
+ goto no_end;
+
+ if (*str != '-')
+ return ERR_PTR(-EINVAL);
+
+ str = bitmap_getnum(str + 1, &r->end, lastbit);
+ if (IS_ERR(str))
+ return str;
+
+check_pattern:
+ if (end_of_region(*str))
+ goto no_pattern;
+
+ if (*str != ':')
+ return ERR_PTR(-EINVAL);
+
+ str = bitmap_getnum(str + 1, &r->off, lastbit);
+ if (IS_ERR(str))
+ return str;
+
+ if (*str != '/')
+ return ERR_PTR(-EINVAL);
+
+ return bitmap_getnum(str + 1, &r->group_len, lastbit);
+
+no_end:
+ r->end = r->start;
+no_pattern:
+ r->off = r->end + 1;
+ r->group_len = r->end + 1;
+
+ return end_of_str(*str) ? NULL : str;
+}
+
+/**
+ * bitmap_parselist - convert list format ASCII string to bitmap
+ * @buf: read user string from this buffer; must be terminated
+ * with a \0 or \n.
+ * @maskp: write resulting mask here
+ * @nmaskbits: number of bits in mask to be written
+ *
+ * Input format is a comma-separated list of decimal numbers and
+ * ranges. Consecutively set bits are shown as two hyphen-separated
+ * decimal numbers, the smallest and largest bit numbers set in
+ * the range.
+ * Optionally each range can be postfixed to denote that only parts of it
+ * should be set. The range will be divided into groups of a specific size.
+ * From each group, only a defined number of bits will be used.
+ * Syntax: range:used_size/group_size
+ * Example: 0-1023:2/256 ==> 0,1,256,257,512,513,768,769
+ * The value 'N' can be used as a dynamically substituted token for the
+ * maximum allowed value; i.e. (nmaskbits - 1). Keep in mind that it is
+ * dynamic, so if system changes cause the bitmap width to change, such
+ * as more cores in a CPU list, then any ranges using N will also change.
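+ * For example (an illustrative sketch), with nmaskbits = 32 the string
+ * "0,16-N" sets bits 0 and 16-31.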
+ *
+ * Returns: 0 on success, -errno on invalid input strings. Error values:
+ *
+ * - ``-EINVAL``: wrong region format
+ * - ``-EINVAL``: invalid character in string
+ * - ``-ERANGE``: bit number specified too large for mask
+ * - ``-EOVERFLOW``: integer overflow in the input parameters
+ */
+int bitmap_parselist(const char *buf, unsigned long *maskp, int nmaskbits)
+{
+ struct region r;
+ long ret;
+
+ r.nbits = nmaskbits;
+ bitmap_zero(maskp, r.nbits);
+
+ while (buf) {
+ buf = bitmap_find_region(buf);
+ if (buf == NULL)
+ return 0;
+
+ buf = bitmap_parse_region(buf, &r);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ ret = bitmap_check_region(&r);
+ if (ret)
+ return ret;
+
+ bitmap_set_region(&r, maskp);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(bitmap_parselist);
+
+
+/**
+ * bitmap_parselist_user() - convert user buffer's list format ASCII
+ * string to bitmap
+ *
+ * @ubuf: pointer to user buffer containing string.
+ * @ulen: buffer size in bytes. If string is smaller than this
+ * then it must be terminated with a \0.
+ * @maskp: pointer to bitmap array that will contain result.
+ * @nmaskbits: size of bitmap, in bits.
+ *
+ * Wrapper for bitmap_parselist(), providing it with user buffer.
+ */
+int bitmap_parselist_user(const char __user *ubuf,
+ unsigned int ulen, unsigned long *maskp,
+ int nmaskbits)
+{
+ char *buf;
+ int ret;
+
+ buf = memdup_user_nul(ubuf, ulen);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ ret = bitmap_parselist(buf, maskp, nmaskbits);
+
+ kfree(buf);
+ return ret;
+}
+EXPORT_SYMBOL(bitmap_parselist_user);
+
+static const char *bitmap_get_x32_reverse(const char *start,
+ const char *end, u32 *num)
+{
+ u32 ret = 0;
+ int c, i;
+
+ for (i = 0; i < 32; i += 4) {
+ c = hex_to_bin(*end--);
+ if (c < 0)
+ return ERR_PTR(-EINVAL);
+
+ ret |= c << i;
+
+ if (start > end || __end_of_region(*end))
+ goto out;
+ }
+
+ if (hex_to_bin(*end--) >= 0)
+ return ERR_PTR(-EOVERFLOW);
+out:
+ *num = ret;
+ return end;
+}
+
+/**
+ * bitmap_parse - convert an ASCII hex string into a bitmap.
+ * @start: pointer to buffer containing string.
+ * @buflen: buffer size in bytes. If string is smaller than this
+ * then it must be terminated with a \0 or \n. In that case,
+ * UINT_MAX may be provided instead of string length.
+ * @maskp: pointer to bitmap array that will contain result.
+ * @nmaskbits: size of bitmap, in bits.
+ *
+ * Commas group hex digits into chunks. Each chunk defines exactly 32
+ * bits of the resultant bitmask. No chunk may specify a value larger
+ * than 32 bits (%-EOVERFLOW), and if a chunk specifies a smaller value
+ * then leading 0-bits are prepended. %-EINVAL is returned for illegal
+ * characters. Grouping such as "1,,5", ",44", "," or "" is allowed.
+ * Leading, embedded and trailing whitespace accepted.
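+ *
+ * For example (an illustrative sketch), parsing "3,fffffffc" with
+ * @nmaskbits = 64 sets bits 2-33 of @maskp: the rightmost chunk yields
+ * bits 2-31, the next chunk bits 32-33.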
+ */
+int bitmap_parse(const char *start, unsigned int buflen,
+ unsigned long *maskp, int nmaskbits)
+{
+ const char *end = strnchrnul(start, buflen, '\n') - 1;
+ int chunks = BITS_TO_U32(nmaskbits);
+ u32 *bitmap = (u32 *)maskp;
+ int unset_bit;
+ int chunk;
+
+ for (chunk = 0; ; chunk++) {
+ end = bitmap_find_region_reverse(start, end);
+ if (start > end)
+ break;
+
+ if (!chunks--)
+ return -EOVERFLOW;
+
+#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
+ end = bitmap_get_x32_reverse(start, end, &bitmap[chunk ^ 1]);
+#else
+ end = bitmap_get_x32_reverse(start, end, &bitmap[chunk]);
+#endif
+ if (IS_ERR(end))
+ return PTR_ERR(end);
+ }
+
+ unset_bit = (BITS_TO_U32(nmaskbits) - chunks) * 32;
+ if (unset_bit < nmaskbits) {
+ bitmap_clear(maskp, unset_bit, nmaskbits - unset_bit);
+ return 0;
+ }
+
+ if (find_next_bit(maskp, unset_bit, nmaskbits) != unset_bit)
+ return -EOVERFLOW;
+
+ return 0;
+}
+EXPORT_SYMBOL(bitmap_parse);
+
+/**
+ * bitmap_pos_to_ord - find ordinal of set bit at given position in bitmap
+ * @buf: pointer to a bitmap
+ * @pos: a bit position in @buf (0 <= @pos < @nbits)
+ * @nbits: number of valid bit positions in @buf
+ *
+ * Map the bit at position @pos in @buf (of length @nbits) to the
+ * ordinal of which set bit it is. If it is not set or if @pos
+ * is not a valid bit position, map to -1.
+ *
+ * If for example, just bits 4 through 7 are set in @buf, then @pos
+ * values 4 through 7 will get mapped to 0 through 3, respectively,
+ * and other @pos values will get mapped to -1. When @pos value 7
+ * gets mapped to (returns) @ord value 3 in this example, that means
+ * that bit 7 is the 3rd (starting with 0th) set bit in @buf.
+ *
+ * Bit positions 0 through @nbits - 1 are valid positions in @buf.
+ */
+static int bitmap_pos_to_ord(const unsigned long *buf, unsigned int pos, unsigned int nbits)
+{
+ if (pos >= nbits || !test_bit(pos, buf))
+ return -1;
+
+ return bitmap_weight(buf, pos);
+}
+
+/**
+ * bitmap_remap - Apply map defined by a pair of bitmaps to another bitmap
+ * @dst: remapped result
+ * @src: subset to be remapped
+ * @old: defines domain of map
+ * @new: defines range of map
+ * @nbits: number of bits in each of these bitmaps
+ *
+ * Let @old and @new define a mapping of bit positions, such that
+ * whatever position is held by the n-th set bit in @old is mapped
+ * to the n-th set bit in @new. In the more general case, allowing
+ * for the possibility that the weight 'w' of @new is less than the
+ * weight of @old, map the position of the n-th set bit in @old to
+ * the position of the m-th set bit in @new, where m == n % w.
+ *
+ * If either of the @old and @new bitmaps are empty, or if @src and
+ * @dst point to the same location, then this routine copies @src
+ * to @dst.
+ *
+ * The positions of unset bits in @old are mapped to themselves
+ * (the identity map).
+ *
+ * Apply the above specified mapping to @src, placing the result in
+ * @dst, clearing any bits previously set in @dst.
+ *
+ * For example, let's say that @old has bits 4 through 7 set, and
+ * @new has bits 12 through 15 set. This defines the mapping of bit
+ * position 4 to 12, 5 to 13, 6 to 14 and 7 to 15, and of all other
+ * bit positions unchanged. So if say @src comes into this routine
+ * with bits 1, 5 and 7 set, then @dst should leave with bits 1,
+ * 13 and 15 set.
+ */
+void bitmap_remap(unsigned long *dst, const unsigned long *src,
+ const unsigned long *old, const unsigned long *new,
+ unsigned int nbits)
+{
+ unsigned int oldbit, w;
+
+ if (dst == src) /* following doesn't handle inplace remaps */
+ return;
+ bitmap_zero(dst, nbits);
+
+ w = bitmap_weight(new, nbits);
+ for_each_set_bit(oldbit, src, nbits) {
+ int n = bitmap_pos_to_ord(old, oldbit, nbits);
+
+ if (n < 0 || w == 0)
+ set_bit(oldbit, dst); /* identity map */
+ else
+ set_bit(find_nth_bit(new, nbits, n % w), dst);
+ }
+}
+EXPORT_SYMBOL(bitmap_remap);
+
+/**
+ * bitmap_bitremap - Apply map defined by a pair of bitmaps to a single bit
+ * @oldbit: bit position to be mapped
+ * @old: defines domain of map
+ * @new: defines range of map
+ * @bits: number of bits in each of these bitmaps
+ *
+ * Let @old and @new define a mapping of bit positions, such that
+ * whatever position is held by the n-th set bit in @old is mapped
+ * to the n-th set bit in @new. In the more general case, allowing
+ * for the possibility that the weight 'w' of @new is less than the
+ * weight of @old, map the position of the n-th set bit in @old to
+ * the position of the m-th set bit in @new, where m == n % w.
+ *
+ * The positions of unset bits in @old are mapped to themselves
+ * (the identity map).
+ *
+ * Apply the above specified mapping to bit position @oldbit, returning
+ * the new bit position.
+ *
+ * For example, let's say that @old has bits 4 through 7 set, and
+ * @new has bits 12 through 15 set. This defines the mapping of bit
+ * position 4 to 12, 5 to 13, 6 to 14 and 7 to 15, and of all other
+ * bit positions unchanged. So if say @oldbit is 5, then this routine
+ * returns 13.
+ */
+int bitmap_bitremap(int oldbit, const unsigned long *old,
+ const unsigned long *new, int bits)
+{
+ int w = bitmap_weight(new, bits);
+ int n = bitmap_pos_to_ord(old, oldbit, bits);
+ if (n < 0 || w == 0)
+ return oldbit;
+ else
+ return find_nth_bit(new, bits, n % w);
+}
+EXPORT_SYMBOL(bitmap_bitremap);
+
+#ifdef CONFIG_NUMA
+/**
+ * bitmap_onto - translate one bitmap relative to another
+ * @dst: resulting translated bitmap
+ * @orig: original untranslated bitmap
+ * @relmap: bitmap relative to which translated
+ * @bits: number of bits in each of these bitmaps
+ *
+ * Set the n-th bit of @dst iff there exists some m such that the
+ * n-th bit of @relmap is set, the m-th bit of @orig is set, and
+ * the n-th bit of @relmap is also the m-th _set_ bit of @relmap.
+ * (If you understood the previous sentence the first time you
+ * read it, you're overqualified for your current job.)
+ *
+ * In other words, @orig is mapped onto (surjectively) @dst,
+ * using the map { <n, m> | the n-th bit of @relmap is the
+ * m-th set bit of @relmap }.
+ *
+ * Any set bits in @orig above bit number W, where W is the
+ * weight of (number of set bits in) @relmap, are mapped nowhere.
+ * In particular, if for all bits m set in @orig, m >= W, then
+ * @dst will end up empty. In situations where the possibility
+ * of such an empty result is not desired, one way to avoid it is
+ * to use the bitmap_fold() operator, below, to first fold the
+ * @orig bitmap over itself so that all its set bits x are in the
+ * range 0 <= x < W. The bitmap_fold() operator does this by
+ * setting the bit (m % W) in @dst, for each bit (m) set in @orig.
+ *
+ * Example [1] for bitmap_onto():
+ * Let's say @relmap has bits 30-39 set, and @orig has bits
+ * 1, 3, 5, 7, 9 and 11 set. Then on return from this routine,
+ * @dst will have bits 31, 33, 35, 37 and 39 set.
+ *
+ * When bit 0 is set in @orig, it means turn on the bit in
+ * @dst corresponding to whatever is the first bit (if any)
+ * that is turned on in @relmap. Since bit 0 was off in the
+ * above example, we leave off that bit (bit 30) in @dst.
+ *
+ * When bit 1 is set in @orig (as in the above example), it
+ * means turn on the bit in @dst corresponding to whatever
+ * is the second bit that is turned on in @relmap. The second
+ * bit in @relmap that was turned on in the above example was
+ * bit 31, so we turned on bit 31 in @dst.
+ *
+ * Similarly, we turned on bits 33, 35, 37 and 39 in @dst,
+ * because they were the 4th, 6th, 8th and 10th set bits
+ * in @relmap, and the 4th, 6th, 8th and 10th bits of
+ * @orig (i.e. bits 3, 5, 7 and 9) were also set.
+ *
+ * When bit 11 is set in @orig, it means turn on the bit in
+ * @dst corresponding to whatever is the twelfth bit that is
+ * turned on in @relmap. In the above example, there were
+ * only ten bits turned on in @relmap (30..39), so the fact that
+ * bit 11 was set in @orig had no effect on @dst.
+ *
+ * Example [2] for bitmap_fold() + bitmap_onto():
+ * Let's say @relmap has these ten bits set::
+ *
+ * 40 41 42 43 45 48 53 61 74 95
+ *
+ * (for the curious, that's 40 plus the first ten terms of the
+ * Fibonacci sequence.)
+ *
+ * Further, let's say we use the following code, invoking
+ * bitmap_fold() then bitmap_onto(), as suggested above to
+ * avoid the possibility of an empty @dst result::
+ *
+ * unsigned long *tmp; // a temporary bitmap's bits
+ *
+ * bitmap_fold(tmp, orig, bitmap_weight(relmap, bits), bits);
+ * bitmap_onto(dst, tmp, relmap, bits);
+ *
+ * Then this table shows what various values of @dst would be, for
+ * various @orig's. I list the zero-based positions of each set bit.
+ * The tmp column shows the intermediate result, as computed by
+ * using bitmap_fold() to fold the @orig bitmap modulo ten
+ * (the weight of @relmap):
+ *
+ * =============== ============== =================
+ * @orig tmp @dst
+ * 0 0 40
+ * 1 1 41
+ * 9 9 95
+ * 10 0 40 [#f1]_
+ * 1 3 5 7 1 3 5 7 41 43 48 61
+ * 0 1 2 3 4 0 1 2 3 4 40 41 42 43 45
+ * 0 9 18 27 0 9 8 7 40 61 74 95
+ * 0 10 20 30 0 40
+ * 0 11 22 33 0 1 2 3 40 41 42 43
+ * 0 12 24 36 0 2 4 6 40 42 45 53
+ * 78 102 211 1 2 8 41 42 74 [#f1]_
+ * =============== ============== =================
+ *
+ * .. [#f1]
+ *
+ * For these marked lines, if we hadn't first done bitmap_fold()
+ * into tmp, then the @dst result would have been empty.
+ *
+ * If either of @orig or @relmap is empty (no set bits), then @dst
+ * will be returned empty.
+ *
+ * If (as explained above) the only set bits in @orig are in positions
+ * m where m >= W, (where W is the weight of @relmap) then @dst will
+ * once again be returned empty.
+ *
+ * All bits in @dst not set by the above rule are cleared.
+ */
+void bitmap_onto(unsigned long *dst, const unsigned long *orig,
+ const unsigned long *relmap, unsigned int bits)
+{
+ unsigned int n, m; /* same meaning as in above comment */
+
+ if (dst == orig) /* following doesn't handle inplace mappings */
+ return;
+ bitmap_zero(dst, bits);
+
+ /*
+ * The following code is a more efficient, but less
+ * obvious, equivalent to the loop:
+ * for (m = 0; m < bitmap_weight(relmap, bits); m++) {
+ * n = find_nth_bit(relmap, bits, m);
+ * if (test_bit(m, orig))
+ * set_bit(n, dst);
+ * }
+ */
+
+ m = 0;
+ for_each_set_bit(n, relmap, bits) {
+ /* m == bitmap_pos_to_ord(relmap, n, bits) */
+ if (test_bit(m, orig))
+ set_bit(n, dst);
+ m++;
+ }
+}
+
+/**
+ * bitmap_fold - fold larger bitmap into smaller, modulo specified size
+ * @dst: resulting smaller bitmap
+ * @orig: original larger bitmap
+ * @sz: specified size
+ * @nbits: number of bits in each of these bitmaps
+ *
+ * For each bit oldbit in @orig, set bit oldbit mod @sz in @dst.
+ * Clear all other bits in @dst. See further the comment and
+ * Example [2] for bitmap_onto() for why and how to use this.
+ */
+void bitmap_fold(unsigned long *dst, const unsigned long *orig,
+ unsigned int sz, unsigned int nbits)
+{
+ unsigned int oldbit;
+
+ if (dst == orig) /* following doesn't handle inplace mappings */
+ return;
+ bitmap_zero(dst, nbits);
+
+ for_each_set_bit(oldbit, orig, nbits)
+ set_bit(oldbit % sz, dst);
+}
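+
+/*
+ * A concrete sketch of the bitmap_fold() + bitmap_onto() combination
+ * described in Example [2] above (the names and the 128-bit size are
+ * illustrative; @orig and @relmap are assumed to be existing 128-bit
+ * bitmaps):
+ *
+ *    DECLARE_BITMAP(tmp, 128);
+ *    DECLARE_BITMAP(dst, 128);
+ *    unsigned int w = bitmap_weight(relmap, 128);
+ *
+ *    bitmap_fold(tmp, orig, w, 128);		// all set bits now fall below w
+ *    bitmap_onto(dst, tmp, relmap, 128);	// translate onto @relmap
+ */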
+#endif /* CONFIG_NUMA */
+
+/*
+ * Common code for bitmap_*_region() routines.
+ * bitmap: array of unsigned longs corresponding to the bitmap
+ * pos: the beginning of the region
+ * order: region size (log base 2 of number of bits)
+ * reg_op: operation(s) to perform on that region of bitmap
+ *
+ * Can set, verify and/or release a region of bits in a bitmap,
+ * depending on which combination of REG_OP_* flag bits is set.
+ *
+ * A region of a bitmap is a sequence of bits in the bitmap, of
+ * some size '1 << order' (a power of two), aligned to that same
+ * '1 << order' power of two.
+ *
+ * Returns 1 if REG_OP_ISFREE succeeds (region is all zero bits).
+ * Returns 0 in all other cases and for all other reg_op values.
+ */
+
+enum {
+ REG_OP_ISFREE, /* true if region is all zero bits */
+ REG_OP_ALLOC, /* set all bits in region */
+ REG_OP_RELEASE, /* clear all bits in region */
+};
+
+static int __reg_op(unsigned long *bitmap, unsigned int pos, int order, int reg_op)
+{
+ int nbits_reg; /* number of bits in region */
+ int index; /* index first long of region in bitmap */
+ int offset; /* bit offset region in bitmap[index] */
+ int nlongs_reg; /* num longs spanned by region in bitmap */
+ int nbitsinlong; /* num bits of region in each spanned long */
+ unsigned long mask; /* bitmask for one long of region */
+ int i; /* scans bitmap by longs */
+ int ret = 0; /* return value */
+
+ /*
+ * Either nlongs_reg == 1 (for small orders that fit in one long)
+ * or (offset == 0 && mask == ~0UL) (for larger multiword orders.)
+ */
+ nbits_reg = 1 << order;
+ index = pos / BITS_PER_LONG;
+ offset = pos - (index * BITS_PER_LONG);
+ nlongs_reg = BITS_TO_LONGS(nbits_reg);
+ nbitsinlong = min(nbits_reg, BITS_PER_LONG);
+
+ /*
+ * Can't do "mask = (1UL << nbitsinlong) - 1", as that
+ * overflows if nbitsinlong == BITS_PER_LONG.
+ */
+ mask = (1UL << (nbitsinlong - 1));
+ mask += mask - 1;
+ mask <<= offset;
+
+ switch (reg_op) {
+ case REG_OP_ISFREE:
+ for (i = 0; i < nlongs_reg; i++) {
+ if (bitmap[index + i] & mask)
+ goto done;
+ }
+ ret = 1; /* all bits in region free (zero) */
+ break;
+
+ case REG_OP_ALLOC:
+ for (i = 0; i < nlongs_reg; i++)
+ bitmap[index + i] |= mask;
+ break;
+
+ case REG_OP_RELEASE:
+ for (i = 0; i < nlongs_reg; i++)
+ bitmap[index + i] &= ~mask;
+ break;
+ }
+done:
+ return ret;
+}
+
+/**
+ * bitmap_find_free_region - find a contiguous aligned mem region
+ * @bitmap: array of unsigned longs corresponding to the bitmap
+ * @bits: number of bits in the bitmap
+ * @order: region size (log base 2 of number of bits) to find
+ *
+ * Find a region of free (zero) bits in a @bitmap of @bits bits and
+ * allocate them (set them to one). Only consider regions of length
+ * a power (@order) of two, aligned to that power of two, which
+ * makes the search algorithm much faster.
+ *
+ * Return the bit offset in bitmap of the allocated region,
+ * or -errno on failure.
+ */
+int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order)
+{
+ unsigned int pos, end; /* scans bitmap by regions of size order */
+
+ for (pos = 0 ; (end = pos + (1U << order)) <= bits; pos = end) {
+ if (!__reg_op(bitmap, pos, order, REG_OP_ISFREE))
+ continue;
+ __reg_op(bitmap, pos, order, REG_OP_ALLOC);
+ return pos;
+ }
+ return -ENOMEM;
+}
+EXPORT_SYMBOL(bitmap_find_free_region);
+
+/**
+ * bitmap_release_region - release allocated bitmap region
+ * @bitmap: array of unsigned longs corresponding to the bitmap
+ * @pos: beginning of bit region to release
+ * @order: region size (log base 2 of number of bits) to release
+ *
+ * This is the complement to bitmap_find_free_region() and releases
+ * the found region (by clearing it in the bitmap).
+ *
+ * No return value.
+ */
+void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order)
+{
+ __reg_op(bitmap, pos, order, REG_OP_RELEASE);
+}
+EXPORT_SYMBOL(bitmap_release_region);
+
+/**
+ * bitmap_allocate_region - allocate bitmap region
+ * @bitmap: array of unsigned longs corresponding to the bitmap
+ * @pos: beginning of bit region to allocate
+ * @order: region size (log base 2 of number of bits) to allocate
+ *
+ * Allocate (set bits in) a specified region of a bitmap.
+ *
+ * Return 0 on success, or %-EBUSY if specified region wasn't
+ * free (not all bits were zero).
+ */
+int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order)
+{
+ if (!__reg_op(bitmap, pos, order, REG_OP_ISFREE))
+ return -EBUSY;
+ return __reg_op(bitmap, pos, order, REG_OP_ALLOC);
+}
+EXPORT_SYMBOL(bitmap_allocate_region);
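+
+/*
+ * Example usage of the region API (a minimal sketch; the bitmap name
+ * and its 64-bit size are hypothetical):
+ *
+ *    DECLARE_BITMAP(map, 64);
+ *    int pos;
+ *
+ *    bitmap_zero(map, 64);
+ *    pos = bitmap_find_free_region(map, 64, 3);	// 8 aligned bits
+ *    if (pos < 0)
+ *        return pos;			// -ENOMEM, no free region
+ *    ...
+ *    bitmap_release_region(map, pos, 3);	// free the region again
+ */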
+
+/**
+ * bitmap_copy_le - copy a bitmap, putting the bits into little-endian order.
+ * @dst: destination buffer
+ * @src: bitmap to copy
+ * @nbits: number of bits in the bitmap
+ *
+ * Requires nbits % BITS_PER_LONG == 0.
+ */
+#ifdef __BIG_ENDIAN
+void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits)
+{
+ unsigned int i;
+
+ for (i = 0; i < nbits/BITS_PER_LONG; i++) {
+ if (BITS_PER_LONG == 64)
+ dst[i] = cpu_to_le64(src[i]);
+ else
+ dst[i] = cpu_to_le32(src[i]);
+ }
+}
+EXPORT_SYMBOL(bitmap_copy_le);
+#endif
+
+unsigned long *bitmap_alloc(unsigned int nbits, gfp_t flags)
+{
+ return kmalloc_array(BITS_TO_LONGS(nbits), sizeof(unsigned long),
+ flags);
+}
+EXPORT_SYMBOL(bitmap_alloc);
+
+unsigned long *bitmap_zalloc(unsigned int nbits, gfp_t flags)
+{
+ return bitmap_alloc(nbits, flags | __GFP_ZERO);
+}
+EXPORT_SYMBOL(bitmap_zalloc);
+
+unsigned long *bitmap_alloc_node(unsigned int nbits, gfp_t flags, int node)
+{
+ return kmalloc_array_node(BITS_TO_LONGS(nbits), sizeof(unsigned long),
+ flags, node);
+}
+EXPORT_SYMBOL(bitmap_alloc_node);
+
+unsigned long *bitmap_zalloc_node(unsigned int nbits, gfp_t flags, int node)
+{
+ return bitmap_alloc_node(nbits, flags | __GFP_ZERO, node);
+}
+EXPORT_SYMBOL(bitmap_zalloc_node);
+
+void bitmap_free(const unsigned long *bitmap)
+{
+ kfree(bitmap);
+}
+EXPORT_SYMBOL(bitmap_free);
+
+static void devm_bitmap_free(void *data)
+{
+ unsigned long *bitmap = data;
+
+ bitmap_free(bitmap);
+}
+
+unsigned long *devm_bitmap_alloc(struct device *dev,
+ unsigned int nbits, gfp_t flags)
+{
+ unsigned long *bitmap;
+ int ret;
+
+ bitmap = bitmap_alloc(nbits, flags);
+ if (!bitmap)
+ return NULL;
+
+ ret = devm_add_action_or_reset(dev, devm_bitmap_free, bitmap);
+ if (ret)
+ return NULL;
+
+ return bitmap;
+}
+EXPORT_SYMBOL_GPL(devm_bitmap_alloc);
+
+unsigned long *devm_bitmap_zalloc(struct device *dev,
+ unsigned int nbits, gfp_t flags)
+{
+ return devm_bitmap_alloc(dev, nbits, flags | __GFP_ZERO);
+}
+EXPORT_SYMBOL_GPL(devm_bitmap_zalloc);
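+
+/*
+ * Example usage (a sketch; the 1024-bit size is arbitrary and 'dev' is
+ * assumed to come from the caller's context):
+ *
+ *    unsigned long *map = bitmap_zalloc(1024, GFP_KERNEL);
+ *
+ *    if (!map)
+ *        return -ENOMEM;
+ *    ...
+ *    bitmap_free(map);
+ *
+ * With devm_bitmap_zalloc(dev, 1024, GFP_KERNEL), the bitmap is instead
+ * freed automatically when @dev is released, so no explicit
+ * bitmap_free() call is needed.
+ */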
+
+#if BITS_PER_LONG == 64
+/**
+ * bitmap_from_arr32 - copy the contents of u32 array of bits to bitmap
+ * @bitmap: array of unsigned longs, the destination bitmap
+ * @buf: array of u32 (in host byte order), the source bitmap
+ * @nbits: number of bits in @bitmap
+ */
+void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf, unsigned int nbits)
+{
+ unsigned int i, halfwords;
+
+ halfwords = DIV_ROUND_UP(nbits, 32);
+ for (i = 0; i < halfwords; i++) {
+ bitmap[i/2] = (unsigned long) buf[i];
+ if (++i < halfwords)
+ bitmap[i/2] |= ((unsigned long) buf[i]) << 32;
+ }
+
+ /* Clear tail bits in last word beyond nbits. */
+ if (nbits % BITS_PER_LONG)
+ bitmap[(halfwords - 1) / 2] &= BITMAP_LAST_WORD_MASK(nbits);
+}
+EXPORT_SYMBOL(bitmap_from_arr32);
+
+/**
+ * bitmap_to_arr32 - copy the contents of bitmap to a u32 array of bits
+ * @buf: array of u32 (in host byte order), the dest bitmap
+ * @bitmap: array of unsigned longs, the source bitmap
+ * @nbits: number of bits in @bitmap
+ */
+void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap, unsigned int nbits)
+{
+ unsigned int i, halfwords;
+
+ halfwords = DIV_ROUND_UP(nbits, 32);
+ for (i = 0; i < halfwords; i++) {
+ buf[i] = (u32) (bitmap[i/2] & UINT_MAX);
+ if (++i < halfwords)
+ buf[i] = (u32) (bitmap[i/2] >> 32);
+ }
+
+ /* Clear tail bits in last element of array beyond nbits. */
+ if (nbits % BITS_PER_LONG)
+ buf[halfwords - 1] &= (u32) (UINT_MAX >> ((-nbits) & 31));
+}
+EXPORT_SYMBOL(bitmap_to_arr32);
+#endif
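+
+/*
+ * Example round trip (a sketch; the 96-bit size is illustrative). This
+ * conversion is useful when a bitmap must cross an ABI that is defined
+ * in terms of u32 words regardless of the kernel's word size:
+ *
+ *    DECLARE_BITMAP(map, 96);
+ *    u32 buf[3];			// 96 bits == three u32 words
+ *
+ *    bitmap_to_arr32(buf, map, 96);	// bitmap -> u32 array
+ *    bitmap_from_arr32(map, buf, 96);	// u32 array -> bitmap
+ */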
+
+#if BITS_PER_LONG == 32
+/**
+ * bitmap_from_arr64 - copy the contents of u64 array of bits to bitmap
+ * @bitmap: array of unsigned longs, the destination bitmap
+ * @buf: array of u64 (in host byte order), the source bitmap
+ * @nbits: number of bits in @bitmap
+ */
+void bitmap_from_arr64(unsigned long *bitmap, const u64 *buf, unsigned int nbits)
+{
+ int n;
+
+ for (n = nbits; n > 0; n -= 64) {
+ u64 val = *buf++;
+
+ *bitmap++ = val;
+ if (n > 32)
+ *bitmap++ = val >> 32;
+ }
+
+ /*
+ * Clear tail bits in the last word beyond nbits.
+ *
+ * Negative index is OK because here we point to the word next
+ * to the last word of the bitmap, except for nbits == 0, which
+ * is tested implicitly.
+ */
+ if (nbits % BITS_PER_LONG)
+ bitmap[-1] &= BITMAP_LAST_WORD_MASK(nbits);
+}
+EXPORT_SYMBOL(bitmap_from_arr64);
+
+/**
+ * bitmap_to_arr64 - copy the contents of bitmap to a u64 array of bits
+ * @buf: array of u64 (in host byte order), the dest bitmap
+ * @bitmap: array of unsigned longs, the source bitmap
+ * @nbits: number of bits in @bitmap
+ */
+void bitmap_to_arr64(u64 *buf, const unsigned long *bitmap, unsigned int nbits)
+{
+ const unsigned long *end = bitmap + BITS_TO_LONGS(nbits);
+
+ while (bitmap < end) {
+ *buf = *bitmap++;
+ if (bitmap < end)
+ *buf |= (u64)(*bitmap++) << 32;
+ buf++;
+ }
+
+ /* Clear tail bits in the last element of array beyond nbits. */
+ if (nbits % 64)
+ buf[-1] &= GENMASK_ULL((nbits - 1) % 64, 0);
+}
+EXPORT_SYMBOL(bitmap_to_arr64);
+#endif
diff --git a/lib/bitrev.c b/lib/bitrev.c
new file mode 100644
index 000000000..81b56e0a7
--- /dev/null
+++ b/lib/bitrev.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#ifndef CONFIG_HAVE_ARCH_BITREVERSE
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/bitrev.h>
+
+MODULE_AUTHOR("Akinobu Mita <akinobu.mita@gmail.com>");
+MODULE_DESCRIPTION("Bit ordering reversal functions");
+MODULE_LICENSE("GPL");
+
+const u8 byte_rev_table[256] = {
+ 0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0,
+ 0x10, 0x90, 0x50, 0xd0, 0x30, 0xb0, 0x70, 0xf0,
+ 0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8,
+ 0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8,
+ 0x04, 0x84, 0x44, 0xc4, 0x24, 0xa4, 0x64, 0xe4,
+ 0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4,
+ 0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec,
+ 0x1c, 0x9c, 0x5c, 0xdc, 0x3c, 0xbc, 0x7c, 0xfc,
+ 0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2,
+ 0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2,
+ 0x0a, 0x8a, 0x4a, 0xca, 0x2a, 0xaa, 0x6a, 0xea,
+ 0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa,
+ 0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6,
+ 0x16, 0x96, 0x56, 0xd6, 0x36, 0xb6, 0x76, 0xf6,
+ 0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee,
+ 0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe,
+ 0x01, 0x81, 0x41, 0xc1, 0x21, 0xa1, 0x61, 0xe1,
+ 0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1,
+ 0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9,
+ 0x19, 0x99, 0x59, 0xd9, 0x39, 0xb9, 0x79, 0xf9,
+ 0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5,
+ 0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5,
+ 0x0d, 0x8d, 0x4d, 0xcd, 0x2d, 0xad, 0x6d, 0xed,
+ 0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd,
+ 0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3,
+ 0x13, 0x93, 0x53, 0xd3, 0x33, 0xb3, 0x73, 0xf3,
+ 0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb,
+ 0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb,
+ 0x07, 0x87, 0x47, 0xc7, 0x27, 0xa7, 0x67, 0xe7,
+ 0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7,
+ 0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef,
+ 0x1f, 0x9f, 0x5f, 0xdf, 0x3f, 0xbf, 0x7f, 0xff,
+};
+EXPORT_SYMBOL_GPL(byte_rev_table);
+
+#endif /* CONFIG_HAVE_ARCH_BITREVERSE */
diff --git a/lib/bootconfig-data.S b/lib/bootconfig-data.S
new file mode 100644
index 000000000..ef85ba1a8
--- /dev/null
+++ b/lib/bootconfig-data.S
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Embed default bootconfig in the kernel.
+ */
+ .section .init.rodata, "aw"
+ .global embedded_bootconfig_data
+embedded_bootconfig_data:
+ .incbin "lib/default.bconf"
+ .global embedded_bootconfig_data_end
+embedded_bootconfig_data_end:
diff --git a/lib/bootconfig.c b/lib/bootconfig.c
new file mode 100644
index 000000000..c59d26068
--- /dev/null
+++ b/lib/bootconfig.c
@@ -0,0 +1,985 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Extra Boot Config
+ * Masami Hiramatsu <mhiramat@kernel.org>
+ */
+
+#ifdef __KERNEL__
+#include <linux/bootconfig.h>
+#include <linux/bug.h>
+#include <linux/ctype.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/memblock.h>
+#include <linux/string.h>
+
+#ifdef CONFIG_BOOT_CONFIG_EMBED
+/* embedded_bootconfig_data is defined in bootconfig-data.S */
+extern __visible const char embedded_bootconfig_data[];
+extern __visible const char embedded_bootconfig_data_end[];
+
+const char * __init xbc_get_embedded_bootconfig(size_t *size)
+{
+ *size = embedded_bootconfig_data_end - embedded_bootconfig_data;
+ return (*size) ? embedded_bootconfig_data : NULL;
+}
+#endif
+
+#else /* !__KERNEL__ */
+/*
+ * NOTE: This is only for tools/bootconfig, because tools/bootconfig will
+ * run the parser sanity test.
+ * This does NOT mean lib/bootconfig.c is available in user space.
+ * However, if you change this file, please make sure tools/bootconfig
+ * still builds and runs without issues.
+ */
+#include <linux/bootconfig.h>
+#endif
+
+/*
+ * Extra Boot Config (XBC) is given as tree-structured ASCII text of
+ * key-value pairs on memory.
+ * xbc_parse() parses the text to build a simple tree. Each tree node is
+ * simply a key word or a value. A key node may have a next key node or/and
+ * a child node (both key and value). A value node may have a next value
+ * node (for array).
+ */
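+
+/*
+ * For example, a bootconfig text like the following (an illustrative
+ * snippet, not taken from this file):
+ *
+ *    ftrace.event.task.task_newtask {
+ *        filter = "pid < 128"
+ *        enable
+ *    }
+ *
+ * is parsed into a chain of key nodes ("ftrace" -> "event" -> "task" ->
+ * "task_newtask"), where "filter" has a value child node ("pid < 128")
+ * and "enable" is a leaf key node with no value.
+ */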
+
+static struct xbc_node *xbc_nodes __initdata;
+static int xbc_node_num __initdata;
+static char *xbc_data __initdata;
+static size_t xbc_data_size __initdata;
+static struct xbc_node *last_parent __initdata;
+static const char *xbc_err_msg __initdata;
+static int xbc_err_pos __initdata;
+static int open_brace[XBC_DEPTH_MAX] __initdata;
+static int brace_index __initdata;
+
+#ifdef __KERNEL__
+static inline void * __init xbc_alloc_mem(size_t size)
+{
+ return memblock_alloc(size, SMP_CACHE_BYTES);
+}
+
+static inline void __init xbc_free_mem(void *addr, size_t size)
+{
+ memblock_free(addr, size);
+}
+
+#else /* !__KERNEL__ */
+
+static inline void *xbc_alloc_mem(size_t size)
+{
+ return malloc(size);
+}
+
+static inline void xbc_free_mem(void *addr, size_t size)
+{
+ free(addr);
+}
+#endif
+/**
+ * xbc_get_info() - Get the information of loaded boot config
+ * @node_size: A pointer to store the number of nodes.
+ * @data_size: A pointer to store the size of bootconfig data.
+ *
+ * Get the number of used nodes in @node_size if it is not NULL,
+ * and the size of bootconfig data in @data_size if it is not NULL.
+ * Return 0 if the boot config is initialized, or return -ENODEV.
+ */
+int __init xbc_get_info(int *node_size, size_t *data_size)
+{
+ if (!xbc_data)
+ return -ENODEV;
+
+ if (node_size)
+ *node_size = xbc_node_num;
+ if (data_size)
+ *data_size = xbc_data_size;
+ return 0;
+}
+
+static int __init xbc_parse_error(const char *msg, const char *p)
+{
+ xbc_err_msg = msg;
+ xbc_err_pos = (int)(p - xbc_data);
+
+ return -EINVAL;
+}
+
+/**
+ * xbc_root_node() - Get the root node of extended boot config
+ *
+ * Return the address of the root node of the extended boot config. If the
+ * extended boot config is not initialized, return NULL.
+ */
+struct xbc_node * __init xbc_root_node(void)
+{
+ if (unlikely(!xbc_data))
+ return NULL;
+
+ return xbc_nodes;
+}
+
+/**
+ * xbc_node_index() - Get the index of XBC node
+ * @node: The target node to get the index of.
+ *
+ * Return the index number of @node in XBC node list.
+ */
+int __init xbc_node_index(struct xbc_node *node)
+{
+ return node - &xbc_nodes[0];
+}
+
+/**
+ * xbc_node_get_parent() - Get the parent XBC node
+ * @node: An XBC node.
+ *
+ * Return the parent node of @node. If the node is top node of the tree,
+ * return NULL.
+ */
+struct xbc_node * __init xbc_node_get_parent(struct xbc_node *node)
+{
+ return node->parent == XBC_NODE_MAX ? NULL : &xbc_nodes[node->parent];
+}
+
+/**
+ * xbc_node_get_child() - Get the child XBC node
+ * @node: An XBC node.
+ *
+ * Return the first child node of @node. If the node has no child, return
+ * NULL.
+ */
+struct xbc_node * __init xbc_node_get_child(struct xbc_node *node)
+{
+ return node->child ? &xbc_nodes[node->child] : NULL;
+}
+
+/**
+ * xbc_node_get_next() - Get the next sibling XBC node
+ * @node: An XBC node.
+ *
+ * Return the NEXT sibling node of @node. If the node has no next sibling,
+ * return NULL. Note that even if this returns NULL, it doesn't mean @node
+ * has no siblings. (You also have to check whether the parent's child node
+ * is @node or not.)
+ */
+struct xbc_node * __init xbc_node_get_next(struct xbc_node *node)
+{
+ return node->next ? &xbc_nodes[node->next] : NULL;
+}
+
+/**
+ * xbc_node_get_data() - Get the data of XBC node
+ * @node: An XBC node.
+ *
+ * Return the data (which is always a null-terminated string) of @node.
+ * If the node has invalid data, warn and return NULL.
+ */
+const char * __init xbc_node_get_data(struct xbc_node *node)
+{
+ int offset = node->data & ~XBC_VALUE;
+
+ if (WARN_ON(offset >= xbc_data_size))
+ return NULL;
+
+ return xbc_data + offset;
+}
+
+static bool __init
+xbc_node_match_prefix(struct xbc_node *node, const char **prefix)
+{
+ const char *p = xbc_node_get_data(node);
+ int len = strlen(p);
+
+ if (strncmp(*prefix, p, len))
+ return false;
+
+ p = *prefix + len;
+ if (*p == '.')
+ p++;
+ else if (*p != '\0')
+ return false;
+ *prefix = p;
+
+ return true;
+}
+
+/**
+ * xbc_node_find_subkey() - Find a subkey node which matches given key
+ * @parent: An XBC node.
+ * @key: A key string.
+ *
+ * Search a key node under @parent which matches @key. The @key can contain
+ * several words joined with '.'. If @parent is NULL, this searches the
+ * node from the whole tree. Return NULL if no node is matched.
+ */
+struct xbc_node * __init
+xbc_node_find_subkey(struct xbc_node *parent, const char *key)
+{
+ struct xbc_node *node;
+
+ if (parent)
+ node = xbc_node_get_subkey(parent);
+ else
+ node = xbc_root_node();
+
+ while (node && xbc_node_is_key(node)) {
+ if (!xbc_node_match_prefix(node, &key))
+ node = xbc_node_get_next(node);
+ else if (*key != '\0')
+ node = xbc_node_get_subkey(node);
+ else
+ break;
+ }
+
+ return node;
+}
+
+/**
+ * xbc_node_find_value() - Find a value node which matches given key
+ * @parent: An XBC node.
+ * @key: A key string.
+ * @vnode: A container pointer of found XBC node.
+ *
+ * Search a value node under @parent whose (parent) key node matches @key,
+ * store it in *@vnode, and return the value string.
+ * The @key can contain several words joined with '.'. If @parent is NULL,
+ * this searches the node from the whole tree. Return the value string if a
+ * matched key is found, or NULL if no node is matched.
+ * Note that this returns a 0-length string and stores NULL in *@vnode if the
+ * key has no value. It will also return the value of the first entry if
+ * the value is an array.
+ */
+const char * __init
+xbc_node_find_value(struct xbc_node *parent, const char *key,
+ struct xbc_node **vnode)
+{
+ struct xbc_node *node = xbc_node_find_subkey(parent, key);
+
+ if (!node || !xbc_node_is_key(node))
+ return NULL;
+
+ node = xbc_node_get_child(node);
+ if (node && !xbc_node_is_value(node))
+ return NULL;
+
+ if (vnode)
+ *vnode = node;
+
+ return node ? xbc_node_get_data(node) : "";
+}
+
+/**
+ * xbc_node_compose_key_after() - Compose partial key string of the XBC node
+ * @root: Root XBC node
+ * @node: Target XBC node.
+ * @buf: A buffer to store the key.
+ * @size: The size of the @buf.
+ *
+ * Compose the partial key of @node into @buf, starting right
+ * after @root (@root is not included). If @root is NULL, this returns the
+ * full key words of @node.
+ * Returns the total length of the key stored in @buf. Returns -EINVAL
+ * if @node is NULL, @root is not an ancestor of @node, or @root is @node,
+ * or returns -ERANGE if the key depth is deeper than the max depth.
+ * This is expected to be used with xbc_find_node() to list all (child)
+ * keys under a given key.
+ */
+int __init xbc_node_compose_key_after(struct xbc_node *root,
+ struct xbc_node *node,
+ char *buf, size_t size)
+{
+ uint16_t keys[XBC_DEPTH_MAX];
+ int depth = 0, ret = 0, total = 0;
+
+ if (!node || node == root)
+ return -EINVAL;
+
+ if (xbc_node_is_value(node))
+ node = xbc_node_get_parent(node);
+
+ while (node && node != root) {
+ keys[depth++] = xbc_node_index(node);
+ if (depth == XBC_DEPTH_MAX)
+ return -ERANGE;
+ node = xbc_node_get_parent(node);
+ }
+ if (!node && root)
+ return -EINVAL;
+
+ while (--depth >= 0) {
+ node = xbc_nodes + keys[depth];
+ ret = snprintf(buf, size, "%s%s", xbc_node_get_data(node),
+ depth ? "." : "");
+ if (ret < 0)
+ return ret;
+ if (ret > size) {
+ size = 0;
+ } else {
+ size -= ret;
+ buf += ret;
+ }
+ total += ret;
+ }
+
+ return total;
+}
+
+/**
+ * xbc_node_find_next_leaf() - Find the next leaf node under given node
+ * @root: An XBC root node
+ * @node: The XBC node to start from.
+ *
+ * Search the next leaf node (which means the terminal key node) of @node
+ * under the @root node (including @root itself).
+ * Return the next node, or NULL if no next leaf node is found.
+ */
+struct xbc_node * __init xbc_node_find_next_leaf(struct xbc_node *root,
+ struct xbc_node *node)
+{
+ struct xbc_node *next;
+
+ if (unlikely(!xbc_data))
+ return NULL;
+
+ if (!node) { /* First try */
+ node = root;
+ if (!node)
+ node = xbc_nodes;
+ } else {
+ /* Leaf node may have a subkey */
+ next = xbc_node_get_subkey(node);
+ if (next) {
+ node = next;
+ goto found;
+ }
+
+ if (node == root) /* @root was a leaf, no child node. */
+ return NULL;
+
+ while (!node->next) {
+ node = xbc_node_get_parent(node);
+ if (node == root)
+ return NULL;
+ /* User passed a node which is not under the parent */
+ if (WARN_ON(!node))
+ return NULL;
+ }
+ node = xbc_node_get_next(node);
+ }
+
+found:
+ while (node && !xbc_node_is_leaf(node))
+ node = xbc_node_get_child(node);
+
+ return node;
+}
+
+/**
+ * xbc_node_find_next_key_value() - Find the next key-value pair nodes
+ * @root: An XBC root node
+ * @leaf: A container pointer of the XBC node to start from.
+ *
+ * Search the next leaf node (which means the terminal key node) of *@leaf
+ * under @root node. Returns the value and update *@leaf if next leaf node
+ * is found, or NULL if no next leaf node is found.
+ * Note that this returns a 0-length string if the key has no value, or
+ * the value of the first entry if the value is an array.
+ */
+const char * __init xbc_node_find_next_key_value(struct xbc_node *root,
+ struct xbc_node **leaf)
+{
+ /* the @leaf container pointer must be passed */
+ if (WARN_ON(!leaf))
+ return NULL;
+
+ *leaf = xbc_node_find_next_leaf(root, *leaf);
+ if (!*leaf)
+ return NULL;
+ if ((*leaf)->child)
+ return xbc_node_get_data(xbc_node_get_child(*leaf));
+ else
+ return ""; /* No value key */
+}
+
+/* XBC parse and tree build */
+
+static int __init xbc_init_node(struct xbc_node *node, char *data, uint32_t flag)
+{
+ unsigned long offset = data - xbc_data;
+
+ if (WARN_ON(offset >= XBC_DATA_MAX))
+ return -EINVAL;
+
+ node->data = (uint16_t)offset | flag;
+ node->child = 0;
+ node->next = 0;
+
+ return 0;
+}
+
+static struct xbc_node * __init xbc_add_node(char *data, uint32_t flag)
+{
+ struct xbc_node *node;
+
+ if (xbc_node_num == XBC_NODE_MAX)
+ return NULL;
+
+ node = &xbc_nodes[xbc_node_num++];
+ if (xbc_init_node(node, data, flag) < 0)
+ return NULL;
+
+ return node;
+}
+
+static inline __init struct xbc_node *xbc_last_sibling(struct xbc_node *node)
+{
+ while (node->next)
+ node = xbc_node_get_next(node);
+
+ return node;
+}
+
+static inline __init struct xbc_node *xbc_last_child(struct xbc_node *node)
+{
+ while (node->child)
+ node = xbc_node_get_child(node);
+
+ return node;
+}
+
+static struct xbc_node * __init __xbc_add_sibling(char *data, uint32_t flag, bool head)
+{
+ struct xbc_node *sib, *node = xbc_add_node(data, flag);
+
+ if (node) {
+ if (!last_parent) {
+ /* Ignore @head in this case */
+ node->parent = XBC_NODE_MAX;
+ sib = xbc_last_sibling(xbc_nodes);
+ sib->next = xbc_node_index(node);
+ } else {
+ node->parent = xbc_node_index(last_parent);
+ if (!last_parent->child || head) {
+ node->next = last_parent->child;
+ last_parent->child = xbc_node_index(node);
+ } else {
+ sib = xbc_node_get_child(last_parent);
+ sib = xbc_last_sibling(sib);
+ sib->next = xbc_node_index(node);
+ }
+ }
+ } else
+ xbc_parse_error("Too many nodes", data);
+
+ return node;
+}
+
+static inline struct xbc_node * __init xbc_add_sibling(char *data, uint32_t flag)
+{
+ return __xbc_add_sibling(data, flag, false);
+}
+
+static inline struct xbc_node * __init xbc_add_head_sibling(char *data, uint32_t flag)
+{
+ return __xbc_add_sibling(data, flag, true);
+}
+
+static inline __init struct xbc_node *xbc_add_child(char *data, uint32_t flag)
+{
+ struct xbc_node *node = xbc_add_sibling(data, flag);
+
+ if (node)
+ last_parent = node;
+
+ return node;
+}
+
+static inline __init bool xbc_valid_keyword(char *key)
+{
+ if (key[0] == '\0')
+ return false;
+
+ while (isalnum(*key) || *key == '-' || *key == '_')
+ key++;
+
+ return *key == '\0';
+}
+
+static char *skip_comment(char *p)
+{
+ char *ret;
+
+ ret = strchr(p, '\n');
+ if (!ret)
+ ret = p + strlen(p);
+ else
+ ret++;
+
+ return ret;
+}
+
+static char *skip_spaces_until_newline(char *p)
+{
+ while (isspace(*p) && *p != '\n')
+ p++;
+ return p;
+}
+
+static int __init __xbc_open_brace(char *p)
+{
+ /* Push the last key as open brace */
+ open_brace[brace_index++] = xbc_node_index(last_parent);
+ if (brace_index >= XBC_DEPTH_MAX)
+ return xbc_parse_error("Exceed max depth of braces", p);
+
+ return 0;
+}
+
+static int __init __xbc_close_brace(char *p)
+{
+ brace_index--;
+ if (!last_parent || brace_index < 0 ||
+ (open_brace[brace_index] != xbc_node_index(last_parent)))
+ return xbc_parse_error("Unexpected closing brace", p);
+
+ if (brace_index == 0)
+ last_parent = NULL;
+ else
+ last_parent = &xbc_nodes[open_brace[brace_index - 1]];
+
+ return 0;
+}
+
+/*
+ * Return the delimiter or an error; no node is added. As in lib/cmdline.c,
+ * you can use " around spaces, but you can't escape " inside a value.
+ */
+static int __init __xbc_parse_value(char **__v, char **__n)
+{
+ char *p, *v = *__v;
+ int c, quotes = 0;
+
+ v = skip_spaces(v);
+ while (*v == '#') {
+ v = skip_comment(v);
+ v = skip_spaces(v);
+ }
+ if (*v == '"' || *v == '\'') {
+ quotes = *v;
+ v++;
+ }
+ p = v - 1;
+ while ((c = *++p)) {
+ if (!isprint(c) && !isspace(c))
+ return xbc_parse_error("Non printable value", p);
+ if (quotes) {
+ if (c != quotes)
+ continue;
+ quotes = 0;
+ *p++ = '\0';
+ p = skip_spaces_until_newline(p);
+ c = *p;
+ if (c && !strchr(",;\n#}", c))
+ return xbc_parse_error("No value delimiter", p);
+ if (*p)
+ p++;
+ break;
+ }
+ if (strchr(",;\n#}", c)) {
+ *p++ = '\0';
+ v = strim(v);
+ break;
+ }
+ }
+ if (quotes)
+ return xbc_parse_error("No closing quotes", p);
+ if (c == '#') {
+ p = skip_comment(p);
+ c = '\n'; /* A comment must be treated as a newline */
+ }
+ *__n = p;
+ *__v = v;
+
+ return c;
+}
+
+static int __init xbc_parse_array(char **__v)
+{
+ struct xbc_node *node;
+ char *next;
+ int c = 0;
+
+ if (last_parent->child)
+ last_parent = xbc_node_get_child(last_parent);
+
+ do {
+ c = __xbc_parse_value(__v, &next);
+ if (c < 0)
+ return c;
+
+ node = xbc_add_child(*__v, XBC_VALUE);
+ if (!node)
+ return -ENOMEM;
+ *__v = next;
+ } while (c == ',');
+ node->child = 0;
+
+ return c;
+}
+
+static inline __init
+struct xbc_node *find_match_node(struct xbc_node *node, char *k)
+{
+ while (node) {
+ if (!strcmp(xbc_node_get_data(node), k))
+ break;
+ node = xbc_node_get_next(node);
+ }
+ return node;
+}
+
+static int __init __xbc_add_key(char *k)
+{
+ struct xbc_node *node, *child;
+
+ if (!xbc_valid_keyword(k))
+ return xbc_parse_error("Invalid keyword", k);
+
+ if (unlikely(xbc_node_num == 0))
+ goto add_node;
+
+ if (!last_parent) /* the first level */
+ node = find_match_node(xbc_nodes, k);
+ else {
+ child = xbc_node_get_child(last_parent);
+ /* Since the value node is the first child, skip it. */
+ if (child && xbc_node_is_value(child))
+ child = xbc_node_get_next(child);
+ node = find_match_node(child, k);
+ }
+
+ if (node)
+ last_parent = node;
+ else {
+add_node:
+ node = xbc_add_child(k, XBC_KEY);
+ if (!node)
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int __init __xbc_parse_keys(char *k)
+{
+ char *p;
+ int ret;
+
+ k = strim(k);
+ while ((p = strchr(k, '.'))) {
+ *p++ = '\0';
+ ret = __xbc_add_key(k);
+ if (ret)
+ return ret;
+ k = p;
+ }
+
+ return __xbc_add_key(k);
+}
+
+static int __init xbc_parse_kv(char **k, char *v, int op)
+{
+ struct xbc_node *prev_parent = last_parent;
+ struct xbc_node *child;
+ char *next;
+ int c, ret;
+
+ ret = __xbc_parse_keys(*k);
+ if (ret)
+ return ret;
+
+ c = __xbc_parse_value(&v, &next);
+ if (c < 0)
+ return c;
+
+ child = xbc_node_get_child(last_parent);
+ if (child && xbc_node_is_value(child)) {
+ if (op == '=')
+ return xbc_parse_error("Value is redefined", v);
+ if (op == ':') {
+ unsigned short nidx = child->next;
+
+ xbc_init_node(child, v, XBC_VALUE);
+ child->next = nidx; /* keep subkeys */
+ goto array;
+ }
+ /* op must be '+' */
+ last_parent = xbc_last_child(child);
+ }
+ /* The value node should always be the first child */
+ if (!xbc_add_head_sibling(v, XBC_VALUE))
+ return -ENOMEM;
+
+array:
+ if (c == ',') { /* Array */
+ c = xbc_parse_array(&next);
+ if (c < 0)
+ return c;
+ }
+
+ last_parent = prev_parent;
+
+ if (c == '}') {
+ ret = __xbc_close_brace(next - 1);
+ if (ret < 0)
+ return ret;
+ }
+
+ *k = next;
+
+ return 0;
+}
+
+static int __init xbc_parse_key(char **k, char *n)
+{
+ struct xbc_node *prev_parent = last_parent;
+ int ret;
+
+ *k = strim(*k);
+ if (**k != '\0') {
+ ret = __xbc_parse_keys(*k);
+ if (ret)
+ return ret;
+ last_parent = prev_parent;
+ }
+ *k = n;
+
+ return 0;
+}
+
+static int __init xbc_open_brace(char **k, char *n)
+{
+ int ret;
+
+ ret = __xbc_parse_keys(*k);
+ if (ret)
+ return ret;
+ *k = n;
+
+ return __xbc_open_brace(n - 1);
+}
+
+static int __init xbc_close_brace(char **k, char *n)
+{
+ int ret;
+
+ ret = xbc_parse_key(k, n);
+ if (ret)
+ return ret;
+ /* k is updated in xbc_parse_key() */
+
+ return __xbc_close_brace(n - 1);
+}
+
+static int __init xbc_verify_tree(void)
+{
+ int i, depth, len, wlen;
+ struct xbc_node *n, *m;
+
+ /* Brace closing */
+ if (brace_index) {
+ n = &xbc_nodes[open_brace[brace_index]];
+ return xbc_parse_error("Brace is not closed",
+ xbc_node_get_data(n));
+ }
+
+ /* Empty tree */
+ if (xbc_node_num == 0) {
+ xbc_parse_error("Empty config", xbc_data);
+ return -ENOENT;
+ }
+
+ for (i = 0; i < xbc_node_num; i++) {
+ if (xbc_nodes[i].next > xbc_node_num) {
+ return xbc_parse_error("No closing brace",
+ xbc_node_get_data(xbc_nodes + i));
+ }
+ }
+
+ /* Key tree limitation check */
+ n = &xbc_nodes[0];
+ depth = 1;
+ len = 0;
+
+ while (n) {
+ wlen = strlen(xbc_node_get_data(n)) + 1;
+ len += wlen;
+ if (len > XBC_KEYLEN_MAX)
+ return xbc_parse_error("Too long key length",
+ xbc_node_get_data(n));
+
+ m = xbc_node_get_child(n);
+ if (m && xbc_node_is_key(m)) {
+ n = m;
+ depth++;
+ if (depth > XBC_DEPTH_MAX)
+ return xbc_parse_error("Too many key words",
+ xbc_node_get_data(n));
+ continue;
+ }
+ len -= wlen;
+ m = xbc_node_get_next(n);
+ while (!m) {
+ n = xbc_node_get_parent(n);
+ if (!n)
+ break;
+ len -= strlen(xbc_node_get_data(n)) + 1;
+ depth--;
+ m = xbc_node_get_next(n);
+ }
+ n = m;
+ }
+
+ return 0;
+}
+
+/* xbc_data and xbc_nodes need to be set up before calling this. */
+static int __init xbc_parse_tree(void)
+{
+ char *p, *q;
+ int ret = 0, c;
+
+ last_parent = NULL;
+ p = xbc_data;
+ do {
+ q = strpbrk(p, "{}=+;:\n#");
+ if (!q) {
+ p = skip_spaces(p);
+ if (*p != '\0')
+ ret = xbc_parse_error("No delimiter", p);
+ break;
+ }
+
+ c = *q;
+ *q++ = '\0';
+ switch (c) {
+ case ':':
+ case '+':
+ if (*q++ != '=') {
+ ret = xbc_parse_error(c == '+' ?
+ "Wrong '+' operator" :
+ "Wrong ':' operator",
+ q - 2);
+ break;
+ }
+ fallthrough;
+ case '=':
+ ret = xbc_parse_kv(&p, q, c);
+ break;
+ case '{':
+ ret = xbc_open_brace(&p, q);
+ break;
+ case '#':
+ q = skip_comment(q);
+ fallthrough;
+ case ';':
+ case '\n':
+ ret = xbc_parse_key(&p, q);
+ break;
+ case '}':
+ ret = xbc_close_brace(&p, q);
+ break;
+ }
+ } while (!ret);
+
+ return ret;
+}
+
+/**
+ * xbc_exit() - Clean up all parsed bootconfig
+ *
+ * This clears all data structures of the parsed bootconfig from memory.
+ * If you need to reuse xbc_init() with a new boot config, call this
+ * first.
+ */
+void __init xbc_exit(void)
+{
+ xbc_free_mem(xbc_data, xbc_data_size);
+ xbc_data = NULL;
+ xbc_data_size = 0;
+ xbc_node_num = 0;
+ xbc_free_mem(xbc_nodes, sizeof(struct xbc_node) * XBC_NODE_MAX);
+ xbc_nodes = NULL;
+ brace_index = 0;
+}
+
+/**
+ * xbc_init() - Parse given XBC file and build XBC internal tree
+ * @data: The boot config text original data
+ * @size: The size of @data
+ * @emsg: A pointer of const char * to store the error message
+ * @epos: A pointer of int to store the error position
+ *
+ * This parses the boot config text in @data. @size must be smaller
+ * than XBC_DATA_MAX.
+ * Return the number of stored nodes (>0) if succeeded, or -errno
+ * if there is any error.
+ * In error cases, @emsg will be updated with an error message and
+ * @epos will be updated with the error position, which is the byte offset
+ * in @data. If the error is not a parser error, @epos will be -1.
+ */
+int __init xbc_init(const char *data, size_t size, const char **emsg, int *epos)
+{
+ int ret;
+
+ if (epos)
+ *epos = -1;
+
+ if (xbc_data) {
+ if (emsg)
+ *emsg = "Bootconfig is already initialized";
+ return -EBUSY;
+ }
+ if (size > XBC_DATA_MAX || size == 0) {
+ if (emsg)
+ *emsg = size ? "Config data is too big" :
+ "Config data is empty";
+ return -ERANGE;
+ }
+
+ xbc_data = xbc_alloc_mem(size + 1);
+ if (!xbc_data) {
+ if (emsg)
+ *emsg = "Failed to allocate bootconfig data";
+ return -ENOMEM;
+ }
+ memcpy(xbc_data, data, size);
+ xbc_data[size] = '\0';
+ xbc_data_size = size + 1;
+
+ xbc_nodes = xbc_alloc_mem(sizeof(struct xbc_node) * XBC_NODE_MAX);
+ if (!xbc_nodes) {
+ if (emsg)
+ *emsg = "Failed to allocate bootconfig nodes";
+ xbc_exit();
+ return -ENOMEM;
+ }
+ memset(xbc_nodes, 0, sizeof(struct xbc_node) * XBC_NODE_MAX);
+
+ ret = xbc_parse_tree();
+ if (!ret)
+ ret = xbc_verify_tree();
+
+ if (ret < 0) {
+ if (epos)
+ *epos = xbc_err_pos;
+ if (emsg)
+ *emsg = xbc_err_msg;
+ xbc_exit();
+ } else
+ ret = xbc_node_num;
+
+ return ret;
+}
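+
+/*
+ * Typical usage (a minimal sketch; 'buf', 'size' and the key string are
+ * assumptions for illustration):
+ *
+ *    const char *msg, *val;
+ *    int pos, ret;
+ *
+ *    ret = xbc_init(buf, size, &msg, &pos);
+ *    if (ret < 0)
+ *        pr_err("bootconfig: %s at offset %d\n", msg, pos);
+ *    else
+ *        val = xbc_node_find_value(NULL, "ftrace.filter", NULL);
+ */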
diff --git a/lib/bsearch.c b/lib/bsearch.c
new file mode 100644
index 000000000..bf86aa66f
--- /dev/null
+++ b/lib/bsearch.c
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * A generic implementation of binary search for the Linux kernel
+ *
+ * Copyright (C) 2008-2009 Ksplice, Inc.
+ * Author: Tim Abbott <tabbott@ksplice.com>
+ */
+
+#include <linux/export.h>
+#include <linux/bsearch.h>
+#include <linux/kprobes.h>
+
+/*
+ * bsearch - binary search an array of elements
+ * @key: pointer to item being searched for
+ * @base: pointer to first element to search
+ * @num: number of elements
+ * @size: size of each element
+ * @cmp: pointer to comparison function
+ *
+ * This function does a binary search on the given array. The
+ * contents of the array should already be in ascending sorted order
+ * under the provided comparison function.
+ *
+ * Note that the key need not have the same type as the elements in
+ * the array, e.g. key could be a string and the comparison function
+ * could compare the string with the struct's name field. However, if
+ * the key and elements in the array are of the same type, you can use
+ * the same comparison function for both sort() and bsearch().
+ */
+void *bsearch(const void *key, const void *base, size_t num, size_t size, cmp_func_t cmp)
+{
+ return __inline_bsearch(key, base, num, size, cmp);
+}
+EXPORT_SYMBOL(bsearch);
+NOKPROBE_SYMBOL(bsearch);
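+
+/*
+ * Example (a self-contained sketch; the array and comparator are
+ * hypothetical):
+ *
+ *    static int cmp_int(const void *key, const void *elt)
+ *    {
+ *        return *(const int *)key - *(const int *)elt;
+ *    }
+ *
+ *    static const int vals[] = { 1, 3, 7, 9 };	// ascending order
+ *    int key = 7;
+ *    const int *found = bsearch(&key, vals, ARRAY_SIZE(vals),
+ *                               sizeof(vals[0]), cmp_int);
+ *    // found now points at vals[2]; NULL if the key were absent.
+ */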
diff --git a/lib/btree.c b/lib/btree.c
new file mode 100644
index 000000000..49420cae3
--- /dev/null
+++ b/lib/btree.c
@@ -0,0 +1,796 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * lib/btree.c - Simple In-memory B+Tree
+ *
+ * Copyright (c) 2007-2008 Joern Engel <joern@purestorage.com>
+ * Bits and pieces stolen from Peter Zijlstra's code, which is
+ * Copyright 2007, Red Hat Inc. Peter Zijlstra
+ *
+ * see http://programming.kicks-ass.net/kernel-patches/vma_lookup/btree.patch
+ *
+ * A relatively simple B+Tree implementation. I have written it as a learning
+ * exercise to understand how B+Trees work. Turned out to be useful as well.
+ *
+ * B+Trees can be used similarly to Linux radix trees (which don't have anything
+ * in common with textbook radix trees, beware). A prerequisite for them working
+ * well is that access to a random tree node is much faster than a large number
+ * of operations within each node.
+ *
+ * Disks have fulfilled the prerequisite for a long time. More recently DRAM
+ * has gained similar properties, as memory access times, when measured in cpu
+ * cycles, have increased. Cacheline sizes have increased as well, which also
+ * helps B+Trees.
+ *
+ * Compared to radix trees, B+Trees are more efficient when dealing with a
+ * sparsely populated address space. Between 25% and 50% of the memory is
+ * occupied with valid pointers. When densely populated, radix trees contain
+ * ~98% pointers - hard to beat. Very sparse radix trees contain only ~2%
+ * pointers.
+ *
+ * This particular implementation stores pointers identified by a long value.
+ * Storing NULL pointers is illegal, lookup will return NULL when no entry
+ * was found.
+ *
+ * A trick was used that is not commonly found in textbooks. The lowest
+ * values are to the right, not to the left. All used slots within a node
+ * are on the left, all unused slots contain NUL values. Most operations
+ * simply loop once over all slots and terminate on the first NUL.
+ */
+
+#include <linux/btree.h>
+#include <linux/cache.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+#define MAX(a, b) ((a) > (b) ? (a) : (b))
+#define NODESIZE MAX(L1_CACHE_BYTES, 128)
+
+struct btree_geo {
+ int keylen;
+ int no_pairs;
+ int no_longs;
+};
+
+struct btree_geo btree_geo32 = {
+ .keylen = 1,
+ .no_pairs = NODESIZE / sizeof(long) / 2,
+ .no_longs = NODESIZE / sizeof(long) / 2,
+};
+EXPORT_SYMBOL_GPL(btree_geo32);
+
+#define LONG_PER_U64 (64 / BITS_PER_LONG)
+struct btree_geo btree_geo64 = {
+ .keylen = LONG_PER_U64,
+ .no_pairs = NODESIZE / sizeof(long) / (1 + LONG_PER_U64),
+ .no_longs = LONG_PER_U64 * (NODESIZE / sizeof(long) / (1 + LONG_PER_U64)),
+};
+EXPORT_SYMBOL_GPL(btree_geo64);
+
+struct btree_geo btree_geo128 = {
+ .keylen = 2 * LONG_PER_U64,
+ .no_pairs = NODESIZE / sizeof(long) / (1 + 2 * LONG_PER_U64),
+ .no_longs = 2 * LONG_PER_U64 * (NODESIZE / sizeof(long) / (1 + 2 * LONG_PER_U64)),
+};
+EXPORT_SYMBOL_GPL(btree_geo128);
+
+#define MAX_KEYLEN (2 * LONG_PER_U64)
+
+static struct kmem_cache *btree_cachep;
+
+void *btree_alloc(gfp_t gfp_mask, void *pool_data)
+{
+ return kmem_cache_alloc(btree_cachep, gfp_mask);
+}
+EXPORT_SYMBOL_GPL(btree_alloc);
+
+void btree_free(void *element, void *pool_data)
+{
+ kmem_cache_free(btree_cachep, element);
+}
+EXPORT_SYMBOL_GPL(btree_free);
+
+static unsigned long *btree_node_alloc(struct btree_head *head, gfp_t gfp)
+{
+ unsigned long *node;
+
+ node = mempool_alloc(head->mempool, gfp);
+ if (likely(node))
+ memset(node, 0, NODESIZE);
+ return node;
+}
+
+static int longcmp(const unsigned long *l1, const unsigned long *l2, size_t n)
+{
+ size_t i;
+
+ for (i = 0; i < n; i++) {
+ if (l1[i] < l2[i])
+ return -1;
+ if (l1[i] > l2[i])
+ return 1;
+ }
+ return 0;
+}
+
+static unsigned long *longcpy(unsigned long *dest, const unsigned long *src,
+ size_t n)
+{
+ size_t i;
+
+ for (i = 0; i < n; i++)
+ dest[i] = src[i];
+ return dest;
+}
+
+static unsigned long *longset(unsigned long *s, unsigned long c, size_t n)
+{
+ size_t i;
+
+ for (i = 0; i < n; i++)
+ s[i] = c;
+ return s;
+}
+
+static void dec_key(struct btree_geo *geo, unsigned long *key)
+{
+ unsigned long val;
+ int i;
+
+ for (i = geo->keylen - 1; i >= 0; i--) {
+ val = key[i];
+ key[i] = val - 1;
+ if (val)
+ break;
+ }
+}
+
+static unsigned long *bkey(struct btree_geo *geo, unsigned long *node, int n)
+{
+ return &node[n * geo->keylen];
+}
+
+static void *bval(struct btree_geo *geo, unsigned long *node, int n)
+{
+ return (void *)node[geo->no_longs + n];
+}
+
+static void setkey(struct btree_geo *geo, unsigned long *node, int n,
+ unsigned long *key)
+{
+ longcpy(bkey(geo, node, n), key, geo->keylen);
+}
+
+static void setval(struct btree_geo *geo, unsigned long *node, int n,
+ void *val)
+{
+ node[geo->no_longs + n] = (unsigned long) val;
+}
+
+static void clearpair(struct btree_geo *geo, unsigned long *node, int n)
+{
+ longset(bkey(geo, node, n), 0, geo->keylen);
+ node[geo->no_longs + n] = 0;
+}
+
+static inline void __btree_init(struct btree_head *head)
+{
+ head->node = NULL;
+ head->height = 0;
+}
+
+void btree_init_mempool(struct btree_head *head, mempool_t *mempool)
+{
+ __btree_init(head);
+ head->mempool = mempool;
+}
+EXPORT_SYMBOL_GPL(btree_init_mempool);
+
+int btree_init(struct btree_head *head)
+{
+ __btree_init(head);
+ head->mempool = mempool_create(0, btree_alloc, btree_free, NULL);
+ if (!head->mempool)
+ return -ENOMEM;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(btree_init);
+
+void btree_destroy(struct btree_head *head)
+{
+ mempool_free(head->node, head->mempool);
+ mempool_destroy(head->mempool);
+ head->mempool = NULL;
+}
+EXPORT_SYMBOL_GPL(btree_destroy);
+
+void *btree_last(struct btree_head *head, struct btree_geo *geo,
+ unsigned long *key)
+{
+ int height = head->height;
+ unsigned long *node = head->node;
+
+ if (height == 0)
+ return NULL;
+
+ for ( ; height > 1; height--)
+ node = bval(geo, node, 0);
+
+ longcpy(key, bkey(geo, node, 0), geo->keylen);
+ return bval(geo, node, 0);
+}
+EXPORT_SYMBOL_GPL(btree_last);
+
+static int keycmp(struct btree_geo *geo, unsigned long *node, int pos,
+ unsigned long *key)
+{
+ return longcmp(bkey(geo, node, pos), key, geo->keylen);
+}
+
+static int keyzero(struct btree_geo *geo, unsigned long *key)
+{
+ int i;
+
+ for (i = 0; i < geo->keylen; i++)
+ if (key[i])
+ return 0;
+
+ return 1;
+}
+
+static void *btree_lookup_node(struct btree_head *head, struct btree_geo *geo,
+ unsigned long *key)
+{
+ int i, height = head->height;
+ unsigned long *node = head->node;
+
+ if (height == 0)
+ return NULL;
+
+ for ( ; height > 1; height--) {
+ for (i = 0; i < geo->no_pairs; i++)
+ if (keycmp(geo, node, i, key) <= 0)
+ break;
+ if (i == geo->no_pairs)
+ return NULL;
+ node = bval(geo, node, i);
+ if (!node)
+ return NULL;
+ }
+ return node;
+}
+
+void *btree_lookup(struct btree_head *head, struct btree_geo *geo,
+ unsigned long *key)
+{
+ int i;
+ unsigned long *node;
+
+ node = btree_lookup_node(head, geo, key);
+ if (!node)
+ return NULL;
+
+ for (i = 0; i < geo->no_pairs; i++)
+ if (keycmp(geo, node, i, key) == 0)
+ return bval(geo, node, i);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(btree_lookup);
+
+int btree_update(struct btree_head *head, struct btree_geo *geo,
+ unsigned long *key, void *val)
+{
+ int i;
+ unsigned long *node;
+
+ node = btree_lookup_node(head, geo, key);
+ if (!node)
+ return -ENOENT;
+
+ for (i = 0; i < geo->no_pairs; i++)
+ if (keycmp(geo, node, i, key) == 0) {
+ setval(geo, node, i, val);
+ return 0;
+ }
+ return -ENOENT;
+}
+EXPORT_SYMBOL_GPL(btree_update);
+
+/*
+ * Usually this function is quite similar to normal lookup. But the key of
+ * a parent node may be smaller than the smallest key of all its siblings.
+ * In such a case we cannot just return NULL, as we have only proven that no
+ * key smaller than __key but larger than this parent key exists.
+ * So we set __key to the parent key and retry. We have to use the smallest
+ * such parent key, which is the last parent key we encountered.
+ */
+void *btree_get_prev(struct btree_head *head, struct btree_geo *geo,
+ unsigned long *__key)
+{
+ int i, height;
+ unsigned long *node, *oldnode;
+ unsigned long *retry_key = NULL, key[MAX_KEYLEN];
+
+ if (keyzero(geo, __key))
+ return NULL;
+
+ if (head->height == 0)
+ return NULL;
+ longcpy(key, __key, geo->keylen);
+retry:
+ dec_key(geo, key);
+
+ node = head->node;
+ for (height = head->height ; height > 1; height--) {
+ for (i = 0; i < geo->no_pairs; i++)
+ if (keycmp(geo, node, i, key) <= 0)
+ break;
+ if (i == geo->no_pairs)
+ goto miss;
+ oldnode = node;
+ node = bval(geo, node, i);
+ if (!node)
+ goto miss;
+ retry_key = bkey(geo, oldnode, i);
+ }
+
+ if (!node)
+ goto miss;
+
+ for (i = 0; i < geo->no_pairs; i++) {
+ if (keycmp(geo, node, i, key) <= 0) {
+ if (bval(geo, node, i)) {
+ longcpy(__key, bkey(geo, node, i), geo->keylen);
+ return bval(geo, node, i);
+ } else
+ goto miss;
+ }
+ }
+miss:
+ if (retry_key) {
+ longcpy(key, retry_key, geo->keylen);
+ retry_key = NULL;
+ goto retry;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(btree_get_prev);
+
+static int getpos(struct btree_geo *geo, unsigned long *node,
+ unsigned long *key)
+{
+ int i;
+
+ for (i = 0; i < geo->no_pairs; i++) {
+ if (keycmp(geo, node, i, key) <= 0)
+ break;
+ }
+ return i;
+}
+
+static int getfill(struct btree_geo *geo, unsigned long *node, int start)
+{
+ int i;
+
+ for (i = start; i < geo->no_pairs; i++)
+ if (!bval(geo, node, i))
+ break;
+ return i;
+}
+
+/*
+ * locate the correct leaf node in the btree
+ */
+static unsigned long *find_level(struct btree_head *head, struct btree_geo *geo,
+ unsigned long *key, int level)
+{
+ unsigned long *node = head->node;
+ int i, height;
+
+ for (height = head->height; height > level; height--) {
+ for (i = 0; i < geo->no_pairs; i++)
+ if (keycmp(geo, node, i, key) <= 0)
+ break;
+
+ if ((i == geo->no_pairs) || !bval(geo, node, i)) {
+ /* right-most key is too large, update it */
+ /* FIXME: If the right-most key on higher levels is
+ * always zero, this wouldn't be necessary. */
+ i--;
+ setkey(geo, node, i, key);
+ }
+ BUG_ON(i < 0);
+ node = bval(geo, node, i);
+ }
+ BUG_ON(!node);
+ return node;
+}
+
+static int btree_grow(struct btree_head *head, struct btree_geo *geo,
+ gfp_t gfp)
+{
+ unsigned long *node;
+ int fill;
+
+ node = btree_node_alloc(head, gfp);
+ if (!node)
+ return -ENOMEM;
+ if (head->node) {
+ fill = getfill(geo, head->node, 0);
+ setkey(geo, node, 0, bkey(geo, head->node, fill - 1));
+ setval(geo, node, 0, head->node);
+ }
+ head->node = node;
+ head->height++;
+ return 0;
+}
+
+static void btree_shrink(struct btree_head *head, struct btree_geo *geo)
+{
+ unsigned long *node;
+ int fill;
+
+ if (head->height <= 1)
+ return;
+
+ node = head->node;
+ fill = getfill(geo, node, 0);
+ BUG_ON(fill > 1);
+ head->node = bval(geo, node, 0);
+ head->height--;
+ mempool_free(node, head->mempool);
+}
+
+static int btree_insert_level(struct btree_head *head, struct btree_geo *geo,
+ unsigned long *key, void *val, int level,
+ gfp_t gfp)
+{
+ unsigned long *node;
+ int i, pos, fill, err;
+
+ BUG_ON(!val);
+ if (head->height < level) {
+ err = btree_grow(head, geo, gfp);
+ if (err)
+ return err;
+ }
+
+retry:
+ node = find_level(head, geo, key, level);
+ pos = getpos(geo, node, key);
+ fill = getfill(geo, node, pos);
+ /* two identical keys are not allowed */
+ BUG_ON(pos < fill && keycmp(geo, node, pos, key) == 0);
+
+ if (fill == geo->no_pairs) {
+ /* need to split node */
+ unsigned long *new;
+
+ new = btree_node_alloc(head, gfp);
+ if (!new)
+ return -ENOMEM;
+ err = btree_insert_level(head, geo,
+ bkey(geo, node, fill / 2 - 1),
+ new, level + 1, gfp);
+ if (err) {
+ mempool_free(new, head->mempool);
+ return err;
+ }
+ for (i = 0; i < fill / 2; i++) {
+ setkey(geo, new, i, bkey(geo, node, i));
+ setval(geo, new, i, bval(geo, node, i));
+ setkey(geo, node, i, bkey(geo, node, i + fill / 2));
+ setval(geo, node, i, bval(geo, node, i + fill / 2));
+ clearpair(geo, node, i + fill / 2);
+ }
+ if (fill & 1) {
+ setkey(geo, node, i, bkey(geo, node, fill - 1));
+ setval(geo, node, i, bval(geo, node, fill - 1));
+ clearpair(geo, node, fill - 1);
+ }
+ goto retry;
+ }
+ BUG_ON(fill >= geo->no_pairs);
+
+ /* shift and insert */
+ for (i = fill; i > pos; i--) {
+ setkey(geo, node, i, bkey(geo, node, i - 1));
+ setval(geo, node, i, bval(geo, node, i - 1));
+ }
+ setkey(geo, node, pos, key);
+ setval(geo, node, pos, val);
+
+ return 0;
+}
+
+int btree_insert(struct btree_head *head, struct btree_geo *geo,
+ unsigned long *key, void *val, gfp_t gfp)
+{
+ BUG_ON(!val);
+ return btree_insert_level(head, geo, key, val, 1, gfp);
+}
+EXPORT_SYMBOL_GPL(btree_insert);
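+
+/*
+ * Example usage of the raw API (a sketch; error handling trimmed, and
+ * 'some_ptr' is a placeholder which must not be NULL):
+ *
+ *    struct btree_head head;
+ *    unsigned long key = 42;
+ *    void *val;
+ *
+ *    btree_init(&head);		// check the return value in real code
+ *    btree_insert(&head, &btree_geo32, &key, some_ptr, GFP_KERNEL);
+ *    val = btree_lookup(&head, &btree_geo32, &key);	// -> some_ptr
+ *    btree_remove(&head, &btree_geo32, &key);
+ *    btree_destroy(&head);
+ */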
+
+static void *btree_remove_level(struct btree_head *head, struct btree_geo *geo,
+ unsigned long *key, int level);
+static void merge(struct btree_head *head, struct btree_geo *geo, int level,
+ unsigned long *left, int lfill,
+ unsigned long *right, int rfill,
+ unsigned long *parent, int lpos)
+{
+ int i;
+
+ for (i = 0; i < rfill; i++) {
+ /* Move all keys to the left */
+ setkey(geo, left, lfill + i, bkey(geo, right, i));
+ setval(geo, left, lfill + i, bval(geo, right, i));
+ }
+ /* Exchange left and right child in parent */
+ setval(geo, parent, lpos, right);
+ setval(geo, parent, lpos + 1, left);
+ /* Remove left (formerly right) child from parent */
+ btree_remove_level(head, geo, bkey(geo, parent, lpos), level + 1);
+ mempool_free(right, head->mempool);
+}
+
+static void rebalance(struct btree_head *head, struct btree_geo *geo,
+ unsigned long *key, int level, unsigned long *child, int fill)
+{
+ unsigned long *parent, *left = NULL, *right = NULL;
+ int i, no_left, no_right;
+
+ if (fill == 0) {
+ /* Because we don't steal entries from a neighbour, this case
+ * can happen. Parent node contains a single child, this
+ * node, so merging with a sibling never happens.
+ */
+ btree_remove_level(head, geo, key, level + 1);
+ mempool_free(child, head->mempool);
+ return;
+ }
+
+ parent = find_level(head, geo, key, level + 1);
+ i = getpos(geo, parent, key);
+ BUG_ON(bval(geo, parent, i) != child);
+
+ if (i > 0) {
+ left = bval(geo, parent, i - 1);
+ no_left = getfill(geo, left, 0);
+ if (fill + no_left <= geo->no_pairs) {
+ merge(head, geo, level,
+ left, no_left,
+ child, fill,
+ parent, i - 1);
+ return;
+ }
+ }
+ if (i + 1 < getfill(geo, parent, i)) {
+ right = bval(geo, parent, i + 1);
+ no_right = getfill(geo, right, 0);
+ if (fill + no_right <= geo->no_pairs) {
+ merge(head, geo, level,
+ child, fill,
+ right, no_right,
+ parent, i);
+ return;
+ }
+ }
+ /*
+ * We could also try to steal one entry from the left or right
+ * neighbor. By not doing so we changed the invariant from
+ * "all nodes are at least half full" to "no two neighboring
+ * nodes can be merged". Which means that the average fill of
+ * all nodes is still half or better.
+ */
+}
+
+static void *btree_remove_level(struct btree_head *head, struct btree_geo *geo,
+ unsigned long *key, int level)
+{
+ unsigned long *node;
+ int i, pos, fill;
+ void *ret;
+
+ if (level > head->height) {
+ /* we recursed all the way up */
+ head->height = 0;
+ head->node = NULL;
+ return NULL;
+ }
+
+ node = find_level(head, geo, key, level);
+ pos = getpos(geo, node, key);
+ fill = getfill(geo, node, pos);
+ if ((level == 1) && (keycmp(geo, node, pos, key) != 0))
+ return NULL;
+ ret = bval(geo, node, pos);
+
+ /* remove and shift */
+ for (i = pos; i < fill - 1; i++) {
+ setkey(geo, node, i, bkey(geo, node, i + 1));
+ setval(geo, node, i, bval(geo, node, i + 1));
+ }
+ clearpair(geo, node, fill - 1);
+
+ if (fill - 1 < geo->no_pairs / 2) {
+ if (level < head->height)
+ rebalance(head, geo, key, level, node, fill - 1);
+ else if (fill - 1 == 1)
+ btree_shrink(head, geo);
+ }
+
+ return ret;
+}
+
+void *btree_remove(struct btree_head *head, struct btree_geo *geo,
+ unsigned long *key)
+{
+ if (head->height == 0)
+ return NULL;
+
+ return btree_remove_level(head, geo, key, 1);
+}
+EXPORT_SYMBOL_GPL(btree_remove);
+
+int btree_merge(struct btree_head *target, struct btree_head *victim,
+ struct btree_geo *geo, gfp_t gfp)
+{
+ unsigned long key[MAX_KEYLEN];
+ unsigned long dup[MAX_KEYLEN];
+ void *val;
+ int err;
+
+ BUG_ON(target == victim);
+
+ if (!(target->node)) {
+ /* target is empty, just copy fields over */
+ target->node = victim->node;
+ target->height = victim->height;
+ __btree_init(victim);
+ return 0;
+ }
+
+ /* TODO: This needs some optimizations. Currently we do three tree
+ * walks to remove a single object from the victim.
+ */
+ for (;;) {
+ if (!btree_last(victim, geo, key))
+ break;
+ val = btree_lookup(victim, geo, key);
+ err = btree_insert(target, geo, key, val, gfp);
+ if (err)
+ return err;
+ /* We must make a copy of the key, as the original will get
+ * mangled inside btree_remove. */
+ longcpy(dup, key, geo->keylen);
+ btree_remove(victim, geo, dup);
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(btree_merge);
+
+static size_t __btree_for_each(struct btree_head *head, struct btree_geo *geo,
+ unsigned long *node, unsigned long opaque,
+ void (*func)(void *elem, unsigned long opaque,
+ unsigned long *key, size_t index,
+ void *func2),
+ void *func2, int reap, int height, size_t count)
+{
+ int i;
+ unsigned long *child;
+
+ for (i = 0; i < geo->no_pairs; i++) {
+ child = bval(geo, node, i);
+ if (!child)
+ break;
+ if (height > 1)
+ count = __btree_for_each(head, geo, child, opaque,
+ func, func2, reap, height - 1, count);
+ else
+ func(child, opaque, bkey(geo, node, i), count++,
+ func2);
+ }
+ if (reap)
+ mempool_free(node, head->mempool);
+ return count;
+}
+
+static void empty(void *elem, unsigned long opaque, unsigned long *key,
+ size_t index, void *func2)
+{
+}
+
+void visitorl(void *elem, unsigned long opaque, unsigned long *key,
+ size_t index, void *__func)
+{
+ visitorl_t func = __func;
+
+ func(elem, opaque, *key, index);
+}
+EXPORT_SYMBOL_GPL(visitorl);
+
+void visitor32(void *elem, unsigned long opaque, unsigned long *__key,
+ size_t index, void *__func)
+{
+ visitor32_t func = __func;
+ u32 *key = (void *)__key;
+
+ func(elem, opaque, *key, index);
+}
+EXPORT_SYMBOL_GPL(visitor32);
+
+void visitor64(void *elem, unsigned long opaque, unsigned long *__key,
+ size_t index, void *__func)
+{
+ visitor64_t func = __func;
+ u64 *key = (void *)__key;
+
+ func(elem, opaque, *key, index);
+}
+EXPORT_SYMBOL_GPL(visitor64);
+
+void visitor128(void *elem, unsigned long opaque, unsigned long *__key,
+ size_t index, void *__func)
+{
+ visitor128_t func = __func;
+ u64 *key = (void *)__key;
+
+ func(elem, opaque, key[0], key[1], index);
+}
+EXPORT_SYMBOL_GPL(visitor128);
+
+size_t btree_visitor(struct btree_head *head, struct btree_geo *geo,
+ unsigned long opaque,
+ void (*func)(void *elem, unsigned long opaque,
+ unsigned long *key,
+ size_t index, void *func2),
+ void *func2)
+{
+ size_t count = 0;
+
+ if (!func2)
+ func = empty;
+ if (head->node)
+ count = __btree_for_each(head, geo, head->node, opaque, func,
+ func2, 0, head->height, 0);
+ return count;
+}
+EXPORT_SYMBOL_GPL(btree_visitor);
+
+size_t btree_grim_visitor(struct btree_head *head, struct btree_geo *geo,
+ unsigned long opaque,
+ void (*func)(void *elem, unsigned long opaque,
+ unsigned long *key,
+ size_t index, void *func2),
+ void *func2)
+{
+ size_t count = 0;
+
+ if (!func2)
+ func = empty;
+ if (head->node)
+ count = __btree_for_each(head, geo, head->node, opaque, func,
+ func2, 1, head->height, 0);
+ __btree_init(head);
+ return count;
+}
+EXPORT_SYMBOL_GPL(btree_grim_visitor);
+
+static int __init btree_module_init(void)
+{
+ btree_cachep = kmem_cache_create("btree_node", NODESIZE, 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ return 0;
+}
+
+static void __exit btree_module_exit(void)
+{
+ kmem_cache_destroy(btree_cachep);
+}
+
+/* If core code starts using btree, initialization should happen even earlier */
+module_init(btree_module_init);
+module_exit(btree_module_exit);
+
+MODULE_AUTHOR("Joern Engel <joern@logfs.org>");
+MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
diff --git a/lib/bucket_locks.c b/lib/bucket_locks.c
new file mode 100644
index 000000000..64b92e1db
--- /dev/null
+++ b/lib/bucket_locks.c
@@ -0,0 +1,54 @@
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+/* Allocate an array of spinlocks to be accessed by a hash. Two arguments
+ * control how many elements are allocated: max_size gives the maximum
+ * number of elements, and cpu_mult gives the number of locks to allocate
+ * per CPU. The resulting size is expected to be a power of 2, as it is
+ * used as a hash-table mask.
+ */
+
+int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *locks_mask,
+ size_t max_size, unsigned int cpu_mult, gfp_t gfp,
+ const char *name, struct lock_class_key *key)
+{
+ spinlock_t *tlocks = NULL;
+ unsigned int i, size;
+#if defined(CONFIG_PROVE_LOCKING)
+ unsigned int nr_pcpus = 2;
+#else
+ unsigned int nr_pcpus = num_possible_cpus();
+#endif
+
+ if (cpu_mult) {
+ nr_pcpus = min_t(unsigned int, nr_pcpus, 64UL);
+ size = min_t(unsigned int, nr_pcpus * cpu_mult, max_size);
+ } else {
+ size = max_size;
+ }
+
+ if (sizeof(spinlock_t) != 0) {
+ tlocks = kvmalloc_array(size, sizeof(spinlock_t), gfp);
+ if (!tlocks)
+ return -ENOMEM;
+ for (i = 0; i < size; i++) {
+ spin_lock_init(&tlocks[i]);
+ lockdep_init_map(&tlocks[i].dep_map, name, key, 0);
+ }
+ }
+
+ *locks = tlocks;
+ *locks_mask = size - 1;
+
+ return 0;
+}
+EXPORT_SYMBOL(__alloc_bucket_spinlocks);
+
+void free_bucket_spinlocks(spinlock_t *locks)
+{
+ kvfree(locks);
+}
+EXPORT_SYMBOL(free_bucket_spinlocks);
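
Callers are expected to use the alloc_bucket_spinlocks() wrapper from
<linux/spinlock.h>, which supplies the lockdep class key and calls
__alloc_bucket_spinlocks() above. A hedged sketch of the hashed-locking
pattern; the sizes and 'hash' are illustrative, and max_size should be a
power of 2 since the result is used as a mask.

    spinlock_t *locks;
    unsigned int mask;

    if (alloc_bucket_spinlocks(&locks, &mask, 1024, 32, GFP_KERNEL))
            return -ENOMEM;

    spin_lock(&locks[hash & mask]);   /* lock only the bucket this hash maps to */
    /* ... operate on the hash bucket ... */
    spin_unlock(&locks[hash & mask]);

    free_bucket_spinlocks(locks);
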
diff --git a/lib/bug.c b/lib/bug.c
new file mode 100644
index 000000000..e0ff21989
--- /dev/null
+++ b/lib/bug.c
@@ -0,0 +1,246 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ Generic support for BUG()
+
+ This respects the following config options:
+
+ CONFIG_BUG - emit BUG traps. Nothing happens without this.
+ CONFIG_GENERIC_BUG - enable this code.
+ CONFIG_GENERIC_BUG_RELATIVE_POINTERS - use 32-bit relative pointers for bug_addr and file
+ CONFIG_DEBUG_BUGVERBOSE - emit full file+line information for each BUG
+
+ CONFIG_BUG and CONFIG_DEBUG_BUGVERBOSE are potentially user-settable
+ (though they're generally always on).
+
+ CONFIG_GENERIC_BUG is set by each architecture using this code.
+
+ To use this, your architecture must:
+
+ 1. Set up the config options:
+ - Enable CONFIG_GENERIC_BUG if CONFIG_BUG
+
+ 2. Implement BUG (and optionally BUG_ON, WARN, WARN_ON)
+ - Define HAVE_ARCH_BUG
+ - Implement BUG() to generate a faulting instruction
+ - NOTE: struct bug_entry does not have "file" or "line" entries
+ when CONFIG_DEBUG_BUGVERBOSE is not enabled, so you must generate
+ the values accordingly.
+
+ 3. Implement the trap
+ - In the illegal instruction trap handler (typically), verify
+ that the fault was in kernel mode, and call report_bug()
+ - report_bug() will return whether it was a false alarm, a warning,
+ or an actual bug.
+ - You must implement the is_valid_bugaddr(bugaddr) callback which
+ returns true if the eip is a real kernel address, and it points
+ to the expected BUG trap instruction.
+
+ Jeremy Fitzhardinge <jeremy@goop.org> 2006
+ */
+
+#define pr_fmt(fmt) fmt
+
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/bug.h>
+#include <linux/sched.h>
+#include <linux/rculist.h>
+#include <linux/ftrace.h>
+#include <linux/context_tracking.h>
+
+extern struct bug_entry __start___bug_table[], __stop___bug_table[];
+
+static inline unsigned long bug_addr(const struct bug_entry *bug)
+{
+#ifdef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
+ return (unsigned long)&bug->bug_addr_disp + bug->bug_addr_disp;
+#else
+ return bug->bug_addr;
+#endif
+}
+
+#ifdef CONFIG_MODULES
+/* Updates are protected by module mutex */
+static LIST_HEAD(module_bug_list);
+
+static struct bug_entry *module_find_bug(unsigned long bugaddr)
+{
+ struct module *mod;
+ struct bug_entry *bug = NULL;
+
+ rcu_read_lock_sched();
+ list_for_each_entry_rcu(mod, &module_bug_list, bug_list) {
+ unsigned i;
+
+ bug = mod->bug_table;
+ for (i = 0; i < mod->num_bugs; ++i, ++bug)
+ if (bugaddr == bug_addr(bug))
+ goto out;
+ }
+ bug = NULL;
+out:
+ rcu_read_unlock_sched();
+
+ return bug;
+}
+
+void module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
+ struct module *mod)
+{
+ char *secstrings;
+ unsigned int i;
+
+ mod->bug_table = NULL;
+ mod->num_bugs = 0;
+
+ /* Find the __bug_table section, if present */
+ secstrings = (char *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+ for (i = 1; i < hdr->e_shnum; i++) {
+ if (strcmp(secstrings+sechdrs[i].sh_name, "__bug_table"))
+ continue;
+ mod->bug_table = (void *) sechdrs[i].sh_addr;
+ mod->num_bugs = sechdrs[i].sh_size / sizeof(struct bug_entry);
+ break;
+ }
+
+ /*
+ * Strictly speaking this should have a spinlock to protect against
+ * traversals, but since we only traverse on BUG()s, a spinlock
+ * could potentially lead to deadlock and thus be counter-productive.
+ * Instead, this uses RCU to safely manipulate the bug list, since
+ * BUG() must run in a non-interruptible state.
+ */
+ list_add_rcu(&mod->bug_list, &module_bug_list);
+}
+
+void module_bug_cleanup(struct module *mod)
+{
+ list_del_rcu(&mod->bug_list);
+}
+
+#else
+
+static inline struct bug_entry *module_find_bug(unsigned long bugaddr)
+{
+ return NULL;
+}
+#endif
+
+void bug_get_file_line(struct bug_entry *bug, const char **file,
+ unsigned int *line)
+{
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+#ifdef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
+ *file = (const char *)&bug->file_disp + bug->file_disp;
+#else
+ *file = bug->file;
+#endif
+ *line = bug->line;
+#else
+ *file = NULL;
+ *line = 0;
+#endif
+}
+
+struct bug_entry *find_bug(unsigned long bugaddr)
+{
+ struct bug_entry *bug;
+
+ for (bug = __start___bug_table; bug < __stop___bug_table; ++bug)
+ if (bugaddr == bug_addr(bug))
+ return bug;
+
+ return module_find_bug(bugaddr);
+}
+
+static enum bug_trap_type __report_bug(unsigned long bugaddr, struct pt_regs *regs)
+{
+ struct bug_entry *bug;
+ const char *file;
+ unsigned line, warning, once, done;
+
+ if (!is_valid_bugaddr(bugaddr))
+ return BUG_TRAP_TYPE_NONE;
+
+ bug = find_bug(bugaddr);
+ if (!bug)
+ return BUG_TRAP_TYPE_NONE;
+
+ disable_trace_on_warning();
+
+ bug_get_file_line(bug, &file, &line);
+
+ warning = (bug->flags & BUGFLAG_WARNING) != 0;
+ once = (bug->flags & BUGFLAG_ONCE) != 0;
+ done = (bug->flags & BUGFLAG_DONE) != 0;
+
+ if (warning && once) {
+ if (done)
+ return BUG_TRAP_TYPE_WARN;
+
+ /*
+ * Since this is the only store, concurrency is not an issue.
+ */
+ bug->flags |= BUGFLAG_DONE;
+ }
+
+ /*
+ * BUG() and WARN_ON() families don't print a custom debug message
+ * before triggering the exception handler, so we must add the
+ * "cut here" line now. WARN() issues its own "cut here" before the
+ * extra debugging message it writes before triggering the handler.
+ */
+ if ((bug->flags & BUGFLAG_NO_CUT_HERE) == 0)
+ printk(KERN_DEFAULT CUT_HERE);
+
+ if (warning) {
+ /* this is a WARN_ON rather than BUG/BUG_ON */
+ __warn(file, line, (void *)bugaddr, BUG_GET_TAINT(bug), regs,
+ NULL);
+ return BUG_TRAP_TYPE_WARN;
+ }
+
+ if (file)
+ pr_crit("kernel BUG at %s:%u!\n", file, line);
+ else
+ pr_crit("Kernel BUG at %pB [verbose debug info unavailable]\n",
+ (void *)bugaddr);
+
+ return BUG_TRAP_TYPE_BUG;
+}
+
+enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
+{
+ enum bug_trap_type ret;
+ bool rcu = false;
+
+ rcu = warn_rcu_enter();
+ ret = __report_bug(bugaddr, regs);
+ warn_rcu_exit(rcu);
+
+ return ret;
+}
+
+static void clear_once_table(struct bug_entry *start, struct bug_entry *end)
+{
+ struct bug_entry *bug;
+
+ for (bug = start; bug < end; bug++)
+ bug->flags &= ~BUGFLAG_DONE;
+}
+
+void generic_bug_clear_once(void)
+{
+#ifdef CONFIG_MODULES
+ struct module *mod;
+
+ rcu_read_lock_sched();
+ list_for_each_entry_rcu(mod, &module_bug_list, bug_list)
+ clear_once_table(mod->bug_table,
+ mod->bug_table + mod->num_bugs);
+ rcu_read_unlock_sched();
+#endif
+
+ clear_once_table(__start___bug_table, __stop___bug_table);
+}
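
For context, a hedged sketch of the arch-side trap handler described in
step 3 of the header comment; instruction_pointer(), BUG_INSN_SIZE and
die() are illustrative, arch-specific names.

    switch (report_bug(instruction_pointer(regs), regs)) {
    case BUG_TRAP_TYPE_WARN:
            /* a WARN: step over the trapping instruction and resume */
            instruction_pointer_set(regs,
                    instruction_pointer(regs) + BUG_INSN_SIZE);
            return;
    case BUG_TRAP_TYPE_BUG:
            die("kernel BUG", regs);  /* does not return */
    case BUG_TRAP_TYPE_NONE:
            break;                    /* not a BUG trap: normal fault handling */
    }
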
diff --git a/lib/build_OID_registry b/lib/build_OID_registry
new file mode 100755
index 000000000..d7fc32ea8
--- /dev/null
+++ b/lib/build_OID_registry
@@ -0,0 +1,203 @@
+#!/usr/bin/perl -w
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Build a static ASN.1 Object Identifier (OID) registry
+#
+# Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
+# Written by David Howells (dhowells@redhat.com)
+#
+
+use strict;
+
+my @names = ();
+my @oids = ();
+
+if ($#ARGV != 1) {
+ print STDERR "Format: ", $0, " <in-h-file> <out-c-file>\n";
+ exit(2);
+}
+
+#
+# Open the file to read from
+#
+open IN_FILE, "<$ARGV[0]" or die;
+while (<IN_FILE>) {
+ chomp;
+ if (m!\s+OID_([a-zA-Z][a-zA-Z0-9_]+),\s+/[*]\s+([012][.0-9]*)\s+[*]/!) {
+ push @names, $1;
+ push @oids, $2;
+ }
+}
+close IN_FILE or die;
+
+#
+# Open the files to write into
+#
+open C_FILE, ">$ARGV[1]" or die;
+print C_FILE "/*\n";
+print C_FILE " * Automatically generated by ", $0, ". Do not edit\n";
+print C_FILE " */\n";
+
+#
+# Split the data up into separate lists and also determine the lengths of the
+# encoded data arrays.
+#
+my @indices = ();
+my @lengths = ();
+my $total_length = 0;
+
+for (my $i = 0; $i <= $#names; $i++) {
+ my $name = $names[$i];
+ my $oid = $oids[$i];
+
+ my @components = split(/[.]/, $oid);
+
+ # Determine the encoded length of this OID
+ my $size = $#components;
+ for (my $loop = 2; $loop <= $#components; $loop++) {
+ my $c = $components[$loop];
+
+ # We will base128 encode the number
+ my $tmp = ($c == 0) ? 0 : int(log($c)/log(2));
+ $tmp = int($tmp / 7);
+ $size += $tmp;
+ }
+ push @lengths, $size;
+ push @indices, $total_length;
+ $total_length += $size;
+}
+
+#
+# Emit the look-up-by-OID index table
+#
+print C_FILE "\n";
+if ($total_length <= 255) {
+ print C_FILE "static const unsigned char oid_index[OID__NR + 1] = {\n";
+} else {
+ print C_FILE "static const unsigned short oid_index[OID__NR + 1] = {\n";
+}
+for (my $i = 0; $i <= $#names; $i++) {
+ print C_FILE "\t[OID_", $names[$i], "] = ", $indices[$i], ",\n"
+}
+print C_FILE "\t[OID__NR] = ", $total_length, "\n";
+print C_FILE "};\n";
+
+#
+# Encode the OIDs
+#
+my @encoded_oids = ();
+
+for (my $i = 0; $i <= $#names; $i++) {
+ my @octets = ();
+
+ my @components = split(/[.]/, $oids[$i]);
+
+ push @octets, $components[0] * 40 + $components[1];
+
+ for (my $loop = 2; $loop <= $#components; $loop++) {
+ my $c = $components[$loop];
+
+ # Base128 encode the number
+ my $tmp = ($c == 0) ? 0 : int(log($c)/log(2));
+ $tmp = int($tmp / 7);
+
+ for (; $tmp > 0; $tmp--) {
+ push @octets, (($c >> $tmp * 7) & 0x7f) | 0x80;
+ }
+ push @octets, $c & 0x7f;
+ }
+
+ push @encoded_oids, \@octets;
+}
+
+#
+# Create a hash value for each OID
+#
+my @hash_values = ();
+for (my $i = 0; $i <= $#names; $i++) {
+ my @octets = @{$encoded_oids[$i]};
+
+ my $hash = $#octets;
+ foreach (@octets) {
+ $hash += $_ * 33;
+ }
+
+ $hash = ($hash >> 24) ^ ($hash >> 16) ^ ($hash >> 8) ^ ($hash);
+
+ push @hash_values, $hash & 0xff;
+}
+
+#
+# Emit the OID data
+#
+print C_FILE "\n";
+print C_FILE "static const unsigned char oid_data[", $total_length, "] = {\n";
+for (my $i = 0; $i <= $#names; $i++) {
+ my @octets = @{$encoded_oids[$i]};
+ print C_FILE "\t";
+ print C_FILE $_, ", " foreach (@octets);
+ print C_FILE "\t// ", $names[$i];
+ print C_FILE "\n";
+}
+print C_FILE "};\n";
+
+#
+# Build the search index table (ordered by length then hash then content)
+#
+my @index_table = ( 0 .. $#names );
+
+@index_table = sort {
+ my @octets_a = @{$encoded_oids[$a]};
+ my @octets_b = @{$encoded_oids[$b]};
+
+ return $hash_values[$a] <=> $hash_values[$b]
+ if ($hash_values[$a] != $hash_values[$b]);
+ return $#octets_a <=> $#octets_b
+ if ($#octets_a != $#octets_b);
+ for (my $i = $#octets_a; $i >= 0; $i--) {
+ return $octets_a[$i] <=> $octets_b[$i]
+ if ($octets_a[$i] != $octets_b[$i]);
+ }
+ return 0;
+
+} @index_table;
+
+#
+# Emit the search index and hash value table
+#
+print C_FILE "\n";
+print C_FILE "static const struct {\n";
+print C_FILE "\tunsigned char hash;\n";
+if ($#names <= 255) {
+ print C_FILE "\tenum OID oid : 8;\n";
+} else {
+ print C_FILE "\tenum OID oid : 16;\n";
+}
+print C_FILE "} oid_search_table[OID__NR] = {\n";
+for (my $i = 0; $i <= $#names; $i++) {
+ my @octets = @{$encoded_oids[$index_table[$i]]};
+ printf(C_FILE "\t[%3u] = { %3u, OID_%-35s }, // ",
+ $i,
+ $hash_values[$index_table[$i]],
+ $names[$index_table[$i]]);
+ printf C_FILE "%02x", $_ foreach (@octets);
+ print C_FILE "\n";
+}
+print C_FILE "};\n";
+
+#
+# Emit the OID debugging name table
+#
+#print C_FILE "\n";
+#print C_FILE "const char *const oid_name_table[OID__NR + 1] = {\n";
+#
+#for (my $i = 0; $i <= $#names; $i++) {
+# print C_FILE "\t\"", $names[$i], "\",\n"
+#}
+#print C_FILE "\t\"Unknown-OID\"\n";
+#print C_FILE "};\n";
+
+#
+# Polish off
+#
+close C_FILE or die;
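
For reference, a worked example of the encoding the script emits: the first
two OID components pack into a single octet as 40 * a + b, and each later
component is base-128 encoded, most significant group first, with the
continuation bit 0x80 set on all but the last octet. So 2.5.4.3 (commonName)
becomes 2*40 + 5 = 0x55 followed by 0x04 and 0x03, i.e. the bytes 55 04 03,
and in 1.2.840.113549 the larger components encode as 840 -> 86 48 and
113549 -> 86 f7 0d, giving 2a 86 48 86 f7 0d.
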
diff --git a/lib/buildid.c b/lib/buildid.c
new file mode 100644
index 000000000..e3a7acdee
--- /dev/null
+++ b/lib/buildid.c
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/buildid.h>
+#include <linux/cache.h>
+#include <linux/elf.h>
+#include <linux/kernel.h>
+#include <linux/pagemap.h>
+
+#define BUILD_ID 3
+
+/*
+ * Parse build id from the note segment. This logic can be shared between
+ * 32-bit and 64-bit systems, because Elf32_Nhdr and Elf64_Nhdr are
+ * identical.
+ */
+static int parse_build_id_buf(unsigned char *build_id,
+ __u32 *size,
+ const void *note_start,
+ Elf32_Word note_size)
+{
+ Elf32_Word note_offs = 0, new_offs;
+
+ while (note_offs + sizeof(Elf32_Nhdr) < note_size) {
+ Elf32_Nhdr *nhdr = (Elf32_Nhdr *)(note_start + note_offs);
+
+ if (nhdr->n_type == BUILD_ID &&
+ nhdr->n_namesz == sizeof("GNU") &&
+ !strcmp((char *)(nhdr + 1), "GNU") &&
+ nhdr->n_descsz > 0 &&
+ nhdr->n_descsz <= BUILD_ID_SIZE_MAX) {
+ memcpy(build_id,
+ note_start + note_offs +
+ ALIGN(sizeof("GNU"), 4) + sizeof(Elf32_Nhdr),
+ nhdr->n_descsz);
+ memset(build_id + nhdr->n_descsz, 0,
+ BUILD_ID_SIZE_MAX - nhdr->n_descsz);
+ if (size)
+ *size = nhdr->n_descsz;
+ return 0;
+ }
+ new_offs = note_offs + sizeof(Elf32_Nhdr) +
+ ALIGN(nhdr->n_namesz, 4) + ALIGN(nhdr->n_descsz, 4);
+ if (new_offs <= note_offs) /* overflow */
+ break;
+ note_offs = new_offs;
+ }
+
+ return -EINVAL;
+}
+
+static inline int parse_build_id(const void *page_addr,
+ unsigned char *build_id,
+ __u32 *size,
+ const void *note_start,
+ Elf32_Word note_size)
+{
+ /* check for overflow */
+ if (note_start < page_addr || note_start + note_size < note_start)
+ return -EINVAL;
+
+ /* only supports notes that fit in the first page */
+ if (note_start + note_size > page_addr + PAGE_SIZE)
+ return -EINVAL;
+
+ return parse_build_id_buf(build_id, size, note_start, note_size);
+}
+
+/* Parse build ID from 32-bit ELF */
+static int get_build_id_32(const void *page_addr, unsigned char *build_id,
+ __u32 *size)
+{
+ Elf32_Ehdr *ehdr = (Elf32_Ehdr *)page_addr;
+ Elf32_Phdr *phdr;
+ int i;
+
+ /* only supports a program-header table that fits in one page */
+ if (ehdr->e_phnum >
+ (PAGE_SIZE - sizeof(Elf32_Ehdr)) / sizeof(Elf32_Phdr))
+ return -EINVAL;
+
+ phdr = (Elf32_Phdr *)(page_addr + sizeof(Elf32_Ehdr));
+
+ for (i = 0; i < ehdr->e_phnum; ++i) {
+ if (phdr[i].p_type == PT_NOTE &&
+ !parse_build_id(page_addr, build_id, size,
+ page_addr + phdr[i].p_offset,
+ phdr[i].p_filesz))
+ return 0;
+ }
+ return -EINVAL;
+}
+
+/* Parse build ID from 64-bit ELF */
+static int get_build_id_64(const void *page_addr, unsigned char *build_id,
+ __u32 *size)
+{
+ Elf64_Ehdr *ehdr = (Elf64_Ehdr *)page_addr;
+ Elf64_Phdr *phdr;
+ int i;
+
+ /* only supports a program-header table that fits in one page */
+ if (ehdr->e_phnum >
+ (PAGE_SIZE - sizeof(Elf64_Ehdr)) / sizeof(Elf64_Phdr))
+ return -EINVAL;
+
+ phdr = (Elf64_Phdr *)(page_addr + sizeof(Elf64_Ehdr));
+
+ for (i = 0; i < ehdr->e_phnum; ++i) {
+ if (phdr[i].p_type == PT_NOTE &&
+ !parse_build_id(page_addr, build_id, size,
+ page_addr + phdr[i].p_offset,
+ phdr[i].p_filesz))
+ return 0;
+ }
+ return -EINVAL;
+}
+
+/*
+ * Parse build ID of ELF file mapped to vma
+ * @vma: vma object
+ * @build_id: buffer to store build id, at least BUILD_ID_SIZE long
+ * @size: returns actual build id size in case of success
+ *
+ * Return: 0 on success, -EINVAL otherwise
+ */
+int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id,
+ __u32 *size)
+{
+ Elf32_Ehdr *ehdr;
+ struct page *page;
+ void *page_addr;
+ int ret;
+
+ /* only works for page backed storage */
+ if (!vma->vm_file)
+ return -EINVAL;
+
+ page = find_get_page(vma->vm_file->f_mapping, 0);
+ if (!page)
+ return -EFAULT; /* page not mapped */
+
+ ret = -EINVAL;
+ page_addr = kmap_atomic(page);
+ ehdr = (Elf32_Ehdr *)page_addr;
+
+ /* compare ELF magic: 0x7f "ELF" */
+ if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0)
+ goto out;
+
+ /* only supports executable files and shared objects */
+ if (ehdr->e_type != ET_EXEC && ehdr->e_type != ET_DYN)
+ goto out;
+
+ if (ehdr->e_ident[EI_CLASS] == ELFCLASS32)
+ ret = get_build_id_32(page_addr, build_id, size);
+ else if (ehdr->e_ident[EI_CLASS] == ELFCLASS64)
+ ret = get_build_id_64(page_addr, build_id, size);
+out:
+ kunmap_atomic(page_addr);
+ put_page(page);
+ return ret;
+}
+
+/**
+ * build_id_parse_buf - Get build ID from a buffer
+ * @buf: ELF note section(s) to parse
+ * @buf_size: Size of @buf in bytes
+ * @build_id: Build ID parsed from @buf, at least BUILD_ID_SIZE_MAX long
+ *
+ * Return: 0 on success, -EINVAL otherwise
+ */
+int build_id_parse_buf(const void *buf, unsigned char *build_id, u32 buf_size)
+{
+ return parse_build_id_buf(build_id, NULL, buf, buf_size);
+}
+
+#if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID) || IS_ENABLED(CONFIG_CRASH_CORE)
+unsigned char vmlinux_build_id[BUILD_ID_SIZE_MAX] __ro_after_init;
+
+/**
+ * init_vmlinux_build_id - Compute and stash the running kernel's build ID
+ */
+void __init init_vmlinux_build_id(void)
+{
+ extern const void __start_notes __weak;
+ extern const void __stop_notes __weak;
+ unsigned int size = &__stop_notes - &__start_notes;
+
+ build_id_parse_buf(&__start_notes, vmlinux_build_id, size);
+}
+#endif
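
A hedged usage sketch of the vma-based entry point above; pr_info() and the
%*phN hex-dump specifier are standard kernel facilities.

    unsigned char id[BUILD_ID_SIZE_MAX];
    __u32 id_sz;

    if (!build_id_parse(vma, id, &id_sz))
            pr_info("GNU build id: %*phN\n", (int)id_sz, id);
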
diff --git a/lib/bust_spinlocks.c b/lib/bust_spinlocks.c
new file mode 100644
index 000000000..bfd53972a
--- /dev/null
+++ b/lib/bust_spinlocks.c
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * lib/bust_spinlocks.c
+ *
+ * Provides a minimal bust_spinlocks for architectures which don't
+ * have one of their own.
+ *
+ * bust_spinlocks() clears any spinlocks which would prevent oops, die(), BUG()
+ * and panic() information from reaching the user.
+ */
+
+#include <linux/kernel.h>
+#include <linux/printk.h>
+#include <linux/spinlock.h>
+#include <linux/tty.h>
+#include <linux/wait.h>
+#include <linux/vt_kern.h>
+#include <linux/console.h>
+
+void bust_spinlocks(int yes)
+{
+ if (yes) {
+ ++oops_in_progress;
+ } else {
+ console_unblank();
+ if (--oops_in_progress == 0)
+ wake_up_klogd();
+ }
+}
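
The expected call pattern brackets the crash report, as in the oops and
panic paths; a hedged sketch:

    bust_spinlocks(1);  /* raise oops_in_progress; console locks are bypassed */
    /* ... emit the oops/panic report ... */
    bust_spinlocks(0);  /* unblank the console and wake klogd when done */
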
diff --git a/lib/check_signature.c b/lib/check_signature.c
new file mode 100644
index 000000000..43a7301da
--- /dev/null
+++ b/lib/check_signature.c
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/io.h>
+#include <linux/export.h>
+
+/**
+ * check_signature - find BIOS signatures
+ * @io_addr: mmio address to check
+ * @signature: signature block
+ * @length: length of signature
+ *
+ * Perform a signature comparison with the mmio address @io_addr. This
+ * address should have been obtained by ioremap().
+ * Returns 1 on a match.
+ */
+
+int check_signature(const volatile void __iomem *io_addr,
+ const unsigned char *signature, int length)
+{
+ while (length--) {
+ if (readb(io_addr) != *signature)
+ return 0;
+ io_addr++;
+ signature++;
+ }
+ return 1;
+}
+EXPORT_SYMBOL(check_signature);
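
A hedged usage sketch; the physical address and signature bytes are purely
illustrative.

    static const unsigned char sig[] = { 0x55, 0xaa };  /* hypothetical signature */
    void __iomem *p = ioremap(0xc0000, sizeof(sig));    /* hypothetical window */

    if (p && check_signature(p, sig, sizeof(sig)))
            pr_info("signature found\n");
    if (p)
            iounmap(p);
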
diff --git a/lib/checksum.c b/lib/checksum.c
new file mode 100644
index 000000000..6860d6b05
--- /dev/null
+++ b/lib/checksum.c
@@ -0,0 +1,173 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ *
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * IP/TCP/UDP checksumming routines
+ *
+ * Authors: Jorge Cwik, <jorge@laser.satlink.net>
+ * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
+ * Tom May, <ftom@netcom.com>
+ * Andreas Schwab, <schwab@issan.informatik.uni-dortmund.de>
+ * Lots of code moved from tcp.c and ip.c; see those files
+ * for more names.
+ *
+ * 03/02/96 Jes Sorensen, Andreas Schwab, Roman Hodek:
+ * Fixed some nasty bugs, causing some horrible crashes.
+ * A: At some points, the sum (%0) was used as
+ * length-counter instead of the length counter
+ * (%1). Thanks to Roman Hodek for pointing this out.
+ * B: GCC seems to mess up if one uses too many
+ * data-registers to hold input values and one tries to
+ * specify d0 and d1 as scratch registers. Letting gcc
+ * choose these registers itself solves the problem.
+ */
+
+/* Revised by Kenneth Albanowski for m68knommu. Basic problem: unaligned access
+ kills, so most of the assembly has to go. */
+
+#include <linux/export.h>
+#include <net/checksum.h>
+
+#include <asm/byteorder.h>
+
+#ifndef do_csum
+static inline unsigned short from32to16(unsigned int x)
+{
+ /* add up 16-bit and 16-bit for 16+c bit */
+ x = (x & 0xffff) + (x >> 16);
+ /* add up carry.. */
+ x = (x & 0xffff) + (x >> 16);
+ return x;
+}
+
+static unsigned int do_csum(const unsigned char *buff, int len)
+{
+ int odd;
+ unsigned int result = 0;
+
+ if (len <= 0)
+ goto out;
+ odd = 1 & (unsigned long) buff;
+ if (odd) {
+#ifdef __LITTLE_ENDIAN
+ result += (*buff << 8);
+#else
+ result = *buff;
+#endif
+ len--;
+ buff++;
+ }
+ if (len >= 2) {
+ if (2 & (unsigned long) buff) {
+ result += *(unsigned short *) buff;
+ len -= 2;
+ buff += 2;
+ }
+ if (len >= 4) {
+ const unsigned char *end = buff + ((unsigned)len & ~3);
+ unsigned int carry = 0;
+ do {
+ unsigned int w = *(unsigned int *) buff;
+ buff += 4;
+ result += carry;
+ result += w;
+ carry = (w > result);
+ } while (buff < end);
+ result += carry;
+ result = (result & 0xffff) + (result >> 16);
+ }
+ if (len & 2) {
+ result += *(unsigned short *) buff;
+ buff += 2;
+ }
+ }
+ if (len & 1)
+#ifdef __LITTLE_ENDIAN
+ result += *buff;
+#else
+ result += (*buff << 8);
+#endif
+ result = from32to16(result);
+ if (odd)
+ result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
+out:
+ return result;
+}
+#endif
+
+#ifndef ip_fast_csum
+/*
+ * This is a version of ip_compute_csum() optimized for IP headers,
+ * which always checksum on 4 octet boundaries.
+ */
+__sum16 ip_fast_csum(const void *iph, unsigned int ihl)
+{
+ return (__force __sum16)~do_csum(iph, ihl*4);
+}
+EXPORT_SYMBOL(ip_fast_csum);
+#endif
+
+/*
+ * computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit)
+ *
+ * returns a 32-bit number suitable for feeding into itself
+ * or csum_tcpudp_magic
+ *
+ * this function must be called with even lengths, except
+ * for the last fragment, which may be odd
+ *
+ * it's best to have buff aligned on a 32-bit boundary
+ */
+__wsum csum_partial(const void *buff, int len, __wsum wsum)
+{
+ unsigned int sum = (__force unsigned int)wsum;
+ unsigned int result = do_csum(buff, len);
+
+ /* add in old sum, and carry.. */
+ result += sum;
+ if (sum > result)
+ result += 1;
+ return (__force __wsum)result;
+}
+EXPORT_SYMBOL(csum_partial);
+
+/*
+ * this routine is used for miscellaneous IP-like checksums, mainly
+ * in icmp.c
+ */
+__sum16 ip_compute_csum(const void *buff, int len)
+{
+ return (__force __sum16)~do_csum(buff, len);
+}
+EXPORT_SYMBOL(ip_compute_csum);
+
+#ifndef csum_tcpudp_nofold
+static inline u32 from64to32(u64 x)
+{
+ /* add up 32-bit and 32-bit for 32+c bit */
+ x = (x & 0xffffffff) + (x >> 32);
+ /* add up carry.. */
+ x = (x & 0xffffffff) + (x >> 32);
+ return (u32)x;
+}
+
+__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
+ __u32 len, __u8 proto, __wsum sum)
+{
+ unsigned long long s = (__force u32)sum;
+
+ s += (__force u32)saddr;
+ s += (__force u32)daddr;
+#ifdef __BIG_ENDIAN
+ s += proto + len;
+#else
+ s += (proto + len) << 8;
+#endif
+ return (__force __wsum)from64to32(s);
+}
+EXPORT_SYMBOL(csum_tcpudp_nofold);
+#endif
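
The usual pairing runs csum_partial() over the buffer and then csum_fold()
(a per-arch helper) to collapse the result into the final 16-bit Internet
checksum. A hedged sketch, with a worked fold matching from32to16() above:
0x0001fffe folds as 0xfffe + 0x0001 = 0xffff, and the second pass absorbs
any carry produced by the first.

    __wsum partial = csum_partial(buf, len, 0);  /* buf/len: caller's data */
    __sum16 check = csum_fold(partial);          /* fold to 16 bits and invert */
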
diff --git a/lib/checksum_kunit.c b/lib/checksum_kunit.c
new file mode 100644
index 000000000..0eed92b77
--- /dev/null
+++ b/lib/checksum_kunit.c
@@ -0,0 +1,356 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Test cases csum_partial and csum_fold
+ */
+
+#include <kunit/test.h>
+#include <asm/checksum.h>
+
+#define MAX_LEN 512
+#define MAX_ALIGN 64
+#define TEST_BUFLEN (MAX_LEN + MAX_ALIGN)
+
+/* Values for a little endian CPU. Byte swap each half on big endian CPU. */
+static const u32 random_init_sum = 0x2847aab;
+static const u8 random_buf[] = {
+ 0xac, 0xd7, 0x76, 0x69, 0x6e, 0xf2, 0x93, 0x2c, 0x1f, 0xe0, 0xde, 0x86,
+ 0x8f, 0x54, 0x33, 0x90, 0x95, 0xbf, 0xff, 0xb9, 0xea, 0x62, 0x6e, 0xb5,
+ 0xd3, 0x4f, 0xf5, 0x60, 0x50, 0x5c, 0xc7, 0xfa, 0x6d, 0x1a, 0xc7, 0xf0,
+ 0xd2, 0x2c, 0x12, 0x3d, 0x88, 0xe3, 0x14, 0x21, 0xb1, 0x5e, 0x45, 0x31,
+ 0xa2, 0x85, 0x36, 0x76, 0xba, 0xd8, 0xad, 0xbb, 0x9e, 0x49, 0x8f, 0xf7,
+ 0xce, 0xea, 0xef, 0xca, 0x2c, 0x29, 0xf7, 0x15, 0x5c, 0x1d, 0x4d, 0x09,
+ 0x1f, 0xe2, 0x14, 0x31, 0x8c, 0x07, 0x57, 0x23, 0x1f, 0x6f, 0x03, 0xe1,
+ 0x93, 0x19, 0x53, 0x03, 0x45, 0x49, 0x9a, 0x3b, 0x8e, 0x0c, 0x12, 0x5d,
+ 0x8a, 0xb8, 0x9b, 0x8c, 0x9a, 0x03, 0xe5, 0xa2, 0x43, 0xd2, 0x3b, 0x4e,
+ 0x7e, 0x30, 0x3c, 0x22, 0x2d, 0xc5, 0xfc, 0x9e, 0xdb, 0xc6, 0xf9, 0x69,
+ 0x12, 0x39, 0x1f, 0xa0, 0x11, 0x0c, 0x3f, 0xf5, 0x53, 0xc9, 0x30, 0xfb,
+ 0xb0, 0xdd, 0x21, 0x1d, 0x34, 0xe2, 0x65, 0x30, 0xf1, 0xe8, 0x1b, 0xe7,
+ 0x55, 0x0d, 0xeb, 0xbd, 0xcc, 0x9d, 0x24, 0xa4, 0xad, 0xa7, 0x93, 0x47,
+ 0x19, 0x2e, 0xc4, 0x5c, 0x3b, 0xc7, 0x6d, 0x95, 0x0c, 0x47, 0x60, 0xaf,
+ 0x5b, 0x47, 0xee, 0xdc, 0x31, 0x31, 0x14, 0x12, 0x7e, 0x9e, 0x45, 0xb1,
+ 0xc1, 0x69, 0x4b, 0x84, 0xfc, 0x88, 0xc1, 0x9e, 0x46, 0xb4, 0xc2, 0x25,
+ 0xc5, 0x6c, 0x4c, 0x22, 0x58, 0x5c, 0xbe, 0xff, 0xea, 0x88, 0x88, 0x7a,
+ 0xcb, 0x1c, 0x5d, 0x63, 0xa1, 0xf2, 0x33, 0x0c, 0xa2, 0x16, 0x0b, 0x6e,
+ 0x2b, 0x79, 0x58, 0xf7, 0xac, 0xd3, 0x6a, 0x3f, 0x81, 0x57, 0x48, 0x45,
+ 0xe3, 0x7c, 0xdc, 0xd6, 0x34, 0x7e, 0xe6, 0x73, 0xfa, 0xcb, 0x31, 0x18,
+ 0xa9, 0x0b, 0xee, 0x6b, 0x99, 0xb9, 0x2d, 0xde, 0x22, 0x0e, 0x71, 0x57,
+ 0x0e, 0x9b, 0x11, 0xd1, 0x15, 0x41, 0xd0, 0x6b, 0x50, 0x8a, 0x23, 0x64,
+ 0xe3, 0x9c, 0xb3, 0x55, 0x09, 0xe9, 0x32, 0x67, 0xf9, 0xe0, 0x73, 0xf1,
+ 0x60, 0x66, 0x0b, 0x88, 0x79, 0x8d, 0x4b, 0x52, 0x83, 0x20, 0x26, 0x78,
+ 0x49, 0x27, 0xe7, 0x3e, 0x29, 0xa8, 0x18, 0x82, 0x41, 0xdd, 0x1e, 0xcc,
+ 0x3b, 0xc4, 0x65, 0xd1, 0x21, 0x40, 0x72, 0xb2, 0x87, 0x5e, 0x16, 0x10,
+ 0x80, 0x3f, 0x4b, 0x58, 0x1c, 0xc2, 0x79, 0x20, 0xf0, 0xe0, 0x80, 0xd3,
+ 0x52, 0xa5, 0x19, 0x6e, 0x47, 0x90, 0x08, 0xf5, 0x50, 0xe2, 0xd6, 0xae,
+ 0xe9, 0x2e, 0xdc, 0xd5, 0xb4, 0x90, 0x1f, 0x79, 0x49, 0x82, 0x21, 0x84,
+ 0xa0, 0xb5, 0x2f, 0xff, 0x30, 0x71, 0xed, 0x80, 0x68, 0xb1, 0x6d, 0xef,
+ 0xf6, 0xcf, 0xb8, 0x41, 0x79, 0xf5, 0x01, 0xbc, 0x0c, 0x9b, 0x0e, 0x06,
+ 0xf3, 0xb0, 0xbb, 0x97, 0xb8, 0xb1, 0xfd, 0x51, 0x4e, 0xef, 0x0a, 0x3d,
+ 0x7a, 0x3d, 0xbd, 0x61, 0x00, 0xa2, 0xb3, 0xf0, 0x1d, 0x77, 0x7b, 0x6c,
+ 0x01, 0x61, 0xa5, 0xa3, 0xdb, 0xd5, 0xd5, 0xf4, 0xb5, 0x28, 0x9f, 0x0a,
+ 0xa3, 0x82, 0x5f, 0x4b, 0x40, 0x0f, 0x05, 0x0e, 0x78, 0xed, 0xbf, 0x17,
+ 0xf6, 0x5a, 0x8a, 0x7d, 0xf9, 0x45, 0xc1, 0xd7, 0x1b, 0x9d, 0x6c, 0x07,
+ 0x88, 0xf3, 0xbc, 0xf1, 0xea, 0x28, 0x1f, 0xb8, 0x7a, 0x60, 0x3c, 0xce,
+ 0x3e, 0x50, 0xb2, 0x0b, 0xcf, 0xe5, 0x08, 0x1f, 0x48, 0x04, 0xf9, 0x35,
+ 0x29, 0x15, 0xbe, 0x82, 0x96, 0xc2, 0x55, 0x04, 0x6c, 0x19, 0x45, 0x29,
+ 0x0b, 0xb6, 0x49, 0x12, 0xfb, 0x8d, 0x1b, 0x75, 0x8b, 0xd9, 0x6a, 0x5c,
+ 0xbe, 0x46, 0x2b, 0x41, 0xfe, 0x21, 0xad, 0x1f, 0x75, 0xe7, 0x90, 0x3d,
+ 0xe1, 0xdf, 0x4b, 0xe1, 0x81, 0xe2, 0x17, 0x02, 0x7b, 0x58, 0x8b, 0x92,
+ 0x1a, 0xac, 0x46, 0xdd, 0x2e, 0xce, 0x40, 0x09
+};
+
+/* Values for a little endian CPU. Byte swap on big endian CPU. */
+static const u16 expected_results[] = {
+ 0x82d0, 0x8224, 0xab23, 0xaaad, 0x41ad, 0x413f, 0x4f3e, 0x4eab, 0x22ab,
+ 0x228c, 0x428b, 0x41ad, 0xbbac, 0xbb1d, 0x671d, 0x66ea, 0xd6e9, 0xd654,
+ 0x1754, 0x1655, 0x5d54, 0x5c6a, 0xfa69, 0xf9fb, 0x44fb, 0x4428, 0xf527,
+ 0xf432, 0x9432, 0x93e2, 0x37e2, 0x371b, 0x3d1a, 0x3cad, 0x22ad, 0x21e6,
+ 0x31e5, 0x3113, 0x0513, 0x0501, 0xc800, 0xc778, 0xe477, 0xe463, 0xc363,
+ 0xc2b2, 0x64b2, 0x646d, 0x336d, 0x32cb, 0xadca, 0xad94, 0x3794, 0x36da,
+ 0x5ed9, 0x5e2c, 0xa32b, 0xa28d, 0x598d, 0x58fe, 0x61fd, 0x612f, 0x772e,
+ 0x763f, 0xac3e, 0xac12, 0x8312, 0x821b, 0x6d1b, 0x6cbf, 0x4fbf, 0x4f72,
+ 0x4672, 0x4653, 0x6452, 0x643e, 0x333e, 0x32b2, 0x2bb2, 0x2b5b, 0x085b,
+ 0x083c, 0x993b, 0x9938, 0xb837, 0xb7a4, 0x9ea4, 0x9e51, 0x9b51, 0x9b0c,
+ 0x520c, 0x5172, 0x1672, 0x15e4, 0x09e4, 0x09d2, 0xacd1, 0xac47, 0xf446,
+ 0xf3ab, 0x67ab, 0x6711, 0x6411, 0x632c, 0xc12b, 0xc0e8, 0xeee7, 0xeeac,
+ 0xa0ac, 0xa02e, 0x702e, 0x6ff2, 0x4df2, 0x4dc5, 0x88c4, 0x87c8, 0xe9c7,
+ 0xe8ec, 0x22ec, 0x21f3, 0xb8f2, 0xb8e0, 0x7fe0, 0x7fc1, 0xdfc0, 0xdfaf,
+ 0xd3af, 0xd370, 0xde6f, 0xde1c, 0x151c, 0x14ec, 0x19eb, 0x193b, 0x3c3a,
+ 0x3c19, 0x1f19, 0x1ee5, 0x3ce4, 0x3c7f, 0x0c7f, 0x0b8e, 0x238d, 0x2372,
+ 0x3c71, 0x3c1c, 0x2f1c, 0x2e31, 0x7130, 0x7064, 0xd363, 0xd33f, 0x2f3f,
+ 0x2e92, 0x8791, 0x86fe, 0x3ffe, 0x3fe5, 0x11e5, 0x1121, 0xb520, 0xb4e5,
+ 0xede4, 0xed77, 0x5877, 0x586b, 0x116b, 0x110b, 0x620a, 0x61af, 0x1aaf,
+ 0x19c1, 0x3dc0, 0x3d8f, 0x0c8f, 0x0c7b, 0xfa7a, 0xf9fc, 0x5bfc, 0x5bb7,
+ 0xaab6, 0xa9f5, 0x40f5, 0x40aa, 0xbca9, 0xbbad, 0x33ad, 0x32ec, 0x94eb,
+ 0x94a5, 0xe0a4, 0xdfe2, 0xbae2, 0xba1d, 0x4e1d, 0x4dd1, 0x2bd1, 0x2b79,
+ 0xcf78, 0xceba, 0xcfb9, 0xcecf, 0x46cf, 0x4647, 0xcc46, 0xcb7b, 0xaf7b,
+ 0xaf1e, 0x4c1e, 0x4b7d, 0x597c, 0x5949, 0x4d49, 0x4ca7, 0x36a7, 0x369c,
+ 0xc89b, 0xc870, 0x4f70, 0x4f18, 0x5817, 0x576b, 0x846a, 0x8400, 0x4500,
+ 0x447f, 0xed7e, 0xed36, 0xa836, 0xa753, 0x2b53, 0x2a77, 0x5476, 0x5442,
+ 0xd641, 0xd55b, 0x625b, 0x6161, 0x9660, 0x962f, 0x7e2f, 0x7d86, 0x7286,
+ 0x7198, 0x0698, 0x05ff, 0x4cfe, 0x4cd1, 0x6ed0, 0x6eae, 0x60ae, 0x603d,
+ 0x093d, 0x092f, 0x6e2e, 0x6e1d, 0x9d1c, 0x9d07, 0x5c07, 0x5b37, 0xf036,
+ 0xefe6, 0x65e6, 0x65c3, 0x01c3, 0x00e0, 0x64df, 0x642c, 0x0f2c, 0x0f23,
+ 0x2622, 0x25f0, 0xbeef, 0xbdf6, 0xddf5, 0xdd82, 0xec81, 0xec21, 0x8621,
+ 0x8616, 0xfe15, 0xfd9c, 0x709c, 0x7051, 0x1e51, 0x1dce, 0xfdcd, 0xfda7,
+ 0x85a7, 0x855e, 0x5e5e, 0x5d77, 0x1f77, 0x1f4e, 0x774d, 0x7735, 0xf534,
+ 0xf4f3, 0x17f3, 0x17d5, 0x4bd4, 0x4b99, 0x8798, 0x8733, 0xb632, 0xb611,
+ 0x7611, 0x759f, 0xc39e, 0xc317, 0x6517, 0x6501, 0x5501, 0x5481, 0x1581,
+ 0x1536, 0xbd35, 0xbd19, 0xfb18, 0xfa9f, 0xda9f, 0xd9af, 0xf9ae, 0xf92e,
+ 0x262e, 0x25dc, 0x80db, 0x80c2, 0x12c2, 0x127b, 0x827a, 0x8272, 0x8d71,
+ 0x8d21, 0xab20, 0xaa4a, 0xfc49, 0xfb60, 0xcd60, 0xcc84, 0xf783, 0xf6cf,
+ 0x66cf, 0x66b0, 0xedaf, 0xed66, 0x6b66, 0x6b45, 0xe744, 0xe6a4, 0x31a4,
+ 0x3175, 0x3274, 0x3244, 0xc143, 0xc056, 0x4056, 0x3fee, 0x8eed, 0x8e80,
+ 0x9f7f, 0x9e89, 0xcf88, 0xced0, 0x8dd0, 0x8d57, 0x9856, 0x9855, 0xdc54,
+ 0xdc48, 0x4148, 0x413a, 0x3b3a, 0x3a47, 0x8a46, 0x898b, 0xf28a, 0xf1d2,
+ 0x40d2, 0x3fd5, 0xeed4, 0xee86, 0xff85, 0xff7b, 0xc27b, 0xc201, 0x8501,
+ 0x8444, 0x2344, 0x2344, 0x8143, 0x8090, 0x908f, 0x9072, 0x1972, 0x18f7,
+ 0xacf6, 0xacf5, 0x4bf5, 0x4b50, 0xa84f, 0xa774, 0xd273, 0xd19e, 0xdd9d,
+ 0xdce8, 0xb4e8, 0xb449, 0xaa49, 0xa9a6, 0x27a6, 0x2747, 0xdc46, 0xdc06,
+ 0xcd06, 0xcd01, 0xbf01, 0xbe89, 0xd188, 0xd0c9, 0xb9c9, 0xb8d3, 0x5ed3,
+ 0x5e49, 0xe148, 0xe04f, 0x9b4f, 0x9a8e, 0xc38d, 0xc372, 0x2672, 0x2606,
+ 0x1f06, 0x1e7e, 0x2b7d, 0x2ac1, 0x39c0, 0x38d6, 0x10d6, 0x10b7, 0x58b6,
+ 0x583c, 0xf83b, 0xf7ff, 0x29ff, 0x29c1, 0xd9c0, 0xd90e, 0xce0e, 0xcd3f,
+ 0xe83e, 0xe836, 0xc936, 0xc8ee, 0xc4ee, 0xc3f5, 0x8ef5, 0x8ecc, 0x79cc,
+ 0x790e, 0xf70d, 0xf677, 0x3477, 0x3422, 0x3022, 0x2fb6, 0x16b6, 0x1671,
+ 0xed70, 0xed65, 0x3765, 0x371c, 0x251c, 0x2421, 0x9720, 0x9705, 0x2205,
+ 0x217a, 0x4879, 0x480f, 0xec0e, 0xeb50, 0xa550, 0xa525, 0x6425, 0x6327,
+ 0x4227, 0x417a, 0x227a, 0x2205, 0x3b04, 0x3a74, 0xfd73, 0xfc92, 0x1d92,
+ 0x1d47, 0x3c46, 0x3bc5, 0x59c4, 0x59ad, 0x57ad, 0x5732, 0xff31, 0xfea6,
+ 0x6ca6, 0x6c8c, 0xc08b, 0xc045, 0xe344, 0xe316, 0x1516, 0x14d6,
+};
+
+/* Values for a little endian CPU. Byte swap each half on big endian CPU. */
+static const u32 init_sums_no_overflow[] = {
+ 0xffffffff, 0xfffffffb, 0xfffffbfb, 0xfffffbf7, 0xfffff7f7, 0xfffff7f3,
+ 0xfffff3f3, 0xfffff3ef, 0xffffefef, 0xffffefeb, 0xffffebeb, 0xffffebe7,
+ 0xffffe7e7, 0xffffe7e3, 0xffffe3e3, 0xffffe3df, 0xffffdfdf, 0xffffdfdb,
+ 0xffffdbdb, 0xffffdbd7, 0xffffd7d7, 0xffffd7d3, 0xffffd3d3, 0xffffd3cf,
+ 0xffffcfcf, 0xffffcfcb, 0xffffcbcb, 0xffffcbc7, 0xffffc7c7, 0xffffc7c3,
+ 0xffffc3c3, 0xffffc3bf, 0xffffbfbf, 0xffffbfbb, 0xffffbbbb, 0xffffbbb7,
+ 0xffffb7b7, 0xffffb7b3, 0xffffb3b3, 0xffffb3af, 0xffffafaf, 0xffffafab,
+ 0xffffabab, 0xffffaba7, 0xffffa7a7, 0xffffa7a3, 0xffffa3a3, 0xffffa39f,
+ 0xffff9f9f, 0xffff9f9b, 0xffff9b9b, 0xffff9b97, 0xffff9797, 0xffff9793,
+ 0xffff9393, 0xffff938f, 0xffff8f8f, 0xffff8f8b, 0xffff8b8b, 0xffff8b87,
+ 0xffff8787, 0xffff8783, 0xffff8383, 0xffff837f, 0xffff7f7f, 0xffff7f7b,
+ 0xffff7b7b, 0xffff7b77, 0xffff7777, 0xffff7773, 0xffff7373, 0xffff736f,
+ 0xffff6f6f, 0xffff6f6b, 0xffff6b6b, 0xffff6b67, 0xffff6767, 0xffff6763,
+ 0xffff6363, 0xffff635f, 0xffff5f5f, 0xffff5f5b, 0xffff5b5b, 0xffff5b57,
+ 0xffff5757, 0xffff5753, 0xffff5353, 0xffff534f, 0xffff4f4f, 0xffff4f4b,
+ 0xffff4b4b, 0xffff4b47, 0xffff4747, 0xffff4743, 0xffff4343, 0xffff433f,
+ 0xffff3f3f, 0xffff3f3b, 0xffff3b3b, 0xffff3b37, 0xffff3737, 0xffff3733,
+ 0xffff3333, 0xffff332f, 0xffff2f2f, 0xffff2f2b, 0xffff2b2b, 0xffff2b27,
+ 0xffff2727, 0xffff2723, 0xffff2323, 0xffff231f, 0xffff1f1f, 0xffff1f1b,
+ 0xffff1b1b, 0xffff1b17, 0xffff1717, 0xffff1713, 0xffff1313, 0xffff130f,
+ 0xffff0f0f, 0xffff0f0b, 0xffff0b0b, 0xffff0b07, 0xffff0707, 0xffff0703,
+ 0xffff0303, 0xffff02ff, 0xfffffefe, 0xfffffefa, 0xfffffafa, 0xfffffaf6,
+ 0xfffff6f6, 0xfffff6f2, 0xfffff2f2, 0xfffff2ee, 0xffffeeee, 0xffffeeea,
+ 0xffffeaea, 0xffffeae6, 0xffffe6e6, 0xffffe6e2, 0xffffe2e2, 0xffffe2de,
+ 0xffffdede, 0xffffdeda, 0xffffdada, 0xffffdad6, 0xffffd6d6, 0xffffd6d2,
+ 0xffffd2d2, 0xffffd2ce, 0xffffcece, 0xffffceca, 0xffffcaca, 0xffffcac6,
+ 0xffffc6c6, 0xffffc6c2, 0xffffc2c2, 0xffffc2be, 0xffffbebe, 0xffffbeba,
+ 0xffffbaba, 0xffffbab6, 0xffffb6b6, 0xffffb6b2, 0xffffb2b2, 0xffffb2ae,
+ 0xffffaeae, 0xffffaeaa, 0xffffaaaa, 0xffffaaa6, 0xffffa6a6, 0xffffa6a2,
+ 0xffffa2a2, 0xffffa29e, 0xffff9e9e, 0xffff9e9a, 0xffff9a9a, 0xffff9a96,
+ 0xffff9696, 0xffff9692, 0xffff9292, 0xffff928e, 0xffff8e8e, 0xffff8e8a,
+ 0xffff8a8a, 0xffff8a86, 0xffff8686, 0xffff8682, 0xffff8282, 0xffff827e,
+ 0xffff7e7e, 0xffff7e7a, 0xffff7a7a, 0xffff7a76, 0xffff7676, 0xffff7672,
+ 0xffff7272, 0xffff726e, 0xffff6e6e, 0xffff6e6a, 0xffff6a6a, 0xffff6a66,
+ 0xffff6666, 0xffff6662, 0xffff6262, 0xffff625e, 0xffff5e5e, 0xffff5e5a,
+ 0xffff5a5a, 0xffff5a56, 0xffff5656, 0xffff5652, 0xffff5252, 0xffff524e,
+ 0xffff4e4e, 0xffff4e4a, 0xffff4a4a, 0xffff4a46, 0xffff4646, 0xffff4642,
+ 0xffff4242, 0xffff423e, 0xffff3e3e, 0xffff3e3a, 0xffff3a3a, 0xffff3a36,
+ 0xffff3636, 0xffff3632, 0xffff3232, 0xffff322e, 0xffff2e2e, 0xffff2e2a,
+ 0xffff2a2a, 0xffff2a26, 0xffff2626, 0xffff2622, 0xffff2222, 0xffff221e,
+ 0xffff1e1e, 0xffff1e1a, 0xffff1a1a, 0xffff1a16, 0xffff1616, 0xffff1612,
+ 0xffff1212, 0xffff120e, 0xffff0e0e, 0xffff0e0a, 0xffff0a0a, 0xffff0a06,
+ 0xffff0606, 0xffff0602, 0xffff0202, 0xffff01fe, 0xfffffdfd, 0xfffffdf9,
+ 0xfffff9f9, 0xfffff9f5, 0xfffff5f5, 0xfffff5f1, 0xfffff1f1, 0xfffff1ed,
+ 0xffffeded, 0xffffede9, 0xffffe9e9, 0xffffe9e5, 0xffffe5e5, 0xffffe5e1,
+ 0xffffe1e1, 0xffffe1dd, 0xffffdddd, 0xffffddd9, 0xffffd9d9, 0xffffd9d5,
+ 0xffffd5d5, 0xffffd5d1, 0xffffd1d1, 0xffffd1cd, 0xffffcdcd, 0xffffcdc9,
+ 0xffffc9c9, 0xffffc9c5, 0xffffc5c5, 0xffffc5c1, 0xffffc1c1, 0xffffc1bd,
+ 0xffffbdbd, 0xffffbdb9, 0xffffb9b9, 0xffffb9b5, 0xffffb5b5, 0xffffb5b1,
+ 0xffffb1b1, 0xffffb1ad, 0xffffadad, 0xffffada9, 0xffffa9a9, 0xffffa9a5,
+ 0xffffa5a5, 0xffffa5a1, 0xffffa1a1, 0xffffa19d, 0xffff9d9d, 0xffff9d99,
+ 0xffff9999, 0xffff9995, 0xffff9595, 0xffff9591, 0xffff9191, 0xffff918d,
+ 0xffff8d8d, 0xffff8d89, 0xffff8989, 0xffff8985, 0xffff8585, 0xffff8581,
+ 0xffff8181, 0xffff817d, 0xffff7d7d, 0xffff7d79, 0xffff7979, 0xffff7975,
+ 0xffff7575, 0xffff7571, 0xffff7171, 0xffff716d, 0xffff6d6d, 0xffff6d69,
+ 0xffff6969, 0xffff6965, 0xffff6565, 0xffff6561, 0xffff6161, 0xffff615d,
+ 0xffff5d5d, 0xffff5d59, 0xffff5959, 0xffff5955, 0xffff5555, 0xffff5551,
+ 0xffff5151, 0xffff514d, 0xffff4d4d, 0xffff4d49, 0xffff4949, 0xffff4945,
+ 0xffff4545, 0xffff4541, 0xffff4141, 0xffff413d, 0xffff3d3d, 0xffff3d39,
+ 0xffff3939, 0xffff3935, 0xffff3535, 0xffff3531, 0xffff3131, 0xffff312d,
+ 0xffff2d2d, 0xffff2d29, 0xffff2929, 0xffff2925, 0xffff2525, 0xffff2521,
+ 0xffff2121, 0xffff211d, 0xffff1d1d, 0xffff1d19, 0xffff1919, 0xffff1915,
+ 0xffff1515, 0xffff1511, 0xffff1111, 0xffff110d, 0xffff0d0d, 0xffff0d09,
+ 0xffff0909, 0xffff0905, 0xffff0505, 0xffff0501, 0xffff0101, 0xffff00fd,
+ 0xfffffcfc, 0xfffffcf8, 0xfffff8f8, 0xfffff8f4, 0xfffff4f4, 0xfffff4f0,
+ 0xfffff0f0, 0xfffff0ec, 0xffffecec, 0xffffece8, 0xffffe8e8, 0xffffe8e4,
+ 0xffffe4e4, 0xffffe4e0, 0xffffe0e0, 0xffffe0dc, 0xffffdcdc, 0xffffdcd8,
+ 0xffffd8d8, 0xffffd8d4, 0xffffd4d4, 0xffffd4d0, 0xffffd0d0, 0xffffd0cc,
+ 0xffffcccc, 0xffffccc8, 0xffffc8c8, 0xffffc8c4, 0xffffc4c4, 0xffffc4c0,
+ 0xffffc0c0, 0xffffc0bc, 0xffffbcbc, 0xffffbcb8, 0xffffb8b8, 0xffffb8b4,
+ 0xffffb4b4, 0xffffb4b0, 0xffffb0b0, 0xffffb0ac, 0xffffacac, 0xffffaca8,
+ 0xffffa8a8, 0xffffa8a4, 0xffffa4a4, 0xffffa4a0, 0xffffa0a0, 0xffffa09c,
+ 0xffff9c9c, 0xffff9c98, 0xffff9898, 0xffff9894, 0xffff9494, 0xffff9490,
+ 0xffff9090, 0xffff908c, 0xffff8c8c, 0xffff8c88, 0xffff8888, 0xffff8884,
+ 0xffff8484, 0xffff8480, 0xffff8080, 0xffff807c, 0xffff7c7c, 0xffff7c78,
+ 0xffff7878, 0xffff7874, 0xffff7474, 0xffff7470, 0xffff7070, 0xffff706c,
+ 0xffff6c6c, 0xffff6c68, 0xffff6868, 0xffff6864, 0xffff6464, 0xffff6460,
+ 0xffff6060, 0xffff605c, 0xffff5c5c, 0xffff5c58, 0xffff5858, 0xffff5854,
+ 0xffff5454, 0xffff5450, 0xffff5050, 0xffff504c, 0xffff4c4c, 0xffff4c48,
+ 0xffff4848, 0xffff4844, 0xffff4444, 0xffff4440, 0xffff4040, 0xffff403c,
+ 0xffff3c3c, 0xffff3c38, 0xffff3838, 0xffff3834, 0xffff3434, 0xffff3430,
+ 0xffff3030, 0xffff302c, 0xffff2c2c, 0xffff2c28, 0xffff2828, 0xffff2824,
+ 0xffff2424, 0xffff2420, 0xffff2020, 0xffff201c, 0xffff1c1c, 0xffff1c18,
+ 0xffff1818, 0xffff1814, 0xffff1414, 0xffff1410, 0xffff1010, 0xffff100c,
+ 0xffff0c0c, 0xffff0c08, 0xffff0808, 0xffff0804, 0xffff0404, 0xffff0400,
+ 0xffff0000, 0xfffffffb,
+};
+
+static u8 tmp_buf[TEST_BUFLEN];
+
+#define full_csum(buff, len, sum) csum_fold(csum_partial(buff, len, sum))
+
+#define CHECK_EQ(lhs, rhs) KUNIT_ASSERT_EQ(test, (__force u64)lhs, (__force u64)rhs)
+
+static __sum16 to_sum16(u16 x)
+{
+ return (__force __sum16)le16_to_cpu((__force __le16)x);
+}
+
+/* This function swaps the bytes inside each half of a __wsum */
+static __wsum to_wsum(u32 x)
+{
+ u16 hi = le16_to_cpu((__force __le16)(x >> 16));
+ u16 lo = le16_to_cpu((__force __le16)x);
+
+ return (__force __wsum)((hi << 16) | lo);
+}
+
+static void assert_setup_correct(struct kunit *test)
+{
+ CHECK_EQ(sizeof(random_buf) / sizeof(random_buf[0]), MAX_LEN);
+ CHECK_EQ(sizeof(expected_results) / sizeof(expected_results[0]),
+ MAX_LEN);
+ CHECK_EQ(sizeof(init_sums_no_overflow) /
+ sizeof(init_sums_no_overflow[0]),
+ MAX_LEN);
+}
+
+/*
+ * Test with randomized input (predetermined random data with known results).
+ */
+static void test_csum_fixed_random_inputs(struct kunit *test)
+{
+ int len, align;
+ __wsum sum;
+ __sum16 result, expec;
+
+ assert_setup_correct(test);
+ for (align = 0; align < TEST_BUFLEN; ++align) {
+ memcpy(&tmp_buf[align], random_buf,
+ min(MAX_LEN, TEST_BUFLEN - align));
+ for (len = 0; len < MAX_LEN && (align + len) < TEST_BUFLEN;
+ ++len) {
+ /*
+ * Test the precomputed random input.
+ */
+ sum = to_wsum(random_init_sum);
+ result = full_csum(&tmp_buf[align], len, sum);
+ expec = to_sum16(expected_results[len]);
+ CHECK_EQ(result, expec);
+ }
+ }
+}
+
+/*
+ * All ones input test. If there are any missing carry operations, it fails.
+ */
+static void test_csum_all_carry_inputs(struct kunit *test)
+{
+ int len, align;
+ __wsum sum;
+ __sum16 result, expec;
+
+ assert_setup_correct(test);
+ memset(tmp_buf, 0xff, TEST_BUFLEN);
+ for (align = 0; align < TEST_BUFLEN; ++align) {
+ for (len = 0; len < MAX_LEN && (align + len) < TEST_BUFLEN;
+ ++len) {
+ /*
+ * All carries from input and initial sum.
+ */
+ sum = to_wsum(0xffffffff);
+ result = full_csum(&tmp_buf[align], len, sum);
+ expec = to_sum16((len & 1) ? 0xff00 : 0);
+ CHECK_EQ(result, expec);
+
+ /*
+ * All carries from input.
+ */
+ sum = 0;
+ result = full_csum(&tmp_buf[align], len, sum);
+ if (len & 1)
+ expec = to_sum16(0xff00);
+ else if (len)
+ expec = 0;
+ else
+ expec = to_sum16(0xffff);
+ CHECK_EQ(result, expec);
+ }
+ }
+}
+
+/*
+ * Test with input that alone doesn't cause any carries. By selecting the
+ * maximum initial sum, this allows us to test that there are no carries
+ * where there shouldn't be.
+ */
+static void test_csum_no_carry_inputs(struct kunit *test)
+{
+ int len, align;
+ __wsum sum;
+ __sum16 result, expec;
+
+ assert_setup_correct(test);
+ memset(tmp_buf, 0x4, TEST_BUFLEN);
+ for (align = 0; align < TEST_BUFLEN; ++align) {
+ for (len = 0; len < MAX_LEN && (align + len) < TEST_BUFLEN;
+ ++len) {
+ /*
+ * Expect no carries.
+ */
+ sum = to_wsum(init_sums_no_overflow[len]);
+ result = full_csum(&tmp_buf[align], len, sum);
+ expec = 0;
+ CHECK_EQ(result, expec);
+
+ /*
+ * Expect one carry.
+ */
+ sum = to_wsum(init_sums_no_overflow[len] + 1);
+ result = full_csum(&tmp_buf[align], len, sum);
+ expec = to_sum16(len ? 0xfffe : 0xffff);
+ CHECK_EQ(result, expec);
+ }
+ }
+}
+
+static struct kunit_case __refdata checksum_test_cases[] = {
+ KUNIT_CASE(test_csum_fixed_random_inputs),
+ KUNIT_CASE(test_csum_all_carry_inputs),
+ KUNIT_CASE(test_csum_no_carry_inputs),
+ {}
+};
+
+static struct kunit_suite checksum_test_suite = {
+ .name = "checksum",
+ .test_cases = checksum_test_cases,
+};
+
+kunit_test_suites(&checksum_test_suite);
+
+MODULE_AUTHOR("Noah Goldstein <goldstein.w.n@gmail.com>");
+MODULE_LICENSE("GPL");
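
Why the all-carry expectations hold, as a hedged sanity check: with every
byte 0xff each 16-bit word is 0xffff, and adding 0xffff is the identity in
ones'-complement arithmetic, so with the all-ones initial sum any
even-length buffer folds to 0xffff and csum_fold() returns ~0xffff = 0. An
odd length contributes one extra byte worth 0x00ff, folding to 0x00ff, so
csum_fold() returns ~0x00ff = 0xff00, exactly the values that
test_csum_all_carry_inputs() expects.
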
diff --git a/lib/clz_ctz.c b/lib/clz_ctz.c
new file mode 100644
index 000000000..fb8c0c5c2
--- /dev/null
+++ b/lib/clz_ctz.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * lib/clz_ctz.c
+ *
+ * Copyright (C) 2013 Chanho Min <chanho.min@lge.com>
+ *
+ * The functions in this file aren't called directly, but are required by
+ * GCC builtins such as __builtin_ctz, and therefore they can't be removed
+ * despite appearing unreferenced in kernel source.
+ *
+ * __c[lt]z[sd]i2 can be overridden by linking arch-specific versions.
+ */
+
+#include <linux/export.h>
+#include <linux/kernel.h>
+
+int __weak __ctzsi2(int val);
+int __weak __ctzsi2(int val)
+{
+ return __ffs(val);
+}
+EXPORT_SYMBOL(__ctzsi2);
+
+int __weak __clzsi2(int val);
+int __weak __clzsi2(int val)
+{
+ return 32 - fls(val);
+}
+EXPORT_SYMBOL(__clzsi2);
+
+int __weak __clzdi2(u64 val);
+int __weak __clzdi2(u64 val)
+{
+ return 64 - fls64(val);
+}
+EXPORT_SYMBOL(__clzdi2);
+
+int __weak __ctzdi2(u64 val);
+int __weak __ctzdi2(u64 val)
+{
+ return __ffs64(val);
+}
+EXPORT_SYMBOL(__ctzdi2);
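
A hedged example of what these fallbacks compute for a 32-bit int (as with
the GCC builtins, the result for 0 is undefined):

    /* __builtin_ctz(0x10) == 4:  __ctzsi2(0x10) == __ffs(0x10)    == 4  */
    /* __builtin_clz(0x10) == 27: __clzsi2(0x10) == 32 - fls(0x10) == 27 */
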
diff --git a/lib/clz_tab.c b/lib/clz_tab.c
new file mode 100644
index 000000000..b6118d09f
--- /dev/null
+++ b/lib/clz_tab.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+const unsigned char __clz_tab[] = {
+ 0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5,
+ 5, 5, 5, 5, 5, 5, 5, 5,
+ 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+ 6, 6, 6, 6, 6, 6, 6, 6,
+ 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 7,
+ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8,
+};
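
The table maps a byte value to its bit length, i.e. floor(log2(x)) + 1 with
__clz_tab[0] == 0, so for example:

    /* __clz_tab[1] == 1, __clz_tab[5] == 3, __clz_tab[200] == 8 */
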
diff --git a/lib/cmdline.c b/lib/cmdline.c
new file mode 100644
index 000000000..90ed997d9
--- /dev/null
+++ b/lib/cmdline.c
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * linux/lib/cmdline.c
+ * Helper functions generally used for parsing kernel command line
+ * and module options.
+ *
+ * Code and copyrights come from init/main.c and arch/i386/kernel/setup.c.
+ *
+ * GNU Indent formatting options for this file: -kr -i8 -npsl -pcs
+ */
+
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+
+/*
+ * If a hyphen was found in get_option, this will handle the
+ * range of numbers, M-N. This will expand the range and insert
+ * the values M, M+1, ..., N into the ints array in get_options.
+ */
+
+static int get_range(char **str, int *pint, int n)
+{
+ int x, inc_counter, upper_range;
+
+ (*str)++;
+ upper_range = simple_strtol((*str), NULL, 0);
+ inc_counter = upper_range - *pint;
+ for (x = *pint; n && x < upper_range; x++, n--)
+ *pint++ = x;
+ return inc_counter;
+}
+
+/**
+ * get_option - Parse integer from an option string
+ * @str: option string
+ * @pint: (optional output) integer value parsed from @str
+ *
+ * Read an int from an option string; if available accept a subsequent
+ * comma as well.
+ *
+ * When @pint is NULL the function can be used as a validator of
+ * the current option in the string.
+ *
+ * Return values:
+ * 0 - no int in string
+ * 1 - int found, no subsequent comma
+ * 2 - int found including a subsequent comma
+ * 3 - hyphen found to denote a range
+ *
+ * A leading hyphen without an integer is treated as the no-integer
+ * case, but we consume it for the sake of simplification.
+ */
+
+int get_option(char **str, int *pint)
+{
+ char *cur = *str;
+ int value;
+
+ if (!cur || !(*cur))
+ return 0;
+ if (*cur == '-')
+ value = -simple_strtoull(++cur, str, 0);
+ else
+ value = simple_strtoull(cur, str, 0);
+ if (pint)
+ *pint = value;
+ if (cur == *str)
+ return 0;
+ if (**str == ',') {
+ (*str)++;
+ return 2;
+ }
+ if (**str == '-')
+ return 3;
+
+ return 1;
+}
+EXPORT_SYMBOL(get_option);
+
+/**
+ * get_options - Parse a string into a list of integers
+ * @str: String to be parsed
+ * @nints: size of integer array
+ * @ints: integer array (must have room for at least one element)
+ *
+ * This function parses a string containing a comma-separated
+ * list of integers, a hyphen-separated range of _positive_ integers,
+ * or a combination of both. The parse halts when the array is
+ * full, or when no more numbers can be retrieved from the
+ * string.
+ *
+ * When @nints is 0, the function just validates the given @str and
+ * returns the amount of parseable integers as described below.
+ *
+ * Returns:
+ *
+ * The first element of @ints is filled with the number of collected
+ * integers; the remaining elements hold the values parsed from @str.
+ *
+ * Return value is the character in the string which caused
+ * the parse to end (typically a null terminator, if @str is
+ * completely parseable).
+ */
+
+char *get_options(const char *str, int nints, int *ints)
+{
+ bool validate = (nints == 0);
+ int res, i = 1;
+
+ while (i < nints || validate) {
+ int *pint = validate ? ints : ints + i;
+
+ res = get_option((char **)&str, pint);
+ if (res == 0)
+ break;
+ if (res == 3) {
+ int n = validate ? 0 : nints - i;
+ int range_nums;
+
+ range_nums = get_range((char **)&str, pint, n);
+ if (range_nums < 0)
+ break;
+ /*
+ * Decrement the result by one to leave out the
+ * last number in the range. The next iteration
+ * will handle the upper number in the range
+ */
+ i += (range_nums - 1);
+ }
+ i++;
+ if (res == 1)
+ break;
+ }
+ ints[0] = i - 1;
+ return (char *)str;
+}
+EXPORT_SYMBOL(get_options);
+
+/**
+ * memparse - parse a string with mem suffixes into a number
+ * @ptr: Where parse begins
+ * @retptr: (output) Optional pointer to next char after parse completes
+ *
+ * Parses a string into a number. The number in the string at @ptr may
+ * be suffixed with one of the binary multipliers K, M, G, T, P, E.
+ */
+
+unsigned long long memparse(const char *ptr, char **retptr)
+{
+ char *endptr; /* local pointer to end of parsed string */
+
+ unsigned long long ret = simple_strtoull(ptr, &endptr, 0);
+
+ switch (*endptr) {
+ case 'E':
+ case 'e':
+ ret <<= 10;
+ fallthrough;
+ case 'P':
+ case 'p':
+ ret <<= 10;
+ fallthrough;
+ case 'T':
+ case 't':
+ ret <<= 10;
+ fallthrough;
+ case 'G':
+ case 'g':
+ ret <<= 10;
+ fallthrough;
+ case 'M':
+ case 'm':
+ ret <<= 10;
+ fallthrough;
+ case 'K':
+ case 'k':
+ ret <<= 10;
+ endptr++;
+ fallthrough;
+ default:
+ break;
+ }
+
+ if (retptr)
+ *retptr = endptr;
+
+ return ret;
+}
+EXPORT_SYMBOL(memparse);
+
+/**
+ * parse_option_str - Parse a string and check an option is set or not
+ * @str: String to be parsed
+ * @option: option name
+ *
+ * This function parses a string containing a comma-separated list of
+ * strings like a=b,c.
+ *
+ * Return true if there's such option in the string, or return false.
+ */
+bool parse_option_str(const char *str, const char *option)
+{
+ while (*str) {
+ if (!strncmp(str, option, strlen(option))) {
+ str += strlen(option);
+ if (!*str || *str == ',')
+ return true;
+ }
+
+ while (*str && *str != ',')
+ str++;
+
+ if (*str == ',')
+ str++;
+ }
+
+ return false;
+}
+
+/*
+ * Parse a string to get a param value pair.
+ * You can use " around spaces, but can't escape ".
+ * Hyphens and underscores are equivalent in parameter names.
+ */
+char *next_arg(char *args, char **param, char **val)
+{
+ unsigned int i, equals = 0;
+ int in_quote = 0, quoted = 0;
+
+ if (*args == '"') {
+ args++;
+ in_quote = 1;
+ quoted = 1;
+ }
+
+ for (i = 0; args[i]; i++) {
+ if (isspace(args[i]) && !in_quote)
+ break;
+ if (equals == 0) {
+ if (args[i] == '=')
+ equals = i;
+ }
+ if (args[i] == '"')
+ in_quote = !in_quote;
+ }
+
+ *param = args;
+ if (!equals)
+ *val = NULL;
+ else {
+ args[equals] = '\0';
+ *val = args + equals + 1;
+
+ /* Don't include quotes in value. */
+ if (**val == '"') {
+ (*val)++;
+ if (args[i-1] == '"')
+ args[i-1] = '\0';
+ }
+ }
+ if (quoted && i > 0 && args[i-1] == '"')
+ args[i-1] = '\0';
+
+ if (args[i]) {
+ args[i] = '\0';
+ args += i + 1;
+ } else
+ args += i;
+
+ /* Chew up trailing spaces. */
+ return skip_spaces(args);
+}
+EXPORT_SYMBOL(next_arg);
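
A hedged end-to-end example of get_options() with a range, following the
expansion logic above (get_range() fills in M..N-1 and the next
get_option() call picks up N itself):

    int ints[8];
    char *rest = get_options("1-3,5", ARRAY_SIZE(ints), ints);

    /* ints == { 4, 1, 2, 3, 5 }: ints[0] holds the count, and *rest points
     * at the terminating '\0' since the whole string was consumed. */
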
diff --git a/lib/cmdline_kunit.c b/lib/cmdline_kunit.c
new file mode 100644
index 000000000..d4572dbc9
--- /dev/null
+++ b/lib/cmdline_kunit.c
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Test cases for API provided by cmdline.c
+ */
+
+#include <kunit/test.h>
+#include <linux/kernel.h>
+#include <linux/random.h>
+#include <linux/string.h>
+
+static const char *cmdline_test_strings[] = {
+ "\"\"", "" , "=" , "\"-", "," , "-," , ",-" , "-" ,
+ "+," , "--", ",,", "''" , "\"\",", "\",\"", "-\"\"", "\"",
+};
+
+static const int cmdline_test_values[] = {
+ 1, 1, 1, 1, 2, 3, 2, 3,
+ 1, 3, 2, 1, 1, 1, 3, 1,
+};
+
+static_assert(ARRAY_SIZE(cmdline_test_strings) == ARRAY_SIZE(cmdline_test_values));
+
+static const char *cmdline_test_range_strings[] = {
+ "-7" , "--7" , "-1-2" , "7--9",
+ "7-" , "-7--9", "7-9," , "9-7" ,
+ "5-a", "a-5" , "5-8" , ",8-5",
+ "+,1", "-,4" , "-3,0-1,6", "4,-" ,
+ " +2", " -9" , "0-1,-3,6", "- 9" ,
+};
+
+static const int cmdline_test_range_values[][16] = {
+ { 1, -7, }, { 0, -0, }, { 4, -1, 0, +1, 2, }, { 0, 7, },
+ { 0, +7, }, { 0, -7, }, { 3, +7, 8, +9, 0, }, { 0, 9, },
+ { 0, +5, }, { 0, -0, }, { 4, +5, 6, +7, 8, }, { 0, 0, },
+ { 0, +0, }, { 0, -0, }, { 4, -3, 0, +1, 6, }, { 1, 4, },
+ { 0, +0, }, { 0, -0, }, { 4, +0, 1, -3, 6, }, { 0, 0, },
+};
+
+static_assert(ARRAY_SIZE(cmdline_test_range_strings) == ARRAY_SIZE(cmdline_test_range_values));
+
+static void cmdline_do_one_test(struct kunit *test, const char *in, int rc, int offset)
+{
+ const char *fmt = "Pattern: %s";
+ const char *out = in;
+ int dummy;
+ int ret;
+
+ ret = get_option((char **)&out, &dummy);
+
+ KUNIT_EXPECT_EQ_MSG(test, ret, rc, fmt, in);
+ KUNIT_EXPECT_PTR_EQ_MSG(test, out, in + offset, fmt, in);
+}
+
+static void cmdline_test_noint(struct kunit *test)
+{
+ unsigned int i = 0;
+
+ do {
+ const char *str = cmdline_test_strings[i];
+ int rc = 0;
+ int offset;
+
+ /* Only a leading '-' (the very first character) will advance the pointer */
+ offset = !!(*str == '-');
+ cmdline_do_one_test(test, str, rc, offset);
+ } while (++i < ARRAY_SIZE(cmdline_test_strings));
+}
+
+static void cmdline_test_lead_int(struct kunit *test)
+{
+ unsigned int i = 0;
+ char in[32];
+
+ do {
+ const char *str = cmdline_test_strings[i];
+ int rc = cmdline_test_values[i];
+ int offset;
+
+ sprintf(in, "%u%s", get_random_u8(), str);
+ /* Only a ',' right after the number will advance the pointer */
+ offset = strlen(in) - strlen(str) + !!(rc == 2);
+ cmdline_do_one_test(test, in, rc, offset);
+ } while (++i < ARRAY_SIZE(cmdline_test_strings));
+}
+
+static void cmdline_test_tail_int(struct kunit *test)
+{
+ unsigned int i = 0;
+ char in[32];
+
+ do {
+ const char *str = cmdline_test_strings[i];
+ /* When prefixed by "" or "-", the result is a valid integer */
+ int rc = strcmp(str, "") ? (strcmp(str, "-") ? 0 : 1) : 1;
+ int offset;
+
+ sprintf(in, "%s%u", str, get_random_u8());
+ /*
+ * Only a leading '-' that is not followed by an integer
+ * will advance the pointer.
+ */
+ offset = rc ? strlen(in) : !!(*str == '-');
+ cmdline_do_one_test(test, in, rc, offset);
+ } while (++i < ARRAY_SIZE(cmdline_test_strings));
+}
+
+static void cmdline_do_one_range_test(struct kunit *test, const char *in,
+ unsigned int n, const int *e)
+{
+ unsigned int i;
+ int r[16];
+ int *p;
+
+ memset(r, 0, sizeof(r));
+ get_options(in, ARRAY_SIZE(r), r);
+ KUNIT_EXPECT_EQ_MSG(test, r[0], e[0], "in test %u (parsed) expected %d numbers, got %d",
+ n, e[0], r[0]);
+ for (i = 1; i < ARRAY_SIZE(r); i++)
+ KUNIT_EXPECT_EQ_MSG(test, r[i], e[i], "in test %u at %u", n, i);
+
+ memset(r, 0, sizeof(r));
+ get_options(in, 0, r);
+ KUNIT_EXPECT_EQ_MSG(test, r[0], e[0], "in test %u (validated) expected %d numbers, got %d",
+ n, e[0], r[0]);
+
+ p = memchr_inv(&r[1], 0, sizeof(r) - sizeof(r[0]));
+ KUNIT_EXPECT_PTR_EQ_MSG(test, p, NULL, "in test %u at %u out of bound", n, p - r);
+}
+
+static void cmdline_test_range(struct kunit *test)
+{
+ unsigned int i = 0;
+
+ do {
+ const char *str = cmdline_test_range_strings[i];
+ const int *e = cmdline_test_range_values[i];
+
+ cmdline_do_one_range_test(test, str, i, e);
+ } while (++i < ARRAY_SIZE(cmdline_test_range_strings));
+}
+
+static struct kunit_case cmdline_test_cases[] = {
+ KUNIT_CASE(cmdline_test_noint),
+ KUNIT_CASE(cmdline_test_lead_int),
+ KUNIT_CASE(cmdline_test_tail_int),
+ KUNIT_CASE(cmdline_test_range),
+ {}
+};
+
+static struct kunit_suite cmdline_test_suite = {
+ .name = "cmdline",
+ .test_cases = cmdline_test_cases,
+};
+kunit_test_suite(cmdline_test_suite);
+
+MODULE_LICENSE("GPL");
diff --git a/lib/cmpdi2.c b/lib/cmpdi2.c
new file mode 100644
index 000000000..f7cebd52b
--- /dev/null
+++ b/lib/cmpdi2.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * __cmpdi2 - three-way compare of two signed 64-bit values: returns 0
+ * if a < b, 1 if a == b, and 2 if a > b (libgcc helper for
+ * architectures without native 64-bit comparison).
+ */
+
+#include <linux/export.h>
+
+#include <linux/libgcc.h>
+
+word_type notrace __cmpdi2(long long a, long long b)
+{
+ const DWunion au = {
+ .ll = a
+ };
+ const DWunion bu = {
+ .ll = b
+ };
+
+ if (au.s.high < bu.s.high)
+ return 0;
+ else if (au.s.high > bu.s.high)
+ return 2;
+
+ if ((unsigned int) au.s.low < (unsigned int) bu.s.low)
+ return 0;
+ else if ((unsigned int) au.s.low > (unsigned int) bu.s.low)
+ return 2;
+
+ return 1;
+}
+EXPORT_SYMBOL(__cmpdi2);
diff --git a/lib/compat_audit.c b/lib/compat_audit.c
new file mode 100644
index 000000000..3d6b8996f
--- /dev/null
+++ b/lib/compat_audit.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/audit_arch.h>
+#include <asm/unistd32.h>
+
+unsigned compat_dir_class[] = {
+#include <asm-generic/audit_dir_write.h>
+~0U
+};
+
+unsigned compat_read_class[] = {
+#include <asm-generic/audit_read.h>
+~0U
+};
+
+unsigned compat_write_class[] = {
+#include <asm-generic/audit_write.h>
+~0U
+};
+
+unsigned compat_chattr_class[] = {
+#include <asm-generic/audit_change_attr.h>
+~0U
+};
+
+unsigned compat_signal_class[] = {
+#include <asm-generic/audit_signal.h>
+~0U
+};
+
+int audit_classify_compat_syscall(int abi, unsigned syscall)
+{
+ switch (syscall) {
+#ifdef __NR_open
+ case __NR_open:
+ return AUDITSC_OPEN;
+#endif
+#ifdef __NR_openat
+ case __NR_openat:
+ return AUDITSC_OPENAT;
+#endif
+#ifdef __NR_socketcall
+ case __NR_socketcall:
+ return AUDITSC_SOCKETCALL;
+#endif
+ case __NR_execve:
+ return AUDITSC_EXECVE;
+#ifdef __NR_openat2
+ case __NR_openat2:
+ return AUDITSC_OPENAT2;
+#endif
+ default:
+ return AUDITSC_COMPAT;
+ }
+}
diff --git a/lib/cpu_rmap.c b/lib/cpu_rmap.c
new file mode 100644
index 000000000..4c348670d
--- /dev/null
+++ b/lib/cpu_rmap.c
@@ -0,0 +1,339 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * cpu_rmap.c: CPU affinity reverse-map support
+ * Copyright 2011 Solarflare Communications Inc.
+ */
+
+#include <linux/cpu_rmap.h>
+#include <linux/interrupt.h>
+#include <linux/export.h>
+
+/*
+ * These functions maintain a mapping from CPUs to some ordered set of
+ * objects with CPU affinities. This can be seen as a reverse-map of
+ * CPU affinity. However, we do not assume that the object affinities
+ * cover all CPUs in the system. For those CPUs not directly covered
+ * by object affinities, we attempt to find a nearest object based on
+ * CPU topology.
+ */
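+
+/*
+ * A minimal sketch of the intended use; nr_queues, queue and
+ * queue_affinity stand for caller state, and cpu_rmap_lookup_obj() is
+ * the lookup helper from <linux/cpu_rmap.h>:
+ *
+ *	struct cpu_rmap *rmap = alloc_cpu_rmap(nr_queues, GFP_KERNEL);
+ *	int idx = cpu_rmap_add(rmap, queue);
+ *	cpu_rmap_update(rmap, idx, queue_affinity);
+ *	...
+ *	queue = cpu_rmap_lookup_obj(rmap, raw_smp_processor_id());
+ */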
+
+/**
+ * alloc_cpu_rmap - allocate CPU affinity reverse-map
+ * @size: Number of objects to be mapped
+ * @flags: Allocation flags e.g. %GFP_KERNEL
+ */
+struct cpu_rmap *alloc_cpu_rmap(unsigned int size, gfp_t flags)
+{
+ struct cpu_rmap *rmap;
+ unsigned int cpu;
+ size_t obj_offset;
+
+ /* This is a silly number of objects, and we use u16 indices. */
+ if (size > 0xffff)
+ return NULL;
+
+ /* Offset of object pointer array from base structure */
+ obj_offset = ALIGN(offsetof(struct cpu_rmap, near[nr_cpu_ids]),
+ sizeof(void *));
+
+ rmap = kzalloc(obj_offset + size * sizeof(rmap->obj[0]), flags);
+ if (!rmap)
+ return NULL;
+
+ kref_init(&rmap->refcount);
+ rmap->obj = (void **)((char *)rmap + obj_offset);
+
+ /* Initially assign CPUs to objects on a rota, since we have
+ * no idea where the objects are. Use infinite distance, so
+ * any object with known distance is preferable. Include the
+ * CPUs that are not present/online, since we definitely want
+ * any newly-hotplugged CPUs to have some object assigned.
+ */
+ for_each_possible_cpu(cpu) {
+ rmap->near[cpu].index = cpu % size;
+ rmap->near[cpu].dist = CPU_RMAP_DIST_INF;
+ }
+
+ rmap->size = size;
+ return rmap;
+}
+EXPORT_SYMBOL(alloc_cpu_rmap);
+
+/**
+ * cpu_rmap_release - internal reclaiming helper called from kref_put
+ * @ref: kref to struct cpu_rmap
+ */
+static void cpu_rmap_release(struct kref *ref)
+{
+ struct cpu_rmap *rmap = container_of(ref, struct cpu_rmap, refcount);
+ kfree(rmap);
+}
+
+/**
+ * cpu_rmap_get - internal helper to get new ref on a cpu_rmap
+ * @rmap: reverse-map allocated with alloc_cpu_rmap()
+ */
+static inline void cpu_rmap_get(struct cpu_rmap *rmap)
+{
+ kref_get(&rmap->refcount);
+}
+
+/**
+ * cpu_rmap_put - release ref on a cpu_rmap
+ * @rmap: reverse-map allocated with alloc_cpu_rmap()
+ */
+int cpu_rmap_put(struct cpu_rmap *rmap)
+{
+ return kref_put(&rmap->refcount, cpu_rmap_release);
+}
+EXPORT_SYMBOL(cpu_rmap_put);
+
+/* Reevaluate nearest object for given CPU, comparing with the given
+ * neighbours at the given distance.
+ */
+static bool cpu_rmap_copy_neigh(struct cpu_rmap *rmap, unsigned int cpu,
+ const struct cpumask *mask, u16 dist)
+{
+ int neigh;
+
+ for_each_cpu(neigh, mask) {
+ if (rmap->near[cpu].dist > dist &&
+ rmap->near[neigh].dist <= dist) {
+ rmap->near[cpu].index = rmap->near[neigh].index;
+ rmap->near[cpu].dist = dist;
+ return true;
+ }
+ }
+ return false;
+}
+
+#ifdef DEBUG
+static void debug_print_rmap(const struct cpu_rmap *rmap, const char *prefix)
+{
+ unsigned index;
+ unsigned int cpu;
+
+ pr_info("cpu_rmap %p, %s:\n", rmap, prefix);
+
+ for_each_possible_cpu(cpu) {
+ index = rmap->near[cpu].index;
+ pr_info("cpu %d -> obj %u (distance %u)\n",
+ cpu, index, rmap->near[cpu].dist);
+ }
+}
+#else
+static inline void
+debug_print_rmap(const struct cpu_rmap *rmap, const char *prefix)
+{
+}
+#endif
+
+static int get_free_index(struct cpu_rmap *rmap)
+{
+ int i;
+
+ for (i = 0; i < rmap->size; i++)
+ if (!rmap->obj[i])
+ return i;
+
+ return -ENOSPC;
+}
+
+/**
+ * cpu_rmap_add - add object to a rmap
+ * @rmap: CPU rmap allocated with alloc_cpu_rmap()
+ * @obj: Object to add to rmap
+ *
+ * Return index of object or -ENOSPC if no free entry was found
+ */
+int cpu_rmap_add(struct cpu_rmap *rmap, void *obj)
+{
+ int index = get_free_index(rmap);
+
+ if (index < 0)
+ return index;
+
+ rmap->obj[index] = obj;
+ return index;
+}
+EXPORT_SYMBOL(cpu_rmap_add);
+
+/**
+ * cpu_rmap_update - update CPU rmap following a change of object affinity
+ * @rmap: CPU rmap to update
+ * @index: Index of object whose affinity changed
+ * @affinity: New CPU affinity of object
+ *
+ * Return: 0 on success, or -ENOMEM if the temporary update mask cannot
+ * be allocated.
+ */
+int cpu_rmap_update(struct cpu_rmap *rmap, u16 index,
+ const struct cpumask *affinity)
+{
+ cpumask_var_t update_mask;
+ unsigned int cpu;
+
+ if (unlikely(!zalloc_cpumask_var(&update_mask, GFP_KERNEL)))
+ return -ENOMEM;
+
+ /* Invalidate distance for all CPUs for which this used to be
+ * the nearest object. Mark those CPUs for update.
+ */
+ for_each_online_cpu(cpu) {
+ if (rmap->near[cpu].index == index) {
+ rmap->near[cpu].dist = CPU_RMAP_DIST_INF;
+ cpumask_set_cpu(cpu, update_mask);
+ }
+ }
+
+ debug_print_rmap(rmap, "after invalidating old distances");
+
+ /* Set distance to 0 for all CPUs in the new affinity mask.
+ * Mark all CPUs within their NUMA nodes for update.
+ */
+ for_each_cpu(cpu, affinity) {
+ rmap->near[cpu].index = index;
+ rmap->near[cpu].dist = 0;
+ cpumask_or(update_mask, update_mask,
+ cpumask_of_node(cpu_to_node(cpu)));
+ }
+
+ debug_print_rmap(rmap, "after updating neighbours");
+
+ /* Update distances based on topology */
+ for_each_cpu(cpu, update_mask) {
+ if (cpu_rmap_copy_neigh(rmap, cpu,
+ topology_sibling_cpumask(cpu), 1))
+ continue;
+ if (cpu_rmap_copy_neigh(rmap, cpu,
+ topology_core_cpumask(cpu), 2))
+ continue;
+ if (cpu_rmap_copy_neigh(rmap, cpu,
+ cpumask_of_node(cpu_to_node(cpu)), 3))
+ continue;
+ /* We could continue into NUMA node distances, but for now
+ * we give up.
+ */
+ }
+
+ debug_print_rmap(rmap, "after copying neighbours");
+
+ free_cpumask_var(update_mask);
+ return 0;
+}
+EXPORT_SYMBOL(cpu_rmap_update);
+
+/* Glue between IRQ affinity notifiers and CPU rmaps */
+
+struct irq_glue {
+ struct irq_affinity_notify notify;
+ struct cpu_rmap *rmap;
+ u16 index;
+};
+
+/**
+ * free_irq_cpu_rmap - free a CPU affinity reverse-map used for IRQs
+ * @rmap: Reverse-map allocated with alloc_irq_cpu_rmap(), or %NULL
+ *
+ * Must be called in process context, before freeing the IRQs.
+ */
+void free_irq_cpu_rmap(struct cpu_rmap *rmap)
+{
+ struct irq_glue *glue;
+ u16 index;
+
+ if (!rmap)
+ return;
+
+ for (index = 0; index < rmap->size; index++) {
+ glue = rmap->obj[index];
+ if (glue)
+ irq_set_affinity_notifier(glue->notify.irq, NULL);
+ }
+
+ cpu_rmap_put(rmap);
+}
+EXPORT_SYMBOL(free_irq_cpu_rmap);
+
+/**
+ * irq_cpu_rmap_notify - callback for IRQ subsystem when IRQ affinity updated
+ * @notify: struct irq_affinity_notify passed by irq/manage.c
+ * @mask: cpu mask for new SMP affinity
+ *
+ * This is executed in workqueue context.
+ */
+static void
+irq_cpu_rmap_notify(struct irq_affinity_notify *notify, const cpumask_t *mask)
+{
+ struct irq_glue *glue =
+ container_of(notify, struct irq_glue, notify);
+ int rc;
+
+ rc = cpu_rmap_update(glue->rmap, glue->index, mask);
+ if (rc)
+ pr_warn("irq_cpu_rmap_notify: update failed: %d\n", rc);
+}
+
+/**
+ * irq_cpu_rmap_release - reclaiming callback for IRQ subsystem
+ * @ref: kref to struct irq_affinity_notify passed by irq/manage.c
+ */
+static void irq_cpu_rmap_release(struct kref *ref)
+{
+ struct irq_glue *glue =
+ container_of(ref, struct irq_glue, notify.kref);
+
+ glue->rmap->obj[glue->index] = NULL;
+ cpu_rmap_put(glue->rmap);
+ kfree(glue);
+}
+
+/**
+ * irq_cpu_rmap_remove - remove an IRQ from a CPU affinity reverse-map
+ * @rmap: The reverse-map
+ * @irq: The IRQ number
+ */
+int irq_cpu_rmap_remove(struct cpu_rmap *rmap, int irq)
+{
+ return irq_set_affinity_notifier(irq, NULL);
+}
+EXPORT_SYMBOL(irq_cpu_rmap_remove);
+
+/**
+ * irq_cpu_rmap_add - add an IRQ to a CPU affinity reverse-map
+ * @rmap: The reverse-map
+ * @irq: The IRQ number
+ *
+ * This adds an IRQ affinity notifier that will update the reverse-map
+ * automatically.
+ *
+ * Must be called in process context, after the IRQ is allocated but
+ * before it is bound with request_irq().
+ */
+int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq)
+{
+ struct irq_glue *glue = kzalloc(sizeof(*glue), GFP_KERNEL);
+ int rc;
+
+ if (!glue)
+ return -ENOMEM;
+ glue->notify.notify = irq_cpu_rmap_notify;
+ glue->notify.release = irq_cpu_rmap_release;
+ glue->rmap = rmap;
+ cpu_rmap_get(rmap);
+ rc = cpu_rmap_add(rmap, glue);
+ if (rc < 0)
+ goto err_add;
+
+ glue->index = rc;
+ rc = irq_set_affinity_notifier(irq, &glue->notify);
+ if (rc)
+ goto err_set;
+
+ return rc;
+
+err_set:
+ rmap->obj[glue->index] = NULL;
+err_add:
+ cpu_rmap_put(glue->rmap);
+ kfree(glue);
+ return rc;
+}
+EXPORT_SYMBOL(irq_cpu_rmap_add);
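+
+/*
+ * Typical driver flow, sketched assuming the alloc_irq_cpu_rmap()
+ * GFP_KERNEL wrapper from <linux/cpu_rmap.h>:
+ *
+ *	rmap = alloc_irq_cpu_rmap(n_vectors);
+ *	for each vector: irq_cpu_rmap_add(rmap, irq);
+ *	...
+ *	free_irq_cpu_rmap(rmap);	(before freeing the IRQs)
+ *
+ * The notifiers then keep the reverse-map current whenever the
+ * interrupts are retargeted.
+ */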
diff --git a/lib/cpumask.c b/lib/cpumask.c
new file mode 100644
index 000000000..a7fd02b5a
--- /dev/null
+++ b/lib/cpumask.c
@@ -0,0 +1,199 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/cpumask.h>
+#include <linux/export.h>
+#include <linux/memblock.h>
+#include <linux/numa.h>
+
+/**
+ * cpumask_next_wrap - helper to implement for_each_cpu_wrap
+ * @n: the cpu prior to the place to search
+ * @mask: the cpumask pointer
+ * @start: the start point of the iteration
+ * @wrap: assume @n crossing @start terminates the iteration
+ *
+ * Returns >= nr_cpu_ids on completion
+ *
+ * Note: the @wrap argument is required for the start condition when
+ * we cannot assume @start is set in @mask.
+ */
+unsigned int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
+{
+ unsigned int next;
+
+again:
+ next = cpumask_next(n, mask);
+
+ if (wrap && n < start && next >= start) {
+ return nr_cpumask_bits;
+
+ } else if (next >= nr_cpumask_bits) {
+ wrap = true;
+ n = -1;
+ goto again;
+ }
+
+ return next;
+}
+EXPORT_SYMBOL(cpumask_next_wrap);
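+
+/*
+ * For illustration: this helper is what lets the following loop visit
+ * every CPU in the mask exactly once, starting at start and wrapping
+ * past the end (do_something() stands for caller work):
+ *
+ *	for_each_cpu_wrap(cpu, cpu_online_mask, start)
+ *		do_something(cpu);
+ */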
+
+/* These are not inline because of header tangles. */
+#ifdef CONFIG_CPUMASK_OFFSTACK
+/**
+ * alloc_cpumask_var_node - allocate a struct cpumask on a given node
+ * @mask: pointer to cpumask_var_t where the cpumask is returned
+ * @flags: GFP_ flags
+ * @node: memory node from which to allocate or %NUMA_NO_NODE
+ *
+ * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise it is
+ * a nop returning a constant 1 (in <linux/cpumask.h>).
+ * Returns TRUE if memory allocation succeeded, FALSE otherwise.
+ *
+ * In addition, mask will be NULL if this fails. Note that gcc is
+ * usually smart enough to know that mask can never be NULL if
+ * CONFIG_CPUMASK_OFFSTACK=n, so does code elimination in that case
+ * too.
+ */
+bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
+{
+ *mask = kmalloc_node(cpumask_size(), flags, node);
+
+#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+ if (!*mask) {
+ printk(KERN_ERR "=> alloc_cpumask_var: failed!\n");
+ dump_stack();
+ }
+#endif
+
+ return *mask != NULL;
+}
+EXPORT_SYMBOL(alloc_cpumask_var_node);
+
+/**
+ * alloc_bootmem_cpumask_var - allocate a struct cpumask from the bootmem arena.
+ * @mask: pointer to cpumask_var_t where the cpumask is returned
+ *
+ * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
+ * a nop (in <linux/cpumask.h>).
+ * Either returns an allocated (zero-filled) cpumask, or causes the
+ * system to panic.
+ */
+void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
+{
+ *mask = memblock_alloc(cpumask_size(), SMP_CACHE_BYTES);
+ if (!*mask)
+ panic("%s: Failed to allocate %u bytes\n", __func__,
+ cpumask_size());
+}
+
+/**
+ * free_cpumask_var - frees memory allocated for a struct cpumask.
+ * @mask: cpumask to free
+ *
+ * This is safe on a NULL mask.
+ */
+void free_cpumask_var(cpumask_var_t mask)
+{
+ kfree(mask);
+}
+EXPORT_SYMBOL(free_cpumask_var);
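+
+/*
+ * The usual allocate/use/free pairing, sketched with the
+ * zalloc_cpumask_var() wrapper from <linux/cpumask.h>:
+ *
+ *	cpumask_var_t mask;
+ *
+ *	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
+ *		return -ENOMEM;
+ *	...
+ *	free_cpumask_var(mask);
+ *
+ * The same code works whether or not CONFIG_CPUMASK_OFFSTACK is set.
+ */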
+
+/**
+ * free_bootmem_cpumask_var - frees result of alloc_bootmem_cpumask_var
+ * @mask: cpumask to free
+ */
+void __init free_bootmem_cpumask_var(cpumask_var_t mask)
+{
+ memblock_free(mask, cpumask_size());
+}
+#endif
+
+/**
+ * cpumask_local_spread - select the i'th cpu based on NUMA distances
+ * @i: index number
+ * @node: local numa_node
+ *
+ * Returns online CPU according to a numa aware policy; local cpus are returned
+ * first, followed by non-local ones, then it wraps around.
+ *
+ * For those who want to enumerate all CPUs based on their NUMA distances,
+ * i.e. by calling this function in a loop, like:
+ *
+ * for (i = 0; i < num_online_cpus(); i++) {
+ * cpu = cpumask_local_spread(i, node);
+ * do_something(cpu);
+ * }
+ *
+ * There's a better alternative based on for_each()-like iterators:
+ *
+ * for_each_numa_hop_mask(mask, node) {
+ * for_each_cpu_andnot(cpu, mask, prev)
+ * do_something(cpu);
+ * prev = mask;
+ * }
+ *
+ * Although more verbose, the iterator-based version is also cheaper:
+ * its overall complexity is O(sched_domains_numa_levels * nr_cpu_ids),
+ * while calling cpumask_local_spread() for each cpu costs
+ * O(sched_domains_numa_levels * nr_cpu_ids * log(nr_cpu_ids)) in total.
+ */
+unsigned int cpumask_local_spread(unsigned int i, int node)
+{
+ unsigned int cpu;
+
+ /* Wrap: we always want a cpu. */
+ i %= num_online_cpus();
+
+ cpu = (node == NUMA_NO_NODE) ?
+ cpumask_nth(i, cpu_online_mask) :
+ sched_numa_find_nth_cpu(cpu_online_mask, i, node);
+
+ WARN_ON(cpu >= nr_cpu_ids);
+ return cpu;
+}
+EXPORT_SYMBOL(cpumask_local_spread);
+
+static DEFINE_PER_CPU(int, distribute_cpu_mask_prev);
+
+/**
+ * cpumask_any_and_distribute - Return an arbitrary cpu within src1p & src2p.
+ * @src1p: first &cpumask for intersection
+ * @src2p: second &cpumask for intersection
+ *
+ * Iterated calls using the same @src1p and @src2p will be distributed within
+ * their intersection.
+ *
+ * Returns >= nr_cpu_ids if the intersection is empty.
+ */
+unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
+ const struct cpumask *src2p)
+{
+ unsigned int next, prev;
+
+ /* NOTE: our first selection will skip 0. */
+ prev = __this_cpu_read(distribute_cpu_mask_prev);
+
+ next = find_next_and_bit_wrap(cpumask_bits(src1p), cpumask_bits(src2p),
+ nr_cpumask_bits, prev + 1);
+ if (next < nr_cpu_ids)
+ __this_cpu_write(distribute_cpu_mask_prev, next);
+
+ return next;
+}
+EXPORT_SYMBOL(cpumask_any_and_distribute);
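+
+/*
+ * Illustrative use, with allowed_mask standing for a caller-supplied
+ * affinity mask and assign_work_to() for caller work:
+ *
+ *	cpu = cpumask_any_and_distribute(allowed_mask, cpu_online_mask);
+ *	if (cpu < nr_cpu_ids)
+ *		assign_work_to(cpu);
+ *
+ * Repeated calls rotate through the intersection instead of always
+ * returning its first set bit.
+ */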
+
+unsigned int cpumask_any_distribute(const struct cpumask *srcp)
+{
+ unsigned int next, prev;
+
+ /* NOTE: our first selection will skip 0. */
+ prev = __this_cpu_read(distribute_cpu_mask_prev);
+ next = find_next_bit_wrap(cpumask_bits(srcp), nr_cpumask_bits, prev + 1);
+ if (next < nr_cpu_ids)
+ __this_cpu_write(distribute_cpu_mask_prev, next);
+
+ return next;
+}
+EXPORT_SYMBOL(cpumask_any_distribute);
diff --git a/lib/cpumask_kunit.c b/lib/cpumask_kunit.c
new file mode 100644
index 000000000..a105e6369
--- /dev/null
+++ b/lib/cpumask_kunit.c
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * KUnit tests for cpumask.
+ *
+ * Author: Sander Vanheule <sander@svanheule.net>
+ */
+
+#include <kunit/test.h>
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+
+#define MASK_MSG(m) \
+ "%s contains %sCPUs %*pbl", #m, (cpumask_weight(m) ? "" : "no "), \
+ nr_cpumask_bits, cpumask_bits(m)
+
+#define EXPECT_FOR_EACH_CPU_EQ(test, mask) \
+ do { \
+ const cpumask_t *m = (mask); \
+ int mask_weight = cpumask_weight(m); \
+ int cpu, iter = 0; \
+ for_each_cpu(cpu, m) \
+ iter++; \
+ KUNIT_EXPECT_EQ_MSG((test), mask_weight, iter, MASK_MSG(mask)); \
+ } while (0)
+
+#define EXPECT_FOR_EACH_CPU_OP_EQ(test, op, mask1, mask2) \
+ do { \
+ const cpumask_t *m1 = (mask1); \
+ const cpumask_t *m2 = (mask2); \
+ int weight; \
+ int cpu, iter = 0; \
+ cpumask_##op(&mask_tmp, m1, m2); \
+ weight = cpumask_weight(&mask_tmp); \
+ for_each_cpu_##op(cpu, mask1, mask2) \
+ iter++; \
+ KUNIT_EXPECT_EQ((test), weight, iter); \
+ } while (0)
+
+#define EXPECT_FOR_EACH_CPU_WRAP_EQ(test, mask) \
+ do { \
+ const cpumask_t *m = (mask); \
+ int mask_weight = cpumask_weight(m); \
+ int cpu, iter = 0; \
+ for_each_cpu_wrap(cpu, m, nr_cpu_ids / 2) \
+ iter++; \
+ KUNIT_EXPECT_EQ_MSG((test), mask_weight, iter, MASK_MSG(mask)); \
+ } while (0)
+
+#define EXPECT_FOR_EACH_CPU_BUILTIN_EQ(test, name) \
+ do { \
+ int mask_weight = num_##name##_cpus(); \
+ int cpu, iter = 0; \
+ for_each_##name##_cpu(cpu) \
+ iter++; \
+ KUNIT_EXPECT_EQ_MSG((test), mask_weight, iter, MASK_MSG(cpu_##name##_mask)); \
+ } while (0)
+
+static cpumask_t mask_empty;
+static cpumask_t mask_all;
+static cpumask_t mask_tmp;
+
+static void test_cpumask_weight(struct kunit *test)
+{
+ KUNIT_EXPECT_TRUE_MSG(test, cpumask_empty(&mask_empty), MASK_MSG(&mask_empty));
+ KUNIT_EXPECT_TRUE_MSG(test, cpumask_full(&mask_all), MASK_MSG(&mask_all));
+
+ KUNIT_EXPECT_EQ_MSG(test, 0, cpumask_weight(&mask_empty), MASK_MSG(&mask_empty));
+ KUNIT_EXPECT_EQ_MSG(test, nr_cpu_ids, cpumask_weight(cpu_possible_mask),
+ MASK_MSG(cpu_possible_mask));
+ KUNIT_EXPECT_EQ_MSG(test, nr_cpu_ids, cpumask_weight(&mask_all), MASK_MSG(&mask_all));
+}
+
+static void test_cpumask_first(struct kunit *test)
+{
+ KUNIT_EXPECT_LE_MSG(test, nr_cpu_ids, cpumask_first(&mask_empty), MASK_MSG(&mask_empty));
+ KUNIT_EXPECT_EQ_MSG(test, 0, cpumask_first(cpu_possible_mask), MASK_MSG(cpu_possible_mask));
+
+ KUNIT_EXPECT_EQ_MSG(test, 0, cpumask_first_zero(&mask_empty), MASK_MSG(&mask_empty));
+ KUNIT_EXPECT_LE_MSG(test, nr_cpu_ids, cpumask_first_zero(cpu_possible_mask),
+ MASK_MSG(cpu_possible_mask));
+}
+
+static void test_cpumask_last(struct kunit *test)
+{
+ KUNIT_EXPECT_LE_MSG(test, nr_cpumask_bits, cpumask_last(&mask_empty),
+ MASK_MSG(&mask_empty));
+ KUNIT_EXPECT_EQ_MSG(test, nr_cpu_ids - 1, cpumask_last(cpu_possible_mask),
+ MASK_MSG(cpu_possible_mask));
+}
+
+static void test_cpumask_next(struct kunit *test)
+{
+ KUNIT_EXPECT_EQ_MSG(test, 0, cpumask_next_zero(-1, &mask_empty), MASK_MSG(&mask_empty));
+ KUNIT_EXPECT_LE_MSG(test, nr_cpu_ids, cpumask_next_zero(-1, cpu_possible_mask),
+ MASK_MSG(cpu_possible_mask));
+
+ KUNIT_EXPECT_LE_MSG(test, nr_cpu_ids, cpumask_next(-1, &mask_empty),
+ MASK_MSG(&mask_empty));
+ KUNIT_EXPECT_EQ_MSG(test, 0, cpumask_next(-1, cpu_possible_mask),
+ MASK_MSG(cpu_possible_mask));
+}
+
+static void test_cpumask_iterators(struct kunit *test)
+{
+ EXPECT_FOR_EACH_CPU_EQ(test, &mask_empty);
+ EXPECT_FOR_EACH_CPU_WRAP_EQ(test, &mask_empty);
+ EXPECT_FOR_EACH_CPU_OP_EQ(test, and, &mask_empty, &mask_empty);
+ EXPECT_FOR_EACH_CPU_OP_EQ(test, and, cpu_possible_mask, &mask_empty);
+ EXPECT_FOR_EACH_CPU_OP_EQ(test, andnot, &mask_empty, &mask_empty);
+
+ EXPECT_FOR_EACH_CPU_EQ(test, cpu_possible_mask);
+ EXPECT_FOR_EACH_CPU_WRAP_EQ(test, cpu_possible_mask);
+ EXPECT_FOR_EACH_CPU_OP_EQ(test, and, cpu_possible_mask, cpu_possible_mask);
+ EXPECT_FOR_EACH_CPU_OP_EQ(test, andnot, cpu_possible_mask, &mask_empty);
+}
+
+static void test_cpumask_iterators_builtin(struct kunit *test)
+{
+ EXPECT_FOR_EACH_CPU_BUILTIN_EQ(test, possible);
+
+ /* Ensure the dynamic masks are stable while running the tests */
+ cpu_hotplug_disable();
+
+ EXPECT_FOR_EACH_CPU_BUILTIN_EQ(test, online);
+ EXPECT_FOR_EACH_CPU_BUILTIN_EQ(test, present);
+
+ cpu_hotplug_enable();
+}
+
+static int test_cpumask_init(struct kunit *test)
+{
+ cpumask_clear(&mask_empty);
+ cpumask_setall(&mask_all);
+
+ return 0;
+}
+
+static struct kunit_case test_cpumask_cases[] = {
+ KUNIT_CASE(test_cpumask_weight),
+ KUNIT_CASE(test_cpumask_first),
+ KUNIT_CASE(test_cpumask_last),
+ KUNIT_CASE(test_cpumask_next),
+ KUNIT_CASE(test_cpumask_iterators),
+ KUNIT_CASE(test_cpumask_iterators_builtin),
+ {}
+};
+
+static struct kunit_suite test_cpumask_suite = {
+ .name = "cpumask",
+ .init = test_cpumask_init,
+ .test_cases = test_cpumask_cases,
+};
+kunit_test_suite(test_cpumask_suite);
+
+MODULE_LICENSE("GPL");
diff --git a/lib/crc-ccitt.c b/lib/crc-ccitt.c
new file mode 100644
index 000000000..d1a7d29d2
--- /dev/null
+++ b/lib/crc-ccitt.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * linux/lib/crc-ccitt.c
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/crc-ccitt.h>
+
+/*
+ * This mysterious table is just the CRC of each possible byte. It can be
+ * computed using the standard bit-at-a-time methods. The polynomial can
+ * be seen in entry 128, 0x8408. This corresponds to x^0 + x^5 + x^12.
+ * Add the implicit x^16, and you have the standard CRC-CCITT.
+ */
+u16 const crc_ccitt_table[256] = {
+ 0x0000, 0x1189, 0x2312, 0x329b, 0x4624, 0x57ad, 0x6536, 0x74bf,
+ 0x8c48, 0x9dc1, 0xaf5a, 0xbed3, 0xca6c, 0xdbe5, 0xe97e, 0xf8f7,
+ 0x1081, 0x0108, 0x3393, 0x221a, 0x56a5, 0x472c, 0x75b7, 0x643e,
+ 0x9cc9, 0x8d40, 0xbfdb, 0xae52, 0xdaed, 0xcb64, 0xf9ff, 0xe876,
+ 0x2102, 0x308b, 0x0210, 0x1399, 0x6726, 0x76af, 0x4434, 0x55bd,
+ 0xad4a, 0xbcc3, 0x8e58, 0x9fd1, 0xeb6e, 0xfae7, 0xc87c, 0xd9f5,
+ 0x3183, 0x200a, 0x1291, 0x0318, 0x77a7, 0x662e, 0x54b5, 0x453c,
+ 0xbdcb, 0xac42, 0x9ed9, 0x8f50, 0xfbef, 0xea66, 0xd8fd, 0xc974,
+ 0x4204, 0x538d, 0x6116, 0x709f, 0x0420, 0x15a9, 0x2732, 0x36bb,
+ 0xce4c, 0xdfc5, 0xed5e, 0xfcd7, 0x8868, 0x99e1, 0xab7a, 0xbaf3,
+ 0x5285, 0x430c, 0x7197, 0x601e, 0x14a1, 0x0528, 0x37b3, 0x263a,
+ 0xdecd, 0xcf44, 0xfddf, 0xec56, 0x98e9, 0x8960, 0xbbfb, 0xaa72,
+ 0x6306, 0x728f, 0x4014, 0x519d, 0x2522, 0x34ab, 0x0630, 0x17b9,
+ 0xef4e, 0xfec7, 0xcc5c, 0xddd5, 0xa96a, 0xb8e3, 0x8a78, 0x9bf1,
+ 0x7387, 0x620e, 0x5095, 0x411c, 0x35a3, 0x242a, 0x16b1, 0x0738,
+ 0xffcf, 0xee46, 0xdcdd, 0xcd54, 0xb9eb, 0xa862, 0x9af9, 0x8b70,
+ 0x8408, 0x9581, 0xa71a, 0xb693, 0xc22c, 0xd3a5, 0xe13e, 0xf0b7,
+ 0x0840, 0x19c9, 0x2b52, 0x3adb, 0x4e64, 0x5fed, 0x6d76, 0x7cff,
+ 0x9489, 0x8500, 0xb79b, 0xa612, 0xd2ad, 0xc324, 0xf1bf, 0xe036,
+ 0x18c1, 0x0948, 0x3bd3, 0x2a5a, 0x5ee5, 0x4f6c, 0x7df7, 0x6c7e,
+ 0xa50a, 0xb483, 0x8618, 0x9791, 0xe32e, 0xf2a7, 0xc03c, 0xd1b5,
+ 0x2942, 0x38cb, 0x0a50, 0x1bd9, 0x6f66, 0x7eef, 0x4c74, 0x5dfd,
+ 0xb58b, 0xa402, 0x9699, 0x8710, 0xf3af, 0xe226, 0xd0bd, 0xc134,
+ 0x39c3, 0x284a, 0x1ad1, 0x0b58, 0x7fe7, 0x6e6e, 0x5cf5, 0x4d7c,
+ 0xc60c, 0xd785, 0xe51e, 0xf497, 0x8028, 0x91a1, 0xa33a, 0xb2b3,
+ 0x4a44, 0x5bcd, 0x6956, 0x78df, 0x0c60, 0x1de9, 0x2f72, 0x3efb,
+ 0xd68d, 0xc704, 0xf59f, 0xe416, 0x90a9, 0x8120, 0xb3bb, 0xa232,
+ 0x5ac5, 0x4b4c, 0x79d7, 0x685e, 0x1ce1, 0x0d68, 0x3ff3, 0x2e7a,
+ 0xe70e, 0xf687, 0xc41c, 0xd595, 0xa12a, 0xb0a3, 0x8238, 0x93b1,
+ 0x6b46, 0x7acf, 0x4854, 0x59dd, 0x2d62, 0x3ceb, 0x0e70, 0x1ff9,
+ 0xf78f, 0xe606, 0xd49d, 0xc514, 0xb1ab, 0xa022, 0x92b9, 0x8330,
+ 0x7bc7, 0x6a4e, 0x58d5, 0x495c, 0x3de3, 0x2c6a, 0x1ef1, 0x0f78
+};
+EXPORT_SYMBOL(crc_ccitt_table);
+
+/*
+ * Similar table to calculate the CRC16 variant known as CRC-CCITT-FALSE.
+ * Non-reflected (msbit-first) bit order; does not augment the final value.
+ */
+u16 const crc_ccitt_false_table[256] = {
+ 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50A5, 0x60C6, 0x70E7,
+ 0x8108, 0x9129, 0xA14A, 0xB16B, 0xC18C, 0xD1AD, 0xE1CE, 0xF1EF,
+ 0x1231, 0x0210, 0x3273, 0x2252, 0x52B5, 0x4294, 0x72F7, 0x62D6,
+ 0x9339, 0x8318, 0xB37B, 0xA35A, 0xD3BD, 0xC39C, 0xF3FF, 0xE3DE,
+ 0x2462, 0x3443, 0x0420, 0x1401, 0x64E6, 0x74C7, 0x44A4, 0x5485,
+ 0xA56A, 0xB54B, 0x8528, 0x9509, 0xE5EE, 0xF5CF, 0xC5AC, 0xD58D,
+ 0x3653, 0x2672, 0x1611, 0x0630, 0x76D7, 0x66F6, 0x5695, 0x46B4,
+ 0xB75B, 0xA77A, 0x9719, 0x8738, 0xF7DF, 0xE7FE, 0xD79D, 0xC7BC,
+ 0x48C4, 0x58E5, 0x6886, 0x78A7, 0x0840, 0x1861, 0x2802, 0x3823,
+ 0xC9CC, 0xD9ED, 0xE98E, 0xF9AF, 0x8948, 0x9969, 0xA90A, 0xB92B,
+ 0x5AF5, 0x4AD4, 0x7AB7, 0x6A96, 0x1A71, 0x0A50, 0x3A33, 0x2A12,
+ 0xDBFD, 0xCBDC, 0xFBBF, 0xEB9E, 0x9B79, 0x8B58, 0xBB3B, 0xAB1A,
+ 0x6CA6, 0x7C87, 0x4CE4, 0x5CC5, 0x2C22, 0x3C03, 0x0C60, 0x1C41,
+ 0xEDAE, 0xFD8F, 0xCDEC, 0xDDCD, 0xAD2A, 0xBD0B, 0x8D68, 0x9D49,
+ 0x7E97, 0x6EB6, 0x5ED5, 0x4EF4, 0x3E13, 0x2E32, 0x1E51, 0x0E70,
+ 0xFF9F, 0xEFBE, 0xDFDD, 0xCFFC, 0xBF1B, 0xAF3A, 0x9F59, 0x8F78,
+ 0x9188, 0x81A9, 0xB1CA, 0xA1EB, 0xD10C, 0xC12D, 0xF14E, 0xE16F,
+ 0x1080, 0x00A1, 0x30C2, 0x20E3, 0x5004, 0x4025, 0x7046, 0x6067,
+ 0x83B9, 0x9398, 0xA3FB, 0xB3DA, 0xC33D, 0xD31C, 0xE37F, 0xF35E,
+ 0x02B1, 0x1290, 0x22F3, 0x32D2, 0x4235, 0x5214, 0x6277, 0x7256,
+ 0xB5EA, 0xA5CB, 0x95A8, 0x8589, 0xF56E, 0xE54F, 0xD52C, 0xC50D,
+ 0x34E2, 0x24C3, 0x14A0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
+ 0xA7DB, 0xB7FA, 0x8799, 0x97B8, 0xE75F, 0xF77E, 0xC71D, 0xD73C,
+ 0x26D3, 0x36F2, 0x0691, 0x16B0, 0x6657, 0x7676, 0x4615, 0x5634,
+ 0xD94C, 0xC96D, 0xF90E, 0xE92F, 0x99C8, 0x89E9, 0xB98A, 0xA9AB,
+ 0x5844, 0x4865, 0x7806, 0x6827, 0x18C0, 0x08E1, 0x3882, 0x28A3,
+ 0xCB7D, 0xDB5C, 0xEB3F, 0xFB1E, 0x8BF9, 0x9BD8, 0xABBB, 0xBB9A,
+ 0x4A75, 0x5A54, 0x6A37, 0x7A16, 0x0AF1, 0x1AD0, 0x2AB3, 0x3A92,
+ 0xFD2E, 0xED0F, 0xDD6C, 0xCD4D, 0xBDAA, 0xAD8B, 0x9DE8, 0x8DC9,
+ 0x7C26, 0x6C07, 0x5C64, 0x4C45, 0x3CA2, 0x2C83, 0x1CE0, 0x0CC1,
+ 0xEF1F, 0xFF3E, 0xCF5D, 0xDF7C, 0xAF9B, 0xBFBA, 0x8FD9, 0x9FF8,
+ 0x6E17, 0x7E36, 0x4E55, 0x5E74, 0x2E93, 0x3EB2, 0x0ED1, 0x1EF0
+};
+EXPORT_SYMBOL(crc_ccitt_false_table);
+
+/**
+ * crc_ccitt - recompute the CRC (CRC-CCITT variant) for the data
+ * buffer
+ * @crc: previous CRC value
+ * @buffer: data pointer
+ * @len: number of bytes in the buffer
+ */
+u16 crc_ccitt(u16 crc, u8 const *buffer, size_t len)
+{
+ while (len--)
+ crc = crc_ccitt_byte(crc, *buffer++);
+ return crc;
+}
+EXPORT_SYMBOL(crc_ccitt);
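+
+/*
+ * A minimal usage sketch; HDLC-style users conventionally seed with
+ * 0xffff (frame and frame_len stand for caller data):
+ *
+ *	u16 fcs = crc_ccitt(0xffff, frame, frame_len);
+ *
+ * Incremental use passes the previous return value back in as @crc.
+ */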
+
+/**
+ * crc_ccitt_false - recompute the CRC (CRC-CCITT-FALSE variant)
+ * for the data buffer
+ * @crc: previous CRC value
+ * @buffer: data pointer
+ * @len: number of bytes in the buffer
+ */
+u16 crc_ccitt_false(u16 crc, u8 const *buffer, size_t len)
+{
+ while (len--)
+ crc = crc_ccitt_false_byte(crc, *buffer++);
+ return crc;
+}
+EXPORT_SYMBOL(crc_ccitt_false);
+
+MODULE_DESCRIPTION("CRC-CCITT calculations");
+MODULE_LICENSE("GPL");
diff --git a/lib/crc-itu-t.c b/lib/crc-itu-t.c
new file mode 100644
index 000000000..1d26a1647
--- /dev/null
+++ b/lib/crc-itu-t.c
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * crc-itu-t.c
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/crc-itu-t.h>
+
+/* CRC table for the CRC ITU-T V.41 0x1021 (x^16 + x^12 + x^5 + 1) */
+const u16 crc_itu_t_table[256] = {
+ 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7,
+ 0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef,
+ 0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6,
+ 0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de,
+ 0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485,
+ 0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d,
+ 0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4,
+ 0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc,
+ 0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823,
+ 0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b,
+ 0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12,
+ 0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a,
+ 0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41,
+ 0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49,
+ 0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70,
+ 0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78,
+ 0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f,
+ 0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067,
+ 0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e,
+ 0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256,
+ 0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d,
+ 0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
+ 0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c,
+ 0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634,
+ 0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab,
+ 0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3,
+ 0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a,
+ 0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92,
+ 0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9,
+ 0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1,
+ 0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8,
+ 0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0
+};
+
+EXPORT_SYMBOL(crc_itu_t_table);
+
+/**
+ * crc_itu_t - Compute the CRC-ITU-T for the data buffer
+ *
+ * @crc: previous CRC value
+ * @buffer: data pointer
+ * @len: number of bytes in the buffer
+ *
+ * Returns the updated CRC value
+ */
+u16 crc_itu_t(u16 crc, const u8 *buffer, size_t len)
+{
+ while (len--)
+ crc = crc_itu_t_byte(crc, *buffer++);
+ return crc;
+}
+EXPORT_SYMBOL(crc_itu_t);
+
+MODULE_DESCRIPTION("CRC ITU-T V.41 calculations");
+MODULE_LICENSE("GPL");
+
diff --git a/lib/crc-t10dif.c b/lib/crc-t10dif.c
new file mode 100644
index 000000000..1ed2ed487
--- /dev/null
+++ b/lib/crc-t10dif.c
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * T10 Data Integrity Field CRC16 calculation
+ *
+ * Copyright (c) 2007 Oracle Corporation. All rights reserved.
+ * Written by Martin K. Petersen <martin.petersen@oracle.com>
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/crc-t10dif.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <crypto/hash.h>
+#include <crypto/algapi.h>
+#include <linux/static_key.h>
+#include <linux/notifier.h>
+
+static struct crypto_shash __rcu *crct10dif_tfm;
+static DEFINE_STATIC_KEY_TRUE(crct10dif_fallback);
+static DEFINE_MUTEX(crc_t10dif_mutex);
+static struct work_struct crct10dif_rehash_work;
+
+static int crc_t10dif_notify(struct notifier_block *self, unsigned long val, void *data)
+{
+ struct crypto_alg *alg = data;
+
+ if (val != CRYPTO_MSG_ALG_LOADED ||
+ strcmp(alg->cra_name, CRC_T10DIF_STRING))
+ return NOTIFY_DONE;
+
+ schedule_work(&crct10dif_rehash_work);
+ return NOTIFY_OK;
+}
+
+static void crc_t10dif_rehash(struct work_struct *work)
+{
+ struct crypto_shash *new, *old;
+
+ mutex_lock(&crc_t10dif_mutex);
+ old = rcu_dereference_protected(crct10dif_tfm,
+ lockdep_is_held(&crc_t10dif_mutex));
+ new = crypto_alloc_shash(CRC_T10DIF_STRING, 0, 0);
+ if (IS_ERR(new)) {
+ mutex_unlock(&crc_t10dif_mutex);
+ return;
+ }
+ rcu_assign_pointer(crct10dif_tfm, new);
+ mutex_unlock(&crc_t10dif_mutex);
+
+ if (old) {
+ synchronize_rcu();
+ crypto_free_shash(old);
+ } else {
+ static_branch_disable(&crct10dif_fallback);
+ }
+}
+
+static struct notifier_block crc_t10dif_nb = {
+ .notifier_call = crc_t10dif_notify,
+};
+
+__u16 crc_t10dif_update(__u16 crc, const unsigned char *buffer, size_t len)
+{
+ struct {
+ struct shash_desc shash;
+ __u16 crc;
+ } desc;
+ int err;
+
+ if (static_branch_unlikely(&crct10dif_fallback))
+ return crc_t10dif_generic(crc, buffer, len);
+
+ rcu_read_lock();
+ desc.shash.tfm = rcu_dereference(crct10dif_tfm);
+ desc.crc = crc;
+ err = crypto_shash_update(&desc.shash, buffer, len);
+ rcu_read_unlock();
+
+ BUG_ON(err);
+
+ return desc.crc;
+}
+EXPORT_SYMBOL(crc_t10dif_update);
+
+__u16 crc_t10dif(const unsigned char *buffer, size_t len)
+{
+ return crc_t10dif_update(0, buffer, len);
+}
+EXPORT_SYMBOL(crc_t10dif);
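+
+/*
+ * Sketch: computing the 16-bit guard tag over one 512-byte sector
+ * (sector_buf stands for caller data):
+ *
+ *	__u16 guard = crc_t10dif(sector_buf, 512);
+ *
+ * This transparently uses an accelerated crct10dif transform once the
+ * crypto layer registers one, and the generic table until then.
+ */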
+
+static int __init crc_t10dif_mod_init(void)
+{
+ INIT_WORK(&crct10dif_rehash_work, crc_t10dif_rehash);
+ crypto_register_notifier(&crc_t10dif_nb);
+ crc_t10dif_rehash(&crct10dif_rehash_work);
+ return 0;
+}
+
+static void __exit crc_t10dif_mod_fini(void)
+{
+ crypto_unregister_notifier(&crc_t10dif_nb);
+ cancel_work_sync(&crct10dif_rehash_work);
+ crypto_free_shash(rcu_dereference_protected(crct10dif_tfm, 1));
+}
+
+module_init(crc_t10dif_mod_init);
+module_exit(crc_t10dif_mod_fini);
+
+static int crc_t10dif_transform_show(char *buffer, const struct kernel_param *kp)
+{
+ struct crypto_shash *tfm;
+ int len;
+
+ if (static_branch_unlikely(&crct10dif_fallback))
+ return sprintf(buffer, "fallback\n");
+
+ rcu_read_lock();
+ tfm = rcu_dereference(crct10dif_tfm);
+ len = snprintf(buffer, PAGE_SIZE, "%s\n",
+ crypto_shash_driver_name(tfm));
+ rcu_read_unlock();
+
+ return len;
+}
+
+module_param_call(transform, NULL, crc_t10dif_transform_show, NULL, 0444);
+
+MODULE_DESCRIPTION("T10 DIF CRC calculation (library API)");
+MODULE_LICENSE("GPL");
+MODULE_SOFTDEP("pre: crct10dif");
diff --git a/lib/crc16.c b/lib/crc16.c
new file mode 100644
index 000000000..5c3a803c0
--- /dev/null
+++ b/lib/crc16.c
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * crc16.c
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/crc16.h>
+
+/** CRC table for the CRC-16. The poly is 0x8005 (x^16 + x^15 + x^2 + 1) */
+u16 const crc16_table[256] = {
+ 0x0000, 0xC0C1, 0xC181, 0x0140, 0xC301, 0x03C0, 0x0280, 0xC241,
+ 0xC601, 0x06C0, 0x0780, 0xC741, 0x0500, 0xC5C1, 0xC481, 0x0440,
+ 0xCC01, 0x0CC0, 0x0D80, 0xCD41, 0x0F00, 0xCFC1, 0xCE81, 0x0E40,
+ 0x0A00, 0xCAC1, 0xCB81, 0x0B40, 0xC901, 0x09C0, 0x0880, 0xC841,
+ 0xD801, 0x18C0, 0x1980, 0xD941, 0x1B00, 0xDBC1, 0xDA81, 0x1A40,
+ 0x1E00, 0xDEC1, 0xDF81, 0x1F40, 0xDD01, 0x1DC0, 0x1C80, 0xDC41,
+ 0x1400, 0xD4C1, 0xD581, 0x1540, 0xD701, 0x17C0, 0x1680, 0xD641,
+ 0xD201, 0x12C0, 0x1380, 0xD341, 0x1100, 0xD1C1, 0xD081, 0x1040,
+ 0xF001, 0x30C0, 0x3180, 0xF141, 0x3300, 0xF3C1, 0xF281, 0x3240,
+ 0x3600, 0xF6C1, 0xF781, 0x3740, 0xF501, 0x35C0, 0x3480, 0xF441,
+ 0x3C00, 0xFCC1, 0xFD81, 0x3D40, 0xFF01, 0x3FC0, 0x3E80, 0xFE41,
+ 0xFA01, 0x3AC0, 0x3B80, 0xFB41, 0x3900, 0xF9C1, 0xF881, 0x3840,
+ 0x2800, 0xE8C1, 0xE981, 0x2940, 0xEB01, 0x2BC0, 0x2A80, 0xEA41,
+ 0xEE01, 0x2EC0, 0x2F80, 0xEF41, 0x2D00, 0xEDC1, 0xEC81, 0x2C40,
+ 0xE401, 0x24C0, 0x2580, 0xE541, 0x2700, 0xE7C1, 0xE681, 0x2640,
+ 0x2200, 0xE2C1, 0xE381, 0x2340, 0xE101, 0x21C0, 0x2080, 0xE041,
+ 0xA001, 0x60C0, 0x6180, 0xA141, 0x6300, 0xA3C1, 0xA281, 0x6240,
+ 0x6600, 0xA6C1, 0xA781, 0x6740, 0xA501, 0x65C0, 0x6480, 0xA441,
+ 0x6C00, 0xACC1, 0xAD81, 0x6D40, 0xAF01, 0x6FC0, 0x6E80, 0xAE41,
+ 0xAA01, 0x6AC0, 0x6B80, 0xAB41, 0x6900, 0xA9C1, 0xA881, 0x6840,
+ 0x7800, 0xB8C1, 0xB981, 0x7940, 0xBB01, 0x7BC0, 0x7A80, 0xBA41,
+ 0xBE01, 0x7EC0, 0x7F80, 0xBF41, 0x7D00, 0xBDC1, 0xBC81, 0x7C40,
+ 0xB401, 0x74C0, 0x7580, 0xB541, 0x7700, 0xB7C1, 0xB681, 0x7640,
+ 0x7200, 0xB2C1, 0xB381, 0x7340, 0xB101, 0x71C0, 0x7080, 0xB041,
+ 0x5000, 0x90C1, 0x9181, 0x5140, 0x9301, 0x53C0, 0x5280, 0x9241,
+ 0x9601, 0x56C0, 0x5780, 0x9741, 0x5500, 0x95C1, 0x9481, 0x5440,
+ 0x9C01, 0x5CC0, 0x5D80, 0x9D41, 0x5F00, 0x9FC1, 0x9E81, 0x5E40,
+ 0x5A00, 0x9AC1, 0x9B81, 0x5B40, 0x9901, 0x59C0, 0x5880, 0x9841,
+ 0x8801, 0x48C0, 0x4980, 0x8941, 0x4B00, 0x8BC1, 0x8A81, 0x4A40,
+ 0x4E00, 0x8EC1, 0x8F81, 0x4F40, 0x8D01, 0x4DC0, 0x4C80, 0x8C41,
+ 0x4400, 0x84C1, 0x8581, 0x4540, 0x8701, 0x47C0, 0x4680, 0x8641,
+ 0x8201, 0x42C0, 0x4380, 0x8341, 0x4100, 0x81C1, 0x8081, 0x4040
+};
+EXPORT_SYMBOL(crc16_table);
+
+/**
+ * crc16 - compute the CRC-16 for the data buffer
+ * @crc: previous CRC value
+ * @buffer: data pointer
+ * @len: number of bytes in the buffer
+ *
+ * Returns the updated CRC value.
+ */
+u16 crc16(u16 crc, u8 const *buffer, size_t len)
+{
+ while (len--)
+ crc = crc16_byte(crc, *buffer++);
+ return crc;
+}
+EXPORT_SYMBOL(crc16);
+
+MODULE_DESCRIPTION("CRC16 calculations");
+MODULE_LICENSE("GPL");
+
diff --git a/lib/crc32.c b/lib/crc32.c
new file mode 100644
index 000000000..5649847d0
--- /dev/null
+++ b/lib/crc32.c
@@ -0,0 +1,344 @@
+/*
+ * Aug 8, 2011 Bob Pearson with help from Joakim Tjernlund and George Spelvin
+ * cleaned up code to current version of sparse and added the slicing-by-8
+ * algorithm to the closely similar existing slicing-by-4 algorithm.
+ *
+ * Oct 15, 2000 Matt Domsch <Matt_Domsch@dell.com>
+ * Nicer crc32 functions/docs submitted by linux@horizon.com. Thanks!
+ * Code was from the public domain, copyright abandoned. Code was
+ * subsequently included in the kernel, thus was re-licensed under the
+ * GNU GPL v2.
+ *
+ * Oct 12, 2000 Matt Domsch <Matt_Domsch@dell.com>
+ * Same crc32 function was used in 5 other places in the kernel.
+ * I made one version, and deleted the others.
+ * There are various incantations of crc32(). Some use a seed of 0 or ~0.
+ * Some xor at the end with ~0. The generic crc32() function takes
+ * seed as an argument, and doesn't xor at the end. Then individual
+ * users can do whatever they need.
+ * drivers/net/smc9194.c uses seed ~0, doesn't xor with ~0.
+ * fs/jffs2 uses seed 0, doesn't xor with ~0.
+ * fs/partitions/efi.c uses seed ~0, xor's with ~0.
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+
+/* see: Documentation/staging/crc32.rst for a description of algorithms */
+
+#include <linux/crc32.h>
+#include <linux/crc32poly.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include "crc32defs.h"
+
+#if CRC_LE_BITS > 8
+# define tole(x) ((__force u32) cpu_to_le32(x))
+#else
+# define tole(x) (x)
+#endif
+
+#if CRC_BE_BITS > 8
+# define tobe(x) ((__force u32) cpu_to_be32(x))
+#else
+# define tobe(x) (x)
+#endif
+
+#include "crc32table.h"
+
+MODULE_AUTHOR("Matt Domsch <Matt_Domsch@dell.com>");
+MODULE_DESCRIPTION("Various CRC32 calculations");
+MODULE_LICENSE("GPL");
+
+#if CRC_LE_BITS > 8 || CRC_BE_BITS > 8
+
+/* implements slicing-by-4 or slicing-by-8 algorithm */
+static inline u32 __pure
+crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 (*tab)[256])
+{
+# ifdef __LITTLE_ENDIAN
+# define DO_CRC(x) crc = t0[(crc ^ (x)) & 255] ^ (crc >> 8)
+# define DO_CRC4 (t3[(q) & 255] ^ t2[(q >> 8) & 255] ^ \
+ t1[(q >> 16) & 255] ^ t0[(q >> 24) & 255])
+# define DO_CRC8 (t7[(q) & 255] ^ t6[(q >> 8) & 255] ^ \
+ t5[(q >> 16) & 255] ^ t4[(q >> 24) & 255])
+# else
+# define DO_CRC(x) crc = t0[((crc >> 24) ^ (x)) & 255] ^ (crc << 8)
+# define DO_CRC4 (t0[(q) & 255] ^ t1[(q >> 8) & 255] ^ \
+ t2[(q >> 16) & 255] ^ t3[(q >> 24) & 255])
+# define DO_CRC8 (t4[(q) & 255] ^ t5[(q >> 8) & 255] ^ \
+ t6[(q >> 16) & 255] ^ t7[(q >> 24) & 255])
+# endif
+ const u32 *b;
+ size_t rem_len;
+# ifdef CONFIG_X86
+ size_t i;
+# endif
+ const u32 *t0=tab[0], *t1=tab[1], *t2=tab[2], *t3=tab[3];
+# if CRC_LE_BITS != 32
+ const u32 *t4 = tab[4], *t5 = tab[5], *t6 = tab[6], *t7 = tab[7];
+# endif
+ u32 q;
+
+ /* Align it */
+ if (unlikely((long)buf & 3 && len)) {
+ do {
+ DO_CRC(*buf++);
+ } while ((--len) && ((long)buf)&3);
+ }
+
+# if CRC_LE_BITS == 32
+ rem_len = len & 3;
+ len = len >> 2;
+# else
+ rem_len = len & 7;
+ len = len >> 3;
+# endif
+
+ b = (const u32 *)buf;
+# ifdef CONFIG_X86
+ --b;
+ for (i = 0; i < len; i++) {
+# else
+ for (--b; len; --len) {
+# endif
+ q = crc ^ *++b; /* use pre increment for speed */
+# if CRC_LE_BITS == 32
+ crc = DO_CRC4;
+# else
+ crc = DO_CRC8;
+ q = *++b;
+ crc ^= DO_CRC4;
+# endif
+ }
+ len = rem_len;
+ /* And the last few bytes */
+ if (len) {
+ u8 *p = (u8 *)(b + 1) - 1;
+# ifdef CONFIG_X86
+ for (i = 0; i < len; i++)
+ DO_CRC(*++p); /* use pre increment for speed */
+# else
+ do {
+ DO_CRC(*++p); /* use pre increment for speed */
+ } while (--len);
+# endif
+ }
+ return crc;
+#undef DO_CRC
+#undef DO_CRC4
+#undef DO_CRC8
+}
+#endif
+
+
+/**
+ * crc32_le_generic() - Calculate bitwise little-endian Ethernet AUTODIN II
+ * CRC32/CRC32C
+ * @crc: seed value for computation. ~0 for Ethernet, sometimes 0 for other
+ * uses, or the previous crc32/crc32c value if computing incrementally.
+ * @p: pointer to buffer over which CRC32/CRC32C is run
+ * @len: length of buffer @p
+ * @tab: little-endian Ethernet table
+ * @polynomial: CRC32/CRC32c LE polynomial
+ */
+static inline u32 __pure crc32_le_generic(u32 crc, unsigned char const *p,
+ size_t len, const u32 (*tab)[256],
+ u32 polynomial)
+{
+#if CRC_LE_BITS == 1
+ int i;
+ while (len--) {
+ crc ^= *p++;
+ for (i = 0; i < 8; i++)
+ crc = (crc >> 1) ^ ((crc & 1) ? polynomial : 0);
+ }
+# elif CRC_LE_BITS == 2
+ while (len--) {
+ crc ^= *p++;
+ crc = (crc >> 2) ^ tab[0][crc & 3];
+ crc = (crc >> 2) ^ tab[0][crc & 3];
+ crc = (crc >> 2) ^ tab[0][crc & 3];
+ crc = (crc >> 2) ^ tab[0][crc & 3];
+ }
+# elif CRC_LE_BITS == 4
+ while (len--) {
+ crc ^= *p++;
+ crc = (crc >> 4) ^ tab[0][crc & 15];
+ crc = (crc >> 4) ^ tab[0][crc & 15];
+ }
+# elif CRC_LE_BITS == 8
+ /* aka Sarwate algorithm */
+ while (len--) {
+ crc ^= *p++;
+ crc = (crc >> 8) ^ tab[0][crc & 255];
+ }
+# else
+ crc = (__force u32) __cpu_to_le32(crc);
+ crc = crc32_body(crc, p, len, tab);
+ crc = __le32_to_cpu((__force __le32)crc);
+#endif
+ return crc;
+}
+
+#if CRC_LE_BITS == 1
+u32 __pure __weak crc32_le(u32 crc, unsigned char const *p, size_t len)
+{
+ return crc32_le_generic(crc, p, len, NULL, CRC32_POLY_LE);
+}
+u32 __pure __weak __crc32c_le(u32 crc, unsigned char const *p, size_t len)
+{
+ return crc32_le_generic(crc, p, len, NULL, CRC32C_POLY_LE);
+}
+#else
+u32 __pure __weak crc32_le(u32 crc, unsigned char const *p, size_t len)
+{
+ return crc32_le_generic(crc, p, len, crc32table_le, CRC32_POLY_LE);
+}
+u32 __pure __weak __crc32c_le(u32 crc, unsigned char const *p, size_t len)
+{
+ return crc32_le_generic(crc, p, len, crc32ctable_le, CRC32C_POLY_LE);
+}
+#endif
+EXPORT_SYMBOL(crc32_le);
+EXPORT_SYMBOL(__crc32c_le);
+
+u32 __pure crc32_le_base(u32, unsigned char const *, size_t) __alias(crc32_le);
+u32 __pure __crc32c_le_base(u32, unsigned char const *, size_t) __alias(__crc32c_le);
+u32 __pure crc32_be_base(u32, unsigned char const *, size_t) __alias(crc32_be);
+
+/*
+ * This multiplies the polynomials x and y modulo the given modulus.
+ * This follows the "little-endian" CRC convention that the lsbit
+ * represents the highest power of x, and the msbit represents x^0.
+ */
+static u32 __attribute_const__ gf2_multiply(u32 x, u32 y, u32 modulus)
+{
+ u32 product = x & 1 ? y : 0;
+ int i;
+
+ for (i = 0; i < 31; i++) {
+ product = (product >> 1) ^ (product & 1 ? modulus : 0);
+ x >>= 1;
+ product ^= x & 1 ? y : 0;
+ }
+
+ return product;
+}
+
+/**
+ * crc32_generic_shift - Append @len 0 bytes to crc, in logarithmic time
+ * @crc: The original little-endian CRC (i.e. lsbit is x^31 coefficient)
+ * @len: The number of bytes. @crc is multiplied by x^(8*@len)
+ * @polynomial: The modulus used to reduce the result to 32 bits.
+ *
+ * It's possible to parallelize CRC computations by computing a CRC
+ * over separate ranges of a buffer, then summing them.
+ * This shifts the given CRC by 8*len bits (i.e. produces the same effect
+ * as appending len bytes of zero to the data), in time proportional
+ * to log(len).
+ */
+static u32 __attribute_const__ crc32_generic_shift(u32 crc, size_t len,
+ u32 polynomial)
+{
+ u32 power = polynomial; /* CRC of x^32 */
+ int i;
+
+ /* Shift up to 32 bits in the simple linear way */
+ for (i = 0; i < 8 * (int)(len & 3); i++)
+ crc = (crc >> 1) ^ (crc & 1 ? polynomial : 0);
+
+ len >>= 2;
+ if (!len)
+ return crc;
+
+ for (;;) {
+ /* "power" is x^(2^i), modulo the polynomial */
+ if (len & 1)
+ crc = gf2_multiply(crc, power, polynomial);
+
+ len >>= 1;
+ if (!len)
+ break;
+
+ /* Square power, advancing to x^(2^(i+1)) */
+ power = gf2_multiply(power, power, polynomial);
+ }
+
+ return crc;
+}
+
+u32 __attribute_const__ crc32_le_shift(u32 crc, size_t len)
+{
+ return crc32_generic_shift(crc, len, CRC32_POLY_LE);
+}
+
+u32 __attribute_const__ __crc32c_le_shift(u32 crc, size_t len)
+{
+ return crc32_generic_shift(crc, len, CRC32C_POLY_LE);
+}
+EXPORT_SYMBOL(crc32_le_shift);
+EXPORT_SYMBOL(__crc32c_le_shift);
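+
+/*
+ * A combination sketch building on the note above: since the
+ * non-inverting crc32_le() is linear over GF(2), two adjacent ranges
+ * can be CRCed independently and merged,
+ *
+ *	u32 lo = crc32_le(seed, buf, len1);
+ *	u32 hi = crc32_le(0, buf + len1, len2);
+ *	u32 whole = crc32_le_shift(lo, len2) ^ hi;
+ *
+ * which equals crc32_le(seed, buf, len1 + len2).
+ */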
+
+/**
+ * crc32_be_generic() - Calculate bitwise big-endian Ethernet AUTODIN II CRC32
+ * @crc: seed value for computation. ~0 for Ethernet, sometimes 0 for
+ * other uses, or the previous crc32 value if computing incrementally.
+ * @p: pointer to buffer over which CRC32 is run
+ * @len: length of buffer @p
+ * @tab: big-endian Ethernet table
+ * @polynomial: CRC32 BE polynomial
+ */
+static inline u32 __pure crc32_be_generic(u32 crc, unsigned char const *p,
+ size_t len, const u32 (*tab)[256],
+ u32 polynomial)
+{
+#if CRC_BE_BITS == 1
+ int i;
+ while (len--) {
+ crc ^= *p++ << 24;
+ for (i = 0; i < 8; i++)
+ crc = (crc << 1) ^ ((crc & 0x80000000) ? polynomial : 0);
+ }
+# elif CRC_BE_BITS == 2
+ while (len--) {
+ crc ^= *p++ << 24;
+ crc = (crc << 2) ^ tab[0][crc >> 30];
+ crc = (crc << 2) ^ tab[0][crc >> 30];
+ crc = (crc << 2) ^ tab[0][crc >> 30];
+ crc = (crc << 2) ^ tab[0][crc >> 30];
+ }
+# elif CRC_BE_BITS == 4
+ while (len--) {
+ crc ^= *p++ << 24;
+ crc = (crc << 4) ^ tab[0][crc >> 28];
+ crc = (crc << 4) ^ tab[0][crc >> 28];
+ }
+# elif CRC_BE_BITS == 8
+ while (len--) {
+ crc ^= *p++ << 24;
+ crc = (crc << 8) ^ tab[0][crc >> 24];
+ }
+# else
+ crc = (__force u32) __cpu_to_be32(crc);
+ crc = crc32_body(crc, p, len, tab);
+ crc = __be32_to_cpu((__force __be32)crc);
+# endif
+ return crc;
+}
+
+#if CRC_BE_BITS == 1
+u32 __pure __weak crc32_be(u32 crc, unsigned char const *p, size_t len)
+{
+ return crc32_be_generic(crc, p, len, NULL, CRC32_POLY_BE);
+}
+#else
+u32 __pure __weak crc32_be(u32 crc, unsigned char const *p, size_t len)
+{
+ return crc32_be_generic(crc, p, len, crc32table_be, CRC32_POLY_BE);
+}
+#endif
+EXPORT_SYMBOL(crc32_be);
diff --git a/lib/crc32defs.h b/lib/crc32defs.h
new file mode 100644
index 000000000..0c8fb5923
--- /dev/null
+++ b/lib/crc32defs.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Try to choose an implementation variant via Kconfig */
+#ifdef CONFIG_CRC32_SLICEBY8
+# define CRC_LE_BITS 64
+# define CRC_BE_BITS 64
+#endif
+#ifdef CONFIG_CRC32_SLICEBY4
+# define CRC_LE_BITS 32
+# define CRC_BE_BITS 32
+#endif
+#ifdef CONFIG_CRC32_SARWATE
+# define CRC_LE_BITS 8
+# define CRC_BE_BITS 8
+#endif
+#ifdef CONFIG_CRC32_BIT
+# define CRC_LE_BITS 1
+# define CRC_BE_BITS 1
+#endif
+
+/*
+ * How many bits at a time to use. Valid values are 1, 2, 4, 8, 32 and 64.
+ * For less performance-sensitive uses, choose 4 or 8 to save table size.
+ * For larger systems the default matches the CPU word size, which works
+ * well on X86_64 and SPARC64; other architectures may need further
+ * experimentation.
+ */
+#ifndef CRC_LE_BITS
+# ifdef CONFIG_64BIT
+# define CRC_LE_BITS 64
+# else
+# define CRC_LE_BITS 32
+# endif
+#endif
+#ifndef CRC_BE_BITS
+# ifdef CONFIG_64BIT
+# define CRC_BE_BITS 64
+# else
+# define CRC_BE_BITS 32
+# endif
+#endif
+
+/*
+ * Little-endian CRC computation. Used with serial bit streams sent
+ * lsbit-first. Be sure to use cpu_to_le32() to append the computed CRC.
+ */
+#if CRC_LE_BITS > 64 || CRC_LE_BITS < 1 || CRC_LE_BITS == 16 || \
+ CRC_LE_BITS & CRC_LE_BITS-1
+# error "CRC_LE_BITS must be one of {1, 2, 4, 8, 32, 64}"
+#endif
+
+/*
+ * Big-endian CRC computation. Used with serial bit streams sent
+ * msbit-first. Be sure to use cpu_to_be32() to append the computed CRC.
+ */
+#if CRC_BE_BITS > 64 || CRC_BE_BITS < 1 || CRC_BE_BITS == 16 || \
+ CRC_BE_BITS & CRC_BE_BITS-1
+# error "CRC_BE_BITS must be one of {1, 2, 4, 8, 32, 64}"
+#endif
diff --git a/lib/crc32test.c b/lib/crc32test.c
new file mode 100644
index 000000000..9b4af7941
--- /dev/null
+++ b/lib/crc32test.c
@@ -0,0 +1,852 @@
+/*
+ * Aug 8, 2011 Bob Pearson with help from Joakim Tjernlund and George Spelvin
+ * cleaned up code to current version of sparse and added the slicing-by-8
+ * algorithm to the closely similar existing slicing-by-4 algorithm.
+ *
+ * Oct 15, 2000 Matt Domsch <Matt_Domsch@dell.com>
+ * Nicer crc32 functions/docs submitted by linux@horizon.com. Thanks!
+ * Code was from the public domain, copyright abandoned. Code was
+ * subsequently included in the kernel, thus was re-licensed under the
+ * GNU GPL v2.
+ *
+ * Oct 12, 2000 Matt Domsch <Matt_Domsch@dell.com>
+ * Same crc32 function was used in 5 other places in the kernel.
+ * I made one version, and deleted the others.
+ * There are various incantations of crc32(). Some use a seed of 0 or ~0.
+ * Some xor at the end with ~0. The generic crc32() function takes
+ * seed as an argument, and doesn't xor at the end. Then individual
+ * users can do whatever they need.
+ * drivers/net/smc9194.c uses seed ~0, doesn't xor with ~0.
+ * fs/jffs2 uses seed 0, doesn't xor with ~0.
+ * fs/partitions/efi.c uses seed ~0, xor's with ~0.
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+
+#include <linux/crc32.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+
+#include "crc32defs.h"
+
+/* 4096 random bytes */
+static u8 const __aligned(8) test_buf[] __initconst =
+{
+ 0x5b, 0x85, 0x21, 0xcb, 0x09, 0x68, 0x7d, 0x30,
+ 0xc7, 0x69, 0xd7, 0x30, 0x92, 0xde, 0x59, 0xe4,
+ 0xc9, 0x6e, 0x8b, 0xdb, 0x98, 0x6b, 0xaa, 0x60,
+ 0xa8, 0xb5, 0xbc, 0x6c, 0xa9, 0xb1, 0x5b, 0x2c,
+ 0xea, 0xb4, 0x92, 0x6a, 0x3f, 0x79, 0x91, 0xe4,
+ 0xe9, 0x70, 0x51, 0x8c, 0x7f, 0x95, 0x6f, 0x1a,
+ 0x56, 0xa1, 0x5c, 0x27, 0x03, 0x67, 0x9f, 0x3a,
+ 0xe2, 0x31, 0x11, 0x29, 0x6b, 0x98, 0xfc, 0xc4,
+ 0x53, 0x24, 0xc5, 0x8b, 0xce, 0x47, 0xb2, 0xb9,
+ 0x32, 0xcb, 0xc1, 0xd0, 0x03, 0x57, 0x4e, 0xd4,
+ 0xe9, 0x3c, 0xa1, 0x63, 0xcf, 0x12, 0x0e, 0xca,
+ 0xe1, 0x13, 0xd1, 0x93, 0xa6, 0x88, 0x5c, 0x61,
+ 0x5b, 0xbb, 0xf0, 0x19, 0x46, 0xb4, 0xcf, 0x9e,
+ 0xb6, 0x6b, 0x4c, 0x3a, 0xcf, 0x60, 0xf9, 0x7a,
+ 0x8d, 0x07, 0x63, 0xdb, 0x40, 0xe9, 0x0b, 0x6f,
+ 0xad, 0x97, 0xf1, 0xed, 0xd0, 0x1e, 0x26, 0xfd,
+ 0xbf, 0xb7, 0xc8, 0x04, 0x94, 0xf8, 0x8b, 0x8c,
+ 0xf1, 0xab, 0x7a, 0xd4, 0xdd, 0xf3, 0xe8, 0x88,
+ 0xc3, 0xed, 0x17, 0x8a, 0x9b, 0x40, 0x0d, 0x53,
+ 0x62, 0x12, 0x03, 0x5f, 0x1b, 0x35, 0x32, 0x1f,
+ 0xb4, 0x7b, 0x93, 0x78, 0x0d, 0xdb, 0xce, 0xa4,
+ 0xc0, 0x47, 0xd5, 0xbf, 0x68, 0xe8, 0x5d, 0x74,
+ 0x8f, 0x8e, 0x75, 0x1c, 0xb2, 0x4f, 0x9a, 0x60,
+ 0xd1, 0xbe, 0x10, 0xf4, 0x5c, 0xa1, 0x53, 0x09,
+ 0xa5, 0xe0, 0x09, 0x54, 0x85, 0x5c, 0xdc, 0x07,
+ 0xe7, 0x21, 0x69, 0x7b, 0x8a, 0xfd, 0x90, 0xf1,
+ 0x22, 0xd0, 0xb4, 0x36, 0x28, 0xe6, 0xb8, 0x0f,
+ 0x39, 0xde, 0xc8, 0xf3, 0x86, 0x60, 0x34, 0xd2,
+ 0x5e, 0xdf, 0xfd, 0xcf, 0x0f, 0xa9, 0x65, 0xf0,
+ 0xd5, 0x4d, 0x96, 0x40, 0xe3, 0xdf, 0x3f, 0x95,
+ 0x5a, 0x39, 0x19, 0x93, 0xf4, 0x75, 0xce, 0x22,
+ 0x00, 0x1c, 0x93, 0xe2, 0x03, 0x66, 0xf4, 0x93,
+ 0x73, 0x86, 0x81, 0x8e, 0x29, 0x44, 0x48, 0x86,
+ 0x61, 0x7c, 0x48, 0xa3, 0x43, 0xd2, 0x9c, 0x8d,
+ 0xd4, 0x95, 0xdd, 0xe1, 0x22, 0x89, 0x3a, 0x40,
+ 0x4c, 0x1b, 0x8a, 0x04, 0xa8, 0x09, 0x69, 0x8b,
+ 0xea, 0xc6, 0x55, 0x8e, 0x57, 0xe6, 0x64, 0x35,
+ 0xf0, 0xc7, 0x16, 0x9f, 0x5d, 0x5e, 0x86, 0x40,
+ 0x46, 0xbb, 0xe5, 0x45, 0x88, 0xfe, 0xc9, 0x63,
+ 0x15, 0xfb, 0xf5, 0xbd, 0x71, 0x61, 0xeb, 0x7b,
+ 0x78, 0x70, 0x07, 0x31, 0x03, 0x9f, 0xb2, 0xc8,
+ 0xa7, 0xab, 0x47, 0xfd, 0xdf, 0xa0, 0x78, 0x72,
+ 0xa4, 0x2a, 0xe4, 0xb6, 0xba, 0xc0, 0x1e, 0x86,
+ 0x71, 0xe6, 0x3d, 0x18, 0x37, 0x70, 0xe6, 0xff,
+ 0xe0, 0xbc, 0x0b, 0x22, 0xa0, 0x1f, 0xd3, 0xed,
+ 0xa2, 0x55, 0x39, 0xab, 0xa8, 0x13, 0x73, 0x7c,
+ 0x3f, 0xb2, 0xd6, 0x19, 0xac, 0xff, 0x99, 0xed,
+ 0xe8, 0xe6, 0xa6, 0x22, 0xe3, 0x9c, 0xf1, 0x30,
+ 0xdc, 0x01, 0x0a, 0x56, 0xfa, 0xe4, 0xc9, 0x99,
+ 0xdd, 0xa8, 0xd8, 0xda, 0x35, 0x51, 0x73, 0xb4,
+ 0x40, 0x86, 0x85, 0xdb, 0x5c, 0xd5, 0x85, 0x80,
+ 0x14, 0x9c, 0xfd, 0x98, 0xa9, 0x82, 0xc5, 0x37,
+ 0xff, 0x32, 0x5d, 0xd0, 0x0b, 0xfa, 0xdc, 0x04,
+ 0x5e, 0x09, 0xd2, 0xca, 0x17, 0x4b, 0x1a, 0x8e,
+ 0x15, 0xe1, 0xcc, 0x4e, 0x52, 0x88, 0x35, 0xbd,
+ 0x48, 0xfe, 0x15, 0xa0, 0x91, 0xfd, 0x7e, 0x6c,
+ 0x0e, 0x5d, 0x79, 0x1b, 0x81, 0x79, 0xd2, 0x09,
+ 0x34, 0x70, 0x3d, 0x81, 0xec, 0xf6, 0x24, 0xbb,
+ 0xfb, 0xf1, 0x7b, 0xdf, 0x54, 0xea, 0x80, 0x9b,
+ 0xc7, 0x99, 0x9e, 0xbd, 0x16, 0x78, 0x12, 0x53,
+ 0x5e, 0x01, 0xa7, 0x4e, 0xbd, 0x67, 0xe1, 0x9b,
+ 0x4c, 0x0e, 0x61, 0x45, 0x97, 0xd2, 0xf0, 0x0f,
+ 0xfe, 0x15, 0x08, 0xb7, 0x11, 0x4c, 0xe7, 0xff,
+ 0x81, 0x53, 0xff, 0x91, 0x25, 0x38, 0x7e, 0x40,
+ 0x94, 0xe5, 0xe0, 0xad, 0xe6, 0xd9, 0x79, 0xb6,
+ 0x92, 0xc9, 0xfc, 0xde, 0xc3, 0x1a, 0x23, 0xbb,
+ 0xdd, 0xc8, 0x51, 0x0c, 0x3a, 0x72, 0xfa, 0x73,
+ 0x6f, 0xb7, 0xee, 0x61, 0x39, 0x03, 0x01, 0x3f,
+ 0x7f, 0x94, 0x2e, 0x2e, 0xba, 0x3a, 0xbb, 0xb4,
+ 0xfa, 0x6a, 0x17, 0xfe, 0xea, 0xef, 0x5e, 0x66,
+ 0x97, 0x3f, 0x32, 0x3d, 0xd7, 0x3e, 0xb1, 0xf1,
+ 0x6c, 0x14, 0x4c, 0xfd, 0x37, 0xd3, 0x38, 0x80,
+ 0xfb, 0xde, 0xa6, 0x24, 0x1e, 0xc8, 0xca, 0x7f,
+ 0x3a, 0x93, 0xd8, 0x8b, 0x18, 0x13, 0xb2, 0xe5,
+ 0xe4, 0x93, 0x05, 0x53, 0x4f, 0x84, 0x66, 0xa7,
+ 0x58, 0x5c, 0x7b, 0x86, 0x52, 0x6d, 0x0d, 0xce,
+ 0xa4, 0x30, 0x7d, 0xb6, 0x18, 0x9f, 0xeb, 0xff,
+ 0x22, 0xbb, 0x72, 0x29, 0xb9, 0x44, 0x0b, 0x48,
+ 0x1e, 0x84, 0x71, 0x81, 0xe3, 0x6d, 0x73, 0x26,
+ 0x92, 0xb4, 0x4d, 0x2a, 0x29, 0xb8, 0x1f, 0x72,
+ 0xed, 0xd0, 0xe1, 0x64, 0x77, 0xea, 0x8e, 0x88,
+ 0x0f, 0xef, 0x3f, 0xb1, 0x3b, 0xad, 0xf9, 0xc9,
+ 0x8b, 0xd0, 0xac, 0xc6, 0xcc, 0xa9, 0x40, 0xcc,
+ 0x76, 0xf6, 0x3b, 0x53, 0xb5, 0x88, 0xcb, 0xc8,
+ 0x37, 0xf1, 0xa2, 0xba, 0x23, 0x15, 0x99, 0x09,
+ 0xcc, 0xe7, 0x7a, 0x3b, 0x37, 0xf7, 0x58, 0xc8,
+ 0x46, 0x8c, 0x2b, 0x2f, 0x4e, 0x0e, 0xa6, 0x5c,
+ 0xea, 0x85, 0x55, 0xba, 0x02, 0x0e, 0x0e, 0x48,
+ 0xbc, 0xe1, 0xb1, 0x01, 0x35, 0x79, 0x13, 0x3d,
+ 0x1b, 0xc0, 0x53, 0x68, 0x11, 0xe7, 0x95, 0x0f,
+ 0x9d, 0x3f, 0x4c, 0x47, 0x7b, 0x4d, 0x1c, 0xae,
+ 0x50, 0x9b, 0xcb, 0xdd, 0x05, 0x8d, 0x9a, 0x97,
+ 0xfd, 0x8c, 0xef, 0x0c, 0x1d, 0x67, 0x73, 0xa8,
+ 0x28, 0x36, 0xd5, 0xb6, 0x92, 0x33, 0x40, 0x75,
+ 0x0b, 0x51, 0xc3, 0x64, 0xba, 0x1d, 0xc2, 0xcc,
+ 0xee, 0x7d, 0x54, 0x0f, 0x27, 0x69, 0xa7, 0x27,
+ 0x63, 0x30, 0x29, 0xd9, 0xc8, 0x84, 0xd8, 0xdf,
+ 0x9f, 0x68, 0x8d, 0x04, 0xca, 0xa6, 0xc5, 0xc7,
+ 0x7a, 0x5c, 0xc8, 0xd1, 0xcb, 0x4a, 0xec, 0xd0,
+ 0xd8, 0x20, 0x69, 0xc5, 0x17, 0xcd, 0x78, 0xc8,
+ 0x75, 0x23, 0x30, 0x69, 0xc9, 0xd4, 0xea, 0x5c,
+ 0x4f, 0x6b, 0x86, 0x3f, 0x8b, 0xfe, 0xee, 0x44,
+ 0xc9, 0x7c, 0xb7, 0xdd, 0x3e, 0xe5, 0xec, 0x54,
+ 0x03, 0x3e, 0xaa, 0x82, 0xc6, 0xdf, 0xb2, 0x38,
+ 0x0e, 0x5d, 0xb3, 0x88, 0xd9, 0xd3, 0x69, 0x5f,
+ 0x8f, 0x70, 0x8a, 0x7e, 0x11, 0xd9, 0x1e, 0x7b,
+ 0x38, 0xf1, 0x42, 0x1a, 0xc0, 0x35, 0xf5, 0xc7,
+ 0x36, 0x85, 0xf5, 0xf7, 0xb8, 0x7e, 0xc7, 0xef,
+ 0x18, 0xf1, 0x63, 0xd6, 0x7a, 0xc6, 0xc9, 0x0e,
+ 0x4d, 0x69, 0x4f, 0x84, 0xef, 0x26, 0x41, 0x0c,
+ 0xec, 0xc7, 0xe0, 0x7e, 0x3c, 0x67, 0x01, 0x4c,
+ 0x62, 0x1a, 0x20, 0x6f, 0xee, 0x47, 0x4d, 0xc0,
+ 0x99, 0x13, 0x8d, 0x91, 0x4a, 0x26, 0xd4, 0x37,
+ 0x28, 0x90, 0x58, 0x75, 0x66, 0x2b, 0x0a, 0xdf,
+ 0xda, 0xee, 0x92, 0x25, 0x90, 0x62, 0x39, 0x9e,
+ 0x44, 0x98, 0xad, 0xc1, 0x88, 0xed, 0xe4, 0xb4,
+ 0xaf, 0xf5, 0x8c, 0x9b, 0x48, 0x4d, 0x56, 0x60,
+ 0x97, 0x0f, 0x61, 0x59, 0x9e, 0xa6, 0x27, 0xfe,
+ 0xc1, 0x91, 0x15, 0x38, 0xb8, 0x0f, 0xae, 0x61,
+ 0x7d, 0x26, 0x13, 0x5a, 0x73, 0xff, 0x1c, 0xa3,
+ 0x61, 0x04, 0x58, 0x48, 0x55, 0x44, 0x11, 0xfe,
+ 0x15, 0xca, 0xc3, 0xbd, 0xca, 0xc5, 0xb4, 0x40,
+ 0x5d, 0x1b, 0x7f, 0x39, 0xb5, 0x9c, 0x35, 0xec,
+ 0x61, 0x15, 0x32, 0x32, 0xb8, 0x4e, 0x40, 0x9f,
+ 0x17, 0x1f, 0x0a, 0x4d, 0xa9, 0x91, 0xef, 0xb7,
+ 0xb0, 0xeb, 0xc2, 0x83, 0x9a, 0x6c, 0xd2, 0x79,
+ 0x43, 0x78, 0x5e, 0x2f, 0xe5, 0xdd, 0x1a, 0x3c,
+ 0x45, 0xab, 0x29, 0x40, 0x3a, 0x37, 0x5b, 0x6f,
+ 0xd7, 0xfc, 0x48, 0x64, 0x3c, 0x49, 0xfb, 0x21,
+ 0xbe, 0xc3, 0xff, 0x07, 0xfb, 0x17, 0xe9, 0xc9,
+ 0x0c, 0x4c, 0x5c, 0x15, 0x9e, 0x8e, 0x22, 0x30,
+ 0x0a, 0xde, 0x48, 0x7f, 0xdb, 0x0d, 0xd1, 0x2b,
+ 0x87, 0x38, 0x9e, 0xcc, 0x5a, 0x01, 0x16, 0xee,
+ 0x75, 0x49, 0x0d, 0x30, 0x01, 0x34, 0x6a, 0xb6,
+ 0x9a, 0x5a, 0x2a, 0xec, 0xbb, 0x48, 0xac, 0xd3,
+ 0x77, 0x83, 0xd8, 0x08, 0x86, 0x4f, 0x48, 0x09,
+ 0x29, 0x41, 0x79, 0xa1, 0x03, 0x12, 0xc4, 0xcd,
+ 0x90, 0x55, 0x47, 0x66, 0x74, 0x9a, 0xcc, 0x4f,
+ 0x35, 0x8c, 0xd6, 0x98, 0xef, 0xeb, 0x45, 0xb9,
+ 0x9a, 0x26, 0x2f, 0x39, 0xa5, 0x70, 0x6d, 0xfc,
+ 0xb4, 0x51, 0xee, 0xf4, 0x9c, 0xe7, 0x38, 0x59,
+ 0xad, 0xf4, 0xbc, 0x46, 0xff, 0x46, 0x8e, 0x60,
+ 0x9c, 0xa3, 0x60, 0x1d, 0xf8, 0x26, 0x72, 0xf5,
+ 0x72, 0x9d, 0x68, 0x80, 0x04, 0xf6, 0x0b, 0xa1,
+ 0x0a, 0xd5, 0xa7, 0x82, 0x3a, 0x3e, 0x47, 0xa8,
+ 0x5a, 0xde, 0x59, 0x4f, 0x7b, 0x07, 0xb3, 0xe9,
+ 0x24, 0x19, 0x3d, 0x34, 0x05, 0xec, 0xf1, 0xab,
+ 0x6e, 0x64, 0x8f, 0xd3, 0xe6, 0x41, 0x86, 0x80,
+ 0x70, 0xe3, 0x8d, 0x60, 0x9c, 0x34, 0x25, 0x01,
+ 0x07, 0x4d, 0x19, 0x41, 0x4e, 0x3d, 0x5c, 0x7e,
+ 0xa8, 0xf5, 0xcc, 0xd5, 0x7b, 0xe2, 0x7d, 0x3d,
+ 0x49, 0x86, 0x7d, 0x07, 0xb7, 0x10, 0xe3, 0x35,
+ 0xb8, 0x84, 0x6d, 0x76, 0xab, 0x17, 0xc6, 0x38,
+ 0xb4, 0xd3, 0x28, 0x57, 0xad, 0xd3, 0x88, 0x5a,
+ 0xda, 0xea, 0xc8, 0x94, 0xcc, 0x37, 0x19, 0xac,
+ 0x9c, 0x9f, 0x4b, 0x00, 0x15, 0xc0, 0xc8, 0xca,
+ 0x1f, 0x15, 0xaa, 0xe0, 0xdb, 0xf9, 0x2f, 0x57,
+ 0x1b, 0x24, 0xc7, 0x6f, 0x76, 0x29, 0xfb, 0xed,
+ 0x25, 0x0d, 0xc0, 0xfe, 0xbd, 0x5a, 0xbf, 0x20,
+ 0x08, 0x51, 0x05, 0xec, 0x71, 0xa3, 0xbf, 0xef,
+ 0x5e, 0x99, 0x75, 0xdb, 0x3c, 0x5f, 0x9a, 0x8c,
+ 0xbb, 0x19, 0x5c, 0x0e, 0x93, 0x19, 0xf8, 0x6a,
+ 0xbc, 0xf2, 0x12, 0x54, 0x2f, 0xcb, 0x28, 0x64,
+ 0x88, 0xb3, 0x92, 0x0d, 0x96, 0xd1, 0xa6, 0xe4,
+ 0x1f, 0xf1, 0x4d, 0xa4, 0xab, 0x1c, 0xee, 0x54,
+ 0xf2, 0xad, 0x29, 0x6d, 0x32, 0x37, 0xb2, 0x16,
+ 0x77, 0x5c, 0xdc, 0x2e, 0x54, 0xec, 0x75, 0x26,
+ 0xc6, 0x36, 0xd9, 0x17, 0x2c, 0xf1, 0x7a, 0xdc,
+ 0x4b, 0xf1, 0xe2, 0xd9, 0x95, 0xba, 0xac, 0x87,
+ 0xc1, 0xf3, 0x8e, 0x58, 0x08, 0xd8, 0x87, 0x60,
+ 0xc9, 0xee, 0x6a, 0xde, 0xa4, 0xd2, 0xfc, 0x0d,
+ 0xe5, 0x36, 0xc4, 0x5c, 0x52, 0xb3, 0x07, 0x54,
+ 0x65, 0x24, 0xc1, 0xb1, 0xd1, 0xb1, 0x53, 0x13,
+ 0x31, 0x79, 0x7f, 0x05, 0x76, 0xeb, 0x37, 0x59,
+ 0x15, 0x2b, 0xd1, 0x3f, 0xac, 0x08, 0x97, 0xeb,
+ 0x91, 0x98, 0xdf, 0x6c, 0x09, 0x0d, 0x04, 0x9f,
+ 0xdc, 0x3b, 0x0e, 0x60, 0x68, 0x47, 0x23, 0x15,
+ 0x16, 0xc6, 0x0b, 0x35, 0xf8, 0x77, 0xa2, 0x78,
+ 0x50, 0xd4, 0x64, 0x22, 0x33, 0xff, 0xfb, 0x93,
+ 0x71, 0x46, 0x50, 0x39, 0x1b, 0x9c, 0xea, 0x4e,
+ 0x8d, 0x0c, 0x37, 0xe5, 0x5c, 0x51, 0x3a, 0x31,
+ 0xb2, 0x85, 0x84, 0x3f, 0x41, 0xee, 0xa2, 0xc1,
+ 0xc6, 0x13, 0x3b, 0x54, 0x28, 0xd2, 0x18, 0x37,
+ 0xcc, 0x46, 0x9f, 0x6a, 0x91, 0x3d, 0x5a, 0x15,
+ 0x3c, 0x89, 0xa3, 0x61, 0x06, 0x7d, 0x2e, 0x78,
+ 0xbe, 0x7d, 0x40, 0xba, 0x2f, 0x95, 0xb1, 0x2f,
+ 0x87, 0x3b, 0x8a, 0xbe, 0x6a, 0xf4, 0xc2, 0x31,
+ 0x74, 0xee, 0x91, 0xe0, 0x23, 0xaa, 0x5d, 0x7f,
+ 0xdd, 0xf0, 0x44, 0x8c, 0x0b, 0x59, 0x2b, 0xfc,
+ 0x48, 0x3a, 0xdf, 0x07, 0x05, 0x38, 0x6c, 0xc9,
+ 0xeb, 0x18, 0x24, 0x68, 0x8d, 0x58, 0x98, 0xd3,
+ 0x31, 0xa3, 0xe4, 0x70, 0x59, 0xb1, 0x21, 0xbe,
+ 0x7e, 0x65, 0x7d, 0xb8, 0x04, 0xab, 0xf6, 0xe4,
+ 0xd7, 0xda, 0xec, 0x09, 0x8f, 0xda, 0x6d, 0x24,
+ 0x07, 0xcc, 0x29, 0x17, 0x05, 0x78, 0x1a, 0xc1,
+ 0xb1, 0xce, 0xfc, 0xaa, 0x2d, 0xe7, 0xcc, 0x85,
+ 0x84, 0x84, 0x03, 0x2a, 0x0c, 0x3f, 0xa9, 0xf8,
+ 0xfd, 0x84, 0x53, 0x59, 0x5c, 0xf0, 0xd4, 0x09,
+ 0xf0, 0xd2, 0x6c, 0x32, 0x03, 0xb0, 0xa0, 0x8c,
+ 0x52, 0xeb, 0x23, 0x91, 0x88, 0x43, 0x13, 0x46,
+ 0xf6, 0x1e, 0xb4, 0x1b, 0xf5, 0x8e, 0x3a, 0xb5,
+ 0x3d, 0x00, 0xf6, 0xe5, 0x08, 0x3d, 0x5f, 0x39,
+ 0xd3, 0x21, 0x69, 0xbc, 0x03, 0x22, 0x3a, 0xd2,
+ 0x5c, 0x84, 0xf8, 0x15, 0xc4, 0x80, 0x0b, 0xbc,
+ 0x29, 0x3c, 0xf3, 0x95, 0x98, 0xcd, 0x8f, 0x35,
+ 0xbc, 0xa5, 0x3e, 0xfc, 0xd4, 0x13, 0x9e, 0xde,
+ 0x4f, 0xce, 0x71, 0x9d, 0x09, 0xad, 0xf2, 0x80,
+ 0x6b, 0x65, 0x7f, 0x03, 0x00, 0x14, 0x7c, 0x15,
+ 0x85, 0x40, 0x6d, 0x70, 0xea, 0xdc, 0xb3, 0x63,
+ 0x35, 0x4f, 0x4d, 0xe0, 0xd9, 0xd5, 0x3c, 0x58,
+ 0x56, 0x23, 0x80, 0xe2, 0x36, 0xdd, 0x75, 0x1d,
+ 0x94, 0x11, 0x41, 0x8e, 0xe0, 0x81, 0x8e, 0xcf,
+ 0xe0, 0xe5, 0xf6, 0xde, 0xd1, 0xe7, 0x04, 0x12,
+ 0x79, 0x92, 0x2b, 0x71, 0x2a, 0x79, 0x8b, 0x7c,
+ 0x44, 0x79, 0x16, 0x30, 0x4e, 0xf4, 0xf6, 0x9b,
+ 0xb7, 0x40, 0xa3, 0x5a, 0xa7, 0x69, 0x3e, 0xc1,
+ 0x3a, 0x04, 0xd0, 0x88, 0xa0, 0x3b, 0xdd, 0xc6,
+ 0x9e, 0x7e, 0x1e, 0x1e, 0x8f, 0x44, 0xf7, 0x73,
+ 0x67, 0x1e, 0x1a, 0x78, 0xfa, 0x62, 0xf4, 0xa9,
+ 0xa8, 0xc6, 0x5b, 0xb8, 0xfa, 0x06, 0x7d, 0x5e,
+ 0x38, 0x1c, 0x9a, 0x39, 0xe9, 0x39, 0x98, 0x22,
+ 0x0b, 0xa7, 0xac, 0x0b, 0xf3, 0xbc, 0xf1, 0xeb,
+ 0x8c, 0x81, 0xe3, 0x48, 0x8a, 0xed, 0x42, 0xc2,
+ 0x38, 0xcf, 0x3e, 0xda, 0xd2, 0x89, 0x8d, 0x9c,
+ 0x53, 0xb5, 0x2f, 0x41, 0x01, 0x26, 0x84, 0x9c,
+ 0xa3, 0x56, 0xf6, 0x49, 0xc7, 0xd4, 0x9f, 0x93,
+ 0x1b, 0x96, 0x49, 0x5e, 0xad, 0xb3, 0x84, 0x1f,
+ 0x3c, 0xa4, 0xe0, 0x9b, 0xd1, 0x90, 0xbc, 0x38,
+ 0x6c, 0xdd, 0x95, 0x4d, 0x9d, 0xb1, 0x71, 0x57,
+ 0x2d, 0x34, 0xe8, 0xb8, 0x42, 0xc7, 0x99, 0x03,
+ 0xc7, 0x07, 0x30, 0x65, 0x91, 0x55, 0xd5, 0x90,
+ 0x70, 0x97, 0x37, 0x68, 0xd4, 0x11, 0xf9, 0xe8,
+ 0xce, 0xec, 0xdc, 0x34, 0xd5, 0xd3, 0xb7, 0xc4,
+ 0xb8, 0x97, 0x05, 0x92, 0xad, 0xf8, 0xe2, 0x36,
+ 0x64, 0x41, 0xc9, 0xc5, 0x41, 0x77, 0x52, 0xd7,
+ 0x2c, 0xa5, 0x24, 0x2f, 0xd9, 0x34, 0x0b, 0x47,
+ 0x35, 0xa7, 0x28, 0x8b, 0xc5, 0xcd, 0xe9, 0x46,
+ 0xac, 0x39, 0x94, 0x3c, 0x10, 0xc6, 0x29, 0x73,
+ 0x0e, 0x0e, 0x5d, 0xe0, 0x71, 0x03, 0x8a, 0x72,
+ 0x0e, 0x26, 0xb0, 0x7d, 0x84, 0xed, 0x95, 0x23,
+ 0x49, 0x5a, 0x45, 0x83, 0x45, 0x60, 0x11, 0x4a,
+ 0x46, 0x31, 0xd4, 0xd8, 0x16, 0x54, 0x98, 0x58,
+ 0xed, 0x6d, 0xcc, 0x5d, 0xd6, 0x50, 0x61, 0x9f,
+ 0x9d, 0xc5, 0x3e, 0x9d, 0x32, 0x47, 0xde, 0x96,
+ 0xe1, 0x5d, 0xd8, 0xf8, 0xb4, 0x69, 0x6f, 0xb9,
+ 0x15, 0x90, 0x57, 0x7a, 0xf6, 0xad, 0xb0, 0x5b,
+ 0xf5, 0xa6, 0x36, 0x94, 0xfd, 0x84, 0xce, 0x1c,
+ 0x0f, 0x4b, 0xd0, 0xc2, 0x5b, 0x6b, 0x56, 0xef,
+ 0x73, 0x93, 0x0b, 0xc3, 0xee, 0xd9, 0xcf, 0xd3,
+ 0xa4, 0x22, 0x58, 0xcd, 0x50, 0x6e, 0x65, 0xf4,
+ 0xe9, 0xb7, 0x71, 0xaf, 0x4b, 0xb3, 0xb6, 0x2f,
+ 0x0f, 0x0e, 0x3b, 0xc9, 0x85, 0x14, 0xf5, 0x17,
+ 0xe8, 0x7a, 0x3a, 0xbf, 0x5f, 0x5e, 0xf8, 0x18,
+ 0x48, 0xa6, 0x72, 0xab, 0x06, 0x95, 0xe9, 0xc8,
+ 0xa7, 0xf4, 0x32, 0x44, 0x04, 0x0c, 0x84, 0x98,
+ 0x73, 0xe3, 0x89, 0x8d, 0x5f, 0x7e, 0x4a, 0x42,
+ 0x8f, 0xc5, 0x28, 0xb1, 0x82, 0xef, 0x1c, 0x97,
+ 0x31, 0x3b, 0x4d, 0xe0, 0x0e, 0x10, 0x10, 0x97,
+ 0x93, 0x49, 0x78, 0x2f, 0x0d, 0x86, 0x8b, 0xa1,
+ 0x53, 0xa9, 0x81, 0x20, 0x79, 0xe7, 0x07, 0x77,
+ 0xb6, 0xac, 0x5e, 0xd2, 0x05, 0xcd, 0xe9, 0xdb,
+ 0x8a, 0x94, 0x82, 0x8a, 0x23, 0xb9, 0x3d, 0x1c,
+ 0xa9, 0x7d, 0x72, 0x4a, 0xed, 0x33, 0xa3, 0xdb,
+ 0x21, 0xa7, 0x86, 0x33, 0x45, 0xa5, 0xaa, 0x56,
+ 0x45, 0xb5, 0x83, 0x29, 0x40, 0x47, 0x79, 0x04,
+ 0x6e, 0xb9, 0x95, 0xd0, 0x81, 0x77, 0x2d, 0x48,
+ 0x1e, 0xfe, 0xc3, 0xc2, 0x1e, 0xe5, 0xf2, 0xbe,
+ 0xfd, 0x3b, 0x94, 0x9f, 0xc4, 0xc4, 0x26, 0x9d,
+ 0xe4, 0x66, 0x1e, 0x19, 0xee, 0x6c, 0x79, 0x97,
+ 0x11, 0x31, 0x4b, 0x0d, 0x01, 0xcb, 0xde, 0xa8,
+ 0xf6, 0x6d, 0x7c, 0x39, 0x46, 0x4e, 0x7e, 0x3f,
+ 0x94, 0x17, 0xdf, 0xa1, 0x7d, 0xd9, 0x1c, 0x8e,
+ 0xbc, 0x7d, 0x33, 0x7d, 0xe3, 0x12, 0x40, 0xca,
+ 0xab, 0x37, 0x11, 0x46, 0xd4, 0xae, 0xef, 0x44,
+ 0xa2, 0xb3, 0x6a, 0x66, 0x0e, 0x0c, 0x90, 0x7f,
+ 0xdf, 0x5c, 0x66, 0x5f, 0xf2, 0x94, 0x9f, 0xa6,
+ 0x73, 0x4f, 0xeb, 0x0d, 0xad, 0xbf, 0xc0, 0x63,
+ 0x5c, 0xdc, 0x46, 0x51, 0xe8, 0x8e, 0x90, 0x19,
+ 0xa8, 0xa4, 0x3c, 0x91, 0x79, 0xfa, 0x7e, 0x58,
+ 0x85, 0x13, 0x55, 0xc5, 0x19, 0x82, 0x37, 0x1b,
+ 0x0a, 0x02, 0x1f, 0x99, 0x6b, 0x18, 0xf1, 0x28,
+ 0x08, 0xa2, 0x73, 0xb8, 0x0f, 0x2e, 0xcd, 0xbf,
+ 0xf3, 0x86, 0x7f, 0xea, 0xef, 0xd0, 0xbb, 0xa6,
+ 0x21, 0xdf, 0x49, 0x73, 0x51, 0xcc, 0x36, 0xd3,
+ 0x3e, 0xa0, 0xf8, 0x44, 0xdf, 0xd3, 0xa6, 0xbe,
+ 0x8a, 0xd4, 0x57, 0xdd, 0x72, 0x94, 0x61, 0x0f,
+ 0x82, 0xd1, 0x07, 0xb8, 0x7c, 0x18, 0x83, 0xdf,
+ 0x3a, 0xe5, 0x50, 0x6a, 0x82, 0x20, 0xac, 0xa9,
+ 0xa8, 0xff, 0xd9, 0xf3, 0x77, 0x33, 0x5a, 0x9e,
+ 0x7f, 0x6d, 0xfe, 0x5d, 0x33, 0x41, 0x42, 0xe7,
+ 0x6c, 0x19, 0xe0, 0x44, 0x8a, 0x15, 0xf6, 0x70,
+ 0x98, 0xb7, 0x68, 0x4d, 0xfa, 0x97, 0x39, 0xb0,
+ 0x8e, 0xe8, 0x84, 0x8b, 0x75, 0x30, 0xb7, 0x7d,
+ 0x92, 0x69, 0x20, 0x9c, 0x81, 0xfb, 0x4b, 0xf4,
+ 0x01, 0x50, 0xeb, 0xce, 0x0c, 0x1c, 0x6c, 0xb5,
+ 0x4a, 0xd7, 0x27, 0x0c, 0xce, 0xbb, 0xe5, 0x85,
+ 0xf0, 0xb6, 0xee, 0xd5, 0x70, 0xdd, 0x3b, 0xfc,
+ 0xd4, 0x99, 0xf1, 0x33, 0xdd, 0x8b, 0xc4, 0x2f,
+ 0xae, 0xab, 0x74, 0x96, 0x32, 0xc7, 0x4c, 0x56,
+ 0x3c, 0x89, 0x0f, 0x96, 0x0b, 0x42, 0xc0, 0xcb,
+ 0xee, 0x0f, 0x0b, 0x8c, 0xfb, 0x7e, 0x47, 0x7b,
+ 0x64, 0x48, 0xfd, 0xb2, 0x00, 0x80, 0x89, 0xa5,
+ 0x13, 0x55, 0x62, 0xfc, 0x8f, 0xe2, 0x42, 0x03,
+ 0xb7, 0x4e, 0x2a, 0x79, 0xb4, 0x82, 0xea, 0x23,
+ 0x49, 0xda, 0xaf, 0x52, 0x63, 0x1e, 0x60, 0x03,
+ 0x89, 0x06, 0x44, 0x46, 0x08, 0xc3, 0xc4, 0x87,
+ 0x70, 0x2e, 0xda, 0x94, 0xad, 0x6b, 0xe0, 0xe4,
+ 0xd1, 0x8a, 0x06, 0xc2, 0xa8, 0xc0, 0xa7, 0x43,
+ 0x3c, 0x47, 0x52, 0x0e, 0xc3, 0x77, 0x81, 0x11,
+ 0x67, 0x0e, 0xa0, 0x70, 0x04, 0x47, 0x29, 0x40,
+ 0x86, 0x0d, 0x34, 0x56, 0xa7, 0xc9, 0x35, 0x59,
+ 0x68, 0xdc, 0x93, 0x81, 0x70, 0xee, 0x86, 0xd9,
+ 0x80, 0x06, 0x40, 0x4f, 0x1a, 0x0d, 0x40, 0x30,
+ 0x0b, 0xcb, 0x96, 0x47, 0xc1, 0xb7, 0x52, 0xfd,
+ 0x56, 0xe0, 0x72, 0x4b, 0xfb, 0xbd, 0x92, 0x45,
+ 0x61, 0x71, 0xc2, 0x33, 0x11, 0xbf, 0x52, 0x83,
+ 0x79, 0x26, 0xe0, 0x49, 0x6b, 0xb7, 0x05, 0x8b,
+ 0xe8, 0x0e, 0x87, 0x31, 0xd7, 0x9d, 0x8a, 0xf5,
+ 0xc0, 0x5f, 0x2e, 0x58, 0x4a, 0xdb, 0x11, 0xb3,
+ 0x6c, 0x30, 0x2a, 0x46, 0x19, 0xe3, 0x27, 0x84,
+ 0x1f, 0x63, 0x6e, 0xf6, 0x57, 0xc7, 0xc9, 0xd8,
+ 0x5e, 0xba, 0xb3, 0x87, 0xd5, 0x83, 0x26, 0x34,
+ 0x21, 0x9e, 0x65, 0xde, 0x42, 0xd3, 0xbe, 0x7b,
+ 0xbc, 0x91, 0x71, 0x44, 0x4d, 0x99, 0x3b, 0x31,
+ 0xe5, 0x3f, 0x11, 0x4e, 0x7f, 0x13, 0x51, 0x3b,
+ 0xae, 0x79, 0xc9, 0xd3, 0x81, 0x8e, 0x25, 0x40,
+ 0x10, 0xfc, 0x07, 0x1e, 0xf9, 0x7b, 0x9a, 0x4b,
+ 0x6c, 0xe3, 0xb3, 0xad, 0x1a, 0x0a, 0xdd, 0x9e,
+ 0x59, 0x0c, 0xa2, 0xcd, 0xae, 0x48, 0x4a, 0x38,
+ 0x5b, 0x47, 0x41, 0x94, 0x65, 0x6b, 0xbb, 0xeb,
+ 0x5b, 0xe3, 0xaf, 0x07, 0x5b, 0xd4, 0x4a, 0xa2,
+ 0xc9, 0x5d, 0x2f, 0x64, 0x03, 0xd7, 0x3a, 0x2c,
+ 0x6e, 0xce, 0x76, 0x95, 0xb4, 0xb3, 0xc0, 0xf1,
+ 0xe2, 0x45, 0x73, 0x7a, 0x5c, 0xab, 0xc1, 0xfc,
+ 0x02, 0x8d, 0x81, 0x29, 0xb3, 0xac, 0x07, 0xec,
+ 0x40, 0x7d, 0x45, 0xd9, 0x7a, 0x59, 0xee, 0x34,
+ 0xf0, 0xe9, 0xd5, 0x7b, 0x96, 0xb1, 0x3d, 0x95,
+ 0xcc, 0x86, 0xb5, 0xb6, 0x04, 0x2d, 0xb5, 0x92,
+ 0x7e, 0x76, 0xf4, 0x06, 0xa9, 0xa3, 0x12, 0x0f,
+ 0xb1, 0xaf, 0x26, 0xba, 0x7c, 0xfc, 0x7e, 0x1c,
+ 0xbc, 0x2c, 0x49, 0x97, 0x53, 0x60, 0x13, 0x0b,
+ 0xa6, 0x61, 0x83, 0x89, 0x42, 0xd4, 0x17, 0x0c,
+ 0x6c, 0x26, 0x52, 0xc3, 0xb3, 0xd4, 0x67, 0xf5,
+ 0xe3, 0x04, 0xb7, 0xf4, 0xcb, 0x80, 0xb8, 0xcb,
+ 0x77, 0x56, 0x3e, 0xaa, 0x57, 0x54, 0xee, 0xb4,
+ 0x2c, 0x67, 0xcf, 0xf2, 0xdc, 0xbe, 0x55, 0xf9,
+ 0x43, 0x1f, 0x6e, 0x22, 0x97, 0x67, 0x7f, 0xc4,
+ 0xef, 0xb1, 0x26, 0x31, 0x1e, 0x27, 0xdf, 0x41,
+ 0x80, 0x47, 0x6c, 0xe2, 0xfa, 0xa9, 0x8c, 0x2a,
+ 0xf6, 0xf2, 0xab, 0xf0, 0x15, 0xda, 0x6c, 0xc8,
+ 0xfe, 0xb5, 0x23, 0xde, 0xa9, 0x05, 0x3f, 0x06,
+ 0x54, 0x4c, 0xcd, 0xe1, 0xab, 0xfc, 0x0e, 0x62,
+ 0x33, 0x31, 0x73, 0x2c, 0x76, 0xcb, 0xb4, 0x47,
+ 0x1e, 0x20, 0xad, 0xd8, 0xf2, 0x31, 0xdd, 0xc4,
+ 0x8b, 0x0c, 0x77, 0xbe, 0xe1, 0x8b, 0x26, 0x00,
+ 0x02, 0x58, 0xd6, 0x8d, 0xef, 0xad, 0x74, 0x67,
+ 0xab, 0x3f, 0xef, 0xcb, 0x6f, 0xb0, 0xcc, 0x81,
+ 0x44, 0x4c, 0xaf, 0xe9, 0x49, 0x4f, 0xdb, 0xa0,
+ 0x25, 0xa4, 0xf0, 0x89, 0xf1, 0xbe, 0xd8, 0x10,
+ 0xff, 0xb1, 0x3b, 0x4b, 0xfa, 0x98, 0xf5, 0x79,
+ 0x6d, 0x1e, 0x69, 0x4d, 0x57, 0xb1, 0xc8, 0x19,
+ 0x1b, 0xbd, 0x1e, 0x8c, 0x84, 0xb7, 0x7b, 0xe8,
+ 0xd2, 0x2d, 0x09, 0x41, 0x41, 0x37, 0x3d, 0xb1,
+ 0x6f, 0x26, 0x5d, 0x71, 0x16, 0x3d, 0xb7, 0x83,
+ 0x27, 0x2c, 0xa7, 0xb6, 0x50, 0xbd, 0x91, 0x86,
+ 0xab, 0x24, 0xa1, 0x38, 0xfd, 0xea, 0x71, 0x55,
+ 0x7e, 0x9a, 0x07, 0x77, 0x4b, 0xfa, 0x61, 0x66,
+ 0x20, 0x1e, 0x28, 0x95, 0x18, 0x1b, 0xa4, 0xa0,
+ 0xfd, 0xc0, 0x89, 0x72, 0x43, 0xd9, 0x3b, 0x49,
+ 0x5a, 0x3f, 0x9d, 0xbf, 0xdb, 0xb4, 0x46, 0xea,
+ 0x42, 0x01, 0x77, 0x23, 0x68, 0x95, 0xb6, 0x24,
+ 0xb3, 0xa8, 0x6c, 0x28, 0x3b, 0x11, 0x40, 0x7e,
+ 0x18, 0x65, 0x6d, 0xd8, 0x24, 0x42, 0x7d, 0x88,
+ 0xc0, 0x52, 0xd9, 0x05, 0xe4, 0x95, 0x90, 0x87,
+ 0x8c, 0xf4, 0xd0, 0x6b, 0xb9, 0x83, 0x99, 0x34,
+ 0x6d, 0xfe, 0x54, 0x40, 0x94, 0x52, 0x21, 0x4f,
+ 0x14, 0x25, 0xc5, 0xd6, 0x5e, 0x95, 0xdc, 0x0a,
+ 0x2b, 0x89, 0x20, 0x11, 0x84, 0x48, 0xd6, 0x3a,
+ 0xcd, 0x5c, 0x24, 0xad, 0x62, 0xe3, 0xb1, 0x93,
+ 0x25, 0x8d, 0xcd, 0x7e, 0xfc, 0x27, 0xa3, 0x37,
+ 0xfd, 0x84, 0xfc, 0x1b, 0xb2, 0xf1, 0x27, 0x38,
+ 0x5a, 0xb7, 0xfc, 0xf2, 0xfa, 0x95, 0x66, 0xd4,
+ 0xfb, 0xba, 0xa7, 0xd7, 0xa3, 0x72, 0x69, 0x48,
+ 0x48, 0x8c, 0xeb, 0x28, 0x89, 0xfe, 0x33, 0x65,
+ 0x5a, 0x36, 0x01, 0x7e, 0x06, 0x79, 0x0a, 0x09,
+ 0x3b, 0x74, 0x11, 0x9a, 0x6e, 0xbf, 0xd4, 0x9e,
+ 0x58, 0x90, 0x49, 0x4f, 0x4d, 0x08, 0xd4, 0xe5,
+ 0x4a, 0x09, 0x21, 0xef, 0x8b, 0xb8, 0x74, 0x3b,
+ 0x91, 0xdd, 0x36, 0x85, 0x60, 0x2d, 0xfa, 0xd4,
+ 0x45, 0x7b, 0x45, 0x53, 0xf5, 0x47, 0x87, 0x7e,
+ 0xa6, 0x37, 0xc8, 0x78, 0x7a, 0x68, 0x9d, 0x8d,
+ 0x65, 0x2c, 0x0e, 0x91, 0x5c, 0xa2, 0x60, 0xf0,
+ 0x8e, 0x3f, 0xe9, 0x1a, 0xcd, 0xaa, 0xe7, 0xd5,
+ 0x77, 0x18, 0xaf, 0xc9, 0xbc, 0x18, 0xea, 0x48,
+ 0x1b, 0xfb, 0x22, 0x48, 0x70, 0x16, 0x29, 0x9e,
+ 0x5b, 0xc1, 0x2c, 0x66, 0x23, 0xbc, 0xf0, 0x1f,
+ 0xef, 0xaf, 0xe4, 0xd6, 0x04, 0x19, 0x82, 0x7a,
+ 0x0b, 0xba, 0x4b, 0x46, 0xb1, 0x6a, 0x85, 0x5d,
+ 0xb4, 0x73, 0xd6, 0x21, 0xa1, 0x71, 0x60, 0x14,
+ 0xee, 0x0a, 0x77, 0xc4, 0x66, 0x2e, 0xf9, 0x69,
+ 0x30, 0xaf, 0x41, 0x0b, 0xc8, 0x83, 0x3c, 0x53,
+ 0x99, 0x19, 0x27, 0x46, 0xf7, 0x41, 0x6e, 0x56,
+ 0xdc, 0x94, 0x28, 0x67, 0x4e, 0xb7, 0x25, 0x48,
+ 0x8a, 0xc2, 0xe0, 0x60, 0x96, 0xcc, 0x18, 0xf4,
+ 0x84, 0xdd, 0xa7, 0x5e, 0x3e, 0x05, 0x0b, 0x26,
+ 0x26, 0xb2, 0x5c, 0x1f, 0x57, 0x1a, 0x04, 0x7e,
+ 0x6a, 0xe3, 0x2f, 0xb4, 0x35, 0xb6, 0x38, 0x40,
+ 0x40, 0xcd, 0x6f, 0x87, 0x2e, 0xef, 0xa3, 0xd7,
+ 0xa9, 0xc2, 0xe8, 0x0d, 0x27, 0xdf, 0x44, 0x62,
+ 0x99, 0xa0, 0xfc, 0xcf, 0x81, 0x78, 0xcb, 0xfe,
+ 0xe5, 0xa0, 0x03, 0x4e, 0x6c, 0xd7, 0xf4, 0xaf,
+ 0x7a, 0xbb, 0x61, 0x82, 0xfe, 0x71, 0x89, 0xb2,
+ 0x22, 0x7c, 0x8e, 0x83, 0x04, 0xce, 0xf6, 0x5d,
+ 0x84, 0x8f, 0x95, 0x6a, 0x7f, 0xad, 0xfd, 0x32,
+ 0x9c, 0x5e, 0xe4, 0x9c, 0x89, 0x60, 0x54, 0xaa,
+ 0x96, 0x72, 0xd2, 0xd7, 0x36, 0x85, 0xa9, 0x45,
+ 0xd2, 0x2a, 0xa1, 0x81, 0x49, 0x6f, 0x7e, 0x04,
+ 0xfa, 0xe2, 0xfe, 0x90, 0x26, 0x77, 0x5a, 0x33,
+ 0xb8, 0x04, 0x9a, 0x7a, 0xe6, 0x4c, 0x4f, 0xad,
+ 0x72, 0x96, 0x08, 0x28, 0x58, 0x13, 0xf8, 0xc4,
+ 0x1c, 0xf0, 0xc3, 0x45, 0x95, 0x49, 0x20, 0x8c,
+ 0x9f, 0x39, 0x70, 0xe1, 0x77, 0xfe, 0xd5, 0x4b,
+ 0xaf, 0x86, 0xda, 0xef, 0x22, 0x06, 0x83, 0x36,
+ 0x29, 0x12, 0x11, 0x40, 0xbc, 0x3b, 0x86, 0xaa,
+ 0xaa, 0x65, 0x60, 0xc3, 0x80, 0xca, 0xed, 0xa9,
+ 0xf3, 0xb0, 0x79, 0x96, 0xa2, 0x55, 0x27, 0x28,
+ 0x55, 0x73, 0x26, 0xa5, 0x50, 0xea, 0x92, 0x4b,
+ 0x3c, 0x5c, 0x82, 0x33, 0xf0, 0x01, 0x3f, 0x03,
+ 0xc1, 0x08, 0x05, 0xbf, 0x98, 0xf4, 0x9b, 0x6d,
+ 0xa5, 0xa8, 0xb4, 0x82, 0x0c, 0x06, 0xfa, 0xff,
+ 0x2d, 0x08, 0xf3, 0x05, 0x4f, 0x57, 0x2a, 0x39,
+ 0xd4, 0x83, 0x0d, 0x75, 0x51, 0xd8, 0x5b, 0x1b,
+ 0xd3, 0x51, 0x5a, 0x32, 0x2a, 0x9b, 0x32, 0xb2,
+ 0xf2, 0xa4, 0x96, 0x12, 0xf2, 0xae, 0x40, 0x34,
+ 0x67, 0xa8, 0xf5, 0x44, 0xd5, 0x35, 0x53, 0xfe,
+ 0xa3, 0x60, 0x96, 0x63, 0x0f, 0x1f, 0x6e, 0xb0,
+ 0x5a, 0x42, 0xa6, 0xfc, 0x51, 0x0b, 0x60, 0x27,
+ 0xbc, 0x06, 0x71, 0xed, 0x65, 0x5b, 0x23, 0x86,
+ 0x4a, 0x07, 0x3b, 0x22, 0x07, 0x46, 0xe6, 0x90,
+ 0x3e, 0xf3, 0x25, 0x50, 0x1b, 0x4c, 0x7f, 0x03,
+ 0x08, 0xa8, 0x36, 0x6b, 0x87, 0xe5, 0xe3, 0xdb,
+ 0x9a, 0x38, 0x83, 0xff, 0x9f, 0x1a, 0x9f, 0x57,
+ 0xa4, 0x2a, 0xf6, 0x37, 0xbc, 0x1a, 0xff, 0xc9,
+ 0x1e, 0x35, 0x0c, 0xc3, 0x7c, 0xa3, 0xb2, 0xe5,
+ 0xd2, 0xc6, 0xb4, 0x57, 0x47, 0xe4, 0x32, 0x16,
+ 0x6d, 0xa9, 0xae, 0x64, 0xe6, 0x2d, 0x8d, 0xc5,
+ 0x8d, 0x50, 0x8e, 0xe8, 0x1a, 0x22, 0x34, 0x2a,
+ 0xd9, 0xeb, 0x51, 0x90, 0x4a, 0xb1, 0x41, 0x7d,
+ 0x64, 0xf9, 0xb9, 0x0d, 0xf6, 0x23, 0x33, 0xb0,
+ 0x33, 0xf4, 0xf7, 0x3f, 0x27, 0x84, 0xc6, 0x0f,
+ 0x54, 0xa5, 0xc0, 0x2e, 0xec, 0x0b, 0x3a, 0x48,
+ 0x6e, 0x80, 0x35, 0x81, 0x43, 0x9b, 0x90, 0xb1,
+ 0xd0, 0x2b, 0xea, 0x21, 0xdc, 0xda, 0x5b, 0x09,
+ 0xf4, 0xcc, 0x10, 0xb4, 0xc7, 0xfe, 0x79, 0x51,
+ 0xc3, 0xc5, 0xac, 0x88, 0x74, 0x84, 0x0b, 0x4b,
+ 0xca, 0x79, 0x16, 0x29, 0xfb, 0x69, 0x54, 0xdf,
+ 0x41, 0x7e, 0xe9, 0xc7, 0x8e, 0xea, 0xa5, 0xfe,
+ 0xfc, 0x76, 0x0e, 0x90, 0xc4, 0x92, 0x38, 0xad,
+ 0x7b, 0x48, 0xe6, 0x6e, 0xf7, 0x21, 0xfd, 0x4e,
+ 0x93, 0x0a, 0x7b, 0x41, 0x83, 0x68, 0xfb, 0x57,
+ 0x51, 0x76, 0x34, 0xa9, 0x6c, 0x00, 0xaa, 0x4f,
+ 0x66, 0x65, 0x98, 0x4a, 0x4f, 0xa3, 0xa0, 0xef,
+ 0x69, 0x3f, 0xe3, 0x1c, 0x92, 0x8c, 0xfd, 0xd8,
+ 0xe8, 0xde, 0x7c, 0x7f, 0x3e, 0x84, 0x8e, 0x69,
+ 0x3c, 0xf1, 0xf2, 0x05, 0x46, 0xdc, 0x2f, 0x9d,
+ 0x5e, 0x6e, 0x4c, 0xfb, 0xb5, 0x99, 0x2a, 0x59,
+ 0x63, 0xc1, 0x34, 0xbc, 0x57, 0xc0, 0x0d, 0xb9,
+ 0x61, 0x25, 0xf3, 0x33, 0x23, 0x51, 0xb6, 0x0d,
+ 0x07, 0xa6, 0xab, 0x94, 0x4a, 0xb7, 0x2a, 0xea,
+ 0xee, 0xac, 0xa3, 0xc3, 0x04, 0x8b, 0x0e, 0x56,
+ 0xfe, 0x44, 0xa7, 0x39, 0xe2, 0xed, 0xed, 0xb4,
+ 0x22, 0x2b, 0xac, 0x12, 0x32, 0x28, 0x91, 0xd8,
+ 0xa5, 0xab, 0xff, 0x5f, 0xe0, 0x4b, 0xda, 0x78,
+ 0x17, 0xda, 0xf1, 0x01, 0x5b, 0xcd, 0xe2, 0x5f,
+ 0x50, 0x45, 0x73, 0x2b, 0xe4, 0x76, 0x77, 0xf4,
+ 0x64, 0x1d, 0x43, 0xfb, 0x84, 0x7a, 0xea, 0x91,
+ 0xae, 0xf9, 0x9e, 0xb7, 0xb4, 0xb0, 0x91, 0x5f,
+ 0x16, 0x35, 0x9a, 0x11, 0xb8, 0xc7, 0xc1, 0x8c,
+ 0xc6, 0x10, 0x8d, 0x2f, 0x63, 0x4a, 0xa7, 0x57,
+ 0x3a, 0x51, 0xd6, 0x32, 0x2d, 0x64, 0x72, 0xd4,
+ 0x66, 0xdc, 0x10, 0xa6, 0x67, 0xd6, 0x04, 0x23,
+ 0x9d, 0x0a, 0x11, 0x77, 0xdd, 0x37, 0x94, 0x17,
+ 0x3c, 0xbf, 0x8b, 0x65, 0xb0, 0x2e, 0x5e, 0x66,
+ 0x47, 0x64, 0xac, 0xdd, 0xf0, 0x84, 0xfd, 0x39,
+ 0xfa, 0x15, 0x5d, 0xef, 0xae, 0xca, 0xc1, 0x36,
+ 0xa7, 0x5c, 0xbf, 0xc7, 0x08, 0xc2, 0x66, 0x00,
+ 0x74, 0x74, 0x4e, 0x27, 0x3f, 0x55, 0x8a, 0xb7,
+ 0x38, 0x66, 0x83, 0x6d, 0xcf, 0x99, 0x9e, 0x60,
+ 0x8f, 0xdd, 0x2e, 0x62, 0x22, 0x0e, 0xef, 0x0c,
+ 0x98, 0xa7, 0x85, 0x74, 0x3b, 0x9d, 0xec, 0x9e,
+ 0xa9, 0x19, 0x72, 0xa5, 0x7f, 0x2c, 0x39, 0xb7,
+ 0x7d, 0xb7, 0xf1, 0x12, 0x65, 0x27, 0x4b, 0x5a,
+ 0xde, 0x17, 0xfe, 0xad, 0x44, 0xf3, 0x20, 0x4d,
+ 0xfd, 0xe4, 0x1f, 0xb5, 0x81, 0xb0, 0x36, 0x37,
+ 0x08, 0x6f, 0xc3, 0x0c, 0xe9, 0x85, 0x98, 0x82,
+ 0xa9, 0x62, 0x0c, 0xc4, 0x97, 0xc0, 0x50, 0xc8,
+ 0xa7, 0x3c, 0x50, 0x9f, 0x43, 0xb9, 0xcd, 0x5e,
+ 0x4d, 0xfa, 0x1c, 0x4b, 0x0b, 0xa9, 0x98, 0x85,
+ 0x38, 0x92, 0xac, 0x8d, 0xe4, 0xad, 0x9b, 0x98,
+ 0xab, 0xd9, 0x38, 0xac, 0x62, 0x52, 0xa3, 0x22,
+ 0x63, 0x0f, 0xbf, 0x95, 0x48, 0xdf, 0x69, 0xe7,
+ 0x8b, 0x33, 0xd5, 0xb2, 0xbd, 0x05, 0x49, 0x49,
+ 0x9d, 0x57, 0x73, 0x19, 0x33, 0xae, 0xfa, 0x33,
+ 0xf1, 0x19, 0xa8, 0x80, 0xce, 0x04, 0x9f, 0xbc,
+ 0x1d, 0x65, 0x82, 0x1b, 0xe5, 0x3a, 0x51, 0xc8,
+ 0x1c, 0x21, 0xe3, 0x5d, 0xf3, 0x7d, 0x9b, 0x2f,
+ 0x2c, 0x1d, 0x4a, 0x7f, 0x9b, 0x68, 0x35, 0xa3,
+ 0xb2, 0x50, 0xf7, 0x62, 0x79, 0xcd, 0xf4, 0x98,
+ 0x4f, 0xe5, 0x63, 0x7c, 0x3e, 0x45, 0x31, 0x8c,
+ 0x16, 0xa0, 0x12, 0xc8, 0x58, 0xce, 0x39, 0xa6,
+ 0xbc, 0x54, 0xdb, 0xc5, 0xe0, 0xd5, 0xba, 0xbc,
+ 0xb9, 0x04, 0xf4, 0x8d, 0xe8, 0x2f, 0x15, 0x9d,
+};
+
+/* 100 test cases */
+static struct crc_test {
+ u32 crc; /* random starting crc */
+ u32 start; /* random 6 bit offset in buf */
+ u32 length; /* random 11 bit length of test */
+ u32 crc_le; /* expected crc32_le result */
+ u32 crc_be; /* expected crc32_be result */
+ u32 crc32c_le; /* expected crc32c_le result */
+} const test[] __initconst =
+{
+ {0x674bf11d, 0x00000038, 0x00000542, 0x0af6d466, 0xd8b6e4c1, 0xf6e93d6c},
+ {0x35c672c6, 0x0000003a, 0x000001aa, 0xc6d3dfba, 0x28aaf3ad, 0x0fe92aca},
+ {0x496da28e, 0x00000039, 0x000005af, 0xd933660f, 0x5d57e81f, 0x52e1ebb8},
+ {0x09a9b90e, 0x00000027, 0x000001f8, 0xb45fe007, 0xf45fca9a, 0x0798af9a},
+ {0xdc97e5a9, 0x00000025, 0x000003b6, 0xf81a3562, 0xe0126ba2, 0x18eb3152},
+ {0x47c58900, 0x0000000a, 0x000000b9, 0x8e58eccf, 0xf3afc793, 0xd00d08c7},
+ {0x292561e8, 0x0000000c, 0x00000403, 0xa2ba8aaf, 0x0b797aed, 0x8ba966bc},
+ {0x415037f6, 0x00000003, 0x00000676, 0xa17d52e8, 0x7f0fdf35, 0x11d694a2},
+ {0x3466e707, 0x00000026, 0x00000042, 0x258319be, 0x75c484a2, 0x6ab3208d},
+ {0xafd1281b, 0x00000023, 0x000002ee, 0x4428eaf8, 0x06c7ad10, 0xba4603c5},
+ {0xd3857b18, 0x00000028, 0x000004a2, 0x5c430821, 0xb062b7cb, 0xe6071c6f},
+ {0x1d825a8f, 0x0000002b, 0x0000050b, 0xd2c45f0c, 0xd68634e0, 0x179ec30a},
+ {0x5033e3bc, 0x0000000b, 0x00000078, 0xa3ea4113, 0xac6d31fb, 0x0903beb8},
+ {0x94f1fb5e, 0x0000000f, 0x000003a2, 0xfbfc50b1, 0x3cfe50ed, 0x6a7cb4fa},
+ {0xc9a0fe14, 0x00000009, 0x00000473, 0x5fb61894, 0x87070591, 0xdb535801},
+ {0x88a034b1, 0x0000001c, 0x000005ad, 0xc1b16053, 0x46f95c67, 0x92bed597},
+ {0xf0f72239, 0x00000020, 0x0000026d, 0xa6fa58f3, 0xf8c2c1dd, 0x192a3f1b},
+ {0xcc20a5e3, 0x0000003b, 0x0000067a, 0x7740185a, 0x308b979a, 0xccbaec1a},
+ {0xce589c95, 0x0000002b, 0x00000641, 0xd055e987, 0x40aae25b, 0x7eabae4d},
+ {0x78edc885, 0x00000035, 0x000005be, 0xa39cb14b, 0x035b0d1f, 0x28c72982},
+ {0x9d40a377, 0x0000003b, 0x00000038, 0x1f47ccd2, 0x197fbc9d, 0xc3cd4d18},
+ {0x703d0e01, 0x0000003c, 0x000006f1, 0x88735e7c, 0xfed57c5a, 0xbca8f0e7},
+ {0x776bf505, 0x0000000f, 0x000005b2, 0x5cc4fc01, 0xf32efb97, 0x713f60b3},
+ {0x4a3e7854, 0x00000027, 0x000004b8, 0x8d923c82, 0x0cbfb4a2, 0xebd08fd5},
+ {0x209172dd, 0x0000003b, 0x00000356, 0xb89e9c2b, 0xd7868138, 0x64406c59},
+ {0x3ba4cc5b, 0x0000002f, 0x00000203, 0xe51601a9, 0x5b2a1032, 0x7421890e},
+ {0xfc62f297, 0x00000000, 0x00000079, 0x71a8e1a2, 0x5d88685f, 0xe9347603},
+ {0x64280b8b, 0x00000016, 0x000007ab, 0x0fa7a30c, 0xda3a455f, 0x1bef9060},
+ {0x97dd724b, 0x00000033, 0x000007ad, 0x5788b2f4, 0xd7326d32, 0x34720072},
+ {0x61394b52, 0x00000035, 0x00000571, 0xc66525f1, 0xcabe7fef, 0x48310f59},
+ {0x29b4faff, 0x00000024, 0x0000006e, 0xca13751e, 0x993648e0, 0x783a4213},
+ {0x29bfb1dc, 0x0000000b, 0x00000244, 0x436c43f7, 0x429f7a59, 0x9e8efd41},
+ {0x86ae934b, 0x00000035, 0x00000104, 0x0760ec93, 0x9cf7d0f4, 0xfc3d34a5},
+ {0xc4c1024e, 0x0000002e, 0x000006b1, 0x6516a3ec, 0x19321f9c, 0x17a52ae2},
+ {0x3287a80a, 0x00000026, 0x00000496, 0x0b257eb1, 0x754ebd51, 0x886d935a},
+ {0xa4db423e, 0x00000023, 0x0000045d, 0x9b3a66dc, 0x873e9f11, 0xeaaeaeb2},
+ {0x7a1078df, 0x00000015, 0x0000014a, 0x8c2484c5, 0x6a628659, 0x8e900a4b},
+ {0x6048bd5b, 0x00000006, 0x0000006a, 0x897e3559, 0xac9961af, 0xd74662b1},
+ {0xd8f9ea20, 0x0000003d, 0x00000277, 0x60eb905b, 0xed2aaf99, 0xd26752ba},
+ {0xea5ec3b4, 0x0000002a, 0x000004fe, 0x869965dc, 0x6c1f833b, 0x8b1fcd62},
+ {0x2dfb005d, 0x00000016, 0x00000345, 0x6a3b117e, 0xf05e8521, 0xf54342fe},
+ {0x5a214ade, 0x00000020, 0x000005b6, 0x467f70be, 0xcb22ccd3, 0x5b95b988},
+ {0xf0ab9cca, 0x00000032, 0x00000515, 0xed223df3, 0x7f3ef01d, 0x2e1176be},
+ {0x91b444f9, 0x0000002e, 0x000007f8, 0x84e9a983, 0x5676756f, 0x66120546},
+ {0x1b5d2ddb, 0x0000002e, 0x0000012c, 0xba638c4c, 0x3f42047b, 0xf256a5cc},
+ {0xd824d1bb, 0x0000003a, 0x000007b5, 0x6288653b, 0x3a3ebea0, 0x4af1dd69},
+ {0x0470180c, 0x00000034, 0x000001f0, 0x9d5b80d6, 0x3de08195, 0x56f0a04a},
+ {0xffaa3a3f, 0x00000036, 0x00000299, 0xf3a82ab8, 0x53e0c13d, 0x74f6b6b2},
+ {0x6406cfeb, 0x00000023, 0x00000600, 0xa920b8e8, 0xe4e2acf4, 0x085951fd},
+ {0xb24aaa38, 0x0000003e, 0x000004a1, 0x657cc328, 0x5077b2c3, 0xc65387eb},
+ {0x58b2ab7c, 0x00000039, 0x000002b4, 0x3a17ee7e, 0x9dcb3643, 0x1ca9257b},
+ {0x3db85970, 0x00000006, 0x000002b6, 0x95268b59, 0xb9812c10, 0xfd196d76},
+ {0x857830c5, 0x00000003, 0x00000590, 0x4ef439d5, 0xf042161d, 0x5ef88339},
+ {0xe1fcd978, 0x0000003e, 0x000007d8, 0xae8d8699, 0xce0a1ef5, 0x2c3714d9},
+ {0xb982a768, 0x00000016, 0x000006e0, 0x62fad3df, 0x5f8a067b, 0x58576548},
+ {0x1d581ce8, 0x0000001e, 0x0000058b, 0xf0f5da53, 0x26e39eee, 0xfd7c57de},
+ {0x2456719b, 0x00000025, 0x00000503, 0x4296ac64, 0xd50e4c14, 0xd5fedd59},
+ {0xfae6d8f2, 0x00000000, 0x0000055d, 0x057fdf2e, 0x2a31391a, 0x1cc3b17b},
+ {0xcba828e3, 0x00000039, 0x000002ce, 0xe3f22351, 0x8f00877b, 0x270eed73},
+ {0x13d25952, 0x0000000a, 0x0000072d, 0x76d4b4cc, 0x5eb67ec3, 0x91ecbb11},
+ {0x0342be3f, 0x00000015, 0x00000599, 0xec75d9f1, 0x9d4d2826, 0x05ed8d0c},
+ {0xeaa344e0, 0x00000014, 0x000004d8, 0x72a4c981, 0x2064ea06, 0x0b09ad5b},
+ {0xbbb52021, 0x0000003b, 0x00000272, 0x04af99fc, 0xaf042d35, 0xf8d511fb},
+ {0xb66384dc, 0x0000001d, 0x000007fc, 0xd7629116, 0x782bd801, 0x5ad832cc},
+ {0x616c01b6, 0x00000022, 0x000002c8, 0x5b1dab30, 0x783ce7d2, 0x1214d196},
+ {0xce2bdaad, 0x00000016, 0x0000062a, 0x932535c8, 0x3f02926d, 0x5747218a},
+ {0x00fe84d7, 0x00000005, 0x00000205, 0x850e50aa, 0x753d649c, 0xde8f14de},
+ {0xbebdcb4c, 0x00000006, 0x0000055d, 0xbeaa37a2, 0x2d8c9eba, 0x3563b7b9},
+ {0xd8b1a02a, 0x00000010, 0x00000387, 0x5017d2fc, 0x503541a5, 0x071475d0},
+ {0x3b96cad2, 0x00000036, 0x00000347, 0x1d2372ae, 0x926cd90b, 0x54c79d60},
+ {0xc94c1ed7, 0x00000005, 0x0000038b, 0x9e9fdb22, 0x144a9178, 0x4c53eee6},
+ {0x1aad454e, 0x00000025, 0x000002b2, 0xc3f6315c, 0x5c7a35b3, 0x10137a3c},
+ {0xa4fec9a6, 0x00000000, 0x000006d6, 0x90be5080, 0xa4107605, 0xaa9d6c73},
+ {0x1bbe71e2, 0x0000001f, 0x000002fd, 0x4e504c3b, 0x284ccaf1, 0xb63d23e7},
+ {0x4201c7e4, 0x00000002, 0x000002b7, 0x7822e3f9, 0x0cc912a9, 0x7f53e9cf},
+ {0x23fddc96, 0x00000003, 0x00000627, 0x8a385125, 0x07767e78, 0x13c1cd83},
+ {0xd82ba25c, 0x00000016, 0x0000063e, 0x98e4148a, 0x283330c9, 0x49ff5867},
+ {0x786f2032, 0x0000002d, 0x0000060f, 0xf201600a, 0xf561bfcd, 0x8467f211},
+ {0xfebe4e1f, 0x0000002a, 0x000004f2, 0x95e51961, 0xfd80dcab, 0x3f9683b2},
+ {0x1a6e0a39, 0x00000008, 0x00000672, 0x8af6c2a5, 0x78dd84cb, 0x76a3f874},
+ {0x56000ab8, 0x0000000e, 0x000000e5, 0x36bacb8f, 0x22ee1f77, 0x863b702f},
+ {0x4717fe0c, 0x00000000, 0x000006ec, 0x8439f342, 0x5c8e03da, 0xdc6c58ff},
+ {0xd5d5d68e, 0x0000003c, 0x000003a3, 0x46fff083, 0x177d1b39, 0x0622cc95},
+ {0xc25dd6c6, 0x00000024, 0x000006c0, 0x5ceb8eb4, 0x892b0d16, 0xe85605cd},
+ {0xe9b11300, 0x00000023, 0x00000683, 0x07a5d59a, 0x6c6a3208, 0x31da5f06},
+ {0x95cd285e, 0x00000001, 0x00000047, 0x7b3a4368, 0x0202c07e, 0xa1f2e784},
+ {0xd9245a25, 0x0000001e, 0x000003a6, 0xd33c1841, 0x1936c0d5, 0xb07cc616},
+ {0x103279db, 0x00000006, 0x0000039b, 0xca09b8a0, 0x77d62892, 0xbf943b6c},
+ {0x1cba3172, 0x00000027, 0x000001c8, 0xcb377194, 0xebe682db, 0x2c01af1c},
+ {0x8f613739, 0x0000000c, 0x000001df, 0xb4b0bc87, 0x7710bd43, 0x0fe5f56d},
+ {0x1c6aa90d, 0x0000001b, 0x0000053c, 0x70559245, 0xda7894ac, 0xf8943b2d},
+ {0xaabe5b93, 0x0000003d, 0x00000715, 0xcdbf42fa, 0x0c3b99e7, 0xe4d89272},
+ {0xf15dd038, 0x00000006, 0x000006db, 0x6e104aea, 0x8d5967f2, 0x7c2f6bbb},
+ {0x584dd49c, 0x00000020, 0x000007bc, 0x36b6cfd6, 0xad4e23b2, 0xabbf388b},
+ {0x5d8c9506, 0x00000020, 0x00000470, 0x4c62378e, 0x31d92640, 0x1dca1f4e},
+ {0xb80d17b0, 0x00000032, 0x00000346, 0x22a5bb88, 0x9a7ec89f, 0x5c170e23},
+ {0xdaf0592e, 0x00000023, 0x000007b0, 0x3cab3f99, 0x9b1fdd99, 0xc0e9d672},
+ {0x4793cc85, 0x0000000d, 0x00000706, 0xe82e04f6, 0xed3db6b7, 0xc18bdc86},
+ {0x82ebf64e, 0x00000009, 0x000007c3, 0x69d590a9, 0x9efa8499, 0xa874fcdd},
+ {0xb18a0319, 0x00000026, 0x000007db, 0x1cf98dcc, 0x8fa9ad6a, 0x9dc0bb48},
+};
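+
+/*
+ * As a concrete reading of the table: entry 0 asserts that seeding the CRC
+ * with 0x674bf11d and running it over 0x542 bytes starting at offset 0x38
+ * of test_buf yields 0x0af6d466 for crc32_le(), 0xd8b6e4c1 for crc32_be()
+ * and 0xf6e93d6c for __crc32c_le().
+ */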
+
+#include <linux/time.h>
+
+static int __init crc32c_test(void)
+{
+ int i;
+ int errors = 0;
+ int bytes = 0;
+ u64 nsec;
+ unsigned long flags;
+
+	/*
+	 * Keep static to prevent the cache-warming code from
+	 * getting eliminated by the compiler.
+	 */
+ static u32 crc;
+
+ /* pre-warm the cache */
+ for (i = 0; i < 100; i++) {
+ bytes += test[i].length;
+
+ crc ^= __crc32c_le(test[i].crc, test_buf +
+ test[i].start, test[i].length);
+ }
+
+ /* reduce OS noise */
+ local_irq_save(flags);
+
+ nsec = ktime_get_ns();
+ for (i = 0; i < 100; i++) {
+ if (test[i].crc32c_le != __crc32c_le(test[i].crc, test_buf +
+ test[i].start, test[i].length))
+ errors++;
+ }
+ nsec = ktime_get_ns() - nsec;
+
+ local_irq_restore(flags);
+
+ pr_info("crc32c: CRC_LE_BITS = %d\n", CRC_LE_BITS);
+
+ if (errors)
+ pr_warn("crc32c: %d self tests failed\n", errors);
+ else {
+ pr_info("crc32c: self tests passed, processed %d bytes in %lld nsec\n",
+ bytes, nsec);
+ }
+
+ return 0;
+}
+
+static int __init crc32c_combine_test(void)
+{
+ int i, j;
+ int errors = 0, runs = 0;
+
+ for (i = 0; i < 10; i++) {
+ u32 crc_full;
+
+ crc_full = __crc32c_le(test[i].crc, test_buf + test[i].start,
+ test[i].length);
+ for (j = 0; j <= test[i].length; ++j) {
+ u32 crc1, crc2;
+ u32 len1 = j, len2 = test[i].length - j;
+
+ crc1 = __crc32c_le(test[i].crc, test_buf +
+ test[i].start, len1);
+ crc2 = __crc32c_le(0, test_buf + test[i].start +
+ len1, len2);
+
+ if (!(crc_full == __crc32c_le_combine(crc1, crc2, len2) &&
+ crc_full == test[i].crc32c_le))
+ errors++;
+ runs++;
+ cond_resched();
+ }
+ }
+
+ if (errors)
+ pr_warn("crc32c_combine: %d/%d self tests failed\n", errors, runs);
+ else
+ pr_info("crc32c_combine: %d self tests passed\n", runs);
+
+ return 0;
+}
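+
+/*
+ * The loop above checks the crc32 combine identity: if crc1 covers the
+ * first len1 bytes (with the original seed) and crc2 covers the remaining
+ * len2 bytes (seeded with 0), then combining shifts crc1 forward by len2
+ * bytes in GF(2) polynomial arithmetic and xors in crc2, which must equal
+ * the CRC of the whole run. A sketch of the invariant:
+ *
+ *   __crc32c_le_combine(__crc32c_le(seed, buf, len1),
+ *                       __crc32c_le(0, buf + len1, len2),
+ *                       len2) == __crc32c_le(seed, buf, len1 + len2)
+ */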
+
+static int __init crc32_test(void)
+{
+ int i;
+ int errors = 0;
+ int bytes = 0;
+ u64 nsec;
+ unsigned long flags;
+
+	/*
+	 * Keep static to prevent the cache-warming code from
+	 * getting eliminated by the compiler.
+	 */
+ static u32 crc;
+
+ /* pre-warm the cache */
+ for (i = 0; i < 100; i++) {
+ bytes += 2*test[i].length;
+
+ crc ^= crc32_le(test[i].crc, test_buf +
+ test[i].start, test[i].length);
+
+ crc ^= crc32_be(test[i].crc, test_buf +
+ test[i].start, test[i].length);
+ }
+
+ /* reduce OS noise */
+ local_irq_save(flags);
+
+ nsec = ktime_get_ns();
+ for (i = 0; i < 100; i++) {
+ if (test[i].crc_le != crc32_le(test[i].crc, test_buf +
+ test[i].start, test[i].length))
+ errors++;
+
+ if (test[i].crc_be != crc32_be(test[i].crc, test_buf +
+ test[i].start, test[i].length))
+ errors++;
+ }
+ nsec = ktime_get_ns() - nsec;
+
+ local_irq_restore(flags);
+
+ pr_info("crc32: CRC_LE_BITS = %d, CRC_BE BITS = %d\n",
+ CRC_LE_BITS, CRC_BE_BITS);
+
+ if (errors)
+ pr_warn("crc32: %d self tests failed\n", errors);
+ else {
+ pr_info("crc32: self tests passed, processed %d bytes in %lld nsec\n",
+ bytes, nsec);
+ }
+
+ return 0;
+}
+
+static int __init crc32_combine_test(void)
+{
+ int i, j;
+ int errors = 0, runs = 0;
+
+ for (i = 0; i < 10; i++) {
+ u32 crc_full;
+
+ crc_full = crc32_le(test[i].crc, test_buf + test[i].start,
+ test[i].length);
+ for (j = 0; j <= test[i].length; ++j) {
+ u32 crc1, crc2;
+ u32 len1 = j, len2 = test[i].length - j;
+
+ crc1 = crc32_le(test[i].crc, test_buf +
+ test[i].start, len1);
+ crc2 = crc32_le(0, test_buf + test[i].start +
+ len1, len2);
+
+ if (!(crc_full == crc32_le_combine(crc1, crc2, len2) &&
+ crc_full == test[i].crc_le))
+ errors++;
+ runs++;
+ cond_resched();
+ }
+ }
+
+ if (errors)
+ pr_warn("crc32_combine: %d/%d self tests failed\n", errors, runs);
+ else
+ pr_info("crc32_combine: %d self tests passed\n", runs);
+
+ return 0;
+}
+
+static int __init crc32test_init(void)
+{
+ crc32_test();
+ crc32c_test();
+
+ crc32_combine_test();
+ crc32c_combine_test();
+
+ return 0;
+}
+
+static void __exit crc32_exit(void)
+{
+}
+
+module_init(crc32test_init);
+module_exit(crc32_exit);
+
+MODULE_AUTHOR("Matt Domsch <Matt_Domsch@dell.com>");
+MODULE_DESCRIPTION("CRC32 selftest");
+MODULE_LICENSE("GPL");
diff --git a/lib/crc4.c b/lib/crc4.c
new file mode 100644
index 000000000..e7e1779c6
--- /dev/null
+++ b/lib/crc4.c
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * crc4.c - simple crc-4 calculations.
+ */
+
+#include <linux/crc4.h>
+#include <linux/module.h>
+
+static const uint8_t crc4_tab[] = {
+ 0x0, 0x7, 0xe, 0x9, 0xb, 0xc, 0x5, 0x2,
+ 0x1, 0x6, 0xf, 0x8, 0xa, 0xd, 0x4, 0x3,
+};
+
+/**
+ * crc4 - calculate the 4-bit crc of a value.
+ * @c: starting crc4
+ * @x: value to checksum
+ * @bits: number of bits in @x to checksum
+ *
+ * Returns the crc4 value of @x, using polynomial 0b10111.
+ *
+ * Only the low @bits bits of @x are checksummed, most-significant nibble
+ * first; any bits above @bits are ignored.
+ */
+uint8_t crc4(uint8_t c, uint64_t x, int bits)
+{
+ int i;
+
+	/* mask off anything above the low @bits bits */
+ x &= (1ull << bits) - 1;
+
+ /* Align to 4-bits */
+ bits = (bits + 3) & ~0x3;
+
+ /* Calculate crc4 over four-bit nibbles, starting at the MSbit */
+ for (i = bits - 4; i >= 0; i -= 4)
+ c = crc4_tab[c ^ ((x >> i) & 0xf)];
+
+ return c;
+}
+EXPORT_SYMBOL_GPL(crc4);
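+
+/*
+ * A minimal usage sketch (input value illustrative): checksumming the low
+ * 8 bits of a word, most-significant nibble first, from a zero seed:
+ *
+ *   u8 crc = crc4(0, 0xa5, 8); // two table steps: nibble 0xa, then 0x5
+ */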
+
+MODULE_DESCRIPTION("CRC4 calculations");
+MODULE_LICENSE("GPL");
diff --git a/lib/crc64-rocksoft.c b/lib/crc64-rocksoft.c
new file mode 100644
index 000000000..fc9ae0da5
--- /dev/null
+++ b/lib/crc64-rocksoft.c
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/crc64.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <crypto/hash.h>
+#include <crypto/algapi.h>
+#include <linux/static_key.h>
+#include <linux/notifier.h>
+
+static struct crypto_shash __rcu *crc64_rocksoft_tfm;
+static DEFINE_STATIC_KEY_TRUE(crc64_rocksoft_fallback);
+static DEFINE_MUTEX(crc64_rocksoft_mutex);
+static struct work_struct crc64_rocksoft_rehash_work;
+
+static int crc64_rocksoft_notify(struct notifier_block *self, unsigned long val, void *data)
+{
+ struct crypto_alg *alg = data;
+
+ if (val != CRYPTO_MSG_ALG_LOADED ||
+ strcmp(alg->cra_name, CRC64_ROCKSOFT_STRING))
+ return NOTIFY_DONE;
+
+ schedule_work(&crc64_rocksoft_rehash_work);
+ return NOTIFY_OK;
+}
+
+static void crc64_rocksoft_rehash(struct work_struct *work)
+{
+ struct crypto_shash *new, *old;
+
+ mutex_lock(&crc64_rocksoft_mutex);
+ old = rcu_dereference_protected(crc64_rocksoft_tfm,
+ lockdep_is_held(&crc64_rocksoft_mutex));
+ new = crypto_alloc_shash(CRC64_ROCKSOFT_STRING, 0, 0);
+ if (IS_ERR(new)) {
+ mutex_unlock(&crc64_rocksoft_mutex);
+ return;
+ }
+ rcu_assign_pointer(crc64_rocksoft_tfm, new);
+ mutex_unlock(&crc64_rocksoft_mutex);
+
+ if (old) {
+ synchronize_rcu();
+ crypto_free_shash(old);
+ } else {
+ static_branch_disable(&crc64_rocksoft_fallback);
+ }
+}
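+
+/*
+ * Rehashing follows the usual RCU publish pattern: allocate the new tfm,
+ * publish it with rcu_assign_pointer() under the mutex, then wait out a
+ * grace period before freeing the old one, so a concurrent reader in
+ * crc64_rocksoft_update() can never dereference a freed tfm. The first
+ * successful rehash also flips the static key, taking callers off the
+ * generic fallback path for good.
+ */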
+
+static struct notifier_block crc64_rocksoft_nb = {
+ .notifier_call = crc64_rocksoft_notify,
+};
+
+u64 crc64_rocksoft_update(u64 crc, const unsigned char *buffer, size_t len)
+{
+ struct {
+ struct shash_desc shash;
+ u64 crc;
+ } desc;
+ int err;
+
+ if (static_branch_unlikely(&crc64_rocksoft_fallback))
+ return crc64_rocksoft_generic(crc, buffer, len);
+
+ rcu_read_lock();
+ desc.shash.tfm = rcu_dereference(crc64_rocksoft_tfm);
+ desc.crc = crc;
+ err = crypto_shash_update(&desc.shash, buffer, len);
+ rcu_read_unlock();
+
+ BUG_ON(err);
+
+ return desc.crc;
+}
+EXPORT_SYMBOL_GPL(crc64_rocksoft_update);
+
+u64 crc64_rocksoft(const unsigned char *buffer, size_t len)
+{
+ return crc64_rocksoft_update(0, buffer, len);
+}
+EXPORT_SYMBOL_GPL(crc64_rocksoft);
+
+static int __init crc64_rocksoft_mod_init(void)
+{
+ INIT_WORK(&crc64_rocksoft_rehash_work, crc64_rocksoft_rehash);
+ crypto_register_notifier(&crc64_rocksoft_nb);
+ crc64_rocksoft_rehash(&crc64_rocksoft_rehash_work);
+ return 0;
+}
+
+static void __exit crc64_rocksoft_mod_fini(void)
+{
+ crypto_unregister_notifier(&crc64_rocksoft_nb);
+ cancel_work_sync(&crc64_rocksoft_rehash_work);
+ crypto_free_shash(rcu_dereference_protected(crc64_rocksoft_tfm, 1));
+}
+
+module_init(crc64_rocksoft_mod_init);
+module_exit(crc64_rocksoft_mod_fini);
+
+static int crc64_rocksoft_transform_show(char *buffer, const struct kernel_param *kp)
+{
+ struct crypto_shash *tfm;
+ int len;
+
+ if (static_branch_unlikely(&crc64_rocksoft_fallback))
+ return sprintf(buffer, "fallback\n");
+
+ rcu_read_lock();
+ tfm = rcu_dereference(crc64_rocksoft_tfm);
+ len = snprintf(buffer, PAGE_SIZE, "%s\n",
+ crypto_shash_driver_name(tfm));
+ rcu_read_unlock();
+
+ return len;
+}
+
+module_param_call(transform, NULL, crc64_rocksoft_transform_show, NULL, 0444);
+
+MODULE_AUTHOR("Keith Busch <kbusch@kernel.org>");
+MODULE_DESCRIPTION("Rocksoft model CRC64 calculation (library API)");
+MODULE_LICENSE("GPL");
+MODULE_SOFTDEP("pre: crc64");
diff --git a/lib/crc64.c b/lib/crc64.c
new file mode 100644
index 000000000..61ae8dfb6
--- /dev/null
+++ b/lib/crc64.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Normal 64-bit CRC calculation.
+ *
+ * This is a basic crc64 implementation following the ECMA-182 specification,
+ * which can be found at:
+ * https://www.ecma-international.org/publications/standards/Ecma-182.htm
+ *
+ * Dr. Ross N. Williams wrote a great introduction to CRC algorithms; the
+ * CRC64 code here is also inspired by the table-driven algorithm and the
+ * detailed example in that paper, which can be found at:
+ * http://www.ross.net/crc/download/crc_v3.txt
+ *
+ * crc64table[256] is the lookup table for the table-driven 64-bit CRC
+ * calculation; it is generated by gen_crc64table.c at kernel build time.
+ * The crc64 polynomial also comes from the ECMA-182 specification and is
+ * defined as,
+ *
+ * x^64 + x^62 + x^57 + x^55 + x^54 + x^53 + x^52 + x^47 + x^46 + x^45 +
+ * x^40 + x^39 + x^38 + x^37 + x^35 + x^33 + x^32 + x^31 + x^29 + x^27 +
+ * x^24 + x^23 + x^22 + x^21 + x^19 + x^17 + x^13 + x^12 + x^10 + x^9 +
+ * x^7 + x^4 + x + 1
+ *
+ * The crc64rocksofttable[256] lookup table is generated from the Rocksoft
+ * model polynomial, defined as,
+ *
+ * x^64 + x^63 + x^61 + x^59 + x^58 + x^56 + x^55 + x^52 + x^49 + x^48 + x^47 +
+ * x^46 + x^44 + x^41 + x^37 + x^36 + x^34 + x^32 + x^31 + x^28 + x^26 + x^23 +
+ * x^22 + x^19 + x^16 + x^13 + x^12 + x^10 + x^9 + x^6 + x^4 + x^3 + 1
+ *
+ * Copyright 2018 SUSE Linux.
+ * Author: Coly Li <colyli@suse.de>
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/crc64.h>
+#include "crc64table.h"
+
+MODULE_DESCRIPTION("CRC64 calculations");
+MODULE_LICENSE("GPL v2");
+
+/**
+ * crc64_be - Calculate bitwise big-endian ECMA-182 CRC64
+ * @crc: seed value for computation. 0 or (u64)~0 for a new CRC calculation,
+ * or the previous crc64 value if computing incrementally.
+ * @p: pointer to buffer over which CRC64 is run
+ * @len: length of buffer @p
+ */
+u64 __pure crc64_be(u64 crc, const void *p, size_t len)
+{
+ size_t i, t;
+
+ const unsigned char *_p = p;
+
+ for (i = 0; i < len; i++) {
+ t = ((crc >> 56) ^ (*_p++)) & 0xFF;
+ crc = crc64table[t] ^ (crc << 8);
+ }
+
+ return crc;
+}
+EXPORT_SYMBOL_GPL(crc64_be);
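+
+/*
+ * The loop above is the classic MSB-first table recurrence. Each step is,
+ * in effect (a sketch, assuming a correctly generated crc64table[]):
+ *
+ *   crc = (crc << 8) ^ crc64table[(crc >> 56) ^ byte];
+ *
+ * i.e. shift the next message byte into the running remainder and reduce
+ * the byte that falls off the top modulo the ECMA-182 polynomial via the
+ * precomputed table.
+ */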
+
+/**
+ * crc64_rocksoft_generic - Calculate bitwise Rocksoft CRC64
+ * @crc: seed value for computation. 0 for a new CRC calculation, or the
+ * previous crc64 value if computing incrementally.
+ * @p: pointer to buffer over which CRC64 is run
+ * @len: length of buffer @p
+ */
+u64 __pure crc64_rocksoft_generic(u64 crc, const void *p, size_t len)
+{
+ const unsigned char *_p = p;
+ size_t i;
+
+ crc = ~crc;
+
+ for (i = 0; i < len; i++)
+ crc = (crc >> 8) ^ crc64rocksofttable[(crc & 0xff) ^ *_p++];
+
+ return ~crc;
+}
+EXPORT_SYMBOL_GPL(crc64_rocksoft_generic);
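+
+/*
+ * The crc inversions on entry and exit above encode the Rocksoft model
+ * parameters: an all-ones init value and an all-ones final xor, while the
+ * right-shifting table walk corresponds to bit-reflected input and output.
+ * This is the CRC64 variant used for NVMe protection information guard
+ * tags.
+ */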
diff --git a/lib/crc7.c b/lib/crc7.c
new file mode 100644
index 000000000..3848e313b
--- /dev/null
+++ b/lib/crc7.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * crc7.c
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/crc7.h>
+
+/*
+ * Table for CRC-7 (polynomial x^7 + x^3 + 1).
+ * This is a big-endian CRC (msbit is highest power of x),
+ * aligned so the msbit of the byte is the x^6 coefficient
+ * and the lsbit is not used.
+ */
+const u8 crc7_be_syndrome_table[256] = {
+ 0x00, 0x12, 0x24, 0x36, 0x48, 0x5a, 0x6c, 0x7e,
+ 0x90, 0x82, 0xb4, 0xa6, 0xd8, 0xca, 0xfc, 0xee,
+ 0x32, 0x20, 0x16, 0x04, 0x7a, 0x68, 0x5e, 0x4c,
+ 0xa2, 0xb0, 0x86, 0x94, 0xea, 0xf8, 0xce, 0xdc,
+ 0x64, 0x76, 0x40, 0x52, 0x2c, 0x3e, 0x08, 0x1a,
+ 0xf4, 0xe6, 0xd0, 0xc2, 0xbc, 0xae, 0x98, 0x8a,
+ 0x56, 0x44, 0x72, 0x60, 0x1e, 0x0c, 0x3a, 0x28,
+ 0xc6, 0xd4, 0xe2, 0xf0, 0x8e, 0x9c, 0xaa, 0xb8,
+ 0xc8, 0xda, 0xec, 0xfe, 0x80, 0x92, 0xa4, 0xb6,
+ 0x58, 0x4a, 0x7c, 0x6e, 0x10, 0x02, 0x34, 0x26,
+ 0xfa, 0xe8, 0xde, 0xcc, 0xb2, 0xa0, 0x96, 0x84,
+ 0x6a, 0x78, 0x4e, 0x5c, 0x22, 0x30, 0x06, 0x14,
+ 0xac, 0xbe, 0x88, 0x9a, 0xe4, 0xf6, 0xc0, 0xd2,
+ 0x3c, 0x2e, 0x18, 0x0a, 0x74, 0x66, 0x50, 0x42,
+ 0x9e, 0x8c, 0xba, 0xa8, 0xd6, 0xc4, 0xf2, 0xe0,
+ 0x0e, 0x1c, 0x2a, 0x38, 0x46, 0x54, 0x62, 0x70,
+ 0x82, 0x90, 0xa6, 0xb4, 0xca, 0xd8, 0xee, 0xfc,
+ 0x12, 0x00, 0x36, 0x24, 0x5a, 0x48, 0x7e, 0x6c,
+ 0xb0, 0xa2, 0x94, 0x86, 0xf8, 0xea, 0xdc, 0xce,
+ 0x20, 0x32, 0x04, 0x16, 0x68, 0x7a, 0x4c, 0x5e,
+ 0xe6, 0xf4, 0xc2, 0xd0, 0xae, 0xbc, 0x8a, 0x98,
+ 0x76, 0x64, 0x52, 0x40, 0x3e, 0x2c, 0x1a, 0x08,
+ 0xd4, 0xc6, 0xf0, 0xe2, 0x9c, 0x8e, 0xb8, 0xaa,
+ 0x44, 0x56, 0x60, 0x72, 0x0c, 0x1e, 0x28, 0x3a,
+ 0x4a, 0x58, 0x6e, 0x7c, 0x02, 0x10, 0x26, 0x34,
+ 0xda, 0xc8, 0xfe, 0xec, 0x92, 0x80, 0xb6, 0xa4,
+ 0x78, 0x6a, 0x5c, 0x4e, 0x30, 0x22, 0x14, 0x06,
+ 0xe8, 0xfa, 0xcc, 0xde, 0xa0, 0xb2, 0x84, 0x96,
+ 0x2e, 0x3c, 0x0a, 0x18, 0x66, 0x74, 0x42, 0x50,
+ 0xbe, 0xac, 0x9a, 0x88, 0xf6, 0xe4, 0xd2, 0xc0,
+ 0x1c, 0x0e, 0x38, 0x2a, 0x54, 0x46, 0x70, 0x62,
+ 0x8c, 0x9e, 0xa8, 0xba, 0xc4, 0xd6, 0xe0, 0xf2
+};
+EXPORT_SYMBOL(crc7_be_syndrome_table);
+
+/**
+ * crc7_be - update the CRC7 for the data buffer
+ * @crc: previous CRC7 value
+ * @buffer: data pointer
+ * @len: number of bytes in the buffer
+ * Context: any
+ *
+ * Returns the updated CRC7 value.
+ * The CRC7 is left-aligned in the byte (the lsbit is always 0), as that
+ * makes the computation easier, and all callers want it in that form.
+ */
+u8 crc7_be(u8 crc, const u8 *buffer, size_t len)
+{
+ while (len--)
+ crc = crc7_be_byte(crc, *buffer++);
+ return crc;
+}
+EXPORT_SYMBOL(crc7_be);
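+
+/*
+ * A minimal usage sketch (hypothetical cmd buffer): an SD/MMC-style caller
+ * that needs the 7-bit CRC of a 5-byte command followed by the protocol's
+ * end bit could do:
+ *
+ *   u8 crc_byte = crc7_be(0, cmd, 5) | 0x1;
+ */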
+
+MODULE_DESCRIPTION("CRC7 calculations");
+MODULE_LICENSE("GPL");
diff --git a/lib/crc8.c b/lib/crc8.c
new file mode 100644
index 000000000..1ad8e501d
--- /dev/null
+++ b/lib/crc8.c
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2011 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/crc8.h>
+#include <linux/printk.h>
+
+/**
+ * crc8_populate_msb - fill crc table for given polynomial in reverse bit order.
+ *
+ * @table: table to be filled.
+ * @polynomial: polynomial for which table is to be filled.
+ */
+void crc8_populate_msb(u8 table[CRC8_TABLE_SIZE], u8 polynomial)
+{
+ int i, j;
+ const u8 msbit = 0x80;
+ u8 t = msbit;
+
+ table[0] = 0;
+
+ for (i = 1; i < CRC8_TABLE_SIZE; i *= 2) {
+ t = (t << 1) ^ (t & msbit ? polynomial : 0);
+ for (j = 0; j < i; j++)
+ table[i+j] = table[j] ^ t;
+ }
+}
+EXPORT_SYMBOL(crc8_populate_msb);
+
+/**
+ * crc8_populate_lsb - fill crc table for given polynomial in regular bit order.
+ *
+ * @table: table to be filled.
+ * @polynomial: polynomial for which table is to be filled.
+ */
+void crc8_populate_lsb(u8 table[CRC8_TABLE_SIZE], u8 polynomial)
+{
+ int i, j;
+ u8 t = 1;
+
+ table[0] = 0;
+
+ for (i = (CRC8_TABLE_SIZE >> 1); i; i >>= 1) {
+ t = (t >> 1) ^ (t & 1 ? polynomial : 0);
+ for (j = 0; j < CRC8_TABLE_SIZE; j += 2*i)
+ table[i+j] = table[j] ^ t;
+ }
+}
+EXPORT_SYMBOL(crc8_populate_lsb);
+
+/**
+ * crc8 - calculate a crc8 over the given input data.
+ *
+ * @table: crc table used for calculation.
+ * @pdata: pointer to data buffer.
+ * @nbytes: number of bytes in data buffer.
+ * @crc: previous returned crc8 value.
+ */
+u8 crc8(const u8 table[CRC8_TABLE_SIZE], const u8 *pdata, size_t nbytes, u8 crc)
+{
+ /* loop over the buffer data */
+ while (nbytes-- > 0)
+ crc = table[(crc ^ *pdata++) & 0xff];
+
+ return crc;
+}
+EXPORT_SYMBOL(crc8);
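+
+/*
+ * A minimal usage sketch, assuming the helpers from <linux/crc8.h> and an
+ * illustrative polynomial:
+ *
+ *   DECLARE_CRC8_TABLE(my_crc8_table);
+ *   ...
+ *   crc8_populate_msb(my_crc8_table, 0x07);
+ *   u8 crc = crc8(my_crc8_table, data, len, CRC8_INIT_VALUE);
+ */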
+
+MODULE_DESCRIPTION("CRC8 (by Williams, Ross N.) function");
+MODULE_AUTHOR("Broadcom Corporation");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig
new file mode 100644
index 000000000..45436bfc6
--- /dev/null
+++ b/lib/crypto/Kconfig
@@ -0,0 +1,141 @@
+# SPDX-License-Identifier: GPL-2.0
+
+menu "Crypto library routines"
+
+config CRYPTO_LIB_UTILS
+ tristate
+
+config CRYPTO_LIB_AES
+ tristate
+
+config CRYPTO_LIB_AESGCM
+ tristate
+ select CRYPTO_LIB_AES
+ select CRYPTO_LIB_GF128MUL
+ select CRYPTO_LIB_UTILS
+
+config CRYPTO_LIB_ARC4
+ tristate
+
+config CRYPTO_LIB_GF128MUL
+ tristate
+
+config CRYPTO_ARCH_HAVE_LIB_BLAKE2S
+ bool
+ help
+ Declares whether the architecture provides an arch-specific
+ accelerated implementation of the Blake2s library interface,
+ either builtin or as a module.
+
+config CRYPTO_LIB_BLAKE2S_GENERIC
+ def_bool !CRYPTO_ARCH_HAVE_LIB_BLAKE2S
+ help
+ This symbol can be depended upon by arch implementations of the
+ Blake2s library interface that require the generic code as a
+ fallback, e.g., for SIMD implementations. If no arch specific
+ implementation is enabled, this implementation serves the users
+ of CRYPTO_LIB_BLAKE2S.
+
+config CRYPTO_ARCH_HAVE_LIB_CHACHA
+ tristate
+ help
+ Declares whether the architecture provides an arch-specific
+ accelerated implementation of the ChaCha library interface,
+ either builtin or as a module.
+
+config CRYPTO_LIB_CHACHA_GENERIC
+ tristate
+ select CRYPTO_LIB_UTILS
+ help
+ This symbol can be depended upon by arch implementations of the
+ ChaCha library interface that require the generic code as a
+ fallback, e.g., for SIMD implementations. If no arch specific
+ implementation is enabled, this implementation serves the users
+ of CRYPTO_LIB_CHACHA.
+
+config CRYPTO_LIB_CHACHA
+ tristate "ChaCha library interface"
+ depends on CRYPTO_ARCH_HAVE_LIB_CHACHA || !CRYPTO_ARCH_HAVE_LIB_CHACHA
+ select CRYPTO_LIB_CHACHA_GENERIC if CRYPTO_ARCH_HAVE_LIB_CHACHA=n
+ help
+ Enable the ChaCha library interface. This interface may be fulfilled
+ by either the generic implementation or an arch-specific one, if one
+ is available and enabled.
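+
+# An architecture opts in by selecting the _HAVE_ symbol from its own
+# Kconfig entry. A sketch of the shape (arch symbol name illustrative):
+#
+#   config CRYPTO_CHACHA_MYARCH
+#           tristate
+#           select CRYPTO_ARCH_HAVE_LIB_CHACHA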
+
+config CRYPTO_ARCH_HAVE_LIB_CURVE25519
+ tristate
+ help
+ Declares whether the architecture provides an arch-specific
+ accelerated implementation of the Curve25519 library interface,
+ either builtin or as a module.
+
+config CRYPTO_LIB_CURVE25519_GENERIC
+ tristate
+ help
+ This symbol can be depended upon by arch implementations of the
+ Curve25519 library interface that require the generic code as a
+ fallback, e.g., for SIMD implementations. If no arch specific
+ implementation is enabled, this implementation serves the users
+ of CRYPTO_LIB_CURVE25519.
+
+config CRYPTO_LIB_CURVE25519
+ tristate "Curve25519 scalar multiplication library"
+ depends on CRYPTO_ARCH_HAVE_LIB_CURVE25519 || !CRYPTO_ARCH_HAVE_LIB_CURVE25519
+ select CRYPTO_LIB_CURVE25519_GENERIC if CRYPTO_ARCH_HAVE_LIB_CURVE25519=n
+ select CRYPTO_LIB_UTILS
+ help
+ Enable the Curve25519 library interface. This interface may be
+ fulfilled by either the generic implementation or an arch-specific
+ one, if one is available and enabled.
+
+config CRYPTO_LIB_DES
+ tristate
+
+config CRYPTO_LIB_POLY1305_RSIZE
+ int
+ default 2 if MIPS
+ default 11 if X86_64
+ default 9 if ARM || ARM64
+ default 1
+
+config CRYPTO_ARCH_HAVE_LIB_POLY1305
+ tristate
+ help
+ Declares whether the architecture provides an arch-specific
+ accelerated implementation of the Poly1305 library interface,
+ either builtin or as a module.
+
+config CRYPTO_LIB_POLY1305_GENERIC
+ tristate
+ help
+ This symbol can be depended upon by arch implementations of the
+ Poly1305 library interface that require the generic code as a
+ fallback, e.g., for SIMD implementations. If no arch specific
+ implementation is enabled, this implementation serves the users
+ of CRYPTO_LIB_POLY1305.
+
+config CRYPTO_LIB_POLY1305
+ tristate "Poly1305 library interface"
+ depends on CRYPTO_ARCH_HAVE_LIB_POLY1305 || !CRYPTO_ARCH_HAVE_LIB_POLY1305
+ select CRYPTO_LIB_POLY1305_GENERIC if CRYPTO_ARCH_HAVE_LIB_POLY1305=n
+ help
+ Enable the Poly1305 library interface. This interface may be fulfilled
+ by either the generic implementation or an arch-specific one, if one
+ is available and enabled.
+
+config CRYPTO_LIB_CHACHA20POLY1305
+ tristate "ChaCha20-Poly1305 AEAD support (8-byte nonce library version)"
+ depends on CRYPTO_ARCH_HAVE_LIB_CHACHA || !CRYPTO_ARCH_HAVE_LIB_CHACHA
+ depends on CRYPTO_ARCH_HAVE_LIB_POLY1305 || !CRYPTO_ARCH_HAVE_LIB_POLY1305
+ depends on CRYPTO
+ select CRYPTO_LIB_CHACHA
+ select CRYPTO_LIB_POLY1305
+ select CRYPTO_ALGAPI
+
+config CRYPTO_LIB_SHA1
+ tristate
+
+config CRYPTO_LIB_SHA256
+ tristate
+
+endmenu
diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile
new file mode 100644
index 000000000..8d1446c2b
--- /dev/null
+++ b/lib/crypto/Makefile
@@ -0,0 +1,57 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_CRYPTO_LIB_UTILS) += libcryptoutils.o
+libcryptoutils-y := memneq.o utils.o
+
+# chacha is used by the /dev/random driver which is always builtin
+obj-y += chacha.o
+obj-$(CONFIG_CRYPTO_LIB_CHACHA_GENERIC) += libchacha.o
+
+obj-$(CONFIG_CRYPTO_LIB_AES) += libaes.o
+libaes-y := aes.o
+
+obj-$(CONFIG_CRYPTO_LIB_AESGCM) += libaesgcm.o
+libaesgcm-y := aesgcm.o
+
+obj-$(CONFIG_CRYPTO_LIB_ARC4) += libarc4.o
+libarc4-y := arc4.o
+
+obj-$(CONFIG_CRYPTO_LIB_GF128MUL) += gf128mul.o
+
+# blake2s is used by the /dev/random driver which is always builtin
+obj-y += libblake2s.o
+libblake2s-y := blake2s.o
+libblake2s-$(CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC) += blake2s-generic.o
+
+obj-$(CONFIG_CRYPTO_LIB_CHACHA20POLY1305) += libchacha20poly1305.o
+libchacha20poly1305-y += chacha20poly1305.o
+
+obj-$(CONFIG_CRYPTO_LIB_CURVE25519_GENERIC) += libcurve25519-generic.o
+libcurve25519-generic-y := curve25519-fiat32.o
+libcurve25519-generic-$(CONFIG_ARCH_SUPPORTS_INT128) := curve25519-hacl64.o
+libcurve25519-generic-y += curve25519-generic.o
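+# Note the := above: with CONFIG_ARCH_SUPPORTS_INT128=y the second
+# assignment replaces (rather than appends to) the fiat32 object, so exactly
+# one field-arithmetic backend is built in. The same idiom picks
+# poly1305-donna64.o over poly1305-donna32.o below.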
+
+obj-$(CONFIG_CRYPTO_LIB_CURVE25519) += libcurve25519.o
+libcurve25519-y += curve25519.o
+
+obj-$(CONFIG_CRYPTO_LIB_DES) += libdes.o
+libdes-y := des.o
+
+obj-$(CONFIG_CRYPTO_LIB_POLY1305_GENERIC) += libpoly1305.o
+libpoly1305-y := poly1305-donna32.o
+libpoly1305-$(CONFIG_ARCH_SUPPORTS_INT128) := poly1305-donna64.o
+libpoly1305-y += poly1305.o
+
+obj-$(CONFIG_CRYPTO_LIB_SHA1) += libsha1.o
+libsha1-y := sha1.o
+
+obj-$(CONFIG_CRYPTO_LIB_SHA256) += libsha256.o
+libsha256-y := sha256.o
+
+ifneq ($(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS),y)
+libblake2s-y += blake2s-selftest.o
+libchacha20poly1305-y += chacha20poly1305-selftest.o
+libcurve25519-y += curve25519-selftest.o
+endif
+
+obj-$(CONFIG_MPILIB) += mpi/
diff --git a/lib/crypto/aes.c b/lib/crypto/aes.c
new file mode 100644
index 000000000..827fe8992
--- /dev/null
+++ b/lib/crypto/aes.c
@@ -0,0 +1,356 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017-2019 Linaro Ltd <ard.biesheuvel@linaro.org>
+ */
+
+#include <crypto/aes.h>
+#include <linux/crypto.h>
+#include <linux/module.h>
+#include <asm/unaligned.h>
+
+/*
+ * Emit the sbox as volatile const to prevent the compiler from doing
+ * constant folding on sbox references involving fixed indexes.
+ */
+static volatile const u8 __cacheline_aligned aes_sbox[] = {
+ 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5,
+ 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
+ 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0,
+ 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
+ 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc,
+ 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
+ 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a,
+ 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
+ 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0,
+ 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
+ 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b,
+ 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
+ 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85,
+ 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
+ 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5,
+ 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
+ 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17,
+ 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
+ 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88,
+ 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
+ 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c,
+ 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
+ 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9,
+ 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
+ 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6,
+ 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
+ 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e,
+ 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
+ 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94,
+ 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
+ 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68,
+ 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16,
+};
+
+static volatile const u8 __cacheline_aligned aes_inv_sbox[] = {
+ 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38,
+ 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb,
+ 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87,
+ 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb,
+ 0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d,
+ 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e,
+ 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2,
+ 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25,
+ 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16,
+ 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92,
+ 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda,
+ 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84,
+ 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a,
+ 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06,
+ 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02,
+ 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b,
+ 0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea,
+ 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73,
+ 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85,
+ 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e,
+ 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89,
+ 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b,
+ 0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20,
+ 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4,
+ 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31,
+ 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f,
+ 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d,
+ 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef,
+ 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0,
+ 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61,
+ 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26,
+ 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d,
+};
+
+extern const u8 crypto_aes_sbox[256] __alias(aes_sbox);
+extern const u8 crypto_aes_inv_sbox[256] __alias(aes_inv_sbox);
+
+EXPORT_SYMBOL(crypto_aes_sbox);
+EXPORT_SYMBOL(crypto_aes_inv_sbox);
+
+static u32 mul_by_x(u32 w)
+{
+ u32 x = w & 0x7f7f7f7f;
+ u32 y = w & 0x80808080;
+
+ /* multiply by polynomial 'x' (0b10) in GF(2^8) */
+ return (x << 1) ^ (y >> 7) * 0x1b;
+}
+
+static u32 mul_by_x2(u32 w)
+{
+ u32 x = w & 0x3f3f3f3f;
+ u32 y = w & 0x80808080;
+ u32 z = w & 0x40404040;
+
+ /* multiply by polynomial 'x^2' (0b100) in GF(2^8) */
+ return (x << 2) ^ (y >> 7) * 0x36 ^ (z >> 6) * 0x1b;
+}
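+
+/*
+ * Worked example, one byte lane: mul_by_x(0x80) = (0x00 << 1) ^ (1 * 0x1b)
+ * = 0x1b, i.e. x^7 * x reduced modulo the AES polynomial
+ * x^8 + x^4 + x^3 + x + 1, and mul_by_x2(0x80) = 0x36 = x^9 mod the same
+ * polynomial; low bytes such as 0x01 simply shift to 0x02 and 0x04.
+ */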
+
+static u32 mix_columns(u32 x)
+{
+ /*
+ * Perform the following matrix multiplication in GF(2^8)
+ *
+ * | 0x2 0x3 0x1 0x1 | | x[0] |
+ * | 0x1 0x2 0x3 0x1 | | x[1] |
+ * | 0x1 0x1 0x2 0x3 | x | x[2] |
+ * | 0x3 0x1 0x1 0x2 | | x[3] |
+ */
+ u32 y = mul_by_x(x) ^ ror32(x, 16);
+
+ return y ^ ror32(x ^ y, 8);
+}
+
+static u32 inv_mix_columns(u32 x)
+{
+ /*
+ * Perform the following matrix multiplication in GF(2^8)
+ *
+ * | 0xe 0xb 0xd 0x9 | | x[0] |
+ * | 0x9 0xe 0xb 0xd | | x[1] |
+ * | 0xd 0x9 0xe 0xb | x | x[2] |
+ * | 0xb 0xd 0x9 0xe | | x[3] |
+ *
+ * which can conveniently be reduced to
+ *
+ * | 0x2 0x3 0x1 0x1 | | 0x5 0x0 0x4 0x0 | | x[0] |
+ * | 0x1 0x2 0x3 0x1 | | 0x0 0x5 0x0 0x4 | | x[1] |
+ * | 0x1 0x1 0x2 0x3 | x | 0x4 0x0 0x5 0x0 | x | x[2] |
+ * | 0x3 0x1 0x1 0x2 | | 0x0 0x4 0x0 0x5 | | x[3] |
+ */
+ u32 y = mul_by_x2(x);
+
+ return mix_columns(x ^ y ^ ror32(y, 16));
+}
+
+static __always_inline u32 subshift(u32 in[], int pos)
+{
+ return (aes_sbox[in[pos] & 0xff]) ^
+ (aes_sbox[(in[(pos + 1) % 4] >> 8) & 0xff] << 8) ^
+ (aes_sbox[(in[(pos + 2) % 4] >> 16) & 0xff] << 16) ^
+ (aes_sbox[(in[(pos + 3) % 4] >> 24) & 0xff] << 24);
+}
+
+static __always_inline u32 inv_subshift(u32 in[], int pos)
+{
+ return (aes_inv_sbox[in[pos] & 0xff]) ^
+ (aes_inv_sbox[(in[(pos + 3) % 4] >> 8) & 0xff] << 8) ^
+ (aes_inv_sbox[(in[(pos + 2) % 4] >> 16) & 0xff] << 16) ^
+ (aes_inv_sbox[(in[(pos + 1) % 4] >> 24) & 0xff] << 24);
+}
+
+static u32 subw(u32 in)
+{
+ return (aes_sbox[in & 0xff]) ^
+ (aes_sbox[(in >> 8) & 0xff] << 8) ^
+ (aes_sbox[(in >> 16) & 0xff] << 16) ^
+ (aes_sbox[(in >> 24) & 0xff] << 24);
+}
+
+/**
+ * aes_expandkey - Expands the AES key as described in FIPS-197
+ * @ctx: The location where the computed key will be stored.
+ * @in_key: The supplied key.
+ * @key_len: The length of the supplied key.
+ *
+ * Returns 0 on success. The function fails only if an invalid key size (or
+ * pointer) is supplied.
+ * The expanded key size is 240 bytes (a maximum of 14 rounds, each with a
+ * unique 16-byte round key, plus a 16-byte key which is used before the
+ * first round).
+ * The decryption key is prepared for the "Equivalent Inverse Cipher" as
+ * described in FIPS-197. The first slot (16 bytes) of each key (enc or dec)
+ * is for the initial combination, the second slot for the first round, and
+ * so on.
+ */
+int aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
+ unsigned int key_len)
+{
+ u32 kwords = key_len / sizeof(u32);
+ u32 rc, i, j;
+ int err;
+
+ err = aes_check_keylen(key_len);
+ if (err)
+ return err;
+
+ ctx->key_length = key_len;
+
+ for (i = 0; i < kwords; i++)
+ ctx->key_enc[i] = get_unaligned_le32(in_key + i * sizeof(u32));
+
+ for (i = 0, rc = 1; i < 10; i++, rc = mul_by_x(rc)) {
+ u32 *rki = ctx->key_enc + (i * kwords);
+ u32 *rko = rki + kwords;
+
+ rko[0] = ror32(subw(rki[kwords - 1]), 8) ^ rc ^ rki[0];
+ rko[1] = rko[0] ^ rki[1];
+ rko[2] = rko[1] ^ rki[2];
+ rko[3] = rko[2] ^ rki[3];
+
+ if (key_len == AES_KEYSIZE_192) {
+ if (i >= 7)
+ break;
+ rko[4] = rko[3] ^ rki[4];
+ rko[5] = rko[4] ^ rki[5];
+ } else if (key_len == AES_KEYSIZE_256) {
+ if (i >= 6)
+ break;
+ rko[4] = subw(rko[3]) ^ rki[4];
+ rko[5] = rko[4] ^ rki[5];
+ rko[6] = rko[5] ^ rki[6];
+ rko[7] = rko[6] ^ rki[7];
+ }
+ }
+
+ /*
+ * Generate the decryption keys for the Equivalent Inverse Cipher.
+ * This involves reversing the order of the round keys, and applying
+ * the Inverse Mix Columns transformation to all but the first and
+ * the last one.
+ */
+ ctx->key_dec[0] = ctx->key_enc[key_len + 24];
+ ctx->key_dec[1] = ctx->key_enc[key_len + 25];
+ ctx->key_dec[2] = ctx->key_enc[key_len + 26];
+ ctx->key_dec[3] = ctx->key_enc[key_len + 27];
+
+ for (i = 4, j = key_len + 20; j > 0; i += 4, j -= 4) {
+ ctx->key_dec[i] = inv_mix_columns(ctx->key_enc[j]);
+ ctx->key_dec[i + 1] = inv_mix_columns(ctx->key_enc[j + 1]);
+ ctx->key_dec[i + 2] = inv_mix_columns(ctx->key_enc[j + 2]);
+ ctx->key_dec[i + 3] = inv_mix_columns(ctx->key_enc[j + 3]);
+ }
+
+ ctx->key_dec[i] = ctx->key_enc[0];
+ ctx->key_dec[i + 1] = ctx->key_enc[1];
+ ctx->key_dec[i + 2] = ctx->key_enc[2];
+ ctx->key_dec[i + 3] = ctx->key_enc[3];
+
+ return 0;
+}
+EXPORT_SYMBOL(aes_expandkey);
+
+/**
+ * aes_encrypt - Encrypt a single AES block
+ * @ctx: Context struct containing the key schedule
+ * @out: Buffer to store the ciphertext
+ * @in: Buffer containing the plaintext
+ */
+void aes_encrypt(const struct crypto_aes_ctx *ctx, u8 *out, const u8 *in)
+{
+ const u32 *rkp = ctx->key_enc + 4;
+ int rounds = 6 + ctx->key_length / 4;
+ u32 st0[4], st1[4];
+ int round;
+
+ st0[0] = ctx->key_enc[0] ^ get_unaligned_le32(in);
+ st0[1] = ctx->key_enc[1] ^ get_unaligned_le32(in + 4);
+ st0[2] = ctx->key_enc[2] ^ get_unaligned_le32(in + 8);
+ st0[3] = ctx->key_enc[3] ^ get_unaligned_le32(in + 12);
+
+ /*
+ * Force the compiler to emit data independent Sbox references,
+ * by xoring the input with Sbox values that are known to add up
+ * to zero. This pulls the entire Sbox into the D-cache before any
+ * data dependent lookups are done.
+ */
+ st0[0] ^= aes_sbox[ 0] ^ aes_sbox[ 64] ^ aes_sbox[134] ^ aes_sbox[195];
+ st0[1] ^= aes_sbox[16] ^ aes_sbox[ 82] ^ aes_sbox[158] ^ aes_sbox[221];
+ st0[2] ^= aes_sbox[32] ^ aes_sbox[ 96] ^ aes_sbox[160] ^ aes_sbox[234];
+ st0[3] ^= aes_sbox[48] ^ aes_sbox[112] ^ aes_sbox[186] ^ aes_sbox[241];
+
+ for (round = 0;; round += 2, rkp += 8) {
+ st1[0] = mix_columns(subshift(st0, 0)) ^ rkp[0];
+ st1[1] = mix_columns(subshift(st0, 1)) ^ rkp[1];
+ st1[2] = mix_columns(subshift(st0, 2)) ^ rkp[2];
+ st1[3] = mix_columns(subshift(st0, 3)) ^ rkp[3];
+
+ if (round == rounds - 2)
+ break;
+
+ st0[0] = mix_columns(subshift(st1, 0)) ^ rkp[4];
+ st0[1] = mix_columns(subshift(st1, 1)) ^ rkp[5];
+ st0[2] = mix_columns(subshift(st1, 2)) ^ rkp[6];
+ st0[3] = mix_columns(subshift(st1, 3)) ^ rkp[7];
+ }
+
+ put_unaligned_le32(subshift(st1, 0) ^ rkp[4], out);
+ put_unaligned_le32(subshift(st1, 1) ^ rkp[5], out + 4);
+ put_unaligned_le32(subshift(st1, 2) ^ rkp[6], out + 8);
+ put_unaligned_le32(subshift(st1, 3) ^ rkp[7], out + 12);
+}
+EXPORT_SYMBOL(aes_encrypt);
+
+/**
+ * aes_decrypt - Decrypt a single AES block
+ * @ctx: Context struct containing the key schedule
+ * @out: Buffer to store the plaintext
+ * @in: Buffer containing the ciphertext
+ */
+void aes_decrypt(const struct crypto_aes_ctx *ctx, u8 *out, const u8 *in)
+{
+ const u32 *rkp = ctx->key_dec + 4;
+ int rounds = 6 + ctx->key_length / 4;
+ u32 st0[4], st1[4];
+ int round;
+
+ st0[0] = ctx->key_dec[0] ^ get_unaligned_le32(in);
+ st0[1] = ctx->key_dec[1] ^ get_unaligned_le32(in + 4);
+ st0[2] = ctx->key_dec[2] ^ get_unaligned_le32(in + 8);
+ st0[3] = ctx->key_dec[3] ^ get_unaligned_le32(in + 12);
+
+ /*
+ * Force the compiler to emit data independent Sbox references,
+ * by xoring the input with Sbox values that are known to add up
+ * to zero. This pulls the entire Sbox into the D-cache before any
+ * data dependent lookups are done.
+ */
+ st0[0] ^= aes_inv_sbox[ 0] ^ aes_inv_sbox[ 64] ^ aes_inv_sbox[129] ^ aes_inv_sbox[200];
+ st0[1] ^= aes_inv_sbox[16] ^ aes_inv_sbox[ 83] ^ aes_inv_sbox[150] ^ aes_inv_sbox[212];
+ st0[2] ^= aes_inv_sbox[32] ^ aes_inv_sbox[ 96] ^ aes_inv_sbox[160] ^ aes_inv_sbox[236];
+ st0[3] ^= aes_inv_sbox[48] ^ aes_inv_sbox[112] ^ aes_inv_sbox[187] ^ aes_inv_sbox[247];
+
+ for (round = 0;; round += 2, rkp += 8) {
+ st1[0] = inv_mix_columns(inv_subshift(st0, 0)) ^ rkp[0];
+ st1[1] = inv_mix_columns(inv_subshift(st0, 1)) ^ rkp[1];
+ st1[2] = inv_mix_columns(inv_subshift(st0, 2)) ^ rkp[2];
+ st1[3] = inv_mix_columns(inv_subshift(st0, 3)) ^ rkp[3];
+
+ if (round == rounds - 2)
+ break;
+
+ st0[0] = inv_mix_columns(inv_subshift(st1, 0)) ^ rkp[4];
+ st0[1] = inv_mix_columns(inv_subshift(st1, 1)) ^ rkp[5];
+ st0[2] = inv_mix_columns(inv_subshift(st1, 2)) ^ rkp[6];
+ st0[3] = inv_mix_columns(inv_subshift(st1, 3)) ^ rkp[7];
+ }
+
+ put_unaligned_le32(inv_subshift(st1, 0) ^ rkp[4], out);
+ put_unaligned_le32(inv_subshift(st1, 1) ^ rkp[5], out + 4);
+ put_unaligned_le32(inv_subshift(st1, 2) ^ rkp[6], out + 8);
+ put_unaligned_le32(inv_subshift(st1, 3) ^ rkp[7], out + 12);
+}
+EXPORT_SYMBOL(aes_decrypt);
+
+MODULE_DESCRIPTION("Generic AES library");
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/lib/crypto/aesgcm.c b/lib/crypto/aesgcm.c
new file mode 100644
index 000000000..c632d6e17
--- /dev/null
+++ b/lib/crypto/aesgcm.c
@@ -0,0 +1,727 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Minimal library implementation of GCM
+ *
+ * Copyright 2022 Google LLC
+ */
+
+#include <linux/module.h>
+
+#include <crypto/aesgcm.h>
+#include <crypto/algapi.h>
+#include <crypto/gcm.h>
+#include <crypto/ghash.h>
+
+#include <asm/irqflags.h>
+
+static void aesgcm_encrypt_block(const struct crypto_aes_ctx *ctx, void *dst,
+ const void *src)
+{
+ unsigned long flags;
+
+ /*
+ * In AES-GCM, both the GHASH key derivation and the CTR mode
+ * encryption operate on known plaintext, making them susceptible to
+ * timing attacks on the encryption key. The AES library already
+ * mitigates this risk to some extent by pulling the entire S-box into
+ * the caches before doing any substitutions, but this strategy is more
+ * effective when running with interrupts disabled.
+ */
+ local_irq_save(flags);
+ aes_encrypt(ctx, dst, src);
+ local_irq_restore(flags);
+}
+
+/**
+ * aesgcm_expandkey - Expands the AES and GHASH keys for the AES-GCM key
+ * schedule
+ *
+ * @ctx: The data structure that will hold the AES-GCM key schedule
+ * @key: The AES encryption input key
+ * @keysize: The length in bytes of the input key
+ * @authsize: The size in bytes of the GCM authentication tag
+ *
+ * Returns: 0 on success, or -EINVAL if @keysize or @authsize contain values
+ * that are not permitted by the GCM specification.
+ */
+int aesgcm_expandkey(struct aesgcm_ctx *ctx, const u8 *key,
+ unsigned int keysize, unsigned int authsize)
+{
+ u8 kin[AES_BLOCK_SIZE] = {};
+ int ret;
+
+ ret = crypto_gcm_check_authsize(authsize) ?:
+ aes_expandkey(&ctx->aes_ctx, key, keysize);
+ if (ret)
+ return ret;
+
+ ctx->authsize = authsize;
+ aesgcm_encrypt_block(&ctx->aes_ctx, &ctx->ghash_key, kin);
+
+ return 0;
+}
+EXPORT_SYMBOL(aesgcm_expandkey);
+
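+/*
+ * Horner-style GHASH evaluation over GF(2^128): X_i = (X_{i-1} ^ B_i) * H.
+ * A short final block XORs in only 'len' bytes, which is equivalent to the
+ * zero-padding required by the GCM specification.
+ */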
+static void aesgcm_ghash(be128 *ghash, const be128 *key, const void *src,
+ int len)
+{
+ while (len > 0) {
+ crypto_xor((u8 *)ghash, src, min(len, GHASH_BLOCK_SIZE));
+ gf128mul_lle(ghash, key);
+
+ src += GHASH_BLOCK_SIZE;
+ len -= GHASH_BLOCK_SIZE;
+ }
+}
+
+static void aesgcm_mac(const struct aesgcm_ctx *ctx, const u8 *src, int src_len,
+ const u8 *assoc, int assoc_len, __be32 *ctr, u8 *authtag)
+{
+ be128 tail = { cpu_to_be64(assoc_len * 8), cpu_to_be64(src_len * 8) };
+ u8 buf[AES_BLOCK_SIZE];
+ be128 ghash = {};
+
+ aesgcm_ghash(&ghash, &ctx->ghash_key, assoc, assoc_len);
+ aesgcm_ghash(&ghash, &ctx->ghash_key, src, src_len);
+ aesgcm_ghash(&ghash, &ctx->ghash_key, &tail, sizeof(tail));
+
+ ctr[3] = cpu_to_be32(1);
+ aesgcm_encrypt_block(&ctx->aes_ctx, buf, ctr);
+ crypto_xor_cpy(authtag, buf, (u8 *)&ghash, ctx->authsize);
+
+ memzero_explicit(&ghash, sizeof(ghash));
+ memzero_explicit(buf, sizeof(buf));
+}
+
+static void aesgcm_crypt(const struct aesgcm_ctx *ctx, u8 *dst, const u8 *src,
+ int len, __be32 *ctr)
+{
+ u8 buf[AES_BLOCK_SIZE];
+ unsigned int n = 2;
+
+ while (len > 0) {
+ /*
+ * The counter increment below must not result in overflow or
+ * carry into the next 32-bit word, as this could result in
+ * inadvertent IV reuse, which must be avoided at all cost for
+ * stream ciphers such as AES-CTR. Given the range of 'int
+ * len', this cannot happen, so no explicit test is necessary.
+ */
+ ctr[3] = cpu_to_be32(n++);
+ aesgcm_encrypt_block(&ctx->aes_ctx, buf, ctr);
+ crypto_xor_cpy(dst, src, buf, min(len, AES_BLOCK_SIZE));
+
+ dst += AES_BLOCK_SIZE;
+ src += AES_BLOCK_SIZE;
+ len -= AES_BLOCK_SIZE;
+ }
+ memzero_explicit(buf, sizeof(buf));
+}
+
+/**
+ * aesgcm_encrypt - Perform AES-GCM encryption on a block of data
+ *
+ * @ctx: The AES-GCM key schedule
+ * @dst: Pointer to the ciphertext output buffer
+ * @src: Pointer to the plaintext (may equal @dst for encryption in place)
+ * @crypt_len: The size in bytes of the plaintext and ciphertext
+ * @assoc: Pointer to the associated data
+ * @assoc_len: The size in bytes of the associated data
+ * @iv: The initialization vector (IV) to use for this block of data
+ * (must be 12 bytes in size as per the GCM spec recommendation)
+ * @authtag: The address of the buffer in memory where the authentication
+ * tag should be stored. The buffer is assumed to have space for
+ * @ctx->authsize bytes.
+ */
+void aesgcm_encrypt(const struct aesgcm_ctx *ctx, u8 *dst, const u8 *src,
+ int crypt_len, const u8 *assoc, int assoc_len,
+ const u8 iv[GCM_AES_IV_SIZE], u8 *authtag)
+{
+ __be32 ctr[4];
+
+ memcpy(ctr, iv, GCM_AES_IV_SIZE);
+
+ aesgcm_crypt(ctx, dst, src, crypt_len, ctr);
+ aesgcm_mac(ctx, dst, crypt_len, assoc, assoc_len, ctr, authtag);
+}
+EXPORT_SYMBOL(aesgcm_encrypt);
+
+/**
+ * aesgcm_decrypt - Perform AES-GCM decryption on a block of data
+ *
+ * @ctx: The AES-GCM key schedule
+ * @dst: Pointer to the plaintext output buffer
+ * @src: Pointer to the ciphertext (may equal @dst for decryption in place)
+ * @crypt_len: The size in bytes of the plaintext and ciphertext
+ * @assoc: Pointer to the associated data
+ * @assoc_len: The size in bytes of the associated data
+ * @iv: The initialization vector (IV) to use for this block of data
+ * (must be 12 bytes in size as per the GCM spec recommendation)
+ * @authtag: The address of the buffer in memory where the authentication
+ * tag is stored.
+ *
+ * Returns: true on success, or false if the ciphertext failed authentication.
+ * On failure, no plaintext will be returned.
+ */
+bool __must_check aesgcm_decrypt(const struct aesgcm_ctx *ctx, u8 *dst,
+ const u8 *src, int crypt_len, const u8 *assoc,
+ int assoc_len, const u8 iv[GCM_AES_IV_SIZE],
+ const u8 *authtag)
+{
+ u8 tagbuf[AES_BLOCK_SIZE];
+ __be32 ctr[4];
+
+ memcpy(ctr, iv, GCM_AES_IV_SIZE);
+
+ aesgcm_mac(ctx, src, crypt_len, assoc, assoc_len, ctr, tagbuf);
+ if (crypto_memneq(authtag, tagbuf, ctx->authsize)) {
+ memzero_explicit(tagbuf, sizeof(tagbuf));
+ return false;
+ }
+ aesgcm_crypt(ctx, dst, src, crypt_len, ctr);
+ return true;
+}
+EXPORT_SYMBOL(aesgcm_decrypt);
+
+MODULE_DESCRIPTION("Generic AES-GCM library");
+MODULE_AUTHOR("Ard Biesheuvel <ardb@kernel.org>");
+MODULE_LICENSE("GPL");
+
+#ifndef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
+
+/*
+ * Test code below. Vectors taken from crypto/testmgr.h
+ */
+
+static const u8 __initconst ctext0[16] =
+ "\x58\xe2\xfc\xce\xfa\x7e\x30\x61"
+ "\x36\x7f\x1d\x57\xa4\xe7\x45\x5a";
+
+static const u8 __initconst ptext1[16];
+
+static const u8 __initconst ctext1[32] =
+ "\x03\x88\xda\xce\x60\xb6\xa3\x92"
+ "\xf3\x28\xc2\xb9\x71\xb2\xfe\x78"
+ "\xab\x6e\x47\xd4\x2c\xec\x13\xbd"
+ "\xf5\x3a\x67\xb2\x12\x57\xbd\xdf";
+
+static const u8 __initconst ptext2[64] =
+ "\xd9\x31\x32\x25\xf8\x84\x06\xe5"
+ "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a"
+ "\x86\xa7\xa9\x53\x15\x34\xf7\xda"
+ "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72"
+ "\x1c\x3c\x0c\x95\x95\x68\x09\x53"
+ "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
+ "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57"
+ "\xba\x63\x7b\x39\x1a\xaf\xd2\x55";
+
+static const u8 __initconst ctext2[80] =
+ "\x42\x83\x1e\xc2\x21\x77\x74\x24"
+ "\x4b\x72\x21\xb7\x84\xd0\xd4\x9c"
+ "\xe3\xaa\x21\x2f\x2c\x02\xa4\xe0"
+ "\x35\xc1\x7e\x23\x29\xac\xa1\x2e"
+ "\x21\xd5\x14\xb2\x54\x66\x93\x1c"
+ "\x7d\x8f\x6a\x5a\xac\x84\xaa\x05"
+ "\x1b\xa3\x0b\x39\x6a\x0a\xac\x97"
+ "\x3d\x58\xe0\x91\x47\x3f\x59\x85"
+ "\x4d\x5c\x2a\xf3\x27\xcd\x64\xa6"
+ "\x2c\xf3\x5a\xbd\x2b\xa6\xfa\xb4";
+
+static const u8 __initconst ptext3[60] =
+ "\xd9\x31\x32\x25\xf8\x84\x06\xe5"
+ "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a"
+ "\x86\xa7\xa9\x53\x15\x34\xf7\xda"
+ "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72"
+ "\x1c\x3c\x0c\x95\x95\x68\x09\x53"
+ "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
+ "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57"
+ "\xba\x63\x7b\x39";
+
+static const u8 __initconst ctext3[76] =
+ "\x42\x83\x1e\xc2\x21\x77\x74\x24"
+ "\x4b\x72\x21\xb7\x84\xd0\xd4\x9c"
+ "\xe3\xaa\x21\x2f\x2c\x02\xa4\xe0"
+ "\x35\xc1\x7e\x23\x29\xac\xa1\x2e"
+ "\x21\xd5\x14\xb2\x54\x66\x93\x1c"
+ "\x7d\x8f\x6a\x5a\xac\x84\xaa\x05"
+ "\x1b\xa3\x0b\x39\x6a\x0a\xac\x97"
+ "\x3d\x58\xe0\x91"
+ "\x5b\xc9\x4f\xbc\x32\x21\xa5\xdb"
+ "\x94\xfa\xe9\x5a\xe7\x12\x1a\x47";
+
+static const u8 __initconst ctext4[16] =
+ "\xcd\x33\xb2\x8a\xc7\x73\xf7\x4b"
+ "\xa0\x0e\xd1\xf3\x12\x57\x24\x35";
+
+static const u8 __initconst ctext5[32] =
+ "\x98\xe7\x24\x7c\x07\xf0\xfe\x41"
+ "\x1c\x26\x7e\x43\x84\xb0\xf6\x00"
+ "\x2f\xf5\x8d\x80\x03\x39\x27\xab"
+ "\x8e\xf4\xd4\x58\x75\x14\xf0\xfb";
+
+static const u8 __initconst ptext6[64] =
+ "\xd9\x31\x32\x25\xf8\x84\x06\xe5"
+ "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a"
+ "\x86\xa7\xa9\x53\x15\x34\xf7\xda"
+ "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72"
+ "\x1c\x3c\x0c\x95\x95\x68\x09\x53"
+ "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
+ "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57"
+ "\xba\x63\x7b\x39\x1a\xaf\xd2\x55";
+
+static const u8 __initconst ctext6[80] =
+ "\x39\x80\xca\x0b\x3c\x00\xe8\x41"
+ "\xeb\x06\xfa\xc4\x87\x2a\x27\x57"
+ "\x85\x9e\x1c\xea\xa6\xef\xd9\x84"
+ "\x62\x85\x93\xb4\x0c\xa1\xe1\x9c"
+ "\x7d\x77\x3d\x00\xc1\x44\xc5\x25"
+ "\xac\x61\x9d\x18\xc8\x4a\x3f\x47"
+ "\x18\xe2\x44\x8b\x2f\xe3\x24\xd9"
+ "\xcc\xda\x27\x10\xac\xad\xe2\x56"
+ "\x99\x24\xa7\xc8\x58\x73\x36\xbf"
+ "\xb1\x18\x02\x4d\xb8\x67\x4a\x14";
+
+static const u8 __initconst ctext7[16] =
+ "\x53\x0f\x8a\xfb\xc7\x45\x36\xb9"
+ "\xa9\x63\xb4\xf1\xc4\xcb\x73\x8b";
+
+static const u8 __initconst ctext8[32] =
+ "\xce\xa7\x40\x3d\x4d\x60\x6b\x6e"
+ "\x07\x4e\xc5\xd3\xba\xf3\x9d\x18"
+ "\xd0\xd1\xc8\xa7\x99\x99\x6b\xf0"
+ "\x26\x5b\x98\xb5\xd4\x8a\xb9\x19";
+
+static const u8 __initconst ptext9[64] =
+ "\xd9\x31\x32\x25\xf8\x84\x06\xe5"
+ "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a"
+ "\x86\xa7\xa9\x53\x15\x34\xf7\xda"
+ "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72"
+ "\x1c\x3c\x0c\x95\x95\x68\x09\x53"
+ "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
+ "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57"
+ "\xba\x63\x7b\x39\x1a\xaf\xd2\x55";
+
+static const u8 __initconst ctext9[80] =
+ "\x52\x2d\xc1\xf0\x99\x56\x7d\x07"
+ "\xf4\x7f\x37\xa3\x2a\x84\x42\x7d"
+ "\x64\x3a\x8c\xdc\xbf\xe5\xc0\xc9"
+ "\x75\x98\xa2\xbd\x25\x55\xd1\xaa"
+ "\x8c\xb0\x8e\x48\x59\x0d\xbb\x3d"
+ "\xa7\xb0\x8b\x10\x56\x82\x88\x38"
+ "\xc5\xf6\x1e\x63\x93\xba\x7a\x0a"
+ "\xbc\xc9\xf6\x62\x89\x80\x15\xad"
+ "\xb0\x94\xda\xc5\xd9\x34\x71\xbd"
+ "\xec\x1a\x50\x22\x70\xe3\xcc\x6c";
+
+static const u8 __initconst ptext10[60] =
+ "\xd9\x31\x32\x25\xf8\x84\x06\xe5"
+ "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a"
+ "\x86\xa7\xa9\x53\x15\x34\xf7\xda"
+ "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72"
+ "\x1c\x3c\x0c\x95\x95\x68\x09\x53"
+ "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
+ "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57"
+ "\xba\x63\x7b\x39";
+
+static const u8 __initconst ctext10[76] =
+ "\x52\x2d\xc1\xf0\x99\x56\x7d\x07"
+ "\xf4\x7f\x37\xa3\x2a\x84\x42\x7d"
+ "\x64\x3a\x8c\xdc\xbf\xe5\xc0\xc9"
+ "\x75\x98\xa2\xbd\x25\x55\xd1\xaa"
+ "\x8c\xb0\x8e\x48\x59\x0d\xbb\x3d"
+ "\xa7\xb0\x8b\x10\x56\x82\x88\x38"
+ "\xc5\xf6\x1e\x63\x93\xba\x7a\x0a"
+ "\xbc\xc9\xf6\x62"
+ "\x76\xfc\x6e\xce\x0f\x4e\x17\x68"
+ "\xcd\xdf\x88\x53\xbb\x2d\x55\x1b";
+
+static const u8 __initconst ptext11[60] =
+ "\xd9\x31\x32\x25\xf8\x84\x06\xe5"
+ "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a"
+ "\x86\xa7\xa9\x53\x15\x34\xf7\xda"
+ "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72"
+ "\x1c\x3c\x0c\x95\x95\x68\x09\x53"
+ "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
+ "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57"
+ "\xba\x63\x7b\x39";
+
+static const u8 __initconst ctext11[76] =
+ "\x39\x80\xca\x0b\x3c\x00\xe8\x41"
+ "\xeb\x06\xfa\xc4\x87\x2a\x27\x57"
+ "\x85\x9e\x1c\xea\xa6\xef\xd9\x84"
+ "\x62\x85\x93\xb4\x0c\xa1\xe1\x9c"
+ "\x7d\x77\x3d\x00\xc1\x44\xc5\x25"
+ "\xac\x61\x9d\x18\xc8\x4a\x3f\x47"
+ "\x18\xe2\x44\x8b\x2f\xe3\x24\xd9"
+ "\xcc\xda\x27\x10"
+ "\x25\x19\x49\x8e\x80\xf1\x47\x8f"
+ "\x37\xba\x55\xbd\x6d\x27\x61\x8c";
+
+static const u8 __initconst ptext12[719] =
+ "\x42\xc1\xcc\x08\x48\x6f\x41\x3f"
+ "\x2f\x11\x66\x8b\x2a\x16\xf0\xe0"
+ "\x58\x83\xf0\xc3\x70\x14\xc0\x5b"
+ "\x3f\xec\x1d\x25\x3c\x51\xd2\x03"
+ "\xcf\x59\x74\x1f\xb2\x85\xb4\x07"
+ "\xc6\x6a\x63\x39\x8a\x5b\xde\xcb"
+ "\xaf\x08\x44\xbd\x6f\x91\x15\xe1"
+ "\xf5\x7a\x6e\x18\xbd\xdd\x61\x50"
+ "\x59\xa9\x97\xab\xbb\x0e\x74\x5c"
+ "\x00\xa4\x43\x54\x04\x54\x9b\x3b"
+ "\x77\xec\xfd\x5c\xa6\xe8\x7b\x08"
+ "\xae\xe6\x10\x3f\x32\x65\xd1\xfc"
+ "\xa4\x1d\x2c\x31\xfb\x33\x7a\xb3"
+ "\x35\x23\xf4\x20\x41\xd4\xad\x82"
+ "\x8b\xa4\xad\x96\x1c\x20\x53\xbe"
+ "\x0e\xa6\xf4\xdc\x78\x49\x3e\x72"
+ "\xb1\xa9\xb5\x83\xcb\x08\x54\xb7"
+ "\xad\x49\x3a\xae\x98\xce\xa6\x66"
+ "\x10\x30\x90\x8c\x55\x83\xd7\x7c"
+ "\x8b\xe6\x53\xde\xd2\x6e\x18\x21"
+ "\x01\x52\xd1\x9f\x9d\xbb\x9c\x73"
+ "\x57\xcc\x89\x09\x75\x9b\x78\x70"
+ "\xed\x26\x97\x4d\xb4\xe4\x0c\xa5"
+ "\xfa\x70\x04\x70\xc6\x96\x1c\x7d"
+ "\x54\x41\x77\xa8\xe3\xb0\x7e\x96"
+ "\x82\xd9\xec\xa2\x87\x68\x55\xf9"
+ "\x8f\x9e\x73\x43\x47\x6a\x08\x36"
+ "\x93\x67\xa8\x2d\xde\xac\x41\xa9"
+ "\x5c\x4d\x73\x97\x0f\x70\x68\xfa"
+ "\x56\x4d\x00\xc2\x3b\x1f\xc8\xb9"
+ "\x78\x1f\x51\x07\xe3\x9a\x13\x4e"
+ "\xed\x2b\x2e\xa3\xf7\x44\xb2\xe7"
+ "\xab\x19\x37\xd9\xba\x76\x5e\xd2"
+ "\xf2\x53\x15\x17\x4c\x6b\x16\x9f"
+ "\x02\x66\x49\xca\x7c\x91\x05\xf2"
+ "\x45\x36\x1e\xf5\x77\xad\x1f\x46"
+ "\xa8\x13\xfb\x63\xb6\x08\x99\x63"
+ "\x82\xa2\xed\xb3\xac\xdf\x43\x19"
+ "\x45\xea\x78\x73\xd9\xb7\x39\x11"
+ "\xa3\x13\x7c\xf8\x3f\xf7\xad\x81"
+ "\x48\x2f\xa9\x5c\x5f\xa0\xf0\x79"
+ "\xa4\x47\x7d\x80\x20\x26\xfd\x63"
+ "\x0a\xc7\x7e\x6d\x75\x47\xff\x76"
+ "\x66\x2e\x8a\x6c\x81\x35\xaf\x0b"
+ "\x2e\x6a\x49\x60\xc1\x10\xe1\xe1"
+ "\x54\x03\xa4\x09\x0c\x37\x7a\x15"
+ "\x23\x27\x5b\x8b\x4b\xa5\x64\x97"
+ "\xae\x4a\x50\x73\x1f\x66\x1c\x5c"
+ "\x03\x25\x3c\x8d\x48\x58\x71\x34"
+ "\x0e\xec\x4e\x55\x1a\x03\x6a\xe5"
+ "\xb6\x19\x2b\x84\x2a\x20\xd1\xea"
+ "\x80\x6f\x96\x0e\x05\x62\xc7\x78"
+ "\x87\x79\x60\x38\x46\xb4\x25\x57"
+ "\x6e\x16\x63\xf8\xad\x6e\xd7\x42"
+ "\x69\xe1\x88\xef\x6e\xd5\xb4\x9a"
+ "\x3c\x78\x6c\x3b\xe5\xa0\x1d\x22"
+ "\x86\x5c\x74\x3a\xeb\x24\x26\xc7"
+ "\x09\xfc\x91\x96\x47\x87\x4f\x1a"
+ "\xd6\x6b\x2c\x18\x47\xc0\xb8\x24"
+ "\xa8\x5a\x4a\x9e\xcb\x03\xe7\x2a"
+ "\x09\xe6\x4d\x9c\x6d\x86\x60\xf5"
+ "\x2f\x48\x69\x37\x9f\xf2\xd2\xcb"
+ "\x0e\x5a\xdd\x6e\x8a\xfb\x6a\xfe"
+ "\x0b\x63\xde\x87\x42\x79\x8a\x68"
+ "\x51\x28\x9b\x7a\xeb\xaf\xb8\x2f"
+ "\x9d\xd1\xc7\x45\x90\x08\xc9\x83"
+ "\xe9\x83\x84\xcb\x28\x69\x09\x69"
+ "\xce\x99\x46\x00\x54\xcb\xd8\x38"
+ "\xf9\x53\x4a\xbf\x31\xce\x57\x15"
+ "\x33\xfa\x96\x04\x33\x42\xe3\xc0"
+ "\xb7\x54\x4a\x65\x7a\x7c\x02\xe6"
+ "\x19\x95\xd0\x0e\x82\x07\x63\xf9"
+ "\xe1\x2b\x2a\xfc\x55\x92\x52\xc9"
+ "\xb5\x9f\x23\x28\x60\xe7\x20\x51"
+ "\x10\xd3\xed\x6d\x9b\xab\xb8\xe2"
+ "\x5d\x9a\x34\xb3\xbe\x9c\x64\xcb"
+ "\x78\xc6\x91\x22\x40\x91\x80\xbe"
+ "\xd7\x78\x5c\x0e\x0a\xdc\x08\xe9"
+ "\x67\x10\xa4\x83\x98\x79\x23\xe7"
+ "\x92\xda\xa9\x22\x16\xb1\xe7\x78"
+ "\xa3\x1c\x6c\x8f\x35\x7c\x4d\x37"
+ "\x2f\x6e\x0b\x50\x5c\x34\xb9\xf9"
+ "\xe6\x3d\x91\x0d\x32\x95\xaa\x3d"
+ "\x48\x11\x06\xbb\x2d\xf2\x63\x88"
+ "\x3f\x73\x09\xe2\x45\x56\x31\x51"
+ "\xfa\x5e\x4e\x62\xf7\x90\xf9\xa9"
+ "\x7d\x7b\x1b\xb1\xc8\x26\x6e\x66"
+ "\xf6\x90\x9a\x7f\xf2\x57\xcc\x23"
+ "\x59\xfa\xfa\xaa\x44\x04\x01\xa7"
+ "\xa4\x78\xdb\x74\x3d\x8b\xb5";
+
+static const u8 __initconst ctext12[735] =
+ "\x84\x0b\xdb\xd5\xb7\xa8\xfe\x20"
+ "\xbb\xb1\x12\x7f\x41\xea\xb3\xc0"
+ "\xa2\xb4\x37\x19\x11\x58\xb6\x0b"
+ "\x4c\x1d\x38\x05\x54\xd1\x16\x73"
+ "\x8e\x1c\x20\x90\xa2\x9a\xb7\x74"
+ "\x47\xe6\xd8\xfc\x18\x3a\xb4\xea"
+ "\xd5\x16\x5a\x2c\x53\x01\x46\xb3"
+ "\x18\x33\x74\x6c\x50\xf2\xe8\xc0"
+ "\x73\xda\x60\x22\xeb\xe3\xe5\x9b"
+ "\x20\x93\x6c\x4b\x37\x99\xb8\x23"
+ "\x3b\x4e\xac\xe8\x5b\xe8\x0f\xb7"
+ "\xc3\x8f\xfb\x4a\x37\xd9\x39\x95"
+ "\x34\xf1\xdb\x8f\x71\xd9\xc7\x0b"
+ "\x02\xf1\x63\xfc\x9b\xfc\xc5\xab"
+ "\xb9\x14\x13\x21\xdf\xce\xaa\x88"
+ "\x44\x30\x1e\xce\x26\x01\x92\xf8"
+ "\x9f\x00\x4b\x0c\x4b\xf7\x5f\xe0"
+ "\x89\xca\x94\x66\x11\x21\x97\xca"
+ "\x3e\x83\x74\x2d\xdb\x4d\x11\xeb"
+ "\x97\xc2\x14\xff\x9e\x1e\xa0\x6b"
+ "\x08\xb4\x31\x2b\x85\xc6\x85\x6c"
+ "\x90\xec\x39\xc0\xec\xb3\xb5\x4e"
+ "\xf3\x9c\xe7\x83\x3a\x77\x0a\xf4"
+ "\x56\xfe\xce\x18\x33\x6d\x0b\x2d"
+ "\x33\xda\xc8\x05\x5c\xb4\x09\x2a"
+ "\xde\x6b\x52\x98\x01\xef\x36\x3d"
+ "\xbd\xf9\x8f\xa8\x3e\xaa\xcd\xd1"
+ "\x01\x2d\x42\x49\xc3\xb6\x84\xbb"
+ "\x48\x96\xe0\x90\x93\x6c\x48\x64"
+ "\xd4\xfa\x7f\x93\x2c\xa6\x21\xc8"
+ "\x7a\x23\x7b\xaa\x20\x56\x12\xae"
+ "\x16\x9d\x94\x0f\x54\xa1\xec\xca"
+ "\x51\x4e\xf2\x39\xf4\xf8\x5f\x04"
+ "\x5a\x0d\xbf\xf5\x83\xa1\x15\xe1"
+ "\xf5\x3c\xd8\x62\xa3\xed\x47\x89"
+ "\x85\x4c\xe5\xdb\xac\x9e\x17\x1d"
+ "\x0c\x09\xe3\x3e\x39\x5b\x4d\x74"
+ "\x0e\xf5\x34\xee\x70\x11\x4c\xfd"
+ "\xdb\x34\xb1\xb5\x10\x3f\x73\xb7"
+ "\xf5\xfa\xed\xb0\x1f\xa5\xcd\x3c"
+ "\x8d\x35\x83\xd4\x11\x44\x6e\x6c"
+ "\x5b\xe0\x0e\x69\xa5\x39\xe5\xbb"
+ "\xa9\x57\x24\x37\xe6\x1f\xdd\xcf"
+ "\x16\x2a\x13\xf9\x6a\x2d\x90\xa0"
+ "\x03\x60\x7a\xed\x69\xd5\x00\x8b"
+ "\x7e\x4f\xcb\xb9\xfa\x91\xb9\x37"
+ "\xc1\x26\xce\x90\x97\x22\x64\x64"
+ "\xc1\x72\x43\x1b\xf6\xac\xc1\x54"
+ "\x8a\x10\x9c\xdd\x8d\xd5\x8e\xb2"
+ "\xe4\x85\xda\xe0\x20\x5f\xf4\xb4"
+ "\x15\xb5\xa0\x8d\x12\x74\x49\x23"
+ "\x3a\xdf\x4a\xd3\xf0\x3b\x89\xeb"
+ "\xf8\xcc\x62\x7b\xfb\x93\x07\x41"
+ "\x61\x26\x94\x58\x70\xa6\x3c\xe4"
+ "\xff\x58\xc4\x13\x3d\xcb\x36\x6b"
+ "\x32\xe5\xb2\x6d\x03\x74\x6f\x76"
+ "\x93\x77\xde\x48\xc4\xfa\x30\x4a"
+ "\xda\x49\x80\x77\x0f\x1c\xbe\x11"
+ "\xc8\x48\xb1\xe5\xbb\xf2\x8a\xe1"
+ "\x96\x2f\x9f\xd1\x8e\x8a\x5c\xe2"
+ "\xf7\xd7\xd8\x54\xf3\x3f\xc4\x91"
+ "\xb8\xfb\x86\xdc\x46\x24\x91\x60"
+ "\x6c\x2f\xc9\x41\x37\x51\x49\x54"
+ "\x09\x81\x21\xf3\x03\x9f\x2b\xe3"
+ "\x1f\x39\x63\xaf\xf4\xd7\x53\x60"
+ "\xa7\xc7\x54\xf9\xee\xb1\xb1\x7d"
+ "\x75\x54\x65\x93\xfe\xb1\x68\x6b"
+ "\x57\x02\xf9\xbb\x0e\xf9\xf8\xbf"
+ "\x01\x12\x27\xb4\xfe\xe4\x79\x7a"
+ "\x40\x5b\x51\x4b\xdf\x38\xec\xb1"
+ "\x6a\x56\xff\x35\x4d\x42\x33\xaa"
+ "\x6f\x1b\xe4\xdc\xe0\xdb\x85\x35"
+ "\x62\x10\xd4\xec\xeb\xc5\x7e\x45"
+ "\x1c\x6f\x17\xca\x3b\x8e\x2d\x66"
+ "\x4f\x4b\x36\x56\xcd\x1b\x59\xaa"
+ "\xd2\x9b\x17\xb9\x58\xdf\x7b\x64"
+ "\x8a\xff\x3b\x9c\xa6\xb5\x48\x9e"
+ "\xaa\xe2\x5d\x09\x71\x32\x5f\xb6"
+ "\x29\xbe\xe7\xc7\x52\x7e\x91\x82"
+ "\x6b\x6d\x33\xe1\x34\x06\x36\x21"
+ "\x5e\xbe\x1e\x2f\x3e\xc1\xfb\xea"
+ "\x49\x2c\xb5\xca\xf7\xb0\x37\xea"
+ "\x1f\xed\x10\x04\xd9\x48\x0d\x1a"
+ "\x1c\xfb\xe7\x84\x0e\x83\x53\x74"
+ "\xc7\x65\xe2\x5c\xe5\xba\x73\x4c"
+ "\x0e\xe1\xb5\x11\x45\x61\x43\x46"
+ "\xaa\x25\x8f\xbd\x85\x08\xfa\x4c"
+ "\x15\xc1\xc0\xd8\xf5\xdc\x16\xbb"
+ "\x7b\x1d\xe3\x87\x57\xa7\x2a\x1d"
+ "\x38\x58\x9e\x8a\x43\xdc\x57"
+ "\xd1\x81\x7d\x2b\xe9\xff\x99\x3a"
+ "\x4b\x24\x52\x58\x55\xe1\x49\x14";
+
+static struct {
+ const u8 *ptext;
+ const u8 *ctext;
+
+ u8 key[AES_MAX_KEY_SIZE];
+ u8 iv[GCM_AES_IV_SIZE];
+ u8 assoc[20];
+
+ int klen;
+ int clen;
+ int plen;
+ int alen;
+} const aesgcm_tv[] __initconst = {
+ { /* From McGrew & Viega - http://citeseer.ist.psu.edu/656989.html */
+ .klen = 16,
+ .ctext = ctext0,
+ .clen = sizeof(ctext0),
+ }, {
+ .klen = 16,
+ .ptext = ptext1,
+ .plen = sizeof(ptext1),
+ .ctext = ctext1,
+ .clen = sizeof(ctext1),
+ }, {
+ .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+ "\x6d\x6a\x8f\x94\x67\x30\x83\x08",
+ .klen = 16,
+ .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad"
+ "\xde\xca\xf8\x88",
+ .ptext = ptext2,
+ .plen = sizeof(ptext2),
+ .ctext = ctext2,
+ .clen = sizeof(ctext2),
+ }, {
+ .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+ "\x6d\x6a\x8f\x94\x67\x30\x83\x08",
+ .klen = 16,
+ .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad"
+ "\xde\xca\xf8\x88",
+ .ptext = ptext3,
+ .plen = sizeof(ptext3),
+ .assoc = "\xfe\xed\xfa\xce\xde\xad\xbe\xef"
+ "\xfe\xed\xfa\xce\xde\xad\xbe\xef"
+ "\xab\xad\xda\xd2",
+ .alen = 20,
+ .ctext = ctext3,
+ .clen = sizeof(ctext3),
+ }, {
+ .klen = 24,
+ .ctext = ctext4,
+ .clen = sizeof(ctext4),
+ }, {
+ .klen = 24,
+ .ptext = ptext1,
+ .plen = sizeof(ptext1),
+ .ctext = ctext5,
+ .clen = sizeof(ctext5),
+ }, {
+ .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+ "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+ "\xfe\xff\xe9\x92\x86\x65\x73\x1c",
+ .klen = 24,
+ .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad"
+ "\xde\xca\xf8\x88",
+ .ptext = ptext6,
+ .plen = sizeof(ptext6),
+ .ctext = ctext6,
+ .clen = sizeof(ctext6),
+ }, {
+ .klen = 32,
+ .ctext = ctext7,
+ .clen = sizeof(ctext7),
+ }, {
+ .klen = 32,
+ .ptext = ptext1,
+ .plen = sizeof(ptext1),
+ .ctext = ctext8,
+ .clen = sizeof(ctext8),
+ }, {
+ .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+ "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+ "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+ "\x6d\x6a\x8f\x94\x67\x30\x83\x08",
+ .klen = 32,
+ .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad"
+ "\xde\xca\xf8\x88",
+ .ptext = ptext9,
+ .plen = sizeof(ptext9),
+ .ctext = ctext9,
+ .clen = sizeof(ctext9),
+ }, {
+ .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+ "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+ "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+ "\x6d\x6a\x8f\x94\x67\x30\x83\x08",
+ .klen = 32,
+ .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad"
+ "\xde\xca\xf8\x88",
+ .ptext = ptext10,
+ .plen = sizeof(ptext10),
+ .assoc = "\xfe\xed\xfa\xce\xde\xad\xbe\xef"
+ "\xfe\xed\xfa\xce\xde\xad\xbe\xef"
+ "\xab\xad\xda\xd2",
+ .alen = 20,
+ .ctext = ctext10,
+ .clen = sizeof(ctext10),
+ }, {
+ .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+ "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+ "\xfe\xff\xe9\x92\x86\x65\x73\x1c",
+ .klen = 24,
+ .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad"
+ "\xde\xca\xf8\x88",
+ .ptext = ptext11,
+ .plen = sizeof(ptext11),
+ .assoc = "\xfe\xed\xfa\xce\xde\xad\xbe\xef"
+ "\xfe\xed\xfa\xce\xde\xad\xbe\xef"
+ "\xab\xad\xda\xd2",
+ .alen = 20,
+ .ctext = ctext11,
+ .clen = sizeof(ctext11),
+ }, {
+ .key = "\x62\x35\xf8\x95\xfc\xa5\xeb\xf6"
+ "\x0e\x92\x12\x04\xd3\xa1\x3f\x2e"
+ "\x8b\x32\xcf\xe7\x44\xed\x13\x59"
+ "\x04\x38\x77\xb0\xb9\xad\xb4\x38",
+ .klen = 32,
+ .iv = "\x00\xff\xff\xff\xff\x00\x00\xff"
+ "\xff\xff\x00\xff",
+ .ptext = ptext12,
+ .plen = sizeof(ptext12),
+ .ctext = ctext12,
+ .clen = sizeof(ctext12),
+ }
+};
+
+static int __init libaesgcm_init(void)
+{
+ for (int i = 0; i < ARRAY_SIZE(aesgcm_tv); i++) {
+ u8 tagbuf[AES_BLOCK_SIZE];
+ int plen = aesgcm_tv[i].plen;
+ struct aesgcm_ctx ctx;
+ u8 buf[sizeof(ptext12)];
+
+ if (aesgcm_expandkey(&ctx, aesgcm_tv[i].key, aesgcm_tv[i].klen,
+ aesgcm_tv[i].clen - plen)) {
+ pr_err("aesgcm_expandkey() failed on vector %d\n", i);
+ return -ENODEV;
+ }
+
+ if (!aesgcm_decrypt(&ctx, buf, aesgcm_tv[i].ctext, plen,
+ aesgcm_tv[i].assoc, aesgcm_tv[i].alen,
+ aesgcm_tv[i].iv, aesgcm_tv[i].ctext + plen)
+ || memcmp(buf, aesgcm_tv[i].ptext, plen)) {
+ pr_err("aesgcm_decrypt() #1 failed on vector %d\n", i);
+ return -ENODEV;
+ }
+
+ /* encrypt in place */
+ aesgcm_encrypt(&ctx, buf, buf, plen, aesgcm_tv[i].assoc,
+ aesgcm_tv[i].alen, aesgcm_tv[i].iv, tagbuf);
+ if (memcmp(buf, aesgcm_tv[i].ctext, plen)) {
+ pr_err("aesgcm_encrypt() failed on vector %d\n", i);
+ return -ENODEV;
+ }
+
+ /* decrypt in place */
+ if (!aesgcm_decrypt(&ctx, buf, buf, plen, aesgcm_tv[i].assoc,
+ aesgcm_tv[i].alen, aesgcm_tv[i].iv, tagbuf)
+ || memcmp(buf, aesgcm_tv[i].ptext, plen)) {
+ pr_err("aesgcm_decrypt() #2 failed on vector %d\n", i);
+ return -ENODEV;
+ }
+ }
+ return 0;
+}
+module_init(libaesgcm_init);
+
+static void __exit libaesgcm_exit(void)
+{
+}
+module_exit(libaesgcm_exit);
+#endif
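
Editor's note: the boot-time self-test above already doubles as reference
usage; the pared-down sketch below isolates the happy path. The module
wrapper, key, IV and message are hypothetical, and it assumes the API above
is exposed via <crypto/aesgcm.h>, with GCM_AES_IV_SIZE coming from
<crypto/gcm.h>.

#include <crypto/aesgcm.h>
#include <linux/errno.h>
#include <linux/module.h>

static int __init gcm_demo_init(void)
{
	static const u8 key[16];		/* demo key (all zeroes) */
	static const u8 iv[GCM_AES_IV_SIZE];	/* must never repeat per key */
	u8 msg[32] = "authenticated and encrypted";
	u8 ct[32], pt[32], tag[16];
	struct aesgcm_ctx ctx;

	if (aesgcm_expandkey(&ctx, key, sizeof(key), sizeof(tag)))
		return -EINVAL;

	/* No associated data in this sketch: assoc == NULL, assoc_len == 0 */
	aesgcm_encrypt(&ctx, ct, msg, sizeof(msg), NULL, 0, iv, tag);

	/* The tag is verified before any plaintext is written back */
	if (!aesgcm_decrypt(&ctx, pt, ct, sizeof(ct), NULL, 0, iv, tag))
		return -EBADMSG;

	return 0;
}
module_init(gcm_demo_init);

MODULE_DESCRIPTION("AES-GCM library usage sketch");
MODULE_LICENSE("GPL");

Requesting sizeof(tag) == 16 keeps the full tag; aesgcm_expandkey() rejects
tag sizes the GCM specification does not permit, and aesgcm_decrypt()
compares tags with crypto_memneq() before releasing any plaintext.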
diff --git a/lib/crypto/arc4.c b/lib/crypto/arc4.c
new file mode 100644
index 000000000..c2020f19c
--- /dev/null
+++ b/lib/crypto/arc4.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Cryptographic API
+ *
+ * ARC4 Cipher Algorithm
+ *
+ * Jon Oberheide <jon@oberheide.org>
+ */
+
+#include <crypto/arc4.h>
+#include <linux/module.h>
+
+int arc4_setkey(struct arc4_ctx *ctx, const u8 *in_key, unsigned int key_len)
+{
+ int i, j = 0, k = 0;
+
+ ctx->x = 1;
+ ctx->y = 0;
+
+ for (i = 0; i < 256; i++)
+ ctx->S[i] = i;
+
+ for (i = 0; i < 256; i++) {
+ u32 a = ctx->S[i];
+
+ j = (j + in_key[k] + a) & 0xff;
+ ctx->S[i] = ctx->S[j];
+ ctx->S[j] = a;
+ if (++k >= key_len)
+ k = 0;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(arc4_setkey);
+
+void arc4_crypt(struct arc4_ctx *ctx, u8 *out, const u8 *in, unsigned int len)
+{
+ u32 *const S = ctx->S;
+ u32 x, y, a, b;
+ u32 ty, ta, tb;
+
+ if (len == 0)
+ return;
+
+ x = ctx->x;
+ y = ctx->y;
+
+ a = S[x];
+ y = (y + a) & 0xff;
+ b = S[y];
+
+ do {
+ S[y] = a;
+ a = (a + b) & 0xff;
+ S[x] = b;
+ x = (x + 1) & 0xff;
+ ta = S[x];
+ ty = (y + ta) & 0xff;
+ tb = S[ty];
+ *out++ = *in++ ^ S[a];
+ if (--len == 0)
+ break;
+ y = ty;
+ a = ta;
+ b = tb;
+ } while (true);
+
+ ctx->x = x;
+ ctx->y = y;
+}
+EXPORT_SYMBOL(arc4_crypt);
+
+MODULE_LICENSE("GPL");
diff --git a/lib/crypto/blake2s-generic.c b/lib/crypto/blake2s-generic.c
new file mode 100644
index 000000000..3b6dcfdd9
--- /dev/null
+++ b/lib/crypto/blake2s-generic.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ *
+ * This is an implementation of the BLAKE2s hash and PRF functions.
+ *
+ * Information: https://blake2.net/
+ */
+
+#include <crypto/internal/blake2s.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/bug.h>
+#include <asm/unaligned.h>
+
+static const u8 blake2s_sigma[10][16] = {
+ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
+ { 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 },
+ { 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 },
+ { 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 },
+ { 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 },
+ { 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 },
+ { 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 },
+ { 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 },
+ { 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 },
+ { 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0 },
+};
+
+static inline void blake2s_increment_counter(struct blake2s_state *state,
+ const u32 inc)
+{
+ state->t[0] += inc;
+ state->t[1] += (state->t[0] < inc);
+}
+
+void blake2s_compress(struct blake2s_state *state, const u8 *block,
+ size_t nblocks, const u32 inc)
+ __weak __alias(blake2s_compress_generic);
+
+void blake2s_compress_generic(struct blake2s_state *state, const u8 *block,
+ size_t nblocks, const u32 inc)
+{
+ u32 m[16];
+ u32 v[16];
+ int i;
+
+ WARN_ON(IS_ENABLED(DEBUG) &&
+ (nblocks > 1 && inc != BLAKE2S_BLOCK_SIZE));
+
+ while (nblocks > 0) {
+ blake2s_increment_counter(state, inc);
+ memcpy(m, block, BLAKE2S_BLOCK_SIZE);
+ le32_to_cpu_array(m, ARRAY_SIZE(m));
+ memcpy(v, state->h, 32);
+ v[ 8] = BLAKE2S_IV0;
+ v[ 9] = BLAKE2S_IV1;
+ v[10] = BLAKE2S_IV2;
+ v[11] = BLAKE2S_IV3;
+ v[12] = BLAKE2S_IV4 ^ state->t[0];
+ v[13] = BLAKE2S_IV5 ^ state->t[1];
+ v[14] = BLAKE2S_IV6 ^ state->f[0];
+ v[15] = BLAKE2S_IV7 ^ state->f[1];
+
+#define G(r, i, a, b, c, d) do { \
+ a += b + m[blake2s_sigma[r][2 * i + 0]]; \
+ d = ror32(d ^ a, 16); \
+ c += d; \
+ b = ror32(b ^ c, 12); \
+ a += b + m[blake2s_sigma[r][2 * i + 1]]; \
+ d = ror32(d ^ a, 8); \
+ c += d; \
+ b = ror32(b ^ c, 7); \
+} while (0)
+
+#define ROUND(r) do { \
+ G(r, 0, v[0], v[ 4], v[ 8], v[12]); \
+ G(r, 1, v[1], v[ 5], v[ 9], v[13]); \
+ G(r, 2, v[2], v[ 6], v[10], v[14]); \
+ G(r, 3, v[3], v[ 7], v[11], v[15]); \
+ G(r, 4, v[0], v[ 5], v[10], v[15]); \
+ G(r, 5, v[1], v[ 6], v[11], v[12]); \
+ G(r, 6, v[2], v[ 7], v[ 8], v[13]); \
+ G(r, 7, v[3], v[ 4], v[ 9], v[14]); \
+} while (0)
+ ROUND(0);
+ ROUND(1);
+ ROUND(2);
+ ROUND(3);
+ ROUND(4);
+ ROUND(5);
+ ROUND(6);
+ ROUND(7);
+ ROUND(8);
+ ROUND(9);
+
+#undef G
+#undef ROUND
+
+ for (i = 0; i < 8; ++i)
+ state->h[i] ^= v[i] ^ v[i + 8];
+
+ block += BLAKE2S_BLOCK_SIZE;
+ --nblocks;
+ }
+}
+EXPORT_SYMBOL(blake2s_compress_generic);
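
Editor's note: blake2s_compress_generic() is only the compression core;
callers normally go through the helpers in <crypto/blake2s.h>. The sketch
below assumes the one-shot blake2s() wrapper and BLAKE2S_HASH_SIZE from that
header; the module scaffolding and inputs are hypothetical.

#include <crypto/blake2s.h>
#include <linux/module.h>

static int __init blake2s_demo_init(void)
{
	u8 digest[BLAKE2S_HASH_SIZE];	/* 32-byte output */
	static const u8 msg[] = "abc";
	static const u8 key[] = "prf key";

	/* Unkeyed hash: key == NULL, keylen == 0 */
	blake2s(digest, msg, NULL, sizeof(digest), sizeof(msg) - 1, 0);

	/* Keyed (PRF) mode, as used e.g. by WireGuard */
	blake2s(digest, msg, key, sizeof(digest), sizeof(msg) - 1,
		sizeof(key) - 1);

	return 0;
}
module_init(blake2s_demo_init);

MODULE_DESCRIPTION("BLAKE2s library usage sketch");
MODULE_LICENSE("GPL");

The __weak __alias declaration above lets an architecture supply an optimized
blake2s_compress() while this generic version remains the fallback.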
diff --git a/lib/crypto/blake2s-selftest.c b/lib/crypto/blake2s-selftest.c
new file mode 100644
index 000000000..d0634ed6a
--- /dev/null
+++ b/lib/crypto/blake2s-selftest.c
@@ -0,0 +1,651 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#include <crypto/internal/blake2s.h>
+#include <linux/kernel.h>
+#include <linux/random.h>
+#include <linux/string.h>
+
+/*
+ * blake2s_testvecs[] generated with the program below (using libb2-dev and
+ * libssl-dev [OpenSSL])
+ *
+ * #include <blake2.h>
+ * #include <stdint.h>
+ * #include <stdio.h>
+ *
+ * #include <openssl/evp.h>
+ *
+ * #define BLAKE2S_TESTVEC_COUNT 256
+ *
+ * static void print_vec(const uint8_t vec[], int len)
+ * {
+ * int i;
+ *
+ * printf(" { ");
+ * for (i = 0; i < len; i++) {
+ * if (i && (i % 12) == 0)
+ * printf("\n ");
+ * printf("0x%02x, ", vec[i]);
+ * }
+ * printf("},\n");
+ * }
+ *
+ * int main(void)
+ * {
+ * uint8_t key[BLAKE2S_KEYBYTES];
+ * uint8_t buf[BLAKE2S_TESTVEC_COUNT];
+ * uint8_t hash[BLAKE2S_OUTBYTES];
+ * int i, j;
+ *
+ * key[0] = key[1] = 1;
+ * for (i = 2; i < BLAKE2S_KEYBYTES; ++i)
+ * key[i] = key[i - 2] + key[i - 1];
+ *
+ * for (i = 0; i < BLAKE2S_TESTVEC_COUNT; ++i)
+ * buf[i] = (uint8_t)i;
+ *
+ * printf("static const u8 blake2s_testvecs[][BLAKE2S_HASH_SIZE] __initconst = {\n");
+ *
+ * for (i = 0; i < BLAKE2S_TESTVEC_COUNT; ++i) {
+ * int outlen = 1 + i % BLAKE2S_OUTBYTES;
+ * int keylen = (13 * i) % (BLAKE2S_KEYBYTES + 1);
+ *
+ * blake2s(hash, buf, key + BLAKE2S_KEYBYTES - keylen, outlen, i,
+ * keylen);
+ * print_vec(hash, outlen);
+ * }
+ * printf("};\n\n");
+ *
+ * return 0;
+ *}
+ */
+static const u8 blake2s_testvecs[][BLAKE2S_HASH_SIZE] __initconst = {
+ { 0xa1, },
+ { 0x7c, 0x89, },
+ { 0x74, 0x0e, 0xd4, },
+ { 0x47, 0x0c, 0x21, 0x15, },
+ { 0x18, 0xd6, 0x9c, 0xa6, 0xc4, },
+ { 0x13, 0x5d, 0x16, 0x63, 0x2e, 0xf9, },
+ { 0x2c, 0xb5, 0x04, 0xb7, 0x99, 0xe2, 0x73, },
+ { 0x9a, 0x0f, 0xd2, 0x39, 0xd6, 0x68, 0x1b, 0x92, },
+ { 0xc8, 0xde, 0x7a, 0xea, 0x2f, 0xf4, 0xd2, 0xe3, 0x2b, },
+ { 0x5b, 0xf9, 0x43, 0x52, 0x0c, 0x12, 0xba, 0xb5, 0x93, 0x9f, },
+ { 0xc6, 0x2c, 0x4e, 0x80, 0xfc, 0x32, 0x5b, 0x33, 0xb8, 0xb8, 0x0a, },
+ { 0xa7, 0x5c, 0xfd, 0x3a, 0xcc, 0xbf, 0x90, 0xca, 0xb7, 0x97, 0xde, 0xd8, },
+ { 0x66, 0xca, 0x3c, 0xc4, 0x19, 0xef, 0x92, 0x66, 0x3f, 0x21, 0x8f, 0xda,
+ 0xb7, },
+ { 0xba, 0xe5, 0xbb, 0x30, 0x25, 0x94, 0x6d, 0xc3, 0x89, 0x09, 0xc4, 0x25,
+ 0x52, 0x3e, },
+ { 0xa2, 0xef, 0x0e, 0x52, 0x0b, 0x5f, 0xa2, 0x01, 0x6d, 0x0a, 0x25, 0xbc,
+ 0x57, 0xe2, 0x27, },
+ { 0x4f, 0xe0, 0xf9, 0x52, 0x12, 0xda, 0x84, 0xb7, 0xab, 0xae, 0xb0, 0xa6,
+ 0x47, 0x2a, 0xc7, 0xf5, },
+ { 0x56, 0xe7, 0xa8, 0x1c, 0x4c, 0xca, 0xed, 0x90, 0x31, 0xec, 0x87, 0x43,
+ 0xe7, 0x72, 0x08, 0xec, 0xbe, },
+ { 0x7e, 0xdf, 0x80, 0x1c, 0x93, 0x33, 0xfd, 0x53, 0x44, 0xba, 0xfd, 0x96,
+ 0xe1, 0xbb, 0xb5, 0x65, 0xa5, 0x00, },
+ { 0xec, 0x6b, 0xed, 0xf7, 0x7b, 0x62, 0x1d, 0x7d, 0xf4, 0x82, 0xf3, 0x1e,
+ 0x18, 0xff, 0x2b, 0xc4, 0x06, 0x20, 0x2a, },
+ { 0x74, 0x98, 0xd7, 0x68, 0x63, 0xed, 0x87, 0xe4, 0x5d, 0x8d, 0x9e, 0x1d,
+ 0xfd, 0x2a, 0xbb, 0x86, 0xac, 0xe9, 0x2a, 0x89, },
+ { 0x89, 0xc3, 0x88, 0xce, 0x2b, 0x33, 0x1e, 0x10, 0xd1, 0x37, 0x20, 0x86,
+ 0x28, 0x43, 0x70, 0xd9, 0xfb, 0x96, 0xd9, 0xb5, 0xd3, },
+ { 0xcb, 0x56, 0x74, 0x41, 0x8d, 0x80, 0x01, 0x9a, 0x6b, 0x38, 0xe1, 0x41,
+ 0xad, 0x9c, 0x62, 0x74, 0xce, 0x35, 0xd5, 0x6c, 0x89, 0x6e, },
+ { 0x79, 0xaf, 0x94, 0x59, 0x99, 0x26, 0xe1, 0xc9, 0x34, 0xfe, 0x7c, 0x22,
+ 0xf7, 0x43, 0xd7, 0x65, 0xd4, 0x48, 0x18, 0xac, 0x3d, 0xfd, 0x93, },
+ { 0x85, 0x0d, 0xff, 0xb8, 0x3e, 0x87, 0x41, 0xb0, 0x95, 0xd3, 0x3d, 0x00,
+ 0x47, 0x55, 0x9e, 0xd2, 0x69, 0xea, 0xbf, 0xe9, 0x7a, 0x2d, 0x61, 0x45, },
+ { 0x03, 0xe0, 0x85, 0xec, 0x54, 0xb5, 0x16, 0x53, 0xa8, 0xc4, 0x71, 0xe9,
+ 0x6a, 0xe7, 0xcb, 0xc4, 0x15, 0x02, 0xfc, 0x34, 0xa4, 0xa4, 0x28, 0x13,
+ 0xd1, },
+ { 0xe3, 0x34, 0x4b, 0xe1, 0xd0, 0x4b, 0x55, 0x61, 0x8f, 0xc0, 0x24, 0x05,
+ 0xe6, 0xe0, 0x3d, 0x70, 0x24, 0x4d, 0xda, 0xb8, 0x91, 0x05, 0x29, 0x07,
+ 0x01, 0x3e, },
+ { 0x61, 0xff, 0x01, 0x72, 0xb1, 0x4d, 0xf6, 0xfe, 0xd1, 0xd1, 0x08, 0x74,
+ 0xe6, 0x91, 0x44, 0xeb, 0x61, 0xda, 0x40, 0xaf, 0xfc, 0x8c, 0x91, 0x6b,
+ 0xec, 0x13, 0xed, },
+ { 0xd4, 0x40, 0xd2, 0xa0, 0x7f, 0xc1, 0x58, 0x0c, 0x85, 0xa0, 0x86, 0xc7,
+ 0x86, 0xb9, 0x61, 0xc9, 0xea, 0x19, 0x86, 0x1f, 0xab, 0x07, 0xce, 0x37,
+ 0x72, 0x67, 0x09, 0xfc, },
+ { 0x9e, 0xf8, 0x18, 0x67, 0x93, 0x10, 0x9b, 0x39, 0x75, 0xe8, 0x8b, 0x38,
+ 0x82, 0x7d, 0xb8, 0xb7, 0xa5, 0xaf, 0xe6, 0x6a, 0x22, 0x5e, 0x1f, 0x9c,
+ 0x95, 0x29, 0x19, 0xf2, 0x4b, },
+ { 0xc8, 0x62, 0x25, 0xf5, 0x98, 0xc9, 0xea, 0xe5, 0x29, 0x3a, 0xd3, 0x22,
+ 0xeb, 0xeb, 0x07, 0x7c, 0x15, 0x07, 0xee, 0x15, 0x61, 0xbb, 0x05, 0x30,
+ 0x99, 0x7f, 0x11, 0xf6, 0x0a, 0x1d, },
+ { 0x68, 0x70, 0xf7, 0x90, 0xa1, 0x8b, 0x1f, 0x0f, 0xbb, 0xce, 0xd2, 0x0e,
+ 0x33, 0x1f, 0x7f, 0xa9, 0x78, 0xa8, 0xa6, 0x81, 0x66, 0xab, 0x8d, 0xcd,
+ 0x58, 0x55, 0x3a, 0x0b, 0x7a, 0xdb, 0xb5, },
+ { 0xdd, 0x35, 0xd2, 0xb4, 0xf6, 0xc7, 0xea, 0xab, 0x64, 0x24, 0x4e, 0xfe,
+ 0xe5, 0x3d, 0x4e, 0x95, 0x8b, 0x6d, 0x6c, 0xbc, 0xb0, 0xf8, 0x88, 0x61,
+ 0x09, 0xb7, 0x78, 0xa3, 0x31, 0xfe, 0xd9, 0x2f, },
+ { 0x0a, },
+ { 0x6e, 0xd4, },
+ { 0x64, 0xe9, 0xd1, },
+ { 0x30, 0xdd, 0x71, 0xef, },
+ { 0x11, 0xb5, 0x0c, 0x87, 0xc9, },
+ { 0x06, 0x1c, 0x6d, 0x04, 0x82, 0xd0, },
+ { 0x5c, 0x42, 0x0b, 0xee, 0xc5, 0x9c, 0xb2, },
+ { 0xe8, 0x29, 0xd6, 0xb4, 0x5d, 0xf7, 0x2b, 0x93, },
+ { 0x18, 0xca, 0x27, 0x72, 0x43, 0x39, 0x16, 0xbc, 0x6a, },
+ { 0x39, 0x8f, 0xfd, 0x64, 0xf5, 0x57, 0x23, 0xb0, 0x45, 0xf8, },
+ { 0xbb, 0x3a, 0x78, 0x6b, 0x02, 0x1d, 0x0b, 0x16, 0xe3, 0xb2, 0x9a, },
+ { 0xb8, 0xb4, 0x0b, 0xe5, 0xd4, 0x1d, 0x0d, 0x85, 0x49, 0x91, 0x35, 0xfa, },
+ { 0x6d, 0x48, 0x2a, 0x0c, 0x42, 0x08, 0xbd, 0xa9, 0x78, 0x6f, 0x18, 0xaf,
+ 0xe2, },
+ { 0x10, 0x45, 0xd4, 0x58, 0x88, 0xec, 0x4e, 0x1e, 0xf6, 0x14, 0x92, 0x64,
+ 0x7e, 0xb0, },
+ { 0x8b, 0x0b, 0x95, 0xee, 0x92, 0xc6, 0x3b, 0x91, 0xf1, 0x1e, 0xeb, 0x51,
+ 0x98, 0x0a, 0x8d, },
+ { 0xa3, 0x50, 0x4d, 0xa5, 0x1d, 0x03, 0x68, 0xe9, 0x57, 0x78, 0xd6, 0x04,
+ 0xf1, 0xc3, 0x94, 0xd8, },
+ { 0xb8, 0x66, 0x6e, 0xdd, 0x46, 0x15, 0xae, 0x3d, 0x83, 0x7e, 0xcf, 0xe7,
+ 0x2c, 0xe8, 0x8f, 0xc7, 0x34, },
+ { 0x2e, 0xc0, 0x1f, 0x29, 0xea, 0xf6, 0xb9, 0xe2, 0xc2, 0x93, 0xeb, 0x41,
+ 0x0d, 0xf0, 0x0a, 0x13, 0x0e, 0xa2, },
+ { 0x71, 0xb8, 0x33, 0xa9, 0x1b, 0xac, 0xf1, 0xb5, 0x42, 0x8f, 0x5e, 0x81,
+ 0x34, 0x43, 0xb7, 0xa4, 0x18, 0x5c, 0x47, },
+ { 0xda, 0x45, 0xb8, 0x2e, 0x82, 0x1e, 0xc0, 0x59, 0x77, 0x9d, 0xfa, 0xb4,
+ 0x1c, 0x5e, 0xa0, 0x2b, 0x33, 0x96, 0x5a, 0x58, },
+ { 0xe3, 0x09, 0x05, 0xa9, 0xeb, 0x48, 0x13, 0xad, 0x71, 0x88, 0x81, 0x9a,
+ 0x3e, 0x2c, 0xe1, 0x23, 0x99, 0x13, 0x35, 0x9f, 0xb5, },
+ { 0xb7, 0x86, 0x2d, 0x16, 0xe1, 0x04, 0x00, 0x47, 0x47, 0x61, 0x31, 0xfb,
+ 0x14, 0xac, 0xd8, 0xe9, 0xe3, 0x49, 0xbd, 0xf7, 0x9c, 0x3f, },
+ { 0x7f, 0xd9, 0x95, 0xa8, 0xa7, 0xa0, 0xcc, 0xba, 0xef, 0xb1, 0x0a, 0xa9,
+ 0x21, 0x62, 0x08, 0x0f, 0x1b, 0xff, 0x7b, 0x9d, 0xae, 0xb2, 0x95, },
+ { 0x85, 0x99, 0xea, 0x33, 0xe0, 0x56, 0xff, 0x13, 0xc6, 0x61, 0x8c, 0xf9,
+ 0x57, 0x05, 0x03, 0x11, 0xf9, 0xfb, 0x3a, 0xf7, 0xce, 0xbb, 0x52, 0x30, },
+ { 0xb2, 0x72, 0x9c, 0xf8, 0x77, 0x4e, 0x8f, 0x6b, 0x01, 0x6c, 0xff, 0x4e,
+ 0x4f, 0x02, 0xd2, 0xbc, 0xeb, 0x51, 0x28, 0x99, 0x50, 0xab, 0xc4, 0x42,
+ 0xe3, },
+ { 0x8b, 0x0a, 0xb5, 0x90, 0x8f, 0xf5, 0x7b, 0xdd, 0xba, 0x47, 0x37, 0xc9,
+ 0x2a, 0xd5, 0x4b, 0x25, 0x08, 0x8b, 0x02, 0x17, 0xa7, 0x9e, 0x6b, 0x6e,
+ 0xe3, 0x90, },
+ { 0x90, 0xdd, 0xf7, 0x75, 0xa7, 0xa3, 0x99, 0x5e, 0x5b, 0x7d, 0x75, 0xc3,
+ 0x39, 0x6b, 0xa0, 0xe2, 0x44, 0x53, 0xb1, 0x9e, 0xc8, 0xf1, 0x77, 0x10,
+ 0x58, 0x06, 0x9a, },
+ { 0x99, 0x52, 0xf0, 0x49, 0xa8, 0x8c, 0xec, 0xa6, 0x97, 0x32, 0x13, 0xb5,
+ 0xf7, 0xa3, 0x8e, 0xfb, 0x4b, 0x59, 0x31, 0x3d, 0x01, 0x59, 0x98, 0x5d,
+ 0x53, 0x03, 0x1a, 0x39, },
+ { 0x9f, 0xe0, 0xc2, 0xe5, 0x5d, 0x93, 0xd6, 0x9b, 0x47, 0x8f, 0x9b, 0xe0,
+ 0x26, 0x35, 0x84, 0x20, 0x1d, 0xc5, 0x53, 0x10, 0x0f, 0x22, 0xb9, 0xb5,
+ 0xd4, 0x36, 0xb1, 0xac, 0x73, },
+ { 0x30, 0x32, 0x20, 0x3b, 0x10, 0x28, 0xec, 0x1f, 0x4f, 0x9b, 0x47, 0x59,
+ 0xeb, 0x7b, 0xee, 0x45, 0xfb, 0x0c, 0x49, 0xd8, 0x3d, 0x69, 0xbd, 0x90,
+ 0x2c, 0xf0, 0x9e, 0x8d, 0xbf, 0xd5, },
+ { 0x2a, 0x37, 0x73, 0x7f, 0xf9, 0x96, 0x19, 0xaa, 0x25, 0xd8, 0x13, 0x28,
+ 0x01, 0x29, 0x89, 0xdf, 0x6e, 0x0c, 0x9b, 0x43, 0x44, 0x51, 0xe9, 0x75,
+ 0x26, 0x0c, 0xb7, 0x87, 0x66, 0x0b, 0x5f, },
+ { 0x23, 0xdf, 0x96, 0x68, 0x91, 0x86, 0xd0, 0x93, 0x55, 0x33, 0x24, 0xf6,
+ 0xba, 0x08, 0x75, 0x5b, 0x59, 0x11, 0x69, 0xb8, 0xb9, 0xe5, 0x2c, 0x77,
+ 0x02, 0xf6, 0x47, 0xee, 0x81, 0xdd, 0xb9, 0x06, },
+ { 0x9d, },
+ { 0x9d, 0x7d, },
+ { 0xfd, 0xc3, 0xda, },
+ { 0xe8, 0x82, 0xcd, 0x21, },
+ { 0xc3, 0x1d, 0x42, 0x4c, 0x74, },
+ { 0xe9, 0xda, 0xf1, 0xa2, 0xe5, 0x7c, },
+ { 0x52, 0xb8, 0x6f, 0x81, 0x5c, 0x3a, 0x4c, },
+ { 0x5b, 0x39, 0x26, 0xfc, 0x92, 0x5e, 0xe0, 0x49, },
+ { 0x59, 0xe4, 0x7c, 0x93, 0x1c, 0xf9, 0x28, 0x93, 0xde, },
+ { 0xde, 0xdf, 0xb2, 0x43, 0x61, 0x0b, 0x86, 0x16, 0x4c, 0x2e, },
+ { 0x14, 0x8f, 0x75, 0x51, 0xaf, 0xb9, 0xee, 0x51, 0x5a, 0xae, 0x23, },
+ { 0x43, 0x5f, 0x50, 0xd5, 0x70, 0xb0, 0x5b, 0x87, 0xf5, 0xd9, 0xb3, 0x6d, },
+ { 0x66, 0x0a, 0x64, 0x93, 0x79, 0x71, 0x94, 0x40, 0xb7, 0x68, 0x2d, 0xd3,
+ 0x63, },
+ { 0x15, 0x00, 0xc4, 0x0c, 0x7d, 0x1b, 0x10, 0xa9, 0x73, 0x1b, 0x90, 0x6f,
+ 0xe6, 0xa9, },
+ { 0x34, 0x75, 0xf3, 0x86, 0x8f, 0x56, 0xcf, 0x2a, 0x0a, 0xf2, 0x62, 0x0a,
+ 0xf6, 0x0e, 0x20, },
+ { 0xb1, 0xde, 0xc9, 0xf5, 0xdb, 0xf3, 0x2f, 0x4c, 0xd6, 0x41, 0x7d, 0x39,
+ 0x18, 0x3e, 0xc7, 0xc3, },
+ { 0xc5, 0x89, 0xb2, 0xf8, 0xb8, 0xc0, 0xa3, 0xb9, 0x3b, 0x10, 0x6d, 0x7c,
+ 0x92, 0xfc, 0x7f, 0x34, 0x41, },
+ { 0xc4, 0xd8, 0xef, 0xba, 0xef, 0xd2, 0xaa, 0xc5, 0x6c, 0x8e, 0x3e, 0xbb,
+ 0x12, 0xfc, 0x0f, 0x72, 0xbf, 0x0f, },
+ { 0xdd, 0x91, 0xd1, 0x15, 0x9e, 0x7d, 0xf8, 0xc1, 0xb9, 0x14, 0x63, 0x96,
+ 0xb5, 0xcb, 0x83, 0x1d, 0x35, 0x1c, 0xec, },
+ { 0xa9, 0xf8, 0x52, 0xc9, 0x67, 0x76, 0x2b, 0xad, 0xfb, 0xd8, 0x3a, 0xa6,
+ 0x74, 0x02, 0xae, 0xb8, 0x25, 0x2c, 0x63, 0x49, },
+ { 0x77, 0x1f, 0x66, 0x70, 0xfd, 0x50, 0x29, 0xaa, 0xeb, 0xdc, 0xee, 0xba,
+ 0x75, 0x98, 0xdc, 0x93, 0x12, 0x3f, 0xdc, 0x7c, 0x38, },
+ { 0xe2, 0xe1, 0x89, 0x5c, 0x37, 0x38, 0x6a, 0xa3, 0x40, 0xac, 0x3f, 0xb0,
+ 0xca, 0xfc, 0xa7, 0xf3, 0xea, 0xf9, 0x0f, 0x5d, 0x8e, 0x39, },
+ { 0x0f, 0x67, 0xc8, 0x38, 0x01, 0xb1, 0xb7, 0xb8, 0xa2, 0xe7, 0x0a, 0x6d,
+ 0xd2, 0x63, 0x69, 0x9e, 0xcc, 0xf0, 0xf2, 0xbe, 0x9b, 0x98, 0xdd, },
+ { 0x13, 0xe1, 0x36, 0x30, 0xfe, 0xc6, 0x01, 0x8a, 0xa1, 0x63, 0x96, 0x59,
+ 0xc2, 0xa9, 0x68, 0x3f, 0x58, 0xd4, 0x19, 0x0c, 0x40, 0xf3, 0xde, 0x02, },
+ { 0xa3, 0x9e, 0xce, 0xda, 0x42, 0xee, 0x8c, 0x6c, 0x5a, 0x7d, 0xdc, 0x89,
+ 0x02, 0x77, 0xdd, 0xe7, 0x95, 0xbb, 0xff, 0x0d, 0xa4, 0xb5, 0x38, 0x1e,
+ 0xaf, },
+ { 0x9a, 0xf6, 0xb5, 0x9a, 0x4f, 0xa9, 0x4f, 0x2c, 0x35, 0x3c, 0x24, 0xdc,
+ 0x97, 0x6f, 0xd9, 0xa1, 0x7d, 0x1a, 0x85, 0x0b, 0xf5, 0xda, 0x2e, 0xe7,
+ 0xb1, 0x1d, },
+ { 0x84, 0x1e, 0x8e, 0x3d, 0x45, 0xa5, 0xf2, 0x27, 0xf3, 0x31, 0xfe, 0xb9,
+ 0xfb, 0xc5, 0x45, 0x99, 0x99, 0xdd, 0x93, 0x43, 0x02, 0xee, 0x58, 0xaf,
+ 0xee, 0x6a, 0xbe, },
+ { 0x07, 0x2f, 0xc0, 0xa2, 0x04, 0xc4, 0xab, 0x7c, 0x26, 0xbb, 0xa8, 0xd8,
+ 0xe3, 0x1c, 0x75, 0x15, 0x64, 0x5d, 0x02, 0x6a, 0xf0, 0x86, 0xe9, 0xcd,
+ 0x5c, 0xef, 0xa3, 0x25, },
+ { 0x2f, 0x3b, 0x1f, 0xb5, 0x91, 0x8f, 0x86, 0xe0, 0xdc, 0x31, 0x48, 0xb6,
+ 0xa1, 0x8c, 0xfd, 0x75, 0xbb, 0x7d, 0x3d, 0xc1, 0xf0, 0x10, 0x9a, 0xd8,
+ 0x4b, 0x0e, 0xe3, 0x94, 0x9f, },
+ { 0x29, 0xbb, 0x8f, 0x6c, 0xd1, 0xf2, 0xb6, 0xaf, 0xe5, 0xe3, 0x2d, 0xdc,
+ 0x6f, 0xa4, 0x53, 0x88, 0xd8, 0xcf, 0x4d, 0x45, 0x42, 0x62, 0xdb, 0xdf,
+ 0xf8, 0x45, 0xc2, 0x13, 0xec, 0x35, },
+ { 0x06, 0x3c, 0xe3, 0x2c, 0x15, 0xc6, 0x43, 0x03, 0x81, 0xfb, 0x08, 0x76,
+ 0x33, 0xcb, 0x02, 0xc1, 0xba, 0x33, 0xe5, 0xe0, 0xd1, 0x92, 0xa8, 0x46,
+ 0x28, 0x3f, 0x3e, 0x9d, 0x2c, 0x44, 0x54, },
+ { 0xea, 0xbb, 0x96, 0xf8, 0xd1, 0x8b, 0x04, 0x11, 0x40, 0x78, 0x42, 0x02,
+ 0x19, 0xd1, 0xbc, 0x65, 0x92, 0xd3, 0xc3, 0xd6, 0xd9, 0x19, 0xe7, 0xc3,
+ 0x40, 0x97, 0xbd, 0xd4, 0xed, 0xfa, 0x5e, 0x28, },
+ { 0x02, },
+ { 0x52, 0xa8, },
+ { 0x38, 0x25, 0x0d, },
+ { 0xe3, 0x04, 0xd4, 0x92, },
+ { 0x97, 0xdb, 0xf7, 0x81, 0xca, },
+ { 0x8a, 0x56, 0x9d, 0x62, 0x56, 0xcc, },
+ { 0xa1, 0x8e, 0x3c, 0x72, 0x8f, 0x63, 0x03, },
+ { 0xf7, 0xf3, 0x39, 0x09, 0x0a, 0xa1, 0xbb, 0x23, },
+ { 0x6b, 0x03, 0xc0, 0xe9, 0xd9, 0x83, 0x05, 0x22, 0x01, },
+ { 0x1b, 0x4b, 0xf5, 0xd6, 0x4f, 0x05, 0x75, 0x91, 0x4c, 0x7f, },
+ { 0x4c, 0x8c, 0x25, 0x20, 0x21, 0xcb, 0xc2, 0x4b, 0x3a, 0x5b, 0x8d, },
+ { 0x56, 0xe2, 0x77, 0xa0, 0xb6, 0x9f, 0x81, 0xec, 0x83, 0x75, 0xc4, 0xf9, },
+ { 0x71, 0x70, 0x0f, 0xad, 0x4d, 0x35, 0x81, 0x9d, 0x88, 0x69, 0xf9, 0xaa,
+ 0xd3, },
+ { 0x50, 0x6e, 0x86, 0x6e, 0x43, 0xc0, 0xc2, 0x44, 0xc2, 0xe2, 0xa0, 0x1c,
+ 0xb7, 0x9a, },
+ { 0xe4, 0x7e, 0x72, 0xc6, 0x12, 0x8e, 0x7c, 0xfc, 0xbd, 0xe2, 0x08, 0x31,
+ 0x3d, 0x47, 0x3d, },
+ { 0x08, 0x97, 0x5b, 0x80, 0xae, 0xc4, 0x1d, 0x50, 0x77, 0xdf, 0x1f, 0xd0,
+ 0x24, 0xf0, 0x17, 0xc0, },
+ { 0x01, 0xb6, 0x29, 0xf4, 0xaf, 0x78, 0x5f, 0xb6, 0x91, 0xdd, 0x76, 0x76,
+ 0xd2, 0xfd, 0x0c, 0x47, 0x40, },
+ { 0xa1, 0xd8, 0x09, 0x97, 0x7a, 0xa6, 0xc8, 0x94, 0xf6, 0x91, 0x7b, 0xae,
+ 0x2b, 0x9f, 0x0d, 0x83, 0x48, 0xf7, },
+ { 0x12, 0xd5, 0x53, 0x7d, 0x9a, 0xb0, 0xbe, 0xd9, 0xed, 0xe9, 0x9e, 0xee,
+ 0x61, 0x5b, 0x42, 0xf2, 0xc0, 0x73, 0xc0, },
+ { 0xd5, 0x77, 0xd6, 0x5c, 0x6e, 0xa5, 0x69, 0x2b, 0x3b, 0x8c, 0xd6, 0x7d,
+ 0x1d, 0xbe, 0x2c, 0xa1, 0x02, 0x21, 0xcd, 0x29, },
+ { 0xa4, 0x98, 0x80, 0xca, 0x22, 0xcf, 0x6a, 0xab, 0x5e, 0x40, 0x0d, 0x61,
+ 0x08, 0x21, 0xef, 0xc0, 0x6c, 0x52, 0xb4, 0xb0, 0x53, },
+ { 0xbf, 0xaf, 0x8f, 0x3b, 0x7a, 0x97, 0x33, 0xe5, 0xca, 0x07, 0x37, 0xfd,
+ 0x15, 0xdf, 0xce, 0x26, 0x2a, 0xb1, 0xa7, 0x0b, 0xb3, 0xac, },
+ { 0x16, 0x22, 0xe1, 0xbc, 0x99, 0x4e, 0x01, 0xf0, 0xfa, 0xff, 0x8f, 0xa5,
+ 0x0c, 0x61, 0xb0, 0xad, 0xcc, 0xb1, 0xe1, 0x21, 0x46, 0xfa, 0x2e, },
+ { 0x11, 0x5b, 0x0b, 0x2b, 0xe6, 0x14, 0xc1, 0xd5, 0x4d, 0x71, 0x5e, 0x17,
+ 0xea, 0x23, 0xdd, 0x6c, 0xbd, 0x1d, 0xbe, 0x12, 0x1b, 0xee, 0x4c, 0x1a, },
+ { 0x40, 0x88, 0x22, 0xf3, 0x20, 0x6c, 0xed, 0xe1, 0x36, 0x34, 0x62, 0x2c,
+ 0x98, 0x83, 0x52, 0xe2, 0x25, 0xee, 0xe9, 0xf5, 0xe1, 0x17, 0xf0, 0x5c,
+ 0xae, },
+ { 0xc3, 0x76, 0x37, 0xde, 0x95, 0x8c, 0xca, 0x2b, 0x0c, 0x23, 0xe7, 0xb5,
+ 0x38, 0x70, 0x61, 0xcc, 0xff, 0xd3, 0x95, 0x7b, 0xf3, 0xff, 0x1f, 0x9d,
+ 0x59, 0x00, },
+ { 0x0c, 0x19, 0x52, 0x05, 0x22, 0x53, 0xcb, 0x48, 0xd7, 0x10, 0x0e, 0x7e,
+ 0x14, 0x69, 0xb5, 0xa2, 0x92, 0x43, 0xa3, 0x9e, 0x4b, 0x8f, 0x51, 0x2c,
+ 0x5a, 0x2c, 0x3b, },
+ { 0xe1, 0x9d, 0x70, 0x70, 0x28, 0xec, 0x86, 0x40, 0x55, 0x33, 0x56, 0xda,
+ 0x88, 0xca, 0xee, 0xc8, 0x6a, 0x20, 0xb1, 0xe5, 0x3d, 0x57, 0xf8, 0x3c,
+ 0x10, 0x07, 0x2a, 0xc4, },
+ { 0x0b, 0xae, 0xf1, 0xc4, 0x79, 0xee, 0x1b, 0x3d, 0x27, 0x35, 0x8d, 0x14,
+ 0xd6, 0xae, 0x4e, 0x3c, 0xe9, 0x53, 0x50, 0xb5, 0xcc, 0x0c, 0xf7, 0xdf,
+ 0xee, 0xa1, 0x74, 0xd6, 0x71, },
+ { 0xe6, 0xa4, 0xf4, 0x99, 0x98, 0xb9, 0x80, 0xea, 0x96, 0x7f, 0x4f, 0x33,
+ 0xcf, 0x74, 0x25, 0x6f, 0x17, 0x6c, 0xbf, 0xf5, 0x5c, 0x38, 0xd0, 0xff,
+ 0x96, 0xcb, 0x13, 0xf9, 0xdf, 0xfd, },
+ { 0xbe, 0x92, 0xeb, 0xba, 0x44, 0x2c, 0x24, 0x74, 0xd4, 0x03, 0x27, 0x3c,
+ 0x5d, 0x5b, 0x03, 0x30, 0x87, 0x63, 0x69, 0xe0, 0xb8, 0x94, 0xf4, 0x44,
+ 0x7e, 0xad, 0xcd, 0x20, 0x12, 0x16, 0x79, },
+ { 0x30, 0xf1, 0xc4, 0x8e, 0x05, 0x90, 0x2a, 0x97, 0x63, 0x94, 0x46, 0xff,
+ 0xce, 0xd8, 0x67, 0xa7, 0xac, 0x33, 0x8c, 0x95, 0xb7, 0xcd, 0xa3, 0x23,
+ 0x98, 0x9d, 0x76, 0x6c, 0x9d, 0xa8, 0xd6, 0x8a, },
+ { 0xbe, },
+ { 0x17, 0x6c, },
+ { 0x1a, 0x42, 0x4f, },
+ { 0xba, 0xaf, 0xb7, 0x65, },
+ { 0xc2, 0x63, 0x43, 0x6a, 0xea, },
+ { 0xe4, 0x4d, 0xad, 0xf2, 0x0b, 0x02, },
+ { 0x04, 0xc7, 0xc4, 0x7f, 0xa9, 0x2b, 0xce, },
+ { 0x66, 0xf6, 0x67, 0xcb, 0x03, 0x53, 0xc8, 0xf1, },
+ { 0x56, 0xa3, 0x60, 0x78, 0xc9, 0x5f, 0x70, 0x1b, 0x5e, },
+ { 0x99, 0xff, 0x81, 0x7c, 0x13, 0x3c, 0x29, 0x79, 0x4b, 0x65, },
+ { 0x51, 0x10, 0x50, 0x93, 0x01, 0x93, 0xb7, 0x01, 0xc9, 0x18, 0xb7, },
+ { 0x8e, 0x3c, 0x42, 0x1e, 0x5e, 0x7d, 0xc1, 0x50, 0x70, 0x1f, 0x00, 0x98, },
+ { 0x5f, 0xd9, 0x9b, 0xc8, 0xd7, 0xb2, 0x72, 0x62, 0x1a, 0x1e, 0xba, 0x92,
+ 0xe9, },
+ { 0x70, 0x2b, 0xba, 0xfe, 0xad, 0x5d, 0x96, 0x3f, 0x27, 0xc2, 0x41, 0x6d,
+ 0xc4, 0xb3, },
+ { 0xae, 0xe0, 0xd5, 0xd4, 0xc7, 0xae, 0x15, 0x5e, 0xdc, 0xdd, 0x33, 0x60,
+ 0xd7, 0xd3, 0x5e, },
+ { 0x79, 0x8e, 0xbc, 0x9e, 0x20, 0xb9, 0x19, 0x4b, 0x63, 0x80, 0xf3, 0x16,
+ 0xaf, 0x39, 0xbd, 0x92, },
+ { 0xc2, 0x0e, 0x85, 0xa0, 0x0b, 0x9a, 0xb0, 0xec, 0xde, 0x38, 0xd3, 0x10,
+ 0xd9, 0xa7, 0x66, 0x27, 0xcf, },
+ { 0x0e, 0x3b, 0x75, 0x80, 0x67, 0x14, 0x0c, 0x02, 0x90, 0xd6, 0xb3, 0x02,
+ 0x81, 0xf6, 0xa6, 0x87, 0xce, 0x58, },
+ { 0x79, 0xb5, 0xe9, 0x5d, 0x52, 0x4d, 0xf7, 0x59, 0xf4, 0x2e, 0x27, 0xdd,
+ 0xb3, 0xed, 0x57, 0x5b, 0x82, 0xea, 0x6f, },
+ { 0xa2, 0x97, 0xf5, 0x80, 0x02, 0x3d, 0xde, 0xa3, 0xf9, 0xf6, 0xab, 0xe3,
+ 0x57, 0x63, 0x7b, 0x9b, 0x10, 0x42, 0x6f, 0xf2, },
+ { 0x12, 0x7a, 0xfc, 0xb7, 0x67, 0x06, 0x0c, 0x78, 0x1a, 0xfe, 0x88, 0x4f,
+ 0xc6, 0xac, 0x52, 0x96, 0x64, 0x28, 0x97, 0x84, 0x06, },
+ { 0xc5, 0x04, 0x44, 0x6b, 0xb2, 0xa5, 0xa4, 0x66, 0xe1, 0x76, 0xa2, 0x51,
+ 0xf9, 0x59, 0x69, 0x97, 0x56, 0x0b, 0xbf, 0x50, 0xb3, 0x34, },
+ { 0x21, 0x32, 0x6b, 0x42, 0xb5, 0xed, 0x71, 0x8d, 0xf7, 0x5a, 0x35, 0xe3,
+ 0x90, 0xe2, 0xee, 0xaa, 0x89, 0xf6, 0xc9, 0x9c, 0x4d, 0x73, 0xf4, },
+ { 0x4c, 0xa6, 0x09, 0xf4, 0x48, 0xe7, 0x46, 0xbc, 0x49, 0xfc, 0xe5, 0xda,
+ 0xd1, 0x87, 0x13, 0x17, 0x4c, 0x59, 0x71, 0x26, 0x5b, 0x2c, 0x42, 0xb7, },
+ { 0x13, 0x63, 0xf3, 0x40, 0x02, 0xe5, 0xa3, 0x3a, 0x5e, 0x8e, 0xf8, 0xb6,
+ 0x8a, 0x49, 0x60, 0x76, 0x34, 0x72, 0x94, 0x73, 0xf6, 0xd9, 0x21, 0x6a,
+ 0x26, },
+ { 0xdf, 0x75, 0x16, 0x10, 0x1b, 0x5e, 0x81, 0xc3, 0xc8, 0xde, 0x34, 0x24,
+ 0xb0, 0x98, 0xeb, 0x1b, 0x8f, 0xa1, 0x9b, 0x05, 0xee, 0xa5, 0xe9, 0x35,
+ 0xf4, 0x1d, },
+ { 0xcd, 0x21, 0x93, 0x6e, 0x5b, 0xa0, 0x26, 0x2b, 0x21, 0x0e, 0xa0, 0xb9,
+ 0x1c, 0xb5, 0xbb, 0xb8, 0xf8, 0x1e, 0xff, 0x5c, 0xa8, 0xf9, 0x39, 0x46,
+ 0x4e, 0x29, 0x26, },
+ { 0x73, 0x7f, 0x0e, 0x3b, 0x0b, 0x5c, 0xf9, 0x60, 0xaa, 0x88, 0xa1, 0x09,
+ 0xb1, 0x5d, 0x38, 0x7b, 0x86, 0x8f, 0x13, 0x7a, 0x8d, 0x72, 0x7a, 0x98,
+ 0x1a, 0x5b, 0xff, 0xc9, },
+ { 0xd3, 0x3c, 0x61, 0x71, 0x44, 0x7e, 0x31, 0x74, 0x98, 0x9d, 0x9a, 0xd2,
+ 0x27, 0xf3, 0x46, 0x43, 0x42, 0x51, 0xd0, 0x5f, 0xe9, 0x1c, 0x5c, 0x69,
+ 0xbf, 0xf6, 0xbe, 0x3c, 0x40, },
+ { 0x31, 0x99, 0x31, 0x9f, 0xaa, 0x43, 0x2e, 0x77, 0x3e, 0x74, 0x26, 0x31,
+ 0x5e, 0x61, 0xf1, 0x87, 0xe2, 0xeb, 0x9b, 0xcd, 0xd0, 0x3a, 0xee, 0x20,
+ 0x7e, 0x10, 0x0a, 0x0b, 0x7e, 0xfa, },
+ { 0xa4, 0x27, 0x80, 0x67, 0x81, 0x2a, 0xa7, 0x62, 0xf7, 0x6e, 0xda, 0xd4,
+ 0x5c, 0x39, 0x74, 0xad, 0x7e, 0xbe, 0xad, 0xa5, 0x84, 0x7f, 0xa9, 0x30,
+ 0x5d, 0xdb, 0xe2, 0x05, 0x43, 0xf7, 0x1b, },
+ { 0x0b, 0x37, 0xd8, 0x02, 0xe1, 0x83, 0xd6, 0x80, 0xf2, 0x35, 0xc2, 0xb0,
+ 0x37, 0xef, 0xef, 0x5e, 0x43, 0x93, 0xf0, 0x49, 0x45, 0x0a, 0xef, 0xb5,
+ 0x76, 0x70, 0x12, 0x44, 0xc4, 0xdb, 0xf5, 0x7a, },
+ { 0x1f, },
+ { 0x82, 0x60, },
+ { 0xcc, 0xe3, 0x08, },
+ { 0x56, 0x17, 0xe4, 0x59, },
+ { 0xe2, 0xd7, 0x9e, 0xc4, 0x4c, },
+ { 0xb2, 0xad, 0xd3, 0x78, 0x58, 0x5a, },
+ { 0xce, 0x43, 0xb4, 0x02, 0x96, 0xab, 0x3c, },
+ { 0xe6, 0x05, 0x1a, 0x73, 0x22, 0x32, 0xbb, 0x77, },
+ { 0x23, 0xe7, 0xda, 0xfe, 0x2c, 0xef, 0x8c, 0x22, 0xec, },
+ { 0xe9, 0x8e, 0x55, 0x38, 0xd1, 0xd7, 0x35, 0x23, 0x98, 0xc7, },
+ { 0xb5, 0x81, 0x1a, 0xe5, 0xb5, 0xa5, 0xd9, 0x4d, 0xca, 0x41, 0xe7, },
+ { 0x41, 0x16, 0x16, 0x95, 0x8d, 0x9e, 0x0c, 0xea, 0x8c, 0x71, 0x9a, 0xc1, },
+ { 0x7c, 0x33, 0xc0, 0xa4, 0x00, 0x62, 0xea, 0x60, 0x67, 0xe4, 0x20, 0xbc,
+ 0x5b, },
+ { 0xdb, 0xb1, 0xdc, 0xfd, 0x08, 0xc0, 0xde, 0x82, 0xd1, 0xde, 0x38, 0xc0,
+ 0x90, 0x48, },
+ { 0x37, 0x18, 0x2e, 0x0d, 0x61, 0xaa, 0x61, 0xd7, 0x86, 0x20, 0x16, 0x60,
+ 0x04, 0xd9, 0xd5, },
+ { 0xb0, 0xcf, 0x2c, 0x4c, 0x5e, 0x5b, 0x4f, 0x2a, 0x23, 0x25, 0x58, 0x47,
+ 0xe5, 0x31, 0x06, 0x70, },
+ { 0x91, 0xa0, 0xa3, 0x86, 0x4e, 0xe0, 0x72, 0x38, 0x06, 0x67, 0x59, 0x5c,
+ 0x70, 0x25, 0xdb, 0x33, 0x27, },
+ { 0x44, 0x58, 0x66, 0xb8, 0x58, 0xc7, 0x13, 0xed, 0x4c, 0xc0, 0xf4, 0x9a,
+ 0x1e, 0x67, 0x75, 0x33, 0xb6, 0xb8, },
+ { 0x7f, 0x98, 0x4a, 0x8e, 0x50, 0xa2, 0x5c, 0xcd, 0x59, 0xde, 0x72, 0xb3,
+ 0x9d, 0xc3, 0x09, 0x8a, 0xab, 0x56, 0xf1, },
+ { 0x80, 0x96, 0x49, 0x1a, 0x59, 0xa2, 0xc5, 0xd5, 0xa7, 0x20, 0x8a, 0xb7,
+ 0x27, 0x62, 0x84, 0x43, 0xc6, 0xe1, 0x1b, 0x5d, },
+ { 0x6b, 0xb7, 0x2b, 0x26, 0x62, 0x14, 0x70, 0x19, 0x3d, 0x4d, 0xac, 0xac,
+ 0x63, 0x58, 0x5e, 0x94, 0xb5, 0xb7, 0xe8, 0xe8, 0xa2, },
+ { 0x20, 0xa8, 0xc0, 0xfd, 0x63, 0x3d, 0x6e, 0x98, 0xcf, 0x0c, 0x49, 0x98,
+ 0xe4, 0x5a, 0xfe, 0x8c, 0xaa, 0x70, 0x82, 0x1c, 0x7b, 0x74, },
+ { 0xc8, 0xe8, 0xdd, 0xdf, 0x69, 0x30, 0x01, 0xc2, 0x0f, 0x7e, 0x2f, 0x11,
+ 0xcc, 0x3e, 0x17, 0xa5, 0x69, 0x40, 0x3f, 0x0e, 0x79, 0x7f, 0xcf, },
+ { 0xdb, 0x61, 0xc0, 0xe2, 0x2e, 0x49, 0x07, 0x31, 0x1d, 0x91, 0x42, 0x8a,
+ 0xfc, 0x5e, 0xd3, 0xf8, 0x56, 0x1f, 0x2b, 0x73, 0xfd, 0x9f, 0xb2, 0x8e, },
+ { 0x0c, 0x89, 0x55, 0x0c, 0x1f, 0x59, 0x2c, 0x9d, 0x1b, 0x29, 0x1d, 0x41,
+ 0x1d, 0xe6, 0x47, 0x8f, 0x8c, 0x2b, 0xea, 0x8f, 0xf0, 0xff, 0x21, 0x70,
+ 0x88, },
+ { 0x12, 0x18, 0x95, 0xa6, 0x59, 0xb1, 0x31, 0x24, 0x45, 0x67, 0x55, 0xa4,
+ 0x1a, 0x2d, 0x48, 0x67, 0x1b, 0x43, 0x88, 0x2d, 0x8e, 0xa0, 0x70, 0xb3,
+ 0xc6, 0xbb, },
+ { 0xe7, 0xb1, 0x1d, 0xb2, 0x76, 0x4d, 0x68, 0x68, 0x68, 0x23, 0x02, 0x55,
+ 0x3a, 0xe2, 0xe5, 0xd5, 0x4b, 0x43, 0xf9, 0x34, 0x77, 0x5c, 0xa1, 0xf5,
+ 0x55, 0xfd, 0x4f, },
+ { 0x8c, 0x87, 0x5a, 0x08, 0x3a, 0x73, 0xad, 0x61, 0xe1, 0xe7, 0x99, 0x7e,
+ 0xf0, 0x5d, 0xe9, 0x5d, 0x16, 0x43, 0x80, 0x2f, 0xd0, 0x66, 0x34, 0xe2,
+ 0x42, 0x64, 0x3b, 0x1a, },
+ { 0x39, 0xc1, 0x99, 0xcf, 0x22, 0xbf, 0x16, 0x8f, 0x9f, 0x80, 0x7f, 0x95,
+ 0x0a, 0x05, 0x67, 0x27, 0xe7, 0x15, 0xdf, 0x9d, 0xb2, 0xfe, 0x1c, 0xb5,
+ 0x1d, 0x60, 0x8f, 0x8a, 0x1d, },
+ { 0x9b, 0x6e, 0x08, 0x09, 0x06, 0x73, 0xab, 0x68, 0x02, 0x62, 0x1a, 0xe4,
+ 0xd4, 0xdf, 0xc7, 0x02, 0x4c, 0x6a, 0x5f, 0xfd, 0x23, 0xac, 0xae, 0x6d,
+ 0x43, 0xa4, 0x7a, 0x50, 0x60, 0x3c, },
+ { 0x1d, 0xb4, 0xc6, 0xe1, 0xb1, 0x4b, 0xe3, 0xf2, 0xe2, 0x1a, 0x73, 0x1b,
+ 0xa0, 0x92, 0xa7, 0xf5, 0xff, 0x8f, 0x8b, 0x5d, 0xdf, 0xa8, 0x04, 0xb3,
+ 0xb0, 0xf7, 0xcc, 0x12, 0xfa, 0x35, 0x46, },
+ { 0x49, 0x45, 0x97, 0x11, 0x0f, 0x1c, 0x60, 0x8e, 0xe8, 0x47, 0x30, 0xcf,
+ 0x60, 0xa8, 0x71, 0xc5, 0x1b, 0xe9, 0x39, 0x4d, 0x49, 0xb6, 0x12, 0x1f,
+ 0x24, 0xab, 0x37, 0xff, 0x83, 0xc2, 0xe1, 0x3a, },
+ { 0x60, },
+ { 0x24, 0x26, },
+ { 0x47, 0xeb, 0xc9, },
+ { 0x4a, 0xd0, 0xbc, 0xf0, },
+ { 0x8e, 0x2b, 0xc9, 0x85, 0x3c, },
+ { 0xa2, 0x07, 0x15, 0xb8, 0x12, 0x74, },
+ { 0x0f, 0xdb, 0x5b, 0x33, 0x69, 0xfe, 0x4b, },
+ { 0xa2, 0x86, 0x54, 0xf4, 0xfd, 0xb2, 0xd4, 0xe6, },
+ { 0xbb, 0x84, 0x78, 0x49, 0x27, 0x8e, 0x61, 0xda, 0x60, },
+ { 0x04, 0xc3, 0xcd, 0xaa, 0x8f, 0xa7, 0x03, 0xc9, 0xf9, 0xb6, },
+ { 0xf8, 0x27, 0x1d, 0x61, 0xdc, 0x21, 0x42, 0xdd, 0xad, 0x92, 0x40, },
+ { 0x12, 0x87, 0xdf, 0xc2, 0x41, 0x45, 0x5a, 0x36, 0x48, 0x5b, 0x51, 0x2b, },
+ { 0xbb, 0x37, 0x5d, 0x1f, 0xf1, 0x68, 0x7a, 0xc4, 0xa5, 0xd2, 0xa4, 0x91,
+ 0x8d, },
+ { 0x5b, 0x27, 0xd1, 0x04, 0x54, 0x52, 0x9f, 0xa3, 0x47, 0x86, 0x33, 0x33,
+ 0xbf, 0xa0, },
+ { 0xcf, 0x04, 0xea, 0xf8, 0x03, 0x2a, 0x43, 0xff, 0xa6, 0x68, 0x21, 0x4c,
+ 0xd5, 0x4b, 0xed, },
+ { 0xaf, 0xb8, 0xbc, 0x63, 0x0f, 0x18, 0x4d, 0xe2, 0x7a, 0xdd, 0x46, 0x44,
+ 0xc8, 0x24, 0x0a, 0xb7, },
+ { 0x3e, 0xdc, 0x36, 0xe4, 0x89, 0xb1, 0xfa, 0xc6, 0x40, 0x93, 0x2e, 0x75,
+ 0xb2, 0x15, 0xd1, 0xb1, 0x10, },
+ { 0x6c, 0xd8, 0x20, 0x3b, 0x82, 0x79, 0xf9, 0xc8, 0xbc, 0x9d, 0xe0, 0x35,
+ 0xbe, 0x1b, 0x49, 0x1a, 0xbc, 0x3a, },
+ { 0x78, 0x65, 0x2c, 0xbe, 0x35, 0x67, 0xdc, 0x78, 0xd4, 0x41, 0xf6, 0xc9,
+ 0xde, 0xde, 0x1f, 0x18, 0x13, 0x31, 0x11, },
+ { 0x8a, 0x7f, 0xb1, 0x33, 0x8f, 0x0c, 0x3c, 0x0a, 0x06, 0x61, 0xf0, 0x47,
+ 0x29, 0x1b, 0x29, 0xbc, 0x1c, 0x47, 0xef, 0x7a, },
+ { 0x65, 0x91, 0xf1, 0xe6, 0xb3, 0x96, 0xd3, 0x8c, 0xc2, 0x4a, 0x59, 0x35,
+ 0x72, 0x8e, 0x0b, 0x9a, 0x87, 0xca, 0x34, 0x7b, 0x63, },
+ { 0x5f, 0x08, 0x87, 0x80, 0x56, 0x25, 0x89, 0x77, 0x61, 0x8c, 0x64, 0xa1,
+ 0x59, 0x6d, 0x59, 0x62, 0xe8, 0x4a, 0xc8, 0x58, 0x99, 0xd1, },
+ { 0x23, 0x87, 0x1d, 0xed, 0x6f, 0xf2, 0x91, 0x90, 0xe2, 0xfe, 0x43, 0x21,
+ 0xaf, 0x97, 0xc6, 0xbc, 0xd7, 0x15, 0xc7, 0x2d, 0x08, 0x77, 0x91, },
+ { 0x90, 0x47, 0x9a, 0x9e, 0x3a, 0xdf, 0xf3, 0xc9, 0x4c, 0x1e, 0xa7, 0xd4,
+ 0x6a, 0x32, 0x90, 0xfe, 0xb7, 0xb6, 0x7b, 0xfa, 0x96, 0x61, 0xfb, 0xa4, },
+ { 0xb1, 0x67, 0x60, 0x45, 0xb0, 0x96, 0xc5, 0x15, 0x9f, 0x4d, 0x26, 0xd7,
+ 0x9d, 0xf1, 0xf5, 0x6d, 0x21, 0x00, 0x94, 0x31, 0x64, 0x94, 0xd3, 0xa7,
+ 0xd3, },
+ { 0x02, 0x3e, 0xaf, 0xf3, 0x79, 0x73, 0xa5, 0xf5, 0xcc, 0x7a, 0x7f, 0xfb,
+ 0x79, 0x2b, 0x85, 0x8c, 0x88, 0x72, 0x06, 0xbe, 0xfe, 0xaf, 0xc1, 0x16,
+ 0xa6, 0xd6, },
+ { 0x2a, 0xb0, 0x1a, 0xe5, 0xaa, 0x6e, 0xb3, 0xae, 0x53, 0x85, 0x33, 0x80,
+ 0x75, 0xae, 0x30, 0xe6, 0xb8, 0x72, 0x42, 0xf6, 0x25, 0x4f, 0x38, 0x88,
+ 0x55, 0xd1, 0xa9, },
+ { 0x90, 0xd8, 0x0c, 0xc0, 0x93, 0x4b, 0x4f, 0x9e, 0x65, 0x6c, 0xa1, 0x54,
+ 0xa6, 0xf6, 0x6e, 0xca, 0xd2, 0xbb, 0x7e, 0x6a, 0x1c, 0xd3, 0xce, 0x46,
+ 0xef, 0xb0, 0x00, 0x8d, },
+ { 0xed, 0x9c, 0x49, 0xcd, 0xc2, 0xde, 0x38, 0x0e, 0xe9, 0x98, 0x6c, 0xc8,
+ 0x90, 0x9e, 0x3c, 0xd4, 0xd3, 0xeb, 0x88, 0x32, 0xc7, 0x28, 0xe3, 0x94,
+ 0x1c, 0x9f, 0x8b, 0xf3, 0xcb, },
+ { 0xac, 0xe7, 0x92, 0x16, 0xb4, 0x14, 0xa0, 0xe4, 0x04, 0x79, 0xa2, 0xf4,
+ 0x31, 0xe6, 0x0c, 0x26, 0xdc, 0xbf, 0x2f, 0x69, 0x1b, 0x55, 0x94, 0x67,
+ 0xda, 0x0c, 0xd7, 0x32, 0x1f, 0xef, },
+ { 0x68, 0x63, 0x85, 0x57, 0x95, 0x9e, 0x42, 0x27, 0x41, 0x43, 0x42, 0x02,
+ 0xa5, 0x78, 0xa7, 0xc6, 0x43, 0xc1, 0x6a, 0xba, 0x70, 0x80, 0xcd, 0x04,
+ 0xb6, 0x78, 0x76, 0x29, 0xf3, 0xe8, 0xa0, },
+ { 0xe6, 0xac, 0x8d, 0x9d, 0xf0, 0xc0, 0xf7, 0xf7, 0xe3, 0x3e, 0x4e, 0x28,
+ 0x0f, 0x59, 0xb2, 0x67, 0x9e, 0x84, 0x34, 0x42, 0x96, 0x30, 0x2b, 0xca,
+ 0x49, 0xb6, 0xc5, 0x9a, 0x84, 0x59, 0xa7, 0x81, },
+ { 0x7e, },
+ { 0x1e, 0x21, },
+ { 0x26, 0xd3, 0xdd, },
+ { 0x2c, 0xd4, 0xb3, 0x3d, },
+ { 0x86, 0x7b, 0x76, 0x3c, 0xf0, },
+ { 0x12, 0xc3, 0x70, 0x1d, 0x55, 0x18, },
+ { 0x96, 0xc2, 0xbd, 0x61, 0x55, 0xf4, 0x24, },
+ { 0x20, 0x51, 0xf7, 0x86, 0x58, 0x8f, 0x07, 0x2a, },
+ { 0x93, 0x15, 0xa8, 0x1d, 0xda, 0x97, 0xee, 0x0e, 0x6c, },
+ { 0x39, 0x93, 0xdf, 0xd5, 0x0e, 0xca, 0xdc, 0x7a, 0x92, 0xce, },
+ { 0x60, 0xd5, 0xfd, 0xf5, 0x1b, 0x26, 0x82, 0x26, 0x73, 0x02, 0xbc, },
+ { 0x98, 0xf2, 0x34, 0xe1, 0xf5, 0xfb, 0x00, 0xac, 0x10, 0x4a, 0x38, 0x9f, },
+ { 0xda, 0x3a, 0x92, 0x8a, 0xd0, 0xcd, 0x12, 0xcd, 0x15, 0xbb, 0xab, 0x77,
+ 0x66, },
+ { 0xa2, 0x92, 0x1a, 0xe5, 0xca, 0x0c, 0x30, 0x75, 0xeb, 0xaf, 0x00, 0x31,
+ 0x55, 0x66, },
+ { 0x06, 0xea, 0xfd, 0x3e, 0x86, 0x38, 0x62, 0x4e, 0xa9, 0x12, 0xa4, 0x12,
+ 0x43, 0xbf, 0xa1, },
+ { 0xe4, 0x71, 0x7b, 0x94, 0xdb, 0xa0, 0xd2, 0xff, 0x9b, 0xeb, 0xad, 0x8e,
+ 0x95, 0x8a, 0xc5, 0xed, },
+ { 0x25, 0x5a, 0x77, 0x71, 0x41, 0x0e, 0x7a, 0xe9, 0xed, 0x0c, 0x10, 0xef,
+ 0xf6, 0x2b, 0x3a, 0xba, 0x60, },
+ { 0xee, 0xe2, 0xa3, 0x67, 0x64, 0x1d, 0xc6, 0x04, 0xc4, 0xe1, 0x68, 0xd2,
+ 0x6e, 0xd2, 0x91, 0x75, 0x53, 0x07, },
+ { 0xe0, 0xf6, 0x4d, 0x8f, 0x68, 0xfc, 0x06, 0x7e, 0x18, 0x79, 0x7f, 0x2b,
+ 0x6d, 0xef, 0x46, 0x7f, 0xab, 0xb2, 0xad, },
+ { 0x3d, 0x35, 0x88, 0x9f, 0x2e, 0xcf, 0x96, 0x45, 0x07, 0x60, 0x71, 0x94,
+ 0x00, 0x8d, 0xbf, 0xf4, 0xef, 0x46, 0x2e, 0x3c, },
+ { 0x43, 0xcf, 0x98, 0xf7, 0x2d, 0xf4, 0x17, 0xe7, 0x8c, 0x05, 0x2d, 0x9b,
+ 0x24, 0xfb, 0x4d, 0xea, 0x4a, 0xec, 0x01, 0x25, 0x29, },
+ { 0x8e, 0x73, 0x9a, 0x78, 0x11, 0xfe, 0x48, 0xa0, 0x3b, 0x1a, 0x26, 0xdf,
+ 0x25, 0xe9, 0x59, 0x1c, 0x70, 0x07, 0x9f, 0xdc, 0xa0, 0xa6, },
+ { 0xe8, 0x47, 0x71, 0xc7, 0x3e, 0xdf, 0xb5, 0x13, 0xb9, 0x85, 0x13, 0xa8,
+ 0x54, 0x47, 0x6e, 0x59, 0x96, 0x09, 0x13, 0x5f, 0x82, 0x16, 0x0b, },
+ { 0xfb, 0xc0, 0x8c, 0x03, 0x21, 0xb3, 0xc4, 0xb5, 0x43, 0x32, 0x6c, 0xea,
+ 0x7f, 0xa8, 0x43, 0x91, 0xe8, 0x4e, 0x3f, 0xbf, 0x45, 0x58, 0x6a, 0xa3, },
+ { 0x55, 0xf8, 0xf3, 0x00, 0x76, 0x09, 0xef, 0x69, 0x5d, 0xd2, 0x8a, 0xf2,
+ 0x65, 0xc3, 0xcb, 0x9b, 0x43, 0xfd, 0xb1, 0x7e, 0x7f, 0xa1, 0x94, 0xb0,
+ 0xd7, },
+ { 0xaa, 0x13, 0xc1, 0x51, 0x40, 0x6d, 0x8d, 0x4c, 0x0a, 0x95, 0x64, 0x7b,
+ 0xd1, 0x96, 0xb6, 0x56, 0xb4, 0x5b, 0xcf, 0xd6, 0xd9, 0x15, 0x97, 0xdd,
+ 0xb6, 0xef, },
+ { 0xaf, 0xb7, 0x36, 0xb0, 0x04, 0xdb, 0xd7, 0x9c, 0x9a, 0x44, 0xc4, 0xf6,
+ 0x1f, 0x12, 0x21, 0x2d, 0x59, 0x30, 0x54, 0xab, 0x27, 0x61, 0xa3, 0x57,
+ 0xef, 0xf8, 0x53, },
+ { 0x97, 0x34, 0x45, 0x3e, 0xce, 0x7c, 0x35, 0xa2, 0xda, 0x9f, 0x4b, 0x46,
+ 0x6c, 0x11, 0x67, 0xff, 0x2f, 0x76, 0x58, 0x15, 0x71, 0xfa, 0x44, 0x89,
+ 0x89, 0xfd, 0xf7, 0x99, },
+ { 0x1f, 0xb1, 0x62, 0xeb, 0x83, 0xc5, 0x9c, 0x89, 0xf9, 0x2c, 0xd2, 0x03,
+ 0x61, 0xbc, 0xbb, 0xa5, 0x74, 0x0e, 0x9b, 0x7e, 0x82, 0x3e, 0x70, 0x0a,
+ 0xa9, 0x8f, 0x2b, 0x59, 0xfb, },
+ { 0xf8, 0xca, 0x5e, 0x3a, 0x4f, 0x9e, 0x10, 0x69, 0x10, 0xd5, 0x4c, 0xeb,
+ 0x1a, 0x0f, 0x3c, 0x6a, 0x98, 0xf5, 0xb0, 0x97, 0x5b, 0x37, 0x2f, 0x0d,
+ 0xbd, 0x42, 0x4b, 0x69, 0xa1, 0x82, },
+ { 0x12, 0x8c, 0x6d, 0x52, 0x08, 0xef, 0x74, 0xb2, 0xe6, 0xaa, 0xd3, 0xb0,
+ 0x26, 0xb0, 0xd9, 0x94, 0xb6, 0x11, 0x45, 0x0e, 0x36, 0x71, 0x14, 0x2d,
+ 0x41, 0x8c, 0x21, 0x53, 0x31, 0xe9, 0x68, },
+ { 0xee, 0xea, 0x0d, 0x89, 0x47, 0x7e, 0x72, 0xd1, 0xd8, 0xce, 0x58, 0x4c,
+ 0x94, 0x1f, 0x0d, 0x51, 0x08, 0xa3, 0xb6, 0x3d, 0xe7, 0x82, 0x46, 0x92,
+ 0xd6, 0x98, 0x6b, 0x07, 0x10, 0x65, 0x52, 0x65, },
+};
+
+static bool __init noinline_for_stack blake2s_digest_test(void)
+{
+ u8 key[BLAKE2S_KEY_SIZE];
+ u8 buf[ARRAY_SIZE(blake2s_testvecs)];
+ u8 hash[BLAKE2S_HASH_SIZE];
+ struct blake2s_state state;
+ bool success = true;
+ int i, l;
+
+ key[0] = key[1] = 1;
+ for (i = 2; i < sizeof(key); ++i)
+ key[i] = key[i - 2] + key[i - 1];
+
+ for (i = 0; i < sizeof(buf); ++i)
+ buf[i] = (u8)i;
+
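+ /*
+ * Sweep all the vectors: outlen cycles through 1..BLAKE2S_HASH_SIZE,
+ * keylen through 0..BLAKE2S_KEY_SIZE, and l picks a varying split point
+ * for the two-part update exercised further below.
+ */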
+ for (i = l = 0; i < ARRAY_SIZE(blake2s_testvecs); l = (l + 37) % ++i) {
+ int outlen = 1 + i % BLAKE2S_HASH_SIZE;
+ int keylen = (13 * i) % (BLAKE2S_KEY_SIZE + 1);
+
+ blake2s(hash, buf, key + BLAKE2S_KEY_SIZE - keylen, outlen, i,
+ keylen);
+ if (memcmp(hash, blake2s_testvecs[i], outlen)) {
+ pr_err("blake2s self-test %d: FAIL\n", i + 1);
+ success = false;
+ }
+
+ if (!keylen)
+ blake2s_init(&state, outlen);
+ else
+ blake2s_init_key(&state, outlen,
+ key + BLAKE2S_KEY_SIZE - keylen,
+ keylen);
+
+ blake2s_update(&state, buf, l);
+ blake2s_update(&state, buf + l, i - l);
+ blake2s_final(&state, hash);
+ if (memcmp(hash, blake2s_testvecs[i], outlen)) {
+ pr_err("blake2s init/update/final self-test %d: FAIL\n",
+ i + 1);
+ success = false;
+ }
+ }
+
+ return success;
+}
+
+static bool __init noinline_for_stack blake2s_random_test(void)
+{
+ struct blake2s_state state;
+ bool success = true;
+ int i, l;
+
+ for (i = 0; i < 32; ++i) {
+ enum { TEST_ALIGNMENT = 16 };
+ u8 blocks[BLAKE2S_BLOCK_SIZE * 2 + TEST_ALIGNMENT - 1]
+ __aligned(TEST_ALIGNMENT);
+ u8 *unaligned_block = blocks + BLAKE2S_BLOCK_SIZE;
+ struct blake2s_state state1, state2;
+
+ get_random_bytes(blocks, sizeof(blocks));
+ get_random_bytes(&state, sizeof(state));
+
+#if defined(CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC) && \
+ defined(CONFIG_CRYPTO_ARCH_HAVE_LIB_BLAKE2S)
+ memcpy(&state1, &state, sizeof(state1));
+ memcpy(&state2, &state, sizeof(state2));
+ blake2s_compress(&state1, blocks, 2, BLAKE2S_BLOCK_SIZE);
+ blake2s_compress_generic(&state2, blocks, 2, BLAKE2S_BLOCK_SIZE);
+ if (memcmp(&state1, &state2, sizeof(state1))) {
+ pr_err("blake2s random compress self-test %d: FAIL\n",
+ i + 1);
+ success = false;
+ }
+#endif
+
+ memcpy(&state1, &state, sizeof(state1));
+ blake2s_compress(&state1, blocks, 1, BLAKE2S_BLOCK_SIZE);
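+ /* Re-run the one-block compression at every misalignment of the
+ * input and check that the state matches the aligned result above. */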
+ for (l = 1; l < TEST_ALIGNMENT; ++l) {
+ memcpy(unaligned_block + l, blocks,
+ BLAKE2S_BLOCK_SIZE);
+ memcpy(&state2, &state, sizeof(state2));
+ blake2s_compress(&state2, unaligned_block + l, 1,
+ BLAKE2S_BLOCK_SIZE);
+ if (memcmp(&state1, &state2, sizeof(state1))) {
+ pr_err("blake2s random compress align %d self-test %d: FAIL\n",
+ l, i + 1);
+ success = false;
+ }
+ }
+ }
+
+ return success;
+}
+
+bool __init blake2s_selftest(void)
+{
+ bool success;
+
+ success = blake2s_digest_test();
+ success &= blake2s_random_test();
+
+ return success;
+}
diff --git a/lib/crypto/blake2s.c b/lib/crypto/blake2s.c
new file mode 100644
index 000000000..71a316552
--- /dev/null
+++ b/lib/crypto/blake2s.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ *
+ * This is an implementation of the BLAKE2s hash and PRF functions.
+ *
+ * Information: https://blake2.net/
+ *
+ */
+
+#include <crypto/internal/blake2s.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/bug.h>
+
+static inline void blake2s_set_lastblock(struct blake2s_state *state)
+{
+ state->f[0] = -1;
+}
+
+void blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen)
+{
+ const size_t fill = BLAKE2S_BLOCK_SIZE - state->buflen;
+
+ if (unlikely(!inlen))
+ return;
+ if (inlen > fill) {
+ memcpy(state->buf + state->buflen, in, fill);
+ blake2s_compress(state, state->buf, 1, BLAKE2S_BLOCK_SIZE);
+ state->buflen = 0;
+ in += fill;
+ inlen -= fill;
+ }
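+ /* Hash the remaining full blocks directly from @in, but keep the last
+ * (possibly partial) block buffered so blake2s_final() can mark it. */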
+ if (inlen > BLAKE2S_BLOCK_SIZE) {
+ const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2S_BLOCK_SIZE);
+ blake2s_compress(state, in, nblocks - 1, BLAKE2S_BLOCK_SIZE);
+ in += BLAKE2S_BLOCK_SIZE * (nblocks - 1);
+ inlen -= BLAKE2S_BLOCK_SIZE * (nblocks - 1);
+ }
+ memcpy(state->buf + state->buflen, in, inlen);
+ state->buflen += inlen;
+}
+EXPORT_SYMBOL(blake2s_update);
+
+void blake2s_final(struct blake2s_state *state, u8 *out)
+{
+ WARN_ON(IS_ENABLED(DEBUG) && !out);
+ blake2s_set_lastblock(state);
+ memset(state->buf + state->buflen, 0,
+ BLAKE2S_BLOCK_SIZE - state->buflen); /* Padding */
+ blake2s_compress(state, state->buf, 1, state->buflen);
+ cpu_to_le32_array(state->h, ARRAY_SIZE(state->h));
+ memcpy(out, state->h, state->outlen);
+ memzero_explicit(state, sizeof(*state));
+}
+EXPORT_SYMBOL(blake2s_final);
+
+static int __init blake2s_mod_init(void)
+{
+ if (!IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS) &&
+ WARN_ON(!blake2s_selftest()))
+ return -ENODEV;
+ return 0;
+}
+
+module_init(blake2s_mod_init);
+MODULE_DESCRIPTION("BLAKE2s hash function");
+MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>");
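For orientation, here is a minimal sketch of how a caller might drive this library interface, mirroring the calls the selftest above already exercises (blake2s_init(), blake2s_update(), blake2s_final() and the one-shot blake2s()); the wrapper function itself is hypothetical, not part of the patch:

#include <crypto/blake2s.h>
#include <linux/bug.h>
#include <linux/string.h>

/* Hypothetical caller: hash a message incrementally and in one shot. */
static void blake2s_usage_sketch(const u8 *msg, size_t len)
{
	struct blake2s_state state;
	u8 hash1[BLAKE2S_HASH_SIZE], hash2[BLAKE2S_HASH_SIZE];

	/* Incremental: init with the desired output length, then feed data. */
	blake2s_init(&state, BLAKE2S_HASH_SIZE);
	blake2s_update(&state, msg, len);
	blake2s_final(&state, hash1);

	/* One-shot, unkeyed (keylen == 0), as the selftest also does. */
	blake2s(hash2, msg, NULL, BLAKE2S_HASH_SIZE, len, 0);

	/* Both paths must produce the same digest. */
	WARN_ON(memcmp(hash1, hash2, BLAKE2S_HASH_SIZE) != 0);
}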
diff --git a/lib/crypto/chacha.c b/lib/crypto/chacha.c
new file mode 100644
index 000000000..b748fd3d2
--- /dev/null
+++ b/lib/crypto/chacha.c
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * The "hash function" used as the core of the ChaCha stream cipher (RFC7539)
+ *
+ * Copyright (C) 2015 Martin Willi
+ */
+
+#include <linux/bug.h>
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/bitops.h>
+#include <linux/string.h>
+#include <asm/unaligned.h>
+#include <crypto/chacha.h>
+
+static void chacha_permute(u32 *x, int nrounds)
+{
+ int i;
+
+ /* whitelist the allowed round counts */
+ WARN_ON_ONCE(nrounds != 20 && nrounds != 12);
+
+ for (i = 0; i < nrounds; i += 2) {
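+ /* Even half of the double round: quarter-rounds on the four columns. */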
+ x[0] += x[4]; x[12] = rol32(x[12] ^ x[0], 16);
+ x[1] += x[5]; x[13] = rol32(x[13] ^ x[1], 16);
+ x[2] += x[6]; x[14] = rol32(x[14] ^ x[2], 16);
+ x[3] += x[7]; x[15] = rol32(x[15] ^ x[3], 16);
+
+ x[8] += x[12]; x[4] = rol32(x[4] ^ x[8], 12);
+ x[9] += x[13]; x[5] = rol32(x[5] ^ x[9], 12);
+ x[10] += x[14]; x[6] = rol32(x[6] ^ x[10], 12);
+ x[11] += x[15]; x[7] = rol32(x[7] ^ x[11], 12);
+
+ x[0] += x[4]; x[12] = rol32(x[12] ^ x[0], 8);
+ x[1] += x[5]; x[13] = rol32(x[13] ^ x[1], 8);
+ x[2] += x[6]; x[14] = rol32(x[14] ^ x[2], 8);
+ x[3] += x[7]; x[15] = rol32(x[15] ^ x[3], 8);
+
+ x[8] += x[12]; x[4] = rol32(x[4] ^ x[8], 7);
+ x[9] += x[13]; x[5] = rol32(x[5] ^ x[9], 7);
+ x[10] += x[14]; x[6] = rol32(x[6] ^ x[10], 7);
+ x[11] += x[15]; x[7] = rol32(x[7] ^ x[11], 7);
+
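+ /* Odd half of the double round: quarter-rounds along the diagonals. */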
+ x[0] += x[5]; x[15] = rol32(x[15] ^ x[0], 16);
+ x[1] += x[6]; x[12] = rol32(x[12] ^ x[1], 16);
+ x[2] += x[7]; x[13] = rol32(x[13] ^ x[2], 16);
+ x[3] += x[4]; x[14] = rol32(x[14] ^ x[3], 16);
+
+ x[10] += x[15]; x[5] = rol32(x[5] ^ x[10], 12);
+ x[11] += x[12]; x[6] = rol32(x[6] ^ x[11], 12);
+ x[8] += x[13]; x[7] = rol32(x[7] ^ x[8], 12);
+ x[9] += x[14]; x[4] = rol32(x[4] ^ x[9], 12);
+
+ x[0] += x[5]; x[15] = rol32(x[15] ^ x[0], 8);
+ x[1] += x[6]; x[12] = rol32(x[12] ^ x[1], 8);
+ x[2] += x[7]; x[13] = rol32(x[13] ^ x[2], 8);
+ x[3] += x[4]; x[14] = rol32(x[14] ^ x[3], 8);
+
+ x[10] += x[15]; x[5] = rol32(x[5] ^ x[10], 7);
+ x[11] += x[12]; x[6] = rol32(x[6] ^ x[11], 7);
+ x[8] += x[13]; x[7] = rol32(x[7] ^ x[8], 7);
+ x[9] += x[14]; x[4] = rol32(x[4] ^ x[9], 7);
+ }
+}
+
+/**
+ * chacha_block_generic - generate one keystream block and increment block counter
+ * @state: input state matrix (16 32-bit words)
+ * @stream: output keystream block (64 bytes)
+ * @nrounds: number of rounds (20 or 12; 20 is recommended)
+ *
+ * This is the ChaCha core, a function from 64-byte strings to 64-byte strings.
+ * The caller has already converted the endianness of the input. This function
+ * also handles incrementing the block counter in the input matrix.
+ */
+void chacha_block_generic(u32 *state, u8 *stream, int nrounds)
+{
+ u32 x[16];
+ int i;
+
+ memcpy(x, state, 64);
+
+ chacha_permute(x, nrounds);
+
+ for (i = 0; i < ARRAY_SIZE(x); i++)
+ put_unaligned_le32(x[i] + state[i], &stream[i * sizeof(u32)]);
+
+ state[12]++;
+}
+EXPORT_SYMBOL(chacha_block_generic);
+
+/**
+ * hchacha_block_generic - abbreviated ChaCha core, for XChaCha
+ * @state: input state matrix (16 32-bit words)
+ * @stream: output (8 32-bit words)
+ * @nrounds: number of rounds (20 or 12; 20 is recommended)
+ *
+ * HChaCha is the ChaCha equivalent of HSalsa and is an intermediate step
+ * towards XChaCha (see https://cr.yp.to/snuffle/xsalsa-20081128.pdf). HChaCha
+ * skips the final addition of the initial state, and outputs only certain words
+ * of the state. It should not be used for streaming directly.
+ */
+void hchacha_block_generic(const u32 *state, u32 *stream, int nrounds)
+{
+ u32 x[16];
+
+ memcpy(x, state, 64);
+
+ chacha_permute(x, nrounds);
+
+ memcpy(&stream[0], &x[0], 16);
+ memcpy(&stream[4], &x[12], 16);
+}
+EXPORT_SYMBOL(hchacha_block_generic);
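To make the contract of chacha_block_generic() concrete, here is a rough, hypothetical sketch of a caller that lays out the RFC7539 state (constant words, key, block counter, nonce) and XORs keystream over a buffer; in-tree users go through higher-level chacha helpers instead, so this is illustration only:

#include <crypto/chacha.h>
#include <linux/minmax.h>
#include <linux/string.h>

/* Hypothetical: encrypt/decrypt buf in place with ChaCha20 (sketch only). */
static void chacha20_xor_sketch(u8 *buf, size_t len, const u32 key[8],
				u32 counter, const u32 nonce[3])
{
	u32 state[16];
	u8 stream[CHACHA_BLOCK_SIZE];
	size_t i, n;

	/* RFC7539 layout: 4 constant words, 8 key words, counter, 3 nonce words */
	state[0] = 0x61707865; /* "expa" */
	state[1] = 0x3320646e; /* "nd 3" */
	state[2] = 0x79622d32; /* "2-by" */
	state[3] = 0x6b206574; /* "te k" */
	memcpy(&state[4], key, 8 * sizeof(u32));
	state[12] = counter;
	memcpy(&state[13], nonce, 3 * sizeof(u32));

	while (len) {
		/* Emits 64 keystream bytes and increments state[12] itself. */
		chacha_block_generic(state, stream, 20);
		n = min(len, sizeof(stream));
		for (i = 0; i < n; i++)
			buf[i] ^= stream[i];
		buf += n;
		len -= n;
	}
}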
diff --git a/lib/crypto/chacha20poly1305-selftest.c b/lib/crypto/chacha20poly1305-selftest.c
new file mode 100644
index 000000000..fa43deda2
--- /dev/null
+++ b/lib/crypto/chacha20poly1305-selftest.c
@@ -0,0 +1,9082 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#include <crypto/chacha20poly1305.h>
+#include <crypto/chacha.h>
+#include <crypto/poly1305.h>
+
+#include <asm/unaligned.h>
+#include <linux/bug.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+struct chacha20poly1305_testvec {
+ const u8 *input, *output, *assoc, *nonce, *key;
+ size_t ilen, alen, nlen;
+ bool failure;
+};
+
+/* The first of these are the ChaCha20-Poly1305 AEAD test vectors from RFC7539
+ * 2.8.2. The vectors after those were generated by reference implementations.
+ * The final marked ones are taken from Wycheproof, but we only run those on
+ * the encrypt side, because we are mostly stressing the primitives rather
+ * than the actual chapoly construction.
+ */
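+/*
+ * Editor's sketch (not part of this file): how one encrypt-side vector with
+ * an 8-byte nonce is checked, in the style of the checking code later in
+ * this file. The chacha20poly1305_encrypt() signature is assumed from
+ * <crypto/chacha20poly1305.h>; AEAD output is ciphertext plus a 16-byte tag.
+ */
+static bool __init check_one_vector_sketch(
+	const struct chacha20poly1305_testvec *v)
+{
+	u8 *computed = kmalloc(v->ilen + POLY1305_DIGEST_SIZE, GFP_KERNEL);
+	bool ok;
+
+	if (!computed)
+		return false;
+	/* The 8-byte nonce is passed as a little-endian u64. */
+	chacha20poly1305_encrypt(computed, v->input, v->ilen,
+				 v->assoc, v->alen,
+				 get_unaligned_le64(v->nonce), v->key);
+	ok = !memcmp(computed, v->output, v->ilen + POLY1305_DIGEST_SIZE);
+	kfree(computed);
+	return ok;
+}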
+
+static const u8 enc_input001[] __initconst = {
+ 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74,
+ 0x2d, 0x44, 0x72, 0x61, 0x66, 0x74, 0x73, 0x20,
+ 0x61, 0x72, 0x65, 0x20, 0x64, 0x72, 0x61, 0x66,
+ 0x74, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65,
+ 0x6e, 0x74, 0x73, 0x20, 0x76, 0x61, 0x6c, 0x69,
+ 0x64, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x61, 0x20,
+ 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x20,
+ 0x6f, 0x66, 0x20, 0x73, 0x69, 0x78, 0x20, 0x6d,
+ 0x6f, 0x6e, 0x74, 0x68, 0x73, 0x20, 0x61, 0x6e,
+ 0x64, 0x20, 0x6d, 0x61, 0x79, 0x20, 0x62, 0x65,
+ 0x20, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64,
+ 0x2c, 0x20, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63,
+ 0x65, 0x64, 0x2c, 0x20, 0x6f, 0x72, 0x20, 0x6f,
+ 0x62, 0x73, 0x6f, 0x6c, 0x65, 0x74, 0x65, 0x64,
+ 0x20, 0x62, 0x79, 0x20, 0x6f, 0x74, 0x68, 0x65,
+ 0x72, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65,
+ 0x6e, 0x74, 0x73, 0x20, 0x61, 0x74, 0x20, 0x61,
+ 0x6e, 0x79, 0x20, 0x74, 0x69, 0x6d, 0x65, 0x2e,
+ 0x20, 0x49, 0x74, 0x20, 0x69, 0x73, 0x20, 0x69,
+ 0x6e, 0x61, 0x70, 0x70, 0x72, 0x6f, 0x70, 0x72,
+ 0x69, 0x61, 0x74, 0x65, 0x20, 0x74, 0x6f, 0x20,
+ 0x75, 0x73, 0x65, 0x20, 0x49, 0x6e, 0x74, 0x65,
+ 0x72, 0x6e, 0x65, 0x74, 0x2d, 0x44, 0x72, 0x61,
+ 0x66, 0x74, 0x73, 0x20, 0x61, 0x73, 0x20, 0x72,
+ 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65,
+ 0x20, 0x6d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61,
+ 0x6c, 0x20, 0x6f, 0x72, 0x20, 0x74, 0x6f, 0x20,
+ 0x63, 0x69, 0x74, 0x65, 0x20, 0x74, 0x68, 0x65,
+ 0x6d, 0x20, 0x6f, 0x74, 0x68, 0x65, 0x72, 0x20,
+ 0x74, 0x68, 0x61, 0x6e, 0x20, 0x61, 0x73, 0x20,
+ 0x2f, 0xe2, 0x80, 0x9c, 0x77, 0x6f, 0x72, 0x6b,
+ 0x20, 0x69, 0x6e, 0x20, 0x70, 0x72, 0x6f, 0x67,
+ 0x72, 0x65, 0x73, 0x73, 0x2e, 0x2f, 0xe2, 0x80,
+ 0x9d
+};
+static const u8 enc_output001[] __initconst = {
+ 0x64, 0xa0, 0x86, 0x15, 0x75, 0x86, 0x1a, 0xf4,
+ 0x60, 0xf0, 0x62, 0xc7, 0x9b, 0xe6, 0x43, 0xbd,
+ 0x5e, 0x80, 0x5c, 0xfd, 0x34, 0x5c, 0xf3, 0x89,
+ 0xf1, 0x08, 0x67, 0x0a, 0xc7, 0x6c, 0x8c, 0xb2,
+ 0x4c, 0x6c, 0xfc, 0x18, 0x75, 0x5d, 0x43, 0xee,
+ 0xa0, 0x9e, 0xe9, 0x4e, 0x38, 0x2d, 0x26, 0xb0,
+ 0xbd, 0xb7, 0xb7, 0x3c, 0x32, 0x1b, 0x01, 0x00,
+ 0xd4, 0xf0, 0x3b, 0x7f, 0x35, 0x58, 0x94, 0xcf,
+ 0x33, 0x2f, 0x83, 0x0e, 0x71, 0x0b, 0x97, 0xce,
+ 0x98, 0xc8, 0xa8, 0x4a, 0xbd, 0x0b, 0x94, 0x81,
+ 0x14, 0xad, 0x17, 0x6e, 0x00, 0x8d, 0x33, 0xbd,
+ 0x60, 0xf9, 0x82, 0xb1, 0xff, 0x37, 0xc8, 0x55,
+ 0x97, 0x97, 0xa0, 0x6e, 0xf4, 0xf0, 0xef, 0x61,
+ 0xc1, 0x86, 0x32, 0x4e, 0x2b, 0x35, 0x06, 0x38,
+ 0x36, 0x06, 0x90, 0x7b, 0x6a, 0x7c, 0x02, 0xb0,
+ 0xf9, 0xf6, 0x15, 0x7b, 0x53, 0xc8, 0x67, 0xe4,
+ 0xb9, 0x16, 0x6c, 0x76, 0x7b, 0x80, 0x4d, 0x46,
+ 0xa5, 0x9b, 0x52, 0x16, 0xcd, 0xe7, 0xa4, 0xe9,
+ 0x90, 0x40, 0xc5, 0xa4, 0x04, 0x33, 0x22, 0x5e,
+ 0xe2, 0x82, 0xa1, 0xb0, 0xa0, 0x6c, 0x52, 0x3e,
+ 0xaf, 0x45, 0x34, 0xd7, 0xf8, 0x3f, 0xa1, 0x15,
+ 0x5b, 0x00, 0x47, 0x71, 0x8c, 0xbc, 0x54, 0x6a,
+ 0x0d, 0x07, 0x2b, 0x04, 0xb3, 0x56, 0x4e, 0xea,
+ 0x1b, 0x42, 0x22, 0x73, 0xf5, 0x48, 0x27, 0x1a,
+ 0x0b, 0xb2, 0x31, 0x60, 0x53, 0xfa, 0x76, 0x99,
+ 0x19, 0x55, 0xeb, 0xd6, 0x31, 0x59, 0x43, 0x4e,
+ 0xce, 0xbb, 0x4e, 0x46, 0x6d, 0xae, 0x5a, 0x10,
+ 0x73, 0xa6, 0x72, 0x76, 0x27, 0x09, 0x7a, 0x10,
+ 0x49, 0xe6, 0x17, 0xd9, 0x1d, 0x36, 0x10, 0x94,
+ 0xfa, 0x68, 0xf0, 0xff, 0x77, 0x98, 0x71, 0x30,
+ 0x30, 0x5b, 0xea, 0xba, 0x2e, 0xda, 0x04, 0xdf,
+ 0x99, 0x7b, 0x71, 0x4d, 0x6c, 0x6f, 0x2c, 0x29,
+ 0xa6, 0xad, 0x5c, 0xb4, 0x02, 0x2b, 0x02, 0x70,
+ 0x9b, 0xee, 0xad, 0x9d, 0x67, 0x89, 0x0c, 0xbb,
+ 0x22, 0x39, 0x23, 0x36, 0xfe, 0xa1, 0x85, 0x1f,
+ 0x38
+};
+static const u8 enc_assoc001[] __initconst = {
+ 0xf3, 0x33, 0x88, 0x86, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x4e, 0x91
+};
+static const u8 enc_nonce001[] __initconst = {
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08
+};
+static const u8 enc_key001[] __initconst = {
+ 0x1c, 0x92, 0x40, 0xa5, 0xeb, 0x55, 0xd3, 0x8a,
+ 0xf3, 0x33, 0x88, 0x86, 0x04, 0xf6, 0xb5, 0xf0,
+ 0x47, 0x39, 0x17, 0xc1, 0x40, 0x2b, 0x80, 0x09,
+ 0x9d, 0xca, 0x5c, 0xbc, 0x20, 0x70, 0x75, 0xc0
+};
+
+static const u8 enc_input002[] __initconst = { };
+static const u8 enc_output002[] __initconst = {
+ 0xea, 0xe0, 0x1e, 0x9e, 0x2c, 0x91, 0xaa, 0xe1,
+ 0xdb, 0x5d, 0x99, 0x3f, 0x8a, 0xf7, 0x69, 0x92
+};
+static const u8 enc_assoc002[] __initconst = { };
+static const u8 enc_nonce002[] __initconst = {
+ 0xca, 0xbf, 0x33, 0x71, 0x32, 0x45, 0x77, 0x8e
+};
+static const u8 enc_key002[] __initconst = {
+ 0x4c, 0xf5, 0x96, 0x83, 0x38, 0xe6, 0xae, 0x7f,
+ 0x2d, 0x29, 0x25, 0x76, 0xd5, 0x75, 0x27, 0x86,
+ 0x91, 0x9a, 0x27, 0x7a, 0xfb, 0x46, 0xc5, 0xef,
+ 0x94, 0x81, 0x79, 0x57, 0x14, 0x59, 0x40, 0x68
+};
+
+static const u8 enc_input003[] __initconst = { };
+static const u8 enc_output003[] __initconst = {
+ 0xdd, 0x6b, 0x3b, 0x82, 0xce, 0x5a, 0xbd, 0xd6,
+ 0xa9, 0x35, 0x83, 0xd8, 0x8c, 0x3d, 0x85, 0x77
+};
+static const u8 enc_assoc003[] __initconst = {
+ 0x33, 0x10, 0x41, 0x12, 0x1f, 0xf3, 0xd2, 0x6b
+};
+static const u8 enc_nonce003[] __initconst = {
+ 0x3d, 0x86, 0xb5, 0x6b, 0xc8, 0xa3, 0x1f, 0x1d
+};
+static const u8 enc_key003[] __initconst = {
+ 0x2d, 0xb0, 0x5d, 0x40, 0xc8, 0xed, 0x44, 0x88,
+ 0x34, 0xd1, 0x13, 0xaf, 0x57, 0xa1, 0xeb, 0x3a,
+ 0x2a, 0x80, 0x51, 0x36, 0xec, 0x5b, 0xbc, 0x08,
+ 0x93, 0x84, 0x21, 0xb5, 0x13, 0x88, 0x3c, 0x0d
+};
+
+static const u8 enc_input004[] __initconst = {
+ 0xa4
+};
+static const u8 enc_output004[] __initconst = {
+ 0xb7, 0x1b, 0xb0, 0x73, 0x59, 0xb0, 0x84, 0xb2,
+ 0x6d, 0x8e, 0xab, 0x94, 0x31, 0xa1, 0xae, 0xac,
+ 0x89
+};
+static const u8 enc_assoc004[] __initconst = {
+ 0x6a, 0xe2, 0xad, 0x3f, 0x88, 0x39, 0x5a, 0x40
+};
+static const u8 enc_nonce004[] __initconst = {
+ 0xd2, 0x32, 0x1f, 0x29, 0x28, 0xc6, 0xc4, 0xc4
+};
+static const u8 enc_key004[] __initconst = {
+ 0x4b, 0x28, 0x4b, 0xa3, 0x7b, 0xbe, 0xe9, 0xf8,
+ 0x31, 0x80, 0x82, 0xd7, 0xd8, 0xe8, 0xb5, 0xa1,
+ 0xe2, 0x18, 0x18, 0x8a, 0x9c, 0xfa, 0xa3, 0x3d,
+ 0x25, 0x71, 0x3e, 0x40, 0xbc, 0x54, 0x7a, 0x3e
+};
+
+static const u8 enc_input005[] __initconst = {
+ 0x2d
+};
+static const u8 enc_output005[] __initconst = {
+ 0xbf, 0xe1, 0x5b, 0x0b, 0xdb, 0x6b, 0xf5, 0x5e,
+ 0x6c, 0x5d, 0x84, 0x44, 0x39, 0x81, 0xc1, 0x9c,
+ 0xac
+};
+static const u8 enc_assoc005[] __initconst = { };
+static const u8 enc_nonce005[] __initconst = {
+ 0x20, 0x1c, 0xaa, 0x5f, 0x9c, 0xbf, 0x92, 0x30
+};
+static const u8 enc_key005[] __initconst = {
+ 0x66, 0xca, 0x9c, 0x23, 0x2a, 0x4b, 0x4b, 0x31,
+ 0x0e, 0x92, 0x89, 0x8b, 0xf4, 0x93, 0xc7, 0x87,
+ 0x98, 0xa3, 0xd8, 0x39, 0xf8, 0xf4, 0xa7, 0x01,
+ 0xc0, 0x2e, 0x0a, 0xa6, 0x7e, 0x5a, 0x78, 0x87
+};
+
+static const u8 enc_input006[] __initconst = {
+ 0x33, 0x2f, 0x94, 0xc1, 0xa4, 0xef, 0xcc, 0x2a,
+ 0x5b, 0xa6, 0xe5, 0x8f, 0x1d, 0x40, 0xf0, 0x92,
+ 0x3c, 0xd9, 0x24, 0x11, 0xa9, 0x71, 0xf9, 0x37,
+ 0x14, 0x99, 0xfa, 0xbe, 0xe6, 0x80, 0xde, 0x50,
+ 0xc9, 0x96, 0xd4, 0xb0, 0xec, 0x9e, 0x17, 0xec,
+ 0xd2, 0x5e, 0x72, 0x99, 0xfc, 0x0a, 0xe1, 0xcb,
+ 0x48, 0xd2, 0x85, 0xdd, 0x2f, 0x90, 0xe0, 0x66,
+ 0x3b, 0xe6, 0x20, 0x74, 0xbe, 0x23, 0x8f, 0xcb,
+ 0xb4, 0xe4, 0xda, 0x48, 0x40, 0xa6, 0xd1, 0x1b,
+ 0xc7, 0x42, 0xce, 0x2f, 0x0c, 0xa6, 0x85, 0x6e,
+ 0x87, 0x37, 0x03, 0xb1, 0x7c, 0x25, 0x96, 0xa3,
+ 0x05, 0xd8, 0xb0, 0xf4, 0xed, 0xea, 0xc2, 0xf0,
+ 0x31, 0x98, 0x6c, 0xd1, 0x14, 0x25, 0xc0, 0xcb,
+ 0x01, 0x74, 0xd0, 0x82, 0xf4, 0x36, 0xf5, 0x41,
+ 0xd5, 0xdc, 0xca, 0xc5, 0xbb, 0x98, 0xfe, 0xfc,
+ 0x69, 0x21, 0x70, 0xd8, 0xa4, 0x4b, 0xc8, 0xde,
+ 0x8f
+};
+static const u8 enc_output006[] __initconst = {
+ 0x8b, 0x06, 0xd3, 0x31, 0xb0, 0x93, 0x45, 0xb1,
+ 0x75, 0x6e, 0x26, 0xf9, 0x67, 0xbc, 0x90, 0x15,
+ 0x81, 0x2c, 0xb5, 0xf0, 0xc6, 0x2b, 0xc7, 0x8c,
+ 0x56, 0xd1, 0xbf, 0x69, 0x6c, 0x07, 0xa0, 0xda,
+ 0x65, 0x27, 0xc9, 0x90, 0x3d, 0xef, 0x4b, 0x11,
+ 0x0f, 0x19, 0x07, 0xfd, 0x29, 0x92, 0xd9, 0xc8,
+ 0xf7, 0x99, 0x2e, 0x4a, 0xd0, 0xb8, 0x2c, 0xdc,
+ 0x93, 0xf5, 0x9e, 0x33, 0x78, 0xd1, 0x37, 0xc3,
+ 0x66, 0xd7, 0x5e, 0xbc, 0x44, 0xbf, 0x53, 0xa5,
+ 0xbc, 0xc4, 0xcb, 0x7b, 0x3a, 0x8e, 0x7f, 0x02,
+ 0xbd, 0xbb, 0xe7, 0xca, 0xa6, 0x6c, 0x6b, 0x93,
+ 0x21, 0x93, 0x10, 0x61, 0xe7, 0x69, 0xd0, 0x78,
+ 0xf3, 0x07, 0x5a, 0x1a, 0x8f, 0x73, 0xaa, 0xb1,
+ 0x4e, 0xd3, 0xda, 0x4f, 0xf3, 0x32, 0xe1, 0x66,
+ 0x3e, 0x6c, 0xc6, 0x13, 0xba, 0x06, 0x5b, 0xfc,
+ 0x6a, 0xe5, 0x6f, 0x60, 0xfb, 0x07, 0x40, 0xb0,
+ 0x8c, 0x9d, 0x84, 0x43, 0x6b, 0xc1, 0xf7, 0x8d,
+ 0x8d, 0x31, 0xf7, 0x7a, 0x39, 0x4d, 0x8f, 0x9a,
+ 0xeb
+};
+static const u8 enc_assoc006[] __initconst = {
+ 0x70, 0xd3, 0x33, 0xf3, 0x8b, 0x18, 0x0b
+};
+static const u8 enc_nonce006[] __initconst = {
+ 0xdf, 0x51, 0x84, 0x82, 0x42, 0x0c, 0x75, 0x9c
+};
+static const u8 enc_key006[] __initconst = {
+ 0x68, 0x7b, 0x8d, 0x8e, 0xe3, 0xc4, 0xdd, 0xae,
+ 0xdf, 0x72, 0x7f, 0x53, 0x72, 0x25, 0x1e, 0x78,
+ 0x91, 0xcb, 0x69, 0x76, 0x1f, 0x49, 0x93, 0xf9,
+ 0x6f, 0x21, 0xcc, 0x39, 0x9c, 0xad, 0xb1, 0x01
+};
+
+static const u8 enc_input007[] __initconst = {
+ 0x9b, 0x18, 0xdb, 0xdd, 0x9a, 0x0f, 0x3e, 0xa5,
+ 0x15, 0x17, 0xde, 0xdf, 0x08, 0x9d, 0x65, 0x0a,
+ 0x67, 0x30, 0x12, 0xe2, 0x34, 0x77, 0x4b, 0xc1,
+ 0xd9, 0xc6, 0x1f, 0xab, 0xc6, 0x18, 0x50, 0x17,
+ 0xa7, 0x9d, 0x3c, 0xa6, 0xc5, 0x35, 0x8c, 0x1c,
+ 0xc0, 0xa1, 0x7c, 0x9f, 0x03, 0x89, 0xca, 0xe1,
+ 0xe6, 0xe9, 0xd4, 0xd3, 0x88, 0xdb, 0xb4, 0x51,
+ 0x9d, 0xec, 0xb4, 0xfc, 0x52, 0xee, 0x6d, 0xf1,
+ 0x75, 0x42, 0xc6, 0xfd, 0xbd, 0x7a, 0x8e, 0x86,
+ 0xfc, 0x44, 0xb3, 0x4f, 0xf3, 0xea, 0x67, 0x5a,
+ 0x41, 0x13, 0xba, 0xb0, 0xdc, 0xe1, 0xd3, 0x2a,
+ 0x7c, 0x22, 0xb3, 0xca, 0xac, 0x6a, 0x37, 0x98,
+ 0x3e, 0x1d, 0x40, 0x97, 0xf7, 0x9b, 0x1d, 0x36,
+ 0x6b, 0xb3, 0x28, 0xbd, 0x60, 0x82, 0x47, 0x34,
+ 0xaa, 0x2f, 0x7d, 0xe9, 0xa8, 0x70, 0x81, 0x57,
+ 0xd4, 0xb9, 0x77, 0x0a, 0x9d, 0x29, 0xa7, 0x84,
+ 0x52, 0x4f, 0xc2, 0x4a, 0x40, 0x3b, 0x3c, 0xd4,
+ 0xc9, 0x2a, 0xdb, 0x4a, 0x53, 0xc4, 0xbe, 0x80,
+ 0xe9, 0x51, 0x7f, 0x8f, 0xc7, 0xa2, 0xce, 0x82,
+ 0x5c, 0x91, 0x1e, 0x74, 0xd9, 0xd0, 0xbd, 0xd5,
+ 0xf3, 0xfd, 0xda, 0x4d, 0x25, 0xb4, 0xbb, 0x2d,
+ 0xac, 0x2f, 0x3d, 0x71, 0x85, 0x7b, 0xcf, 0x3c,
+ 0x7b, 0x3e, 0x0e, 0x22, 0x78, 0x0c, 0x29, 0xbf,
+ 0xe4, 0xf4, 0x57, 0xb3, 0xcb, 0x49, 0xa0, 0xfc,
+ 0x1e, 0x05, 0x4e, 0x16, 0xbc, 0xd5, 0xa8, 0xa3,
+ 0xee, 0x05, 0x35, 0xc6, 0x7c, 0xab, 0x60, 0x14,
+ 0x55, 0x1a, 0x8e, 0xc5, 0x88, 0x5d, 0xd5, 0x81,
+ 0xc2, 0x81, 0xa5, 0xc4, 0x60, 0xdb, 0xaf, 0x77,
+ 0x91, 0xe1, 0xce, 0xa2, 0x7e, 0x7f, 0x42, 0xe3,
+ 0xb0, 0x13, 0x1c, 0x1f, 0x25, 0x60, 0x21, 0xe2,
+ 0x40, 0x5f, 0x99, 0xb7, 0x73, 0xec, 0x9b, 0x2b,
+ 0xf0, 0x65, 0x11, 0xc8, 0xd0, 0x0a, 0x9f, 0xd3
+};
+static const u8 enc_output007[] __initconst = {
+ 0x85, 0x04, 0xc2, 0xed, 0x8d, 0xfd, 0x97, 0x5c,
+ 0xd2, 0xb7, 0xe2, 0xc1, 0x6b, 0xa3, 0xba, 0xf8,
+ 0xc9, 0x50, 0xc3, 0xc6, 0xa5, 0xe3, 0xa4, 0x7c,
+ 0xc3, 0x23, 0x49, 0x5e, 0xa9, 0xb9, 0x32, 0xeb,
+ 0x8a, 0x7c, 0xca, 0xe5, 0xec, 0xfb, 0x7c, 0xc0,
+ 0xcb, 0x7d, 0xdc, 0x2c, 0x9d, 0x92, 0x55, 0x21,
+ 0x0a, 0xc8, 0x43, 0x63, 0x59, 0x0a, 0x31, 0x70,
+ 0x82, 0x67, 0x41, 0x03, 0xf8, 0xdf, 0xf2, 0xac,
+ 0xa7, 0x02, 0xd4, 0xd5, 0x8a, 0x2d, 0xc8, 0x99,
+ 0x19, 0x66, 0xd0, 0xf6, 0x88, 0x2c, 0x77, 0xd9,
+ 0xd4, 0x0d, 0x6c, 0xbd, 0x98, 0xde, 0xe7, 0x7f,
+ 0xad, 0x7e, 0x8a, 0xfb, 0xe9, 0x4b, 0xe5, 0xf7,
+ 0xe5, 0x50, 0xa0, 0x90, 0x3f, 0xd6, 0x22, 0x53,
+ 0xe3, 0xfe, 0x1b, 0xcc, 0x79, 0x3b, 0xec, 0x12,
+ 0x47, 0x52, 0xa7, 0xd6, 0x04, 0xe3, 0x52, 0xe6,
+ 0x93, 0x90, 0x91, 0x32, 0x73, 0x79, 0xb8, 0xd0,
+ 0x31, 0xde, 0x1f, 0x9f, 0x2f, 0x05, 0x38, 0x54,
+ 0x2f, 0x35, 0x04, 0x39, 0xe0, 0xa7, 0xba, 0xc6,
+ 0x52, 0xf6, 0x37, 0x65, 0x4c, 0x07, 0xa9, 0x7e,
+ 0xb3, 0x21, 0x6f, 0x74, 0x8c, 0xc9, 0xde, 0xdb,
+ 0x65, 0x1b, 0x9b, 0xaa, 0x60, 0xb1, 0x03, 0x30,
+ 0x6b, 0xb2, 0x03, 0xc4, 0x1c, 0x04, 0xf8, 0x0f,
+ 0x64, 0xaf, 0x46, 0xe4, 0x65, 0x99, 0x49, 0xe2,
+ 0xea, 0xce, 0x78, 0x00, 0xd8, 0x8b, 0xd5, 0x2e,
+ 0xcf, 0xfc, 0x40, 0x49, 0xe8, 0x58, 0xdc, 0x34,
+ 0x9c, 0x8c, 0x61, 0xbf, 0x0a, 0x8e, 0xec, 0x39,
+ 0xa9, 0x30, 0x05, 0x5a, 0xd2, 0x56, 0x01, 0xc7,
+ 0xda, 0x8f, 0x4e, 0xbb, 0x43, 0xa3, 0x3a, 0xf9,
+ 0x15, 0x2a, 0xd0, 0xa0, 0x7a, 0x87, 0x34, 0x82,
+ 0xfe, 0x8a, 0xd1, 0x2d, 0x5e, 0xc7, 0xbf, 0x04,
+ 0x53, 0x5f, 0x3b, 0x36, 0xd4, 0x25, 0x5c, 0x34,
+ 0x7a, 0x8d, 0xd5, 0x05, 0xce, 0x72, 0xca, 0xef,
+ 0x7a, 0x4b, 0xbc, 0xb0, 0x10, 0x5c, 0x96, 0x42,
+ 0x3a, 0x00, 0x98, 0xcd, 0x15, 0xe8, 0xb7, 0x53
+};
+static const u8 enc_assoc007[] __initconst = { };
+static const u8 enc_nonce007[] __initconst = {
+ 0xde, 0x7b, 0xef, 0xc3, 0x65, 0x1b, 0x68, 0xb0
+};
+static const u8 enc_key007[] __initconst = {
+ 0x8d, 0xb8, 0x91, 0x48, 0xf0, 0xe7, 0x0a, 0xbd,
+ 0xf9, 0x3f, 0xcd, 0xd9, 0xa0, 0x1e, 0x42, 0x4c,
+ 0xe7, 0xde, 0x25, 0x3d, 0xa3, 0xd7, 0x05, 0x80,
+ 0x8d, 0xf2, 0x82, 0xac, 0x44, 0x16, 0x51, 0x01
+};
+
+static const u8 enc_input008[] __initconst = {
+ 0xc3, 0x09, 0x94, 0x62, 0xe6, 0x46, 0x2e, 0x10,
+ 0xbe, 0x00, 0xe4, 0xfc, 0xf3, 0x40, 0xa3, 0xe2,
+ 0x0f, 0xc2, 0x8b, 0x28, 0xdc, 0xba, 0xb4, 0x3c,
+ 0xe4, 0x21, 0x58, 0x61, 0xcd, 0x8b, 0xcd, 0xfb,
+ 0xac, 0x94, 0xa1, 0x45, 0xf5, 0x1c, 0xe1, 0x12,
+ 0xe0, 0x3b, 0x67, 0x21, 0x54, 0x5e, 0x8c, 0xaa,
+ 0xcf, 0xdb, 0xb4, 0x51, 0xd4, 0x13, 0xda, 0xe6,
+ 0x83, 0x89, 0xb6, 0x92, 0xe9, 0x21, 0x76, 0xa4,
+ 0x93, 0x7d, 0x0e, 0xfd, 0x96, 0x36, 0x03, 0x91,
+ 0x43, 0x5c, 0x92, 0x49, 0x62, 0x61, 0x7b, 0xeb,
+ 0x43, 0x89, 0xb8, 0x12, 0x20, 0x43, 0xd4, 0x47,
+ 0x06, 0x84, 0xee, 0x47, 0xe9, 0x8a, 0x73, 0x15,
+ 0x0f, 0x72, 0xcf, 0xed, 0xce, 0x96, 0xb2, 0x7f,
+ 0x21, 0x45, 0x76, 0xeb, 0x26, 0x28, 0x83, 0x6a,
+ 0xad, 0xaa, 0xa6, 0x81, 0xd8, 0x55, 0xb1, 0xa3,
+ 0x85, 0xb3, 0x0c, 0xdf, 0xf1, 0x69, 0x2d, 0x97,
+ 0x05, 0x2a, 0xbc, 0x7c, 0x7b, 0x25, 0xf8, 0x80,
+ 0x9d, 0x39, 0x25, 0xf3, 0x62, 0xf0, 0x66, 0x5e,
+ 0xf4, 0xa0, 0xcf, 0xd8, 0xfd, 0x4f, 0xb1, 0x1f,
+ 0x60, 0x3a, 0x08, 0x47, 0xaf, 0xe1, 0xf6, 0x10,
+ 0x77, 0x09, 0xa7, 0x27, 0x8f, 0x9a, 0x97, 0x5a,
+ 0x26, 0xfa, 0xfe, 0x41, 0x32, 0x83, 0x10, 0xe0,
+ 0x1d, 0xbf, 0x64, 0x0d, 0xf4, 0x1c, 0x32, 0x35,
+ 0xe5, 0x1b, 0x36, 0xef, 0xd4, 0x4a, 0x93, 0x4d,
+ 0x00, 0x7c, 0xec, 0x02, 0x07, 0x8b, 0x5d, 0x7d,
+ 0x1b, 0x0e, 0xd1, 0xa6, 0xa5, 0x5d, 0x7d, 0x57,
+ 0x88, 0xa8, 0xcc, 0x81, 0xb4, 0x86, 0x4e, 0xb4,
+ 0x40, 0xe9, 0x1d, 0xc3, 0xb1, 0x24, 0x3e, 0x7f,
+ 0xcc, 0x8a, 0x24, 0x9b, 0xdf, 0x6d, 0xf0, 0x39,
+ 0x69, 0x3e, 0x4c, 0xc0, 0x96, 0xe4, 0x13, 0xda,
+ 0x90, 0xda, 0xf4, 0x95, 0x66, 0x8b, 0x17, 0x17,
+ 0xfe, 0x39, 0x43, 0x25, 0xaa, 0xda, 0xa0, 0x43,
+ 0x3c, 0xb1, 0x41, 0x02, 0xa3, 0xf0, 0xa7, 0x19,
+ 0x59, 0xbc, 0x1d, 0x7d, 0x6c, 0x6d, 0x91, 0x09,
+ 0x5c, 0xb7, 0x5b, 0x01, 0xd1, 0x6f, 0x17, 0x21,
+ 0x97, 0xbf, 0x89, 0x71, 0xa5, 0xb0, 0x6e, 0x07,
+ 0x45, 0xfd, 0x9d, 0xea, 0x07, 0xf6, 0x7a, 0x9f,
+ 0x10, 0x18, 0x22, 0x30, 0x73, 0xac, 0xd4, 0x6b,
+ 0x72, 0x44, 0xed, 0xd9, 0x19, 0x9b, 0x2d, 0x4a,
+ 0x41, 0xdd, 0xd1, 0x85, 0x5e, 0x37, 0x19, 0xed,
+ 0xd2, 0x15, 0x8f, 0x5e, 0x91, 0xdb, 0x33, 0xf2,
+ 0xe4, 0xdb, 0xff, 0x98, 0xfb, 0xa3, 0xb5, 0xca,
+ 0x21, 0x69, 0x08, 0xe7, 0x8a, 0xdf, 0x90, 0xff,
+ 0x3e, 0xe9, 0x20, 0x86, 0x3c, 0xe9, 0xfc, 0x0b,
+ 0xfe, 0x5c, 0x61, 0xaa, 0x13, 0x92, 0x7f, 0x7b,
+ 0xec, 0xe0, 0x6d, 0xa8, 0x23, 0x22, 0xf6, 0x6b,
+ 0x77, 0xc4, 0xfe, 0x40, 0x07, 0x3b, 0xb6, 0xf6,
+ 0x8e, 0x5f, 0xd4, 0xb9, 0xb7, 0x0f, 0x21, 0x04,
+ 0xef, 0x83, 0x63, 0x91, 0x69, 0x40, 0xa3, 0x48,
+ 0x5c, 0xd2, 0x60, 0xf9, 0x4f, 0x6c, 0x47, 0x8b,
+ 0x3b, 0xb1, 0x9f, 0x8e, 0xee, 0x16, 0x8a, 0x13,
+ 0xfc, 0x46, 0x17, 0xc3, 0xc3, 0x32, 0x56, 0xf8,
+ 0x3c, 0x85, 0x3a, 0xb6, 0x3e, 0xaa, 0x89, 0x4f,
+ 0xb3, 0xdf, 0x38, 0xfd, 0xf1, 0xe4, 0x3a, 0xc0,
+ 0xe6, 0x58, 0xb5, 0x8f, 0xc5, 0x29, 0xa2, 0x92,
+ 0x4a, 0xb6, 0xa0, 0x34, 0x7f, 0xab, 0xb5, 0x8a,
+ 0x90, 0xa1, 0xdb, 0x4d, 0xca, 0xb6, 0x2c, 0x41,
+ 0x3c, 0xf7, 0x2b, 0x21, 0xc3, 0xfd, 0xf4, 0x17,
+ 0x5c, 0xb5, 0x33, 0x17, 0x68, 0x2b, 0x08, 0x30,
+ 0xf3, 0xf7, 0x30, 0x3c, 0x96, 0xe6, 0x6a, 0x20,
+ 0x97, 0xe7, 0x4d, 0x10, 0x5f, 0x47, 0x5f, 0x49,
+ 0x96, 0x09, 0xf0, 0x27, 0x91, 0xc8, 0xf8, 0x5a,
+ 0x2e, 0x79, 0xb5, 0xe2, 0xb8, 0xe8, 0xb9, 0x7b,
+ 0xd5, 0x10, 0xcb, 0xff, 0x5d, 0x14, 0x73, 0xf3
+};
+static const u8 enc_output008[] __initconst = {
+ 0x14, 0xf6, 0x41, 0x37, 0xa6, 0xd4, 0x27, 0xcd,
+ 0xdb, 0x06, 0x3e, 0x9a, 0x4e, 0xab, 0xd5, 0xb1,
+ 0x1e, 0x6b, 0xd2, 0xbc, 0x11, 0xf4, 0x28, 0x93,
+ 0x63, 0x54, 0xef, 0xbb, 0x5e, 0x1d, 0x3a, 0x1d,
+ 0x37, 0x3c, 0x0a, 0x6c, 0x1e, 0xc2, 0xd1, 0x2c,
+ 0xb5, 0xa3, 0xb5, 0x7b, 0xb8, 0x8f, 0x25, 0xa6,
+ 0x1b, 0x61, 0x1c, 0xec, 0x28, 0x58, 0x26, 0xa4,
+ 0xa8, 0x33, 0x28, 0x25, 0x5c, 0x45, 0x05, 0xe5,
+ 0x6c, 0x99, 0xe5, 0x45, 0xc4, 0xa2, 0x03, 0x84,
+ 0x03, 0x73, 0x1e, 0x8c, 0x49, 0xac, 0x20, 0xdd,
+ 0x8d, 0xb3, 0xc4, 0xf5, 0xe7, 0x4f, 0xf1, 0xed,
+ 0xa1, 0x98, 0xde, 0xa4, 0x96, 0xdd, 0x2f, 0xab,
+ 0xab, 0x97, 0xcf, 0x3e, 0xd2, 0x9e, 0xb8, 0x13,
+ 0x07, 0x28, 0x29, 0x19, 0xaf, 0xfd, 0xf2, 0x49,
+ 0x43, 0xea, 0x49, 0x26, 0x91, 0xc1, 0x07, 0xd6,
+ 0xbb, 0x81, 0x75, 0x35, 0x0d, 0x24, 0x7f, 0xc8,
+ 0xda, 0xd4, 0xb7, 0xeb, 0xe8, 0x5c, 0x09, 0xa2,
+ 0x2f, 0xdc, 0x28, 0x7d, 0x3a, 0x03, 0xfa, 0x94,
+ 0xb5, 0x1d, 0x17, 0x99, 0x36, 0xc3, 0x1c, 0x18,
+ 0x34, 0xe3, 0x9f, 0xf5, 0x55, 0x7c, 0xb0, 0x60,
+ 0x9d, 0xff, 0xac, 0xd4, 0x61, 0xf2, 0xad, 0xf8,
+ 0xce, 0xc7, 0xbe, 0x5c, 0xd2, 0x95, 0xa8, 0x4b,
+ 0x77, 0x13, 0x19, 0x59, 0x26, 0xc9, 0xb7, 0x8f,
+ 0x6a, 0xcb, 0x2d, 0x37, 0x91, 0xea, 0x92, 0x9c,
+ 0x94, 0x5b, 0xda, 0x0b, 0xce, 0xfe, 0x30, 0x20,
+ 0xf8, 0x51, 0xad, 0xf2, 0xbe, 0xe7, 0xc7, 0xff,
+ 0xb3, 0x33, 0x91, 0x6a, 0xc9, 0x1a, 0x41, 0xc9,
+ 0x0f, 0xf3, 0x10, 0x0e, 0xfd, 0x53, 0xff, 0x6c,
+ 0x16, 0x52, 0xd9, 0xf3, 0xf7, 0x98, 0x2e, 0xc9,
+ 0x07, 0x31, 0x2c, 0x0c, 0x72, 0xd7, 0xc5, 0xc6,
+ 0x08, 0x2a, 0x7b, 0xda, 0xbd, 0x7e, 0x02, 0xea,
+ 0x1a, 0xbb, 0xf2, 0x04, 0x27, 0x61, 0x28, 0x8e,
+ 0xf5, 0x04, 0x03, 0x1f, 0x4c, 0x07, 0x55, 0x82,
+ 0xec, 0x1e, 0xd7, 0x8b, 0x2f, 0x65, 0x56, 0xd1,
+ 0xd9, 0x1e, 0x3c, 0xe9, 0x1f, 0x5e, 0x98, 0x70,
+ 0x38, 0x4a, 0x8c, 0x49, 0xc5, 0x43, 0xa0, 0xa1,
+ 0x8b, 0x74, 0x9d, 0x4c, 0x62, 0x0d, 0x10, 0x0c,
+ 0xf4, 0x6c, 0x8f, 0xe0, 0xaa, 0x9a, 0x8d, 0xb7,
+ 0xe0, 0xbe, 0x4c, 0x87, 0xf1, 0x98, 0x2f, 0xcc,
+ 0xed, 0xc0, 0x52, 0x29, 0xdc, 0x83, 0xf8, 0xfc,
+ 0x2c, 0x0e, 0xa8, 0x51, 0x4d, 0x80, 0x0d, 0xa3,
+ 0xfe, 0xd8, 0x37, 0xe7, 0x41, 0x24, 0xfc, 0xfb,
+ 0x75, 0xe3, 0x71, 0x7b, 0x57, 0x45, 0xf5, 0x97,
+ 0x73, 0x65, 0x63, 0x14, 0x74, 0xb8, 0x82, 0x9f,
+ 0xf8, 0x60, 0x2f, 0x8a, 0xf2, 0x4e, 0xf1, 0x39,
+ 0xda, 0x33, 0x91, 0xf8, 0x36, 0xe0, 0x8d, 0x3f,
+ 0x1f, 0x3b, 0x56, 0xdc, 0xa0, 0x8f, 0x3c, 0x9d,
+ 0x71, 0x52, 0xa7, 0xb8, 0xc0, 0xa5, 0xc6, 0xa2,
+ 0x73, 0xda, 0xf4, 0x4b, 0x74, 0x5b, 0x00, 0x3d,
+ 0x99, 0xd7, 0x96, 0xba, 0xe6, 0xe1, 0xa6, 0x96,
+ 0x38, 0xad, 0xb3, 0xc0, 0xd2, 0xba, 0x91, 0x6b,
+ 0xf9, 0x19, 0xdd, 0x3b, 0xbe, 0xbe, 0x9c, 0x20,
+ 0x50, 0xba, 0xa1, 0xd0, 0xce, 0x11, 0xbd, 0x95,
+ 0xd8, 0xd1, 0xdd, 0x33, 0x85, 0x74, 0xdc, 0xdb,
+ 0x66, 0x76, 0x44, 0xdc, 0x03, 0x74, 0x48, 0x35,
+ 0x98, 0xb1, 0x18, 0x47, 0x94, 0x7d, 0xff, 0x62,
+ 0xe4, 0x58, 0x78, 0xab, 0xed, 0x95, 0x36, 0xd9,
+ 0x84, 0x91, 0x82, 0x64, 0x41, 0xbb, 0x58, 0xe6,
+ 0x1c, 0x20, 0x6d, 0x15, 0x6b, 0x13, 0x96, 0xe8,
+ 0x35, 0x7f, 0xdc, 0x40, 0x2c, 0xe9, 0xbc, 0x8a,
+ 0x4f, 0x92, 0xec, 0x06, 0x2d, 0x50, 0xdf, 0x93,
+ 0x5d, 0x65, 0x5a, 0xa8, 0xfc, 0x20, 0x50, 0x14,
+ 0xa9, 0x8a, 0x7e, 0x1d, 0x08, 0x1f, 0xe2, 0x99,
+ 0xd0, 0xbe, 0xfb, 0x3a, 0x21, 0x9d, 0xad, 0x86,
+ 0x54, 0xfd, 0x0d, 0x98, 0x1c, 0x5a, 0x6f, 0x1f,
+ 0x9a, 0x40, 0xcd, 0xa2, 0xff, 0x6a, 0xf1, 0x54
+};
+static const u8 enc_assoc008[] __initconst = { };
+static const u8 enc_nonce008[] __initconst = {
+ 0x0e, 0x0d, 0x57, 0xbb, 0x7b, 0x40, 0x54, 0x02
+};
+static const u8 enc_key008[] __initconst = {
+ 0xf2, 0xaa, 0x4f, 0x99, 0xfd, 0x3e, 0xa8, 0x53,
+ 0xc1, 0x44, 0xe9, 0x81, 0x18, 0xdc, 0xf5, 0xf0,
+ 0x3e, 0x44, 0x15, 0x59, 0xe0, 0xc5, 0x44, 0x86,
+ 0xc3, 0x91, 0xa8, 0x75, 0xc0, 0x12, 0x46, 0xba
+};
+
+static const u8 enc_input009[] __initconst = {
+ 0xe6, 0xc3, 0xdb, 0x63, 0x55, 0x15, 0xe3, 0x5b,
+ 0xb7, 0x4b, 0x27, 0x8b, 0x5a, 0xdd, 0xc2, 0xe8,
+ 0x3a, 0x6b, 0xd7, 0x81, 0x96, 0x35, 0x97, 0xca,
+ 0xd7, 0x68, 0xe8, 0xef, 0xce, 0xab, 0xda, 0x09,
+ 0x6e, 0xd6, 0x8e, 0xcb, 0x55, 0xb5, 0xe1, 0xe5,
+ 0x57, 0xfd, 0xc4, 0xe3, 0xe0, 0x18, 0x4f, 0x85,
+ 0xf5, 0x3f, 0x7e, 0x4b, 0x88, 0xc9, 0x52, 0x44,
+ 0x0f, 0xea, 0xaf, 0x1f, 0x71, 0x48, 0x9f, 0x97,
+ 0x6d, 0xb9, 0x6f, 0x00, 0xa6, 0xde, 0x2b, 0x77,
+ 0x8b, 0x15, 0xad, 0x10, 0xa0, 0x2b, 0x7b, 0x41,
+ 0x90, 0x03, 0x2d, 0x69, 0xae, 0xcc, 0x77, 0x7c,
+ 0xa5, 0x9d, 0x29, 0x22, 0xc2, 0xea, 0xb4, 0x00,
+ 0x1a, 0xd2, 0x7a, 0x98, 0x8a, 0xf9, 0xf7, 0x82,
+ 0xb0, 0xab, 0xd8, 0xa6, 0x94, 0x8d, 0x58, 0x2f,
+ 0x01, 0x9e, 0x00, 0x20, 0xfc, 0x49, 0xdc, 0x0e,
+ 0x03, 0xe8, 0x45, 0x10, 0xd6, 0xa8, 0xda, 0x55,
+ 0x10, 0x9a, 0xdf, 0x67, 0x22, 0x8b, 0x43, 0xab,
+ 0x00, 0xbb, 0x02, 0xc8, 0xdd, 0x7b, 0x97, 0x17,
+ 0xd7, 0x1d, 0x9e, 0x02, 0x5e, 0x48, 0xde, 0x8e,
+ 0xcf, 0x99, 0x07, 0x95, 0x92, 0x3c, 0x5f, 0x9f,
+ 0xc5, 0x8a, 0xc0, 0x23, 0xaa, 0xd5, 0x8c, 0x82,
+ 0x6e, 0x16, 0x92, 0xb1, 0x12, 0x17, 0x07, 0xc3,
+ 0xfb, 0x36, 0xf5, 0x6c, 0x35, 0xd6, 0x06, 0x1f,
+ 0x9f, 0xa7, 0x94, 0xa2, 0x38, 0x63, 0x9c, 0xb0,
+ 0x71, 0xb3, 0xa5, 0xd2, 0xd8, 0xba, 0x9f, 0x08,
+ 0x01, 0xb3, 0xff, 0x04, 0x97, 0x73, 0x45, 0x1b,
+ 0xd5, 0xa9, 0x9c, 0x80, 0xaf, 0x04, 0x9a, 0x85,
+ 0xdb, 0x32, 0x5b, 0x5d, 0x1a, 0xc1, 0x36, 0x28,
+ 0x10, 0x79, 0xf1, 0x3c, 0xbf, 0x1a, 0x41, 0x5c,
+ 0x4e, 0xdf, 0xb2, 0x7c, 0x79, 0x3b, 0x7a, 0x62,
+ 0x3d, 0x4b, 0xc9, 0x9b, 0x2a, 0x2e, 0x7c, 0xa2,
+ 0xb1, 0x11, 0x98, 0xa7, 0x34, 0x1a, 0x00, 0xf3,
+ 0xd1, 0xbc, 0x18, 0x22, 0xba, 0x02, 0x56, 0x62,
+ 0x31, 0x10, 0x11, 0x6d, 0xe0, 0x54, 0x9d, 0x40,
+ 0x1f, 0x26, 0x80, 0x41, 0xca, 0x3f, 0x68, 0x0f,
+ 0x32, 0x1d, 0x0a, 0x8e, 0x79, 0xd8, 0xa4, 0x1b,
+ 0x29, 0x1c, 0x90, 0x8e, 0xc5, 0xe3, 0xb4, 0x91,
+ 0x37, 0x9a, 0x97, 0x86, 0x99, 0xd5, 0x09, 0xc5,
+ 0xbb, 0xa3, 0x3f, 0x21, 0x29, 0x82, 0x14, 0x5c,
+ 0xab, 0x25, 0xfb, 0xf2, 0x4f, 0x58, 0x26, 0xd4,
+ 0x83, 0xaa, 0x66, 0x89, 0x67, 0x7e, 0xc0, 0x49,
+ 0xe1, 0x11, 0x10, 0x7f, 0x7a, 0xda, 0x29, 0x04,
+ 0xff, 0xf0, 0xcb, 0x09, 0x7c, 0x9d, 0xfa, 0x03,
+ 0x6f, 0x81, 0x09, 0x31, 0x60, 0xfb, 0x08, 0xfa,
+ 0x74, 0xd3, 0x64, 0x44, 0x7c, 0x55, 0x85, 0xec,
+ 0x9c, 0x6e, 0x25, 0xb7, 0x6c, 0xc5, 0x37, 0xb6,
+ 0x83, 0x87, 0x72, 0x95, 0x8b, 0x9d, 0xe1, 0x69,
+ 0x5c, 0x31, 0x95, 0x42, 0xa6, 0x2c, 0xd1, 0x36,
+ 0x47, 0x1f, 0xec, 0x54, 0xab, 0xa2, 0x1c, 0xd8,
+ 0x00, 0xcc, 0xbc, 0x0d, 0x65, 0xe2, 0x67, 0xbf,
+ 0xbc, 0xea, 0xee, 0x9e, 0xe4, 0x36, 0x95, 0xbe,
+ 0x73, 0xd9, 0xa6, 0xd9, 0x0f, 0xa0, 0xcc, 0x82,
+ 0x76, 0x26, 0xad, 0x5b, 0x58, 0x6c, 0x4e, 0xab,
+ 0x29, 0x64, 0xd3, 0xd9, 0xa9, 0x08, 0x8c, 0x1d,
+ 0xa1, 0x4f, 0x80, 0xd8, 0x3f, 0x94, 0xfb, 0xd3,
+ 0x7b, 0xfc, 0xd1, 0x2b, 0xc3, 0x21, 0xeb, 0xe5,
+ 0x1c, 0x84, 0x23, 0x7f, 0x4b, 0xfa, 0xdb, 0x34,
+ 0x18, 0xa2, 0xc2, 0xe5, 0x13, 0xfe, 0x6c, 0x49,
+ 0x81, 0xd2, 0x73, 0xe7, 0xe2, 0xd7, 0xe4, 0x4f,
+ 0x4b, 0x08, 0x6e, 0xb1, 0x12, 0x22, 0x10, 0x9d,
+ 0xac, 0x51, 0x1e, 0x17, 0xd9, 0x8a, 0x0b, 0x42,
+ 0x88, 0x16, 0x81, 0x37, 0x7c, 0x6a, 0xf7, 0xef,
+ 0x2d, 0xe3, 0xd9, 0xf8, 0x5f, 0xe0, 0x53, 0x27,
+ 0x74, 0xb9, 0xe2, 0xd6, 0x1c, 0x80, 0x2c, 0x52,
+ 0x65
+};
+static const u8 enc_output009[] __initconst = {
+ 0xfd, 0x81, 0x8d, 0xd0, 0x3d, 0xb4, 0xd5, 0xdf,
+ 0xd3, 0x42, 0x47, 0x5a, 0x6d, 0x19, 0x27, 0x66,
+ 0x4b, 0x2e, 0x0c, 0x27, 0x9c, 0x96, 0x4c, 0x72,
+ 0x02, 0xa3, 0x65, 0xc3, 0xb3, 0x6f, 0x2e, 0xbd,
+ 0x63, 0x8a, 0x4a, 0x5d, 0x29, 0xa2, 0xd0, 0x28,
+ 0x48, 0xc5, 0x3d, 0x98, 0xa3, 0xbc, 0xe0, 0xbe,
+ 0x3b, 0x3f, 0xe6, 0x8a, 0xa4, 0x7f, 0x53, 0x06,
+ 0xfa, 0x7f, 0x27, 0x76, 0x72, 0x31, 0xa1, 0xf5,
+ 0xd6, 0x0c, 0x52, 0x47, 0xba, 0xcd, 0x4f, 0xd7,
+ 0xeb, 0x05, 0x48, 0x0d, 0x7c, 0x35, 0x4a, 0x09,
+ 0xc9, 0x76, 0x71, 0x02, 0xa3, 0xfb, 0xb7, 0x1a,
+ 0x65, 0xb7, 0xed, 0x98, 0xc6, 0x30, 0x8a, 0x00,
+ 0xae, 0xa1, 0x31, 0xe5, 0xb5, 0x9e, 0x6d, 0x62,
+ 0xda, 0xda, 0x07, 0x0f, 0x38, 0x38, 0xd3, 0xcb,
+ 0xc1, 0xb0, 0xad, 0xec, 0x72, 0xec, 0xb1, 0xa2,
+ 0x7b, 0x59, 0xf3, 0x3d, 0x2b, 0xef, 0xcd, 0x28,
+ 0x5b, 0x83, 0xcc, 0x18, 0x91, 0x88, 0xb0, 0x2e,
+ 0xf9, 0x29, 0x31, 0x18, 0xf9, 0x4e, 0xe9, 0x0a,
+ 0x91, 0x92, 0x9f, 0xae, 0x2d, 0xad, 0xf4, 0xe6,
+ 0x1a, 0xe2, 0xa4, 0xee, 0x47, 0x15, 0xbf, 0x83,
+ 0x6e, 0xd7, 0x72, 0x12, 0x3b, 0x2d, 0x24, 0xe9,
+ 0xb2, 0x55, 0xcb, 0x3c, 0x10, 0xf0, 0x24, 0x8a,
+ 0x4a, 0x02, 0xea, 0x90, 0x25, 0xf0, 0xb4, 0x79,
+ 0x3a, 0xef, 0x6e, 0xf5, 0x52, 0xdf, 0xb0, 0x0a,
+ 0xcd, 0x24, 0x1c, 0xd3, 0x2e, 0x22, 0x74, 0xea,
+ 0x21, 0x6f, 0xe9, 0xbd, 0xc8, 0x3e, 0x36, 0x5b,
+ 0x19, 0xf1, 0xca, 0x99, 0x0a, 0xb4, 0xa7, 0x52,
+ 0x1a, 0x4e, 0xf2, 0xad, 0x8d, 0x56, 0x85, 0xbb,
+ 0x64, 0x89, 0xba, 0x26, 0xf9, 0xc7, 0xe1, 0x89,
+ 0x19, 0x22, 0x77, 0xc3, 0xa8, 0xfc, 0xff, 0xad,
+ 0xfe, 0xb9, 0x48, 0xae, 0x12, 0x30, 0x9f, 0x19,
+ 0xfb, 0x1b, 0xef, 0x14, 0x87, 0x8a, 0x78, 0x71,
+ 0xf3, 0xf4, 0xb7, 0x00, 0x9c, 0x1d, 0xb5, 0x3d,
+ 0x49, 0x00, 0x0c, 0x06, 0xd4, 0x50, 0xf9, 0x54,
+ 0x45, 0xb2, 0x5b, 0x43, 0xdb, 0x6d, 0xcf, 0x1a,
+ 0xe9, 0x7a, 0x7a, 0xcf, 0xfc, 0x8a, 0x4e, 0x4d,
+ 0x0b, 0x07, 0x63, 0x28, 0xd8, 0xe7, 0x08, 0x95,
+ 0xdf, 0xa6, 0x72, 0x93, 0x2e, 0xbb, 0xa0, 0x42,
+ 0x89, 0x16, 0xf1, 0xd9, 0x0c, 0xf9, 0xa1, 0x16,
+ 0xfd, 0xd9, 0x03, 0xb4, 0x3b, 0x8a, 0xf5, 0xf6,
+ 0xe7, 0x6b, 0x2e, 0x8e, 0x4c, 0x3d, 0xe2, 0xaf,
+ 0x08, 0x45, 0x03, 0xff, 0x09, 0xb6, 0xeb, 0x2d,
+ 0xc6, 0x1b, 0x88, 0x94, 0xac, 0x3e, 0xf1, 0x9f,
+ 0x0e, 0x0e, 0x2b, 0xd5, 0x00, 0x4d, 0x3f, 0x3b,
+ 0x53, 0xae, 0xaf, 0x1c, 0x33, 0x5f, 0x55, 0x6e,
+ 0x8d, 0xaf, 0x05, 0x7a, 0x10, 0x34, 0xc9, 0xf4,
+ 0x66, 0xcb, 0x62, 0x12, 0xa6, 0xee, 0xe8, 0x1c,
+ 0x5d, 0x12, 0x86, 0xdb, 0x6f, 0x1c, 0x33, 0xc4,
+ 0x1c, 0xda, 0x82, 0x2d, 0x3b, 0x59, 0xfe, 0xb1,
+ 0xa4, 0x59, 0x41, 0x86, 0xd0, 0xef, 0xae, 0xfb,
+ 0xda, 0x6d, 0x11, 0xb8, 0xca, 0xe9, 0x6e, 0xff,
+ 0xf7, 0xa9, 0xd9, 0x70, 0x30, 0xfc, 0x53, 0xe2,
+ 0xd7, 0xa2, 0x4e, 0xc7, 0x91, 0xd9, 0x07, 0x06,
+ 0xaa, 0xdd, 0xb0, 0x59, 0x28, 0x1d, 0x00, 0x66,
+ 0xc5, 0x54, 0xc2, 0xfc, 0x06, 0xda, 0x05, 0x90,
+ 0x52, 0x1d, 0x37, 0x66, 0xee, 0xf0, 0xb2, 0x55,
+ 0x8a, 0x5d, 0xd2, 0x38, 0x86, 0x94, 0x9b, 0xfc,
+ 0x10, 0x4c, 0xa1, 0xb9, 0x64, 0x3e, 0x44, 0xb8,
+ 0x5f, 0xb0, 0x0c, 0xec, 0xe0, 0xc9, 0xe5, 0x62,
+ 0x75, 0x3f, 0x09, 0xd5, 0xf5, 0xd9, 0x26, 0xba,
+ 0x9e, 0xd2, 0xf4, 0xb9, 0x48, 0x0a, 0xbc, 0xa2,
+ 0xd6, 0x7c, 0x36, 0x11, 0x7d, 0x26, 0x81, 0x89,
+ 0xcf, 0xa4, 0xad, 0x73, 0x0e, 0xee, 0xcc, 0x06,
+ 0xa9, 0xdb, 0xb1, 0xfd, 0xfb, 0x09, 0x7f, 0x90,
+ 0x42, 0x37, 0x2f, 0xe1, 0x9c, 0x0f, 0x6f, 0xcf,
+ 0x43, 0xb5, 0xd9, 0x90, 0xe1, 0x85, 0xf5, 0xa8,
+ 0xae
+};
+static const u8 enc_assoc009[] __initconst = {
+ 0x5a, 0x27, 0xff, 0xeb, 0xdf, 0x84, 0xb2, 0x9e,
+ 0xef
+};
+static const u8 enc_nonce009[] __initconst = {
+ 0xef, 0x2d, 0x63, 0xee, 0x6b, 0x80, 0x8b, 0x78
+};
+static const u8 enc_key009[] __initconst = {
+ 0xea, 0xbc, 0x56, 0x99, 0xe3, 0x50, 0xff, 0xc5,
+ 0xcc, 0x1a, 0xd7, 0xc1, 0x57, 0x72, 0xea, 0x86,
+ 0x5b, 0x89, 0x88, 0x61, 0x3d, 0x2f, 0x9b, 0xb2,
+ 0xe7, 0x9c, 0xec, 0x74, 0x6e, 0x3e, 0xf4, 0x3b
+};
+
+static const u8 enc_input010[] __initconst = {
+ 0x42, 0x93, 0xe4, 0xeb, 0x97, 0xb0, 0x57, 0xbf,
+ 0x1a, 0x8b, 0x1f, 0xe4, 0x5f, 0x36, 0x20, 0x3c,
+ 0xef, 0x0a, 0xa9, 0x48, 0x5f, 0x5f, 0x37, 0x22,
+ 0x3a, 0xde, 0xe3, 0xae, 0xbe, 0xad, 0x07, 0xcc,
+ 0xb1, 0xf6, 0xf5, 0xf9, 0x56, 0xdd, 0xe7, 0x16,
+ 0x1e, 0x7f, 0xdf, 0x7a, 0x9e, 0x75, 0xb7, 0xc7,
+ 0xbe, 0xbe, 0x8a, 0x36, 0x04, 0xc0, 0x10, 0xf4,
+ 0x95, 0x20, 0x03, 0xec, 0xdc, 0x05, 0xa1, 0x7d,
+ 0xc4, 0xa9, 0x2c, 0x82, 0xd0, 0xbc, 0x8b, 0xc5,
+ 0xc7, 0x45, 0x50, 0xf6, 0xa2, 0x1a, 0xb5, 0x46,
+ 0x3b, 0x73, 0x02, 0xa6, 0x83, 0x4b, 0x73, 0x82,
+ 0x58, 0x5e, 0x3b, 0x65, 0x2f, 0x0e, 0xfd, 0x2b,
+ 0x59, 0x16, 0xce, 0xa1, 0x60, 0x9c, 0xe8, 0x3a,
+ 0x99, 0xed, 0x8d, 0x5a, 0xcf, 0xf6, 0x83, 0xaf,
+ 0xba, 0xd7, 0x73, 0x73, 0x40, 0x97, 0x3d, 0xca,
+ 0xef, 0x07, 0x57, 0xe6, 0xd9, 0x70, 0x0e, 0x95,
+ 0xae, 0xa6, 0x8d, 0x04, 0xcc, 0xee, 0xf7, 0x09,
+ 0x31, 0x77, 0x12, 0xa3, 0x23, 0x97, 0x62, 0xb3,
+ 0x7b, 0x32, 0xfb, 0x80, 0x14, 0x48, 0x81, 0xc3,
+ 0xe5, 0xea, 0x91, 0x39, 0x52, 0x81, 0xa2, 0x4f,
+ 0xe4, 0xb3, 0x09, 0xff, 0xde, 0x5e, 0xe9, 0x58,
+ 0x84, 0x6e, 0xf9, 0x3d, 0xdf, 0x25, 0xea, 0xad,
+ 0xae, 0xe6, 0x9a, 0xd1, 0x89, 0x55, 0xd3, 0xde,
+ 0x6c, 0x52, 0xdb, 0x70, 0xfe, 0x37, 0xce, 0x44,
+ 0x0a, 0xa8, 0x25, 0x5f, 0x92, 0xc1, 0x33, 0x4a,
+ 0x4f, 0x9b, 0x62, 0x35, 0xff, 0xce, 0xc0, 0xa9,
+ 0x60, 0xce, 0x52, 0x00, 0x97, 0x51, 0x35, 0x26,
+ 0x2e, 0xb9, 0x36, 0xa9, 0x87, 0x6e, 0x1e, 0xcc,
+ 0x91, 0x78, 0x53, 0x98, 0x86, 0x5b, 0x9c, 0x74,
+ 0x7d, 0x88, 0x33, 0xe1, 0xdf, 0x37, 0x69, 0x2b,
+ 0xbb, 0xf1, 0x4d, 0xf4, 0xd1, 0xf1, 0x39, 0x93,
+ 0x17, 0x51, 0x19, 0xe3, 0x19, 0x1e, 0x76, 0x37,
+ 0x25, 0xfb, 0x09, 0x27, 0x6a, 0xab, 0x67, 0x6f,
+ 0x14, 0x12, 0x64, 0xe7, 0xc4, 0x07, 0xdf, 0x4d,
+ 0x17, 0xbb, 0x6d, 0xe0, 0xe9, 0xb9, 0xab, 0xca,
+ 0x10, 0x68, 0xaf, 0x7e, 0xb7, 0x33, 0x54, 0x73,
+ 0x07, 0x6e, 0xf7, 0x81, 0x97, 0x9c, 0x05, 0x6f,
+ 0x84, 0x5f, 0xd2, 0x42, 0xfb, 0x38, 0xcf, 0xd1,
+ 0x2f, 0x14, 0x30, 0x88, 0x98, 0x4d, 0x5a, 0xa9,
+ 0x76, 0xd5, 0x4f, 0x3e, 0x70, 0x6c, 0x85, 0x76,
+ 0xd7, 0x01, 0xa0, 0x1a, 0xc8, 0x4e, 0xaa, 0xac,
+ 0x78, 0xfe, 0x46, 0xde, 0x6a, 0x05, 0x46, 0xa7,
+ 0x43, 0x0c, 0xb9, 0xde, 0xb9, 0x68, 0xfb, 0xce,
+ 0x42, 0x99, 0x07, 0x4d, 0x0b, 0x3b, 0x5a, 0x30,
+ 0x35, 0xa8, 0xf9, 0x3a, 0x73, 0xef, 0x0f, 0xdb,
+ 0x1e, 0x16, 0x42, 0xc4, 0xba, 0xae, 0x58, 0xaa,
+ 0xf8, 0xe5, 0x75, 0x2f, 0x1b, 0x15, 0x5c, 0xfd,
+ 0x0a, 0x97, 0xd0, 0xe4, 0x37, 0x83, 0x61, 0x5f,
+ 0x43, 0xa6, 0xc7, 0x3f, 0x38, 0x59, 0xe6, 0xeb,
+ 0xa3, 0x90, 0xc3, 0xaa, 0xaa, 0x5a, 0xd3, 0x34,
+ 0xd4, 0x17, 0xc8, 0x65, 0x3e, 0x57, 0xbc, 0x5e,
+ 0xdd, 0x9e, 0xb7, 0xf0, 0x2e, 0x5b, 0xb2, 0x1f,
+ 0x8a, 0x08, 0x0d, 0x45, 0x91, 0x0b, 0x29, 0x53,
+ 0x4f, 0x4c, 0x5a, 0x73, 0x56, 0xfe, 0xaf, 0x41,
+ 0x01, 0x39, 0x0a, 0x24, 0x3c, 0x7e, 0xbe, 0x4e,
+ 0x53, 0xf3, 0xeb, 0x06, 0x66, 0x51, 0x28, 0x1d,
+ 0xbd, 0x41, 0x0a, 0x01, 0xab, 0x16, 0x47, 0x27,
+ 0x47, 0x47, 0xf7, 0xcb, 0x46, 0x0a, 0x70, 0x9e,
+ 0x01, 0x9c, 0x09, 0xe1, 0x2a, 0x00, 0x1a, 0xd8,
+ 0xd4, 0x79, 0x9d, 0x80, 0x15, 0x8e, 0x53, 0x2a,
+ 0x65, 0x83, 0x78, 0x3e, 0x03, 0x00, 0x07, 0x12,
+ 0x1f, 0x33, 0x3e, 0x7b, 0x13, 0x37, 0xf1, 0xc3,
+ 0xef, 0xb7, 0xc1, 0x20, 0x3c, 0x3e, 0x67, 0x66,
+ 0x5d, 0x88, 0xa7, 0x7d, 0x33, 0x50, 0x77, 0xb0,
+ 0x28, 0x8e, 0xe7, 0x2c, 0x2e, 0x7a, 0xf4, 0x3c,
+ 0x8d, 0x74, 0x83, 0xaf, 0x8e, 0x87, 0x0f, 0xe4,
+ 0x50, 0xff, 0x84, 0x5c, 0x47, 0x0c, 0x6a, 0x49,
+ 0xbf, 0x42, 0x86, 0x77, 0x15, 0x48, 0xa5, 0x90,
+ 0x5d, 0x93, 0xd6, 0x2a, 0x11, 0xd5, 0xd5, 0x11,
+ 0xaa, 0xce, 0xe7, 0x6f, 0xa5, 0xb0, 0x09, 0x2c,
+ 0x8d, 0xd3, 0x92, 0xf0, 0x5a, 0x2a, 0xda, 0x5b,
+ 0x1e, 0xd5, 0x9a, 0xc4, 0xc4, 0xf3, 0x49, 0x74,
+ 0x41, 0xca, 0xe8, 0xc1, 0xf8, 0x44, 0xd6, 0x3c,
+ 0xae, 0x6c, 0x1d, 0x9a, 0x30, 0x04, 0x4d, 0x27,
+ 0x0e, 0xb1, 0x5f, 0x59, 0xa2, 0x24, 0xe8, 0xe1,
+ 0x98, 0xc5, 0x6a, 0x4c, 0xfe, 0x41, 0xd2, 0x27,
+ 0x42, 0x52, 0xe1, 0xe9, 0x7d, 0x62, 0xe4, 0x88,
+ 0x0f, 0xad, 0xb2, 0x70, 0xcb, 0x9d, 0x4c, 0x27,
+ 0x2e, 0x76, 0x1e, 0x1a, 0x63, 0x65, 0xf5, 0x3b,
+ 0xf8, 0x57, 0x69, 0xeb, 0x5b, 0x38, 0x26, 0x39,
+ 0x33, 0x25, 0x45, 0x3e, 0x91, 0xb8, 0xd8, 0xc7,
+ 0xd5, 0x42, 0xc0, 0x22, 0x31, 0x74, 0xf4, 0xbc,
+ 0x0c, 0x23, 0xf1, 0xca, 0xc1, 0x8d, 0xd7, 0xbe,
+ 0xc9, 0x62, 0xe4, 0x08, 0x1a, 0xcf, 0x36, 0xd5,
+ 0xfe, 0x55, 0x21, 0x59, 0x91, 0x87, 0x87, 0xdf,
+ 0x06, 0xdb, 0xdf, 0x96, 0x45, 0x58, 0xda, 0x05,
+ 0xcd, 0x50, 0x4d, 0xd2, 0x7d, 0x05, 0x18, 0x73,
+ 0x6a, 0x8d, 0x11, 0x85, 0xa6, 0x88, 0xe8, 0xda,
+ 0xe6, 0x30, 0x33, 0xa4, 0x89, 0x31, 0x75, 0xbe,
+ 0x69, 0x43, 0x84, 0x43, 0x50, 0x87, 0xdd, 0x71,
+ 0x36, 0x83, 0xc3, 0x78, 0x74, 0x24, 0x0a, 0xed,
+ 0x7b, 0xdb, 0xa4, 0x24, 0x0b, 0xb9, 0x7e, 0x5d,
+ 0xff, 0xde, 0xb1, 0xef, 0x61, 0x5a, 0x45, 0x33,
+ 0xf6, 0x17, 0x07, 0x08, 0x98, 0x83, 0x92, 0x0f,
+ 0x23, 0x6d, 0xe6, 0xaa, 0x17, 0x54, 0xad, 0x6a,
+ 0xc8, 0xdb, 0x26, 0xbe, 0xb8, 0xb6, 0x08, 0xfa,
+ 0x68, 0xf1, 0xd7, 0x79, 0x6f, 0x18, 0xb4, 0x9e,
+ 0x2d, 0x3f, 0x1b, 0x64, 0xaf, 0x8d, 0x06, 0x0e,
+ 0x49, 0x28, 0xe0, 0x5d, 0x45, 0x68, 0x13, 0x87,
+ 0xfa, 0xde, 0x40, 0x7b, 0xd2, 0xc3, 0x94, 0xd5,
+ 0xe1, 0xd9, 0xc2, 0xaf, 0x55, 0x89, 0xeb, 0xb4,
+ 0x12, 0x59, 0xa8, 0xd4, 0xc5, 0x29, 0x66, 0x38,
+ 0xe6, 0xac, 0x22, 0x22, 0xd9, 0x64, 0x9b, 0x34,
+ 0x0a, 0x32, 0x9f, 0xc2, 0xbf, 0x17, 0x6c, 0x3f,
+ 0x71, 0x7a, 0x38, 0x6b, 0x98, 0xfb, 0x49, 0x36,
+ 0x89, 0xc9, 0xe2, 0xd6, 0xc7, 0x5d, 0xd0, 0x69,
+ 0x5f, 0x23, 0x35, 0xc9, 0x30, 0xe2, 0xfd, 0x44,
+ 0x58, 0x39, 0xd7, 0x97, 0xfb, 0x5c, 0x00, 0xd5,
+ 0x4f, 0x7a, 0x1a, 0x95, 0x8b, 0x62, 0x4b, 0xce,
+ 0xe5, 0x91, 0x21, 0x7b, 0x30, 0x00, 0xd6, 0xdd,
+ 0x6d, 0x02, 0x86, 0x49, 0x0f, 0x3c, 0x1a, 0x27,
+ 0x3c, 0xd3, 0x0e, 0x71, 0xf2, 0xff, 0xf5, 0x2f,
+ 0x87, 0xac, 0x67, 0x59, 0x81, 0xa3, 0xf7, 0xf8,
+ 0xd6, 0x11, 0x0c, 0x84, 0xa9, 0x03, 0xee, 0x2a,
+ 0xc4, 0xf3, 0x22, 0xab, 0x7c, 0xe2, 0x25, 0xf5,
+ 0x67, 0xa3, 0xe4, 0x11, 0xe0, 0x59, 0xb3, 0xca,
+ 0x87, 0xa0, 0xae, 0xc9, 0xa6, 0x62, 0x1b, 0x6e,
+ 0x4d, 0x02, 0x6b, 0x07, 0x9d, 0xfd, 0xd0, 0x92,
+ 0x06, 0xe1, 0xb2, 0x9a, 0x4a, 0x1f, 0x1f, 0x13,
+ 0x49, 0x99, 0x97, 0x08, 0xde, 0x7f, 0x98, 0xaf,
+ 0x51, 0x98, 0xee, 0x2c, 0xcb, 0xf0, 0x0b, 0xc6,
+ 0xb6, 0xb7, 0x2d, 0x9a, 0xb1, 0xac, 0xa6, 0xe3,
+ 0x15, 0x77, 0x9d, 0x6b, 0x1a, 0xe4, 0xfc, 0x8b,
+ 0xf2, 0x17, 0x59, 0x08, 0x04, 0x58, 0x81, 0x9d,
+ 0x1b, 0x1b, 0x69, 0x55, 0xc2, 0xb4, 0x3c, 0x1f,
+ 0x50, 0xf1, 0x7f, 0x77, 0x90, 0x4c, 0x66, 0x40,
+ 0x5a, 0xc0, 0x33, 0x1f, 0xcb, 0x05, 0x6d, 0x5c,
+ 0x06, 0x87, 0x52, 0xa2, 0x8f, 0x26, 0xd5, 0x4f
+};
+static const u8 enc_output010[] __initconst = {
+ 0xe5, 0x26, 0xa4, 0x3d, 0xbd, 0x33, 0xd0, 0x4b,
+ 0x6f, 0x05, 0xa7, 0x6e, 0x12, 0x7a, 0xd2, 0x74,
+ 0xa6, 0xdd, 0xbd, 0x95, 0xeb, 0xf9, 0xa4, 0xf1,
+ 0x59, 0x93, 0x91, 0x70, 0xd9, 0xfe, 0x9a, 0xcd,
+ 0x53, 0x1f, 0x3a, 0xab, 0xa6, 0x7c, 0x9f, 0xa6,
+ 0x9e, 0xbd, 0x99, 0xd9, 0xb5, 0x97, 0x44, 0xd5,
+ 0x14, 0x48, 0x4d, 0x9d, 0xc0, 0xd0, 0x05, 0x96,
+ 0xeb, 0x4c, 0x78, 0x55, 0x09, 0x08, 0x01, 0x02,
+ 0x30, 0x90, 0x7b, 0x96, 0x7a, 0x7b, 0x5f, 0x30,
+ 0x41, 0x24, 0xce, 0x68, 0x61, 0x49, 0x86, 0x57,
+ 0x82, 0xdd, 0x53, 0x1c, 0x51, 0x28, 0x2b, 0x53,
+ 0x6e, 0x2d, 0xc2, 0x20, 0x4c, 0xdd, 0x8f, 0x65,
+ 0x10, 0x20, 0x50, 0xdd, 0x9d, 0x50, 0xe5, 0x71,
+ 0x40, 0x53, 0x69, 0xfc, 0x77, 0x48, 0x11, 0xb9,
+ 0xde, 0xa4, 0x8d, 0x58, 0xe4, 0xa6, 0x1a, 0x18,
+ 0x47, 0x81, 0x7e, 0xfc, 0xdd, 0xf6, 0xef, 0xce,
+ 0x2f, 0x43, 0x68, 0xd6, 0x06, 0xe2, 0x74, 0x6a,
+ 0xad, 0x90, 0xf5, 0x37, 0xf3, 0x3d, 0x82, 0x69,
+ 0x40, 0xe9, 0x6b, 0xa7, 0x3d, 0xa8, 0x1e, 0xd2,
+ 0x02, 0x7c, 0xb7, 0x9b, 0xe4, 0xda, 0x8f, 0x95,
+ 0x06, 0xc5, 0xdf, 0x73, 0xa3, 0x20, 0x9a, 0x49,
+ 0xde, 0x9c, 0xbc, 0xee, 0x14, 0x3f, 0x81, 0x5e,
+ 0xf8, 0x3b, 0x59, 0x3c, 0xe1, 0x68, 0x12, 0x5a,
+ 0x3a, 0x76, 0x3a, 0x3f, 0xf7, 0x87, 0x33, 0x0a,
+ 0x01, 0xb8, 0xd4, 0xed, 0xb6, 0xbe, 0x94, 0x5e,
+ 0x70, 0x40, 0x56, 0x67, 0x1f, 0x50, 0x44, 0x19,
+ 0xce, 0x82, 0x70, 0x10, 0x87, 0x13, 0x20, 0x0b,
+ 0x4c, 0x5a, 0xb6, 0xf6, 0xa7, 0xae, 0x81, 0x75,
+ 0x01, 0x81, 0xe6, 0x4b, 0x57, 0x7c, 0xdd, 0x6d,
+ 0xf8, 0x1c, 0x29, 0x32, 0xf7, 0xda, 0x3c, 0x2d,
+ 0xf8, 0x9b, 0x25, 0x6e, 0x00, 0xb4, 0xf7, 0x2f,
+ 0xf7, 0x04, 0xf7, 0xa1, 0x56, 0xac, 0x4f, 0x1a,
+ 0x64, 0xb8, 0x47, 0x55, 0x18, 0x7b, 0x07, 0x4d,
+ 0xbd, 0x47, 0x24, 0x80, 0x5d, 0xa2, 0x70, 0xc5,
+ 0xdd, 0x8e, 0x82, 0xd4, 0xeb, 0xec, 0xb2, 0x0c,
+ 0x39, 0xd2, 0x97, 0xc1, 0xcb, 0xeb, 0xf4, 0x77,
+ 0x59, 0xb4, 0x87, 0xef, 0xcb, 0x43, 0x2d, 0x46,
+ 0x54, 0xd1, 0xa7, 0xd7, 0x15, 0x99, 0x0a, 0x43,
+ 0xa1, 0xe0, 0x99, 0x33, 0x71, 0xc1, 0xed, 0xfe,
+ 0x72, 0x46, 0x33, 0x8e, 0x91, 0x08, 0x9f, 0xc8,
+ 0x2e, 0xca, 0xfa, 0xdc, 0x59, 0xd5, 0xc3, 0x76,
+ 0x84, 0x9f, 0xa3, 0x37, 0x68, 0xc3, 0xf0, 0x47,
+ 0x2c, 0x68, 0xdb, 0x5e, 0xc3, 0x49, 0x4c, 0xe8,
+ 0x92, 0x85, 0xe2, 0x23, 0xd3, 0x3f, 0xad, 0x32,
+ 0xe5, 0x2b, 0x82, 0xd7, 0x8f, 0x99, 0x0a, 0x59,
+ 0x5c, 0x45, 0xd9, 0xb4, 0x51, 0x52, 0xc2, 0xae,
+ 0xbf, 0x80, 0xcf, 0xc9, 0xc9, 0x51, 0x24, 0x2a,
+ 0x3b, 0x3a, 0x4d, 0xae, 0xeb, 0xbd, 0x22, 0xc3,
+ 0x0e, 0x0f, 0x59, 0x25, 0x92, 0x17, 0xe9, 0x74,
+ 0xc7, 0x8b, 0x70, 0x70, 0x36, 0x55, 0x95, 0x75,
+ 0x4b, 0xad, 0x61, 0x2b, 0x09, 0xbc, 0x82, 0xf2,
+ 0x6e, 0x94, 0x43, 0xae, 0xc3, 0xd5, 0xcd, 0x8e,
+ 0xfe, 0x5b, 0x9a, 0x88, 0x43, 0x01, 0x75, 0xb2,
+ 0x23, 0x09, 0xf7, 0x89, 0x83, 0xe7, 0xfa, 0xf9,
+ 0xb4, 0x9b, 0xf8, 0xef, 0xbd, 0x1c, 0x92, 0xc1,
+ 0xda, 0x7e, 0xfe, 0x05, 0xba, 0x5a, 0xcd, 0x07,
+ 0x6a, 0x78, 0x9e, 0x5d, 0xfb, 0x11, 0x2f, 0x79,
+ 0x38, 0xb6, 0xc2, 0x5b, 0x6b, 0x51, 0xb4, 0x71,
+ 0xdd, 0xf7, 0x2a, 0xe4, 0xf4, 0x72, 0x76, 0xad,
+ 0xc2, 0xdd, 0x64, 0x5d, 0x79, 0xb6, 0xf5, 0x7a,
+ 0x77, 0x20, 0x05, 0x3d, 0x30, 0x06, 0xd4, 0x4c,
+ 0x0a, 0x2c, 0x98, 0x5a, 0xb9, 0xd4, 0x98, 0xa9,
+ 0x3f, 0xc6, 0x12, 0xea, 0x3b, 0x4b, 0xc5, 0x79,
+ 0x64, 0x63, 0x6b, 0x09, 0x54, 0x3b, 0x14, 0x27,
+ 0xba, 0x99, 0x80, 0xc8, 0x72, 0xa8, 0x12, 0x90,
+ 0x29, 0xba, 0x40, 0x54, 0x97, 0x2b, 0x7b, 0xfe,
+ 0xeb, 0xcd, 0x01, 0x05, 0x44, 0x72, 0xdb, 0x99,
+ 0xe4, 0x61, 0xc9, 0x69, 0xd6, 0xb9, 0x28, 0xd1,
+ 0x05, 0x3e, 0xf9, 0x0b, 0x49, 0x0a, 0x49, 0xe9,
+ 0x8d, 0x0e, 0xa7, 0x4a, 0x0f, 0xaf, 0x32, 0xd0,
+ 0xe0, 0xb2, 0x3a, 0x55, 0x58, 0xfe, 0x5c, 0x28,
+ 0x70, 0x51, 0x23, 0xb0, 0x7b, 0x6a, 0x5f, 0x1e,
+ 0xb8, 0x17, 0xd7, 0x94, 0x15, 0x8f, 0xee, 0x20,
+ 0xc7, 0x42, 0x25, 0x3e, 0x9a, 0x14, 0xd7, 0x60,
+ 0x72, 0x39, 0x47, 0x48, 0xa9, 0xfe, 0xdd, 0x47,
+ 0x0a, 0xb1, 0xe6, 0x60, 0x28, 0x8c, 0x11, 0x68,
+ 0xe1, 0xff, 0xd7, 0xce, 0xc8, 0xbe, 0xb3, 0xfe,
+ 0x27, 0x30, 0x09, 0x70, 0xd7, 0xfa, 0x02, 0x33,
+ 0x3a, 0x61, 0x2e, 0xc7, 0xff, 0xa4, 0x2a, 0xa8,
+ 0x6e, 0xb4, 0x79, 0x35, 0x6d, 0x4c, 0x1e, 0x38,
+ 0xf8, 0xee, 0xd4, 0x84, 0x4e, 0x6e, 0x28, 0xa7,
+ 0xce, 0xc8, 0xc1, 0xcf, 0x80, 0x05, 0xf3, 0x04,
+ 0xef, 0xc8, 0x18, 0x28, 0x2e, 0x8d, 0x5e, 0x0c,
+ 0xdf, 0xb8, 0x5f, 0x96, 0xe8, 0xc6, 0x9c, 0x2f,
+ 0xe5, 0xa6, 0x44, 0xd7, 0xe7, 0x99, 0x44, 0x0c,
+ 0xec, 0xd7, 0x05, 0x60, 0x97, 0xbb, 0x74, 0x77,
+ 0x58, 0xd5, 0xbb, 0x48, 0xde, 0x5a, 0xb2, 0x54,
+ 0x7f, 0x0e, 0x46, 0x70, 0x6a, 0x6f, 0x78, 0xa5,
+ 0x08, 0x89, 0x05, 0x4e, 0x7e, 0xa0, 0x69, 0xb4,
+ 0x40, 0x60, 0x55, 0x77, 0x75, 0x9b, 0x19, 0xf2,
+ 0xd5, 0x13, 0x80, 0x77, 0xf9, 0x4b, 0x3f, 0x1e,
+ 0xee, 0xe6, 0x76, 0x84, 0x7b, 0x8c, 0xe5, 0x27,
+ 0xa8, 0x0a, 0x91, 0x01, 0x68, 0x71, 0x8a, 0x3f,
+ 0x06, 0xab, 0xf6, 0xa9, 0xa5, 0xe6, 0x72, 0x92,
+ 0xe4, 0x67, 0xe2, 0xa2, 0x46, 0x35, 0x84, 0x55,
+ 0x7d, 0xca, 0xa8, 0x85, 0xd0, 0xf1, 0x3f, 0xbe,
+ 0xd7, 0x34, 0x64, 0xfc, 0xae, 0xe3, 0xe4, 0x04,
+ 0x9f, 0x66, 0x02, 0xb9, 0x88, 0x10, 0xd9, 0xc4,
+ 0x4c, 0x31, 0x43, 0x7a, 0x93, 0xe2, 0x9b, 0x56,
+ 0x43, 0x84, 0xdc, 0xdc, 0xde, 0x1d, 0xa4, 0x02,
+ 0x0e, 0xc2, 0xef, 0xc3, 0xf8, 0x78, 0xd1, 0xb2,
+ 0x6b, 0x63, 0x18, 0xc9, 0xa9, 0xe5, 0x72, 0xd8,
+ 0xf3, 0xb9, 0xd1, 0x8a, 0xc7, 0x1a, 0x02, 0x27,
+ 0x20, 0x77, 0x10, 0xe5, 0xc8, 0xd4, 0x4a, 0x47,
+ 0xe5, 0xdf, 0x5f, 0x01, 0xaa, 0xb0, 0xd4, 0x10,
+ 0xbb, 0x69, 0xe3, 0x36, 0xc8, 0xe1, 0x3d, 0x43,
+ 0xfb, 0x86, 0xcd, 0xcc, 0xbf, 0xf4, 0x88, 0xe0,
+ 0x20, 0xca, 0xb7, 0x1b, 0xf1, 0x2f, 0x5c, 0xee,
+ 0xd4, 0xd3, 0xa3, 0xcc, 0xa4, 0x1e, 0x1c, 0x47,
+ 0xfb, 0xbf, 0xfc, 0xa2, 0x41, 0x55, 0x9d, 0xf6,
+ 0x5a, 0x5e, 0x65, 0x32, 0x34, 0x7b, 0x52, 0x8d,
+ 0xd5, 0xd0, 0x20, 0x60, 0x03, 0xab, 0x3f, 0x8c,
+ 0xd4, 0x21, 0xea, 0x2a, 0xd9, 0xc4, 0xd0, 0xd3,
+ 0x65, 0xd8, 0x7a, 0x13, 0x28, 0x62, 0x32, 0x4b,
+ 0x2c, 0x87, 0x93, 0xa8, 0xb4, 0x52, 0x45, 0x09,
+ 0x44, 0xec, 0xec, 0xc3, 0x17, 0xdb, 0x9a, 0x4d,
+ 0x5c, 0xa9, 0x11, 0xd4, 0x7d, 0xaf, 0x9e, 0xf1,
+ 0x2d, 0xb2, 0x66, 0xc5, 0x1d, 0xed, 0xb7, 0xcd,
+ 0x0b, 0x25, 0x5e, 0x30, 0x47, 0x3f, 0x40, 0xf4,
+ 0xa1, 0xa0, 0x00, 0x94, 0x10, 0xc5, 0x6a, 0x63,
+ 0x1a, 0xd5, 0x88, 0x92, 0x8e, 0x82, 0x39, 0x87,
+ 0x3c, 0x78, 0x65, 0x58, 0x42, 0x75, 0x5b, 0xdd,
+ 0x77, 0x3e, 0x09, 0x4e, 0x76, 0x5b, 0xe6, 0x0e,
+ 0x4d, 0x38, 0xb2, 0xc0, 0xb8, 0x95, 0x01, 0x7a,
+ 0x10, 0xe0, 0xfb, 0x07, 0xf2, 0xab, 0x2d, 0x8c,
+ 0x32, 0xed, 0x2b, 0xc0, 0x46, 0xc2, 0xf5, 0x38,
+ 0x83, 0xf0, 0x17, 0xec, 0xc1, 0x20, 0x6a, 0x9a,
+ 0x0b, 0x00, 0xa0, 0x98, 0x22, 0x50, 0x23, 0xd5,
+ 0x80, 0x6b, 0xf6, 0x1f, 0xc3, 0xcc, 0x97, 0xc9,
+ 0x24, 0x9f, 0xf3, 0xaf, 0x43, 0x14, 0xd5, 0xa0
+};
+static const u8 enc_assoc010[] __initconst = {
+ 0xd2, 0xa1, 0x70, 0xdb, 0x7a, 0xf8, 0xfa, 0x27,
+ 0xba, 0x73, 0x0f, 0xbf, 0x3d, 0x1e, 0x82, 0xb2
+};
+static const u8 enc_nonce010[] __initconst = {
+ 0xdb, 0x92, 0x0f, 0x7f, 0x17, 0x54, 0x0c, 0x30
+};
+static const u8 enc_key010[] __initconst = {
+ 0x47, 0x11, 0xeb, 0x86, 0x2b, 0x2c, 0xab, 0x44,
+ 0x34, 0xda, 0x7f, 0x57, 0x03, 0x39, 0x0c, 0xaf,
+ 0x2c, 0x14, 0xfd, 0x65, 0x23, 0xe9, 0x8e, 0x74,
+ 0xd5, 0x08, 0x68, 0x08, 0xe7, 0xb4, 0x72, 0xd7
+};
+
+static const u8 enc_input011[] __initconst = {
+ 0x7a, 0x57, 0xf2, 0xc7, 0x06, 0x3f, 0x50, 0x7b,
+ 0x36, 0x1a, 0x66, 0x5c, 0xb9, 0x0e, 0x5e, 0x3b,
+ 0x45, 0x60, 0xbe, 0x9a, 0x31, 0x9f, 0xff, 0x5d,
+ 0x66, 0x34, 0xb4, 0xdc, 0xfb, 0x9d, 0x8e, 0xee,
+ 0x6a, 0x33, 0xa4, 0x07, 0x3c, 0xf9, 0x4c, 0x30,
+ 0xa1, 0x24, 0x52, 0xf9, 0x50, 0x46, 0x88, 0x20,
+ 0x02, 0x32, 0x3a, 0x0e, 0x99, 0x63, 0xaf, 0x1f,
+ 0x15, 0x28, 0x2a, 0x05, 0xff, 0x57, 0x59, 0x5e,
+ 0x18, 0xa1, 0x1f, 0xd0, 0x92, 0x5c, 0x88, 0x66,
+ 0x1b, 0x00, 0x64, 0xa5, 0x93, 0x8d, 0x06, 0x46,
+ 0xb0, 0x64, 0x8b, 0x8b, 0xef, 0x99, 0x05, 0x35,
+ 0x85, 0xb3, 0xf3, 0x33, 0xbb, 0xec, 0x66, 0xb6,
+ 0x3d, 0x57, 0x42, 0xe3, 0xb4, 0xc6, 0xaa, 0xb0,
+ 0x41, 0x2a, 0xb9, 0x59, 0xa9, 0xf6, 0x3e, 0x15,
+ 0x26, 0x12, 0x03, 0x21, 0x4c, 0x74, 0x43, 0x13,
+ 0x2a, 0x03, 0x27, 0x09, 0xb4, 0xfb, 0xe7, 0xb7,
+ 0x40, 0xff, 0x5e, 0xce, 0x48, 0x9a, 0x60, 0xe3,
+ 0x8b, 0x80, 0x8c, 0x38, 0x2d, 0xcb, 0x93, 0x37,
+ 0x74, 0x05, 0x52, 0x6f, 0x73, 0x3e, 0xc3, 0xbc,
+ 0xca, 0x72, 0x0a, 0xeb, 0xf1, 0x3b, 0xa0, 0x95,
+ 0xdc, 0x8a, 0xc4, 0xa9, 0xdc, 0xca, 0x44, 0xd8,
+ 0x08, 0x63, 0x6a, 0x36, 0xd3, 0x3c, 0xb8, 0xac,
+ 0x46, 0x7d, 0xfd, 0xaa, 0xeb, 0x3e, 0x0f, 0x45,
+ 0x8f, 0x49, 0xda, 0x2b, 0xf2, 0x12, 0xbd, 0xaf,
+ 0x67, 0x8a, 0x63, 0x48, 0x4b, 0x55, 0x5f, 0x6d,
+ 0x8c, 0xb9, 0x76, 0x34, 0x84, 0xae, 0xc2, 0xfc,
+ 0x52, 0x64, 0x82, 0xf7, 0xb0, 0x06, 0xf0, 0x45,
+ 0x73, 0x12, 0x50, 0x30, 0x72, 0xea, 0x78, 0x9a,
+ 0xa8, 0xaf, 0xb5, 0xe3, 0xbb, 0x77, 0x52, 0xec,
+ 0x59, 0x84, 0xbf, 0x6b, 0x8f, 0xce, 0x86, 0x5e,
+ 0x1f, 0x23, 0xe9, 0xfb, 0x08, 0x86, 0xf7, 0x10,
+ 0xb9, 0xf2, 0x44, 0x96, 0x44, 0x63, 0xa9, 0xa8,
+ 0x78, 0x00, 0x23, 0xd6, 0xc7, 0xe7, 0x6e, 0x66,
+ 0x4f, 0xcc, 0xee, 0x15, 0xb3, 0xbd, 0x1d, 0xa0,
+ 0xe5, 0x9c, 0x1b, 0x24, 0x2c, 0x4d, 0x3c, 0x62,
+ 0x35, 0x9c, 0x88, 0x59, 0x09, 0xdd, 0x82, 0x1b,
+ 0xcf, 0x0a, 0x83, 0x6b, 0x3f, 0xae, 0x03, 0xc4,
+ 0xb4, 0xdd, 0x7e, 0x5b, 0x28, 0x76, 0x25, 0x96,
+ 0xd9, 0xc9, 0x9d, 0x5f, 0x86, 0xfa, 0xf6, 0xd7,
+ 0xd2, 0xe6, 0x76, 0x1d, 0x0f, 0xa1, 0xdc, 0x74,
+ 0x05, 0x1b, 0x1d, 0xe0, 0xcd, 0x16, 0xb0, 0xa8,
+ 0x8a, 0x34, 0x7b, 0x15, 0x11, 0x77, 0xe5, 0x7b,
+ 0x7e, 0x20, 0xf7, 0xda, 0x38, 0xda, 0xce, 0x70,
+ 0xe9, 0xf5, 0x6c, 0xd9, 0xbe, 0x0c, 0x4c, 0x95,
+ 0x4c, 0xc2, 0x9b, 0x34, 0x55, 0x55, 0xe1, 0xf3,
+ 0x46, 0x8e, 0x48, 0x74, 0x14, 0x4f, 0x9d, 0xc9,
+ 0xf5, 0xe8, 0x1a, 0xf0, 0x11, 0x4a, 0xc1, 0x8d,
+ 0xe0, 0x93, 0xa0, 0xbe, 0x09, 0x1c, 0x2b, 0x4e,
+ 0x0f, 0xb2, 0x87, 0x8b, 0x84, 0xfe, 0x92, 0x32,
+ 0x14, 0xd7, 0x93, 0xdf, 0xe7, 0x44, 0xbc, 0xc5,
+ 0xae, 0x53, 0x69, 0xd8, 0xb3, 0x79, 0x37, 0x80,
+ 0xe3, 0x17, 0x5c, 0xec, 0x53, 0x00, 0x9a, 0xe3,
+ 0x8e, 0xdc, 0x38, 0xb8, 0x66, 0xf0, 0xd3, 0xad,
+ 0x1d, 0x02, 0x96, 0x86, 0x3e, 0x9d, 0x3b, 0x5d,
+ 0xa5, 0x7f, 0x21, 0x10, 0xf1, 0x1f, 0x13, 0x20,
+ 0xf9, 0x57, 0x87, 0x20, 0xf5, 0x5f, 0xf1, 0x17,
+ 0x48, 0x0a, 0x51, 0x5a, 0xcd, 0x19, 0x03, 0xa6,
+ 0x5a, 0xd1, 0x12, 0x97, 0xe9, 0x48, 0xe2, 0x1d,
+ 0x83, 0x75, 0x50, 0xd9, 0x75, 0x7d, 0x6a, 0x82,
+ 0xa1, 0xf9, 0x4e, 0x54, 0x87, 0x89, 0xc9, 0x0c,
+ 0xb7, 0x5b, 0x6a, 0x91, 0xc1, 0x9c, 0xb2, 0xa9,
+ 0xdc, 0x9a, 0xa4, 0x49, 0x0a, 0x6d, 0x0d, 0xbb,
+ 0xde, 0x86, 0x44, 0xdd, 0x5d, 0x89, 0x2b, 0x96,
+ 0x0f, 0x23, 0x95, 0xad, 0xcc, 0xa2, 0xb3, 0xb9,
+ 0x7e, 0x74, 0x38, 0xba, 0x9f, 0x73, 0xae, 0x5f,
+ 0xf8, 0x68, 0xa2, 0xe0, 0xa9, 0xce, 0xbd, 0x40,
+ 0xd4, 0x4c, 0x6b, 0xd2, 0x56, 0x62, 0xb0, 0xcc,
+ 0x63, 0x7e, 0x5b, 0xd3, 0xae, 0xd1, 0x75, 0xce,
+ 0xbb, 0xb4, 0x5b, 0xa8, 0xf8, 0xb4, 0xac, 0x71,
+ 0x75, 0xaa, 0xc9, 0x9f, 0xbb, 0x6c, 0xad, 0x0f,
+ 0x55, 0x5d, 0xe8, 0x85, 0x7d, 0xf9, 0x21, 0x35,
+ 0xea, 0x92, 0x85, 0x2b, 0x00, 0xec, 0x84, 0x90,
+ 0x0a, 0x63, 0x96, 0xe4, 0x6b, 0xa9, 0x77, 0xb8,
+ 0x91, 0xf8, 0x46, 0x15, 0x72, 0x63, 0x70, 0x01,
+ 0x40, 0xa3, 0xa5, 0x76, 0x62, 0x2b, 0xbf, 0xf1,
+ 0xe5, 0x8d, 0x9f, 0xa3, 0xfa, 0x9b, 0x03, 0xbe,
+ 0xfe, 0x65, 0x6f, 0xa2, 0x29, 0x0d, 0x54, 0xb4,
+ 0x71, 0xce, 0xa9, 0xd6, 0x3d, 0x88, 0xf9, 0xaf,
+ 0x6b, 0xa8, 0x9e, 0xf4, 0x16, 0x96, 0x36, 0xb9,
+ 0x00, 0xdc, 0x10, 0xab, 0xb5, 0x08, 0x31, 0x1f,
+ 0x00, 0xb1, 0x3c, 0xd9, 0x38, 0x3e, 0xc6, 0x04,
+ 0xa7, 0x4e, 0xe8, 0xae, 0xed, 0x98, 0xc2, 0xf7,
+ 0xb9, 0x00, 0x5f, 0x8c, 0x60, 0xd1, 0xe5, 0x15,
+ 0xf7, 0xae, 0x1e, 0x84, 0x88, 0xd1, 0xf6, 0xbc,
+ 0x3a, 0x89, 0x35, 0x22, 0x83, 0x7c, 0xca, 0xf0,
+ 0x33, 0x82, 0x4c, 0x79, 0x3c, 0xfd, 0xb1, 0xae,
+ 0x52, 0x62, 0x55, 0xd2, 0x41, 0x60, 0xc6, 0xbb,
+ 0xfa, 0x0e, 0x59, 0xd6, 0xa8, 0xfe, 0x5d, 0xed,
+ 0x47, 0x3d, 0xe0, 0xea, 0x1f, 0x6e, 0x43, 0x51,
+ 0xec, 0x10, 0x52, 0x56, 0x77, 0x42, 0x6b, 0x52,
+ 0x87, 0xd8, 0xec, 0xe0, 0xaa, 0x76, 0xa5, 0x84,
+ 0x2a, 0x22, 0x24, 0xfd, 0x92, 0x40, 0x88, 0xd5,
+ 0x85, 0x1c, 0x1f, 0x6b, 0x47, 0xa0, 0xc4, 0xe4,
+ 0xef, 0xf4, 0xea, 0xd7, 0x59, 0xac, 0x2a, 0x9e,
+ 0x8c, 0xfa, 0x1f, 0x42, 0x08, 0xfe, 0x4f, 0x74,
+ 0xa0, 0x26, 0xf5, 0xb3, 0x84, 0xf6, 0x58, 0x5f,
+ 0x26, 0x66, 0x3e, 0xd7, 0xe4, 0x22, 0x91, 0x13,
+ 0xc8, 0xac, 0x25, 0x96, 0x23, 0xd8, 0x09, 0xea,
+ 0x45, 0x75, 0x23, 0xb8, 0x5f, 0xc2, 0x90, 0x8b,
+ 0x09, 0xc4, 0xfc, 0x47, 0x6c, 0x6d, 0x0a, 0xef,
+ 0x69, 0xa4, 0x38, 0x19, 0xcf, 0x7d, 0xf9, 0x09,
+ 0x73, 0x9b, 0x60, 0x5a, 0xf7, 0x37, 0xb5, 0xfe,
+ 0x9f, 0xe3, 0x2b, 0x4c, 0x0d, 0x6e, 0x19, 0xf1,
+ 0xd6, 0xc0, 0x70, 0xf3, 0x9d, 0x22, 0x3c, 0xf9,
+ 0x49, 0xce, 0x30, 0x8e, 0x44, 0xb5, 0x76, 0x15,
+ 0x8f, 0x52, 0xfd, 0xa5, 0x04, 0xb8, 0x55, 0x6a,
+ 0x36, 0x59, 0x7c, 0xc4, 0x48, 0xb8, 0xd7, 0xab,
+ 0x05, 0x66, 0xe9, 0x5e, 0x21, 0x6f, 0x6b, 0x36,
+ 0x29, 0xbb, 0xe9, 0xe3, 0xa2, 0x9a, 0xa8, 0xcd,
+ 0x55, 0x25, 0x11, 0xba, 0x5a, 0x58, 0xa0, 0xde,
+ 0xae, 0x19, 0x2a, 0x48, 0x5a, 0xff, 0x36, 0xcd,
+ 0x6d, 0x16, 0x7a, 0x73, 0x38, 0x46, 0xe5, 0x47,
+ 0x59, 0xc8, 0xa2, 0xf6, 0xe2, 0x6c, 0x83, 0xc5,
+ 0x36, 0x2c, 0x83, 0x7d, 0xb4, 0x01, 0x05, 0x69,
+ 0xe7, 0xaf, 0x5c, 0xc4, 0x64, 0x82, 0x12, 0x21,
+ 0xef, 0xf7, 0xd1, 0x7d, 0xb8, 0x8d, 0x8c, 0x98,
+ 0x7c, 0x5f, 0x7d, 0x92, 0x88, 0xb9, 0x94, 0x07,
+ 0x9c, 0xd8, 0xe9, 0x9c, 0x17, 0x38, 0xe3, 0x57,
+ 0x6c, 0xe0, 0xdc, 0xa5, 0x92, 0x42, 0xb3, 0xbd,
+ 0x50, 0xa2, 0x7e, 0xb5, 0xb1, 0x52, 0x72, 0x03,
+ 0x97, 0xd8, 0xaa, 0x9a, 0x1e, 0x75, 0x41, 0x11,
+ 0xa3, 0x4f, 0xcc, 0xd4, 0xe3, 0x73, 0xad, 0x96,
+ 0xdc, 0x47, 0x41, 0x9f, 0xb0, 0xbe, 0x79, 0x91,
+ 0xf5, 0xb6, 0x18, 0xfe, 0xc2, 0x83, 0x18, 0x7d,
+ 0x73, 0xd9, 0x4f, 0x83, 0x84, 0x03, 0xb3, 0xf0,
+ 0x77, 0x66, 0x3d, 0x83, 0x63, 0x2e, 0x2c, 0xf9,
+ 0xdd, 0xa6, 0x1f, 0x89, 0x82, 0xb8, 0x23, 0x42,
+ 0xeb, 0xe2, 0xca, 0x70, 0x82, 0x61, 0x41, 0x0a,
+ 0x6d, 0x5f, 0x75, 0xc5, 0xe2, 0xc4, 0x91, 0x18,
+ 0x44, 0x22, 0xfa, 0x34, 0x10, 0xf5, 0x20, 0xdc,
+ 0xb7, 0xdd, 0x2a, 0x20, 0x77, 0xf5, 0xf9, 0xce,
+ 0xdb, 0xa0, 0x0a, 0x52, 0x2a, 0x4e, 0xdd, 0xcc,
+ 0x97, 0xdf, 0x05, 0xe4, 0x5e, 0xb7, 0xaa, 0xf0,
+ 0xe2, 0x80, 0xff, 0xba, 0x1a, 0x0f, 0xac, 0xdf,
+ 0x02, 0x32, 0xe6, 0xf7, 0xc7, 0x17, 0x13, 0xb7,
+ 0xfc, 0x98, 0x48, 0x8c, 0x0d, 0x82, 0xc9, 0x80,
+ 0x7a, 0xe2, 0x0a, 0xc5, 0xb4, 0xde, 0x7c, 0x3c,
+ 0x79, 0x81, 0x0e, 0x28, 0x65, 0x79, 0x67, 0x82,
+ 0x69, 0x44, 0x66, 0x09, 0xf7, 0x16, 0x1a, 0xf9,
+ 0x7d, 0x80, 0xa1, 0x79, 0x14, 0xa9, 0xc8, 0x20,
+ 0xfb, 0xa2, 0x46, 0xbe, 0x08, 0x35, 0x17, 0x58,
+ 0xc1, 0x1a, 0xda, 0x2a, 0x6b, 0x2e, 0x1e, 0xe6,
+ 0x27, 0x55, 0x7b, 0x19, 0xe2, 0xfb, 0x64, 0xfc,
+ 0x5e, 0x15, 0x54, 0x3c, 0xe7, 0xc2, 0x11, 0x50,
+ 0x30, 0xb8, 0x72, 0x03, 0x0b, 0x1a, 0x9f, 0x86,
+ 0x27, 0x11, 0x5c, 0x06, 0x2b, 0xbd, 0x75, 0x1a,
+ 0x0a, 0xda, 0x01, 0xfa, 0x5c, 0x4a, 0xc1, 0x80,
+ 0x3a, 0x6e, 0x30, 0xc8, 0x2c, 0xeb, 0x56, 0xec,
+ 0x89, 0xfa, 0x35, 0x7b, 0xb2, 0xf0, 0x97, 0x08,
+ 0x86, 0x53, 0xbe, 0xbd, 0x40, 0x41, 0x38, 0x1c,
+ 0xb4, 0x8b, 0x79, 0x2e, 0x18, 0x96, 0x94, 0xde,
+ 0xe8, 0xca, 0xe5, 0x9f, 0x92, 0x9f, 0x15, 0x5d,
+ 0x56, 0x60, 0x5c, 0x09, 0xf9, 0x16, 0xf4, 0x17,
+ 0x0f, 0xf6, 0x4c, 0xda, 0xe6, 0x67, 0x89, 0x9f,
+ 0xca, 0x6c, 0xe7, 0x9b, 0x04, 0x62, 0x0e, 0x26,
+ 0xa6, 0x52, 0xbd, 0x29, 0xff, 0xc7, 0xa4, 0x96,
+ 0xe6, 0x6a, 0x02, 0xa5, 0x2e, 0x7b, 0xfe, 0x97,
+ 0x68, 0x3e, 0x2e, 0x5f, 0x3b, 0x0f, 0x36, 0xd6,
+ 0x98, 0x19, 0x59, 0x48, 0xd2, 0xc6, 0xe1, 0x55,
+ 0x1a, 0x6e, 0xd6, 0xed, 0x2c, 0xba, 0xc3, 0x9e,
+ 0x64, 0xc9, 0x95, 0x86, 0x35, 0x5e, 0x3e, 0x88,
+ 0x69, 0x99, 0x4b, 0xee, 0xbe, 0x9a, 0x99, 0xb5,
+ 0x6e, 0x58, 0xae, 0xdd, 0x22, 0xdb, 0xdd, 0x6b,
+ 0xfc, 0xaf, 0x90, 0xa3, 0x3d, 0xa4, 0xc1, 0x15,
+ 0x92, 0x18, 0x8d, 0xd2, 0x4b, 0x7b, 0x06, 0xd1,
+ 0x37, 0xb5, 0xe2, 0x7c, 0x2c, 0xf0, 0x25, 0xe4,
+ 0x94, 0x2a, 0xbd, 0xe3, 0x82, 0x70, 0x78, 0xa3,
+ 0x82, 0x10, 0x5a, 0x90, 0xd7, 0xa4, 0xfa, 0xaf,
+ 0x1a, 0x88, 0x59, 0xdc, 0x74, 0x12, 0xb4, 0x8e,
+ 0xd7, 0x19, 0x46, 0xf4, 0x84, 0x69, 0x9f, 0xbb,
+ 0x70, 0xa8, 0x4c, 0x52, 0x81, 0xa9, 0xff, 0x76,
+ 0x1c, 0xae, 0xd8, 0x11, 0x3d, 0x7f, 0x7d, 0xc5,
+ 0x12, 0x59, 0x28, 0x18, 0xc2, 0xa2, 0xb7, 0x1c,
+ 0x88, 0xf8, 0xd6, 0x1b, 0xa6, 0x7d, 0x9e, 0xde,
+ 0x29, 0xf8, 0xed, 0xff, 0xeb, 0x92, 0x24, 0x4f,
+ 0x05, 0xaa, 0xd9, 0x49, 0xba, 0x87, 0x59, 0x51,
+ 0xc9, 0x20, 0x5c, 0x9b, 0x74, 0xcf, 0x03, 0xd9,
+ 0x2d, 0x34, 0xc7, 0x5b, 0xa5, 0x40, 0xb2, 0x99,
+ 0xf5, 0xcb, 0xb4, 0xf6, 0xb7, 0x72, 0x4a, 0xd6,
+ 0xbd, 0xb0, 0xf3, 0x93, 0xe0, 0x1b, 0xa8, 0x04,
+ 0x1e, 0x35, 0xd4, 0x80, 0x20, 0xf4, 0x9c, 0x31,
+ 0x6b, 0x45, 0xb9, 0x15, 0xb0, 0x5e, 0xdd, 0x0a,
+ 0x33, 0x9c, 0x83, 0xcd, 0x58, 0x89, 0x50, 0x56,
+ 0xbb, 0x81, 0x00, 0x91, 0x32, 0xf3, 0x1b, 0x3e,
+ 0xcf, 0x45, 0xe1, 0xf9, 0xe1, 0x2c, 0x26, 0x78,
+ 0x93, 0x9a, 0x60, 0x46, 0xc9, 0xb5, 0x5e, 0x6a,
+ 0x28, 0x92, 0x87, 0x3f, 0x63, 0x7b, 0xdb, 0xf7,
+ 0xd0, 0x13, 0x9d, 0x32, 0x40, 0x5e, 0xcf, 0xfb,
+ 0x79, 0x68, 0x47, 0x4c, 0xfd, 0x01, 0x17, 0xe6,
+ 0x97, 0x93, 0x78, 0xbb, 0xa6, 0x27, 0xa3, 0xe8,
+ 0x1a, 0xe8, 0x94, 0x55, 0x7d, 0x08, 0xe5, 0xdc,
+ 0x66, 0xa3, 0x69, 0xc8, 0xca, 0xc5, 0xa1, 0x84,
+ 0x55, 0xde, 0x08, 0x91, 0x16, 0x3a, 0x0c, 0x86,
+ 0xab, 0x27, 0x2b, 0x64, 0x34, 0x02, 0x6c, 0x76,
+ 0x8b, 0xc6, 0xaf, 0xcc, 0xe1, 0xd6, 0x8c, 0x2a,
+ 0x18, 0x3d, 0xa6, 0x1b, 0x37, 0x75, 0x45, 0x73,
+ 0xc2, 0x75, 0xd7, 0x53, 0x78, 0x3a, 0xd6, 0xe8,
+ 0x29, 0xd2, 0x4a, 0xa8, 0x1e, 0x82, 0xf6, 0xb6,
+ 0x81, 0xde, 0x21, 0xed, 0x2b, 0x56, 0xbb, 0xf2,
+ 0xd0, 0x57, 0xc1, 0x7c, 0xd2, 0x6a, 0xd2, 0x56,
+ 0xf5, 0x13, 0x5f, 0x1c, 0x6a, 0x0b, 0x74, 0xfb,
+ 0xe9, 0xfe, 0x9e, 0xea, 0x95, 0xb2, 0x46, 0xab,
+ 0x0a, 0xfc, 0xfd, 0xf3, 0xbb, 0x04, 0x2b, 0x76,
+ 0x1b, 0xa4, 0x74, 0xb0, 0xc1, 0x78, 0xc3, 0x69,
+ 0xe2, 0xb0, 0x01, 0xe1, 0xde, 0x32, 0x4c, 0x8d,
+ 0x1a, 0xb3, 0x38, 0x08, 0xd5, 0xfc, 0x1f, 0xdc,
+ 0x0e, 0x2c, 0x9c, 0xb1, 0xa1, 0x63, 0x17, 0x22,
+ 0xf5, 0x6c, 0x93, 0x70, 0x74, 0x00, 0xf8, 0x39,
+ 0x01, 0x94, 0xd1, 0x32, 0x23, 0x56, 0x5d, 0xa6,
+ 0x02, 0x76, 0x76, 0x93, 0xce, 0x2f, 0x19, 0xe9,
+ 0x17, 0x52, 0xae, 0x6e, 0x2c, 0x6d, 0x61, 0x7f,
+ 0x3b, 0xaa, 0xe0, 0x52, 0x85, 0xc5, 0x65, 0xc1,
+ 0xbb, 0x8e, 0x5b, 0x21, 0xd5, 0xc9, 0x78, 0x83,
+ 0x07, 0x97, 0x4c, 0x62, 0x61, 0x41, 0xd4, 0xfc,
+ 0xc9, 0x39, 0xe3, 0x9b, 0xd0, 0xcc, 0x75, 0xc4,
+ 0x97, 0xe6, 0xdd, 0x2a, 0x5f, 0xa6, 0xe8, 0x59,
+ 0x6c, 0x98, 0xb9, 0x02, 0xe2, 0xa2, 0xd6, 0x68,
+ 0xee, 0x3b, 0x1d, 0xe3, 0x4d, 0x5b, 0x30, 0xef,
+ 0x03, 0xf2, 0xeb, 0x18, 0x57, 0x36, 0xe8, 0xa1,
+ 0xf4, 0x47, 0xfb, 0xcb, 0x8f, 0xcb, 0xc8, 0xf3,
+ 0x4f, 0x74, 0x9d, 0x9d, 0xb1, 0x8d, 0x14, 0x44,
+ 0xd9, 0x19, 0xb4, 0x54, 0x4f, 0x75, 0x19, 0x09,
+ 0xa0, 0x75, 0xbc, 0x3b, 0x82, 0xc6, 0x3f, 0xb8,
+ 0x83, 0x19, 0x6e, 0xd6, 0x37, 0xfe, 0x6e, 0x8a,
+ 0x4e, 0xe0, 0x4a, 0xab, 0x7b, 0xc8, 0xb4, 0x1d,
+ 0xf4, 0xed, 0x27, 0x03, 0x65, 0xa2, 0xa1, 0xae,
+ 0x11, 0xe7, 0x98, 0x78, 0x48, 0x91, 0xd2, 0xd2,
+ 0xd4, 0x23, 0x78, 0x50, 0xb1, 0x5b, 0x85, 0x10,
+ 0x8d, 0xca, 0x5f, 0x0f, 0x71, 0xae, 0x72, 0x9a,
+ 0xf6, 0x25, 0x19, 0x60, 0x06, 0xf7, 0x10, 0x34,
+ 0x18, 0x0d, 0xc9, 0x9f, 0x7b, 0x0c, 0x9b, 0x8f,
+ 0x91, 0x1b, 0x9f, 0xcd, 0x10, 0xee, 0x75, 0xf9,
+ 0x97, 0x66, 0xfc, 0x4d, 0x33, 0x6e, 0x28, 0x2b,
+ 0x92, 0x85, 0x4f, 0xab, 0x43, 0x8d, 0x8f, 0x7d,
+ 0x86, 0xa7, 0xc7, 0xd8, 0xd3, 0x0b, 0x8b, 0x57,
+ 0xb6, 0x1d, 0x95, 0x0d, 0xe9, 0xbc, 0xd9, 0x03,
+ 0xd9, 0x10, 0x19, 0xc3, 0x46, 0x63, 0x55, 0x87,
+ 0x61, 0x79, 0x6c, 0x95, 0x0e, 0x9c, 0xdd, 0xca,
+ 0xc3, 0xf3, 0x64, 0xf0, 0x7d, 0x76, 0xb7, 0x53,
+ 0x67, 0x2b, 0x1e, 0x44, 0x56, 0x81, 0xea, 0x8f,
+ 0x5c, 0x42, 0x16, 0xb8, 0x28, 0xeb, 0x1b, 0x61,
+ 0x10, 0x1e, 0xbf, 0xec, 0xa8
+};
+static const u8 enc_output011[] __initconst = {
+ 0x6a, 0xfc, 0x4b, 0x25, 0xdf, 0xc0, 0xe4, 0xe8,
+ 0x17, 0x4d, 0x4c, 0xc9, 0x7e, 0xde, 0x3a, 0xcc,
+ 0x3c, 0xba, 0x6a, 0x77, 0x47, 0xdb, 0xe3, 0x74,
+ 0x7a, 0x4d, 0x5f, 0x8d, 0x37, 0x55, 0x80, 0x73,
+ 0x90, 0x66, 0x5d, 0x3a, 0x7d, 0x5d, 0x86, 0x5e,
+ 0x8d, 0xfd, 0x83, 0xff, 0x4e, 0x74, 0x6f, 0xf9,
+ 0xe6, 0x70, 0x17, 0x70, 0x3e, 0x96, 0xa7, 0x7e,
+ 0xcb, 0xab, 0x8f, 0x58, 0x24, 0x9b, 0x01, 0xfd,
+ 0xcb, 0xe6, 0x4d, 0x9b, 0xf0, 0x88, 0x94, 0x57,
+ 0x66, 0xef, 0x72, 0x4c, 0x42, 0x6e, 0x16, 0x19,
+ 0x15, 0xea, 0x70, 0x5b, 0xac, 0x13, 0xdb, 0x9f,
+ 0x18, 0xe2, 0x3c, 0x26, 0x97, 0xbc, 0xdc, 0x45,
+ 0x8c, 0x6c, 0x24, 0x69, 0x9c, 0xf7, 0x65, 0x1e,
+ 0x18, 0x59, 0x31, 0x7c, 0xe4, 0x73, 0xbc, 0x39,
+ 0x62, 0xc6, 0x5c, 0x9f, 0xbf, 0xfa, 0x90, 0x03,
+ 0xc9, 0x72, 0x26, 0xb6, 0x1b, 0xc2, 0xb7, 0x3f,
+ 0xf2, 0x13, 0x77, 0xf2, 0x8d, 0xb9, 0x47, 0xd0,
+ 0x53, 0xdd, 0xc8, 0x91, 0x83, 0x8b, 0xb1, 0xce,
+ 0xa3, 0xfe, 0xcd, 0xd9, 0xdd, 0x92, 0x7b, 0xdb,
+ 0xb8, 0xfb, 0xc9, 0x2d, 0x01, 0x59, 0x39, 0x52,
+ 0xad, 0x1b, 0xec, 0xcf, 0xd7, 0x70, 0x13, 0x21,
+ 0xf5, 0x47, 0xaa, 0x18, 0x21, 0x5c, 0xc9, 0x9a,
+ 0xd2, 0x6b, 0x05, 0x9c, 0x01, 0xa1, 0xda, 0x35,
+ 0x5d, 0xb3, 0x70, 0xe6, 0xa9, 0x80, 0x8b, 0x91,
+ 0xb7, 0xb3, 0x5f, 0x24, 0x9a, 0xb7, 0xd1, 0x6b,
+ 0xa1, 0x1c, 0x50, 0xba, 0x49, 0xe0, 0xee, 0x2e,
+ 0x75, 0xac, 0x69, 0xc0, 0xeb, 0x03, 0xdd, 0x19,
+ 0xe5, 0xf6, 0x06, 0xdd, 0xc3, 0xd7, 0x2b, 0x07,
+ 0x07, 0x30, 0xa7, 0x19, 0x0c, 0xbf, 0xe6, 0x18,
+ 0xcc, 0xb1, 0x01, 0x11, 0x85, 0x77, 0x1d, 0x96,
+ 0xa7, 0xa3, 0x00, 0x84, 0x02, 0xa2, 0x83, 0x68,
+ 0xda, 0x17, 0x27, 0xc8, 0x7f, 0x23, 0xb7, 0xf4,
+ 0x13, 0x85, 0xcf, 0xdd, 0x7a, 0x7d, 0x24, 0x57,
+ 0xfe, 0x05, 0x93, 0xf5, 0x74, 0xce, 0xed, 0x0c,
+ 0x20, 0x98, 0x8d, 0x92, 0x30, 0xa1, 0x29, 0x23,
+ 0x1a, 0xa0, 0x4f, 0x69, 0x56, 0x4c, 0xe1, 0xc8,
+ 0xce, 0xf6, 0x9a, 0x0c, 0xa4, 0xfa, 0x04, 0xf6,
+ 0x62, 0x95, 0xf2, 0xfa, 0xc7, 0x40, 0x68, 0x40,
+ 0x8f, 0x41, 0xda, 0xb4, 0x26, 0x6f, 0x70, 0xab,
+ 0x40, 0x61, 0xa4, 0x0e, 0x75, 0xfb, 0x86, 0xeb,
+ 0x9d, 0x9a, 0x1f, 0xec, 0x76, 0x99, 0xe7, 0xea,
+ 0xaa, 0x1e, 0x2d, 0xb5, 0xd4, 0xa6, 0x1a, 0xb8,
+ 0x61, 0x0a, 0x1d, 0x16, 0x5b, 0x98, 0xc2, 0x31,
+ 0x40, 0xe7, 0x23, 0x1d, 0x66, 0x99, 0xc8, 0xc0,
+ 0xd7, 0xce, 0xf3, 0x57, 0x40, 0x04, 0x3f, 0xfc,
+ 0xea, 0xb3, 0xfc, 0xd2, 0xd3, 0x99, 0xa4, 0x94,
+ 0x69, 0xa0, 0xef, 0xd1, 0x85, 0xb3, 0xa6, 0xb1,
+ 0x28, 0xbf, 0x94, 0x67, 0x22, 0xc3, 0x36, 0x46,
+ 0xf8, 0xd2, 0x0f, 0x5f, 0xf4, 0x59, 0x80, 0xe6,
+ 0x2d, 0x43, 0x08, 0x7d, 0x19, 0x09, 0x97, 0xa7,
+ 0x4c, 0x3d, 0x8d, 0xba, 0x65, 0x62, 0xa3, 0x71,
+ 0x33, 0x29, 0x62, 0xdb, 0xc1, 0x33, 0x34, 0x1a,
+ 0x63, 0x33, 0x16, 0xb6, 0x64, 0x7e, 0xab, 0x33,
+ 0xf0, 0xe6, 0x26, 0x68, 0xba, 0x1d, 0x2e, 0x38,
+ 0x08, 0xe6, 0x02, 0xd3, 0x25, 0x2c, 0x47, 0x23,
+ 0x58, 0x34, 0x0f, 0x9d, 0x63, 0x4f, 0x63, 0xbb,
+ 0x7f, 0x3b, 0x34, 0x38, 0xa7, 0xb5, 0x8d, 0x65,
+ 0xd9, 0x9f, 0x79, 0x55, 0x3e, 0x4d, 0xe7, 0x73,
+ 0xd8, 0xf6, 0x98, 0x97, 0x84, 0x60, 0x9c, 0xc8,
+ 0xa9, 0x3c, 0xf6, 0xdc, 0x12, 0x5c, 0xe1, 0xbb,
+ 0x0b, 0x8b, 0x98, 0x9c, 0x9d, 0x26, 0x7c, 0x4a,
+ 0xe6, 0x46, 0x36, 0x58, 0x21, 0x4a, 0xee, 0xca,
+ 0xd7, 0x3b, 0xc2, 0x6c, 0x49, 0x2f, 0xe5, 0xd5,
+ 0x03, 0x59, 0x84, 0x53, 0xcb, 0xfe, 0x92, 0x71,
+ 0x2e, 0x7c, 0x21, 0xcc, 0x99, 0x85, 0x7f, 0xb8,
+ 0x74, 0x90, 0x13, 0x42, 0x3f, 0xe0, 0x6b, 0x1d,
+ 0xf2, 0x4d, 0x54, 0xd4, 0xfc, 0x3a, 0x05, 0xe6,
+ 0x74, 0xaf, 0xa6, 0xa0, 0x2a, 0x20, 0x23, 0x5d,
+ 0x34, 0x5c, 0xd9, 0x3e, 0x4e, 0xfa, 0x93, 0xe7,
+ 0xaa, 0xe9, 0x6f, 0x08, 0x43, 0x67, 0x41, 0xc5,
+ 0xad, 0xfb, 0x31, 0x95, 0x82, 0x73, 0x32, 0xd8,
+ 0xa6, 0xa3, 0xed, 0x0e, 0x2d, 0xf6, 0x5f, 0xfd,
+ 0x80, 0xa6, 0x7a, 0xe0, 0xdf, 0x78, 0x15, 0x29,
+ 0x74, 0x33, 0xd0, 0x9e, 0x83, 0x86, 0x72, 0x22,
+ 0x57, 0x29, 0xb9, 0x9e, 0x5d, 0xd3, 0x1a, 0xb5,
+ 0x96, 0x72, 0x41, 0x3d, 0xf1, 0x64, 0x43, 0x67,
+ 0xee, 0xaa, 0x5c, 0xd3, 0x9a, 0x96, 0x13, 0x11,
+ 0x5d, 0xf3, 0x0c, 0x87, 0x82, 0x1e, 0x41, 0x9e,
+ 0xd0, 0x27, 0xd7, 0x54, 0x3b, 0x67, 0x73, 0x09,
+ 0x91, 0xe9, 0xd5, 0x36, 0xa7, 0xb5, 0x55, 0xe4,
+ 0xf3, 0x21, 0x51, 0x49, 0x22, 0x07, 0x55, 0x4f,
+ 0x44, 0x4b, 0xd2, 0x15, 0x93, 0x17, 0x2a, 0xfa,
+ 0x4d, 0x4a, 0x57, 0xdb, 0x4c, 0xa6, 0xeb, 0xec,
+ 0x53, 0x25, 0x6c, 0x21, 0xed, 0x00, 0x4c, 0x3b,
+ 0xca, 0x14, 0x57, 0xa9, 0xd6, 0x6a, 0xcd, 0x8d,
+ 0x5e, 0x74, 0xac, 0x72, 0xc1, 0x97, 0xe5, 0x1b,
+ 0x45, 0x4e, 0xda, 0xfc, 0xcc, 0x40, 0xe8, 0x48,
+ 0x88, 0x0b, 0xa3, 0xe3, 0x8d, 0x83, 0x42, 0xc3,
+ 0x23, 0xfd, 0x68, 0xb5, 0x8e, 0xf1, 0x9d, 0x63,
+ 0x77, 0xe9, 0xa3, 0x8e, 0x8c, 0x26, 0x6b, 0xbd,
+ 0x72, 0x73, 0x35, 0x0c, 0x03, 0xf8, 0x43, 0x78,
+ 0x52, 0x71, 0x15, 0x1f, 0x71, 0x5d, 0x6e, 0xed,
+ 0xb9, 0xcc, 0x86, 0x30, 0xdb, 0x2b, 0xd3, 0x82,
+ 0x88, 0x23, 0x71, 0x90, 0x53, 0x5c, 0xa9, 0x2f,
+ 0x76, 0x01, 0xb7, 0x9a, 0xfe, 0x43, 0x55, 0xa3,
+ 0x04, 0x9b, 0x0e, 0xe4, 0x59, 0xdf, 0xc9, 0xe9,
+ 0xb1, 0xea, 0x29, 0x28, 0x3c, 0x5c, 0xae, 0x72,
+ 0x84, 0xb6, 0xc6, 0xeb, 0x0c, 0x27, 0x07, 0x74,
+ 0x90, 0x0d, 0x31, 0xb0, 0x00, 0x77, 0xe9, 0x40,
+ 0x70, 0x6f, 0x68, 0xa7, 0xfd, 0x06, 0xec, 0x4b,
+ 0xc0, 0xb7, 0xac, 0xbc, 0x33, 0xb7, 0x6d, 0x0a,
+ 0xbd, 0x12, 0x1b, 0x59, 0xcb, 0xdd, 0x32, 0xf5,
+ 0x1d, 0x94, 0x57, 0x76, 0x9e, 0x0c, 0x18, 0x98,
+ 0x71, 0xd7, 0x2a, 0xdb, 0x0b, 0x7b, 0xa7, 0x71,
+ 0xb7, 0x67, 0x81, 0x23, 0x96, 0xae, 0xb9, 0x7e,
+ 0x32, 0x43, 0x92, 0x8a, 0x19, 0xa0, 0xc4, 0xd4,
+ 0x3b, 0x57, 0xf9, 0x4a, 0x2c, 0xfb, 0x51, 0x46,
+ 0xbb, 0xcb, 0x5d, 0xb3, 0xef, 0x13, 0x93, 0x6e,
+ 0x68, 0x42, 0x54, 0x57, 0xd3, 0x6a, 0x3a, 0x8f,
+ 0x9d, 0x66, 0xbf, 0xbd, 0x36, 0x23, 0xf5, 0x93,
+ 0x83, 0x7b, 0x9c, 0xc0, 0xdd, 0xc5, 0x49, 0xc0,
+ 0x64, 0xed, 0x07, 0x12, 0xb3, 0xe6, 0xe4, 0xe5,
+ 0x38, 0x95, 0x23, 0xb1, 0xa0, 0x3b, 0x1a, 0x61,
+ 0xda, 0x17, 0xac, 0xc3, 0x58, 0xdd, 0x74, 0x64,
+ 0x22, 0x11, 0xe8, 0x32, 0x1d, 0x16, 0x93, 0x85,
+ 0x99, 0xa5, 0x9c, 0x34, 0x55, 0xb1, 0xe9, 0x20,
+ 0x72, 0xc9, 0x28, 0x7b, 0x79, 0x00, 0xa1, 0xa6,
+ 0xa3, 0x27, 0x40, 0x18, 0x8a, 0x54, 0xe0, 0xcc,
+ 0xe8, 0x4e, 0x8e, 0x43, 0x96, 0xe7, 0x3f, 0xc8,
+ 0xe9, 0xb2, 0xf9, 0xc9, 0xda, 0x04, 0x71, 0x50,
+ 0x47, 0xe4, 0xaa, 0xce, 0xa2, 0x30, 0xc8, 0xe4,
+ 0xac, 0xc7, 0x0d, 0x06, 0x2e, 0xe6, 0xe8, 0x80,
+ 0x36, 0x29, 0x9e, 0x01, 0xb8, 0xc3, 0xf0, 0xa0,
+ 0x5d, 0x7a, 0xca, 0x4d, 0xa0, 0x57, 0xbd, 0x2a,
+ 0x45, 0xa7, 0x7f, 0x9c, 0x93, 0x07, 0x8f, 0x35,
+ 0x67, 0x92, 0xe3, 0xe9, 0x7f, 0xa8, 0x61, 0x43,
+ 0x9e, 0x25, 0x4f, 0x33, 0x76, 0x13, 0x6e, 0x12,
+ 0xb9, 0xdd, 0xa4, 0x7c, 0x08, 0x9f, 0x7c, 0xe7,
+ 0x0a, 0x8d, 0x84, 0x06, 0xa4, 0x33, 0x17, 0x34,
+ 0x5e, 0x10, 0x7c, 0xc0, 0xa8, 0x3d, 0x1f, 0x42,
+ 0x20, 0x51, 0x65, 0x5d, 0x09, 0xc3, 0xaa, 0xc0,
+ 0xc8, 0x0d, 0xf0, 0x79, 0xbc, 0x20, 0x1b, 0x95,
+ 0xe7, 0x06, 0x7d, 0x47, 0x20, 0x03, 0x1a, 0x74,
+ 0xdd, 0xe2, 0xd4, 0xae, 0x38, 0x71, 0x9b, 0xf5,
+ 0x80, 0xec, 0x08, 0x4e, 0x56, 0xba, 0x76, 0x12,
+ 0x1a, 0xdf, 0x48, 0xf3, 0xae, 0xb3, 0xe6, 0xe6,
+ 0xbe, 0xc0, 0x91, 0x2e, 0x01, 0xb3, 0x01, 0x86,
+ 0xa2, 0xb9, 0x52, 0xd1, 0x21, 0xae, 0xd4, 0x97,
+ 0x1d, 0xef, 0x41, 0x12, 0x95, 0x3d, 0x48, 0x45,
+ 0x1c, 0x56, 0x32, 0x8f, 0xb8, 0x43, 0xbb, 0x19,
+ 0xf3, 0xca, 0xe9, 0xeb, 0x6d, 0x84, 0xbe, 0x86,
+ 0x06, 0xe2, 0x36, 0xb2, 0x62, 0x9d, 0xd3, 0x4c,
+ 0x48, 0x18, 0x54, 0x13, 0x4e, 0xcf, 0xfd, 0xba,
+ 0x84, 0xb9, 0x30, 0x53, 0xcf, 0xfb, 0xb9, 0x29,
+ 0x8f, 0xdc, 0x9f, 0xef, 0x60, 0x0b, 0x64, 0xf6,
+ 0x8b, 0xee, 0xa6, 0x91, 0xc2, 0x41, 0x6c, 0xf6,
+ 0xfa, 0x79, 0x67, 0x4b, 0xc1, 0x3f, 0xaf, 0x09,
+ 0x81, 0xd4, 0x5d, 0xcb, 0x09, 0xdf, 0x36, 0x31,
+ 0xc0, 0x14, 0x3c, 0x7c, 0x0e, 0x65, 0x95, 0x99,
+ 0x6d, 0xa3, 0xf4, 0xd7, 0x38, 0xee, 0x1a, 0x2b,
+ 0x37, 0xe2, 0xa4, 0x3b, 0x4b, 0xd0, 0x65, 0xca,
+ 0xf8, 0xc3, 0xe8, 0x15, 0x20, 0xef, 0xf2, 0x00,
+ 0xfd, 0x01, 0x09, 0xc5, 0xc8, 0x17, 0x04, 0x93,
+ 0xd0, 0x93, 0x03, 0x55, 0xc5, 0xfe, 0x32, 0xa3,
+ 0x3e, 0x28, 0x2d, 0x3b, 0x93, 0x8a, 0xcc, 0x07,
+ 0x72, 0x80, 0x8b, 0x74, 0x16, 0x24, 0xbb, 0xda,
+ 0x94, 0x39, 0x30, 0x8f, 0xb1, 0xcd, 0x4a, 0x90,
+ 0x92, 0x7c, 0x14, 0x8f, 0x95, 0x4e, 0xac, 0x9b,
+ 0xd8, 0x8f, 0x1a, 0x87, 0xa4, 0x32, 0x27, 0x8a,
+ 0xba, 0xf7, 0x41, 0xcf, 0x84, 0x37, 0x19, 0xe6,
+ 0x06, 0xf5, 0x0e, 0xcf, 0x36, 0xf5, 0x9e, 0x6c,
+ 0xde, 0xbc, 0xff, 0x64, 0x7e, 0x4e, 0x59, 0x57,
+ 0x48, 0xfe, 0x14, 0xf7, 0x9c, 0x93, 0x5d, 0x15,
+ 0xad, 0xcc, 0x11, 0xb1, 0x17, 0x18, 0xb2, 0x7e,
+ 0xcc, 0xab, 0xe9, 0xce, 0x7d, 0x77, 0x5b, 0x51,
+ 0x1b, 0x1e, 0x20, 0xa8, 0x32, 0x06, 0x0e, 0x75,
+ 0x93, 0xac, 0xdb, 0x35, 0x37, 0x1f, 0xe9, 0x19,
+ 0x1d, 0xb4, 0x71, 0x97, 0xd6, 0x4e, 0x2c, 0x08,
+ 0xa5, 0x13, 0xf9, 0x0e, 0x7e, 0x78, 0x6e, 0x14,
+ 0xe0, 0xa9, 0xb9, 0x96, 0x4c, 0x80, 0x82, 0xba,
+ 0x17, 0xb3, 0x9d, 0x69, 0xb0, 0x84, 0x46, 0xff,
+ 0xf9, 0x52, 0x79, 0x94, 0x58, 0x3a, 0x62, 0x90,
+ 0x15, 0x35, 0x71, 0x10, 0x37, 0xed, 0xa1, 0x8e,
+ 0x53, 0x6e, 0xf4, 0x26, 0x57, 0x93, 0x15, 0x93,
+ 0xf6, 0x81, 0x2c, 0x5a, 0x10, 0xda, 0x92, 0xad,
+ 0x2f, 0xdb, 0x28, 0x31, 0x2d, 0x55, 0x04, 0xd2,
+ 0x06, 0x28, 0x8c, 0x1e, 0xdc, 0xea, 0x54, 0xac,
+ 0xff, 0xb7, 0x6c, 0x30, 0x15, 0xd4, 0xb4, 0x0d,
+ 0x00, 0x93, 0x57, 0xdd, 0xd2, 0x07, 0x07, 0x06,
+ 0xd9, 0x43, 0x9b, 0xcd, 0x3a, 0xf4, 0x7d, 0x4c,
+ 0x36, 0x5d, 0x23, 0xa2, 0xcc, 0x57, 0x40, 0x91,
+ 0xe9, 0x2c, 0x2f, 0x2c, 0xd5, 0x30, 0x9b, 0x17,
+ 0xb0, 0xc9, 0xf7, 0xa7, 0x2f, 0xd1, 0x93, 0x20,
+ 0x6b, 0xc6, 0xc1, 0xe4, 0x6f, 0xcb, 0xd1, 0xe7,
+ 0x09, 0x0f, 0x9e, 0xdc, 0xaa, 0x9f, 0x2f, 0xdf,
+ 0x56, 0x9f, 0xd4, 0x33, 0x04, 0xaf, 0xd3, 0x6c,
+ 0x58, 0x61, 0xf0, 0x30, 0xec, 0xf2, 0x7f, 0xf2,
+ 0x9c, 0xdf, 0x39, 0xbb, 0x6f, 0xa2, 0x8c, 0x7e,
+ 0xc4, 0x22, 0x51, 0x71, 0xc0, 0x4d, 0x14, 0x1a,
+ 0xc4, 0xcd, 0x04, 0xd9, 0x87, 0x08, 0x50, 0x05,
+ 0xcc, 0xaf, 0xf6, 0xf0, 0x8f, 0x92, 0x54, 0x58,
+ 0xc2, 0xc7, 0x09, 0x7a, 0x59, 0x02, 0x05, 0xe8,
+ 0xb0, 0x86, 0xd9, 0xbf, 0x7b, 0x35, 0x51, 0x4d,
+ 0xaf, 0x08, 0x97, 0x2c, 0x65, 0xda, 0x2a, 0x71,
+ 0x3a, 0xa8, 0x51, 0xcc, 0xf2, 0x73, 0x27, 0xc3,
+ 0xfd, 0x62, 0xcf, 0xe3, 0xb2, 0xca, 0xcb, 0xbe,
+ 0x1a, 0x0a, 0xa1, 0x34, 0x7b, 0x77, 0xc4, 0x62,
+ 0x68, 0x78, 0x5f, 0x94, 0x07, 0x04, 0x65, 0x16,
+ 0x4b, 0x61, 0xcb, 0xff, 0x75, 0x26, 0x50, 0x66,
+ 0x1f, 0x6e, 0x93, 0xf8, 0xc5, 0x51, 0xeb, 0xa4,
+ 0x4a, 0x48, 0x68, 0x6b, 0xe2, 0x5e, 0x44, 0xb2,
+ 0x50, 0x2c, 0x6c, 0xae, 0x79, 0x4e, 0x66, 0x35,
+ 0x81, 0x50, 0xac, 0xbc, 0x3f, 0xb1, 0x0c, 0xf3,
+ 0x05, 0x3c, 0x4a, 0xa3, 0x6c, 0x2a, 0x79, 0xb4,
+ 0xb7, 0xab, 0xca, 0xc7, 0x9b, 0x8e, 0xcd, 0x5f,
+ 0x11, 0x03, 0xcb, 0x30, 0xa3, 0xab, 0xda, 0xfe,
+ 0x64, 0xb9, 0xbb, 0xd8, 0x5e, 0x3a, 0x1a, 0x56,
+ 0xe5, 0x05, 0x48, 0x90, 0x1e, 0x61, 0x69, 0x1b,
+ 0x22, 0xe6, 0x1a, 0x3c, 0x75, 0xad, 0x1f, 0x37,
+ 0x28, 0xdc, 0xe4, 0x6d, 0xbd, 0x42, 0xdc, 0xd3,
+ 0xc8, 0xb6, 0x1c, 0x48, 0xfe, 0x94, 0x77, 0x7f,
+ 0xbd, 0x62, 0xac, 0xa3, 0x47, 0x27, 0xcf, 0x5f,
+ 0xd9, 0xdb, 0xaf, 0xec, 0xf7, 0x5e, 0xc1, 0xb0,
+ 0x9d, 0x01, 0x26, 0x99, 0x7e, 0x8f, 0x03, 0x70,
+ 0xb5, 0x42, 0xbe, 0x67, 0x28, 0x1b, 0x7c, 0xbd,
+ 0x61, 0x21, 0x97, 0xcc, 0x5c, 0xe1, 0x97, 0x8f,
+ 0x8d, 0xde, 0x2b, 0xaa, 0xa7, 0x71, 0x1d, 0x1e,
+ 0x02, 0x73, 0x70, 0x58, 0x32, 0x5b, 0x1d, 0x67,
+ 0x3d, 0xe0, 0x74, 0x4f, 0x03, 0xf2, 0x70, 0x51,
+ 0x79, 0xf1, 0x61, 0x70, 0x15, 0x74, 0x9d, 0x23,
+ 0x89, 0xde, 0xac, 0xfd, 0xde, 0xd0, 0x1f, 0xc3,
+ 0x87, 0x44, 0x35, 0x4b, 0xe5, 0xb0, 0x60, 0xc5,
+ 0x22, 0xe4, 0x9e, 0xca, 0xeb, 0xd5, 0x3a, 0x09,
+ 0x45, 0xa4, 0xdb, 0xfa, 0x3f, 0xeb, 0x1b, 0xc7,
+ 0xc8, 0x14, 0x99, 0x51, 0x92, 0x10, 0xed, 0xed,
+ 0x28, 0xe0, 0xa1, 0xf8, 0x26, 0xcf, 0xcd, 0xcb,
+ 0x63, 0xa1, 0x3b, 0xe3, 0xdf, 0x7e, 0xfe, 0xa6,
+ 0xf0, 0x81, 0x9a, 0xbf, 0x55, 0xde, 0x54, 0xd5,
+ 0x56, 0x60, 0x98, 0x10, 0x68, 0xf4, 0x38, 0x96,
+ 0x8e, 0x6f, 0x1d, 0x44, 0x7f, 0xd6, 0x2f, 0xfe,
+ 0x55, 0xfb, 0x0c, 0x7e, 0x67, 0xe2, 0x61, 0x44,
+ 0xed, 0xf2, 0x35, 0x30, 0x5d, 0xe9, 0xc7, 0xd6,
+ 0x6d, 0xe0, 0xa0, 0xed, 0xf3, 0xfc, 0xd8, 0x3e,
+ 0x0a, 0x7b, 0xcd, 0xaf, 0x65, 0x68, 0x18, 0xc0,
+ 0xec, 0x04, 0x1c, 0x74, 0x6d, 0xe2, 0x6e, 0x79,
+ 0xd4, 0x11, 0x2b, 0x62, 0xd5, 0x27, 0xad, 0x4f,
+ 0x01, 0x59, 0x73, 0xcc, 0x6a, 0x53, 0xfb, 0x2d,
+ 0xd5, 0x4e, 0x99, 0x21, 0x65, 0x4d, 0xf5, 0x82,
+ 0xf7, 0xd8, 0x42, 0xce, 0x6f, 0x3d, 0x36, 0x47,
+ 0xf1, 0x05, 0x16, 0xe8, 0x1b, 0x6a, 0x8f, 0x93,
+ 0xf2, 0x8f, 0x37, 0x40, 0x12, 0x28, 0xa3, 0xe6,
+ 0xb9, 0x17, 0x4a, 0x1f, 0xb1, 0xd1, 0x66, 0x69,
+ 0x86, 0xc4, 0xfc, 0x97, 0xae, 0x3f, 0x8f, 0x1e,
+ 0x2b, 0xdf, 0xcd, 0xf9, 0x3c
+};
+static const u8 enc_assoc011[] __initconst = {
+ 0xd6, 0x31, 0xda, 0x5d, 0x42, 0x5e, 0xd7
+};
+static const u8 enc_nonce011[] __initconst = {
+ 0xfd, 0x87, 0xd4, 0xd8, 0x62, 0xfd, 0xec, 0xaa
+};
+static const u8 enc_key011[] __initconst = {
+ 0x35, 0x4e, 0xb5, 0x70, 0x50, 0x42, 0x8a, 0x85,
+ 0xf2, 0xfb, 0xed, 0x7b, 0xd0, 0x9e, 0x97, 0xca,
+ 0xfa, 0x98, 0x66, 0x63, 0xee, 0x37, 0xcc, 0x52,
+ 0xfe, 0xd1, 0xdf, 0x95, 0x15, 0x34, 0x29, 0x38
+};
+
+static const u8 enc_input012[] __initconst = {
+ 0x74, 0xa6, 0x3e, 0xe4, 0xb1, 0xcb, 0xaf, 0xb0,
+ 0x40, 0xe5, 0x0f, 0x9e, 0xf1, 0xf2, 0x89, 0xb5,
+ 0x42, 0x34, 0x8a, 0xa1, 0x03, 0xb7, 0xe9, 0x57,
+ 0x46, 0xbe, 0x20, 0xe4, 0x6e, 0xb0, 0xeb, 0xff,
+ 0xea, 0x07, 0x7e, 0xef, 0xe2, 0x55, 0x9f, 0xe5,
+ 0x78, 0x3a, 0xb7, 0x83, 0xc2, 0x18, 0x40, 0x7b,
+ 0xeb, 0xcd, 0x81, 0xfb, 0x90, 0x12, 0x9e, 0x46,
+ 0xa9, 0xd6, 0x4a, 0xba, 0xb0, 0x62, 0xdb, 0x6b,
+ 0x99, 0xc4, 0xdb, 0x54, 0x4b, 0xb8, 0xa5, 0x71,
+ 0xcb, 0xcd, 0x63, 0x32, 0x55, 0xfb, 0x31, 0xf0,
+ 0x38, 0xf5, 0xbe, 0x78, 0xe4, 0x45, 0xce, 0x1b,
+ 0x6a, 0x5b, 0x0e, 0xf4, 0x16, 0xe4, 0xb1, 0x3d,
+ 0xf6, 0x63, 0x7b, 0xa7, 0x0c, 0xde, 0x6f, 0x8f,
+ 0x74, 0xdf, 0xe0, 0x1e, 0x9d, 0xce, 0x8f, 0x24,
+ 0xef, 0x23, 0x35, 0x33, 0x7b, 0x83, 0x34, 0x23,
+ 0x58, 0x74, 0x14, 0x77, 0x1f, 0xc2, 0x4f, 0x4e,
+ 0xc6, 0x89, 0xf9, 0x52, 0x09, 0x37, 0x64, 0x14,
+ 0xc4, 0x01, 0x6b, 0x9d, 0x77, 0xe8, 0x90, 0x5d,
+ 0xa8, 0x4a, 0x2a, 0xef, 0x5c, 0x7f, 0xeb, 0xbb,
+ 0xb2, 0xc6, 0x93, 0x99, 0x66, 0xdc, 0x7f, 0xd4,
+ 0x9e, 0x2a, 0xca, 0x8d, 0xdb, 0xe7, 0x20, 0xcf,
+ 0xe4, 0x73, 0xae, 0x49, 0x7d, 0x64, 0x0f, 0x0e,
+ 0x28, 0x46, 0xa9, 0xa8, 0x32, 0xe4, 0x0e, 0xf6,
+ 0x51, 0x53, 0xb8, 0x3c, 0xb1, 0xff, 0xa3, 0x33,
+ 0x41, 0x75, 0xff, 0xf1, 0x6f, 0xf1, 0xfb, 0xbb,
+ 0x83, 0x7f, 0x06, 0x9b, 0xe7, 0x1b, 0x0a, 0xe0,
+ 0x5c, 0x33, 0x60, 0x5b, 0xdb, 0x5b, 0xed, 0xfe,
+ 0xa5, 0x16, 0x19, 0x72, 0xa3, 0x64, 0x23, 0x00,
+ 0x02, 0xc7, 0xf3, 0x6a, 0x81, 0x3e, 0x44, 0x1d,
+ 0x79, 0x15, 0x5f, 0x9a, 0xde, 0xe2, 0xfd, 0x1b,
+ 0x73, 0xc1, 0xbc, 0x23, 0xba, 0x31, 0xd2, 0x50,
+ 0xd5, 0xad, 0x7f, 0x74, 0xa7, 0xc9, 0xf8, 0x3e,
+ 0x2b, 0x26, 0x10, 0xf6, 0x03, 0x36, 0x74, 0xe4,
+ 0x0e, 0x6a, 0x72, 0xb7, 0x73, 0x0a, 0x42, 0x28,
+ 0xc2, 0xad, 0x5e, 0x03, 0xbe, 0xb8, 0x0b, 0xa8,
+ 0x5b, 0xd4, 0xb8, 0xba, 0x52, 0x89, 0xb1, 0x9b,
+ 0xc1, 0xc3, 0x65, 0x87, 0xed, 0xa5, 0xf4, 0x86,
+ 0xfd, 0x41, 0x80, 0x91, 0x27, 0x59, 0x53, 0x67,
+ 0x15, 0x78, 0x54, 0x8b, 0x2d, 0x3d, 0xc7, 0xff,
+ 0x02, 0x92, 0x07, 0x5f, 0x7a, 0x4b, 0x60, 0x59,
+ 0x3c, 0x6f, 0x5c, 0xd8, 0xec, 0x95, 0xd2, 0xfe,
+ 0xa0, 0x3b, 0xd8, 0x3f, 0xd1, 0x69, 0xa6, 0xd6,
+ 0x41, 0xb2, 0xf4, 0x4d, 0x12, 0xf4, 0x58, 0x3e,
+ 0x66, 0x64, 0x80, 0x31, 0x9b, 0xa8, 0x4c, 0x8b,
+ 0x07, 0xb2, 0xec, 0x66, 0x94, 0x66, 0x47, 0x50,
+ 0x50, 0x5f, 0x18, 0x0b, 0x0e, 0xd6, 0xc0, 0x39,
+ 0x21, 0x13, 0x9e, 0x33, 0xbc, 0x79, 0x36, 0x02,
+ 0x96, 0x70, 0xf0, 0x48, 0x67, 0x2f, 0x26, 0xe9,
+ 0x6d, 0x10, 0xbb, 0xd6, 0x3f, 0xd1, 0x64, 0x7a,
+ 0x2e, 0xbe, 0x0c, 0x61, 0xf0, 0x75, 0x42, 0x38,
+ 0x23, 0xb1, 0x9e, 0x9f, 0x7c, 0x67, 0x66, 0xd9,
+ 0x58, 0x9a, 0xf1, 0xbb, 0x41, 0x2a, 0x8d, 0x65,
+ 0x84, 0x94, 0xfc, 0xdc, 0x6a, 0x50, 0x64, 0xdb,
+ 0x56, 0x33, 0x76, 0x00, 0x10, 0xed, 0xbe, 0xd2,
+ 0x12, 0xf6, 0xf6, 0x1b, 0xa2, 0x16, 0xde, 0xae,
+ 0x31, 0x95, 0xdd, 0xb1, 0x08, 0x7e, 0x4e, 0xee,
+ 0xe7, 0xf9, 0xa5, 0xfb, 0x5b, 0x61, 0x43, 0x00,
+ 0x40, 0xf6, 0x7e, 0x02, 0x04, 0x32, 0x4e, 0x0c,
+ 0xe2, 0x66, 0x0d, 0xd7, 0x07, 0x98, 0x0e, 0xf8,
+ 0x72, 0x34, 0x6d, 0x95, 0x86, 0xd7, 0xcb, 0x31,
+ 0x54, 0x47, 0xd0, 0x38, 0x29, 0x9c, 0x5a, 0x68,
+ 0xd4, 0x87, 0x76, 0xc9, 0xe7, 0x7e, 0xe3, 0xf4,
+ 0x81, 0x6d, 0x18, 0xcb, 0xc9, 0x05, 0xaf, 0xa0,
+ 0xfb, 0x66, 0xf7, 0xf1, 0x1c, 0xc6, 0x14, 0x11,
+ 0x4f, 0x2b, 0x79, 0x42, 0x8b, 0xbc, 0xac, 0xe7,
+ 0x6c, 0xfe, 0x0f, 0x58, 0xe7, 0x7c, 0x78, 0x39,
+ 0x30, 0xb0, 0x66, 0x2c, 0x9b, 0x6d, 0x3a, 0xe1,
+ 0xcf, 0xc9, 0xa4, 0x0e, 0x6d, 0x6d, 0x8a, 0xa1,
+ 0x3a, 0xe7, 0x28, 0xd4, 0x78, 0x4c, 0xa6, 0xa2,
+ 0x2a, 0xa6, 0x03, 0x30, 0xd7, 0xa8, 0x25, 0x66,
+ 0x87, 0x2f, 0x69, 0x5c, 0x4e, 0xdd, 0xa5, 0x49,
+ 0x5d, 0x37, 0x4a, 0x59, 0xc4, 0xaf, 0x1f, 0xa2,
+ 0xe4, 0xf8, 0xa6, 0x12, 0x97, 0xd5, 0x79, 0xf5,
+ 0xe2, 0x4a, 0x2b, 0x5f, 0x61, 0xe4, 0x9e, 0xe3,
+ 0xee, 0xb8, 0xa7, 0x5b, 0x2f, 0xf4, 0x9e, 0x6c,
+ 0xfb, 0xd1, 0xc6, 0x56, 0x77, 0xba, 0x75, 0xaa,
+ 0x3d, 0x1a, 0xa8, 0x0b, 0xb3, 0x68, 0x24, 0x00,
+ 0x10, 0x7f, 0xfd, 0xd7, 0xa1, 0x8d, 0x83, 0x54,
+ 0x4f, 0x1f, 0xd8, 0x2a, 0xbe, 0x8a, 0x0c, 0x87,
+ 0xab, 0xa2, 0xde, 0xc3, 0x39, 0xbf, 0x09, 0x03,
+ 0xa5, 0xf3, 0x05, 0x28, 0xe1, 0xe1, 0xee, 0x39,
+ 0x70, 0x9c, 0xd8, 0x81, 0x12, 0x1e, 0x02, 0x40,
+ 0xd2, 0x6e, 0xf0, 0xeb, 0x1b, 0x3d, 0x22, 0xc6,
+ 0xe5, 0xe3, 0xb4, 0x5a, 0x98, 0xbb, 0xf0, 0x22,
+ 0x28, 0x8d, 0xe5, 0xd3, 0x16, 0x48, 0x24, 0xa5,
+ 0xe6, 0x66, 0x0c, 0xf9, 0x08, 0xf9, 0x7e, 0x1e,
+ 0xe1, 0x28, 0x26, 0x22, 0xc7, 0xc7, 0x0a, 0x32,
+ 0x47, 0xfa, 0xa3, 0xbe, 0x3c, 0xc4, 0xc5, 0x53,
+ 0x0a, 0xd5, 0x94, 0x4a, 0xd7, 0x93, 0xd8, 0x42,
+ 0x99, 0xb9, 0x0a, 0xdb, 0x56, 0xf7, 0xb9, 0x1c,
+ 0x53, 0x4f, 0xfa, 0xd3, 0x74, 0xad, 0xd9, 0x68,
+ 0xf1, 0x1b, 0xdf, 0x61, 0xc6, 0x5e, 0xa8, 0x48,
+ 0xfc, 0xd4, 0x4a, 0x4c, 0x3c, 0x32, 0xf7, 0x1c,
+ 0x96, 0x21, 0x9b, 0xf9, 0xa3, 0xcc, 0x5a, 0xce,
+ 0xd5, 0xd7, 0x08, 0x24, 0xf6, 0x1c, 0xfd, 0xdd,
+ 0x38, 0xc2, 0x32, 0xe9, 0xb8, 0xe7, 0xb6, 0xfa,
+ 0x9d, 0x45, 0x13, 0x2c, 0x83, 0xfd, 0x4a, 0x69,
+ 0x82, 0xcd, 0xdc, 0xb3, 0x76, 0x0c, 0x9e, 0xd8,
+ 0xf4, 0x1b, 0x45, 0x15, 0xb4, 0x97, 0xe7, 0x58,
+ 0x34, 0xe2, 0x03, 0x29, 0x5a, 0xbf, 0xb6, 0xe0,
+ 0x5d, 0x13, 0xd9, 0x2b, 0xb4, 0x80, 0xb2, 0x45,
+ 0x81, 0x6a, 0x2e, 0x6c, 0x89, 0x7d, 0xee, 0xbb,
+ 0x52, 0xdd, 0x1f, 0x18, 0xe7, 0x13, 0x6b, 0x33,
+ 0x0e, 0xea, 0x36, 0x92, 0x77, 0x7b, 0x6d, 0x9c,
+ 0x5a, 0x5f, 0x45, 0x7b, 0x7b, 0x35, 0x62, 0x23,
+ 0xd1, 0xbf, 0x0f, 0xd0, 0x08, 0x1b, 0x2b, 0x80,
+ 0x6b, 0x7e, 0xf1, 0x21, 0x47, 0xb0, 0x57, 0xd1,
+ 0x98, 0x72, 0x90, 0x34, 0x1c, 0x20, 0x04, 0xff,
+ 0x3d, 0x5c, 0xee, 0x0e, 0x57, 0x5f, 0x6f, 0x24,
+ 0x4e, 0x3c, 0xea, 0xfc, 0xa5, 0xa9, 0x83, 0xc9,
+ 0x61, 0xb4, 0x51, 0x24, 0xf8, 0x27, 0x5e, 0x46,
+ 0x8c, 0xb1, 0x53, 0x02, 0x96, 0x35, 0xba, 0xb8,
+ 0x4c, 0x71, 0xd3, 0x15, 0x59, 0x35, 0x22, 0x20,
+ 0xad, 0x03, 0x9f, 0x66, 0x44, 0x3b, 0x9c, 0x35,
+ 0x37, 0x1f, 0x9b, 0xbb, 0xf3, 0xdb, 0x35, 0x63,
+ 0x30, 0x64, 0xaa, 0xa2, 0x06, 0xa8, 0x5d, 0xbb,
+ 0xe1, 0x9f, 0x70, 0xec, 0x82, 0x11, 0x06, 0x36,
+ 0xec, 0x8b, 0x69, 0x66, 0x24, 0x44, 0xc9, 0x4a,
+ 0x57, 0xbb, 0x9b, 0x78, 0x13, 0xce, 0x9c, 0x0c,
+ 0xba, 0x92, 0x93, 0x63, 0xb8, 0xe2, 0x95, 0x0f,
+ 0x0f, 0x16, 0x39, 0x52, 0xfd, 0x3a, 0x6d, 0x02,
+ 0x4b, 0xdf, 0x13, 0xd3, 0x2a, 0x22, 0xb4, 0x03,
+ 0x7c, 0x54, 0x49, 0x96, 0x68, 0x54, 0x10, 0xfa,
+ 0xef, 0xaa, 0x6c, 0xe8, 0x22, 0xdc, 0x71, 0x16,
+ 0x13, 0x1a, 0xf6, 0x28, 0xe5, 0x6d, 0x77, 0x3d,
+ 0xcd, 0x30, 0x63, 0xb1, 0x70, 0x52, 0xa1, 0xc5,
+ 0x94, 0x5f, 0xcf, 0xe8, 0xb8, 0x26, 0x98, 0xf7,
+ 0x06, 0xa0, 0x0a, 0x70, 0xfa, 0x03, 0x80, 0xac,
+ 0xc1, 0xec, 0xd6, 0x4c, 0x54, 0xd7, 0xfe, 0x47,
+ 0xb6, 0x88, 0x4a, 0xf7, 0x71, 0x24, 0xee, 0xf3,
+ 0xd2, 0xc2, 0x4a, 0x7f, 0xfe, 0x61, 0xc7, 0x35,
+ 0xc9, 0x37, 0x67, 0xcb, 0x24, 0x35, 0xda, 0x7e,
+ 0xca, 0x5f, 0xf3, 0x8d, 0xd4, 0x13, 0x8e, 0xd6,
+ 0xcb, 0x4d, 0x53, 0x8f, 0x53, 0x1f, 0xc0, 0x74,
+ 0xf7, 0x53, 0xb9, 0x5e, 0x23, 0x37, 0xba, 0x6e,
+ 0xe3, 0x9d, 0x07, 0x55, 0x25, 0x7b, 0xe6, 0x2a,
+ 0x64, 0xd1, 0x32, 0xdd, 0x54, 0x1b, 0x4b, 0xc0,
+ 0xe1, 0xd7, 0x69, 0x58, 0xf8, 0x93, 0x29, 0xc4,
+ 0xdd, 0x23, 0x2f, 0xa5, 0xfc, 0x9d, 0x7e, 0xf8,
+ 0xd4, 0x90, 0xcd, 0x82, 0x55, 0xdc, 0x16, 0x16,
+ 0x9f, 0x07, 0x52, 0x9b, 0x9d, 0x25, 0xed, 0x32,
+ 0xc5, 0x7b, 0xdf, 0xf6, 0x83, 0x46, 0x3d, 0x65,
+ 0xb7, 0xef, 0x87, 0x7a, 0x12, 0x69, 0x8f, 0x06,
+ 0x7c, 0x51, 0x15, 0x4a, 0x08, 0xe8, 0xac, 0x9a,
+ 0x0c, 0x24, 0xa7, 0x27, 0xd8, 0x46, 0x2f, 0xe7,
+ 0x01, 0x0e, 0x1c, 0xc6, 0x91, 0xb0, 0x6e, 0x85,
+ 0x65, 0xf0, 0x29, 0x0d, 0x2e, 0x6b, 0x3b, 0xfb,
+ 0x4b, 0xdf, 0xe4, 0x80, 0x93, 0x03, 0x66, 0x46,
+ 0x3e, 0x8a, 0x6e, 0xf3, 0x5e, 0x4d, 0x62, 0x0e,
+ 0x49, 0x05, 0xaf, 0xd4, 0xf8, 0x21, 0x20, 0x61,
+ 0x1d, 0x39, 0x17, 0xf4, 0x61, 0x47, 0x95, 0xfb,
+ 0x15, 0x2e, 0xb3, 0x4f, 0xd0, 0x5d, 0xf5, 0x7d,
+ 0x40, 0xda, 0x90, 0x3c, 0x6b, 0xcb, 0x17, 0x00,
+ 0x13, 0x3b, 0x64, 0x34, 0x1b, 0xf0, 0xf2, 0xe5,
+ 0x3b, 0xb2, 0xc7, 0xd3, 0x5f, 0x3a, 0x44, 0xa6,
+ 0x9b, 0xb7, 0x78, 0x0e, 0x42, 0x5d, 0x4c, 0xc1,
+ 0xe9, 0xd2, 0xcb, 0xb7, 0x78, 0xd1, 0xfe, 0x9a,
+ 0xb5, 0x07, 0xe9, 0xe0, 0xbe, 0xe2, 0x8a, 0xa7,
+ 0x01, 0x83, 0x00, 0x8c, 0x5c, 0x08, 0xe6, 0x63,
+ 0x12, 0x92, 0xb7, 0xb7, 0xa6, 0x19, 0x7d, 0x38,
+ 0x13, 0x38, 0x92, 0x87, 0x24, 0xf9, 0x48, 0xb3,
+ 0x5e, 0x87, 0x6a, 0x40, 0x39, 0x5c, 0x3f, 0xed,
+ 0x8f, 0xee, 0xdb, 0x15, 0x82, 0x06, 0xda, 0x49,
+ 0x21, 0x2b, 0xb5, 0xbf, 0x32, 0x7c, 0x9f, 0x42,
+ 0x28, 0x63, 0xcf, 0xaf, 0x1e, 0xf8, 0xc6, 0xa0,
+ 0xd1, 0x02, 0x43, 0x57, 0x62, 0xec, 0x9b, 0x0f,
+ 0x01, 0x9e, 0x71, 0xd8, 0x87, 0x9d, 0x01, 0xc1,
+ 0x58, 0x77, 0xd9, 0xaf, 0xb1, 0x10, 0x7e, 0xdd,
+ 0xa6, 0x50, 0x96, 0xe5, 0xf0, 0x72, 0x00, 0x6d,
+ 0x4b, 0xf8, 0x2a, 0x8f, 0x19, 0xf3, 0x22, 0x88,
+ 0x11, 0x4a, 0x8b, 0x7c, 0xfd, 0xb7, 0xed, 0xe1,
+ 0xf6, 0x40, 0x39, 0xe0, 0xe9, 0xf6, 0x3d, 0x25,
+ 0xe6, 0x74, 0x3c, 0x58, 0x57, 0x7f, 0xe1, 0x22,
+ 0x96, 0x47, 0x31, 0x91, 0xba, 0x70, 0x85, 0x28,
+ 0x6b, 0x9f, 0x6e, 0x25, 0xac, 0x23, 0x66, 0x2f,
+ 0x29, 0x88, 0x28, 0xce, 0x8c, 0x5c, 0x88, 0x53,
+ 0xd1, 0x3b, 0xcc, 0x6a, 0x51, 0xb2, 0xe1, 0x28,
+ 0x3f, 0x91, 0xb4, 0x0d, 0x00, 0x3a, 0xe3, 0xf8,
+ 0xc3, 0x8f, 0xd7, 0x96, 0x62, 0x0e, 0x2e, 0xfc,
+ 0xc8, 0x6c, 0x77, 0xa6, 0x1d, 0x22, 0xc1, 0xb8,
+ 0xe6, 0x61, 0xd7, 0x67, 0x36, 0x13, 0x7b, 0xbb,
+ 0x9b, 0x59, 0x09, 0xa6, 0xdf, 0xf7, 0x6b, 0xa3,
+ 0x40, 0x1a, 0xf5, 0x4f, 0xb4, 0xda, 0xd3, 0xf3,
+ 0x81, 0x93, 0xc6, 0x18, 0xd9, 0x26, 0xee, 0xac,
+ 0xf0, 0xaa, 0xdf, 0xc5, 0x9c, 0xca, 0xc2, 0xa2,
+ 0xcc, 0x7b, 0x5c, 0x24, 0xb0, 0xbc, 0xd0, 0x6a,
+ 0x4d, 0x89, 0x09, 0xb8, 0x07, 0xfe, 0x87, 0xad,
+ 0x0a, 0xea, 0xb8, 0x42, 0xf9, 0x5e, 0xb3, 0x3e,
+ 0x36, 0x4c, 0xaf, 0x75, 0x9e, 0x1c, 0xeb, 0xbd,
+ 0xbc, 0xbb, 0x80, 0x40, 0xa7, 0x3a, 0x30, 0xbf,
+ 0xa8, 0x44, 0xf4, 0xeb, 0x38, 0xad, 0x29, 0xba,
+ 0x23, 0xed, 0x41, 0x0c, 0xea, 0xd2, 0xbb, 0x41,
+ 0x18, 0xd6, 0xb9, 0xba, 0x65, 0x2b, 0xa3, 0x91,
+ 0x6d, 0x1f, 0xa9, 0xf4, 0xd1, 0x25, 0x8d, 0x4d,
+ 0x38, 0xff, 0x64, 0xa0, 0xec, 0xde, 0xa6, 0xb6,
+ 0x79, 0xab, 0x8e, 0x33, 0x6c, 0x47, 0xde, 0xaf,
+ 0x94, 0xa4, 0xa5, 0x86, 0x77, 0x55, 0x09, 0x92,
+ 0x81, 0x31, 0x76, 0xc7, 0x34, 0x22, 0x89, 0x8e,
+ 0x3d, 0x26, 0x26, 0xd7, 0xfc, 0x1e, 0x16, 0x72,
+ 0x13, 0x33, 0x63, 0xd5, 0x22, 0xbe, 0xb8, 0x04,
+ 0x34, 0x84, 0x41, 0xbb, 0x80, 0xd0, 0x9f, 0x46,
+ 0x48, 0x07, 0xa7, 0xfc, 0x2b, 0x3a, 0x75, 0x55,
+ 0x8c, 0xc7, 0x6a, 0xbd, 0x7e, 0x46, 0x08, 0x84,
+ 0x0f, 0xd5, 0x74, 0xc0, 0x82, 0x8e, 0xaa, 0x61,
+ 0x05, 0x01, 0xb2, 0x47, 0x6e, 0x20, 0x6a, 0x2d,
+ 0x58, 0x70, 0x48, 0x32, 0xa7, 0x37, 0xd2, 0xb8,
+ 0x82, 0x1a, 0x51, 0xb9, 0x61, 0xdd, 0xfd, 0x9d,
+ 0x6b, 0x0e, 0x18, 0x97, 0xf8, 0x45, 0x5f, 0x87,
+ 0x10, 0xcf, 0x34, 0x72, 0x45, 0x26, 0x49, 0x70,
+ 0xe7, 0xa3, 0x78, 0xe0, 0x52, 0x89, 0x84, 0x94,
+ 0x83, 0x82, 0xc2, 0x69, 0x8f, 0xe3, 0xe1, 0x3f,
+ 0x60, 0x74, 0x88, 0xc4, 0xf7, 0x75, 0x2c, 0xfb,
+ 0xbd, 0xb6, 0xc4, 0x7e, 0x10, 0x0a, 0x6c, 0x90,
+ 0x04, 0x9e, 0xc3, 0x3f, 0x59, 0x7c, 0xce, 0x31,
+ 0x18, 0x60, 0x57, 0x73, 0x46, 0x94, 0x7d, 0x06,
+ 0xa0, 0x6d, 0x44, 0xec, 0xa2, 0x0a, 0x9e, 0x05,
+ 0x15, 0xef, 0xca, 0x5c, 0xbf, 0x00, 0xeb, 0xf7,
+ 0x3d, 0x32, 0xd4, 0xa5, 0xef, 0x49, 0x89, 0x5e,
+ 0x46, 0xb0, 0xa6, 0x63, 0x5b, 0x8a, 0x73, 0xae,
+ 0x6f, 0xd5, 0x9d, 0xf8, 0x4f, 0x40, 0xb5, 0xb2,
+ 0x6e, 0xd3, 0xb6, 0x01, 0xa9, 0x26, 0xa2, 0x21,
+ 0xcf, 0x33, 0x7a, 0x3a, 0xa4, 0x23, 0x13, 0xb0,
+ 0x69, 0x6a, 0xee, 0xce, 0xd8, 0x9d, 0x01, 0x1d,
+ 0x50, 0xc1, 0x30, 0x6c, 0xb1, 0xcd, 0xa0, 0xf0,
+ 0xf0, 0xa2, 0x64, 0x6f, 0xbb, 0xbf, 0x5e, 0xe6,
+ 0xab, 0x87, 0xb4, 0x0f, 0x4f, 0x15, 0xaf, 0xb5,
+ 0x25, 0xa1, 0xb2, 0xd0, 0x80, 0x2c, 0xfb, 0xf9,
+ 0xfe, 0xd2, 0x33, 0xbb, 0x76, 0xfe, 0x7c, 0xa8,
+ 0x66, 0xf7, 0xe7, 0x85, 0x9f, 0x1f, 0x85, 0x57,
+ 0x88, 0xe1, 0xe9, 0x63, 0xe4, 0xd8, 0x1c, 0xa1,
+ 0xfb, 0xda, 0x44, 0x05, 0x2e, 0x1d, 0x3a, 0x1c,
+ 0xff, 0xc8, 0x3b, 0xc0, 0xfe, 0xda, 0x22, 0x0b,
+ 0x43, 0xd6, 0x88, 0x39, 0x4c, 0x4a, 0xa6, 0x69,
+ 0x18, 0x93, 0x42, 0x4e, 0xb5, 0xcc, 0x66, 0x0d,
+ 0x09, 0xf8, 0x1e, 0x7c, 0xd3, 0x3c, 0x99, 0x0d,
+ 0x50, 0x1d, 0x62, 0xe9, 0x57, 0x06, 0xbf, 0x19,
+ 0x88, 0xdd, 0xad, 0x7b, 0x4f, 0xf9, 0xc7, 0x82,
+ 0x6d, 0x8d, 0xc8, 0xc4, 0xc5, 0x78, 0x17, 0x20,
+ 0x15, 0xc5, 0x52, 0x41, 0xcf, 0x5b, 0xd6, 0x7f,
+ 0x94, 0x02, 0x41, 0xe0, 0x40, 0x22, 0x03, 0x5e,
+ 0xd1, 0x53, 0xd4, 0x86, 0xd3, 0x2c, 0x9f, 0x0f,
+ 0x96, 0xe3, 0x6b, 0x9a, 0x76, 0x32, 0x06, 0x47,
+ 0x4b, 0x11, 0xb3, 0xdd, 0x03, 0x65, 0xbd, 0x9b,
+ 0x01, 0xda, 0x9c, 0xb9, 0x7e, 0x3f, 0x6a, 0xc4,
+ 0x7b, 0xea, 0xd4, 0x3c, 0xb9, 0xfb, 0x5c, 0x6b,
+ 0x64, 0x33, 0x52, 0xba, 0x64, 0x78, 0x8f, 0xa4,
+ 0xaf, 0x7a, 0x61, 0x8d, 0xbc, 0xc5, 0x73, 0xe9,
+ 0x6b, 0x58, 0x97, 0x4b, 0xbf, 0x63, 0x22, 0xd3,
+ 0x37, 0x02, 0x54, 0xc5, 0xb9, 0x16, 0x4a, 0xf0,
+ 0x19, 0xd8, 0x94, 0x57, 0xb8, 0x8a, 0xb3, 0x16,
+ 0x3b, 0xd0, 0x84, 0x8e, 0x67, 0xa6, 0xa3, 0x7d,
+ 0x78, 0xec, 0x00
+};
+static const u8 enc_output012[] __initconst = {
+ 0x52, 0x34, 0xb3, 0x65, 0x3b, 0xb7, 0xe5, 0xd3,
+ 0xab, 0x49, 0x17, 0x60, 0xd2, 0x52, 0x56, 0xdf,
+ 0xdf, 0x34, 0x56, 0x82, 0xe2, 0xbe, 0xe5, 0xe1,
+ 0x28, 0xd1, 0x4e, 0x5f, 0x4f, 0x01, 0x7d, 0x3f,
+ 0x99, 0x6b, 0x30, 0x6e, 0x1a, 0x7c, 0x4c, 0x8e,
+ 0x62, 0x81, 0xae, 0x86, 0x3f, 0x6b, 0xd0, 0xb5,
+ 0xa9, 0xcf, 0x50, 0xf1, 0x02, 0x12, 0xa0, 0x0b,
+ 0x24, 0xe9, 0xe6, 0x72, 0x89, 0x2c, 0x52, 0x1b,
+ 0x34, 0x38, 0xf8, 0x75, 0x5f, 0xa0, 0x74, 0xe2,
+ 0x99, 0xdd, 0xa6, 0x4b, 0x14, 0x50, 0x4e, 0xf1,
+ 0xbe, 0xd6, 0x9e, 0xdb, 0xb2, 0x24, 0x27, 0x74,
+ 0x12, 0x4a, 0x78, 0x78, 0x17, 0xa5, 0x58, 0x8e,
+ 0x2f, 0xf9, 0xf4, 0x8d, 0xee, 0x03, 0x88, 0xae,
+ 0xb8, 0x29, 0xa1, 0x2f, 0x4b, 0xee, 0x92, 0xbd,
+ 0x87, 0xb3, 0xce, 0x34, 0x21, 0x57, 0x46, 0x04,
+ 0x49, 0x0c, 0x80, 0xf2, 0x01, 0x13, 0xa1, 0x55,
+ 0xb3, 0xff, 0x44, 0x30, 0x3c, 0x1c, 0xd0, 0xef,
+ 0xbc, 0x18, 0x74, 0x26, 0xad, 0x41, 0x5b, 0x5b,
+ 0x3e, 0x9a, 0x7a, 0x46, 0x4f, 0x16, 0xd6, 0x74,
+ 0x5a, 0xb7, 0x3a, 0x28, 0x31, 0xd8, 0xae, 0x26,
+ 0xac, 0x50, 0x53, 0x86, 0xf2, 0x56, 0xd7, 0x3f,
+ 0x29, 0xbc, 0x45, 0x68, 0x8e, 0xcb, 0x98, 0x64,
+ 0xdd, 0xc9, 0xba, 0xb8, 0x4b, 0x7b, 0x82, 0xdd,
+ 0x14, 0xa7, 0xcb, 0x71, 0x72, 0x00, 0x5c, 0xad,
+ 0x7b, 0x6a, 0x89, 0xa4, 0x3d, 0xbf, 0xb5, 0x4b,
+ 0x3e, 0x7c, 0x5a, 0xcf, 0xb8, 0xa1, 0xc5, 0x6e,
+ 0xc8, 0xb6, 0x31, 0x57, 0x7b, 0xdf, 0xa5, 0x7e,
+ 0xb1, 0xd6, 0x42, 0x2a, 0x31, 0x36, 0xd1, 0xd0,
+ 0x3f, 0x7a, 0xe5, 0x94, 0xd6, 0x36, 0xa0, 0x6f,
+ 0xb7, 0x40, 0x7d, 0x37, 0xc6, 0x55, 0x7c, 0x50,
+ 0x40, 0x6d, 0x29, 0x89, 0xe3, 0x5a, 0xae, 0x97,
+ 0xe7, 0x44, 0x49, 0x6e, 0xbd, 0x81, 0x3d, 0x03,
+ 0x93, 0x06, 0x12, 0x06, 0xe2, 0x41, 0x12, 0x4a,
+ 0xf1, 0x6a, 0xa4, 0x58, 0xa2, 0xfb, 0xd2, 0x15,
+ 0xba, 0xc9, 0x79, 0xc9, 0xce, 0x5e, 0x13, 0xbb,
+ 0xf1, 0x09, 0x04, 0xcc, 0xfd, 0xe8, 0x51, 0x34,
+ 0x6a, 0xe8, 0x61, 0x88, 0xda, 0xed, 0x01, 0x47,
+ 0x84, 0xf5, 0x73, 0x25, 0xf9, 0x1c, 0x42, 0x86,
+ 0x07, 0xf3, 0x5b, 0x1a, 0x01, 0xb3, 0xeb, 0x24,
+ 0x32, 0x8d, 0xf6, 0xed, 0x7c, 0x4b, 0xeb, 0x3c,
+ 0x36, 0x42, 0x28, 0xdf, 0xdf, 0xb6, 0xbe, 0xd9,
+ 0x8c, 0x52, 0xd3, 0x2b, 0x08, 0x90, 0x8c, 0xe7,
+ 0x98, 0x31, 0xe2, 0x32, 0x8e, 0xfc, 0x11, 0x48,
+ 0x00, 0xa8, 0x6a, 0x42, 0x4a, 0x02, 0xc6, 0x4b,
+ 0x09, 0xf1, 0xe3, 0x49, 0xf3, 0x45, 0x1f, 0x0e,
+ 0xbc, 0x56, 0xe2, 0xe4, 0xdf, 0xfb, 0xeb, 0x61,
+ 0xfa, 0x24, 0xc1, 0x63, 0x75, 0xbb, 0x47, 0x75,
+ 0xaf, 0xe1, 0x53, 0x16, 0x96, 0x21, 0x85, 0x26,
+ 0x11, 0xb3, 0x76, 0xe3, 0x23, 0xa1, 0x6b, 0x74,
+ 0x37, 0xd0, 0xde, 0x06, 0x90, 0x71, 0x5d, 0x43,
+ 0x88, 0x9b, 0x00, 0x54, 0xa6, 0x75, 0x2f, 0xa1,
+ 0xc2, 0x0b, 0x73, 0x20, 0x1d, 0xb6, 0x21, 0x79,
+ 0x57, 0x3f, 0xfa, 0x09, 0xbe, 0x8a, 0x33, 0xc3,
+ 0x52, 0xf0, 0x1d, 0x82, 0x31, 0xd1, 0x55, 0xb5,
+ 0x6c, 0x99, 0x25, 0xcf, 0x5c, 0x32, 0xce, 0xe9,
+ 0x0d, 0xfa, 0x69, 0x2c, 0xd5, 0x0d, 0xc5, 0x6d,
+ 0x86, 0xd0, 0x0c, 0x3b, 0x06, 0x50, 0x79, 0xe8,
+ 0xc3, 0xae, 0x04, 0xe6, 0xcd, 0x51, 0xe4, 0x26,
+ 0x9b, 0x4f, 0x7e, 0xa6, 0x0f, 0xab, 0xd8, 0xe5,
+ 0xde, 0xa9, 0x00, 0x95, 0xbe, 0xa3, 0x9d, 0x5d,
+ 0xb2, 0x09, 0x70, 0x18, 0x1c, 0xf0, 0xac, 0x29,
+ 0x23, 0x02, 0x29, 0x28, 0xd2, 0x74, 0x35, 0x57,
+ 0x62, 0x0f, 0x24, 0xea, 0x5e, 0x33, 0xc2, 0x92,
+ 0xf3, 0x78, 0x4d, 0x30, 0x1e, 0xa1, 0x99, 0xa9,
+ 0x82, 0xb0, 0x42, 0x31, 0x8d, 0xad, 0x8a, 0xbc,
+ 0xfc, 0xd4, 0x57, 0x47, 0x3e, 0xb4, 0x50, 0xdd,
+ 0x6e, 0x2c, 0x80, 0x4d, 0x22, 0xf1, 0xfb, 0x57,
+ 0xc4, 0xdd, 0x17, 0xe1, 0x8a, 0x36, 0x4a, 0xb3,
+ 0x37, 0xca, 0xc9, 0x4e, 0xab, 0xd5, 0x69, 0xc4,
+ 0xf4, 0xbc, 0x0b, 0x3b, 0x44, 0x4b, 0x29, 0x9c,
+ 0xee, 0xd4, 0x35, 0x22, 0x21, 0xb0, 0x1f, 0x27,
+ 0x64, 0xa8, 0x51, 0x1b, 0xf0, 0x9f, 0x19, 0x5c,
+ 0xfb, 0x5a, 0x64, 0x74, 0x70, 0x45, 0x09, 0xf5,
+ 0x64, 0xfe, 0x1a, 0x2d, 0xc9, 0x14, 0x04, 0x14,
+ 0xcf, 0xd5, 0x7d, 0x60, 0xaf, 0x94, 0x39, 0x94,
+ 0xe2, 0x7d, 0x79, 0x82, 0xd0, 0x65, 0x3b, 0x6b,
+ 0x9c, 0x19, 0x84, 0xb4, 0x6d, 0xb3, 0x0c, 0x99,
+ 0xc0, 0x56, 0xa8, 0xbd, 0x73, 0xce, 0x05, 0x84,
+ 0x3e, 0x30, 0xaa, 0xc4, 0x9b, 0x1b, 0x04, 0x2a,
+ 0x9f, 0xd7, 0x43, 0x2b, 0x23, 0xdf, 0xbf, 0xaa,
+ 0xd5, 0xc2, 0x43, 0x2d, 0x70, 0xab, 0xdc, 0x75,
+ 0xad, 0xac, 0xf7, 0xc0, 0xbe, 0x67, 0xb2, 0x74,
+ 0xed, 0x67, 0x10, 0x4a, 0x92, 0x60, 0xc1, 0x40,
+ 0x50, 0x19, 0x8a, 0x8a, 0x8c, 0x09, 0x0e, 0x72,
+ 0xe1, 0x73, 0x5e, 0xe8, 0x41, 0x85, 0x63, 0x9f,
+ 0x3f, 0xd7, 0x7d, 0xc4, 0xfb, 0x22, 0x5d, 0x92,
+ 0x6c, 0xb3, 0x1e, 0xe2, 0x50, 0x2f, 0x82, 0xa8,
+ 0x28, 0xc0, 0xb5, 0xd7, 0x5f, 0x68, 0x0d, 0x2c,
+ 0x2d, 0xaf, 0x7e, 0xfa, 0x2e, 0x08, 0x0f, 0x1f,
+ 0x70, 0x9f, 0xe9, 0x19, 0x72, 0x55, 0xf8, 0xfb,
+ 0x51, 0xd2, 0x33, 0x5d, 0xa0, 0xd3, 0x2b, 0x0a,
+ 0x6c, 0xbc, 0x4e, 0xcf, 0x36, 0x4d, 0xdc, 0x3b,
+ 0xe9, 0x3e, 0x81, 0x7c, 0x61, 0xdb, 0x20, 0x2d,
+ 0x3a, 0xc3, 0xb3, 0x0c, 0x1e, 0x00, 0xb9, 0x7c,
+ 0xf5, 0xca, 0x10, 0x5f, 0x3a, 0x71, 0xb3, 0xe4,
+ 0x20, 0xdb, 0x0c, 0x2a, 0x98, 0x63, 0x45, 0x00,
+ 0x58, 0xf6, 0x68, 0xe4, 0x0b, 0xda, 0x13, 0x3b,
+ 0x60, 0x5c, 0x76, 0xdb, 0xb9, 0x97, 0x71, 0xe4,
+ 0xd9, 0xb7, 0xdb, 0xbd, 0x68, 0xc7, 0x84, 0x84,
+ 0xaa, 0x7c, 0x68, 0x62, 0x5e, 0x16, 0xfc, 0xba,
+ 0x72, 0xaa, 0x9a, 0xa9, 0xeb, 0x7c, 0x75, 0x47,
+ 0x97, 0x7e, 0xad, 0xe2, 0xd9, 0x91, 0xe8, 0xe4,
+ 0xa5, 0x31, 0xd7, 0x01, 0x8e, 0xa2, 0x11, 0x88,
+ 0x95, 0xb9, 0xf2, 0x9b, 0xd3, 0x7f, 0x1b, 0x81,
+ 0x22, 0xf7, 0x98, 0x60, 0x0a, 0x64, 0xa6, 0xc1,
+ 0xf6, 0x49, 0xc7, 0xe3, 0x07, 0x4d, 0x94, 0x7a,
+ 0xcf, 0x6e, 0x68, 0x0c, 0x1b, 0x3f, 0x6e, 0x2e,
+ 0xee, 0x92, 0xfa, 0x52, 0xb3, 0x59, 0xf8, 0xf1,
+ 0x8f, 0x6a, 0x66, 0xa3, 0x82, 0x76, 0x4a, 0x07,
+ 0x1a, 0xc7, 0xdd, 0xf5, 0xda, 0x9c, 0x3c, 0x24,
+ 0xbf, 0xfd, 0x42, 0xa1, 0x10, 0x64, 0x6a, 0x0f,
+ 0x89, 0xee, 0x36, 0xa5, 0xce, 0x99, 0x48, 0x6a,
+ 0xf0, 0x9f, 0x9e, 0x69, 0xa4, 0x40, 0x20, 0xe9,
+ 0x16, 0x15, 0xf7, 0xdb, 0x75, 0x02, 0xcb, 0xe9,
+ 0x73, 0x8b, 0x3b, 0x49, 0x2f, 0xf0, 0xaf, 0x51,
+ 0x06, 0x5c, 0xdf, 0x27, 0x27, 0x49, 0x6a, 0xd1,
+ 0xcc, 0xc7, 0xb5, 0x63, 0xb5, 0xfc, 0xb8, 0x5c,
+ 0x87, 0x7f, 0x84, 0xb4, 0xcc, 0x14, 0xa9, 0x53,
+ 0xda, 0xa4, 0x56, 0xf8, 0xb6, 0x1b, 0xcc, 0x40,
+ 0x27, 0x52, 0x06, 0x5a, 0x13, 0x81, 0xd7, 0x3a,
+ 0xd4, 0x3b, 0xfb, 0x49, 0x65, 0x31, 0x33, 0xb2,
+ 0xfa, 0xcd, 0xad, 0x58, 0x4e, 0x2b, 0xae, 0xd2,
+ 0x20, 0xfb, 0x1a, 0x48, 0xb4, 0x3f, 0x9a, 0xd8,
+ 0x7a, 0x35, 0x4a, 0xc8, 0xee, 0x88, 0x5e, 0x07,
+ 0x66, 0x54, 0xb9, 0xec, 0x9f, 0xa3, 0xe3, 0xb9,
+ 0x37, 0xaa, 0x49, 0x76, 0x31, 0xda, 0x74, 0x2d,
+ 0x3c, 0xa4, 0x65, 0x10, 0x32, 0x38, 0xf0, 0xde,
+ 0xd3, 0x99, 0x17, 0xaa, 0x71, 0xaa, 0x8f, 0x0f,
+ 0x8c, 0xaf, 0xa2, 0xf8, 0x5d, 0x64, 0xba, 0x1d,
+ 0xa3, 0xef, 0x96, 0x73, 0xe8, 0xa1, 0x02, 0x8d,
+ 0x0c, 0x6d, 0xb8, 0x06, 0x90, 0xb8, 0x08, 0x56,
+ 0x2c, 0xa7, 0x06, 0xc9, 0xc2, 0x38, 0xdb, 0x7c,
+ 0x63, 0xb1, 0x57, 0x8e, 0xea, 0x7c, 0x79, 0xf3,
+ 0x49, 0x1d, 0xfe, 0x9f, 0xf3, 0x6e, 0xb1, 0x1d,
+ 0xba, 0x19, 0x80, 0x1a, 0x0a, 0xd3, 0xb0, 0x26,
+ 0x21, 0x40, 0xb1, 0x7c, 0xf9, 0x4d, 0x8d, 0x10,
+ 0xc1, 0x7e, 0xf4, 0xf6, 0x3c, 0xa8, 0xfd, 0x7c,
+ 0xa3, 0x92, 0xb2, 0x0f, 0xaa, 0xcc, 0xa6, 0x11,
+ 0xfe, 0x04, 0xe3, 0xd1, 0x7a, 0x32, 0x89, 0xdf,
+ 0x0d, 0xc4, 0x8f, 0x79, 0x6b, 0xca, 0x16, 0x7c,
+ 0x6e, 0xf9, 0xad, 0x0f, 0xf6, 0xfe, 0x27, 0xdb,
+ 0xc4, 0x13, 0x70, 0xf1, 0x62, 0x1a, 0x4f, 0x79,
+ 0x40, 0xc9, 0x9b, 0x8b, 0x21, 0xea, 0x84, 0xfa,
+ 0xf5, 0xf1, 0x89, 0xce, 0xb7, 0x55, 0x0a, 0x80,
+ 0x39, 0x2f, 0x55, 0x36, 0x16, 0x9c, 0x7b, 0x08,
+ 0xbd, 0x87, 0x0d, 0xa5, 0x32, 0xf1, 0x52, 0x7c,
+ 0xe8, 0x55, 0x60, 0x5b, 0xd7, 0x69, 0xe4, 0xfc,
+ 0xfa, 0x12, 0x85, 0x96, 0xea, 0x50, 0x28, 0xab,
+ 0x8a, 0xf7, 0xbb, 0x0e, 0x53, 0x74, 0xca, 0xa6,
+ 0x27, 0x09, 0xc2, 0xb5, 0xde, 0x18, 0x14, 0xd9,
+ 0xea, 0xe5, 0x29, 0x1c, 0x40, 0x56, 0xcf, 0xd7,
+ 0xae, 0x05, 0x3f, 0x65, 0xaf, 0x05, 0x73, 0xe2,
+ 0x35, 0x96, 0x27, 0x07, 0x14, 0xc0, 0xad, 0x33,
+ 0xf1, 0xdc, 0x44, 0x7a, 0x89, 0x17, 0x77, 0xd2,
+ 0x9c, 0x58, 0x60, 0xf0, 0x3f, 0x7b, 0x2d, 0x2e,
+ 0x57, 0x95, 0x54, 0x87, 0xed, 0xf2, 0xc7, 0x4c,
+ 0xf0, 0xae, 0x56, 0x29, 0x19, 0x7d, 0x66, 0x4b,
+ 0x9b, 0x83, 0x84, 0x42, 0x3b, 0x01, 0x25, 0x66,
+ 0x8e, 0x02, 0xde, 0xb9, 0x83, 0x54, 0x19, 0xf6,
+ 0x9f, 0x79, 0x0d, 0x67, 0xc5, 0x1d, 0x7a, 0x44,
+ 0x02, 0x98, 0xa7, 0x16, 0x1c, 0x29, 0x0d, 0x74,
+ 0xff, 0x85, 0x40, 0x06, 0xef, 0x2c, 0xa9, 0xc6,
+ 0xf5, 0x53, 0x07, 0x06, 0xae, 0xe4, 0xfa, 0x5f,
+ 0xd8, 0x39, 0x4d, 0xf1, 0x9b, 0x6b, 0xd9, 0x24,
+ 0x84, 0xfe, 0x03, 0x4c, 0xb2, 0x3f, 0xdf, 0xa1,
+ 0x05, 0x9e, 0x50, 0x14, 0x5a, 0xd9, 0x1a, 0xa2,
+ 0xa7, 0xfa, 0xfa, 0x17, 0xf7, 0x78, 0xd6, 0xb5,
+ 0x92, 0x61, 0x91, 0xac, 0x36, 0xfa, 0x56, 0x0d,
+ 0x38, 0x32, 0x18, 0x85, 0x08, 0x58, 0x37, 0xf0,
+ 0x4b, 0xdb, 0x59, 0xe7, 0xa4, 0x34, 0xc0, 0x1b,
+ 0x01, 0xaf, 0x2d, 0xde, 0xa1, 0xaa, 0x5d, 0xd3,
+ 0xec, 0xe1, 0xd4, 0xf7, 0xe6, 0x54, 0x68, 0xf0,
+ 0x51, 0x97, 0xa7, 0x89, 0xea, 0x24, 0xad, 0xd3,
+ 0x6e, 0x47, 0x93, 0x8b, 0x4b, 0xb4, 0xf7, 0x1c,
+ 0x42, 0x06, 0x67, 0xe8, 0x99, 0xf6, 0xf5, 0x7b,
+ 0x85, 0xb5, 0x65, 0xb5, 0xb5, 0xd2, 0x37, 0xf5,
+ 0xf3, 0x02, 0xa6, 0x4d, 0x11, 0xa7, 0xdc, 0x51,
+ 0x09, 0x7f, 0xa0, 0xd8, 0x88, 0x1c, 0x13, 0x71,
+ 0xae, 0x9c, 0xb7, 0x7b, 0x34, 0xd6, 0x4e, 0x68,
+ 0x26, 0x83, 0x51, 0xaf, 0x1d, 0xee, 0x8b, 0xbb,
+ 0x69, 0x43, 0x2b, 0x9e, 0x8a, 0xbc, 0x02, 0x0e,
+ 0xa0, 0x1b, 0xe0, 0xa8, 0x5f, 0x6f, 0xaf, 0x1b,
+ 0x8f, 0xe7, 0x64, 0x71, 0x74, 0x11, 0x7e, 0xa8,
+ 0xd8, 0xf9, 0x97, 0x06, 0xc3, 0xb6, 0xfb, 0xfb,
+ 0xb7, 0x3d, 0x35, 0x9d, 0x3b, 0x52, 0xed, 0x54,
+ 0xca, 0xf4, 0x81, 0x01, 0x2d, 0x1b, 0xc3, 0xa7,
+ 0x00, 0x3d, 0x1a, 0x39, 0x54, 0xe1, 0xf6, 0xff,
+ 0xed, 0x6f, 0x0b, 0x5a, 0x68, 0xda, 0x58, 0xdd,
+ 0xa9, 0xcf, 0x5c, 0x4a, 0xe5, 0x09, 0x4e, 0xde,
+ 0x9d, 0xbc, 0x3e, 0xee, 0x5a, 0x00, 0x3b, 0x2c,
+ 0x87, 0x10, 0x65, 0x60, 0xdd, 0xd7, 0x56, 0xd1,
+ 0x4c, 0x64, 0x45, 0xe4, 0x21, 0xec, 0x78, 0xf8,
+ 0x25, 0x7a, 0x3e, 0x16, 0x5d, 0x09, 0x53, 0x14,
+ 0xbe, 0x4f, 0xae, 0x87, 0xd8, 0xd1, 0xaa, 0x3c,
+ 0xf6, 0x3e, 0xa4, 0x70, 0x8c, 0x5e, 0x70, 0xa4,
+ 0xb3, 0x6b, 0x66, 0x73, 0xd3, 0xbf, 0x31, 0x06,
+ 0x19, 0x62, 0x93, 0x15, 0xf2, 0x86, 0xe4, 0x52,
+ 0x7e, 0x53, 0x4c, 0x12, 0x38, 0xcc, 0x34, 0x7d,
+ 0x57, 0xf6, 0x42, 0x93, 0x8a, 0xc4, 0xee, 0x5c,
+ 0x8a, 0xe1, 0x52, 0x8f, 0x56, 0x64, 0xf6, 0xa6,
+ 0xd1, 0x91, 0x57, 0x70, 0xcd, 0x11, 0x76, 0xf5,
+ 0x59, 0x60, 0x60, 0x3c, 0xc1, 0xc3, 0x0b, 0x7f,
+ 0x58, 0x1a, 0x50, 0x91, 0xf1, 0x68, 0x8f, 0x6e,
+ 0x74, 0x74, 0xa8, 0x51, 0x0b, 0xf7, 0x7a, 0x98,
+ 0x37, 0xf2, 0x0a, 0x0e, 0xa4, 0x97, 0x04, 0xb8,
+ 0x9b, 0xfd, 0xa0, 0xea, 0xf7, 0x0d, 0xe1, 0xdb,
+ 0x03, 0xf0, 0x31, 0x29, 0xf8, 0xdd, 0x6b, 0x8b,
+ 0x5d, 0xd8, 0x59, 0xa9, 0x29, 0xcf, 0x9a, 0x79,
+ 0x89, 0x19, 0x63, 0x46, 0x09, 0x79, 0x6a, 0x11,
+ 0xda, 0x63, 0x68, 0x48, 0x77, 0x23, 0xfb, 0x7d,
+ 0x3a, 0x43, 0xcb, 0x02, 0x3b, 0x7a, 0x6d, 0x10,
+ 0x2a, 0x9e, 0xac, 0xf1, 0xd4, 0x19, 0xf8, 0x23,
+ 0x64, 0x1d, 0x2c, 0x5f, 0xf2, 0xb0, 0x5c, 0x23,
+ 0x27, 0xf7, 0x27, 0x30, 0x16, 0x37, 0xb1, 0x90,
+ 0xab, 0x38, 0xfb, 0x55, 0xcd, 0x78, 0x58, 0xd4,
+ 0x7d, 0x43, 0xf6, 0x45, 0x5e, 0x55, 0x8d, 0xb1,
+ 0x02, 0x65, 0x58, 0xb4, 0x13, 0x4b, 0x36, 0xf7,
+ 0xcc, 0xfe, 0x3d, 0x0b, 0x82, 0xe2, 0x12, 0x11,
+ 0xbb, 0xe6, 0xb8, 0x3a, 0x48, 0x71, 0xc7, 0x50,
+ 0x06, 0x16, 0x3a, 0xe6, 0x7c, 0x05, 0xc7, 0xc8,
+ 0x4d, 0x2f, 0x08, 0x6a, 0x17, 0x9a, 0x95, 0x97,
+ 0x50, 0x68, 0xdc, 0x28, 0x18, 0xc4, 0x61, 0x38,
+ 0xb9, 0xe0, 0x3e, 0x78, 0xdb, 0x29, 0xe0, 0x9f,
+ 0x52, 0xdd, 0xf8, 0x4f, 0x91, 0xc1, 0xd0, 0x33,
+ 0xa1, 0x7a, 0x8e, 0x30, 0x13, 0x82, 0x07, 0x9f,
+ 0xd3, 0x31, 0x0f, 0x23, 0xbe, 0x32, 0x5a, 0x75,
+ 0xcf, 0x96, 0xb2, 0xec, 0xb5, 0x32, 0xac, 0x21,
+ 0xd1, 0x82, 0x33, 0xd3, 0x15, 0x74, 0xbd, 0x90,
+ 0xf1, 0x2c, 0xe6, 0x5f, 0x8d, 0xe3, 0x02, 0xe8,
+ 0xe9, 0xc4, 0xca, 0x96, 0xeb, 0x0e, 0xbc, 0x91,
+ 0xf4, 0xb9, 0xea, 0xd9, 0x1b, 0x75, 0xbd, 0xe1,
+ 0xac, 0x2a, 0x05, 0x37, 0x52, 0x9b, 0x1b, 0x3f,
+ 0x5a, 0xdc, 0x21, 0xc3, 0x98, 0xbb, 0xaf, 0xa3,
+ 0xf2, 0x00, 0xbf, 0x0d, 0x30, 0x89, 0x05, 0xcc,
+ 0xa5, 0x76, 0xf5, 0x06, 0xf0, 0xc6, 0x54, 0x8a,
+ 0x5d, 0xd4, 0x1e, 0xc1, 0xf2, 0xce, 0xb0, 0x62,
+ 0xc8, 0xfc, 0x59, 0x42, 0x9a, 0x90, 0x60, 0x55,
+ 0xfe, 0x88, 0xa5, 0x8b, 0xb8, 0x33, 0x0c, 0x23,
+ 0x24, 0x0d, 0x15, 0x70, 0x37, 0x1e, 0x3d, 0xf6,
+ 0xd2, 0xea, 0x92, 0x10, 0xb2, 0xc4, 0x51, 0xac,
+ 0xf2, 0xac, 0xf3, 0x6b, 0x6c, 0xaa, 0xcf, 0x12,
+ 0xc5, 0x6c, 0x90, 0x50, 0xb5, 0x0c, 0xfc, 0x1a,
+ 0x15, 0x52, 0xe9, 0x26, 0xc6, 0x52, 0xa4, 0xe7,
+ 0x81, 0x69, 0xe1, 0xe7, 0x9e, 0x30, 0x01, 0xec,
+ 0x84, 0x89, 0xb2, 0x0d, 0x66, 0xdd, 0xce, 0x28,
+ 0x5c, 0xec, 0x98, 0x46, 0x68, 0x21, 0x9f, 0x88,
+ 0x3f, 0x1f, 0x42, 0x77, 0xce, 0xd0, 0x61, 0xd4,
+ 0x20, 0xa7, 0xff, 0x53, 0xad, 0x37, 0xd0, 0x17,
+ 0x35, 0xc9, 0xfc, 0xba, 0x0a, 0x78, 0x3f, 0xf2,
+ 0xcc, 0x86, 0x89, 0xe8, 0x4b, 0x3c, 0x48, 0x33,
+ 0x09, 0x7f, 0xc6, 0xc0, 0xdd, 0xb8, 0xfd, 0x7a,
+ 0x66, 0x66, 0x65, 0xeb, 0x47, 0xa7, 0x04, 0x28,
+ 0xa3, 0x19, 0x8e, 0xa9, 0xb1, 0x13, 0x67, 0x62,
+ 0x70, 0xcf, 0xd6
+};
+static const u8 enc_assoc012[] __initconst = {
+ 0xb1, 0x69, 0x83, 0x87, 0x30, 0xaa, 0x5d, 0xb8,
+ 0x77, 0xe8, 0x21, 0xff, 0x06, 0x59, 0x35, 0xce,
+ 0x75, 0xfe, 0x38, 0xef, 0xb8, 0x91, 0x43, 0x8c,
+ 0xcf, 0x70, 0xdd, 0x0a, 0x68, 0xbf, 0xd4, 0xbc,
+ 0x16, 0x76, 0x99, 0x36, 0x1e, 0x58, 0x79, 0x5e,
+ 0xd4, 0x29, 0xf7, 0x33, 0x93, 0x48, 0xdb, 0x5f,
+ 0x01, 0xae, 0x9c, 0xb6, 0xe4, 0x88, 0x6d, 0x2b,
+ 0x76, 0x75, 0xe0, 0xf3, 0x74, 0xe2, 0xc9
+};
+static const u8 enc_nonce012[] __initconst = {
+ 0x05, 0xa3, 0x93, 0xed, 0x30, 0xc5, 0xa2, 0x06
+};
+static const u8 enc_key012[] __initconst = {
+ 0xb3, 0x35, 0x50, 0x03, 0x54, 0x2e, 0x40, 0x5e,
+ 0x8f, 0x59, 0x8e, 0xc5, 0x90, 0xd5, 0x27, 0x2d,
+ 0xba, 0x29, 0x2e, 0xcb, 0x1b, 0x70, 0x44, 0x1e,
+ 0x65, 0x91, 0x6e, 0x2a, 0x79, 0x22, 0xda, 0x64
+};
+
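+/*
+ * Illustrative only: a minimal sketch of how one of the vectors above can
+ * be replayed through the library interface, assuming this file's existing
+ * includes provide chacha20poly1305_encrypt() from
+ * <crypto/chacha20poly1305.h>, get_unaligned_le64(), and kmalloc(). The
+ * helper name check_one_vector() is hypothetical, not this file's actual
+ * test driver, and only the 8-byte nonce path used by vectors 010-012 is
+ * shown.
+ */
+static bool __init check_one_vector(const u8 *input, size_t input_len,
+				    const u8 *assoc, size_t assoc_len,
+				    const u8 *nonce,
+				    const u8 key[CHACHA20POLY1305_KEY_SIZE],
+				    const u8 *expected, size_t expected_len)
+{
+	u8 *computed;
+	bool ok = false;
+
+	/* The output is the ciphertext followed by the 16-byte Poly1305 tag. */
+	if (expected_len != input_len + 16)
+		return false;
+	computed = kmalloc(expected_len, GFP_KERNEL);
+	if (!computed)
+		return false;
+	/* The library API takes the 64-bit nonce as a little-endian integer. */
+	chacha20poly1305_encrypt(computed, input, input_len, assoc, assoc_len,
+				 get_unaligned_le64(nonce), key);
+	ok = !memcmp(computed, expected, expected_len);
+	kfree(computed);
+	return ok;
+}
+/*
+ * E.g. for vector 011 above:
+ *	check_one_vector(enc_input011, sizeof(enc_input011),
+ *			 enc_assoc011, sizeof(enc_assoc011),
+ *			 enc_nonce011, enc_key011,
+ *			 enc_output011, sizeof(enc_output011));
+ */
+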
+/* wycheproof - rfc7539 (the AEAD example of RFC 7539 section 2.8.2:
+ * "Ladies and Gentlemen of the class of '99...") */
+static const u8 enc_input013[] __initconst = {
+ 0x4c, 0x61, 0x64, 0x69, 0x65, 0x73, 0x20, 0x61,
+ 0x6e, 0x64, 0x20, 0x47, 0x65, 0x6e, 0x74, 0x6c,
+ 0x65, 0x6d, 0x65, 0x6e, 0x20, 0x6f, 0x66, 0x20,
+ 0x74, 0x68, 0x65, 0x20, 0x63, 0x6c, 0x61, 0x73,
+ 0x73, 0x20, 0x6f, 0x66, 0x20, 0x27, 0x39, 0x39,
+ 0x3a, 0x20, 0x49, 0x66, 0x20, 0x49, 0x20, 0x63,
+ 0x6f, 0x75, 0x6c, 0x64, 0x20, 0x6f, 0x66, 0x66,
+ 0x65, 0x72, 0x20, 0x79, 0x6f, 0x75, 0x20, 0x6f,
+ 0x6e, 0x6c, 0x79, 0x20, 0x6f, 0x6e, 0x65, 0x20,
+ 0x74, 0x69, 0x70, 0x20, 0x66, 0x6f, 0x72, 0x20,
+ 0x74, 0x68, 0x65, 0x20, 0x66, 0x75, 0x74, 0x75,
+ 0x72, 0x65, 0x2c, 0x20, 0x73, 0x75, 0x6e, 0x73,
+ 0x63, 0x72, 0x65, 0x65, 0x6e, 0x20, 0x77, 0x6f,
+ 0x75, 0x6c, 0x64, 0x20, 0x62, 0x65, 0x20, 0x69,
+ 0x74, 0x2e
+};
+static const u8 enc_output013[] __initconst = {
+ 0xd3, 0x1a, 0x8d, 0x34, 0x64, 0x8e, 0x60, 0xdb,
+ 0x7b, 0x86, 0xaf, 0xbc, 0x53, 0xef, 0x7e, 0xc2,
+ 0xa4, 0xad, 0xed, 0x51, 0x29, 0x6e, 0x08, 0xfe,
+ 0xa9, 0xe2, 0xb5, 0xa7, 0x36, 0xee, 0x62, 0xd6,
+ 0x3d, 0xbe, 0xa4, 0x5e, 0x8c, 0xa9, 0x67, 0x12,
+ 0x82, 0xfa, 0xfb, 0x69, 0xda, 0x92, 0x72, 0x8b,
+ 0x1a, 0x71, 0xde, 0x0a, 0x9e, 0x06, 0x0b, 0x29,
+ 0x05, 0xd6, 0xa5, 0xb6, 0x7e, 0xcd, 0x3b, 0x36,
+ 0x92, 0xdd, 0xbd, 0x7f, 0x2d, 0x77, 0x8b, 0x8c,
+ 0x98, 0x03, 0xae, 0xe3, 0x28, 0x09, 0x1b, 0x58,
+ 0xfa, 0xb3, 0x24, 0xe4, 0xfa, 0xd6, 0x75, 0x94,
+ 0x55, 0x85, 0x80, 0x8b, 0x48, 0x31, 0xd7, 0xbc,
+ 0x3f, 0xf4, 0xde, 0xf0, 0x8e, 0x4b, 0x7a, 0x9d,
+ 0xe5, 0x76, 0xd2, 0x65, 0x86, 0xce, 0xc6, 0x4b,
+ 0x61, 0x16, 0x1a, 0xe1, 0x0b, 0x59, 0x4f, 0x09,
+ 0xe2, 0x6a, 0x7e, 0x90, 0x2e, 0xcb, 0xd0, 0x60,
+ 0x06, 0x91
+};
+static const u8 enc_assoc013[] __initconst = {
+ 0x50, 0x51, 0x52, 0x53, 0xc0, 0xc1, 0xc2, 0xc3,
+ 0xc4, 0xc5, 0xc6, 0xc7
+};
+static const u8 enc_nonce013[] __initconst = {
+ 0x07, 0x00, 0x00, 0x00, 0x40, 0x41, 0x42, 0x43,
+ 0x44, 0x45, 0x46, 0x47
+};
+static const u8 enc_key013[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
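+/*
+ * The rfc7539 vector above and the wycheproof vectors below carry 12-byte
+ * nonces. chacha20poly1305_encrypt() takes the nonce as a u64, so these
+ * presumably go through a separate 96-bit-nonce helper in the actual test
+ * driver rather than the 8-byte path sketched earlier.
+ */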
+/* wycheproof - misc */
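+/* Boundary case: empty plaintext and empty AAD, so the expected output
+ * below is nothing but the 16-byte Poly1305 tag. */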
+static const u8 enc_input014[] __initconst = { };
+static const u8 enc_output014[] __initconst = {
+ 0x76, 0xac, 0xb3, 0x42, 0xcf, 0x31, 0x66, 0xa5,
+ 0xb6, 0x3c, 0x0c, 0x0e, 0xa1, 0x38, 0x3c, 0x8d
+};
+static const u8 enc_assoc014[] __initconst = { };
+static const u8 enc_nonce014[] __initconst = {
+ 0x4d, 0xa5, 0xbf, 0x8d, 0xfd, 0x58, 0x52, 0xc1,
+ 0xea, 0x12, 0x37, 0x9d
+};
+static const u8 enc_key014[] __initconst = {
+ 0x80, 0xba, 0x31, 0x92, 0xc8, 0x03, 0xce, 0x96,
+ 0x5e, 0xa3, 0x71, 0xd5, 0xff, 0x07, 0x3c, 0xf0,
+ 0xf4, 0x3b, 0x6a, 0x2a, 0xb5, 0x76, 0xb2, 0x08,
+ 0x42, 0x6e, 0x11, 0x40, 0x9c, 0x09, 0xb9, 0xb0
+};
+
+/* wycheproof - misc */
+static const u8 enc_input015[] __initconst = { };
+static const u8 enc_output015[] __initconst = {
+ 0x90, 0x6f, 0xa6, 0x28, 0x4b, 0x52, 0xf8, 0x7b,
+ 0x73, 0x59, 0xcb, 0xaa, 0x75, 0x63, 0xc7, 0x09
+};
+static const u8 enc_assoc015[] __initconst = {
+ 0xbd, 0x50, 0x67, 0x64, 0xf2, 0xd2, 0xc4, 0x10
+};
+static const u8 enc_nonce015[] __initconst = {
+ 0xa9, 0x2e, 0xf0, 0xac, 0x99, 0x1d, 0xd5, 0x16,
+ 0xa3, 0xc6, 0xf6, 0x89
+};
+static const u8 enc_key015[] __initconst = {
+ 0x7a, 0x4c, 0xd7, 0x59, 0x17, 0x2e, 0x02, 0xeb,
+ 0x20, 0x4d, 0xb2, 0xc3, 0xf5, 0xc7, 0x46, 0x22,
+ 0x7d, 0xf5, 0x84, 0xfc, 0x13, 0x45, 0x19, 0x63,
+ 0x91, 0xdb, 0xb9, 0x57, 0x7a, 0x25, 0x07, 0x42
+};
+
+/* wycheproof - misc */
+static const u8 enc_input016[] __initconst = {
+ 0x2a
+};
+static const u8 enc_output016[] __initconst = {
+ 0x3a, 0xca, 0xc2, 0x7d, 0xec, 0x09, 0x68, 0x80,
+ 0x1e, 0x9f, 0x6e, 0xde, 0xd6, 0x9d, 0x80, 0x75,
+ 0x22
+};
+static const u8 enc_assoc016[] __initconst = { };
+static const u8 enc_nonce016[] __initconst = {
+ 0x99, 0xe2, 0x3e, 0xc4, 0x89, 0x85, 0xbc, 0xcd,
+ 0xee, 0xab, 0x60, 0xf1
+};
+static const u8 enc_key016[] __initconst = {
+ 0xcc, 0x56, 0xb6, 0x80, 0x55, 0x2e, 0xb7, 0x50,
+ 0x08, 0xf5, 0x48, 0x4b, 0x4c, 0xb8, 0x03, 0xfa,
+ 0x50, 0x63, 0xeb, 0xd6, 0xea, 0xb9, 0x1f, 0x6a,
+ 0xb6, 0xae, 0xf4, 0x91, 0x6a, 0x76, 0x62, 0x73
+};
+
+/* wycheproof - misc */
+static const u8 enc_input017[] __initconst = {
+ 0x51
+};
+static const u8 enc_output017[] __initconst = {
+ 0xc4, 0x16, 0x83, 0x10, 0xca, 0x45, 0xb1, 0xf7,
+ 0xc6, 0x6c, 0xad, 0x4e, 0x99, 0xe4, 0x3f, 0x72,
+ 0xb9
+};
+static const u8 enc_assoc017[] __initconst = {
+ 0x91, 0xca, 0x6c, 0x59, 0x2c, 0xbc, 0xca, 0x53
+};
+static const u8 enc_nonce017[] __initconst = {
+ 0xab, 0x0d, 0xca, 0x71, 0x6e, 0xe0, 0x51, 0xd2,
+ 0x78, 0x2f, 0x44, 0x03
+};
+static const u8 enc_key017[] __initconst = {
+ 0x46, 0xf0, 0x25, 0x49, 0x65, 0xf7, 0x69, 0xd5,
+ 0x2b, 0xdb, 0x4a, 0x70, 0xb4, 0x43, 0x19, 0x9f,
+ 0x8e, 0xf2, 0x07, 0x52, 0x0d, 0x12, 0x20, 0xc5,
+ 0x5e, 0x4b, 0x70, 0xf0, 0xfd, 0xa6, 0x20, 0xee
+};
+
+/* wycheproof - misc */
+static const u8 enc_input018[] __initconst = {
+ 0x5c, 0x60
+};
+static const u8 enc_output018[] __initconst = {
+ 0x4d, 0x13, 0x91, 0xe8, 0xb6, 0x1e, 0xfb, 0x39,
+ 0xc1, 0x22, 0x19, 0x54, 0x53, 0x07, 0x7b, 0x22,
+ 0xe5, 0xe2
+};
+static const u8 enc_assoc018[] __initconst = { };
+static const u8 enc_nonce018[] __initconst = {
+ 0x46, 0x1a, 0xf1, 0x22, 0xe9, 0xf2, 0xe0, 0x34,
+ 0x7e, 0x03, 0xf2, 0xdb
+};
+static const u8 enc_key018[] __initconst = {
+ 0x2f, 0x7f, 0x7e, 0x4f, 0x59, 0x2b, 0xb3, 0x89,
+ 0x19, 0x49, 0x89, 0x74, 0x35, 0x07, 0xbf, 0x3e,
+ 0xe9, 0xcb, 0xde, 0x17, 0x86, 0xb6, 0x69, 0x5f,
+ 0xe6, 0xc0, 0x25, 0xfd, 0x9b, 0xa4, 0xc1, 0x00
+};
+
+/* wycheproof - misc */
+static const u8 enc_input019[] __initconst = {
+ 0xdd, 0xf2
+};
+static const u8 enc_output019[] __initconst = {
+ 0xb6, 0x0d, 0xea, 0xd0, 0xfd, 0x46, 0x97, 0xec,
+ 0x2e, 0x55, 0x58, 0x23, 0x77, 0x19, 0xd0, 0x24,
+ 0x37, 0xa2
+};
+static const u8 enc_assoc019[] __initconst = {
+ 0x88, 0x36, 0x4f, 0xc8, 0x06, 0x05, 0x18, 0xbf
+};
+static const u8 enc_nonce019[] __initconst = {
+ 0x61, 0x54, 0x6b, 0xa5, 0xf1, 0x72, 0x05, 0x90,
+ 0xb6, 0x04, 0x0a, 0xc6
+};
+static const u8 enc_key019[] __initconst = {
+ 0xc8, 0x83, 0x3d, 0xce, 0x5e, 0xa9, 0xf2, 0x48,
+ 0xaa, 0x20, 0x30, 0xea, 0xcf, 0xe7, 0x2b, 0xff,
+ 0xe6, 0x9a, 0x62, 0x0c, 0xaf, 0x79, 0x33, 0x44,
+ 0xe5, 0x71, 0x8f, 0xe0, 0xd7, 0xab, 0x1a, 0x58
+};
+
+/* wycheproof - misc */
+static const u8 enc_input020[] __initconst = {
+ 0xab, 0x85, 0xe9, 0xc1, 0x57, 0x17, 0x31
+};
+static const u8 enc_output020[] __initconst = {
+ 0x5d, 0xfe, 0x34, 0x40, 0xdb, 0xb3, 0xc3, 0xed,
+ 0x7a, 0x43, 0x4e, 0x26, 0x02, 0xd3, 0x94, 0x28,
+ 0x1e, 0x0a, 0xfa, 0x9f, 0xb7, 0xaa, 0x42
+};
+static const u8 enc_assoc020[] __initconst = { };
+static const u8 enc_nonce020[] __initconst = {
+ 0x3c, 0x4e, 0x65, 0x4d, 0x66, 0x3f, 0xa4, 0x59,
+ 0x6d, 0xc5, 0x5b, 0xb7
+};
+static const u8 enc_key020[] __initconst = {
+ 0x55, 0x56, 0x81, 0x58, 0xd3, 0xa6, 0x48, 0x3f,
+ 0x1f, 0x70, 0x21, 0xea, 0xb6, 0x9b, 0x70, 0x3f,
+ 0x61, 0x42, 0x51, 0xca, 0xdc, 0x1a, 0xf5, 0xd3,
+ 0x4a, 0x37, 0x4f, 0xdb, 0xfc, 0x5a, 0xda, 0xc7
+};
+
+/* wycheproof - misc */
+static const u8 enc_input021[] __initconst = {
+ 0x4e, 0xe5, 0xcd, 0xa2, 0x0d, 0x42, 0x90
+};
+static const u8 enc_output021[] __initconst = {
+ 0x4b, 0xd4, 0x72, 0x12, 0x94, 0x1c, 0xe3, 0x18,
+ 0x5f, 0x14, 0x08, 0xee, 0x7f, 0xbf, 0x18, 0xf5,
+ 0xab, 0xad, 0x6e, 0x22, 0x53, 0xa1, 0xba
+};
+static const u8 enc_assoc021[] __initconst = {
+ 0x84, 0xe4, 0x6b, 0xe8, 0xc0, 0x91, 0x90, 0x53
+};
+static const u8 enc_nonce021[] __initconst = {
+ 0x58, 0x38, 0x93, 0x75, 0xc6, 0x9e, 0xe3, 0x98,
+ 0xde, 0x94, 0x83, 0x96
+};
+static const u8 enc_key021[] __initconst = {
+ 0xe3, 0xc0, 0x9e, 0x7f, 0xab, 0x1a, 0xef, 0xb5,
+ 0x16, 0xda, 0x6a, 0x33, 0x02, 0x2a, 0x1d, 0xd4,
+ 0xeb, 0x27, 0x2c, 0x80, 0xd5, 0x40, 0xc5, 0xda,
+ 0x52, 0xa7, 0x30, 0xf3, 0x4d, 0x84, 0x0d, 0x7f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input022[] __initconst = {
+ 0xbe, 0x33, 0x08, 0xf7, 0x2a, 0x2c, 0x6a, 0xed
+};
+static const u8 enc_output022[] __initconst = {
+ 0x8e, 0x94, 0x39, 0xa5, 0x6e, 0xee, 0xc8, 0x17,
+ 0xfb, 0xe8, 0xa6, 0xed, 0x8f, 0xab, 0xb1, 0x93,
+ 0x75, 0x39, 0xdd, 0x6c, 0x00, 0xe9, 0x00, 0x21
+};
+static const u8 enc_assoc022[] __initconst = { };
+static const u8 enc_nonce022[] __initconst = {
+ 0x4f, 0x07, 0xaf, 0xed, 0xfd, 0xc3, 0xb6, 0xc2,
+ 0x36, 0x18, 0x23, 0xd3
+};
+static const u8 enc_key022[] __initconst = {
+ 0x51, 0xe4, 0xbf, 0x2b, 0xad, 0x92, 0xb7, 0xaf,
+ 0xf1, 0xa4, 0xbc, 0x05, 0x55, 0x0b, 0xa8, 0x1d,
+ 0xf4, 0xb9, 0x6f, 0xab, 0xf4, 0x1c, 0x12, 0xc7,
+ 0xb0, 0x0e, 0x60, 0xe4, 0x8d, 0xb7, 0xe1, 0x52
+};
+
+/* wycheproof - misc */
+static const u8 enc_input023[] __initconst = {
+ 0xa4, 0xc9, 0xc2, 0x80, 0x1b, 0x71, 0xf7, 0xdf
+};
+static const u8 enc_output023[] __initconst = {
+ 0xb9, 0xb9, 0x10, 0x43, 0x3a, 0xf0, 0x52, 0xb0,
+ 0x45, 0x30, 0xf5, 0x1a, 0xee, 0xe0, 0x24, 0xe0,
+ 0xa4, 0x45, 0xa6, 0x32, 0x8f, 0xa6, 0x7a, 0x18
+};
+static const u8 enc_assoc023[] __initconst = {
+ 0x66, 0xc0, 0xae, 0x70, 0x07, 0x6c, 0xb1, 0x4d
+};
+static const u8 enc_nonce023[] __initconst = {
+ 0xb4, 0xea, 0x66, 0x6e, 0xe1, 0x19, 0x56, 0x33,
+ 0x66, 0x48, 0x4a, 0x78
+};
+static const u8 enc_key023[] __initconst = {
+ 0x11, 0x31, 0xc1, 0x41, 0x85, 0x77, 0xa0, 0x54,
+ 0xde, 0x7a, 0x4a, 0xc5, 0x51, 0x95, 0x0f, 0x1a,
+ 0x05, 0x3f, 0x9a, 0xe4, 0x6e, 0x5b, 0x75, 0xfe,
+ 0x4a, 0xbd, 0x56, 0x08, 0xd7, 0xcd, 0xda, 0xdd
+};
+
+/* wycheproof - misc */
+static const u8 enc_input024[] __initconst = {
+ 0x42, 0xba, 0xae, 0x59, 0x78, 0xfe, 0xaf, 0x5c,
+ 0x36, 0x8d, 0x14, 0xe0
+};
+static const u8 enc_output024[] __initconst = {
+ 0xff, 0x7d, 0xc2, 0x03, 0xb2, 0x6c, 0x46, 0x7a,
+ 0x6b, 0x50, 0xdb, 0x33, 0x57, 0x8c, 0x0f, 0x27,
+ 0x58, 0xc2, 0xe1, 0x4e, 0x36, 0xd4, 0xfc, 0x10,
+ 0x6d, 0xcb, 0x29, 0xb4
+};
+static const u8 enc_assoc024[] __initconst = { };
+static const u8 enc_nonce024[] __initconst = {
+ 0x9a, 0x59, 0xfc, 0xe2, 0x6d, 0xf0, 0x00, 0x5e,
+ 0x07, 0x53, 0x86, 0x56
+};
+static const u8 enc_key024[] __initconst = {
+ 0x99, 0xb6, 0x2b, 0xd5, 0xaf, 0xbe, 0x3f, 0xb0,
+ 0x15, 0xbd, 0xe9, 0x3f, 0x0a, 0xbf, 0x48, 0x39,
+ 0x57, 0xa1, 0xc3, 0xeb, 0x3c, 0xa5, 0x9c, 0xb5,
+ 0x0b, 0x39, 0xf7, 0xf8, 0xa9, 0xcc, 0x51, 0xbe
+};
+
+/* wycheproof - misc */
+static const u8 enc_input025[] __initconst = {
+ 0xfd, 0xc8, 0x5b, 0x94, 0xa4, 0xb2, 0xa6, 0xb7,
+ 0x59, 0xb1, 0xa0, 0xda
+};
+static const u8 enc_output025[] __initconst = {
+ 0x9f, 0x88, 0x16, 0xde, 0x09, 0x94, 0xe9, 0x38,
+ 0xd9, 0xe5, 0x3f, 0x95, 0xd0, 0x86, 0xfc, 0x6c,
+ 0x9d, 0x8f, 0xa9, 0x15, 0xfd, 0x84, 0x23, 0xa7,
+ 0xcf, 0x05, 0x07, 0x2f
+};
+static const u8 enc_assoc025[] __initconst = {
+ 0xa5, 0x06, 0xe1, 0xa5, 0xc6, 0x90, 0x93, 0xf9
+};
+static const u8 enc_nonce025[] __initconst = {
+ 0x58, 0xdb, 0xd4, 0xad, 0x2c, 0x4a, 0xd3, 0x5d,
+ 0xd9, 0x06, 0xe9, 0xce
+};
+static const u8 enc_key025[] __initconst = {
+ 0x85, 0xf3, 0x5b, 0x62, 0x82, 0xcf, 0xf4, 0x40,
+ 0xbc, 0x10, 0x20, 0xc8, 0x13, 0x6f, 0xf2, 0x70,
+ 0x31, 0x11, 0x0f, 0xa6, 0x3e, 0xc1, 0x6f, 0x1e,
+ 0x82, 0x51, 0x18, 0xb0, 0x06, 0xb9, 0x12, 0x57
+};
+
+/* wycheproof - misc */
+static const u8 enc_input026[] __initconst = {
+ 0x51, 0xf8, 0xc1, 0xf7, 0x31, 0xea, 0x14, 0xac,
+ 0xdb, 0x21, 0x0a, 0x6d, 0x97, 0x3e, 0x07
+};
+static const u8 enc_output026[] __initconst = {
+ 0x0b, 0x29, 0x63, 0x8e, 0x1f, 0xbd, 0xd6, 0xdf,
+ 0x53, 0x97, 0x0b, 0xe2, 0x21, 0x00, 0x42, 0x2a,
+ 0x91, 0x34, 0x08, 0x7d, 0x67, 0xa4, 0x6e, 0x79,
+ 0x17, 0x8d, 0x0a, 0x93, 0xf5, 0xe1, 0xd2
+};
+static const u8 enc_assoc026[] __initconst = { };
+static const u8 enc_nonce026[] __initconst = {
+ 0x68, 0xab, 0x7f, 0xdb, 0xf6, 0x19, 0x01, 0xda,
+ 0xd4, 0x61, 0xd2, 0x3c
+};
+static const u8 enc_key026[] __initconst = {
+ 0x67, 0x11, 0x96, 0x27, 0xbd, 0x98, 0x8e, 0xda,
+ 0x90, 0x62, 0x19, 0xe0, 0x8c, 0x0d, 0x0d, 0x77,
+ 0x9a, 0x07, 0xd2, 0x08, 0xce, 0x8a, 0x4f, 0xe0,
+ 0x70, 0x9a, 0xf7, 0x55, 0xee, 0xec, 0x6d, 0xcb
+};
+
+/* wycheproof - misc */
+static const u8 enc_input027[] __initconst = {
+ 0x97, 0x46, 0x9d, 0xa6, 0x67, 0xd6, 0x11, 0x0f,
+ 0x9c, 0xbd, 0xa1, 0xd1, 0xa2, 0x06, 0x73
+};
+static const u8 enc_output027[] __initconst = {
+ 0x32, 0xdb, 0x66, 0xc4, 0xa3, 0x81, 0x9d, 0x81,
+ 0x55, 0x74, 0x55, 0xe5, 0x98, 0x0f, 0xed, 0xfe,
+ 0xae, 0x30, 0xde, 0xc9, 0x4e, 0x6a, 0xd3, 0xa9,
+ 0xee, 0xa0, 0x6a, 0x0d, 0x70, 0x39, 0x17
+};
+static const u8 enc_assoc027[] __initconst = {
+ 0x64, 0x53, 0xa5, 0x33, 0x84, 0x63, 0x22, 0x12
+};
+static const u8 enc_nonce027[] __initconst = {
+ 0xd9, 0x5b, 0x32, 0x43, 0xaf, 0xae, 0xf7, 0x14,
+ 0xc5, 0x03, 0x5b, 0x6a
+};
+static const u8 enc_key027[] __initconst = {
+ 0xe6, 0xf1, 0x11, 0x8d, 0x41, 0xe4, 0xb4, 0x3f,
+ 0xb5, 0x82, 0x21, 0xb7, 0xed, 0x79, 0x67, 0x38,
+ 0x34, 0xe0, 0xd8, 0xac, 0x5c, 0x4f, 0xa6, 0x0b,
+ 0xbc, 0x8b, 0xc4, 0x89, 0x3a, 0x58, 0x89, 0x4d
+};
+
+/* wycheproof - misc */
+static const u8 enc_input028[] __initconst = {
+ 0x54, 0x9b, 0x36, 0x5a, 0xf9, 0x13, 0xf3, 0xb0,
+ 0x81, 0x13, 0x1c, 0xcb, 0x6b, 0x82, 0x55, 0x88
+};
+static const u8 enc_output028[] __initconst = {
+ 0xe9, 0x11, 0x0e, 0x9f, 0x56, 0xab, 0x3c, 0xa4,
+ 0x83, 0x50, 0x0c, 0xea, 0xba, 0xb6, 0x7a, 0x13,
+ 0x83, 0x6c, 0xca, 0xbf, 0x15, 0xa6, 0xa2, 0x2a,
+ 0x51, 0xc1, 0x07, 0x1c, 0xfa, 0x68, 0xfa, 0x0c
+};
+static const u8 enc_assoc028[] __initconst = { };
+static const u8 enc_nonce028[] __initconst = {
+ 0x2f, 0xcb, 0x1b, 0x38, 0xa9, 0x9e, 0x71, 0xb8,
+ 0x47, 0x40, 0xad, 0x9b
+};
+static const u8 enc_key028[] __initconst = {
+ 0x59, 0xd4, 0xea, 0xfb, 0x4d, 0xe0, 0xcf, 0xc7,
+ 0xd3, 0xdb, 0x99, 0xa8, 0xf5, 0x4b, 0x15, 0xd7,
+ 0xb3, 0x9f, 0x0a, 0xcc, 0x8d, 0xa6, 0x97, 0x63,
+ 0xb0, 0x19, 0xc1, 0x69, 0x9f, 0x87, 0x67, 0x4a
+};
+
+/* wycheproof - misc */
+static const u8 enc_input029[] __initconst = {
+ 0x55, 0xa4, 0x65, 0x64, 0x4f, 0x5b, 0x65, 0x09,
+ 0x28, 0xcb, 0xee, 0x7c, 0x06, 0x32, 0x14, 0xd6
+};
+static const u8 enc_output029[] __initconst = {
+ 0xe4, 0xb1, 0x13, 0xcb, 0x77, 0x59, 0x45, 0xf3,
+ 0xd3, 0xa8, 0xae, 0x9e, 0xc1, 0x41, 0xc0, 0x0c,
+ 0x7c, 0x43, 0xf1, 0x6c, 0xe0, 0x96, 0xd0, 0xdc,
+ 0x27, 0xc9, 0x58, 0x49, 0xdc, 0x38, 0x3b, 0x7d
+};
+static const u8 enc_assoc029[] __initconst = {
+ 0x03, 0x45, 0x85, 0x62, 0x1a, 0xf8, 0xd7, 0xff
+};
+static const u8 enc_nonce029[] __initconst = {
+ 0x11, 0x8a, 0x69, 0x64, 0xc2, 0xd3, 0xe3, 0x80,
+ 0x07, 0x1f, 0x52, 0x66
+};
+static const u8 enc_key029[] __initconst = {
+ 0xb9, 0x07, 0xa4, 0x50, 0x75, 0x51, 0x3f, 0xe8,
+ 0xa8, 0x01, 0x9e, 0xde, 0xe3, 0xf2, 0x59, 0x14,
+ 0x87, 0xb2, 0xa0, 0x30, 0xb0, 0x3c, 0x6e, 0x1d,
+ 0x77, 0x1c, 0x86, 0x25, 0x71, 0xd2, 0xea, 0x1e
+};
+
+/* wycheproof - misc */
+static const u8 enc_input030[] __initconst = {
+ 0x3f, 0xf1, 0x51, 0x4b, 0x1c, 0x50, 0x39, 0x15,
+ 0x91, 0x8f, 0x0c, 0x0c, 0x31, 0x09, 0x4a, 0x6e,
+ 0x1f
+};
+static const u8 enc_output030[] __initconst = {
+ 0x02, 0xcc, 0x3a, 0xcb, 0x5e, 0xe1, 0xfc, 0xdd,
+ 0x12, 0xa0, 0x3b, 0xb8, 0x57, 0x97, 0x64, 0x74,
+ 0xd3, 0xd8, 0x3b, 0x74, 0x63, 0xa2, 0xc3, 0x80,
+ 0x0f, 0xe9, 0x58, 0xc2, 0x8e, 0xaa, 0x29, 0x08,
+ 0x13
+};
+static const u8 enc_assoc030[] __initconst = { };
+static const u8 enc_nonce030[] __initconst = {
+ 0x45, 0xaa, 0xa3, 0xe5, 0xd1, 0x6d, 0x2d, 0x42,
+ 0xdc, 0x03, 0x44, 0x5d
+};
+static const u8 enc_key030[] __initconst = {
+ 0x3b, 0x24, 0x58, 0xd8, 0x17, 0x6e, 0x16, 0x21,
+ 0xc0, 0xcc, 0x24, 0xc0, 0xc0, 0xe2, 0x4c, 0x1e,
+ 0x80, 0xd7, 0x2f, 0x7e, 0xe9, 0x14, 0x9a, 0x4b,
+ 0x16, 0x61, 0x76, 0x62, 0x96, 0x16, 0xd0, 0x11
+};
+
+/* wycheproof - misc */
+static const u8 enc_input031[] __initconst = {
+ 0x63, 0x85, 0x8c, 0xa3, 0xe2, 0xce, 0x69, 0x88,
+ 0x7b, 0x57, 0x8a, 0x3c, 0x16, 0x7b, 0x42, 0x1c,
+ 0x9c
+};
+static const u8 enc_output031[] __initconst = {
+ 0x35, 0x76, 0x64, 0x88, 0xd2, 0xbc, 0x7c, 0x2b,
+ 0x8d, 0x17, 0xcb, 0xbb, 0x9a, 0xbf, 0xad, 0x9e,
+ 0x6d, 0x1f, 0x39, 0x1e, 0x65, 0x7b, 0x27, 0x38,
+ 0xdd, 0xa0, 0x84, 0x48, 0xcb, 0xa2, 0x81, 0x1c,
+ 0xeb
+};
+static const u8 enc_assoc031[] __initconst = {
+ 0x9a, 0xaf, 0x29, 0x9e, 0xee, 0xa7, 0x8f, 0x79
+};
+static const u8 enc_nonce031[] __initconst = {
+ 0xf0, 0x38, 0x4f, 0xb8, 0x76, 0x12, 0x14, 0x10,
+ 0x63, 0x3d, 0x99, 0x3d
+};
+static const u8 enc_key031[] __initconst = {
+ 0xf6, 0x0c, 0x6a, 0x1b, 0x62, 0x57, 0x25, 0xf7,
+ 0x6c, 0x70, 0x37, 0xb4, 0x8f, 0xe3, 0x57, 0x7f,
+ 0xa7, 0xf7, 0xb8, 0x7b, 0x1b, 0xd5, 0xa9, 0x82,
+ 0x17, 0x6d, 0x18, 0x23, 0x06, 0xff, 0xb8, 0x70
+};
+
+/* wycheproof - misc */
+static const u8 enc_input032[] __initconst = {
+ 0x10, 0xf1, 0xec, 0xf9, 0xc6, 0x05, 0x84, 0x66,
+ 0x5d, 0x9a, 0xe5, 0xef, 0xe2, 0x79, 0xe7, 0xf7,
+ 0x37, 0x7e, 0xea, 0x69, 0x16, 0xd2, 0xb1, 0x11
+};
+static const u8 enc_output032[] __initconst = {
+ 0x42, 0xf2, 0x6c, 0x56, 0xcb, 0x4b, 0xe2, 0x1d,
+ 0x9d, 0x8d, 0x0c, 0x80, 0xfc, 0x99, 0xdd, 0xe0,
+ 0x0d, 0x75, 0xf3, 0x80, 0x74, 0xbf, 0xe7, 0x64,
+ 0x54, 0xaa, 0x7e, 0x13, 0xd4, 0x8f, 0xff, 0x7d,
+ 0x75, 0x57, 0x03, 0x94, 0x57, 0x04, 0x0a, 0x3a
+};
+static const u8 enc_assoc032[] __initconst = { };
+static const u8 enc_nonce032[] __initconst = {
+ 0xe6, 0xb1, 0xad, 0xf2, 0xfd, 0x58, 0xa8, 0x76,
+ 0x2c, 0x65, 0xf3, 0x1b
+};
+static const u8 enc_key032[] __initconst = {
+ 0x02, 0x12, 0xa8, 0xde, 0x50, 0x07, 0xed, 0x87,
+ 0xb3, 0x3f, 0x1a, 0x70, 0x90, 0xb6, 0x11, 0x4f,
+ 0x9e, 0x08, 0xce, 0xfd, 0x96, 0x07, 0xf2, 0xc2,
+ 0x76, 0xbd, 0xcf, 0xdb, 0xc5, 0xce, 0x9c, 0xd7
+};
+
+/* wycheproof - misc */
+static const u8 enc_input033[] __initconst = {
+ 0x92, 0x22, 0xf9, 0x01, 0x8e, 0x54, 0xfd, 0x6d,
+ 0xe1, 0x20, 0x08, 0x06, 0xa9, 0xee, 0x8e, 0x4c,
+ 0xc9, 0x04, 0xd2, 0x9f, 0x25, 0xcb, 0xa1, 0x93
+};
+static const u8 enc_output033[] __initconst = {
+ 0x12, 0x30, 0x32, 0x43, 0x7b, 0x4b, 0xfd, 0x69,
+ 0x20, 0xe8, 0xf7, 0xe7, 0xe0, 0x08, 0x7a, 0xe4,
+ 0x88, 0x9e, 0xbe, 0x7a, 0x0a, 0xd0, 0xe9, 0x00,
+ 0x3c, 0xf6, 0x8f, 0x17, 0x95, 0x50, 0xda, 0x63,
+ 0xd3, 0xb9, 0x6c, 0x2d, 0x55, 0x41, 0x18, 0x65
+};
+static const u8 enc_assoc033[] __initconst = {
+ 0x3e, 0x8b, 0xc5, 0xad, 0xe1, 0x82, 0xff, 0x08
+};
+static const u8 enc_nonce033[] __initconst = {
+ 0x6b, 0x28, 0x2e, 0xbe, 0xcc, 0x54, 0x1b, 0xcd,
+ 0x78, 0x34, 0xed, 0x55
+};
+static const u8 enc_key033[] __initconst = {
+ 0xc5, 0xbc, 0x09, 0x56, 0x56, 0x46, 0xe7, 0xed,
+ 0xda, 0x95, 0x4f, 0x1f, 0x73, 0x92, 0x23, 0xda,
+ 0xda, 0x20, 0xb9, 0x5c, 0x44, 0xab, 0x03, 0x3d,
+ 0x0f, 0xae, 0x4b, 0x02, 0x83, 0xd1, 0x8b, 0xe3
+};
+
+/* wycheproof - misc */
+static const u8 enc_input034[] __initconst = {
+ 0xb0, 0x53, 0x99, 0x92, 0x86, 0xa2, 0x82, 0x4f,
+ 0x42, 0xcc, 0x8c, 0x20, 0x3a, 0xb2, 0x4e, 0x2c,
+ 0x97, 0xa6, 0x85, 0xad, 0xcc, 0x2a, 0xd3, 0x26,
+ 0x62, 0x55, 0x8e, 0x55, 0xa5, 0xc7, 0x29
+};
+static const u8 enc_output034[] __initconst = {
+ 0x45, 0xc7, 0xd6, 0xb5, 0x3a, 0xca, 0xd4, 0xab,
+ 0xb6, 0x88, 0x76, 0xa6, 0xe9, 0x6a, 0x48, 0xfb,
+ 0x59, 0x52, 0x4d, 0x2c, 0x92, 0xc9, 0xd8, 0xa1,
+ 0x89, 0xc9, 0xfd, 0x2d, 0xb9, 0x17, 0x46, 0x56,
+ 0x6d, 0x3c, 0xa1, 0x0e, 0x31, 0x1b, 0x69, 0x5f,
+ 0x3e, 0xae, 0x15, 0x51, 0x65, 0x24, 0x93
+};
+static const u8 enc_assoc034[] __initconst = { };
+static const u8 enc_nonce034[] __initconst = {
+ 0x04, 0xa9, 0xbe, 0x03, 0x50, 0x8a, 0x5f, 0x31,
+ 0x37, 0x1a, 0x6f, 0xd2
+};
+static const u8 enc_key034[] __initconst = {
+ 0x2e, 0xb5, 0x1c, 0x46, 0x9a, 0xa8, 0xeb, 0x9e,
+ 0x6c, 0x54, 0xa8, 0x34, 0x9b, 0xae, 0x50, 0xa2,
+ 0x0f, 0x0e, 0x38, 0x27, 0x11, 0xbb, 0xa1, 0x15,
+ 0x2c, 0x42, 0x4f, 0x03, 0xb6, 0x67, 0x1d, 0x71
+};
+
+/* wycheproof - misc */
+static const u8 enc_input035[] __initconst = {
+ 0xf4, 0x52, 0x06, 0xab, 0xc2, 0x55, 0x52, 0xb2,
+ 0xab, 0xc9, 0xab, 0x7f, 0xa2, 0x43, 0x03, 0x5f,
+ 0xed, 0xaa, 0xdd, 0xc3, 0xb2, 0x29, 0x39, 0x56,
+ 0xf1, 0xea, 0x6e, 0x71, 0x56, 0xe7, 0xeb
+};
+static const u8 enc_output035[] __initconst = {
+ 0x46, 0xa8, 0x0c, 0x41, 0x87, 0x02, 0x47, 0x20,
+ 0x08, 0x46, 0x27, 0x58, 0x00, 0x80, 0xdd, 0xe5,
+ 0xa3, 0xf4, 0xa1, 0x10, 0x93, 0xa7, 0x07, 0x6e,
+ 0xd6, 0xf3, 0xd3, 0x26, 0xbc, 0x7b, 0x70, 0x53,
+ 0x4d, 0x4a, 0xa2, 0x83, 0x5a, 0x52, 0xe7, 0x2d,
+ 0x14, 0xdf, 0x0e, 0x4f, 0x47, 0xf2, 0x5f
+};
+static const u8 enc_assoc035[] __initconst = {
+ 0x37, 0x46, 0x18, 0xa0, 0x6e, 0xa9, 0x8a, 0x48
+};
+static const u8 enc_nonce035[] __initconst = {
+ 0x47, 0x0a, 0x33, 0x9e, 0xcb, 0x32, 0x19, 0xb8,
+ 0xb8, 0x1a, 0x1f, 0x8b
+};
+static const u8 enc_key035[] __initconst = {
+ 0x7f, 0x5b, 0x74, 0xc0, 0x7e, 0xd1, 0xb4, 0x0f,
+ 0xd1, 0x43, 0x58, 0xfe, 0x2f, 0xf2, 0xa7, 0x40,
+ 0xc1, 0x16, 0xc7, 0x70, 0x65, 0x10, 0xe6, 0xa4,
+ 0x37, 0xf1, 0x9e, 0xa4, 0x99, 0x11, 0xce, 0xc4
+};
+
+/* wycheproof - misc */
+static const u8 enc_input036[] __initconst = {
+ 0xb9, 0xc5, 0x54, 0xcb, 0xc3, 0x6a, 0xc1, 0x8a,
+ 0xe8, 0x97, 0xdf, 0x7b, 0xee, 0xca, 0xc1, 0xdb,
+ 0xeb, 0x4e, 0xaf, 0xa1, 0x56, 0xbb, 0x60, 0xce,
+ 0x2e, 0x5d, 0x48, 0xf0, 0x57, 0x15, 0xe6, 0x78
+};
+static const u8 enc_output036[] __initconst = {
+ 0xea, 0x29, 0xaf, 0xa4, 0x9d, 0x36, 0xe8, 0x76,
+ 0x0f, 0x5f, 0xe1, 0x97, 0x23, 0xb9, 0x81, 0x1e,
+ 0xd5, 0xd5, 0x19, 0x93, 0x4a, 0x44, 0x0f, 0x50,
+ 0x81, 0xac, 0x43, 0x0b, 0x95, 0x3b, 0x0e, 0x21,
+ 0x22, 0x25, 0x41, 0xaf, 0x46, 0xb8, 0x65, 0x33,
+ 0xc6, 0xb6, 0x8d, 0x2f, 0xf1, 0x08, 0xa7, 0xea
+};
+static const u8 enc_assoc036[] __initconst = { };
+static const u8 enc_nonce036[] __initconst = {
+ 0x72, 0xcf, 0xd9, 0x0e, 0xf3, 0x02, 0x6c, 0xa2,
+ 0x2b, 0x7e, 0x6e, 0x6a
+};
+static const u8 enc_key036[] __initconst = {
+ 0xe1, 0x73, 0x1d, 0x58, 0x54, 0xe1, 0xb7, 0x0c,
+ 0xb3, 0xff, 0xe8, 0xb7, 0x86, 0xa2, 0xb3, 0xeb,
+ 0xf0, 0x99, 0x43, 0x70, 0x95, 0x47, 0x57, 0xb9,
+ 0xdc, 0x8c, 0x7b, 0xc5, 0x35, 0x46, 0x34, 0xa3
+};
+
+/* wycheproof - misc */
+static const u8 enc_input037[] __initconst = {
+ 0x6b, 0x26, 0x04, 0x99, 0x6c, 0xd3, 0x0c, 0x14,
+ 0xa1, 0x3a, 0x52, 0x57, 0xed, 0x6c, 0xff, 0xd3,
+ 0xbc, 0x5e, 0x29, 0xd6, 0xb9, 0x7e, 0xb1, 0x79,
+ 0x9e, 0xb3, 0x35, 0xe2, 0x81, 0xea, 0x45, 0x1e
+};
+static const u8 enc_output037[] __initconst = {
+ 0x6d, 0xad, 0x63, 0x78, 0x97, 0x54, 0x4d, 0x8b,
+ 0xf6, 0xbe, 0x95, 0x07, 0xed, 0x4d, 0x1b, 0xb2,
+ 0xe9, 0x54, 0xbc, 0x42, 0x7e, 0x5d, 0xe7, 0x29,
+ 0xda, 0xf5, 0x07, 0x62, 0x84, 0x6f, 0xf2, 0xf4,
+ 0x7b, 0x99, 0x7d, 0x93, 0xc9, 0x82, 0x18, 0x9d,
+ 0x70, 0x95, 0xdc, 0x79, 0x4c, 0x74, 0x62, 0x32
+};
+static const u8 enc_assoc037[] __initconst = {
+ 0x23, 0x33, 0xe5, 0xce, 0x0f, 0x93, 0xb0, 0x59
+};
+static const u8 enc_nonce037[] __initconst = {
+ 0x26, 0x28, 0x80, 0xd4, 0x75, 0xf3, 0xda, 0xc5,
+ 0x34, 0x0d, 0xd1, 0xb8
+};
+static const u8 enc_key037[] __initconst = {
+ 0x27, 0xd8, 0x60, 0x63, 0x1b, 0x04, 0x85, 0xa4,
+ 0x10, 0x70, 0x2f, 0xea, 0x61, 0xbc, 0x87, 0x3f,
+ 0x34, 0x42, 0x26, 0x0c, 0xad, 0xed, 0x4a, 0xbd,
+ 0xe2, 0x5b, 0x78, 0x6a, 0x2d, 0x97, 0xf1, 0x45
+};
+
+/* wycheproof - misc */
+static const u8 enc_input038[] __initconst = {
+ 0x97, 0x3d, 0x0c, 0x75, 0x38, 0x26, 0xba, 0xe4,
+ 0x66, 0xcf, 0x9a, 0xbb, 0x34, 0x93, 0x15, 0x2e,
+ 0x9d, 0xe7, 0x81, 0x9e, 0x2b, 0xd0, 0xc7, 0x11,
+ 0x71, 0x34, 0x6b, 0x4d, 0x2c, 0xeb, 0xf8, 0x04,
+ 0x1a, 0xa3, 0xce, 0xdc, 0x0d, 0xfd, 0x7b, 0x46,
+ 0x7e, 0x26, 0x22, 0x8b, 0xc8, 0x6c, 0x9a
+};
+static const u8 enc_output038[] __initconst = {
+ 0xfb, 0xa7, 0x8a, 0xe4, 0xf9, 0xd8, 0x08, 0xa6,
+ 0x2e, 0x3d, 0xa4, 0x0b, 0xe2, 0xcb, 0x77, 0x00,
+ 0xc3, 0x61, 0x3d, 0x9e, 0xb2, 0xc5, 0x29, 0xc6,
+ 0x52, 0xe7, 0x6a, 0x43, 0x2c, 0x65, 0x8d, 0x27,
+ 0x09, 0x5f, 0x0e, 0xb8, 0xf9, 0x40, 0xc3, 0x24,
+ 0x98, 0x1e, 0xa9, 0x35, 0xe5, 0x07, 0xf9, 0x8f,
+ 0x04, 0x69, 0x56, 0xdb, 0x3a, 0x51, 0x29, 0x08,
+ 0xbd, 0x7a, 0xfc, 0x8f, 0x2a, 0xb0, 0xa9
+};
+static const u8 enc_assoc038[] __initconst = { };
+static const u8 enc_nonce038[] __initconst = {
+ 0xe7, 0x4a, 0x51, 0x5e, 0x7e, 0x21, 0x02, 0xb9,
+ 0x0b, 0xef, 0x55, 0xd2
+};
+static const u8 enc_key038[] __initconst = {
+ 0xcf, 0x0d, 0x40, 0xa4, 0x64, 0x4e, 0x5f, 0x51,
+ 0x81, 0x51, 0x65, 0xd5, 0x30, 0x1b, 0x22, 0x63,
+ 0x1f, 0x45, 0x44, 0xc4, 0x9a, 0x18, 0x78, 0xe3,
+ 0xa0, 0xa5, 0xe8, 0xe1, 0xaa, 0xe0, 0xf2, 0x64
+};
+
+/* wycheproof - misc */
+static const u8 enc_input039[] __initconst = {
+ 0xa9, 0x89, 0x95, 0x50, 0x4d, 0xf1, 0x6f, 0x74,
+ 0x8b, 0xfb, 0x77, 0x85, 0xff, 0x91, 0xee, 0xb3,
+ 0xb6, 0x60, 0xea, 0x9e, 0xd3, 0x45, 0x0c, 0x3d,
+ 0x5e, 0x7b, 0x0e, 0x79, 0xef, 0x65, 0x36, 0x59,
+ 0xa9, 0x97, 0x8d, 0x75, 0x54, 0x2e, 0xf9, 0x1c,
+ 0x45, 0x67, 0x62, 0x21, 0x56, 0x40, 0xb9
+};
+static const u8 enc_output039[] __initconst = {
+ 0xa1, 0xff, 0xed, 0x80, 0x76, 0x18, 0x29, 0xec,
+ 0xce, 0x24, 0x2e, 0x0e, 0x88, 0xb1, 0x38, 0x04,
+ 0x90, 0x16, 0xbc, 0xa0, 0x18, 0xda, 0x2b, 0x6e,
+ 0x19, 0x98, 0x6b, 0x3e, 0x31, 0x8c, 0xae, 0x8d,
+ 0x80, 0x61, 0x98, 0xfb, 0x4c, 0x52, 0x7c, 0xc3,
+ 0x93, 0x50, 0xeb, 0xdd, 0xea, 0xc5, 0x73, 0xc4,
+ 0xcb, 0xf0, 0xbe, 0xfd, 0xa0, 0xb7, 0x02, 0x42,
+ 0xc6, 0x40, 0xd7, 0xcd, 0x02, 0xd7, 0xa3
+};
+static const u8 enc_assoc039[] __initconst = {
+ 0xb3, 0xe4, 0x06, 0x46, 0x83, 0xb0, 0x2d, 0x84
+};
+static const u8 enc_nonce039[] __initconst = {
+ 0xd4, 0xd8, 0x07, 0x34, 0x16, 0x83, 0x82, 0x5b,
+ 0x31, 0xcd, 0x4d, 0x95
+};
+static const u8 enc_key039[] __initconst = {
+ 0x6c, 0xbf, 0xd7, 0x1c, 0x64, 0x5d, 0x18, 0x4c,
+ 0xf5, 0xd2, 0x3c, 0x40, 0x2b, 0xdb, 0x0d, 0x25,
+ 0xec, 0x54, 0x89, 0x8c, 0x8a, 0x02, 0x73, 0xd4,
+ 0x2e, 0xb5, 0xbe, 0x10, 0x9f, 0xdc, 0xb2, 0xac
+};
+
+/* wycheproof - misc */
+static const u8 enc_input040[] __initconst = {
+ 0xd0, 0x96, 0x80, 0x31, 0x81, 0xbe, 0xef, 0x9e,
+ 0x00, 0x8f, 0xf8, 0x5d, 0x5d, 0xdc, 0x38, 0xdd,
+ 0xac, 0xf0, 0xf0, 0x9e, 0xe5, 0xf7, 0xe0, 0x7f,
+ 0x1e, 0x40, 0x79, 0xcb, 0x64, 0xd0, 0xdc, 0x8f,
+ 0x5e, 0x67, 0x11, 0xcd, 0x49, 0x21, 0xa7, 0x88,
+ 0x7d, 0xe7, 0x6e, 0x26, 0x78, 0xfd, 0xc6, 0x76,
+ 0x18, 0xf1, 0x18, 0x55, 0x86, 0xbf, 0xea, 0x9d,
+ 0x4c, 0x68, 0x5d, 0x50, 0xe4, 0xbb, 0x9a, 0x82
+};
+static const u8 enc_output040[] __initconst = {
+ 0x9a, 0x4e, 0xf2, 0x2b, 0x18, 0x16, 0x77, 0xb5,
+ 0x75, 0x5c, 0x08, 0xf7, 0x47, 0xc0, 0xf8, 0xd8,
+ 0xe8, 0xd4, 0xc1, 0x8a, 0x9c, 0xc2, 0x40, 0x5c,
+ 0x12, 0xbb, 0x51, 0xbb, 0x18, 0x72, 0xc8, 0xe8,
+ 0xb8, 0x77, 0x67, 0x8b, 0xec, 0x44, 0x2c, 0xfc,
+ 0xbb, 0x0f, 0xf4, 0x64, 0xa6, 0x4b, 0x74, 0x33,
+ 0x2c, 0xf0, 0x72, 0x89, 0x8c, 0x7e, 0x0e, 0xdd,
+ 0xf6, 0x23, 0x2e, 0xa6, 0xe2, 0x7e, 0xfe, 0x50,
+ 0x9f, 0xf3, 0x42, 0x7a, 0x0f, 0x32, 0xfa, 0x56,
+ 0x6d, 0x9c, 0xa0, 0xa7, 0x8a, 0xef, 0xc0, 0x13
+};
+static const u8 enc_assoc040[] __initconst = { };
+static const u8 enc_nonce040[] __initconst = {
+ 0xd6, 0x10, 0x40, 0xa3, 0x13, 0xed, 0x49, 0x28,
+ 0x23, 0xcc, 0x06, 0x5b
+};
+static const u8 enc_key040[] __initconst = {
+ 0x5b, 0x1d, 0x10, 0x35, 0xc0, 0xb1, 0x7e, 0xe0,
+ 0xb0, 0x44, 0x47, 0x67, 0xf8, 0x0a, 0x25, 0xb8,
+ 0xc1, 0xb7, 0x41, 0xf4, 0xb5, 0x0a, 0x4d, 0x30,
+ 0x52, 0x22, 0x6b, 0xaa, 0x1c, 0x6f, 0xb7, 0x01
+};
+
+/* wycheproof - misc */
+static const u8 enc_input041[] __initconst = {
+ 0x94, 0xee, 0x16, 0x6d, 0x6d, 0x6e, 0xcf, 0x88,
+ 0x32, 0x43, 0x71, 0x36, 0xb4, 0xae, 0x80, 0x5d,
+ 0x42, 0x88, 0x64, 0x35, 0x95, 0x86, 0xd9, 0x19,
+ 0x3a, 0x25, 0x01, 0x62, 0x93, 0xed, 0xba, 0x44,
+ 0x3c, 0x58, 0xe0, 0x7e, 0x7b, 0x71, 0x95, 0xec,
+ 0x5b, 0xd8, 0x45, 0x82, 0xa9, 0xd5, 0x6c, 0x8d,
+ 0x4a, 0x10, 0x8c, 0x7d, 0x7c, 0xe3, 0x4e, 0x6c,
+ 0x6f, 0x8e, 0xa1, 0xbe, 0xc0, 0x56, 0x73, 0x17
+};
+static const u8 enc_output041[] __initconst = {
+ 0x5f, 0xbb, 0xde, 0xcc, 0x34, 0xbe, 0x20, 0x16,
+ 0x14, 0xf6, 0x36, 0x03, 0x1e, 0xeb, 0x42, 0xf1,
+ 0xca, 0xce, 0x3c, 0x79, 0xa1, 0x2c, 0xff, 0xd8,
+ 0x71, 0xee, 0x8e, 0x73, 0x82, 0x0c, 0x82, 0x97,
+ 0x49, 0xf1, 0xab, 0xb4, 0x29, 0x43, 0x67, 0x84,
+ 0x9f, 0xb6, 0xc2, 0xaa, 0x56, 0xbd, 0xa8, 0xa3,
+ 0x07, 0x8f, 0x72, 0x3d, 0x7c, 0x1c, 0x85, 0x20,
+ 0x24, 0xb0, 0x17, 0xb5, 0x89, 0x73, 0xfb, 0x1e,
+ 0x09, 0x26, 0x3d, 0xa7, 0xb4, 0xcb, 0x92, 0x14,
+ 0x52, 0xf9, 0x7d, 0xca, 0x40, 0xf5, 0x80, 0xec
+};
+static const u8 enc_assoc041[] __initconst = {
+ 0x71, 0x93, 0xf6, 0x23, 0x66, 0x33, 0x21, 0xa2
+};
+static const u8 enc_nonce041[] __initconst = {
+ 0xd3, 0x1c, 0x21, 0xab, 0xa1, 0x75, 0xb7, 0x0d,
+ 0xe4, 0xeb, 0xb1, 0x9c
+};
+static const u8 enc_key041[] __initconst = {
+ 0x97, 0xd6, 0x35, 0xc4, 0xf4, 0x75, 0x74, 0xd9,
+ 0x99, 0x8a, 0x90, 0x87, 0x5d, 0xa1, 0xd3, 0xa2,
+ 0x84, 0xb7, 0x55, 0xb2, 0xd3, 0x92, 0x97, 0xa5,
+ 0x72, 0x52, 0x35, 0x19, 0x0e, 0x10, 0xa9, 0x7e
+};
+
+/* wycheproof - misc */
+static const u8 enc_input042[] __initconst = {
+ 0xb4, 0x29, 0xeb, 0x80, 0xfb, 0x8f, 0xe8, 0xba,
+ 0xed, 0xa0, 0xc8, 0x5b, 0x9c, 0x33, 0x34, 0x58,
+ 0xe7, 0xc2, 0x99, 0x2e, 0x55, 0x84, 0x75, 0x06,
+ 0x9d, 0x12, 0xd4, 0x5c, 0x22, 0x21, 0x75, 0x64,
+ 0x12, 0x15, 0x88, 0x03, 0x22, 0x97, 0xef, 0xf5,
+ 0x67, 0x83, 0x74, 0x2a, 0x5f, 0xc2, 0x2d, 0x74,
+ 0x10, 0xff, 0xb2, 0x9d, 0x66, 0x09, 0x86, 0x61,
+ 0xd7, 0x6f, 0x12, 0x6c, 0x3c, 0x27, 0x68, 0x9e,
+ 0x43, 0xb3, 0x72, 0x67, 0xca, 0xc5, 0xa3, 0xa6,
+ 0xd3, 0xab, 0x49, 0xe3, 0x91, 0xda, 0x29, 0xcd,
+ 0x30, 0x54, 0xa5, 0x69, 0x2e, 0x28, 0x07, 0xe4,
+ 0xc3, 0xea, 0x46, 0xc8, 0x76, 0x1d, 0x50, 0xf5,
+ 0x92
+};
+static const u8 enc_output042[] __initconst = {
+ 0xd0, 0x10, 0x2f, 0x6c, 0x25, 0x8b, 0xf4, 0x97,
+ 0x42, 0xce, 0xc3, 0x4c, 0xf2, 0xd0, 0xfe, 0xdf,
+ 0x23, 0xd1, 0x05, 0xfb, 0x4c, 0x84, 0xcf, 0x98,
+ 0x51, 0x5e, 0x1b, 0xc9, 0xa6, 0x4f, 0x8a, 0xd5,
+ 0xbe, 0x8f, 0x07, 0x21, 0xbd, 0xe5, 0x06, 0x45,
+ 0xd0, 0x00, 0x83, 0xc3, 0xa2, 0x63, 0xa3, 0x10,
+ 0x53, 0xb7, 0x60, 0x24, 0x5f, 0x52, 0xae, 0x28,
+ 0x66, 0xa5, 0xec, 0x83, 0xb1, 0x9f, 0x61, 0xbe,
+ 0x1d, 0x30, 0xd5, 0xc5, 0xd9, 0xfe, 0xcc, 0x4c,
+ 0xbb, 0xe0, 0x8f, 0xd3, 0x85, 0x81, 0x3a, 0x2a,
+ 0xa3, 0x9a, 0x00, 0xff, 0x9c, 0x10, 0xf7, 0xf2,
+ 0x37, 0x02, 0xad, 0xd1, 0xe4, 0xb2, 0xff, 0xa3,
+ 0x1c, 0x41, 0x86, 0x5f, 0xc7, 0x1d, 0xe1, 0x2b,
+ 0x19, 0x61, 0x21, 0x27, 0xce, 0x49, 0x99, 0x3b,
+ 0xb0
+};
+static const u8 enc_assoc042[] __initconst = { };
+static const u8 enc_nonce042[] __initconst = {
+ 0x17, 0xc8, 0x6a, 0x8a, 0xbb, 0xb7, 0xe0, 0x03,
+ 0xac, 0xde, 0x27, 0x99
+};
+static const u8 enc_key042[] __initconst = {
+ 0xfe, 0x6e, 0x55, 0xbd, 0xae, 0xd1, 0xf7, 0x28,
+ 0x4c, 0xa5, 0xfc, 0x0f, 0x8c, 0x5f, 0x2b, 0x8d,
+ 0xf5, 0x6d, 0xc0, 0xf4, 0x9e, 0x8c, 0xa6, 0x6a,
+ 0x41, 0x99, 0x5e, 0x78, 0x33, 0x51, 0xf9, 0x01
+};
+
+/* wycheproof - misc */
+static const u8 enc_input043[] __initconst = {
+ 0xce, 0xb5, 0x34, 0xce, 0x50, 0xdc, 0x23, 0xff,
+ 0x63, 0x8a, 0xce, 0x3e, 0xf6, 0x3a, 0xb2, 0xcc,
+ 0x29, 0x73, 0xee, 0xad, 0xa8, 0x07, 0x85, 0xfc,
+ 0x16, 0x5d, 0x06, 0xc2, 0xf5, 0x10, 0x0f, 0xf5,
+ 0xe8, 0xab, 0x28, 0x82, 0xc4, 0x75, 0xaf, 0xcd,
+ 0x05, 0xcc, 0xd4, 0x9f, 0x2e, 0x7d, 0x8f, 0x55,
+ 0xef, 0x3a, 0x72, 0xe3, 0xdc, 0x51, 0xd6, 0x85,
+ 0x2b, 0x8e, 0x6b, 0x9e, 0x7a, 0xec, 0xe5, 0x7b,
+ 0xe6, 0x55, 0x6b, 0x0b, 0x6d, 0x94, 0x13, 0xe3,
+ 0x3f, 0xc5, 0xfc, 0x24, 0xa9, 0xa2, 0x05, 0xad,
+ 0x59, 0x57, 0x4b, 0xb3, 0x9d, 0x94, 0x4a, 0x92,
+ 0xdc, 0x47, 0x97, 0x0d, 0x84, 0xa6, 0xad, 0x31,
+ 0x76
+};
+static const u8 enc_output043[] __initconst = {
+ 0x75, 0x45, 0x39, 0x1b, 0x51, 0xde, 0x01, 0xd5,
+ 0xc5, 0x3d, 0xfa, 0xca, 0x77, 0x79, 0x09, 0x06,
+ 0x3e, 0x58, 0xed, 0xee, 0x4b, 0xb1, 0x22, 0x7e,
+ 0x71, 0x10, 0xac, 0x4d, 0x26, 0x20, 0xc2, 0xae,
+ 0xc2, 0xf8, 0x48, 0xf5, 0x6d, 0xee, 0xb0, 0x37,
+ 0xa8, 0xdc, 0xed, 0x75, 0xaf, 0xa8, 0xa6, 0xc8,
+ 0x90, 0xe2, 0xde, 0xe4, 0x2f, 0x95, 0x0b, 0xb3,
+ 0x3d, 0x9e, 0x24, 0x24, 0xd0, 0x8a, 0x50, 0x5d,
+ 0x89, 0x95, 0x63, 0x97, 0x3e, 0xd3, 0x88, 0x70,
+ 0xf3, 0xde, 0x6e, 0xe2, 0xad, 0xc7, 0xfe, 0x07,
+ 0x2c, 0x36, 0x6c, 0x14, 0xe2, 0xcf, 0x7c, 0xa6,
+ 0x2f, 0xb3, 0xd3, 0x6b, 0xee, 0x11, 0x68, 0x54,
+ 0x61, 0xb7, 0x0d, 0x44, 0xef, 0x8c, 0x66, 0xc5,
+ 0xc7, 0xbb, 0xf1, 0x0d, 0xca, 0xdd, 0x7f, 0xac,
+ 0xf6
+};
+static const u8 enc_assoc043[] __initconst = {
+ 0xa1, 0x1c, 0x40, 0xb6, 0x03, 0x76, 0x73, 0x30
+};
+static const u8 enc_nonce043[] __initconst = {
+ 0x46, 0x36, 0x2f, 0x45, 0xd6, 0x37, 0x9e, 0x63,
+ 0xe5, 0x22, 0x94, 0x60
+};
+static const u8 enc_key043[] __initconst = {
+ 0xaa, 0xbc, 0x06, 0x34, 0x74, 0xe6, 0x5c, 0x4c,
+ 0x3e, 0x9b, 0xdc, 0x48, 0x0d, 0xea, 0x97, 0xb4,
+ 0x51, 0x10, 0xc8, 0x61, 0x88, 0x46, 0xff, 0x6b,
+ 0x15, 0xbd, 0xd2, 0xa4, 0xa5, 0x68, 0x2c, 0x4e
+};
+
+/* wycheproof - misc */
+static const u8 enc_input044[] __initconst = {
+ 0xe5, 0xcc, 0xaa, 0x44, 0x1b, 0xc8, 0x14, 0x68,
+ 0x8f, 0x8f, 0x6e, 0x8f, 0x28, 0xb5, 0x00, 0xb2
+};
+static const u8 enc_output044[] __initconst = {
+ 0x7e, 0x72, 0xf5, 0xa1, 0x85, 0xaf, 0x16, 0xa6,
+ 0x11, 0x92, 0x1b, 0x43, 0x8f, 0x74, 0x9f, 0x0b,
+ 0x12, 0x42, 0xc6, 0x70, 0x73, 0x23, 0x34, 0x02,
+ 0x9a, 0xdf, 0xe1, 0xc5, 0x00, 0x16, 0x51, 0xe4
+};
+static const u8 enc_assoc044[] __initconst = {
+ 0x02
+};
+static const u8 enc_nonce044[] __initconst = {
+ 0x87, 0x34, 0x5f, 0x10, 0x55, 0xfd, 0x9e, 0x21,
+ 0x02, 0xd5, 0x06, 0x56
+};
+static const u8 enc_key044[] __initconst = {
+ 0x7d, 0x00, 0xb4, 0x80, 0x95, 0xad, 0xfa, 0x32,
+ 0x72, 0x05, 0x06, 0x07, 0xb2, 0x64, 0x18, 0x50,
+ 0x02, 0xba, 0x99, 0x95, 0x7c, 0x49, 0x8b, 0xe0,
+ 0x22, 0x77, 0x0f, 0x2c, 0xe2, 0xf3, 0x14, 0x3c
+};
+
+/* wycheproof - misc */
+static const u8 enc_input045[] __initconst = {
+ 0x02, 0xcd, 0xe1, 0x68, 0xfb, 0xa3, 0xf5, 0x44,
+ 0xbb, 0xd0, 0x33, 0x2f, 0x7a, 0xde, 0xad, 0xa8
+};
+static const u8 enc_output045[] __initconst = {
+ 0x85, 0xf2, 0x9a, 0x71, 0x95, 0x57, 0xcd, 0xd1,
+ 0x4d, 0x1f, 0x8f, 0xff, 0xab, 0x6d, 0x9e, 0x60,
+ 0x73, 0x2c, 0xa3, 0x2b, 0xec, 0xd5, 0x15, 0xa1,
+ 0xed, 0x35, 0x3f, 0x54, 0x2e, 0x99, 0x98, 0x58
+};
+static const u8 enc_assoc045[] __initconst = {
+ 0xb6, 0x48
+};
+static const u8 enc_nonce045[] __initconst = {
+ 0x87, 0xa3, 0x16, 0x3e, 0xc0, 0x59, 0x8a, 0xd9,
+ 0x5b, 0x3a, 0xa7, 0x13
+};
+static const u8 enc_key045[] __initconst = {
+ 0x64, 0x32, 0x71, 0x7f, 0x1d, 0xb8, 0x5e, 0x41,
+ 0xac, 0x78, 0x36, 0xbc, 0xe2, 0x51, 0x85, 0xa0,
+ 0x80, 0xd5, 0x76, 0x2b, 0x9e, 0x2b, 0x18, 0x44,
+ 0x4b, 0x6e, 0xc7, 0x2c, 0x3b, 0xd8, 0xe4, 0xdc
+};
+
+/* wycheproof - misc */
+static const u8 enc_input046[] __initconst = {
+ 0x16, 0xdd, 0xd2, 0x3f, 0xf5, 0x3f, 0x3d, 0x23,
+ 0xc0, 0x63, 0x34, 0x48, 0x70, 0x40, 0xeb, 0x47
+};
+static const u8 enc_output046[] __initconst = {
+ 0xc1, 0xb2, 0x95, 0x93, 0x6d, 0x56, 0xfa, 0xda,
+ 0xc0, 0x3e, 0x5f, 0x74, 0x2b, 0xff, 0x73, 0xa1,
+ 0x39, 0xc4, 0x57, 0xdb, 0xab, 0x66, 0x38, 0x2b,
+ 0xab, 0xb3, 0xb5, 0x58, 0x00, 0xcd, 0xa5, 0xb8
+};
+static const u8 enc_assoc046[] __initconst = {
+ 0xbd, 0x4c, 0xd0, 0x2f, 0xc7, 0x50, 0x2b, 0xbd,
+ 0xbd, 0xf6, 0xc9, 0xa3, 0xcb, 0xe8, 0xf0
+};
+static const u8 enc_nonce046[] __initconst = {
+ 0x6f, 0x57, 0x3a, 0xa8, 0x6b, 0xaa, 0x49, 0x2b,
+ 0xa4, 0x65, 0x96, 0xdf
+};
+static const u8 enc_key046[] __initconst = {
+ 0x8e, 0x34, 0xcf, 0x73, 0xd2, 0x45, 0xa1, 0x08,
+ 0x2a, 0x92, 0x0b, 0x86, 0x36, 0x4e, 0xb8, 0x96,
+ 0xc4, 0x94, 0x64, 0x67, 0xbc, 0xb3, 0xd5, 0x89,
+ 0x29, 0xfc, 0xb3, 0x66, 0x90, 0xe6, 0x39, 0x4f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input047[] __initconst = {
+ 0x62, 0x3b, 0x78, 0x50, 0xc3, 0x21, 0xe2, 0xcf,
+ 0x0c, 0x6f, 0xbc, 0xc8, 0xdf, 0xd1, 0xaf, 0xf2
+};
+static const u8 enc_output047[] __initconst = {
+ 0xc8, 0x4c, 0x9b, 0xb7, 0xc6, 0x1c, 0x1b, 0xcb,
+ 0x17, 0x77, 0x2a, 0x1c, 0x50, 0x0c, 0x50, 0x95,
+ 0xdb, 0xad, 0xf7, 0xa5, 0x13, 0x8c, 0xa0, 0x34,
+ 0x59, 0xa2, 0xcd, 0x65, 0x83, 0x1e, 0x09, 0x2f
+};
+static const u8 enc_assoc047[] __initconst = {
+ 0x89, 0xcc, 0xe9, 0xfb, 0x47, 0x44, 0x1d, 0x07,
+ 0xe0, 0x24, 0x5a, 0x66, 0xfe, 0x8b, 0x77, 0x8b
+};
+static const u8 enc_nonce047[] __initconst = {
+ 0x1a, 0x65, 0x18, 0xf0, 0x2e, 0xde, 0x1d, 0xa6,
+ 0x80, 0x92, 0x66, 0xd9
+};
+static const u8 enc_key047[] __initconst = {
+ 0xcb, 0x55, 0x75, 0xf5, 0xc7, 0xc4, 0x5c, 0x91,
+ 0xcf, 0x32, 0x0b, 0x13, 0x9f, 0xb5, 0x94, 0x23,
+ 0x75, 0x60, 0xd0, 0xa3, 0xe6, 0xf8, 0x65, 0xa6,
+ 0x7d, 0x4f, 0x63, 0x3f, 0x2c, 0x08, 0xf0, 0x16
+};
+
+/* wycheproof - misc */
+static const u8 enc_input048[] __initconst = {
+ 0x87, 0xb3, 0xa4, 0xd7, 0xb2, 0x6d, 0x8d, 0x32,
+ 0x03, 0xa0, 0xde, 0x1d, 0x64, 0xef, 0x82, 0xe3
+};
+static const u8 enc_output048[] __initconst = {
+ 0x94, 0xbc, 0x80, 0x62, 0x1e, 0xd1, 0xe7, 0x1b,
+ 0x1f, 0xd2, 0xb5, 0xc3, 0xa1, 0x5e, 0x35, 0x68,
+ 0x33, 0x35, 0x11, 0x86, 0x17, 0x96, 0x97, 0x84,
+ 0x01, 0x59, 0x8b, 0x96, 0x37, 0x22, 0xf5, 0xb3
+};
+static const u8 enc_assoc048[] __initconst = {
+ 0xd1, 0x9f, 0x2d, 0x98, 0x90, 0x95, 0xf7, 0xab,
+ 0x03, 0xa5, 0xfd, 0xe8, 0x44, 0x16, 0xe0, 0x0c,
+ 0x0e
+};
+static const u8 enc_nonce048[] __initconst = {
+ 0x56, 0x4d, 0xee, 0x49, 0xab, 0x00, 0xd2, 0x40,
+ 0xfc, 0x10, 0x68, 0xc3
+};
+static const u8 enc_key048[] __initconst = {
+ 0xa5, 0x56, 0x9e, 0x72, 0x9a, 0x69, 0xb2, 0x4b,
+ 0xa6, 0xe0, 0xff, 0x15, 0xc4, 0x62, 0x78, 0x97,
+ 0x43, 0x68, 0x24, 0xc9, 0x41, 0xe9, 0xd0, 0x0b,
+ 0x2e, 0x93, 0xfd, 0xdc, 0x4b, 0xa7, 0x76, 0x57
+};
+
+/* wycheproof - misc */
+static const u8 enc_input049[] __initconst = {
+ 0xe6, 0x01, 0xb3, 0x85, 0x57, 0x79, 0x7d, 0xa2,
+ 0xf8, 0xa4, 0x10, 0x6a, 0x08, 0x9d, 0x1d, 0xa6
+};
+static const u8 enc_output049[] __initconst = {
+ 0x29, 0x9b, 0x5d, 0x3f, 0x3d, 0x03, 0xc0, 0x87,
+ 0x20, 0x9a, 0x16, 0xe2, 0x85, 0x14, 0x31, 0x11,
+ 0x4b, 0x45, 0x4e, 0xd1, 0x98, 0xde, 0x11, 0x7e,
+ 0x83, 0xec, 0x49, 0xfa, 0x8d, 0x85, 0x08, 0xd6
+};
+static const u8 enc_assoc049[] __initconst = {
+ 0x5e, 0x64, 0x70, 0xfa, 0xcd, 0x99, 0xc1, 0xd8,
+ 0x1e, 0x37, 0xcd, 0x44, 0x01, 0x5f, 0xe1, 0x94,
+ 0x80, 0xa2, 0xa4, 0xd3, 0x35, 0x2a, 0x4f, 0xf5,
+ 0x60, 0xc0, 0x64, 0x0f, 0xdb, 0xda
+};
+static const u8 enc_nonce049[] __initconst = {
+ 0xdf, 0x87, 0x13, 0xe8, 0x7e, 0xc3, 0xdb, 0xcf,
+ 0xad, 0x14, 0xd5, 0x3e
+};
+static const u8 enc_key049[] __initconst = {
+ 0x56, 0x20, 0x74, 0x65, 0xb4, 0xe4, 0x8e, 0x6d,
+ 0x04, 0x63, 0x0f, 0x4a, 0x42, 0xf3, 0x5c, 0xfc,
+ 0x16, 0x3a, 0xb2, 0x89, 0xc2, 0x2a, 0x2b, 0x47,
+ 0x84, 0xf6, 0xf9, 0x29, 0x03, 0x30, 0xbe, 0xe0
+};
+
+/* wycheproof - misc */
+static const u8 enc_input050[] __initconst = {
+ 0xdc, 0x9e, 0x9e, 0xaf, 0x11, 0xe3, 0x14, 0x18,
+ 0x2d, 0xf6, 0xa4, 0xeb, 0xa1, 0x7a, 0xec, 0x9c
+};
+static const u8 enc_output050[] __initconst = {
+ 0x60, 0x5b, 0xbf, 0x90, 0xae, 0xb9, 0x74, 0xf6,
+ 0x60, 0x2b, 0xc7, 0x78, 0x05, 0x6f, 0x0d, 0xca,
+ 0x38, 0xea, 0x23, 0xd9, 0x90, 0x54, 0xb4, 0x6b,
+ 0x42, 0xff, 0xe0, 0x04, 0x12, 0x9d, 0x22, 0x04
+};
+static const u8 enc_assoc050[] __initconst = {
+ 0xba, 0x44, 0x6f, 0x6f, 0x9a, 0x0c, 0xed, 0x22,
+ 0x45, 0x0f, 0xeb, 0x10, 0x73, 0x7d, 0x90, 0x07,
+ 0xfd, 0x69, 0xab, 0xc1, 0x9b, 0x1d, 0x4d, 0x90,
+ 0x49, 0xa5, 0x55, 0x1e, 0x86, 0xec, 0x2b, 0x37
+};
+static const u8 enc_nonce050[] __initconst = {
+ 0x8d, 0xf4, 0xb1, 0x5a, 0x88, 0x8c, 0x33, 0x28,
+ 0x6a, 0x7b, 0x76, 0x51
+};
+static const u8 enc_key050[] __initconst = {
+ 0x39, 0x37, 0x98, 0x6a, 0xf8, 0x6d, 0xaf, 0xc1,
+ 0xba, 0x0c, 0x46, 0x72, 0xd8, 0xab, 0xc4, 0x6c,
+ 0x20, 0x70, 0x62, 0x68, 0x2d, 0x9c, 0x26, 0x4a,
+ 0xb0, 0x6d, 0x6c, 0x58, 0x07, 0x20, 0x51, 0x30
+};
+
+/* wycheproof - misc */
+static const u8 enc_input051[] __initconst = {
+ 0x81, 0xce, 0x84, 0xed, 0xe9, 0xb3, 0x58, 0x59,
+ 0xcc, 0x8c, 0x49, 0xa8, 0xf6, 0xbe, 0x7d, 0xc6
+};
+static const u8 enc_output051[] __initconst = {
+ 0x7b, 0x7c, 0xe0, 0xd8, 0x24, 0x80, 0x9a, 0x70,
+ 0xde, 0x32, 0x56, 0x2c, 0xcf, 0x2c, 0x2b, 0xbd,
+ 0x15, 0xd4, 0x4a, 0x00, 0xce, 0x0d, 0x19, 0xb4,
+ 0x23, 0x1f, 0x92, 0x1e, 0x22, 0xbc, 0x0a, 0x43
+};
+static const u8 enc_assoc051[] __initconst = {
+ 0xd4, 0x1a, 0x82, 0x8d, 0x5e, 0x71, 0x82, 0x92,
+ 0x47, 0x02, 0x19, 0x05, 0x40, 0x2e, 0xa2, 0x57,
+ 0xdc, 0xcb, 0xc3, 0xb8, 0x0f, 0xcd, 0x56, 0x75,
+ 0x05, 0x6b, 0x68, 0xbb, 0x59, 0xe6, 0x2e, 0x88,
+ 0x73
+};
+static const u8 enc_nonce051[] __initconst = {
+ 0xbe, 0x40, 0xe5, 0xf1, 0xa1, 0x18, 0x17, 0xa0,
+ 0xa8, 0xfa, 0x89, 0x49
+};
+static const u8 enc_key051[] __initconst = {
+ 0x36, 0x37, 0x2a, 0xbc, 0xdb, 0x78, 0xe0, 0x27,
+ 0x96, 0x46, 0xac, 0x3d, 0x17, 0x6b, 0x96, 0x74,
+ 0xe9, 0x15, 0x4e, 0xec, 0xf0, 0xd5, 0x46, 0x9c,
+ 0x65, 0x1e, 0xc7, 0xe1, 0x6b, 0x4c, 0x11, 0x99
+};
+
+/* wycheproof - misc */
+static const u8 enc_input052[] __initconst = {
+ 0xa6, 0x67, 0x47, 0xc8, 0x9e, 0x85, 0x7a, 0xf3,
+ 0xa1, 0x8e, 0x2c, 0x79, 0x50, 0x00, 0x87, 0xed
+};
+static const u8 enc_output052[] __initconst = {
+ 0xca, 0x82, 0xbf, 0xf3, 0xe2, 0xf3, 0x10, 0xcc,
+ 0xc9, 0x76, 0x67, 0x2c, 0x44, 0x15, 0xe6, 0x9b,
+ 0x57, 0x63, 0x8c, 0x62, 0xa5, 0xd8, 0x5d, 0xed,
+ 0x77, 0x4f, 0x91, 0x3c, 0x81, 0x3e, 0xa0, 0x32
+};
+static const u8 enc_assoc052[] __initconst = {
+ 0x3f, 0x2d, 0xd4, 0x9b, 0xbf, 0x09, 0xd6, 0x9a,
+ 0x78, 0xa3, 0xd8, 0x0e, 0xa2, 0x56, 0x66, 0x14,
+ 0xfc, 0x37, 0x94, 0x74, 0x19, 0x6c, 0x1a, 0xae,
+ 0x84, 0x58, 0x3d, 0xa7, 0x3d, 0x7f, 0xf8, 0x5c,
+ 0x6f, 0x42, 0xca, 0x42, 0x05, 0x6a, 0x97, 0x92,
+ 0xcc, 0x1b, 0x9f, 0xb3, 0xc7, 0xd2, 0x61
+};
+static const u8 enc_nonce052[] __initconst = {
+ 0x84, 0xc8, 0x7d, 0xae, 0x4e, 0xee, 0x27, 0x73,
+ 0x0e, 0xc3, 0x5d, 0x12
+};
+static const u8 enc_key052[] __initconst = {
+ 0x9f, 0x14, 0x79, 0xed, 0x09, 0x7d, 0x7f, 0xe5,
+ 0x29, 0xc1, 0x1f, 0x2f, 0x5a, 0xdd, 0x9a, 0xaf,
+ 0xf4, 0xa1, 0xca, 0x0b, 0x68, 0x99, 0x7a, 0x2c,
+ 0xb7, 0xf7, 0x97, 0x49, 0xbd, 0x90, 0xaa, 0xf4
+};
+
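+/*
+ * The vectors below switch to an 8-byte nonce, which the selftest presumably
+ * routes through the library's 64-bit-nonce chacha20poly1305_encrypt() entry
+ * point rather than the XChaCha path. Their plaintexts appear chosen so that
+ * whole ciphertext blocks collapse to constant 0x00/0xff/0x80/0x7f patterns,
+ * exercising Poly1305 tag computation over highly structured ciphertext.
+ */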
+/* wycheproof - misc */
+static const u8 enc_input053[] __initconst = {
+ 0x25, 0x6d, 0x40, 0x88, 0x80, 0x94, 0x17, 0x83,
+ 0x55, 0xd3, 0x04, 0x84, 0x64, 0x43, 0xfe, 0xe8,
+ 0xdf, 0x99, 0x47, 0x03, 0x03, 0xfb, 0x3b, 0x7b,
+ 0x80, 0xe0, 0x30, 0xbe, 0xeb, 0xd3, 0x29, 0xbe
+};
+static const u8 enc_output053[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xe6, 0xd3, 0xd7, 0x32, 0x4a, 0x1c, 0xbb, 0xa7,
+ 0x77, 0xbb, 0xb0, 0xec, 0xdd, 0xa3, 0x78, 0x07
+};
+static const u8 enc_assoc053[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+static const u8 enc_nonce053[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
+};
+static const u8 enc_key053[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input054[] __initconst = {
+ 0x25, 0x6d, 0x40, 0x88, 0x80, 0x94, 0x17, 0x83,
+ 0x55, 0xd3, 0x04, 0x84, 0x64, 0x43, 0xfe, 0xe8,
+ 0xdf, 0x99, 0x47, 0x03, 0x03, 0xfb, 0x3b, 0x7b,
+ 0x80, 0xe0, 0x30, 0xbe, 0xeb, 0xd3, 0x29, 0xbe,
+ 0xe3, 0xbc, 0xdb, 0x5b, 0x1e, 0xde, 0xfc, 0xfe,
+ 0x8b, 0xcd, 0xa1, 0xb6, 0xa1, 0x5c, 0x8c, 0x2b,
+ 0x08, 0x69, 0xff, 0xd2, 0xec, 0x5e, 0x26, 0xe5,
+ 0x53, 0xb7, 0xb2, 0x27, 0xfe, 0x87, 0xfd, 0xbd
+};
+static const u8 enc_output054[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x06, 0x2d, 0xe6, 0x79, 0x5f, 0x27, 0x4f, 0xd2,
+ 0xa3, 0x05, 0xd7, 0x69, 0x80, 0xbc, 0x9c, 0xce
+};
+static const u8 enc_assoc054[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+static const u8 enc_nonce054[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
+};
+static const u8 enc_key054[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input055[] __initconst = {
+ 0x25, 0x6d, 0x40, 0x88, 0x80, 0x94, 0x17, 0x83,
+ 0x55, 0xd3, 0x04, 0x84, 0x64, 0x43, 0xfe, 0xe8,
+ 0xdf, 0x99, 0x47, 0x03, 0x03, 0xfb, 0x3b, 0x7b,
+ 0x80, 0xe0, 0x30, 0xbe, 0xeb, 0xd3, 0x29, 0xbe,
+ 0xe3, 0xbc, 0xdb, 0x5b, 0x1e, 0xde, 0xfc, 0xfe,
+ 0x8b, 0xcd, 0xa1, 0xb6, 0xa1, 0x5c, 0x8c, 0x2b,
+ 0x08, 0x69, 0xff, 0xd2, 0xec, 0x5e, 0x26, 0xe5,
+ 0x53, 0xb7, 0xb2, 0x27, 0xfe, 0x87, 0xfd, 0xbd,
+ 0x7a, 0xda, 0x44, 0x42, 0x42, 0x69, 0xbf, 0xfa,
+ 0x55, 0x27, 0xf2, 0x70, 0xac, 0xf6, 0x85, 0x02,
+ 0xb7, 0x4c, 0x5a, 0xe2, 0xe6, 0x0c, 0x05, 0x80,
+ 0x98, 0x1a, 0x49, 0x38, 0x45, 0x93, 0x92, 0xc4,
+ 0x9b, 0xb2, 0xf2, 0x84, 0xb6, 0x46, 0xef, 0xc7,
+ 0xf3, 0xf0, 0xb1, 0x36, 0x1d, 0xc3, 0x48, 0xed,
+ 0x77, 0xd3, 0x0b, 0xc5, 0x76, 0x92, 0xed, 0x38,
+ 0xfb, 0xac, 0x01, 0x88, 0x38, 0x04, 0x88, 0xc7
+};
+static const u8 enc_output055[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xd8, 0xb4, 0x79, 0x02, 0xba, 0xae, 0xaf, 0xb3,
+ 0x42, 0x03, 0x05, 0x15, 0x29, 0xaf, 0x28, 0x2e
+};
+static const u8 enc_assoc055[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+static const u8 enc_nonce055[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
+};
+static const u8 enc_key055[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input056[] __initconst = {
+ 0xda, 0x92, 0xbf, 0x77, 0x7f, 0x6b, 0xe8, 0x7c,
+ 0xaa, 0x2c, 0xfb, 0x7b, 0x9b, 0xbc, 0x01, 0x17,
+ 0x20, 0x66, 0xb8, 0xfc, 0xfc, 0x04, 0xc4, 0x84,
+ 0x7f, 0x1f, 0xcf, 0x41, 0x14, 0x2c, 0xd6, 0x41
+};
+static const u8 enc_output056[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xb3, 0x89, 0x1c, 0x84, 0x9c, 0xb5, 0x2c, 0x27,
+ 0x74, 0x7e, 0xdf, 0xcf, 0x31, 0x21, 0x3b, 0xb6
+};
+static const u8 enc_assoc056[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce056[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
+};
+static const u8 enc_key056[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input057[] __initconst = {
+ 0xda, 0x92, 0xbf, 0x77, 0x7f, 0x6b, 0xe8, 0x7c,
+ 0xaa, 0x2c, 0xfb, 0x7b, 0x9b, 0xbc, 0x01, 0x17,
+ 0x20, 0x66, 0xb8, 0xfc, 0xfc, 0x04, 0xc4, 0x84,
+ 0x7f, 0x1f, 0xcf, 0x41, 0x14, 0x2c, 0xd6, 0x41,
+ 0x1c, 0x43, 0x24, 0xa4, 0xe1, 0x21, 0x03, 0x01,
+ 0x74, 0x32, 0x5e, 0x49, 0x5e, 0xa3, 0x73, 0xd4,
+ 0xf7, 0x96, 0x00, 0x2d, 0x13, 0xa1, 0xd9, 0x1a,
+ 0xac, 0x48, 0x4d, 0xd8, 0x01, 0x78, 0x02, 0x42
+};
+static const u8 enc_output057[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xf0, 0xc1, 0x2d, 0x26, 0xef, 0x03, 0x02, 0x9b,
+ 0x62, 0xc0, 0x08, 0xda, 0x27, 0xc5, 0xdc, 0x68
+};
+static const u8 enc_assoc057[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce057[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
+};
+static const u8 enc_key057[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input058[] __initconst = {
+ 0xda, 0x92, 0xbf, 0x77, 0x7f, 0x6b, 0xe8, 0x7c,
+ 0xaa, 0x2c, 0xfb, 0x7b, 0x9b, 0xbc, 0x01, 0x17,
+ 0x20, 0x66, 0xb8, 0xfc, 0xfc, 0x04, 0xc4, 0x84,
+ 0x7f, 0x1f, 0xcf, 0x41, 0x14, 0x2c, 0xd6, 0x41,
+ 0x1c, 0x43, 0x24, 0xa4, 0xe1, 0x21, 0x03, 0x01,
+ 0x74, 0x32, 0x5e, 0x49, 0x5e, 0xa3, 0x73, 0xd4,
+ 0xf7, 0x96, 0x00, 0x2d, 0x13, 0xa1, 0xd9, 0x1a,
+ 0xac, 0x48, 0x4d, 0xd8, 0x01, 0x78, 0x02, 0x42,
+ 0x85, 0x25, 0xbb, 0xbd, 0xbd, 0x96, 0x40, 0x05,
+ 0xaa, 0xd8, 0x0d, 0x8f, 0x53, 0x09, 0x7a, 0xfd,
+ 0x48, 0xb3, 0xa5, 0x1d, 0x19, 0xf3, 0xfa, 0x7f,
+ 0x67, 0xe5, 0xb6, 0xc7, 0xba, 0x6c, 0x6d, 0x3b,
+ 0x64, 0x4d, 0x0d, 0x7b, 0x49, 0xb9, 0x10, 0x38,
+ 0x0c, 0x0f, 0x4e, 0xc9, 0xe2, 0x3c, 0xb7, 0x12,
+ 0x88, 0x2c, 0xf4, 0x3a, 0x89, 0x6d, 0x12, 0xc7,
+ 0x04, 0x53, 0xfe, 0x77, 0xc7, 0xfb, 0x77, 0x38
+};
+static const u8 enc_output058[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xee, 0x65, 0x78, 0x30, 0x01, 0xc2, 0x56, 0x91,
+ 0xfa, 0x28, 0xd0, 0xf5, 0xf1, 0xc1, 0xd7, 0x62
+};
+static const u8 enc_assoc058[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce058[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
+};
+static const u8 enc_key058[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input059[] __initconst = {
+ 0x25, 0x6d, 0x40, 0x08, 0x80, 0x94, 0x17, 0x03,
+ 0x55, 0xd3, 0x04, 0x04, 0x64, 0x43, 0xfe, 0x68,
+ 0xdf, 0x99, 0x47, 0x83, 0x03, 0xfb, 0x3b, 0xfb,
+ 0x80, 0xe0, 0x30, 0x3e, 0xeb, 0xd3, 0x29, 0x3e
+};
+static const u8 enc_output059[] __initconst = {
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x79, 0xba, 0x7a, 0x29, 0xf5, 0xa7, 0xbb, 0x75,
+ 0x79, 0x7a, 0xf8, 0x7a, 0x61, 0x01, 0x29, 0xa4
+};
+static const u8 enc_assoc059[] __initconst = {
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80
+};
+static const u8 enc_nonce059[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
+};
+static const u8 enc_key059[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input060[] __initconst = {
+ 0x25, 0x6d, 0x40, 0x08, 0x80, 0x94, 0x17, 0x03,
+ 0x55, 0xd3, 0x04, 0x04, 0x64, 0x43, 0xfe, 0x68,
+ 0xdf, 0x99, 0x47, 0x83, 0x03, 0xfb, 0x3b, 0xfb,
+ 0x80, 0xe0, 0x30, 0x3e, 0xeb, 0xd3, 0x29, 0x3e,
+ 0xe3, 0xbc, 0xdb, 0xdb, 0x1e, 0xde, 0xfc, 0x7e,
+ 0x8b, 0xcd, 0xa1, 0x36, 0xa1, 0x5c, 0x8c, 0xab,
+ 0x08, 0x69, 0xff, 0x52, 0xec, 0x5e, 0x26, 0x65,
+ 0x53, 0xb7, 0xb2, 0xa7, 0xfe, 0x87, 0xfd, 0x3d
+};
+static const u8 enc_output060[] __initconst = {
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x36, 0xb1, 0x74, 0x38, 0x19, 0xe1, 0xb9, 0xba,
+ 0x15, 0x51, 0xe8, 0xed, 0x92, 0x2a, 0x95, 0x9a
+};
+static const u8 enc_assoc060[] __initconst = {
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80
+};
+static const u8 enc_nonce060[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
+};
+static const u8 enc_key060[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input061[] __initconst = {
+ 0x25, 0x6d, 0x40, 0x08, 0x80, 0x94, 0x17, 0x03,
+ 0x55, 0xd3, 0x04, 0x04, 0x64, 0x43, 0xfe, 0x68,
+ 0xdf, 0x99, 0x47, 0x83, 0x03, 0xfb, 0x3b, 0xfb,
+ 0x80, 0xe0, 0x30, 0x3e, 0xeb, 0xd3, 0x29, 0x3e,
+ 0xe3, 0xbc, 0xdb, 0xdb, 0x1e, 0xde, 0xfc, 0x7e,
+ 0x8b, 0xcd, 0xa1, 0x36, 0xa1, 0x5c, 0x8c, 0xab,
+ 0x08, 0x69, 0xff, 0x52, 0xec, 0x5e, 0x26, 0x65,
+ 0x53, 0xb7, 0xb2, 0xa7, 0xfe, 0x87, 0xfd, 0x3d,
+ 0x7a, 0xda, 0x44, 0xc2, 0x42, 0x69, 0xbf, 0x7a,
+ 0x55, 0x27, 0xf2, 0xf0, 0xac, 0xf6, 0x85, 0x82,
+ 0xb7, 0x4c, 0x5a, 0x62, 0xe6, 0x0c, 0x05, 0x00,
+ 0x98, 0x1a, 0x49, 0xb8, 0x45, 0x93, 0x92, 0x44,
+ 0x9b, 0xb2, 0xf2, 0x04, 0xb6, 0x46, 0xef, 0x47,
+ 0xf3, 0xf0, 0xb1, 0xb6, 0x1d, 0xc3, 0x48, 0x6d,
+ 0x77, 0xd3, 0x0b, 0x45, 0x76, 0x92, 0xed, 0xb8,
+ 0xfb, 0xac, 0x01, 0x08, 0x38, 0x04, 0x88, 0x47
+};
+static const u8 enc_output061[] __initconst = {
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0xfe, 0xac, 0x49, 0x55, 0x55, 0x4e, 0x80, 0x6f,
+ 0x3a, 0x19, 0x02, 0xe2, 0x44, 0x32, 0xc0, 0x8a
+};
+static const u8 enc_assoc061[] __initconst = {
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80
+};
+static const u8 enc_nonce061[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
+};
+static const u8 enc_key061[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input062[] __initconst = {
+ 0xda, 0x92, 0xbf, 0xf7, 0x7f, 0x6b, 0xe8, 0xfc,
+ 0xaa, 0x2c, 0xfb, 0xfb, 0x9b, 0xbc, 0x01, 0x97,
+ 0x20, 0x66, 0xb8, 0x7c, 0xfc, 0x04, 0xc4, 0x04,
+ 0x7f, 0x1f, 0xcf, 0xc1, 0x14, 0x2c, 0xd6, 0xc1
+};
+static const u8 enc_output062[] __initconst = {
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0x20, 0xa3, 0x79, 0x8d, 0xf1, 0x29, 0x2c, 0x59,
+ 0x72, 0xbf, 0x97, 0x41, 0xae, 0xc3, 0x8a, 0x19
+};
+static const u8 enc_assoc062[] __initconst = {
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f
+};
+static const u8 enc_nonce062[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
+};
+static const u8 enc_key062[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input063[] __initconst = {
+ 0xda, 0x92, 0xbf, 0xf7, 0x7f, 0x6b, 0xe8, 0xfc,
+ 0xaa, 0x2c, 0xfb, 0xfb, 0x9b, 0xbc, 0x01, 0x97,
+ 0x20, 0x66, 0xb8, 0x7c, 0xfc, 0x04, 0xc4, 0x04,
+ 0x7f, 0x1f, 0xcf, 0xc1, 0x14, 0x2c, 0xd6, 0xc1,
+ 0x1c, 0x43, 0x24, 0x24, 0xe1, 0x21, 0x03, 0x81,
+ 0x74, 0x32, 0x5e, 0xc9, 0x5e, 0xa3, 0x73, 0x54,
+ 0xf7, 0x96, 0x00, 0xad, 0x13, 0xa1, 0xd9, 0x9a,
+ 0xac, 0x48, 0x4d, 0x58, 0x01, 0x78, 0x02, 0xc2
+};
+static const u8 enc_output063[] __initconst = {
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xc0, 0x3d, 0x9f, 0x67, 0x35, 0x4a, 0x97, 0xb2,
+ 0xf0, 0x74, 0xf7, 0x55, 0x15, 0x57, 0xe4, 0x9c
+};
+static const u8 enc_assoc063[] __initconst = {
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f
+};
+static const u8 enc_nonce063[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
+};
+static const u8 enc_key063[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input064[] __initconst = {
+ 0xda, 0x92, 0xbf, 0xf7, 0x7f, 0x6b, 0xe8, 0xfc,
+ 0xaa, 0x2c, 0xfb, 0xfb, 0x9b, 0xbc, 0x01, 0x97,
+ 0x20, 0x66, 0xb8, 0x7c, 0xfc, 0x04, 0xc4, 0x04,
+ 0x7f, 0x1f, 0xcf, 0xc1, 0x14, 0x2c, 0xd6, 0xc1,
+ 0x1c, 0x43, 0x24, 0x24, 0xe1, 0x21, 0x03, 0x81,
+ 0x74, 0x32, 0x5e, 0xc9, 0x5e, 0xa3, 0x73, 0x54,
+ 0xf7, 0x96, 0x00, 0xad, 0x13, 0xa1, 0xd9, 0x9a,
+ 0xac, 0x48, 0x4d, 0x58, 0x01, 0x78, 0x02, 0xc2,
+ 0x85, 0x25, 0xbb, 0x3d, 0xbd, 0x96, 0x40, 0x85,
+ 0xaa, 0xd8, 0x0d, 0x0f, 0x53, 0x09, 0x7a, 0x7d,
+ 0x48, 0xb3, 0xa5, 0x9d, 0x19, 0xf3, 0xfa, 0xff,
+ 0x67, 0xe5, 0xb6, 0x47, 0xba, 0x6c, 0x6d, 0xbb,
+ 0x64, 0x4d, 0x0d, 0xfb, 0x49, 0xb9, 0x10, 0xb8,
+ 0x0c, 0x0f, 0x4e, 0x49, 0xe2, 0x3c, 0xb7, 0x92,
+ 0x88, 0x2c, 0xf4, 0xba, 0x89, 0x6d, 0x12, 0x47,
+ 0x04, 0x53, 0xfe, 0xf7, 0xc7, 0xfb, 0x77, 0xb8
+};
+static const u8 enc_output064[] __initconst = {
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xc8, 0x6d, 0xa8, 0xdd, 0x65, 0x22, 0x86, 0xd5,
+ 0x02, 0x13, 0xd3, 0x28, 0xd6, 0x3e, 0x40, 0x06
+};
+static const u8 enc_assoc064[] __initconst = {
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f
+};
+static const u8 enc_nonce064[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
+};
+static const u8 enc_key064[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input065[] __initconst = {
+ 0x5a, 0x92, 0xbf, 0x77, 0xff, 0x6b, 0xe8, 0x7c,
+ 0x2a, 0x2c, 0xfb, 0x7b, 0x1b, 0xbc, 0x01, 0x17,
+ 0xa0, 0x66, 0xb8, 0xfc, 0x7c, 0x04, 0xc4, 0x84,
+ 0xff, 0x1f, 0xcf, 0x41, 0x94, 0x2c, 0xd6, 0x41
+};
+static const u8 enc_output065[] __initconst = {
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0xbe, 0xde, 0x90, 0x83, 0xce, 0xb3, 0x6d, 0xdf,
+ 0xe5, 0xfa, 0x81, 0x1f, 0x95, 0x47, 0x1c, 0x67
+};
+static const u8 enc_assoc065[] __initconst = {
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce065[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
+};
+static const u8 enc_key065[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input066[] __initconst = {
+ 0x5a, 0x92, 0xbf, 0x77, 0xff, 0x6b, 0xe8, 0x7c,
+ 0x2a, 0x2c, 0xfb, 0x7b, 0x1b, 0xbc, 0x01, 0x17,
+ 0xa0, 0x66, 0xb8, 0xfc, 0x7c, 0x04, 0xc4, 0x84,
+ 0xff, 0x1f, 0xcf, 0x41, 0x94, 0x2c, 0xd6, 0x41,
+ 0x9c, 0x43, 0x24, 0xa4, 0x61, 0x21, 0x03, 0x01,
+ 0xf4, 0x32, 0x5e, 0x49, 0xde, 0xa3, 0x73, 0xd4,
+ 0x77, 0x96, 0x00, 0x2d, 0x93, 0xa1, 0xd9, 0x1a,
+ 0x2c, 0x48, 0x4d, 0xd8, 0x81, 0x78, 0x02, 0x42
+};
+static const u8 enc_output066[] __initconst = {
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x30, 0x08, 0x74, 0xbb, 0x06, 0x92, 0xb6, 0x89,
+ 0xde, 0xad, 0x9a, 0xe1, 0x5b, 0x06, 0x73, 0x90
+};
+static const u8 enc_assoc066[] __initconst = {
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce066[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
+};
+static const u8 enc_key066[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input067[] __initconst = {
+ 0x5a, 0x92, 0xbf, 0x77, 0xff, 0x6b, 0xe8, 0x7c,
+ 0x2a, 0x2c, 0xfb, 0x7b, 0x1b, 0xbc, 0x01, 0x17,
+ 0xa0, 0x66, 0xb8, 0xfc, 0x7c, 0x04, 0xc4, 0x84,
+ 0xff, 0x1f, 0xcf, 0x41, 0x94, 0x2c, 0xd6, 0x41,
+ 0x9c, 0x43, 0x24, 0xa4, 0x61, 0x21, 0x03, 0x01,
+ 0xf4, 0x32, 0x5e, 0x49, 0xde, 0xa3, 0x73, 0xd4,
+ 0x77, 0x96, 0x00, 0x2d, 0x93, 0xa1, 0xd9, 0x1a,
+ 0x2c, 0x48, 0x4d, 0xd8, 0x81, 0x78, 0x02, 0x42,
+ 0x05, 0x25, 0xbb, 0xbd, 0x3d, 0x96, 0x40, 0x05,
+ 0x2a, 0xd8, 0x0d, 0x8f, 0xd3, 0x09, 0x7a, 0xfd,
+ 0xc8, 0xb3, 0xa5, 0x1d, 0x99, 0xf3, 0xfa, 0x7f,
+ 0xe7, 0xe5, 0xb6, 0xc7, 0x3a, 0x6c, 0x6d, 0x3b,
+ 0xe4, 0x4d, 0x0d, 0x7b, 0xc9, 0xb9, 0x10, 0x38,
+ 0x8c, 0x0f, 0x4e, 0xc9, 0x62, 0x3c, 0xb7, 0x12,
+ 0x08, 0x2c, 0xf4, 0x3a, 0x09, 0x6d, 0x12, 0xc7,
+ 0x84, 0x53, 0xfe, 0x77, 0x47, 0xfb, 0x77, 0x38
+};
+static const u8 enc_output067[] __initconst = {
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x99, 0xca, 0xd8, 0x5f, 0x45, 0xca, 0x40, 0x94,
+ 0x2d, 0x0d, 0x4d, 0x5e, 0x95, 0x0a, 0xde, 0x22
+};
+static const u8 enc_assoc067[] __initconst = {
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce067[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
+};
+static const u8 enc_key067[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input068[] __initconst = {
+ 0x25, 0x6d, 0x40, 0x88, 0x7f, 0x6b, 0xe8, 0x7c,
+ 0x55, 0xd3, 0x04, 0x84, 0x9b, 0xbc, 0x01, 0x17,
+ 0xdf, 0x99, 0x47, 0x03, 0xfc, 0x04, 0xc4, 0x84,
+ 0x80, 0xe0, 0x30, 0xbe, 0x14, 0x2c, 0xd6, 0x41
+};
+static const u8 enc_output068[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x8b, 0xbe, 0x14, 0x52, 0x72, 0xe7, 0xc2, 0xd9,
+ 0xa1, 0x89, 0x1a, 0x3a, 0xb0, 0x98, 0x3d, 0x9d
+};
+static const u8 enc_assoc068[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce068[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
+};
+static const u8 enc_key068[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input069[] __initconst = {
+ 0x25, 0x6d, 0x40, 0x88, 0x7f, 0x6b, 0xe8, 0x7c,
+ 0x55, 0xd3, 0x04, 0x84, 0x9b, 0xbc, 0x01, 0x17,
+ 0xdf, 0x99, 0x47, 0x03, 0xfc, 0x04, 0xc4, 0x84,
+ 0x80, 0xe0, 0x30, 0xbe, 0x14, 0x2c, 0xd6, 0x41,
+ 0xe3, 0xbc, 0xdb, 0x5b, 0xe1, 0x21, 0x03, 0x01,
+ 0x8b, 0xcd, 0xa1, 0xb6, 0x5e, 0xa3, 0x73, 0xd4,
+ 0x08, 0x69, 0xff, 0xd2, 0x13, 0xa1, 0xd9, 0x1a,
+ 0x53, 0xb7, 0xb2, 0x27, 0x01, 0x78, 0x02, 0x42
+};
+static const u8 enc_output069[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x3b, 0x41, 0x86, 0x19, 0x13, 0xa8, 0xf6, 0xde,
+ 0x7f, 0x61, 0xe2, 0x25, 0x63, 0x1b, 0xc3, 0x82
+};
+static const u8 enc_assoc069[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce069[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
+};
+static const u8 enc_key069[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input070[] __initconst = {
+ 0x25, 0x6d, 0x40, 0x88, 0x7f, 0x6b, 0xe8, 0x7c,
+ 0x55, 0xd3, 0x04, 0x84, 0x9b, 0xbc, 0x01, 0x17,
+ 0xdf, 0x99, 0x47, 0x03, 0xfc, 0x04, 0xc4, 0x84,
+ 0x80, 0xe0, 0x30, 0xbe, 0x14, 0x2c, 0xd6, 0x41,
+ 0xe3, 0xbc, 0xdb, 0x5b, 0xe1, 0x21, 0x03, 0x01,
+ 0x8b, 0xcd, 0xa1, 0xb6, 0x5e, 0xa3, 0x73, 0xd4,
+ 0x08, 0x69, 0xff, 0xd2, 0x13, 0xa1, 0xd9, 0x1a,
+ 0x53, 0xb7, 0xb2, 0x27, 0x01, 0x78, 0x02, 0x42,
+ 0x7a, 0xda, 0x44, 0x42, 0xbd, 0x96, 0x40, 0x05,
+ 0x55, 0x27, 0xf2, 0x70, 0x53, 0x09, 0x7a, 0xfd,
+ 0xb7, 0x4c, 0x5a, 0xe2, 0x19, 0xf3, 0xfa, 0x7f,
+ 0x98, 0x1a, 0x49, 0x38, 0xba, 0x6c, 0x6d, 0x3b,
+ 0x9b, 0xb2, 0xf2, 0x84, 0x49, 0xb9, 0x10, 0x38,
+ 0xf3, 0xf0, 0xb1, 0x36, 0xe2, 0x3c, 0xb7, 0x12,
+ 0x77, 0xd3, 0x0b, 0xc5, 0x89, 0x6d, 0x12, 0xc7,
+ 0xfb, 0xac, 0x01, 0x88, 0xc7, 0xfb, 0x77, 0x38
+};
+static const u8 enc_output070[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x84, 0x28, 0xbc, 0xf0, 0x23, 0xec, 0x6b, 0xf3,
+ 0x1f, 0xd9, 0xef, 0xb2, 0x03, 0xff, 0x08, 0x71
+};
+static const u8 enc_assoc070[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce070[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
+};
+static const u8 enc_key070[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input071[] __initconst = {
+ 0xda, 0x92, 0xbf, 0x77, 0x80, 0x94, 0x17, 0x83,
+ 0xaa, 0x2c, 0xfb, 0x7b, 0x64, 0x43, 0xfe, 0xe8,
+ 0x20, 0x66, 0xb8, 0xfc, 0x03, 0xfb, 0x3b, 0x7b,
+ 0x7f, 0x1f, 0xcf, 0x41, 0xeb, 0xd3, 0x29, 0xbe
+};
+static const u8 enc_output071[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0x13, 0x9f, 0xdf, 0x64, 0x74, 0xea, 0x24, 0xf5,
+ 0x49, 0xb0, 0x75, 0x82, 0x5f, 0x2c, 0x76, 0x20
+};
+static const u8 enc_assoc071[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00
+};
+static const u8 enc_nonce071[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
+};
+static const u8 enc_key071[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input072[] __initconst = {
+ 0xda, 0x92, 0xbf, 0x77, 0x80, 0x94, 0x17, 0x83,
+ 0xaa, 0x2c, 0xfb, 0x7b, 0x64, 0x43, 0xfe, 0xe8,
+ 0x20, 0x66, 0xb8, 0xfc, 0x03, 0xfb, 0x3b, 0x7b,
+ 0x7f, 0x1f, 0xcf, 0x41, 0xeb, 0xd3, 0x29, 0xbe,
+ 0x1c, 0x43, 0x24, 0xa4, 0x1e, 0xde, 0xfc, 0xfe,
+ 0x74, 0x32, 0x5e, 0x49, 0xa1, 0x5c, 0x8c, 0x2b,
+ 0xf7, 0x96, 0x00, 0x2d, 0xec, 0x5e, 0x26, 0xe5,
+ 0xac, 0x48, 0x4d, 0xd8, 0xfe, 0x87, 0xfd, 0xbd
+};
+static const u8 enc_output072[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xbb, 0xad, 0x8d, 0x86, 0x3b, 0x83, 0x5a, 0x8e,
+ 0x86, 0x64, 0xfd, 0x1d, 0x45, 0x66, 0xb6, 0xb4
+};
+static const u8 enc_assoc072[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00
+};
+static const u8 enc_nonce072[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
+};
+static const u8 enc_key072[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - misc */
+static const u8 enc_input073[] __initconst = {
+ 0xda, 0x92, 0xbf, 0x77, 0x80, 0x94, 0x17, 0x83,
+ 0xaa, 0x2c, 0xfb, 0x7b, 0x64, 0x43, 0xfe, 0xe8,
+ 0x20, 0x66, 0xb8, 0xfc, 0x03, 0xfb, 0x3b, 0x7b,
+ 0x7f, 0x1f, 0xcf, 0x41, 0xeb, 0xd3, 0x29, 0xbe,
+ 0x1c, 0x43, 0x24, 0xa4, 0x1e, 0xde, 0xfc, 0xfe,
+ 0x74, 0x32, 0x5e, 0x49, 0xa1, 0x5c, 0x8c, 0x2b,
+ 0xf7, 0x96, 0x00, 0x2d, 0xec, 0x5e, 0x26, 0xe5,
+ 0xac, 0x48, 0x4d, 0xd8, 0xfe, 0x87, 0xfd, 0xbd,
+ 0x85, 0x25, 0xbb, 0xbd, 0x42, 0x69, 0xbf, 0xfa,
+ 0xaa, 0xd8, 0x0d, 0x8f, 0xac, 0xf6, 0x85, 0x02,
+ 0x48, 0xb3, 0xa5, 0x1d, 0xe6, 0x0c, 0x05, 0x80,
+ 0x67, 0xe5, 0xb6, 0xc7, 0x45, 0x93, 0x92, 0xc4,
+ 0x64, 0x4d, 0x0d, 0x7b, 0xb6, 0x46, 0xef, 0xc7,
+ 0x0c, 0x0f, 0x4e, 0xc9, 0x1d, 0xc3, 0x48, 0xed,
+ 0x88, 0x2c, 0xf4, 0x3a, 0x76, 0x92, 0xed, 0x38,
+ 0x04, 0x53, 0xfe, 0x77, 0x38, 0x04, 0x88, 0xc7
+};
+static const u8 enc_output073[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0x42, 0xf2, 0x35, 0x42, 0x97, 0x84, 0x9a, 0x51,
+ 0x1d, 0x53, 0xe5, 0x57, 0x17, 0x72, 0xf7, 0x1f
+};
+static const u8 enc_assoc073[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00
+};
+static const u8 enc_nonce073[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
+};
+static const u8 enc_key073[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - checking for int overflows */
+static const u8 enc_input074[] __initconst = {
+ 0xd4, 0x50, 0x0b, 0xf0, 0x09, 0x49, 0x35, 0x51,
+ 0xc3, 0x80, 0xad, 0xf5, 0x2c, 0x57, 0x3a, 0x69,
+ 0xdf, 0x7e, 0x8b, 0x76, 0x24, 0x63, 0x33, 0x0f,
+ 0xac, 0xc1, 0x6a, 0x57, 0x26, 0xbe, 0x71, 0x90,
+ 0xc6, 0x3c, 0x5a, 0x1c, 0x92, 0x65, 0x84, 0xa0,
+ 0x96, 0x75, 0x68, 0x28, 0xdc, 0xdc, 0x64, 0xac,
+ 0xdf, 0x96, 0x3d, 0x93, 0x1b, 0xf1, 0xda, 0xe2,
+ 0x38, 0xf3, 0xf1, 0x57, 0x22, 0x4a, 0xc4, 0xb5,
+ 0x42, 0xd7, 0x85, 0xb0, 0xdd, 0x84, 0xdb, 0x6b,
+ 0xe3, 0xbc, 0x5a, 0x36, 0x63, 0xe8, 0x41, 0x49,
+ 0xff, 0xbe, 0xd0, 0x9e, 0x54, 0xf7, 0x8f, 0x16,
+ 0xa8, 0x22, 0x3b, 0x24, 0xcb, 0x01, 0x9f, 0x58,
+ 0xb2, 0x1b, 0x0e, 0x55, 0x1e, 0x7a, 0xa0, 0x73,
+ 0x27, 0x62, 0x95, 0x51, 0x37, 0x6c, 0xcb, 0xc3,
+ 0x93, 0x76, 0x71, 0xa0, 0x62, 0x9b, 0xd9, 0x5c,
+ 0x99, 0x15, 0xc7, 0x85, 0x55, 0x77, 0x1e, 0x7a
+};
+static const u8 enc_output074[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x0b, 0x30, 0x0d, 0x8d, 0xa5, 0x6c, 0x21, 0x85,
+ 0x75, 0x52, 0x79, 0x55, 0x3c, 0x4c, 0x82, 0xca
+};
+static const u8 enc_assoc074[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce074[] __initconst = {
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x00, 0x02, 0x50, 0x6e
+};
+static const u8 enc_key074[] __initconst = {
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30
+};
+
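+/*
+ * Editorial note (an assumption about intent, not taken from the file
+ * itself): the Wycheproof "checking for int overflows" vectors pair
+ * 256-byte messages with nonce values chosen to stress 32-bit block
+ * counter and length arithmetic; the byte arrays are copied verbatim
+ * and remain the only authoritative content.
+ */
+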
+/* wycheproof - checking for int overflows */
+static const u8 enc_input075[] __initconst = {
+ 0x7d, 0xe8, 0x7f, 0x67, 0x29, 0x94, 0x52, 0x75,
+ 0xd0, 0x65, 0x5d, 0xa4, 0xc7, 0xfd, 0xe4, 0x56,
+ 0x9e, 0x16, 0xf1, 0x11, 0xb5, 0xeb, 0x26, 0xc2,
+ 0x2d, 0x85, 0x9e, 0x3f, 0xf8, 0x22, 0xec, 0xed,
+ 0x3a, 0x6d, 0xd9, 0xa6, 0x0f, 0x22, 0x95, 0x7f,
+ 0x7b, 0x7c, 0x85, 0x7e, 0x88, 0x22, 0xeb, 0x9f,
+ 0xe0, 0xb8, 0xd7, 0x02, 0x21, 0x41, 0xf2, 0xd0,
+ 0xb4, 0x8f, 0x4b, 0x56, 0x12, 0xd3, 0x22, 0xa8,
+ 0x8d, 0xd0, 0xfe, 0x0b, 0x4d, 0x91, 0x79, 0x32,
+ 0x4f, 0x7c, 0x6c, 0x9e, 0x99, 0x0e, 0xfb, 0xd8,
+ 0x0e, 0x5e, 0xd6, 0x77, 0x58, 0x26, 0x49, 0x8b,
+ 0x1e, 0xfe, 0x0f, 0x71, 0xa0, 0xf3, 0xec, 0x5b,
+ 0x29, 0xcb, 0x28, 0xc2, 0x54, 0x0a, 0x7d, 0xcd,
+ 0x51, 0xb7, 0xda, 0xae, 0xe0, 0xff, 0x4a, 0x7f,
+ 0x3a, 0xc1, 0xee, 0x54, 0xc2, 0x9e, 0xe4, 0xc1,
+ 0x70, 0xde, 0x40, 0x8f, 0x66, 0x69, 0x21, 0x94
+};
+static const u8 enc_output075[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xc5, 0x78, 0xe2, 0xaa, 0x44, 0xd3, 0x09, 0xb7,
+ 0xb6, 0xa5, 0x19, 0x3b, 0xdc, 0x61, 0x18, 0xf5
+};
+static const u8 enc_assoc075[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce075[] __initconst = {
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x00, 0x03, 0x18, 0xa5
+};
+static const u8 enc_key075[] __initconst = {
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30
+};
+
+/* wycheproof - checking for int overflows */
+static const u8 enc_input076[] __initconst = {
+ 0x1b, 0x99, 0x6f, 0x9a, 0x3c, 0xcc, 0x67, 0x85,
+ 0xde, 0x22, 0xff, 0x5b, 0x8a, 0xdd, 0x95, 0x02,
+ 0xce, 0x03, 0xa0, 0xfa, 0xf5, 0x99, 0x2a, 0x09,
+ 0x52, 0x2c, 0xdd, 0x12, 0x06, 0xd2, 0x20, 0xb8,
+ 0xf8, 0xbd, 0x07, 0xd1, 0xf1, 0xf5, 0xa1, 0xbd,
+ 0x9a, 0x71, 0xd1, 0x1c, 0x7f, 0x57, 0x9b, 0x85,
+ 0x58, 0x18, 0xc0, 0x8d, 0x4d, 0xe0, 0x36, 0x39,
+ 0x31, 0x83, 0xb7, 0xf5, 0x90, 0xb3, 0x35, 0xae,
+ 0xd8, 0xde, 0x5b, 0x57, 0xb1, 0x3c, 0x5f, 0xed,
+ 0xe2, 0x44, 0x1c, 0x3e, 0x18, 0x4a, 0xa9, 0xd4,
+ 0x6e, 0x61, 0x59, 0x85, 0x06, 0xb3, 0xe1, 0x1c,
+ 0x43, 0xc6, 0x2c, 0xbc, 0xac, 0xec, 0xed, 0x33,
+ 0x19, 0x08, 0x75, 0xb0, 0x12, 0x21, 0x8b, 0x19,
+ 0x30, 0xfb, 0x7c, 0x38, 0xec, 0x45, 0xac, 0x11,
+ 0xc3, 0x53, 0xd0, 0xcf, 0x93, 0x8d, 0xcc, 0xb9,
+ 0xef, 0xad, 0x8f, 0xed, 0xbe, 0x46, 0xda, 0xa5
+};
+static const u8 enc_output076[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x4b, 0x0b, 0xda, 0x8a, 0xd0, 0x43, 0x83, 0x0d,
+ 0x83, 0x19, 0xab, 0x82, 0xc5, 0x0c, 0x76, 0x63
+};
+static const u8 enc_assoc076[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce076[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0xb4, 0xf0
+};
+static const u8 enc_key076[] __initconst = {
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30
+};
+
+/* wycheproof - checking for int overflows */
+static const u8 enc_input077[] __initconst = {
+ 0x86, 0xcb, 0xac, 0xae, 0x4d, 0x3f, 0x74, 0xae,
+ 0x01, 0x21, 0x3e, 0x05, 0x51, 0xcc, 0x15, 0x16,
+ 0x0e, 0xa1, 0xbe, 0x84, 0x08, 0xe3, 0xd5, 0xd7,
+ 0x4f, 0x01, 0x46, 0x49, 0x95, 0xa6, 0x9e, 0x61,
+ 0x76, 0xcb, 0x9e, 0x02, 0xb2, 0x24, 0x7e, 0xd2,
+ 0x99, 0x89, 0x2f, 0x91, 0x82, 0xa4, 0x5c, 0xaf,
+ 0x4c, 0x69, 0x40, 0x56, 0x11, 0x76, 0x6e, 0xdf,
+ 0xaf, 0xdc, 0x28, 0x55, 0x19, 0xea, 0x30, 0x48,
+ 0x0c, 0x44, 0xf0, 0x5e, 0x78, 0x1e, 0xac, 0xf8,
+ 0xfc, 0xec, 0xc7, 0x09, 0x0a, 0xbb, 0x28, 0xfa,
+ 0x5f, 0xd5, 0x85, 0xac, 0x8c, 0xda, 0x7e, 0x87,
+ 0x72, 0xe5, 0x94, 0xe4, 0xce, 0x6c, 0x88, 0x32,
+ 0x81, 0x93, 0x2e, 0x0f, 0x89, 0xf8, 0x77, 0xa1,
+ 0xf0, 0x4d, 0x9c, 0x32, 0xb0, 0x6c, 0xf9, 0x0b,
+ 0x0e, 0x76, 0x2b, 0x43, 0x0c, 0x4d, 0x51, 0x7c,
+ 0x97, 0x10, 0x70, 0x68, 0xf4, 0x98, 0xef, 0x7f
+};
+static const u8 enc_output077[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x4b, 0xc9, 0x8f, 0x72, 0xc4, 0x94, 0xc2, 0xa4,
+ 0x3c, 0x2b, 0x15, 0xa1, 0x04, 0x3f, 0x1c, 0xfa
+};
+static const u8 enc_assoc077[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce077[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0xfb, 0x66
+};
+static const u8 enc_key077[] __initconst = {
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30
+};
+
+/* wycheproof - checking for int overflows */
+static const u8 enc_input078[] __initconst = {
+ 0xfa, 0xb1, 0xcd, 0xdf, 0x4f, 0xe1, 0x98, 0xef,
+ 0x63, 0xad, 0xd8, 0x81, 0xd6, 0xea, 0xd6, 0xc5,
+ 0x76, 0x37, 0xbb, 0xe9, 0x20, 0x18, 0xca, 0x7c,
+ 0x0b, 0x96, 0xfb, 0xa0, 0x87, 0x1e, 0x93, 0x2d,
+ 0xb1, 0xfb, 0xf9, 0x07, 0x61, 0xbe, 0x25, 0xdf,
+ 0x8d, 0xfa, 0xf9, 0x31, 0xce, 0x57, 0x57, 0xe6,
+ 0x17, 0xb3, 0xd7, 0xa9, 0xf0, 0xbf, 0x0f, 0xfe,
+ 0x5d, 0x59, 0x1a, 0x33, 0xc1, 0x43, 0xb8, 0xf5,
+ 0x3f, 0xd0, 0xb5, 0xa1, 0x96, 0x09, 0xfd, 0x62,
+ 0xe5, 0xc2, 0x51, 0xa4, 0x28, 0x1a, 0x20, 0x0c,
+ 0xfd, 0xc3, 0x4f, 0x28, 0x17, 0x10, 0x40, 0x6f,
+ 0x4e, 0x37, 0x62, 0x54, 0x46, 0xff, 0x6e, 0xf2,
+ 0x24, 0x91, 0x3d, 0xeb, 0x0d, 0x89, 0xaf, 0x33,
+ 0x71, 0x28, 0xe3, 0xd1, 0x55, 0xd1, 0x6d, 0x3e,
+ 0xc3, 0x24, 0x60, 0x41, 0x43, 0x21, 0x43, 0xe9,
+ 0xab, 0x3a, 0x6d, 0x2c, 0xcc, 0x2f, 0x4d, 0x62
+};
+static const u8 enc_output078[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xf7, 0xe9, 0xe1, 0x51, 0xb0, 0x25, 0x33, 0xc7,
+ 0x46, 0x58, 0xbf, 0xc7, 0x73, 0x7c, 0x68, 0x0d
+};
+static const u8 enc_assoc078[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce078[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0xbb, 0x90
+};
+static const u8 enc_key078[] __initconst = {
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30
+};
+
+/* wycheproof - checking for int overflows */
+static const u8 enc_input079[] __initconst = {
+ 0x22, 0x72, 0x02, 0xbe, 0x7f, 0x35, 0x15, 0xe9,
+ 0xd1, 0xc0, 0x2e, 0xea, 0x2f, 0x19, 0x50, 0xb6,
+ 0x48, 0x1b, 0x04, 0x8a, 0x4c, 0x91, 0x50, 0x6c,
+ 0xb4, 0x0d, 0x50, 0x4e, 0x6c, 0x94, 0x9f, 0x82,
+ 0xd1, 0x97, 0xc2, 0x5a, 0xd1, 0x7d, 0xc7, 0x21,
+ 0x65, 0x11, 0x25, 0x78, 0x2a, 0xc7, 0xa7, 0x12,
+ 0x47, 0xfe, 0xae, 0xf3, 0x2f, 0x1f, 0x25, 0x0c,
+ 0xe4, 0xbb, 0x8f, 0x79, 0xac, 0xaa, 0x17, 0x9d,
+ 0x45, 0xa7, 0xb0, 0x54, 0x5f, 0x09, 0x24, 0x32,
+ 0x5e, 0xfa, 0x87, 0xd5, 0xe4, 0x41, 0xd2, 0x84,
+ 0x78, 0xc6, 0x1f, 0x22, 0x23, 0xee, 0x67, 0xc3,
+ 0xb4, 0x1f, 0x43, 0x94, 0x53, 0x5e, 0x2a, 0x24,
+ 0x36, 0x9a, 0x2e, 0x16, 0x61, 0x3c, 0x45, 0x94,
+ 0x90, 0xc1, 0x4f, 0xb1, 0xd7, 0x55, 0xfe, 0x53,
+ 0xfb, 0xe1, 0xee, 0x45, 0xb1, 0xb2, 0x1f, 0x71,
+ 0x62, 0xe2, 0xfc, 0xaa, 0x74, 0x2a, 0xbe, 0xfd
+};
+static const u8 enc_output079[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x79, 0x5b, 0xcf, 0xf6, 0x47, 0xc5, 0x53, 0xc2,
+ 0xe4, 0xeb, 0x6e, 0x0e, 0xaf, 0xd9, 0xe0, 0x4e
+};
+static const u8 enc_assoc079[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce079[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x48, 0x4a
+};
+static const u8 enc_key079[] __initconst = {
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30
+};
+
+/* wycheproof - checking for int overflows */
+static const u8 enc_input080[] __initconst = {
+ 0xfa, 0xe5, 0x83, 0x45, 0xc1, 0x6c, 0xb0, 0xf5,
+ 0xcc, 0x53, 0x7f, 0x2b, 0x1b, 0x34, 0x69, 0xc9,
+ 0x69, 0x46, 0x3b, 0x3e, 0xa7, 0x1b, 0xcf, 0x6b,
+ 0x98, 0xd6, 0x69, 0xa8, 0xe6, 0x0e, 0x04, 0xfc,
+ 0x08, 0xd5, 0xfd, 0x06, 0x9c, 0x36, 0x26, 0x38,
+ 0xe3, 0x40, 0x0e, 0xf4, 0xcb, 0x24, 0x2e, 0x27,
+ 0xe2, 0x24, 0x5e, 0x68, 0xcb, 0x9e, 0xc5, 0x83,
+ 0xda, 0x53, 0x40, 0xb1, 0x2e, 0xdf, 0x42, 0x3b,
+ 0x73, 0x26, 0xad, 0x20, 0xfe, 0xeb, 0x57, 0xda,
+ 0xca, 0x2e, 0x04, 0x67, 0xa3, 0x28, 0x99, 0xb4,
+ 0x2d, 0xf8, 0xe5, 0x6d, 0x84, 0xe0, 0x06, 0xbc,
+ 0x8a, 0x7a, 0xcc, 0x73, 0x1e, 0x7c, 0x1f, 0x6b,
+ 0xec, 0xb5, 0x71, 0x9f, 0x70, 0x77, 0xf0, 0xd4,
+ 0xf4, 0xc6, 0x1a, 0xb1, 0x1e, 0xba, 0xc1, 0x00,
+ 0x18, 0x01, 0xce, 0x33, 0xc4, 0xe4, 0xa7, 0x7d,
+ 0x83, 0x1d, 0x3c, 0xe3, 0x4e, 0x84, 0x10, 0xe1
+};
+static const u8 enc_output080[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x19, 0x46, 0xd6, 0x53, 0x96, 0x0f, 0x94, 0x7a,
+ 0x74, 0xd3, 0xe8, 0x09, 0x3c, 0xf4, 0x85, 0x02
+};
+static const u8 enc_assoc080[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce080[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x93, 0x2f, 0x40
+};
+static const u8 enc_key080[] __initconst = {
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30
+};
+
+/* wycheproof - checking for int overflows */
+static const u8 enc_input081[] __initconst = {
+ 0xeb, 0xb2, 0x16, 0xdd, 0xd7, 0xca, 0x70, 0x92,
+ 0x15, 0xf5, 0x03, 0xdf, 0x9c, 0xe6, 0x3c, 0x5c,
+ 0xd2, 0x19, 0x4e, 0x7d, 0x90, 0x99, 0xe8, 0xa9,
+ 0x0b, 0x2a, 0xfa, 0xad, 0x5e, 0xba, 0x35, 0x06,
+ 0x99, 0x25, 0xa6, 0x03, 0xfd, 0xbc, 0x34, 0x1a,
+ 0xae, 0xd4, 0x15, 0x05, 0xb1, 0x09, 0x41, 0xfa,
+ 0x38, 0x56, 0xa7, 0xe2, 0x47, 0xb1, 0x04, 0x07,
+ 0x09, 0x74, 0x6c, 0xfc, 0x20, 0x96, 0xca, 0xa6,
+ 0x31, 0xb2, 0xff, 0xf4, 0x1c, 0x25, 0x05, 0x06,
+ 0xd8, 0x89, 0xc1, 0xc9, 0x06, 0x71, 0xad, 0xe8,
+ 0x53, 0xee, 0x63, 0x94, 0xc1, 0x91, 0x92, 0xa5,
+ 0xcf, 0x37, 0x10, 0xd1, 0x07, 0x30, 0x99, 0xe5,
+ 0xbc, 0x94, 0x65, 0x82, 0xfc, 0x0f, 0xab, 0x9f,
+ 0x54, 0x3c, 0x71, 0x6a, 0xe2, 0x48, 0x6a, 0x86,
+ 0x83, 0xfd, 0xca, 0x39, 0xd2, 0xe1, 0x4f, 0x23,
+ 0xd0, 0x0a, 0x58, 0x26, 0x64, 0xf4, 0xec, 0xb1
+};
+static const u8 enc_output081[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x36, 0xc3, 0x00, 0x29, 0x85, 0xdd, 0x21, 0xba,
+ 0xf8, 0x95, 0xd6, 0x33, 0x57, 0x3f, 0x12, 0xc0
+};
+static const u8 enc_assoc081[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce081[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xe2, 0x93, 0x35
+};
+static const u8 enc_key081[] __initconst = {
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30
+};
+
+/* wycheproof - checking for int overflows */
+static const u8 enc_input082[] __initconst = {
+ 0x40, 0x8a, 0xe6, 0xef, 0x1c, 0x7e, 0xf0, 0xfb,
+ 0x2c, 0x2d, 0x61, 0x08, 0x16, 0xfc, 0x78, 0x49,
+ 0xef, 0xa5, 0x8f, 0x78, 0x27, 0x3f, 0x5f, 0x16,
+ 0x6e, 0xa6, 0x5f, 0x81, 0xb5, 0x75, 0x74, 0x7d,
+ 0x03, 0x5b, 0x30, 0x40, 0xfe, 0xde, 0x1e, 0xb9,
+ 0x45, 0x97, 0x88, 0x66, 0x97, 0x88, 0x40, 0x8e,
+ 0x00, 0x41, 0x3b, 0x3e, 0x37, 0x6d, 0x15, 0x2d,
+ 0x20, 0x4a, 0xa2, 0xb7, 0xa8, 0x35, 0x58, 0xfc,
+ 0xd4, 0x8a, 0x0e, 0xf7, 0xa2, 0x6b, 0x1c, 0xd6,
+ 0xd3, 0x5d, 0x23, 0xb3, 0xf5, 0xdf, 0xe0, 0xca,
+ 0x77, 0xa4, 0xce, 0x32, 0xb9, 0x4a, 0xbf, 0x83,
+ 0xda, 0x2a, 0xef, 0xca, 0xf0, 0x68, 0x38, 0x08,
+ 0x79, 0xe8, 0x9f, 0xb0, 0xa3, 0x82, 0x95, 0x95,
+ 0xcf, 0x44, 0xc3, 0x85, 0x2a, 0xe2, 0xcc, 0x66,
+ 0x2b, 0x68, 0x9f, 0x93, 0x55, 0xd9, 0xc1, 0x83,
+ 0x80, 0x1f, 0x6a, 0xcc, 0x31, 0x3f, 0x89, 0x07
+};
+static const u8 enc_output082[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x65, 0x14, 0x51, 0x8e, 0x0a, 0x26, 0x41, 0x42,
+ 0xe0, 0xb7, 0x35, 0x1f, 0x96, 0x7f, 0xc2, 0xae
+};
+static const u8 enc_assoc082[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce082[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0xf7, 0xd5
+};
+static const u8 enc_key082[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - checking for int overflows */
+static const u8 enc_input083[] __initconst = {
+ 0x0a, 0x0a, 0x24, 0x49, 0x9b, 0xca, 0xde, 0x58,
+ 0xcf, 0x15, 0x76, 0xc3, 0x12, 0xac, 0xa9, 0x84,
+ 0x71, 0x8c, 0xb4, 0xcc, 0x7e, 0x01, 0x53, 0xf5,
+ 0xa9, 0x01, 0x58, 0x10, 0x85, 0x96, 0x44, 0xdf,
+ 0xc0, 0x21, 0x17, 0x4e, 0x0b, 0x06, 0x0a, 0x39,
+ 0x74, 0x48, 0xde, 0x8b, 0x48, 0x4a, 0x86, 0x03,
+ 0xbe, 0x68, 0x0a, 0x69, 0x34, 0xc0, 0x90, 0x6f,
+ 0x30, 0xdd, 0x17, 0xea, 0xe2, 0xd4, 0xc5, 0xfa,
+ 0xa7, 0x77, 0xf8, 0xca, 0x53, 0x37, 0x0e, 0x08,
+ 0x33, 0x1b, 0x88, 0xc3, 0x42, 0xba, 0xc9, 0x59,
+ 0x78, 0x7b, 0xbb, 0x33, 0x93, 0x0e, 0x3b, 0x56,
+ 0xbe, 0x86, 0xda, 0x7f, 0x2a, 0x6e, 0xb1, 0xf9,
+ 0x40, 0x89, 0xd1, 0xd1, 0x81, 0x07, 0x4d, 0x43,
+ 0x02, 0xf8, 0xe0, 0x55, 0x2d, 0x0d, 0xe1, 0xfa,
+ 0xb3, 0x06, 0xa2, 0x1b, 0x42, 0xd4, 0xc3, 0xba,
+ 0x6e, 0x6f, 0x0c, 0xbc, 0xc8, 0x1e, 0x87, 0x7a
+};
+static const u8 enc_output083[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x4c, 0x19, 0x4d, 0xa6, 0xa9, 0x9f, 0xd6, 0x5b,
+ 0x40, 0xe9, 0xca, 0xd7, 0x98, 0xf4, 0x4b, 0x19
+};
+static const u8 enc_assoc083[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce083[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x3d, 0xfc, 0xe4
+};
+static const u8 enc_key083[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - checking for int overflows */
+static const u8 enc_input084[] __initconst = {
+ 0x4a, 0x0a, 0xaf, 0xf8, 0x49, 0x47, 0x29, 0x18,
+ 0x86, 0x91, 0x70, 0x13, 0x40, 0xf3, 0xce, 0x2b,
+ 0x8a, 0x78, 0xee, 0xd3, 0xa0, 0xf0, 0x65, 0x99,
+ 0x4b, 0x72, 0x48, 0x4e, 0x79, 0x91, 0xd2, 0x5c,
+ 0x29, 0xaa, 0x07, 0x5e, 0xb1, 0xfc, 0x16, 0xde,
+ 0x93, 0xfe, 0x06, 0x90, 0x58, 0x11, 0x2a, 0xb2,
+ 0x84, 0xa3, 0xed, 0x18, 0x78, 0x03, 0x26, 0xd1,
+ 0x25, 0x8a, 0x47, 0x22, 0x2f, 0xa6, 0x33, 0xd8,
+ 0xb2, 0x9f, 0x3b, 0xd9, 0x15, 0x0b, 0x23, 0x9b,
+ 0x15, 0x46, 0xc2, 0xbb, 0x9b, 0x9f, 0x41, 0x0f,
+ 0xeb, 0xea, 0xd3, 0x96, 0x00, 0x0e, 0xe4, 0x77,
+ 0x70, 0x15, 0x32, 0xc3, 0xd0, 0xf5, 0xfb, 0xf8,
+ 0x95, 0xd2, 0x80, 0x19, 0x6d, 0x2f, 0x73, 0x7c,
+ 0x5e, 0x9f, 0xec, 0x50, 0xd9, 0x2b, 0xb0, 0xdf,
+ 0x5d, 0x7e, 0x51, 0x3b, 0xe5, 0xb8, 0xea, 0x97,
+ 0x13, 0x10, 0xd5, 0xbf, 0x16, 0xba, 0x7a, 0xee
+};
+static const u8 enc_output084[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xc8, 0xae, 0x77, 0x88, 0xcd, 0x28, 0x74, 0xab,
+ 0xc1, 0x38, 0x54, 0x1e, 0x11, 0xfd, 0x05, 0x87
+};
+static const u8 enc_assoc084[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce084[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0x84, 0x86, 0xa8
+};
+static const u8 enc_key084[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - checking for int overflows */
+static const u8 enc_input085[] __initconst = {
+ 0xff, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
+ 0x78, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09,
+ 0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8,
+ 0x9f, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca,
+ 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64,
+ 0x9c, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39,
+ 0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4,
+ 0x47, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2,
+ 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73,
+ 0xd4, 0xd2, 0x06, 0x61, 0x6f, 0x92, 0x93, 0xf6,
+ 0x5b, 0x45, 0xdb, 0xbc, 0x74, 0xe7, 0xc2, 0xed,
+ 0xfb, 0xcb, 0xbf, 0x1c, 0xfb, 0x67, 0x9b, 0xb7,
+ 0x39, 0xa5, 0x86, 0x2d, 0xe2, 0xbc, 0xb9, 0x37,
+ 0xf7, 0x4d, 0x5b, 0xf8, 0x67, 0x1c, 0x5a, 0x8a,
+ 0x50, 0x92, 0xf6, 0x1d, 0x54, 0xc9, 0xaa, 0x5b
+};
+static const u8 enc_output085[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x93, 0x3a, 0x51, 0x63, 0xc7, 0xf6, 0x23, 0x68,
+ 0x32, 0x7b, 0x3f, 0xbc, 0x10, 0x36, 0xc9, 0x43
+};
+static const u8 enc_assoc085[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce085[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key085[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - special case tag */
+static const u8 enc_input086[] __initconst = {
+ 0x9a, 0x49, 0xc4, 0x0f, 0x8b, 0x48, 0xd7, 0xc6,
+ 0x6d, 0x1d, 0xb4, 0xe5, 0x3f, 0x20, 0xf2, 0xdd,
+ 0x4a, 0xaa, 0x24, 0x1d, 0xda, 0xb2, 0x6b, 0x5b,
+ 0xc0, 0xe2, 0x18, 0xb7, 0x2c, 0x33, 0x90, 0xf2,
+ 0xdf, 0x3e, 0xbd, 0x01, 0x76, 0x70, 0x44, 0x19,
+ 0x97, 0x2b, 0xcd, 0xbc, 0x6b, 0xbc, 0xb3, 0xe4,
+ 0xe7, 0x4a, 0x71, 0x52, 0x8e, 0xf5, 0x12, 0x63,
+ 0xce, 0x24, 0xe0, 0xd5, 0x75, 0xe0, 0xe4, 0x4d
+};
+static const u8 enc_output086[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f
+};
+static const u8 enc_assoc086[] __initconst = {
+ 0x85, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xa6, 0x90, 0x2f, 0xcb, 0xc8, 0x83, 0xbb, 0xc1,
+ 0x80, 0xb2, 0x56, 0xae, 0x34, 0xad, 0x7f, 0x00
+};
+static const u8 enc_nonce086[] __initconst = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b
+};
+static const u8 enc_key086[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
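+/*
+ * Editorial note: in the "special case tag" vectors the associated data
+ * is chosen so that the expected tag takes a distinguished pattern - here
+ * 0x00..0x0f, then all-zero, all-0xff, and similar word-repeating runs in
+ * the cases that follow - which probes tag handling that might treat such
+ * values specially.
+ */
+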
+/* wycheproof - special case tag */
+static const u8 enc_input087[] __initconst = {
+ 0x9a, 0x49, 0xc4, 0x0f, 0x8b, 0x48, 0xd7, 0xc6,
+ 0x6d, 0x1d, 0xb4, 0xe5, 0x3f, 0x20, 0xf2, 0xdd,
+ 0x4a, 0xaa, 0x24, 0x1d, 0xda, 0xb2, 0x6b, 0x5b,
+ 0xc0, 0xe2, 0x18, 0xb7, 0x2c, 0x33, 0x90, 0xf2,
+ 0xdf, 0x3e, 0xbd, 0x01, 0x76, 0x70, 0x44, 0x19,
+ 0x97, 0x2b, 0xcd, 0xbc, 0x6b, 0xbc, 0xb3, 0xe4,
+ 0xe7, 0x4a, 0x71, 0x52, 0x8e, 0xf5, 0x12, 0x63,
+ 0xce, 0x24, 0xe0, 0xd5, 0x75, 0xe0, 0xe4, 0x4d
+};
+static const u8 enc_output087[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+static const u8 enc_assoc087[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x24, 0x7e, 0x50, 0x64, 0x2a, 0x1c, 0x0a, 0x2f,
+ 0x8f, 0x77, 0x21, 0x96, 0x09, 0xdb, 0xa9, 0x58
+};
+static const u8 enc_nonce087[] __initconst = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b
+};
+static const u8 enc_key087[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - special case tag */
+static const u8 enc_input088[] __initconst = {
+ 0x9a, 0x49, 0xc4, 0x0f, 0x8b, 0x48, 0xd7, 0xc6,
+ 0x6d, 0x1d, 0xb4, 0xe5, 0x3f, 0x20, 0xf2, 0xdd,
+ 0x4a, 0xaa, 0x24, 0x1d, 0xda, 0xb2, 0x6b, 0x5b,
+ 0xc0, 0xe2, 0x18, 0xb7, 0x2c, 0x33, 0x90, 0xf2,
+ 0xdf, 0x3e, 0xbd, 0x01, 0x76, 0x70, 0x44, 0x19,
+ 0x97, 0x2b, 0xcd, 0xbc, 0x6b, 0xbc, 0xb3, 0xe4,
+ 0xe7, 0x4a, 0x71, 0x52, 0x8e, 0xf5, 0x12, 0x63,
+ 0xce, 0x24, 0xe0, 0xd5, 0x75, 0xe0, 0xe4, 0x4d
+};
+static const u8 enc_output088[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_assoc088[] __initconst = {
+ 0x7c, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xd9, 0xe7, 0x2c, 0x06, 0x4a, 0xc8, 0x96, 0x1f,
+ 0x3f, 0xa5, 0x85, 0xe0, 0xe2, 0xab, 0xd6, 0x00
+};
+static const u8 enc_nonce088[] __initconst = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b
+};
+static const u8 enc_key088[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - special case tag */
+static const u8 enc_input089[] __initconst = {
+ 0x9a, 0x49, 0xc4, 0x0f, 0x8b, 0x48, 0xd7, 0xc6,
+ 0x6d, 0x1d, 0xb4, 0xe5, 0x3f, 0x20, 0xf2, 0xdd,
+ 0x4a, 0xaa, 0x24, 0x1d, 0xda, 0xb2, 0x6b, 0x5b,
+ 0xc0, 0xe2, 0x18, 0xb7, 0x2c, 0x33, 0x90, 0xf2,
+ 0xdf, 0x3e, 0xbd, 0x01, 0x76, 0x70, 0x44, 0x19,
+ 0x97, 0x2b, 0xcd, 0xbc, 0x6b, 0xbc, 0xb3, 0xe4,
+ 0xe7, 0x4a, 0x71, 0x52, 0x8e, 0xf5, 0x12, 0x63,
+ 0xce, 0x24, 0xe0, 0xd5, 0x75, 0xe0, 0xe4, 0x4d
+};
+static const u8 enc_output089[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80
+};
+static const u8 enc_assoc089[] __initconst = {
+ 0x65, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x95, 0xaf, 0x0f, 0x4d, 0x0b, 0x68, 0x6e, 0xae,
+ 0xcc, 0xca, 0x43, 0x07, 0xd5, 0x96, 0xf5, 0x02
+};
+static const u8 enc_nonce089[] __initconst = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b
+};
+static const u8 enc_key089[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - special case tag */
+static const u8 enc_input090[] __initconst = {
+ 0x9a, 0x49, 0xc4, 0x0f, 0x8b, 0x48, 0xd7, 0xc6,
+ 0x6d, 0x1d, 0xb4, 0xe5, 0x3f, 0x20, 0xf2, 0xdd,
+ 0x4a, 0xaa, 0x24, 0x1d, 0xda, 0xb2, 0x6b, 0x5b,
+ 0xc0, 0xe2, 0x18, 0xb7, 0x2c, 0x33, 0x90, 0xf2,
+ 0xdf, 0x3e, 0xbd, 0x01, 0x76, 0x70, 0x44, 0x19,
+ 0x97, 0x2b, 0xcd, 0xbc, 0x6b, 0xbc, 0xb3, 0xe4,
+ 0xe7, 0x4a, 0x71, 0x52, 0x8e, 0xf5, 0x12, 0x63,
+ 0xce, 0x24, 0xe0, 0xd5, 0x75, 0xe0, 0xe4, 0x4d
+};
+static const u8 enc_output090[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f
+};
+static const u8 enc_assoc090[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x85, 0x40, 0xb4, 0x64, 0x35, 0x77, 0x07, 0xbe,
+ 0x3a, 0x39, 0xd5, 0x5c, 0x34, 0xf8, 0xbc, 0xb3
+};
+static const u8 enc_nonce090[] __initconst = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b
+};
+static const u8 enc_key090[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - special case tag */
+static const u8 enc_input091[] __initconst = {
+ 0x9a, 0x49, 0xc4, 0x0f, 0x8b, 0x48, 0xd7, 0xc6,
+ 0x6d, 0x1d, 0xb4, 0xe5, 0x3f, 0x20, 0xf2, 0xdd,
+ 0x4a, 0xaa, 0x24, 0x1d, 0xda, 0xb2, 0x6b, 0x5b,
+ 0xc0, 0xe2, 0x18, 0xb7, 0x2c, 0x33, 0x90, 0xf2,
+ 0xdf, 0x3e, 0xbd, 0x01, 0x76, 0x70, 0x44, 0x19,
+ 0x97, 0x2b, 0xcd, 0xbc, 0x6b, 0xbc, 0xb3, 0xe4,
+ 0xe7, 0x4a, 0x71, 0x52, 0x8e, 0xf5, 0x12, 0x63,
+ 0xce, 0x24, 0xe0, 0xd5, 0x75, 0xe0, 0xe4, 0x4d
+};
+static const u8 enc_output091[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00
+};
+static const u8 enc_assoc091[] __initconst = {
+ 0x4f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x66, 0x23, 0xd9, 0x90, 0xb8, 0x98, 0xd8, 0x30,
+ 0xd2, 0x12, 0xaf, 0x23, 0x83, 0x33, 0x07, 0x01
+};
+static const u8 enc_nonce091[] __initconst = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b
+};
+static const u8 enc_key091[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - special case tag */
+static const u8 enc_input092[] __initconst = {
+ 0x9a, 0x49, 0xc4, 0x0f, 0x8b, 0x48, 0xd7, 0xc6,
+ 0x6d, 0x1d, 0xb4, 0xe5, 0x3f, 0x20, 0xf2, 0xdd,
+ 0x4a, 0xaa, 0x24, 0x1d, 0xda, 0xb2, 0x6b, 0x5b,
+ 0xc0, 0xe2, 0x18, 0xb7, 0x2c, 0x33, 0x90, 0xf2,
+ 0xdf, 0x3e, 0xbd, 0x01, 0x76, 0x70, 0x44, 0x19,
+ 0x97, 0x2b, 0xcd, 0xbc, 0x6b, 0xbc, 0xb3, 0xe4,
+ 0xe7, 0x4a, 0x71, 0x52, 0x8e, 0xf5, 0x12, 0x63,
+ 0xce, 0x24, 0xe0, 0xd5, 0x75, 0xe0, 0xe4, 0x4d
+};
+static const u8 enc_output092[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+static const u8 enc_assoc092[] __initconst = {
+ 0x83, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x5f, 0x16, 0xd0, 0x9f, 0x17, 0x78, 0x72, 0x11,
+ 0xb7, 0xd4, 0x84, 0xe0, 0x24, 0xf8, 0x97, 0x01
+};
+static const u8 enc_nonce092[] __initconst = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b
+};
+static const u8 enc_key092[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
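+/*
+ * The "edge case intermediate sums" vectors below use plaintexts chosen
+ * so that Poly1305's intermediate accumulator values hit boundary
+ * conditions; note the recurring runs of 0xff in the expected outputs.
+ */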
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input093[] __initconst = {
+ 0x00, 0x52, 0x35, 0xd2, 0xa9, 0x19, 0xf2, 0x8d,
+ 0x3d, 0xb7, 0x66, 0x4a, 0x34, 0xae, 0x6b, 0x44,
+ 0x4d, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09,
+ 0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8,
+ 0x5b, 0x8b, 0x94, 0x50, 0x9e, 0x2b, 0x74, 0xa3,
+ 0x6d, 0x34, 0x6e, 0x33, 0xd5, 0x72, 0x65, 0x9b,
+ 0xa9, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39,
+ 0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4,
+ 0x83, 0xdc, 0xe9, 0xf3, 0x07, 0x3e, 0xfa, 0xdb,
+ 0x7d, 0x23, 0xb8, 0x7a, 0xce, 0x35, 0x16, 0x8c
+};
+static const u8 enc_output093[] __initconst = {
+ 0x00, 0x39, 0xe2, 0xfd, 0x2f, 0xd3, 0x12, 0x14,
+ 0x9e, 0x98, 0x98, 0x80, 0x88, 0x48, 0x13, 0xe7,
+ 0xca, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x3b, 0x0e, 0x86, 0x9a, 0xaa, 0x8e, 0xa4, 0x96,
+ 0x32, 0xff, 0xff, 0x37, 0xb9, 0xe8, 0xce, 0x00,
+ 0xca, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x3b, 0x0e, 0x86, 0x9a, 0xaa, 0x8e, 0xa4, 0x96,
+ 0x32, 0xff, 0xff, 0x37, 0xb9, 0xe8, 0xce, 0x00,
+ 0xa5, 0x19, 0xac, 0x1a, 0x35, 0xb4, 0xa5, 0x77,
+ 0x87, 0x51, 0x0a, 0xf7, 0x8d, 0x8d, 0x20, 0x0a
+};
+static const u8 enc_assoc093[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce093[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key093[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input094[] __initconst = {
+ 0xd3, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
+ 0xe5, 0xda, 0x78, 0x76, 0x6f, 0xa1, 0x92, 0x90,
+ 0xc0, 0x31, 0xf7, 0x52, 0x08, 0x50, 0x67, 0x45,
+ 0xae, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca,
+ 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64,
+ 0x49, 0x6d, 0xde, 0xb0, 0x55, 0x09, 0xc6, 0xef,
+ 0xff, 0xab, 0x75, 0xeb, 0x2d, 0xf4, 0xab, 0x09,
+ 0x76, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2,
+ 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73,
+ 0x01, 0x49, 0xef, 0x50, 0x4b, 0x71, 0xb1, 0x20,
+ 0xca, 0x4f, 0xf3, 0x95, 0x19, 0xc2, 0xc2, 0x10
+};
+static const u8 enc_output094[] __initconst = {
+ 0xd3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x62, 0x18, 0xb2, 0x7f, 0x83, 0xb8, 0xb4, 0x66,
+ 0x02, 0xf6, 0xe1, 0xd8, 0x34, 0x20, 0x7b, 0x02,
+ 0xce, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x2a, 0x64, 0x16, 0xce, 0xdb, 0x1c, 0xdd, 0x29,
+ 0x6e, 0xf5, 0xd7, 0xd6, 0x92, 0xda, 0xff, 0x02,
+ 0xce, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x2a, 0x64, 0x16, 0xce, 0xdb, 0x1c, 0xdd, 0x29,
+ 0x6e, 0xf5, 0xd7, 0xd6, 0x92, 0xda, 0xff, 0x02,
+ 0x30, 0x2f, 0xe8, 0x2a, 0xb0, 0xa0, 0x9a, 0xf6,
+ 0x44, 0x00, 0xd0, 0x15, 0xae, 0x83, 0xd9, 0xcc
+};
+static const u8 enc_assoc094[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce094[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key094[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input095[] __initconst = {
+ 0xe9, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
+ 0x6d, 0xf1, 0x39, 0x4e, 0xdc, 0x53, 0x9b, 0x5b,
+ 0x3a, 0x09, 0x57, 0xbe, 0x0f, 0xb8, 0x59, 0x46,
+ 0x80, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca,
+ 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64,
+ 0xd1, 0x76, 0x9f, 0xe8, 0x06, 0xbb, 0xfe, 0xb6,
+ 0xf5, 0x90, 0x95, 0x0f, 0x2e, 0xac, 0x9e, 0x0a,
+ 0x58, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2,
+ 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73,
+ 0x99, 0x52, 0xae, 0x08, 0x18, 0xc3, 0x89, 0x79,
+ 0xc0, 0x74, 0x13, 0x71, 0x1a, 0x9a, 0xf7, 0x13
+};
+static const u8 enc_output095[] __initconst = {
+ 0xe9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xea, 0x33, 0xf3, 0x47, 0x30, 0x4a, 0xbd, 0xad,
+ 0xf8, 0xce, 0x41, 0x34, 0x33, 0xc8, 0x45, 0x01,
+ 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xb2, 0x7f, 0x57, 0x96, 0x88, 0xae, 0xe5, 0x70,
+ 0x64, 0xce, 0x37, 0x32, 0x91, 0x82, 0xca, 0x01,
+ 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xb2, 0x7f, 0x57, 0x96, 0x88, 0xae, 0xe5, 0x70,
+ 0x64, 0xce, 0x37, 0x32, 0x91, 0x82, 0xca, 0x01,
+ 0x98, 0xa7, 0xe8, 0x36, 0xe0, 0xee, 0x4d, 0x02,
+ 0x35, 0x00, 0xd0, 0x55, 0x7e, 0xc2, 0xcb, 0xe0
+};
+static const u8 enc_assoc095[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce095[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key095[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input096[] __initconst = {
+ 0xff, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
+ 0x64, 0xf9, 0x0f, 0x5b, 0x26, 0x92, 0xb8, 0x60,
+ 0xd4, 0x59, 0x6f, 0xf4, 0xb3, 0x40, 0x2c, 0x5c,
+ 0x00, 0xb9, 0xbb, 0x53, 0x70, 0x7a, 0xa6, 0x67,
+ 0xd3, 0x56, 0xfe, 0x50, 0xc7, 0x19, 0x96, 0x94,
+ 0x03, 0x35, 0x61, 0xe7, 0xca, 0xca, 0x6d, 0x94,
+ 0x1d, 0xc3, 0xcd, 0x69, 0x14, 0xad, 0x69, 0x04
+};
+static const u8 enc_output096[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xe3, 0x3b, 0xc5, 0x52, 0xca, 0x8b, 0x9e, 0x96,
+ 0x16, 0x9e, 0x79, 0x7e, 0x8f, 0x30, 0x30, 0x1b,
+ 0x60, 0x3c, 0xa9, 0x99, 0x44, 0xdf, 0x76, 0x52,
+ 0x8c, 0x9d, 0x6f, 0x54, 0xab, 0x83, 0x3d, 0x0f,
+ 0x60, 0x3c, 0xa9, 0x99, 0x44, 0xdf, 0x76, 0x52,
+ 0x8c, 0x9d, 0x6f, 0x54, 0xab, 0x83, 0x3d, 0x0f,
+ 0x6a, 0xb8, 0xdc, 0xe2, 0xc5, 0x9d, 0xa4, 0x73,
+ 0x71, 0x30, 0xb0, 0x25, 0x2f, 0x68, 0xa8, 0xd8
+};
+static const u8 enc_assoc096[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce096[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key096[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input097[] __initconst = {
+ 0x68, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
+ 0xb0, 0x8f, 0x25, 0x67, 0x5b, 0x9b, 0xcb, 0xf6,
+ 0xe3, 0x84, 0x07, 0xde, 0x2e, 0xc7, 0x5a, 0x47,
+ 0x9f, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca,
+ 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64,
+ 0x2d, 0x2a, 0xf7, 0xcd, 0x6b, 0x08, 0x05, 0x01,
+ 0xd3, 0x1b, 0xa5, 0x4f, 0xb2, 0xeb, 0x75, 0x96,
+ 0x47, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2,
+ 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73,
+ 0x65, 0x0e, 0xc6, 0x2d, 0x75, 0x70, 0x72, 0xce,
+ 0xe6, 0xff, 0x23, 0x31, 0x86, 0xdd, 0x1c, 0x8f
+};
+static const u8 enc_output097[] __initconst = {
+ 0x68, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x37, 0x4d, 0xef, 0x6e, 0xb7, 0x82, 0xed, 0x00,
+ 0x21, 0x43, 0x11, 0x54, 0x12, 0xb7, 0x46, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x4e, 0x23, 0x3f, 0xb3, 0xe5, 0x1d, 0x1e, 0xc7,
+ 0x42, 0x45, 0x07, 0x72, 0x0d, 0xc5, 0x21, 0x9d,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x4e, 0x23, 0x3f, 0xb3, 0xe5, 0x1d, 0x1e, 0xc7,
+ 0x42, 0x45, 0x07, 0x72, 0x0d, 0xc5, 0x21, 0x9d,
+ 0x04, 0x4d, 0xea, 0x60, 0x88, 0x80, 0x41, 0x2b,
+ 0xfd, 0xff, 0xcf, 0x35, 0x57, 0x9e, 0x9b, 0x26
+};
+static const u8 enc_assoc097[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce097[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key097[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input098[] __initconst = {
+ 0x6d, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
+ 0xa1, 0x61, 0xb5, 0xab, 0x04, 0x09, 0x00, 0x62,
+ 0x9e, 0xfe, 0xff, 0x78, 0xd7, 0xd8, 0x6b, 0x45,
+ 0x9f, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca,
+ 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64,
+ 0xc6, 0xf8, 0x07, 0x8c, 0xc8, 0xef, 0x12, 0xa0,
+ 0xff, 0x65, 0x7d, 0x6d, 0x08, 0xdb, 0x10, 0xb8,
+ 0x47, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2,
+ 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73,
+ 0x8e, 0xdc, 0x36, 0x6c, 0xd6, 0x97, 0x65, 0x6f,
+ 0xca, 0x81, 0xfb, 0x13, 0x3c, 0xed, 0x79, 0xa1
+};
+static const u8 enc_output098[] __initconst = {
+ 0x6d, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x26, 0xa3, 0x7f, 0xa2, 0xe8, 0x10, 0x26, 0x94,
+ 0x5c, 0x39, 0xe9, 0xf2, 0xeb, 0xa8, 0x77, 0x02,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xa5, 0xf1, 0xcf, 0xf2, 0x46, 0xfa, 0x09, 0x66,
+ 0x6e, 0x3b, 0xdf, 0x50, 0xb7, 0xf5, 0x44, 0xb3,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xa5, 0xf1, 0xcf, 0xf2, 0x46, 0xfa, 0x09, 0x66,
+ 0x6e, 0x3b, 0xdf, 0x50, 0xb7, 0xf5, 0x44, 0xb3,
+ 0x1e, 0x6b, 0xea, 0x63, 0x14, 0x54, 0x2e, 0x2e,
+ 0xf9, 0xff, 0xcf, 0x45, 0x0b, 0x2e, 0x98, 0x2b
+};
+static const u8 enc_assoc098[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce098[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key098[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input099[] __initconst = {
+ 0xff, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
+ 0xfc, 0x01, 0xb8, 0x91, 0xe5, 0xf0, 0xf9, 0x12,
+ 0x8d, 0x7d, 0x1c, 0x57, 0x91, 0x92, 0xb6, 0x98,
+ 0x63, 0x41, 0x44, 0x15, 0xb6, 0x99, 0x68, 0x95,
+ 0x9a, 0x72, 0x91, 0xb7, 0xa5, 0xaf, 0x13, 0x48,
+ 0x60, 0xcd, 0x9e, 0xa1, 0x0c, 0x29, 0xa3, 0x66,
+ 0x54, 0xe7, 0xa2, 0x8e, 0x76, 0x1b, 0xec, 0xd8
+};
+static const u8 enc_output099[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x7b, 0xc3, 0x72, 0x98, 0x09, 0xe9, 0xdf, 0xe4,
+ 0x4f, 0xba, 0x0a, 0xdd, 0xad, 0xe2, 0xaa, 0xdf,
+ 0x03, 0xc4, 0x56, 0xdf, 0x82, 0x3c, 0xb8, 0xa0,
+ 0xc5, 0xb9, 0x00, 0xb3, 0xc9, 0x35, 0xb8, 0xd3,
+ 0x03, 0xc4, 0x56, 0xdf, 0x82, 0x3c, 0xb8, 0xa0,
+ 0xc5, 0xb9, 0x00, 0xb3, 0xc9, 0x35, 0xb8, 0xd3,
+ 0xed, 0x20, 0x17, 0xc8, 0xdb, 0xa4, 0x77, 0x56,
+ 0x29, 0x04, 0x9d, 0x78, 0x6e, 0x3b, 0xce, 0xb1
+};
+static const u8 enc_assoc099[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce099[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key099[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input100[] __initconst = {
+ 0xff, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
+ 0x6b, 0x6d, 0xc9, 0xd2, 0x1a, 0x81, 0x9e, 0x70,
+ 0xb5, 0x77, 0xf4, 0x41, 0x37, 0xd3, 0xd6, 0xbd,
+ 0x13, 0x35, 0xf5, 0xeb, 0x44, 0x49, 0x40, 0x77,
+ 0xb2, 0x64, 0x49, 0xa5, 0x4b, 0x6c, 0x7c, 0x75,
+ 0x10, 0xb9, 0x2f, 0x5f, 0xfe, 0xf9, 0x8b, 0x84,
+ 0x7c, 0xf1, 0x7a, 0x9c, 0x98, 0xd8, 0x83, 0xe5
+};
+static const u8 enc_output100[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xec, 0xaf, 0x03, 0xdb, 0xf6, 0x98, 0xb8, 0x86,
+ 0x77, 0xb0, 0xe2, 0xcb, 0x0b, 0xa3, 0xca, 0xfa,
+ 0x73, 0xb0, 0xe7, 0x21, 0x70, 0xec, 0x90, 0x42,
+ 0xed, 0xaf, 0xd8, 0xa1, 0x27, 0xf6, 0xd7, 0xee,
+ 0x73, 0xb0, 0xe7, 0x21, 0x70, 0xec, 0x90, 0x42,
+ 0xed, 0xaf, 0xd8, 0xa1, 0x27, 0xf6, 0xd7, 0xee,
+ 0x07, 0x3f, 0x17, 0xcb, 0x67, 0x78, 0x64, 0x59,
+ 0x25, 0x04, 0x9d, 0x88, 0x22, 0xcb, 0xca, 0xb6
+};
+static const u8 enc_assoc100[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce100[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key100[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input101[] __initconst = {
+ 0xff, 0xcb, 0x2b, 0x11, 0x06, 0xf8, 0x23, 0x4c,
+ 0x5e, 0x99, 0xd4, 0xdb, 0x4c, 0x70, 0x48, 0xde,
+ 0x32, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09,
+ 0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8,
+ 0x16, 0xe9, 0x88, 0x4a, 0x11, 0x4f, 0x0e, 0x92,
+ 0x66, 0xce, 0xa3, 0x88, 0x5f, 0xe3, 0x6b, 0x9f,
+ 0xd6, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39,
+ 0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4,
+ 0xce, 0xbe, 0xf5, 0xe9, 0x88, 0x5a, 0x80, 0xea,
+ 0x76, 0xd9, 0x75, 0xc1, 0x44, 0xa4, 0x18, 0x88
+};
+static const u8 enc_output101[] __initconst = {
+ 0xff, 0xa0, 0xfc, 0x3e, 0x80, 0x32, 0xc3, 0xd5,
+ 0xfd, 0xb6, 0x2a, 0x11, 0xf0, 0x96, 0x30, 0x7d,
+ 0xb5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x76, 0x6c, 0x9a, 0x80, 0x25, 0xea, 0xde, 0xa7,
+ 0x39, 0x05, 0x32, 0x8c, 0x33, 0x79, 0xc0, 0x04,
+ 0xb5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x76, 0x6c, 0x9a, 0x80, 0x25, 0xea, 0xde, 0xa7,
+ 0x39, 0x05, 0x32, 0x8c, 0x33, 0x79, 0xc0, 0x04,
+ 0x8b, 0x9b, 0xb4, 0xb4, 0x86, 0x12, 0x89, 0x65,
+ 0x8c, 0x69, 0x6a, 0x83, 0x40, 0x15, 0x04, 0x05
+};
+static const u8 enc_assoc101[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce101[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key101[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input102[] __initconst = {
+ 0x6f, 0x9e, 0x70, 0xed, 0x3b, 0x8b, 0xac, 0xa0,
+ 0x26, 0xe4, 0x6a, 0x5a, 0x09, 0x43, 0x15, 0x8d,
+ 0x21, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09,
+ 0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8,
+ 0x0c, 0x61, 0x2c, 0x5e, 0x8d, 0x89, 0xa8, 0x73,
+ 0xdb, 0xca, 0xad, 0x5b, 0x73, 0x46, 0x42, 0x9b,
+ 0xc5, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39,
+ 0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4,
+ 0xd4, 0x36, 0x51, 0xfd, 0x14, 0x9c, 0x26, 0x0b,
+ 0xcb, 0xdd, 0x7b, 0x12, 0x68, 0x01, 0x31, 0x8c
+};
+static const u8 enc_output102[] __initconst = {
+ 0x6f, 0xf5, 0xa7, 0xc2, 0xbd, 0x41, 0x4c, 0x39,
+ 0x85, 0xcb, 0x94, 0x90, 0xb5, 0xa5, 0x6d, 0x2e,
+ 0xa6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x6c, 0xe4, 0x3e, 0x94, 0xb9, 0x2c, 0x78, 0x46,
+ 0x84, 0x01, 0x3c, 0x5f, 0x1f, 0xdc, 0xe9, 0x00,
+ 0xa6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x6c, 0xe4, 0x3e, 0x94, 0xb9, 0x2c, 0x78, 0x46,
+ 0x84, 0x01, 0x3c, 0x5f, 0x1f, 0xdc, 0xe9, 0x00,
+ 0x8b, 0x3b, 0xbd, 0x51, 0x64, 0x44, 0x59, 0x56,
+ 0x8d, 0x81, 0xca, 0x1f, 0xa7, 0x2c, 0xe4, 0x04
+};
+static const u8 enc_assoc102[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce102[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key102[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input103[] __initconst = {
+ 0x41, 0x2b, 0x08, 0x0a, 0x3e, 0x19, 0xc1, 0x0d,
+ 0x44, 0xa1, 0xaf, 0x1e, 0xab, 0xde, 0xb4, 0xce,
+ 0x35, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09,
+ 0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8,
+ 0x6b, 0x83, 0x94, 0x33, 0x09, 0x21, 0x48, 0x6c,
+ 0xa1, 0x1d, 0x29, 0x1c, 0x3e, 0x97, 0xee, 0x9a,
+ 0xd1, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39,
+ 0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4,
+ 0xb3, 0xd4, 0xe9, 0x90, 0x90, 0x34, 0xc6, 0x14,
+ 0xb1, 0x0a, 0xff, 0x55, 0x25, 0xd0, 0x9d, 0x8d
+};
+static const u8 enc_output103[] __initconst = {
+ 0x41, 0x40, 0xdf, 0x25, 0xb8, 0xd3, 0x21, 0x94,
+ 0xe7, 0x8e, 0x51, 0xd4, 0x17, 0x38, 0xcc, 0x6d,
+ 0xb2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x0b, 0x06, 0x86, 0xf9, 0x3d, 0x84, 0x98, 0x59,
+ 0xfe, 0xd6, 0xb8, 0x18, 0x52, 0x0d, 0x45, 0x01,
+ 0xb2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x0b, 0x06, 0x86, 0xf9, 0x3d, 0x84, 0x98, 0x59,
+ 0xfe, 0xd6, 0xb8, 0x18, 0x52, 0x0d, 0x45, 0x01,
+ 0x86, 0xfb, 0xab, 0x2b, 0x4a, 0x94, 0xf4, 0x7a,
+ 0xa5, 0x6f, 0x0a, 0xea, 0x65, 0xd1, 0x10, 0x08
+};
+static const u8 enc_assoc103[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce103[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key103[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input104[] __initconst = {
+ 0xb2, 0x47, 0xa7, 0x47, 0x23, 0x49, 0x1a, 0xac,
+ 0xac, 0xaa, 0xd7, 0x09, 0xc9, 0x1e, 0x93, 0x2b,
+ 0x31, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09,
+ 0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8,
+ 0x9a, 0xde, 0x04, 0xe7, 0x5b, 0xb7, 0x01, 0xd9,
+ 0x66, 0x06, 0x01, 0xb3, 0x47, 0x65, 0xde, 0x98,
+ 0xd5, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39,
+ 0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4,
+ 0x42, 0x89, 0x79, 0x44, 0xc2, 0xa2, 0x8f, 0xa1,
+ 0x76, 0x11, 0xd7, 0xfa, 0x5c, 0x22, 0xad, 0x8f
+};
+static const u8 enc_output104[] __initconst = {
+ 0xb2, 0x2c, 0x70, 0x68, 0xa5, 0x83, 0xfa, 0x35,
+ 0x0f, 0x85, 0x29, 0xc3, 0x75, 0xf8, 0xeb, 0x88,
+ 0xb6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xfa, 0x5b, 0x16, 0x2d, 0x6f, 0x12, 0xd1, 0xec,
+ 0x39, 0xcd, 0x90, 0xb7, 0x2b, 0xff, 0x75, 0x03,
+ 0xb6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xfa, 0x5b, 0x16, 0x2d, 0x6f, 0x12, 0xd1, 0xec,
+ 0x39, 0xcd, 0x90, 0xb7, 0x2b, 0xff, 0x75, 0x03,
+ 0xa0, 0x19, 0xac, 0x2e, 0xd6, 0x67, 0xe1, 0x7d,
+ 0xa1, 0x6f, 0x0a, 0xfa, 0x19, 0x61, 0x0d, 0x0d
+};
+static const u8 enc_assoc104[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce104[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key104[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input105[] __initconst = {
+ 0x74, 0x0f, 0x9e, 0x49, 0xf6, 0x10, 0xef, 0xa5,
+ 0x85, 0xb6, 0x59, 0xca, 0x6e, 0xd8, 0xb4, 0x99,
+ 0x2d, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09,
+ 0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8,
+ 0x41, 0x2d, 0x96, 0xaf, 0xbe, 0x80, 0xec, 0x3e,
+ 0x79, 0xd4, 0x51, 0xb0, 0x0a, 0x2d, 0xb2, 0x9a,
+ 0xc9, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39,
+ 0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4,
+ 0x99, 0x7a, 0xeb, 0x0c, 0x27, 0x95, 0x62, 0x46,
+ 0x69, 0xc3, 0x87, 0xf9, 0x11, 0x6a, 0xc1, 0x8d
+};
+static const u8 enc_output105[] __initconst = {
+ 0x74, 0x64, 0x49, 0x66, 0x70, 0xda, 0x0f, 0x3c,
+ 0x26, 0x99, 0xa7, 0x00, 0xd2, 0x3e, 0xcc, 0x3a,
+ 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x21, 0xa8, 0x84, 0x65, 0x8a, 0x25, 0x3c, 0x0b,
+ 0x26, 0x1f, 0xc0, 0xb4, 0x66, 0xb7, 0x19, 0x01,
+ 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x21, 0xa8, 0x84, 0x65, 0x8a, 0x25, 0x3c, 0x0b,
+ 0x26, 0x1f, 0xc0, 0xb4, 0x66, 0xb7, 0x19, 0x01,
+ 0x73, 0x6e, 0x18, 0x18, 0x16, 0x96, 0xa5, 0x88,
+ 0x9c, 0x31, 0x59, 0xfa, 0xab, 0xab, 0x20, 0xfd
+};
+static const u8 enc_assoc105[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce105[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key105[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input106[] __initconst = {
+ 0xad, 0xba, 0x5d, 0x10, 0x5b, 0xc8, 0xaa, 0x06,
+ 0x2c, 0x23, 0x36, 0xcb, 0x88, 0x9d, 0xdb, 0xd5,
+ 0x37, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09,
+ 0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8,
+ 0x17, 0x7c, 0x5f, 0xfe, 0x28, 0x75, 0xf4, 0x68,
+ 0xf6, 0xc2, 0x96, 0x57, 0x48, 0xf3, 0x59, 0x9a,
+ 0xd3, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39,
+ 0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4,
+ 0xcf, 0x2b, 0x22, 0x5d, 0xb1, 0x60, 0x7a, 0x10,
+ 0xe6, 0xd5, 0x40, 0x1e, 0x53, 0xb4, 0x2a, 0x8d
+};
+static const u8 enc_output106[] __initconst = {
+ 0xad, 0xd1, 0x8a, 0x3f, 0xdd, 0x02, 0x4a, 0x9f,
+ 0x8f, 0x0c, 0xc8, 0x01, 0x34, 0x7b, 0xa3, 0x76,
+ 0xb0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x77, 0xf9, 0x4d, 0x34, 0x1c, 0xd0, 0x24, 0x5d,
+ 0xa9, 0x09, 0x07, 0x53, 0x24, 0x69, 0xf2, 0x01,
+ 0xb0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x77, 0xf9, 0x4d, 0x34, 0x1c, 0xd0, 0x24, 0x5d,
+ 0xa9, 0x09, 0x07, 0x53, 0x24, 0x69, 0xf2, 0x01,
+ 0xba, 0xd5, 0x8f, 0x10, 0xa9, 0x1e, 0x6a, 0x88,
+ 0x9a, 0xba, 0x32, 0xfd, 0x17, 0xd8, 0x33, 0x1a
+};
+static const u8 enc_assoc106[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce106[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key106[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input107[] __initconst = {
+ 0xfe, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
+ 0xc0, 0x01, 0xed, 0xc5, 0xda, 0x44, 0x2e, 0x71,
+ 0x9b, 0xce, 0x9a, 0xbe, 0x27, 0x3a, 0xf1, 0x44,
+ 0xb4, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca,
+ 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64,
+ 0x48, 0x02, 0x5f, 0x41, 0xfa, 0x4e, 0x33, 0x6c,
+ 0x78, 0x69, 0x57, 0xa2, 0xa7, 0xc4, 0x93, 0x0a,
+ 0x6c, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2,
+ 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73,
+ 0x00, 0x26, 0x6e, 0xa1, 0xe4, 0x36, 0x44, 0xa3,
+ 0x4d, 0x8d, 0xd1, 0xdc, 0x93, 0xf2, 0xfa, 0x13
+};
+static const u8 enc_output107[] __initconst = {
+ 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x47, 0xc3, 0x27, 0xcc, 0x36, 0x5d, 0x08, 0x87,
+ 0x59, 0x09, 0x8c, 0x34, 0x1b, 0x4a, 0xed, 0x03,
+ 0xd4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x2b, 0x0b, 0x97, 0x3f, 0x74, 0x5b, 0x28, 0xaa,
+ 0xe9, 0x37, 0xf5, 0x9f, 0x18, 0xea, 0xc7, 0x01,
+ 0xd4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x2b, 0x0b, 0x97, 0x3f, 0x74, 0x5b, 0x28, 0xaa,
+ 0xe9, 0x37, 0xf5, 0x9f, 0x18, 0xea, 0xc7, 0x01,
+ 0xd6, 0x8c, 0xe1, 0x74, 0x07, 0x9a, 0xdd, 0x02,
+ 0x8d, 0xd0, 0x5c, 0xf8, 0x14, 0x63, 0x04, 0x88
+};
+static const u8 enc_assoc107[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce107[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key107[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input108[] __initconst = {
+ 0xb5, 0x13, 0xb0, 0x6a, 0xb9, 0xac, 0x14, 0x43,
+ 0x5a, 0xcb, 0x8a, 0xa3, 0xa3, 0x7a, 0xfd, 0xb6,
+ 0x54, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09,
+ 0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8,
+ 0x61, 0x95, 0x01, 0x93, 0xb1, 0xbf, 0x03, 0x11,
+ 0xff, 0x11, 0x79, 0x89, 0xae, 0xd9, 0xa9, 0x99,
+ 0xb0, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39,
+ 0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4,
+ 0xb9, 0xc2, 0x7c, 0x30, 0x28, 0xaa, 0x8d, 0x69,
+ 0xef, 0x06, 0xaf, 0xc0, 0xb5, 0x9e, 0xda, 0x8e
+};
+static const u8 enc_output108[] __initconst = {
+ 0xb5, 0x78, 0x67, 0x45, 0x3f, 0x66, 0xf4, 0xda,
+ 0xf9, 0xe4, 0x74, 0x69, 0x1f, 0x9c, 0x85, 0x15,
+ 0xd3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x01, 0x10, 0x13, 0x59, 0x85, 0x1a, 0xd3, 0x24,
+ 0xa0, 0xda, 0xe8, 0x8d, 0xc2, 0x43, 0x02, 0x02,
+ 0xd3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x01, 0x10, 0x13, 0x59, 0x85, 0x1a, 0xd3, 0x24,
+ 0xa0, 0xda, 0xe8, 0x8d, 0xc2, 0x43, 0x02, 0x02,
+ 0xaa, 0x48, 0xa3, 0x88, 0x7d, 0x4b, 0x05, 0x96,
+ 0x99, 0xc2, 0xfd, 0xf9, 0xc6, 0x78, 0x7e, 0x0a
+};
+static const u8 enc_assoc108[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce108[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key108[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input109[] __initconst = {
+ 0xff, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
+ 0xd4, 0xf1, 0x09, 0xe8, 0x14, 0xce, 0xa8, 0x5a,
+ 0x08, 0xc0, 0x11, 0xd8, 0x50, 0xdd, 0x1d, 0xcb,
+ 0xcf, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca,
+ 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64,
+ 0x53, 0x40, 0xb8, 0x5a, 0x9a, 0xa0, 0x82, 0x96,
+ 0xb7, 0x7a, 0x5f, 0xc3, 0x96, 0x1f, 0x66, 0x0f,
+ 0x17, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2,
+ 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73,
+ 0x1b, 0x64, 0x89, 0xba, 0x84, 0xd8, 0xf5, 0x59,
+ 0x82, 0x9e, 0xd9, 0xbd, 0xa2, 0x29, 0x0f, 0x16
+};
+static const u8 enc_output109[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x53, 0x33, 0xc3, 0xe1, 0xf8, 0xd7, 0x8e, 0xac,
+ 0xca, 0x07, 0x07, 0x52, 0x6c, 0xad, 0x01, 0x8c,
+ 0xaf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x30, 0x49, 0x70, 0x24, 0x14, 0xb5, 0x99, 0x50,
+ 0x26, 0x24, 0xfd, 0xfe, 0x29, 0x31, 0x32, 0x04,
+ 0xaf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x30, 0x49, 0x70, 0x24, 0x14, 0xb5, 0x99, 0x50,
+ 0x26, 0x24, 0xfd, 0xfe, 0x29, 0x31, 0x32, 0x04,
+ 0xb9, 0x36, 0xa8, 0x17, 0xf2, 0x21, 0x1a, 0xf1,
+ 0x29, 0xe2, 0xcf, 0x16, 0x0f, 0xd4, 0x2b, 0xcb
+};
+static const u8 enc_assoc109[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce109[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key109[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input110[] __initconst = {
+ 0xff, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
+ 0xdf, 0x4c, 0x62, 0x03, 0x2d, 0x41, 0x19, 0xb5,
+ 0x88, 0x47, 0x7e, 0x99, 0x92, 0x5a, 0x56, 0xd9,
+ 0xd6, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca,
+ 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64,
+ 0xfa, 0x84, 0xf0, 0x64, 0x55, 0x36, 0x42, 0x1b,
+ 0x2b, 0xb9, 0x24, 0x6e, 0xc2, 0x19, 0xed, 0x0b,
+ 0x0e, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2,
+ 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73,
+ 0xb2, 0xa0, 0xc1, 0x84, 0x4b, 0x4e, 0x35, 0xd4,
+ 0x1e, 0x5d, 0xa2, 0x10, 0xf6, 0x2f, 0x84, 0x12
+};
+static const u8 enc_output110[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x58, 0x8e, 0xa8, 0x0a, 0xc1, 0x58, 0x3f, 0x43,
+ 0x4a, 0x80, 0x68, 0x13, 0xae, 0x2a, 0x4a, 0x9e,
+ 0xb6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x99, 0x8d, 0x38, 0x1a, 0xdb, 0x23, 0x59, 0xdd,
+ 0xba, 0xe7, 0x86, 0x53, 0x7d, 0x37, 0xb9, 0x00,
+ 0xb6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x99, 0x8d, 0x38, 0x1a, 0xdb, 0x23, 0x59, 0xdd,
+ 0xba, 0xe7, 0x86, 0x53, 0x7d, 0x37, 0xb9, 0x00,
+ 0x9f, 0x7a, 0xc4, 0x35, 0x1f, 0x6b, 0x91, 0xe6,
+ 0x30, 0x97, 0xa7, 0x13, 0x11, 0x5d, 0x05, 0xbe
+};
+static const u8 enc_assoc110[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce110[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key110[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input111[] __initconst = {
+ 0xff, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
+ 0x13, 0xf8, 0x0a, 0x00, 0x6d, 0xc1, 0xbb, 0xda,
+ 0xd6, 0x39, 0xa9, 0x2f, 0xc7, 0xec, 0xa6, 0x55,
+ 0xf7, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca,
+ 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64,
+ 0x63, 0x48, 0xb8, 0xfd, 0x29, 0xbf, 0x96, 0xd5,
+ 0x63, 0xa5, 0x17, 0xe2, 0x7d, 0x7b, 0xfc, 0x0f,
+ 0x2f, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2,
+ 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73,
+ 0x2b, 0x6c, 0x89, 0x1d, 0x37, 0xc7, 0xe1, 0x1a,
+ 0x56, 0x41, 0x91, 0x9c, 0x49, 0x4d, 0x95, 0x16
+};
+static const u8 enc_output111[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x94, 0x3a, 0xc0, 0x09, 0x81, 0xd8, 0x9d, 0x2c,
+ 0x14, 0xfe, 0xbf, 0xa5, 0xfb, 0x9c, 0xba, 0x12,
+ 0x97, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x41, 0x70, 0x83, 0xa7, 0xaa, 0x8d, 0x13,
+ 0xf2, 0xfb, 0xb5, 0xdf, 0xc2, 0x55, 0xa8, 0x04,
+ 0x97, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x41, 0x70, 0x83, 0xa7, 0xaa, 0x8d, 0x13,
+ 0xf2, 0xfb, 0xb5, 0xdf, 0xc2, 0x55, 0xa8, 0x04,
+ 0x9a, 0x18, 0xa8, 0x28, 0x07, 0x02, 0x69, 0xf4,
+ 0x47, 0x00, 0xd0, 0x09, 0xe7, 0x17, 0x1c, 0xc9
+};
+static const u8 enc_assoc111[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce111[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key111[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input112[] __initconst = {
+ 0xff, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
+ 0x82, 0xe5, 0x9b, 0x45, 0x82, 0x91, 0x50, 0x38,
+ 0xf9, 0x33, 0x81, 0x1e, 0x65, 0x2d, 0xc6, 0x6a,
+ 0xfc, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca,
+ 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64,
+ 0xb6, 0x71, 0xc8, 0xca, 0xc2, 0x70, 0xc2, 0x65,
+ 0xa0, 0xac, 0x2f, 0x53, 0x57, 0x99, 0x88, 0x0a,
+ 0x24, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2,
+ 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73,
+ 0xfe, 0x55, 0xf9, 0x2a, 0xdc, 0x08, 0xb5, 0xaa,
+ 0x95, 0x48, 0xa9, 0x2d, 0x63, 0xaf, 0xe1, 0x13
+};
+static const u8 enc_output112[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x05, 0x27, 0x51, 0x4c, 0x6e, 0x88, 0x76, 0xce,
+ 0x3b, 0xf4, 0x97, 0x94, 0x59, 0x5d, 0xda, 0x2d,
+ 0x9c, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xd5, 0x78, 0x00, 0xb4, 0x4c, 0x65, 0xd9, 0xa3,
+ 0x31, 0xf2, 0x8d, 0x6e, 0xe8, 0xb7, 0xdc, 0x01,
+ 0x9c, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xd5, 0x78, 0x00, 0xb4, 0x4c, 0x65, 0xd9, 0xa3,
+ 0x31, 0xf2, 0x8d, 0x6e, 0xe8, 0xb7, 0xdc, 0x01,
+ 0xb4, 0x36, 0xa8, 0x2b, 0x93, 0xd5, 0x55, 0xf7,
+ 0x43, 0x00, 0xd0, 0x19, 0x9b, 0xa7, 0x18, 0xce
+};
+static const u8 enc_assoc112[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce112[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key112[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input113[] __initconst = {
+ 0xff, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
+ 0xf1, 0xd1, 0x28, 0x87, 0xb7, 0x21, 0x69, 0x86,
+ 0xa1, 0x2d, 0x79, 0x09, 0x8b, 0x6d, 0xe6, 0x0f,
+ 0xc0, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca,
+ 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64,
+ 0xa7, 0xc7, 0x58, 0x99, 0xf3, 0xe6, 0x0a, 0xf1,
+ 0xfc, 0xb6, 0xc7, 0x30, 0x7d, 0x87, 0x59, 0x0f,
+ 0x18, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2,
+ 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73,
+ 0xef, 0xe3, 0x69, 0x79, 0xed, 0x9e, 0x7d, 0x3e,
+ 0xc9, 0x52, 0x41, 0x4e, 0x49, 0xb1, 0x30, 0x16
+};
+static const u8 enc_output113[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x76, 0x13, 0xe2, 0x8e, 0x5b, 0x38, 0x4f, 0x70,
+ 0x63, 0xea, 0x6f, 0x83, 0xb7, 0x1d, 0xfa, 0x48,
+ 0xa0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xc4, 0xce, 0x90, 0xe7, 0x7d, 0xf3, 0x11, 0x37,
+ 0x6d, 0xe8, 0x65, 0x0d, 0xc2, 0xa9, 0x0d, 0x04,
+ 0xa0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xc4, 0xce, 0x90, 0xe7, 0x7d, 0xf3, 0x11, 0x37,
+ 0x6d, 0xe8, 0x65, 0x0d, 0xc2, 0xa9, 0x0d, 0x04,
+ 0xce, 0x54, 0xa8, 0x2e, 0x1f, 0xa9, 0x42, 0xfa,
+ 0x3f, 0x00, 0xd0, 0x29, 0x4f, 0x37, 0x15, 0xd3
+};
+static const u8 enc_assoc113[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce113[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key113[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input114[] __initconst = {
+ 0xcb, 0xf1, 0xda, 0x9e, 0x0b, 0xa9, 0x37, 0x73,
+ 0x74, 0xe6, 0x9e, 0x1c, 0x0e, 0x60, 0x0c, 0xfc,
+ 0x34, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09,
+ 0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8,
+ 0xbe, 0x3f, 0xa6, 0x6b, 0x6c, 0xe7, 0x80, 0x8a,
+ 0xa3, 0xe4, 0x59, 0x49, 0xf9, 0x44, 0x64, 0x9f,
+ 0xd0, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39,
+ 0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4,
+ 0x66, 0x68, 0xdb, 0xc8, 0xf5, 0xf2, 0x0e, 0xf2,
+ 0xb3, 0xf3, 0x8f, 0x00, 0xe2, 0x03, 0x17, 0x88
+};
+static const u8 enc_output114[] __initconst = {
+ 0xcb, 0x9a, 0x0d, 0xb1, 0x8d, 0x63, 0xd7, 0xea,
+ 0xd7, 0xc9, 0x60, 0xd6, 0xb2, 0x86, 0x74, 0x5f,
+ 0xb3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xde, 0xba, 0xb4, 0xa1, 0x58, 0x42, 0x50, 0xbf,
+ 0xfc, 0x2f, 0xc8, 0x4d, 0x95, 0xde, 0xcf, 0x04,
+ 0xb3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xde, 0xba, 0xb4, 0xa1, 0x58, 0x42, 0x50, 0xbf,
+ 0xfc, 0x2f, 0xc8, 0x4d, 0x95, 0xde, 0xcf, 0x04,
+ 0x23, 0x83, 0xab, 0x0b, 0x79, 0x92, 0x05, 0x69,
+ 0x9b, 0x51, 0x0a, 0xa7, 0x09, 0xbf, 0x31, 0xf1
+};
+static const u8 enc_assoc114[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce114[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key114[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input115[] __initconst = {
+ 0x8f, 0x27, 0x86, 0x94, 0xc4, 0xe9, 0xda, 0xeb,
+ 0xd5, 0x8d, 0x3e, 0x5b, 0x96, 0x6e, 0x8b, 0x68,
+ 0x42, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09,
+ 0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8,
+ 0x06, 0x53, 0xe7, 0xa3, 0x31, 0x71, 0x88, 0x33,
+ 0xac, 0xc3, 0xb9, 0xad, 0xff, 0x1c, 0x31, 0x98,
+ 0xa6, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39,
+ 0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4,
+ 0xde, 0x04, 0x9a, 0x00, 0xa8, 0x64, 0x06, 0x4b,
+ 0xbc, 0xd4, 0x6f, 0xe4, 0xe4, 0x5b, 0x42, 0x8f
+};
+static const u8 enc_output115[] __initconst = {
+ 0x8f, 0x4c, 0x51, 0xbb, 0x42, 0x23, 0x3a, 0x72,
+ 0x76, 0xa2, 0xc0, 0x91, 0x2a, 0x88, 0xf3, 0xcb,
+ 0xc5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x66, 0xd6, 0xf5, 0x69, 0x05, 0xd4, 0x58, 0x06,
+ 0xf3, 0x08, 0x28, 0xa9, 0x93, 0x86, 0x9a, 0x03,
+ 0xc5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x66, 0xd6, 0xf5, 0x69, 0x05, 0xd4, 0x58, 0x06,
+ 0xf3, 0x08, 0x28, 0xa9, 0x93, 0x86, 0x9a, 0x03,
+ 0x8b, 0xfb, 0xab, 0x17, 0xa9, 0xe0, 0xb8, 0x74,
+ 0x8b, 0x51, 0x0a, 0xe7, 0xd9, 0xfd, 0x23, 0x05
+};
+static const u8 enc_assoc115[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce115[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key115[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input116[] __initconst = {
+ 0xd5, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
+ 0x9a, 0x22, 0xd7, 0x0a, 0x48, 0xe2, 0x4f, 0xdd,
+ 0xcd, 0xd4, 0x41, 0x9d, 0xe6, 0x4c, 0x8f, 0x44,
+ 0xfc, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca,
+ 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64,
+ 0x77, 0xb5, 0xc9, 0x07, 0xd9, 0xc9, 0xe1, 0xea,
+ 0x51, 0x85, 0x1a, 0x20, 0x4a, 0xad, 0x9f, 0x0a,
+ 0x24, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2,
+ 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73,
+ 0x3f, 0x91, 0xf8, 0xe7, 0xc7, 0xb1, 0x96, 0x25,
+ 0x64, 0x61, 0x9c, 0x5e, 0x7e, 0x9b, 0xf6, 0x13
+};
+static const u8 enc_output116[] __initconst = {
+ 0xd5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x1d, 0xe0, 0x1d, 0x03, 0xa4, 0xfb, 0x69, 0x2b,
+ 0x0f, 0x13, 0x57, 0x17, 0xda, 0x3c, 0x93, 0x03,
+ 0x9c, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x14, 0xbc, 0x01, 0x79, 0x57, 0xdc, 0xfa, 0x2c,
+ 0xc0, 0xdb, 0xb8, 0x1d, 0xf5, 0x83, 0xcb, 0x01,
+ 0x9c, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x14, 0xbc, 0x01, 0x79, 0x57, 0xdc, 0xfa, 0x2c,
+ 0xc0, 0xdb, 0xb8, 0x1d, 0xf5, 0x83, 0xcb, 0x01,
+ 0x49, 0xbc, 0x6e, 0x9f, 0xc5, 0x1c, 0x4d, 0x50,
+ 0x30, 0x36, 0x64, 0x4d, 0x84, 0x27, 0x73, 0xd2
+};
+static const u8 enc_assoc116[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce116[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key116[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input117[] __initconst = {
+ 0xdb, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
+ 0x75, 0xd5, 0x64, 0x3a, 0xa5, 0xaf, 0x93, 0x4d,
+ 0x8c, 0xce, 0x39, 0x2c, 0xc3, 0xee, 0xdb, 0x47,
+ 0xc0, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca,
+ 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64,
+ 0x60, 0x1b, 0x5a, 0xd2, 0x06, 0x7f, 0x28, 0x06,
+ 0x6a, 0x8f, 0x32, 0x81, 0x71, 0x5b, 0xa8, 0x08,
+ 0x18, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2,
+ 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73,
+ 0x28, 0x3f, 0x6b, 0x32, 0x18, 0x07, 0x5f, 0xc9,
+ 0x5f, 0x6b, 0xb4, 0xff, 0x45, 0x6d, 0xc1, 0x11
+};
+static const u8 enc_output117[] __initconst = {
+ 0xdb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xf2, 0x17, 0xae, 0x33, 0x49, 0xb6, 0xb5, 0xbb,
+ 0x4e, 0x09, 0x2f, 0xa6, 0xff, 0x9e, 0xc7, 0x00,
+ 0xa0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x03, 0x12, 0x92, 0xac, 0x88, 0x6a, 0x33, 0xc0,
+ 0xfb, 0xd1, 0x90, 0xbc, 0xce, 0x75, 0xfc, 0x03,
+ 0xa0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x03, 0x12, 0x92, 0xac, 0x88, 0x6a, 0x33, 0xc0,
+ 0xfb, 0xd1, 0x90, 0xbc, 0xce, 0x75, 0xfc, 0x03,
+ 0x63, 0xda, 0x6e, 0xa2, 0x51, 0xf0, 0x39, 0x53,
+ 0x2c, 0x36, 0x64, 0x5d, 0x38, 0xb7, 0x6f, 0xd7
+};
+static const u8 enc_assoc117[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce117[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key117[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
+/* wycheproof - edge case intermediate sums in poly1305 */
+static const u8 enc_input118[] __initconst = {
+ 0x93, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
+ 0x62, 0x48, 0x39, 0x60, 0x42, 0x16, 0xe4, 0x03,
+ 0xeb, 0xcc, 0x6a, 0xf5, 0x59, 0xec, 0x8b, 0x43,
+ 0x97, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca,
+ 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64,
+ 0xd8, 0xc8, 0xc3, 0xfa, 0x1a, 0x9e, 0x47, 0x4a,
+ 0xbe, 0x52, 0xd0, 0x2c, 0x81, 0x87, 0xe9, 0x0f,
+ 0x4f, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2,
+ 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73,
+ 0x90, 0xec, 0xf2, 0x1a, 0x04, 0xe6, 0x30, 0x85,
+ 0x8b, 0xb6, 0x56, 0x52, 0xb5, 0xb1, 0x80, 0x16
+};
+static const u8 enc_output118[] __initconst = {
+ 0x93, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xe5, 0x8a, 0xf3, 0x69, 0xae, 0x0f, 0xc2, 0xf5,
+ 0x29, 0x0b, 0x7c, 0x7f, 0x65, 0x9c, 0x97, 0x04,
+ 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xbb, 0xc1, 0x0b, 0x84, 0x94, 0x8b, 0x5c, 0x8c,
+ 0x2f, 0x0c, 0x72, 0x11, 0x3e, 0xa9, 0xbd, 0x04,
+ 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xbb, 0xc1, 0x0b, 0x84, 0x94, 0x8b, 0x5c, 0x8c,
+ 0x2f, 0x0c, 0x72, 0x11, 0x3e, 0xa9, 0xbd, 0x04,
+ 0x73, 0xeb, 0x27, 0x24, 0xb5, 0xc4, 0x05, 0xf0,
+ 0x4d, 0x00, 0xd0, 0xf1, 0x58, 0x40, 0xa1, 0xc1
+};
+static const u8 enc_assoc118[] __initconst = {
+ 0xff, 0xff, 0xff, 0xff
+};
+static const u8 enc_nonce118[] __initconst = {
+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
+};
+static const u8 enc_key118[] __initconst = {
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
+};
+
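+/*
+ * Aggregate the per-case buffers into one table: each entry carries the
+ * plaintext, expected output (ciphertext followed by the 16-byte tag),
+ * AAD, nonce and key, together with the input, AAD and nonce lengths.
+ */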
+static const struct chacha20poly1305_testvec
+chacha20poly1305_enc_vectors[] __initconst = {
+ { enc_input001, enc_output001, enc_assoc001, enc_nonce001, enc_key001,
+ sizeof(enc_input001), sizeof(enc_assoc001), sizeof(enc_nonce001) },
+ { enc_input002, enc_output002, enc_assoc002, enc_nonce002, enc_key002,
+ sizeof(enc_input002), sizeof(enc_assoc002), sizeof(enc_nonce002) },
+ { enc_input003, enc_output003, enc_assoc003, enc_nonce003, enc_key003,
+ sizeof(enc_input003), sizeof(enc_assoc003), sizeof(enc_nonce003) },
+ { enc_input004, enc_output004, enc_assoc004, enc_nonce004, enc_key004,
+ sizeof(enc_input004), sizeof(enc_assoc004), sizeof(enc_nonce004) },
+ { enc_input005, enc_output005, enc_assoc005, enc_nonce005, enc_key005,
+ sizeof(enc_input005), sizeof(enc_assoc005), sizeof(enc_nonce005) },
+ { enc_input006, enc_output006, enc_assoc006, enc_nonce006, enc_key006,
+ sizeof(enc_input006), sizeof(enc_assoc006), sizeof(enc_nonce006) },
+ { enc_input007, enc_output007, enc_assoc007, enc_nonce007, enc_key007,
+ sizeof(enc_input007), sizeof(enc_assoc007), sizeof(enc_nonce007) },
+ { enc_input008, enc_output008, enc_assoc008, enc_nonce008, enc_key008,
+ sizeof(enc_input008), sizeof(enc_assoc008), sizeof(enc_nonce008) },
+ { enc_input009, enc_output009, enc_assoc009, enc_nonce009, enc_key009,
+ sizeof(enc_input009), sizeof(enc_assoc009), sizeof(enc_nonce009) },
+ { enc_input010, enc_output010, enc_assoc010, enc_nonce010, enc_key010,
+ sizeof(enc_input010), sizeof(enc_assoc010), sizeof(enc_nonce010) },
+ { enc_input011, enc_output011, enc_assoc011, enc_nonce011, enc_key011,
+ sizeof(enc_input011), sizeof(enc_assoc011), sizeof(enc_nonce011) },
+ { enc_input012, enc_output012, enc_assoc012, enc_nonce012, enc_key012,
+ sizeof(enc_input012), sizeof(enc_assoc012), sizeof(enc_nonce012) },
+ { enc_input013, enc_output013, enc_assoc013, enc_nonce013, enc_key013,
+ sizeof(enc_input013), sizeof(enc_assoc013), sizeof(enc_nonce013) },
+ { enc_input014, enc_output014, enc_assoc014, enc_nonce014, enc_key014,
+ sizeof(enc_input014), sizeof(enc_assoc014), sizeof(enc_nonce014) },
+ { enc_input015, enc_output015, enc_assoc015, enc_nonce015, enc_key015,
+ sizeof(enc_input015), sizeof(enc_assoc015), sizeof(enc_nonce015) },
+ { enc_input016, enc_output016, enc_assoc016, enc_nonce016, enc_key016,
+ sizeof(enc_input016), sizeof(enc_assoc016), sizeof(enc_nonce016) },
+ { enc_input017, enc_output017, enc_assoc017, enc_nonce017, enc_key017,
+ sizeof(enc_input017), sizeof(enc_assoc017), sizeof(enc_nonce017) },
+ { enc_input018, enc_output018, enc_assoc018, enc_nonce018, enc_key018,
+ sizeof(enc_input018), sizeof(enc_assoc018), sizeof(enc_nonce018) },
+ { enc_input019, enc_output019, enc_assoc019, enc_nonce019, enc_key019,
+ sizeof(enc_input019), sizeof(enc_assoc019), sizeof(enc_nonce019) },
+ { enc_input020, enc_output020, enc_assoc020, enc_nonce020, enc_key020,
+ sizeof(enc_input020), sizeof(enc_assoc020), sizeof(enc_nonce020) },
+ { enc_input021, enc_output021, enc_assoc021, enc_nonce021, enc_key021,
+ sizeof(enc_input021), sizeof(enc_assoc021), sizeof(enc_nonce021) },
+ { enc_input022, enc_output022, enc_assoc022, enc_nonce022, enc_key022,
+ sizeof(enc_input022), sizeof(enc_assoc022), sizeof(enc_nonce022) },
+ { enc_input023, enc_output023, enc_assoc023, enc_nonce023, enc_key023,
+ sizeof(enc_input023), sizeof(enc_assoc023), sizeof(enc_nonce023) },
+ { enc_input024, enc_output024, enc_assoc024, enc_nonce024, enc_key024,
+ sizeof(enc_input024), sizeof(enc_assoc024), sizeof(enc_nonce024) },
+ { enc_input025, enc_output025, enc_assoc025, enc_nonce025, enc_key025,
+ sizeof(enc_input025), sizeof(enc_assoc025), sizeof(enc_nonce025) },
+ { enc_input026, enc_output026, enc_assoc026, enc_nonce026, enc_key026,
+ sizeof(enc_input026), sizeof(enc_assoc026), sizeof(enc_nonce026) },
+ { enc_input027, enc_output027, enc_assoc027, enc_nonce027, enc_key027,
+ sizeof(enc_input027), sizeof(enc_assoc027), sizeof(enc_nonce027) },
+ { enc_input028, enc_output028, enc_assoc028, enc_nonce028, enc_key028,
+ sizeof(enc_input028), sizeof(enc_assoc028), sizeof(enc_nonce028) },
+ { enc_input029, enc_output029, enc_assoc029, enc_nonce029, enc_key029,
+ sizeof(enc_input029), sizeof(enc_assoc029), sizeof(enc_nonce029) },
+ { enc_input030, enc_output030, enc_assoc030, enc_nonce030, enc_key030,
+ sizeof(enc_input030), sizeof(enc_assoc030), sizeof(enc_nonce030) },
+ { enc_input031, enc_output031, enc_assoc031, enc_nonce031, enc_key031,
+ sizeof(enc_input031), sizeof(enc_assoc031), sizeof(enc_nonce031) },
+ { enc_input032, enc_output032, enc_assoc032, enc_nonce032, enc_key032,
+ sizeof(enc_input032), sizeof(enc_assoc032), sizeof(enc_nonce032) },
+ { enc_input033, enc_output033, enc_assoc033, enc_nonce033, enc_key033,
+ sizeof(enc_input033), sizeof(enc_assoc033), sizeof(enc_nonce033) },
+ { enc_input034, enc_output034, enc_assoc034, enc_nonce034, enc_key034,
+ sizeof(enc_input034), sizeof(enc_assoc034), sizeof(enc_nonce034) },
+ { enc_input035, enc_output035, enc_assoc035, enc_nonce035, enc_key035,
+ sizeof(enc_input035), sizeof(enc_assoc035), sizeof(enc_nonce035) },
+ { enc_input036, enc_output036, enc_assoc036, enc_nonce036, enc_key036,
+ sizeof(enc_input036), sizeof(enc_assoc036), sizeof(enc_nonce036) },
+ { enc_input037, enc_output037, enc_assoc037, enc_nonce037, enc_key037,
+ sizeof(enc_input037), sizeof(enc_assoc037), sizeof(enc_nonce037) },
+ { enc_input038, enc_output038, enc_assoc038, enc_nonce038, enc_key038,
+ sizeof(enc_input038), sizeof(enc_assoc038), sizeof(enc_nonce038) },
+ { enc_input039, enc_output039, enc_assoc039, enc_nonce039, enc_key039,
+ sizeof(enc_input039), sizeof(enc_assoc039), sizeof(enc_nonce039) },
+ { enc_input040, enc_output040, enc_assoc040, enc_nonce040, enc_key040,
+ sizeof(enc_input040), sizeof(enc_assoc040), sizeof(enc_nonce040) },
+ { enc_input041, enc_output041, enc_assoc041, enc_nonce041, enc_key041,
+ sizeof(enc_input041), sizeof(enc_assoc041), sizeof(enc_nonce041) },
+ { enc_input042, enc_output042, enc_assoc042, enc_nonce042, enc_key042,
+ sizeof(enc_input042), sizeof(enc_assoc042), sizeof(enc_nonce042) },
+ { enc_input043, enc_output043, enc_assoc043, enc_nonce043, enc_key043,
+ sizeof(enc_input043), sizeof(enc_assoc043), sizeof(enc_nonce043) },
+ { enc_input044, enc_output044, enc_assoc044, enc_nonce044, enc_key044,
+ sizeof(enc_input044), sizeof(enc_assoc044), sizeof(enc_nonce044) },
+ { enc_input045, enc_output045, enc_assoc045, enc_nonce045, enc_key045,
+ sizeof(enc_input045), sizeof(enc_assoc045), sizeof(enc_nonce045) },
+ { enc_input046, enc_output046, enc_assoc046, enc_nonce046, enc_key046,
+ sizeof(enc_input046), sizeof(enc_assoc046), sizeof(enc_nonce046) },
+ { enc_input047, enc_output047, enc_assoc047, enc_nonce047, enc_key047,
+ sizeof(enc_input047), sizeof(enc_assoc047), sizeof(enc_nonce047) },
+ { enc_input048, enc_output048, enc_assoc048, enc_nonce048, enc_key048,
+ sizeof(enc_input048), sizeof(enc_assoc048), sizeof(enc_nonce048) },
+ { enc_input049, enc_output049, enc_assoc049, enc_nonce049, enc_key049,
+ sizeof(enc_input049), sizeof(enc_assoc049), sizeof(enc_nonce049) },
+ { enc_input050, enc_output050, enc_assoc050, enc_nonce050, enc_key050,
+ sizeof(enc_input050), sizeof(enc_assoc050), sizeof(enc_nonce050) },
+ { enc_input051, enc_output051, enc_assoc051, enc_nonce051, enc_key051,
+ sizeof(enc_input051), sizeof(enc_assoc051), sizeof(enc_nonce051) },
+ { enc_input052, enc_output052, enc_assoc052, enc_nonce052, enc_key052,
+ sizeof(enc_input052), sizeof(enc_assoc052), sizeof(enc_nonce052) },
+ { enc_input053, enc_output053, enc_assoc053, enc_nonce053, enc_key053,
+ sizeof(enc_input053), sizeof(enc_assoc053), sizeof(enc_nonce053) },
+ { enc_input054, enc_output054, enc_assoc054, enc_nonce054, enc_key054,
+ sizeof(enc_input054), sizeof(enc_assoc054), sizeof(enc_nonce054) },
+ { enc_input055, enc_output055, enc_assoc055, enc_nonce055, enc_key055,
+ sizeof(enc_input055), sizeof(enc_assoc055), sizeof(enc_nonce055) },
+ { enc_input056, enc_output056, enc_assoc056, enc_nonce056, enc_key056,
+ sizeof(enc_input056), sizeof(enc_assoc056), sizeof(enc_nonce056) },
+ { enc_input057, enc_output057, enc_assoc057, enc_nonce057, enc_key057,
+ sizeof(enc_input057), sizeof(enc_assoc057), sizeof(enc_nonce057) },
+ { enc_input058, enc_output058, enc_assoc058, enc_nonce058, enc_key058,
+ sizeof(enc_input058), sizeof(enc_assoc058), sizeof(enc_nonce058) },
+ { enc_input059, enc_output059, enc_assoc059, enc_nonce059, enc_key059,
+ sizeof(enc_input059), sizeof(enc_assoc059), sizeof(enc_nonce059) },
+ { enc_input060, enc_output060, enc_assoc060, enc_nonce060, enc_key060,
+ sizeof(enc_input060), sizeof(enc_assoc060), sizeof(enc_nonce060) },
+ { enc_input061, enc_output061, enc_assoc061, enc_nonce061, enc_key061,
+ sizeof(enc_input061), sizeof(enc_assoc061), sizeof(enc_nonce061) },
+ { enc_input062, enc_output062, enc_assoc062, enc_nonce062, enc_key062,
+ sizeof(enc_input062), sizeof(enc_assoc062), sizeof(enc_nonce062) },
+ { enc_input063, enc_output063, enc_assoc063, enc_nonce063, enc_key063,
+ sizeof(enc_input063), sizeof(enc_assoc063), sizeof(enc_nonce063) },
+ { enc_input064, enc_output064, enc_assoc064, enc_nonce064, enc_key064,
+ sizeof(enc_input064), sizeof(enc_assoc064), sizeof(enc_nonce064) },
+ { enc_input065, enc_output065, enc_assoc065, enc_nonce065, enc_key065,
+ sizeof(enc_input065), sizeof(enc_assoc065), sizeof(enc_nonce065) },
+ { enc_input066, enc_output066, enc_assoc066, enc_nonce066, enc_key066,
+ sizeof(enc_input066), sizeof(enc_assoc066), sizeof(enc_nonce066) },
+ { enc_input067, enc_output067, enc_assoc067, enc_nonce067, enc_key067,
+ sizeof(enc_input067), sizeof(enc_assoc067), sizeof(enc_nonce067) },
+ { enc_input068, enc_output068, enc_assoc068, enc_nonce068, enc_key068,
+ sizeof(enc_input068), sizeof(enc_assoc068), sizeof(enc_nonce068) },
+ { enc_input069, enc_output069, enc_assoc069, enc_nonce069, enc_key069,
+ sizeof(enc_input069), sizeof(enc_assoc069), sizeof(enc_nonce069) },
+ { enc_input070, enc_output070, enc_assoc070, enc_nonce070, enc_key070,
+ sizeof(enc_input070), sizeof(enc_assoc070), sizeof(enc_nonce070) },
+ { enc_input071, enc_output071, enc_assoc071, enc_nonce071, enc_key071,
+ sizeof(enc_input071), sizeof(enc_assoc071), sizeof(enc_nonce071) },
+ { enc_input072, enc_output072, enc_assoc072, enc_nonce072, enc_key072,
+ sizeof(enc_input072), sizeof(enc_assoc072), sizeof(enc_nonce072) },
+ { enc_input073, enc_output073, enc_assoc073, enc_nonce073, enc_key073,
+ sizeof(enc_input073), sizeof(enc_assoc073), sizeof(enc_nonce073) },
+ { enc_input074, enc_output074, enc_assoc074, enc_nonce074, enc_key074,
+ sizeof(enc_input074), sizeof(enc_assoc074), sizeof(enc_nonce074) },
+ { enc_input075, enc_output075, enc_assoc075, enc_nonce075, enc_key075,
+ sizeof(enc_input075), sizeof(enc_assoc075), sizeof(enc_nonce075) },
+ { enc_input076, enc_output076, enc_assoc076, enc_nonce076, enc_key076,
+ sizeof(enc_input076), sizeof(enc_assoc076), sizeof(enc_nonce076) },
+ { enc_input077, enc_output077, enc_assoc077, enc_nonce077, enc_key077,
+ sizeof(enc_input077), sizeof(enc_assoc077), sizeof(enc_nonce077) },
+ { enc_input078, enc_output078, enc_assoc078, enc_nonce078, enc_key078,
+ sizeof(enc_input078), sizeof(enc_assoc078), sizeof(enc_nonce078) },
+ { enc_input079, enc_output079, enc_assoc079, enc_nonce079, enc_key079,
+ sizeof(enc_input079), sizeof(enc_assoc079), sizeof(enc_nonce079) },
+ { enc_input080, enc_output080, enc_assoc080, enc_nonce080, enc_key080,
+ sizeof(enc_input080), sizeof(enc_assoc080), sizeof(enc_nonce080) },
+ { enc_input081, enc_output081, enc_assoc081, enc_nonce081, enc_key081,
+ sizeof(enc_input081), sizeof(enc_assoc081), sizeof(enc_nonce081) },
+ { enc_input082, enc_output082, enc_assoc082, enc_nonce082, enc_key082,
+ sizeof(enc_input082), sizeof(enc_assoc082), sizeof(enc_nonce082) },
+ { enc_input083, enc_output083, enc_assoc083, enc_nonce083, enc_key083,
+ sizeof(enc_input083), sizeof(enc_assoc083), sizeof(enc_nonce083) },
+ { enc_input084, enc_output084, enc_assoc084, enc_nonce084, enc_key084,
+ sizeof(enc_input084), sizeof(enc_assoc084), sizeof(enc_nonce084) },
+ { enc_input085, enc_output085, enc_assoc085, enc_nonce085, enc_key085,
+ sizeof(enc_input085), sizeof(enc_assoc085), sizeof(enc_nonce085) },
+ { enc_input086, enc_output086, enc_assoc086, enc_nonce086, enc_key086,
+ sizeof(enc_input086), sizeof(enc_assoc086), sizeof(enc_nonce086) },
+ { enc_input087, enc_output087, enc_assoc087, enc_nonce087, enc_key087,
+ sizeof(enc_input087), sizeof(enc_assoc087), sizeof(enc_nonce087) },
+ { enc_input088, enc_output088, enc_assoc088, enc_nonce088, enc_key088,
+ sizeof(enc_input088), sizeof(enc_assoc088), sizeof(enc_nonce088) },
+ { enc_input089, enc_output089, enc_assoc089, enc_nonce089, enc_key089,
+ sizeof(enc_input089), sizeof(enc_assoc089), sizeof(enc_nonce089) },
+ { enc_input090, enc_output090, enc_assoc090, enc_nonce090, enc_key090,
+ sizeof(enc_input090), sizeof(enc_assoc090), sizeof(enc_nonce090) },
+ { enc_input091, enc_output091, enc_assoc091, enc_nonce091, enc_key091,
+ sizeof(enc_input091), sizeof(enc_assoc091), sizeof(enc_nonce091) },
+ { enc_input092, enc_output092, enc_assoc092, enc_nonce092, enc_key092,
+ sizeof(enc_input092), sizeof(enc_assoc092), sizeof(enc_nonce092) },
+ { enc_input093, enc_output093, enc_assoc093, enc_nonce093, enc_key093,
+ sizeof(enc_input093), sizeof(enc_assoc093), sizeof(enc_nonce093) },
+ { enc_input094, enc_output094, enc_assoc094, enc_nonce094, enc_key094,
+ sizeof(enc_input094), sizeof(enc_assoc094), sizeof(enc_nonce094) },
+ { enc_input095, enc_output095, enc_assoc095, enc_nonce095, enc_key095,
+ sizeof(enc_input095), sizeof(enc_assoc095), sizeof(enc_nonce095) },
+ { enc_input096, enc_output096, enc_assoc096, enc_nonce096, enc_key096,
+ sizeof(enc_input096), sizeof(enc_assoc096), sizeof(enc_nonce096) },
+ { enc_input097, enc_output097, enc_assoc097, enc_nonce097, enc_key097,
+ sizeof(enc_input097), sizeof(enc_assoc097), sizeof(enc_nonce097) },
+ { enc_input098, enc_output098, enc_assoc098, enc_nonce098, enc_key098,
+ sizeof(enc_input098), sizeof(enc_assoc098), sizeof(enc_nonce098) },
+ { enc_input099, enc_output099, enc_assoc099, enc_nonce099, enc_key099,
+ sizeof(enc_input099), sizeof(enc_assoc099), sizeof(enc_nonce099) },
+ { enc_input100, enc_output100, enc_assoc100, enc_nonce100, enc_key100,
+ sizeof(enc_input100), sizeof(enc_assoc100), sizeof(enc_nonce100) },
+ { enc_input101, enc_output101, enc_assoc101, enc_nonce101, enc_key101,
+ sizeof(enc_input101), sizeof(enc_assoc101), sizeof(enc_nonce101) },
+ { enc_input102, enc_output102, enc_assoc102, enc_nonce102, enc_key102,
+ sizeof(enc_input102), sizeof(enc_assoc102), sizeof(enc_nonce102) },
+ { enc_input103, enc_output103, enc_assoc103, enc_nonce103, enc_key103,
+ sizeof(enc_input103), sizeof(enc_assoc103), sizeof(enc_nonce103) },
+ { enc_input104, enc_output104, enc_assoc104, enc_nonce104, enc_key104,
+ sizeof(enc_input104), sizeof(enc_assoc104), sizeof(enc_nonce104) },
+ { enc_input105, enc_output105, enc_assoc105, enc_nonce105, enc_key105,
+ sizeof(enc_input105), sizeof(enc_assoc105), sizeof(enc_nonce105) },
+ { enc_input106, enc_output106, enc_assoc106, enc_nonce106, enc_key106,
+ sizeof(enc_input106), sizeof(enc_assoc106), sizeof(enc_nonce106) },
+ { enc_input107, enc_output107, enc_assoc107, enc_nonce107, enc_key107,
+ sizeof(enc_input107), sizeof(enc_assoc107), sizeof(enc_nonce107) },
+ { enc_input108, enc_output108, enc_assoc108, enc_nonce108, enc_key108,
+ sizeof(enc_input108), sizeof(enc_assoc108), sizeof(enc_nonce108) },
+ { enc_input109, enc_output109, enc_assoc109, enc_nonce109, enc_key109,
+ sizeof(enc_input109), sizeof(enc_assoc109), sizeof(enc_nonce109) },
+ { enc_input110, enc_output110, enc_assoc110, enc_nonce110, enc_key110,
+ sizeof(enc_input110), sizeof(enc_assoc110), sizeof(enc_nonce110) },
+ { enc_input111, enc_output111, enc_assoc111, enc_nonce111, enc_key111,
+ sizeof(enc_input111), sizeof(enc_assoc111), sizeof(enc_nonce111) },
+ { enc_input112, enc_output112, enc_assoc112, enc_nonce112, enc_key112,
+ sizeof(enc_input112), sizeof(enc_assoc112), sizeof(enc_nonce112) },
+ { enc_input113, enc_output113, enc_assoc113, enc_nonce113, enc_key113,
+ sizeof(enc_input113), sizeof(enc_assoc113), sizeof(enc_nonce113) },
+ { enc_input114, enc_output114, enc_assoc114, enc_nonce114, enc_key114,
+ sizeof(enc_input114), sizeof(enc_assoc114), sizeof(enc_nonce114) },
+ { enc_input115, enc_output115, enc_assoc115, enc_nonce115, enc_key115,
+ sizeof(enc_input115), sizeof(enc_assoc115), sizeof(enc_nonce115) },
+ { enc_input116, enc_output116, enc_assoc116, enc_nonce116, enc_key116,
+ sizeof(enc_input116), sizeof(enc_assoc116), sizeof(enc_nonce116) },
+ { enc_input117, enc_output117, enc_assoc117, enc_nonce117, enc_key117,
+ sizeof(enc_input117), sizeof(enc_assoc117), sizeof(enc_nonce117) },
+ { enc_input118, enc_output118, enc_assoc118, enc_nonce118, enc_key118,
+ sizeof(enc_input118), sizeof(enc_assoc118), sizeof(enc_nonce118) }
+};
+
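+/*
+ * Decryption vectors follow. Each dec_inputNNN buffer holds the ciphertext
+ * with the 16-byte Poly1305 tag appended, dec_outputNNN the expected
+ * plaintext, and dec_assocNNN/dec_nonceNNN/dec_keyNNN the matching
+ * associated data, 8-byte nonce and 256-bit key. The first vector appears
+ * to correspond to the AEAD decryption example of RFC 7539, Appendix A.5,
+ * with the 12-byte RFC nonce given as its trailing 8 bytes (the library
+ * treats the leading 32 bits of the nonce as zero).
+ */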
+static const u8 dec_input001[] __initconst = {
+ 0x64, 0xa0, 0x86, 0x15, 0x75, 0x86, 0x1a, 0xf4,
+ 0x60, 0xf0, 0x62, 0xc7, 0x9b, 0xe6, 0x43, 0xbd,
+ 0x5e, 0x80, 0x5c, 0xfd, 0x34, 0x5c, 0xf3, 0x89,
+ 0xf1, 0x08, 0x67, 0x0a, 0xc7, 0x6c, 0x8c, 0xb2,
+ 0x4c, 0x6c, 0xfc, 0x18, 0x75, 0x5d, 0x43, 0xee,
+ 0xa0, 0x9e, 0xe9, 0x4e, 0x38, 0x2d, 0x26, 0xb0,
+ 0xbd, 0xb7, 0xb7, 0x3c, 0x32, 0x1b, 0x01, 0x00,
+ 0xd4, 0xf0, 0x3b, 0x7f, 0x35, 0x58, 0x94, 0xcf,
+ 0x33, 0x2f, 0x83, 0x0e, 0x71, 0x0b, 0x97, 0xce,
+ 0x98, 0xc8, 0xa8, 0x4a, 0xbd, 0x0b, 0x94, 0x81,
+ 0x14, 0xad, 0x17, 0x6e, 0x00, 0x8d, 0x33, 0xbd,
+ 0x60, 0xf9, 0x82, 0xb1, 0xff, 0x37, 0xc8, 0x55,
+ 0x97, 0x97, 0xa0, 0x6e, 0xf4, 0xf0, 0xef, 0x61,
+ 0xc1, 0x86, 0x32, 0x4e, 0x2b, 0x35, 0x06, 0x38,
+ 0x36, 0x06, 0x90, 0x7b, 0x6a, 0x7c, 0x02, 0xb0,
+ 0xf9, 0xf6, 0x15, 0x7b, 0x53, 0xc8, 0x67, 0xe4,
+ 0xb9, 0x16, 0x6c, 0x76, 0x7b, 0x80, 0x4d, 0x46,
+ 0xa5, 0x9b, 0x52, 0x16, 0xcd, 0xe7, 0xa4, 0xe9,
+ 0x90, 0x40, 0xc5, 0xa4, 0x04, 0x33, 0x22, 0x5e,
+ 0xe2, 0x82, 0xa1, 0xb0, 0xa0, 0x6c, 0x52, 0x3e,
+ 0xaf, 0x45, 0x34, 0xd7, 0xf8, 0x3f, 0xa1, 0x15,
+ 0x5b, 0x00, 0x47, 0x71, 0x8c, 0xbc, 0x54, 0x6a,
+ 0x0d, 0x07, 0x2b, 0x04, 0xb3, 0x56, 0x4e, 0xea,
+ 0x1b, 0x42, 0x22, 0x73, 0xf5, 0x48, 0x27, 0x1a,
+ 0x0b, 0xb2, 0x31, 0x60, 0x53, 0xfa, 0x76, 0x99,
+ 0x19, 0x55, 0xeb, 0xd6, 0x31, 0x59, 0x43, 0x4e,
+ 0xce, 0xbb, 0x4e, 0x46, 0x6d, 0xae, 0x5a, 0x10,
+ 0x73, 0xa6, 0x72, 0x76, 0x27, 0x09, 0x7a, 0x10,
+ 0x49, 0xe6, 0x17, 0xd9, 0x1d, 0x36, 0x10, 0x94,
+ 0xfa, 0x68, 0xf0, 0xff, 0x77, 0x98, 0x71, 0x30,
+ 0x30, 0x5b, 0xea, 0xba, 0x2e, 0xda, 0x04, 0xdf,
+ 0x99, 0x7b, 0x71, 0x4d, 0x6c, 0x6f, 0x2c, 0x29,
+ 0xa6, 0xad, 0x5c, 0xb4, 0x02, 0x2b, 0x02, 0x70,
+ 0x9b, 0xee, 0xad, 0x9d, 0x67, 0x89, 0x0c, 0xbb,
+ 0x22, 0x39, 0x23, 0x36, 0xfe, 0xa1, 0x85, 0x1f,
+ 0x38
+};
+static const u8 dec_output001[] __initconst = {
+ 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74,
+ 0x2d, 0x44, 0x72, 0x61, 0x66, 0x74, 0x73, 0x20,
+ 0x61, 0x72, 0x65, 0x20, 0x64, 0x72, 0x61, 0x66,
+ 0x74, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65,
+ 0x6e, 0x74, 0x73, 0x20, 0x76, 0x61, 0x6c, 0x69,
+ 0x64, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x61, 0x20,
+ 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x20,
+ 0x6f, 0x66, 0x20, 0x73, 0x69, 0x78, 0x20, 0x6d,
+ 0x6f, 0x6e, 0x74, 0x68, 0x73, 0x20, 0x61, 0x6e,
+ 0x64, 0x20, 0x6d, 0x61, 0x79, 0x20, 0x62, 0x65,
+ 0x20, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64,
+ 0x2c, 0x20, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63,
+ 0x65, 0x64, 0x2c, 0x20, 0x6f, 0x72, 0x20, 0x6f,
+ 0x62, 0x73, 0x6f, 0x6c, 0x65, 0x74, 0x65, 0x64,
+ 0x20, 0x62, 0x79, 0x20, 0x6f, 0x74, 0x68, 0x65,
+ 0x72, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65,
+ 0x6e, 0x74, 0x73, 0x20, 0x61, 0x74, 0x20, 0x61,
+ 0x6e, 0x79, 0x20, 0x74, 0x69, 0x6d, 0x65, 0x2e,
+ 0x20, 0x49, 0x74, 0x20, 0x69, 0x73, 0x20, 0x69,
+ 0x6e, 0x61, 0x70, 0x70, 0x72, 0x6f, 0x70, 0x72,
+ 0x69, 0x61, 0x74, 0x65, 0x20, 0x74, 0x6f, 0x20,
+ 0x75, 0x73, 0x65, 0x20, 0x49, 0x6e, 0x74, 0x65,
+ 0x72, 0x6e, 0x65, 0x74, 0x2d, 0x44, 0x72, 0x61,
+ 0x66, 0x74, 0x73, 0x20, 0x61, 0x73, 0x20, 0x72,
+ 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65,
+ 0x20, 0x6d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61,
+ 0x6c, 0x20, 0x6f, 0x72, 0x20, 0x74, 0x6f, 0x20,
+ 0x63, 0x69, 0x74, 0x65, 0x20, 0x74, 0x68, 0x65,
+ 0x6d, 0x20, 0x6f, 0x74, 0x68, 0x65, 0x72, 0x20,
+ 0x74, 0x68, 0x61, 0x6e, 0x20, 0x61, 0x73, 0x20,
+ 0x2f, 0xe2, 0x80, 0x9c, 0x77, 0x6f, 0x72, 0x6b,
+ 0x20, 0x69, 0x6e, 0x20, 0x70, 0x72, 0x6f, 0x67,
+ 0x72, 0x65, 0x73, 0x73, 0x2e, 0x2f, 0xe2, 0x80,
+ 0x9d
+};
+static const u8 dec_assoc001[] __initconst = {
+ 0xf3, 0x33, 0x88, 0x86, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x4e, 0x91
+};
+static const u8 dec_nonce001[] __initconst = {
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08
+};
+static const u8 dec_key001[] __initconst = {
+ 0x1c, 0x92, 0x40, 0xa5, 0xeb, 0x55, 0xd3, 0x8a,
+ 0xf3, 0x33, 0x88, 0x86, 0x04, 0xf6, 0xb5, 0xf0,
+ 0x47, 0x39, 0x17, 0xc1, 0x40, 0x2b, 0x80, 0x09,
+ 0x9d, 0xca, 0x5c, 0xbc, 0x20, 0x70, 0x75, 0xc0
+};
+
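+/*
+ * A minimal sketch of how a vector such as 001 above could be checked
+ * against the chacha20poly1305_decrypt() library call declared in
+ * <crypto/chacha20poly1305.h>; this is an illustration only, not this
+ * file's actual test loop:
+ *
+ *	u8 out[sizeof(dec_output001)];
+ *	bool ok;
+ *
+ *	ok = chacha20poly1305_decrypt(out, dec_input001,
+ *				      sizeof(dec_input001),
+ *				      dec_assoc001, sizeof(dec_assoc001),
+ *				      get_unaligned_le64(dec_nonce001),
+ *				      dec_key001);
+ *
+ * where src_len covers the ciphertext plus the trailing 16-byte tag, the
+ * u64 nonce is read with get_unaligned_le64() from <asm/unaligned.h>, and
+ * the vector passes if ok is true and out equals dec_output001.
+ */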
+static const u8 dec_input002[] __initconst = {
+ 0xea, 0xe0, 0x1e, 0x9e, 0x2c, 0x91, 0xaa, 0xe1,
+ 0xdb, 0x5d, 0x99, 0x3f, 0x8a, 0xf7, 0x69, 0x92
+};
+static const u8 dec_output002[] __initconst = { };
+static const u8 dec_assoc002[] __initconst = { };
+static const u8 dec_nonce002[] __initconst = {
+ 0xca, 0xbf, 0x33, 0x71, 0x32, 0x45, 0x77, 0x8e
+};
+static const u8 dec_key002[] __initconst = {
+ 0x4c, 0xf5, 0x96, 0x83, 0x38, 0xe6, 0xae, 0x7f,
+ 0x2d, 0x29, 0x25, 0x76, 0xd5, 0x75, 0x27, 0x86,
+ 0x91, 0x9a, 0x27, 0x7a, 0xfb, 0x46, 0xc5, 0xef,
+ 0x94, 0x81, 0x79, 0x57, 0x14, 0x59, 0x40, 0x68
+};
+
+static const u8 dec_input003[] __initconst = {
+ 0xdd, 0x6b, 0x3b, 0x82, 0xce, 0x5a, 0xbd, 0xd6,
+ 0xa9, 0x35, 0x83, 0xd8, 0x8c, 0x3d, 0x85, 0x77
+};
+static const u8 dec_output003[] __initconst = { };
+static const u8 dec_assoc003[] __initconst = {
+ 0x33, 0x10, 0x41, 0x12, 0x1f, 0xf3, 0xd2, 0x6b
+};
+static const u8 dec_nonce003[] __initconst = {
+ 0x3d, 0x86, 0xb5, 0x6b, 0xc8, 0xa3, 0x1f, 0x1d
+};
+static const u8 dec_key003[] __initconst = {
+ 0x2d, 0xb0, 0x5d, 0x40, 0xc8, 0xed, 0x44, 0x88,
+ 0x34, 0xd1, 0x13, 0xaf, 0x57, 0xa1, 0xeb, 0x3a,
+ 0x2a, 0x80, 0x51, 0x36, 0xec, 0x5b, 0xbc, 0x08,
+ 0x93, 0x84, 0x21, 0xb5, 0x13, 0x88, 0x3c, 0x0d
+};
+
+static const u8 dec_input004[] __initconst = {
+ 0xb7, 0x1b, 0xb0, 0x73, 0x59, 0xb0, 0x84, 0xb2,
+ 0x6d, 0x8e, 0xab, 0x94, 0x31, 0xa1, 0xae, 0xac,
+ 0x89
+};
+static const u8 dec_output004[] __initconst = {
+ 0xa4
+};
+static const u8 dec_assoc004[] __initconst = {
+ 0x6a, 0xe2, 0xad, 0x3f, 0x88, 0x39, 0x5a, 0x40
+};
+static const u8 dec_nonce004[] __initconst = {
+ 0xd2, 0x32, 0x1f, 0x29, 0x28, 0xc6, 0xc4, 0xc4
+};
+static const u8 dec_key004[] __initconst = {
+ 0x4b, 0x28, 0x4b, 0xa3, 0x7b, 0xbe, 0xe9, 0xf8,
+ 0x31, 0x80, 0x82, 0xd7, 0xd8, 0xe8, 0xb5, 0xa1,
+ 0xe2, 0x18, 0x18, 0x8a, 0x9c, 0xfa, 0xa3, 0x3d,
+ 0x25, 0x71, 0x3e, 0x40, 0xbc, 0x54, 0x7a, 0x3e
+};
+
+static const u8 dec_input005[] __initconst = {
+ 0xbf, 0xe1, 0x5b, 0x0b, 0xdb, 0x6b, 0xf5, 0x5e,
+ 0x6c, 0x5d, 0x84, 0x44, 0x39, 0x81, 0xc1, 0x9c,
+ 0xac
+};
+static const u8 dec_output005[] __initconst = {
+ 0x2d
+};
+static const u8 dec_assoc005[] __initconst = { };
+static const u8 dec_nonce005[] __initconst = {
+ 0x20, 0x1c, 0xaa, 0x5f, 0x9c, 0xbf, 0x92, 0x30
+};
+static const u8 dec_key005[] __initconst = {
+ 0x66, 0xca, 0x9c, 0x23, 0x2a, 0x4b, 0x4b, 0x31,
+ 0x0e, 0x92, 0x89, 0x8b, 0xf4, 0x93, 0xc7, 0x87,
+ 0x98, 0xa3, 0xd8, 0x39, 0xf8, 0xf4, 0xa7, 0x01,
+ 0xc0, 0x2e, 0x0a, 0xa6, 0x7e, 0x5a, 0x78, 0x87
+};
+
+static const u8 dec_input006[] __initconst = {
+ 0x8b, 0x06, 0xd3, 0x31, 0xb0, 0x93, 0x45, 0xb1,
+ 0x75, 0x6e, 0x26, 0xf9, 0x67, 0xbc, 0x90, 0x15,
+ 0x81, 0x2c, 0xb5, 0xf0, 0xc6, 0x2b, 0xc7, 0x8c,
+ 0x56, 0xd1, 0xbf, 0x69, 0x6c, 0x07, 0xa0, 0xda,
+ 0x65, 0x27, 0xc9, 0x90, 0x3d, 0xef, 0x4b, 0x11,
+ 0x0f, 0x19, 0x07, 0xfd, 0x29, 0x92, 0xd9, 0xc8,
+ 0xf7, 0x99, 0x2e, 0x4a, 0xd0, 0xb8, 0x2c, 0xdc,
+ 0x93, 0xf5, 0x9e, 0x33, 0x78, 0xd1, 0x37, 0xc3,
+ 0x66, 0xd7, 0x5e, 0xbc, 0x44, 0xbf, 0x53, 0xa5,
+ 0xbc, 0xc4, 0xcb, 0x7b, 0x3a, 0x8e, 0x7f, 0x02,
+ 0xbd, 0xbb, 0xe7, 0xca, 0xa6, 0x6c, 0x6b, 0x93,
+ 0x21, 0x93, 0x10, 0x61, 0xe7, 0x69, 0xd0, 0x78,
+ 0xf3, 0x07, 0x5a, 0x1a, 0x8f, 0x73, 0xaa, 0xb1,
+ 0x4e, 0xd3, 0xda, 0x4f, 0xf3, 0x32, 0xe1, 0x66,
+ 0x3e, 0x6c, 0xc6, 0x13, 0xba, 0x06, 0x5b, 0xfc,
+ 0x6a, 0xe5, 0x6f, 0x60, 0xfb, 0x07, 0x40, 0xb0,
+ 0x8c, 0x9d, 0x84, 0x43, 0x6b, 0xc1, 0xf7, 0x8d,
+ 0x8d, 0x31, 0xf7, 0x7a, 0x39, 0x4d, 0x8f, 0x9a,
+ 0xeb
+};
+static const u8 dec_output006[] __initconst = {
+ 0x33, 0x2f, 0x94, 0xc1, 0xa4, 0xef, 0xcc, 0x2a,
+ 0x5b, 0xa6, 0xe5, 0x8f, 0x1d, 0x40, 0xf0, 0x92,
+ 0x3c, 0xd9, 0x24, 0x11, 0xa9, 0x71, 0xf9, 0x37,
+ 0x14, 0x99, 0xfa, 0xbe, 0xe6, 0x80, 0xde, 0x50,
+ 0xc9, 0x96, 0xd4, 0xb0, 0xec, 0x9e, 0x17, 0xec,
+ 0xd2, 0x5e, 0x72, 0x99, 0xfc, 0x0a, 0xe1, 0xcb,
+ 0x48, 0xd2, 0x85, 0xdd, 0x2f, 0x90, 0xe0, 0x66,
+ 0x3b, 0xe6, 0x20, 0x74, 0xbe, 0x23, 0x8f, 0xcb,
+ 0xb4, 0xe4, 0xda, 0x48, 0x40, 0xa6, 0xd1, 0x1b,
+ 0xc7, 0x42, 0xce, 0x2f, 0x0c, 0xa6, 0x85, 0x6e,
+ 0x87, 0x37, 0x03, 0xb1, 0x7c, 0x25, 0x96, 0xa3,
+ 0x05, 0xd8, 0xb0, 0xf4, 0xed, 0xea, 0xc2, 0xf0,
+ 0x31, 0x98, 0x6c, 0xd1, 0x14, 0x25, 0xc0, 0xcb,
+ 0x01, 0x74, 0xd0, 0x82, 0xf4, 0x36, 0xf5, 0x41,
+ 0xd5, 0xdc, 0xca, 0xc5, 0xbb, 0x98, 0xfe, 0xfc,
+ 0x69, 0x21, 0x70, 0xd8, 0xa4, 0x4b, 0xc8, 0xde,
+ 0x8f
+};
+static const u8 dec_assoc006[] __initconst = {
+ 0x70, 0xd3, 0x33, 0xf3, 0x8b, 0x18, 0x0b
+};
+static const u8 dec_nonce006[] __initconst = {
+ 0xdf, 0x51, 0x84, 0x82, 0x42, 0x0c, 0x75, 0x9c
+};
+static const u8 dec_key006[] __initconst = {
+ 0x68, 0x7b, 0x8d, 0x8e, 0xe3, 0xc4, 0xdd, 0xae,
+ 0xdf, 0x72, 0x7f, 0x53, 0x72, 0x25, 0x1e, 0x78,
+ 0x91, 0xcb, 0x69, 0x76, 0x1f, 0x49, 0x93, 0xf9,
+ 0x6f, 0x21, 0xcc, 0x39, 0x9c, 0xad, 0xb1, 0x01
+};
+
+static const u8 dec_input007[] __initconst = {
+ 0x85, 0x04, 0xc2, 0xed, 0x8d, 0xfd, 0x97, 0x5c,
+ 0xd2, 0xb7, 0xe2, 0xc1, 0x6b, 0xa3, 0xba, 0xf8,
+ 0xc9, 0x50, 0xc3, 0xc6, 0xa5, 0xe3, 0xa4, 0x7c,
+ 0xc3, 0x23, 0x49, 0x5e, 0xa9, 0xb9, 0x32, 0xeb,
+ 0x8a, 0x7c, 0xca, 0xe5, 0xec, 0xfb, 0x7c, 0xc0,
+ 0xcb, 0x7d, 0xdc, 0x2c, 0x9d, 0x92, 0x55, 0x21,
+ 0x0a, 0xc8, 0x43, 0x63, 0x59, 0x0a, 0x31, 0x70,
+ 0x82, 0x67, 0x41, 0x03, 0xf8, 0xdf, 0xf2, 0xac,
+ 0xa7, 0x02, 0xd4, 0xd5, 0x8a, 0x2d, 0xc8, 0x99,
+ 0x19, 0x66, 0xd0, 0xf6, 0x88, 0x2c, 0x77, 0xd9,
+ 0xd4, 0x0d, 0x6c, 0xbd, 0x98, 0xde, 0xe7, 0x7f,
+ 0xad, 0x7e, 0x8a, 0xfb, 0xe9, 0x4b, 0xe5, 0xf7,
+ 0xe5, 0x50, 0xa0, 0x90, 0x3f, 0xd6, 0x22, 0x53,
+ 0xe3, 0xfe, 0x1b, 0xcc, 0x79, 0x3b, 0xec, 0x12,
+ 0x47, 0x52, 0xa7, 0xd6, 0x04, 0xe3, 0x52, 0xe6,
+ 0x93, 0x90, 0x91, 0x32, 0x73, 0x79, 0xb8, 0xd0,
+ 0x31, 0xde, 0x1f, 0x9f, 0x2f, 0x05, 0x38, 0x54,
+ 0x2f, 0x35, 0x04, 0x39, 0xe0, 0xa7, 0xba, 0xc6,
+ 0x52, 0xf6, 0x37, 0x65, 0x4c, 0x07, 0xa9, 0x7e,
+ 0xb3, 0x21, 0x6f, 0x74, 0x8c, 0xc9, 0xde, 0xdb,
+ 0x65, 0x1b, 0x9b, 0xaa, 0x60, 0xb1, 0x03, 0x30,
+ 0x6b, 0xb2, 0x03, 0xc4, 0x1c, 0x04, 0xf8, 0x0f,
+ 0x64, 0xaf, 0x46, 0xe4, 0x65, 0x99, 0x49, 0xe2,
+ 0xea, 0xce, 0x78, 0x00, 0xd8, 0x8b, 0xd5, 0x2e,
+ 0xcf, 0xfc, 0x40, 0x49, 0xe8, 0x58, 0xdc, 0x34,
+ 0x9c, 0x8c, 0x61, 0xbf, 0x0a, 0x8e, 0xec, 0x39,
+ 0xa9, 0x30, 0x05, 0x5a, 0xd2, 0x56, 0x01, 0xc7,
+ 0xda, 0x8f, 0x4e, 0xbb, 0x43, 0xa3, 0x3a, 0xf9,
+ 0x15, 0x2a, 0xd0, 0xa0, 0x7a, 0x87, 0x34, 0x82,
+ 0xfe, 0x8a, 0xd1, 0x2d, 0x5e, 0xc7, 0xbf, 0x04,
+ 0x53, 0x5f, 0x3b, 0x36, 0xd4, 0x25, 0x5c, 0x34,
+ 0x7a, 0x8d, 0xd5, 0x05, 0xce, 0x72, 0xca, 0xef,
+ 0x7a, 0x4b, 0xbc, 0xb0, 0x10, 0x5c, 0x96, 0x42,
+ 0x3a, 0x00, 0x98, 0xcd, 0x15, 0xe8, 0xb7, 0x53
+};
+static const u8 dec_output007[] __initconst = {
+ 0x9b, 0x18, 0xdb, 0xdd, 0x9a, 0x0f, 0x3e, 0xa5,
+ 0x15, 0x17, 0xde, 0xdf, 0x08, 0x9d, 0x65, 0x0a,
+ 0x67, 0x30, 0x12, 0xe2, 0x34, 0x77, 0x4b, 0xc1,
+ 0xd9, 0xc6, 0x1f, 0xab, 0xc6, 0x18, 0x50, 0x17,
+ 0xa7, 0x9d, 0x3c, 0xa6, 0xc5, 0x35, 0x8c, 0x1c,
+ 0xc0, 0xa1, 0x7c, 0x9f, 0x03, 0x89, 0xca, 0xe1,
+ 0xe6, 0xe9, 0xd4, 0xd3, 0x88, 0xdb, 0xb4, 0x51,
+ 0x9d, 0xec, 0xb4, 0xfc, 0x52, 0xee, 0x6d, 0xf1,
+ 0x75, 0x42, 0xc6, 0xfd, 0xbd, 0x7a, 0x8e, 0x86,
+ 0xfc, 0x44, 0xb3, 0x4f, 0xf3, 0xea, 0x67, 0x5a,
+ 0x41, 0x13, 0xba, 0xb0, 0xdc, 0xe1, 0xd3, 0x2a,
+ 0x7c, 0x22, 0xb3, 0xca, 0xac, 0x6a, 0x37, 0x98,
+ 0x3e, 0x1d, 0x40, 0x97, 0xf7, 0x9b, 0x1d, 0x36,
+ 0x6b, 0xb3, 0x28, 0xbd, 0x60, 0x82, 0x47, 0x34,
+ 0xaa, 0x2f, 0x7d, 0xe9, 0xa8, 0x70, 0x81, 0x57,
+ 0xd4, 0xb9, 0x77, 0x0a, 0x9d, 0x29, 0xa7, 0x84,
+ 0x52, 0x4f, 0xc2, 0x4a, 0x40, 0x3b, 0x3c, 0xd4,
+ 0xc9, 0x2a, 0xdb, 0x4a, 0x53, 0xc4, 0xbe, 0x80,
+ 0xe9, 0x51, 0x7f, 0x8f, 0xc7, 0xa2, 0xce, 0x82,
+ 0x5c, 0x91, 0x1e, 0x74, 0xd9, 0xd0, 0xbd, 0xd5,
+ 0xf3, 0xfd, 0xda, 0x4d, 0x25, 0xb4, 0xbb, 0x2d,
+ 0xac, 0x2f, 0x3d, 0x71, 0x85, 0x7b, 0xcf, 0x3c,
+ 0x7b, 0x3e, 0x0e, 0x22, 0x78, 0x0c, 0x29, 0xbf,
+ 0xe4, 0xf4, 0x57, 0xb3, 0xcb, 0x49, 0xa0, 0xfc,
+ 0x1e, 0x05, 0x4e, 0x16, 0xbc, 0xd5, 0xa8, 0xa3,
+ 0xee, 0x05, 0x35, 0xc6, 0x7c, 0xab, 0x60, 0x14,
+ 0x55, 0x1a, 0x8e, 0xc5, 0x88, 0x5d, 0xd5, 0x81,
+ 0xc2, 0x81, 0xa5, 0xc4, 0x60, 0xdb, 0xaf, 0x77,
+ 0x91, 0xe1, 0xce, 0xa2, 0x7e, 0x7f, 0x42, 0xe3,
+ 0xb0, 0x13, 0x1c, 0x1f, 0x25, 0x60, 0x21, 0xe2,
+ 0x40, 0x5f, 0x99, 0xb7, 0x73, 0xec, 0x9b, 0x2b,
+ 0xf0, 0x65, 0x11, 0xc8, 0xd0, 0x0a, 0x9f, 0xd3
+};
+static const u8 dec_assoc007[] __initconst = { };
+static const u8 dec_nonce007[] __initconst = {
+ 0xde, 0x7b, 0xef, 0xc3, 0x65, 0x1b, 0x68, 0xb0
+};
+static const u8 dec_key007[] __initconst = {
+ 0x8d, 0xb8, 0x91, 0x48, 0xf0, 0xe7, 0x0a, 0xbd,
+ 0xf9, 0x3f, 0xcd, 0xd9, 0xa0, 0x1e, 0x42, 0x4c,
+ 0xe7, 0xde, 0x25, 0x3d, 0xa3, 0xd7, 0x05, 0x80,
+ 0x8d, 0xf2, 0x82, 0xac, 0x44, 0x16, 0x51, 0x01
+};
+
+static const u8 dec_input008[] __initconst = {
+ 0x14, 0xf6, 0x41, 0x37, 0xa6, 0xd4, 0x27, 0xcd,
+ 0xdb, 0x06, 0x3e, 0x9a, 0x4e, 0xab, 0xd5, 0xb1,
+ 0x1e, 0x6b, 0xd2, 0xbc, 0x11, 0xf4, 0x28, 0x93,
+ 0x63, 0x54, 0xef, 0xbb, 0x5e, 0x1d, 0x3a, 0x1d,
+ 0x37, 0x3c, 0x0a, 0x6c, 0x1e, 0xc2, 0xd1, 0x2c,
+ 0xb5, 0xa3, 0xb5, 0x7b, 0xb8, 0x8f, 0x25, 0xa6,
+ 0x1b, 0x61, 0x1c, 0xec, 0x28, 0x58, 0x26, 0xa4,
+ 0xa8, 0x33, 0x28, 0x25, 0x5c, 0x45, 0x05, 0xe5,
+ 0x6c, 0x99, 0xe5, 0x45, 0xc4, 0xa2, 0x03, 0x84,
+ 0x03, 0x73, 0x1e, 0x8c, 0x49, 0xac, 0x20, 0xdd,
+ 0x8d, 0xb3, 0xc4, 0xf5, 0xe7, 0x4f, 0xf1, 0xed,
+ 0xa1, 0x98, 0xde, 0xa4, 0x96, 0xdd, 0x2f, 0xab,
+ 0xab, 0x97, 0xcf, 0x3e, 0xd2, 0x9e, 0xb8, 0x13,
+ 0x07, 0x28, 0x29, 0x19, 0xaf, 0xfd, 0xf2, 0x49,
+ 0x43, 0xea, 0x49, 0x26, 0x91, 0xc1, 0x07, 0xd6,
+ 0xbb, 0x81, 0x75, 0x35, 0x0d, 0x24, 0x7f, 0xc8,
+ 0xda, 0xd4, 0xb7, 0xeb, 0xe8, 0x5c, 0x09, 0xa2,
+ 0x2f, 0xdc, 0x28, 0x7d, 0x3a, 0x03, 0xfa, 0x94,
+ 0xb5, 0x1d, 0x17, 0x99, 0x36, 0xc3, 0x1c, 0x18,
+ 0x34, 0xe3, 0x9f, 0xf5, 0x55, 0x7c, 0xb0, 0x60,
+ 0x9d, 0xff, 0xac, 0xd4, 0x61, 0xf2, 0xad, 0xf8,
+ 0xce, 0xc7, 0xbe, 0x5c, 0xd2, 0x95, 0xa8, 0x4b,
+ 0x77, 0x13, 0x19, 0x59, 0x26, 0xc9, 0xb7, 0x8f,
+ 0x6a, 0xcb, 0x2d, 0x37, 0x91, 0xea, 0x92, 0x9c,
+ 0x94, 0x5b, 0xda, 0x0b, 0xce, 0xfe, 0x30, 0x20,
+ 0xf8, 0x51, 0xad, 0xf2, 0xbe, 0xe7, 0xc7, 0xff,
+ 0xb3, 0x33, 0x91, 0x6a, 0xc9, 0x1a, 0x41, 0xc9,
+ 0x0f, 0xf3, 0x10, 0x0e, 0xfd, 0x53, 0xff, 0x6c,
+ 0x16, 0x52, 0xd9, 0xf3, 0xf7, 0x98, 0x2e, 0xc9,
+ 0x07, 0x31, 0x2c, 0x0c, 0x72, 0xd7, 0xc5, 0xc6,
+ 0x08, 0x2a, 0x7b, 0xda, 0xbd, 0x7e, 0x02, 0xea,
+ 0x1a, 0xbb, 0xf2, 0x04, 0x27, 0x61, 0x28, 0x8e,
+ 0xf5, 0x04, 0x03, 0x1f, 0x4c, 0x07, 0x55, 0x82,
+ 0xec, 0x1e, 0xd7, 0x8b, 0x2f, 0x65, 0x56, 0xd1,
+ 0xd9, 0x1e, 0x3c, 0xe9, 0x1f, 0x5e, 0x98, 0x70,
+ 0x38, 0x4a, 0x8c, 0x49, 0xc5, 0x43, 0xa0, 0xa1,
+ 0x8b, 0x74, 0x9d, 0x4c, 0x62, 0x0d, 0x10, 0x0c,
+ 0xf4, 0x6c, 0x8f, 0xe0, 0xaa, 0x9a, 0x8d, 0xb7,
+ 0xe0, 0xbe, 0x4c, 0x87, 0xf1, 0x98, 0x2f, 0xcc,
+ 0xed, 0xc0, 0x52, 0x29, 0xdc, 0x83, 0xf8, 0xfc,
+ 0x2c, 0x0e, 0xa8, 0x51, 0x4d, 0x80, 0x0d, 0xa3,
+ 0xfe, 0xd8, 0x37, 0xe7, 0x41, 0x24, 0xfc, 0xfb,
+ 0x75, 0xe3, 0x71, 0x7b, 0x57, 0x45, 0xf5, 0x97,
+ 0x73, 0x65, 0x63, 0x14, 0x74, 0xb8, 0x82, 0x9f,
+ 0xf8, 0x60, 0x2f, 0x8a, 0xf2, 0x4e, 0xf1, 0x39,
+ 0xda, 0x33, 0x91, 0xf8, 0x36, 0xe0, 0x8d, 0x3f,
+ 0x1f, 0x3b, 0x56, 0xdc, 0xa0, 0x8f, 0x3c, 0x9d,
+ 0x71, 0x52, 0xa7, 0xb8, 0xc0, 0xa5, 0xc6, 0xa2,
+ 0x73, 0xda, 0xf4, 0x4b, 0x74, 0x5b, 0x00, 0x3d,
+ 0x99, 0xd7, 0x96, 0xba, 0xe6, 0xe1, 0xa6, 0x96,
+ 0x38, 0xad, 0xb3, 0xc0, 0xd2, 0xba, 0x91, 0x6b,
+ 0xf9, 0x19, 0xdd, 0x3b, 0xbe, 0xbe, 0x9c, 0x20,
+ 0x50, 0xba, 0xa1, 0xd0, 0xce, 0x11, 0xbd, 0x95,
+ 0xd8, 0xd1, 0xdd, 0x33, 0x85, 0x74, 0xdc, 0xdb,
+ 0x66, 0x76, 0x44, 0xdc, 0x03, 0x74, 0x48, 0x35,
+ 0x98, 0xb1, 0x18, 0x47, 0x94, 0x7d, 0xff, 0x62,
+ 0xe4, 0x58, 0x78, 0xab, 0xed, 0x95, 0x36, 0xd9,
+ 0x84, 0x91, 0x82, 0x64, 0x41, 0xbb, 0x58, 0xe6,
+ 0x1c, 0x20, 0x6d, 0x15, 0x6b, 0x13, 0x96, 0xe8,
+ 0x35, 0x7f, 0xdc, 0x40, 0x2c, 0xe9, 0xbc, 0x8a,
+ 0x4f, 0x92, 0xec, 0x06, 0x2d, 0x50, 0xdf, 0x93,
+ 0x5d, 0x65, 0x5a, 0xa8, 0xfc, 0x20, 0x50, 0x14,
+ 0xa9, 0x8a, 0x7e, 0x1d, 0x08, 0x1f, 0xe2, 0x99,
+ 0xd0, 0xbe, 0xfb, 0x3a, 0x21, 0x9d, 0xad, 0x86,
+ 0x54, 0xfd, 0x0d, 0x98, 0x1c, 0x5a, 0x6f, 0x1f,
+ 0x9a, 0x40, 0xcd, 0xa2, 0xff, 0x6a, 0xf1, 0x54
+};
+static const u8 dec_output008[] __initconst = {
+ 0xc3, 0x09, 0x94, 0x62, 0xe6, 0x46, 0x2e, 0x10,
+ 0xbe, 0x00, 0xe4, 0xfc, 0xf3, 0x40, 0xa3, 0xe2,
+ 0x0f, 0xc2, 0x8b, 0x28, 0xdc, 0xba, 0xb4, 0x3c,
+ 0xe4, 0x21, 0x58, 0x61, 0xcd, 0x8b, 0xcd, 0xfb,
+ 0xac, 0x94, 0xa1, 0x45, 0xf5, 0x1c, 0xe1, 0x12,
+ 0xe0, 0x3b, 0x67, 0x21, 0x54, 0x5e, 0x8c, 0xaa,
+ 0xcf, 0xdb, 0xb4, 0x51, 0xd4, 0x13, 0xda, 0xe6,
+ 0x83, 0x89, 0xb6, 0x92, 0xe9, 0x21, 0x76, 0xa4,
+ 0x93, 0x7d, 0x0e, 0xfd, 0x96, 0x36, 0x03, 0x91,
+ 0x43, 0x5c, 0x92, 0x49, 0x62, 0x61, 0x7b, 0xeb,
+ 0x43, 0x89, 0xb8, 0x12, 0x20, 0x43, 0xd4, 0x47,
+ 0x06, 0x84, 0xee, 0x47, 0xe9, 0x8a, 0x73, 0x15,
+ 0x0f, 0x72, 0xcf, 0xed, 0xce, 0x96, 0xb2, 0x7f,
+ 0x21, 0x45, 0x76, 0xeb, 0x26, 0x28, 0x83, 0x6a,
+ 0xad, 0xaa, 0xa6, 0x81, 0xd8, 0x55, 0xb1, 0xa3,
+ 0x85, 0xb3, 0x0c, 0xdf, 0xf1, 0x69, 0x2d, 0x97,
+ 0x05, 0x2a, 0xbc, 0x7c, 0x7b, 0x25, 0xf8, 0x80,
+ 0x9d, 0x39, 0x25, 0xf3, 0x62, 0xf0, 0x66, 0x5e,
+ 0xf4, 0xa0, 0xcf, 0xd8, 0xfd, 0x4f, 0xb1, 0x1f,
+ 0x60, 0x3a, 0x08, 0x47, 0xaf, 0xe1, 0xf6, 0x10,
+ 0x77, 0x09, 0xa7, 0x27, 0x8f, 0x9a, 0x97, 0x5a,
+ 0x26, 0xfa, 0xfe, 0x41, 0x32, 0x83, 0x10, 0xe0,
+ 0x1d, 0xbf, 0x64, 0x0d, 0xf4, 0x1c, 0x32, 0x35,
+ 0xe5, 0x1b, 0x36, 0xef, 0xd4, 0x4a, 0x93, 0x4d,
+ 0x00, 0x7c, 0xec, 0x02, 0x07, 0x8b, 0x5d, 0x7d,
+ 0x1b, 0x0e, 0xd1, 0xa6, 0xa5, 0x5d, 0x7d, 0x57,
+ 0x88, 0xa8, 0xcc, 0x81, 0xb4, 0x86, 0x4e, 0xb4,
+ 0x40, 0xe9, 0x1d, 0xc3, 0xb1, 0x24, 0x3e, 0x7f,
+ 0xcc, 0x8a, 0x24, 0x9b, 0xdf, 0x6d, 0xf0, 0x39,
+ 0x69, 0x3e, 0x4c, 0xc0, 0x96, 0xe4, 0x13, 0xda,
+ 0x90, 0xda, 0xf4, 0x95, 0x66, 0x8b, 0x17, 0x17,
+ 0xfe, 0x39, 0x43, 0x25, 0xaa, 0xda, 0xa0, 0x43,
+ 0x3c, 0xb1, 0x41, 0x02, 0xa3, 0xf0, 0xa7, 0x19,
+ 0x59, 0xbc, 0x1d, 0x7d, 0x6c, 0x6d, 0x91, 0x09,
+ 0x5c, 0xb7, 0x5b, 0x01, 0xd1, 0x6f, 0x17, 0x21,
+ 0x97, 0xbf, 0x89, 0x71, 0xa5, 0xb0, 0x6e, 0x07,
+ 0x45, 0xfd, 0x9d, 0xea, 0x07, 0xf6, 0x7a, 0x9f,
+ 0x10, 0x18, 0x22, 0x30, 0x73, 0xac, 0xd4, 0x6b,
+ 0x72, 0x44, 0xed, 0xd9, 0x19, 0x9b, 0x2d, 0x4a,
+ 0x41, 0xdd, 0xd1, 0x85, 0x5e, 0x37, 0x19, 0xed,
+ 0xd2, 0x15, 0x8f, 0x5e, 0x91, 0xdb, 0x33, 0xf2,
+ 0xe4, 0xdb, 0xff, 0x98, 0xfb, 0xa3, 0xb5, 0xca,
+ 0x21, 0x69, 0x08, 0xe7, 0x8a, 0xdf, 0x90, 0xff,
+ 0x3e, 0xe9, 0x20, 0x86, 0x3c, 0xe9, 0xfc, 0x0b,
+ 0xfe, 0x5c, 0x61, 0xaa, 0x13, 0x92, 0x7f, 0x7b,
+ 0xec, 0xe0, 0x6d, 0xa8, 0x23, 0x22, 0xf6, 0x6b,
+ 0x77, 0xc4, 0xfe, 0x40, 0x07, 0x3b, 0xb6, 0xf6,
+ 0x8e, 0x5f, 0xd4, 0xb9, 0xb7, 0x0f, 0x21, 0x04,
+ 0xef, 0x83, 0x63, 0x91, 0x69, 0x40, 0xa3, 0x48,
+ 0x5c, 0xd2, 0x60, 0xf9, 0x4f, 0x6c, 0x47, 0x8b,
+ 0x3b, 0xb1, 0x9f, 0x8e, 0xee, 0x16, 0x8a, 0x13,
+ 0xfc, 0x46, 0x17, 0xc3, 0xc3, 0x32, 0x56, 0xf8,
+ 0x3c, 0x85, 0x3a, 0xb6, 0x3e, 0xaa, 0x89, 0x4f,
+ 0xb3, 0xdf, 0x38, 0xfd, 0xf1, 0xe4, 0x3a, 0xc0,
+ 0xe6, 0x58, 0xb5, 0x8f, 0xc5, 0x29, 0xa2, 0x92,
+ 0x4a, 0xb6, 0xa0, 0x34, 0x7f, 0xab, 0xb5, 0x8a,
+ 0x90, 0xa1, 0xdb, 0x4d, 0xca, 0xb6, 0x2c, 0x41,
+ 0x3c, 0xf7, 0x2b, 0x21, 0xc3, 0xfd, 0xf4, 0x17,
+ 0x5c, 0xb5, 0x33, 0x17, 0x68, 0x2b, 0x08, 0x30,
+ 0xf3, 0xf7, 0x30, 0x3c, 0x96, 0xe6, 0x6a, 0x20,
+ 0x97, 0xe7, 0x4d, 0x10, 0x5f, 0x47, 0x5f, 0x49,
+ 0x96, 0x09, 0xf0, 0x27, 0x91, 0xc8, 0xf8, 0x5a,
+ 0x2e, 0x79, 0xb5, 0xe2, 0xb8, 0xe8, 0xb9, 0x7b,
+ 0xd5, 0x10, 0xcb, 0xff, 0x5d, 0x14, 0x73, 0xf3
+};
+static const u8 dec_assoc008[] __initconst = { };
+static const u8 dec_nonce008[] __initconst = {
+ 0x0e, 0x0d, 0x57, 0xbb, 0x7b, 0x40, 0x54, 0x02
+};
+static const u8 dec_key008[] __initconst = {
+ 0xf2, 0xaa, 0x4f, 0x99, 0xfd, 0x3e, 0xa8, 0x53,
+ 0xc1, 0x44, 0xe9, 0x81, 0x18, 0xdc, 0xf5, 0xf0,
+ 0x3e, 0x44, 0x15, 0x59, 0xe0, 0xc5, 0x44, 0x86,
+ 0xc3, 0x91, 0xa8, 0x75, 0xc0, 0x12, 0x46, 0xba
+};
+
+static const u8 dec_input009[] __initconst = {
+ 0xfd, 0x81, 0x8d, 0xd0, 0x3d, 0xb4, 0xd5, 0xdf,
+ 0xd3, 0x42, 0x47, 0x5a, 0x6d, 0x19, 0x27, 0x66,
+ 0x4b, 0x2e, 0x0c, 0x27, 0x9c, 0x96, 0x4c, 0x72,
+ 0x02, 0xa3, 0x65, 0xc3, 0xb3, 0x6f, 0x2e, 0xbd,
+ 0x63, 0x8a, 0x4a, 0x5d, 0x29, 0xa2, 0xd0, 0x28,
+ 0x48, 0xc5, 0x3d, 0x98, 0xa3, 0xbc, 0xe0, 0xbe,
+ 0x3b, 0x3f, 0xe6, 0x8a, 0xa4, 0x7f, 0x53, 0x06,
+ 0xfa, 0x7f, 0x27, 0x76, 0x72, 0x31, 0xa1, 0xf5,
+ 0xd6, 0x0c, 0x52, 0x47, 0xba, 0xcd, 0x4f, 0xd7,
+ 0xeb, 0x05, 0x48, 0x0d, 0x7c, 0x35, 0x4a, 0x09,
+ 0xc9, 0x76, 0x71, 0x02, 0xa3, 0xfb, 0xb7, 0x1a,
+ 0x65, 0xb7, 0xed, 0x98, 0xc6, 0x30, 0x8a, 0x00,
+ 0xae, 0xa1, 0x31, 0xe5, 0xb5, 0x9e, 0x6d, 0x62,
+ 0xda, 0xda, 0x07, 0x0f, 0x38, 0x38, 0xd3, 0xcb,
+ 0xc1, 0xb0, 0xad, 0xec, 0x72, 0xec, 0xb1, 0xa2,
+ 0x7b, 0x59, 0xf3, 0x3d, 0x2b, 0xef, 0xcd, 0x28,
+ 0x5b, 0x83, 0xcc, 0x18, 0x91, 0x88, 0xb0, 0x2e,
+ 0xf9, 0x29, 0x31, 0x18, 0xf9, 0x4e, 0xe9, 0x0a,
+ 0x91, 0x92, 0x9f, 0xae, 0x2d, 0xad, 0xf4, 0xe6,
+ 0x1a, 0xe2, 0xa4, 0xee, 0x47, 0x15, 0xbf, 0x83,
+ 0x6e, 0xd7, 0x72, 0x12, 0x3b, 0x2d, 0x24, 0xe9,
+ 0xb2, 0x55, 0xcb, 0x3c, 0x10, 0xf0, 0x24, 0x8a,
+ 0x4a, 0x02, 0xea, 0x90, 0x25, 0xf0, 0xb4, 0x79,
+ 0x3a, 0xef, 0x6e, 0xf5, 0x52, 0xdf, 0xb0, 0x0a,
+ 0xcd, 0x24, 0x1c, 0xd3, 0x2e, 0x22, 0x74, 0xea,
+ 0x21, 0x6f, 0xe9, 0xbd, 0xc8, 0x3e, 0x36, 0x5b,
+ 0x19, 0xf1, 0xca, 0x99, 0x0a, 0xb4, 0xa7, 0x52,
+ 0x1a, 0x4e, 0xf2, 0xad, 0x8d, 0x56, 0x85, 0xbb,
+ 0x64, 0x89, 0xba, 0x26, 0xf9, 0xc7, 0xe1, 0x89,
+ 0x19, 0x22, 0x77, 0xc3, 0xa8, 0xfc, 0xff, 0xad,
+ 0xfe, 0xb9, 0x48, 0xae, 0x12, 0x30, 0x9f, 0x19,
+ 0xfb, 0x1b, 0xef, 0x14, 0x87, 0x8a, 0x78, 0x71,
+ 0xf3, 0xf4, 0xb7, 0x00, 0x9c, 0x1d, 0xb5, 0x3d,
+ 0x49, 0x00, 0x0c, 0x06, 0xd4, 0x50, 0xf9, 0x54,
+ 0x45, 0xb2, 0x5b, 0x43, 0xdb, 0x6d, 0xcf, 0x1a,
+ 0xe9, 0x7a, 0x7a, 0xcf, 0xfc, 0x8a, 0x4e, 0x4d,
+ 0x0b, 0x07, 0x63, 0x28, 0xd8, 0xe7, 0x08, 0x95,
+ 0xdf, 0xa6, 0x72, 0x93, 0x2e, 0xbb, 0xa0, 0x42,
+ 0x89, 0x16, 0xf1, 0xd9, 0x0c, 0xf9, 0xa1, 0x16,
+ 0xfd, 0xd9, 0x03, 0xb4, 0x3b, 0x8a, 0xf5, 0xf6,
+ 0xe7, 0x6b, 0x2e, 0x8e, 0x4c, 0x3d, 0xe2, 0xaf,
+ 0x08, 0x45, 0x03, 0xff, 0x09, 0xb6, 0xeb, 0x2d,
+ 0xc6, 0x1b, 0x88, 0x94, 0xac, 0x3e, 0xf1, 0x9f,
+ 0x0e, 0x0e, 0x2b, 0xd5, 0x00, 0x4d, 0x3f, 0x3b,
+ 0x53, 0xae, 0xaf, 0x1c, 0x33, 0x5f, 0x55, 0x6e,
+ 0x8d, 0xaf, 0x05, 0x7a, 0x10, 0x34, 0xc9, 0xf4,
+ 0x66, 0xcb, 0x62, 0x12, 0xa6, 0xee, 0xe8, 0x1c,
+ 0x5d, 0x12, 0x86, 0xdb, 0x6f, 0x1c, 0x33, 0xc4,
+ 0x1c, 0xda, 0x82, 0x2d, 0x3b, 0x59, 0xfe, 0xb1,
+ 0xa4, 0x59, 0x41, 0x86, 0xd0, 0xef, 0xae, 0xfb,
+ 0xda, 0x6d, 0x11, 0xb8, 0xca, 0xe9, 0x6e, 0xff,
+ 0xf7, 0xa9, 0xd9, 0x70, 0x30, 0xfc, 0x53, 0xe2,
+ 0xd7, 0xa2, 0x4e, 0xc7, 0x91, 0xd9, 0x07, 0x06,
+ 0xaa, 0xdd, 0xb0, 0x59, 0x28, 0x1d, 0x00, 0x66,
+ 0xc5, 0x54, 0xc2, 0xfc, 0x06, 0xda, 0x05, 0x90,
+ 0x52, 0x1d, 0x37, 0x66, 0xee, 0xf0, 0xb2, 0x55,
+ 0x8a, 0x5d, 0xd2, 0x38, 0x86, 0x94, 0x9b, 0xfc,
+ 0x10, 0x4c, 0xa1, 0xb9, 0x64, 0x3e, 0x44, 0xb8,
+ 0x5f, 0xb0, 0x0c, 0xec, 0xe0, 0xc9, 0xe5, 0x62,
+ 0x75, 0x3f, 0x09, 0xd5, 0xf5, 0xd9, 0x26, 0xba,
+ 0x9e, 0xd2, 0xf4, 0xb9, 0x48, 0x0a, 0xbc, 0xa2,
+ 0xd6, 0x7c, 0x36, 0x11, 0x7d, 0x26, 0x81, 0x89,
+ 0xcf, 0xa4, 0xad, 0x73, 0x0e, 0xee, 0xcc, 0x06,
+ 0xa9, 0xdb, 0xb1, 0xfd, 0xfb, 0x09, 0x7f, 0x90,
+ 0x42, 0x37, 0x2f, 0xe1, 0x9c, 0x0f, 0x6f, 0xcf,
+ 0x43, 0xb5, 0xd9, 0x90, 0xe1, 0x85, 0xf5, 0xa8,
+ 0xae
+};
+static const u8 dec_output009[] __initconst = {
+ 0xe6, 0xc3, 0xdb, 0x63, 0x55, 0x15, 0xe3, 0x5b,
+ 0xb7, 0x4b, 0x27, 0x8b, 0x5a, 0xdd, 0xc2, 0xe8,
+ 0x3a, 0x6b, 0xd7, 0x81, 0x96, 0x35, 0x97, 0xca,
+ 0xd7, 0x68, 0xe8, 0xef, 0xce, 0xab, 0xda, 0x09,
+ 0x6e, 0xd6, 0x8e, 0xcb, 0x55, 0xb5, 0xe1, 0xe5,
+ 0x57, 0xfd, 0xc4, 0xe3, 0xe0, 0x18, 0x4f, 0x85,
+ 0xf5, 0x3f, 0x7e, 0x4b, 0x88, 0xc9, 0x52, 0x44,
+ 0x0f, 0xea, 0xaf, 0x1f, 0x71, 0x48, 0x9f, 0x97,
+ 0x6d, 0xb9, 0x6f, 0x00, 0xa6, 0xde, 0x2b, 0x77,
+ 0x8b, 0x15, 0xad, 0x10, 0xa0, 0x2b, 0x7b, 0x41,
+ 0x90, 0x03, 0x2d, 0x69, 0xae, 0xcc, 0x77, 0x7c,
+ 0xa5, 0x9d, 0x29, 0x22, 0xc2, 0xea, 0xb4, 0x00,
+ 0x1a, 0xd2, 0x7a, 0x98, 0x8a, 0xf9, 0xf7, 0x82,
+ 0xb0, 0xab, 0xd8, 0xa6, 0x94, 0x8d, 0x58, 0x2f,
+ 0x01, 0x9e, 0x00, 0x20, 0xfc, 0x49, 0xdc, 0x0e,
+ 0x03, 0xe8, 0x45, 0x10, 0xd6, 0xa8, 0xda, 0x55,
+ 0x10, 0x9a, 0xdf, 0x67, 0x22, 0x8b, 0x43, 0xab,
+ 0x00, 0xbb, 0x02, 0xc8, 0xdd, 0x7b, 0x97, 0x17,
+ 0xd7, 0x1d, 0x9e, 0x02, 0x5e, 0x48, 0xde, 0x8e,
+ 0xcf, 0x99, 0x07, 0x95, 0x92, 0x3c, 0x5f, 0x9f,
+ 0xc5, 0x8a, 0xc0, 0x23, 0xaa, 0xd5, 0x8c, 0x82,
+ 0x6e, 0x16, 0x92, 0xb1, 0x12, 0x17, 0x07, 0xc3,
+ 0xfb, 0x36, 0xf5, 0x6c, 0x35, 0xd6, 0x06, 0x1f,
+ 0x9f, 0xa7, 0x94, 0xa2, 0x38, 0x63, 0x9c, 0xb0,
+ 0x71, 0xb3, 0xa5, 0xd2, 0xd8, 0xba, 0x9f, 0x08,
+ 0x01, 0xb3, 0xff, 0x04, 0x97, 0x73, 0x45, 0x1b,
+ 0xd5, 0xa9, 0x9c, 0x80, 0xaf, 0x04, 0x9a, 0x85,
+ 0xdb, 0x32, 0x5b, 0x5d, 0x1a, 0xc1, 0x36, 0x28,
+ 0x10, 0x79, 0xf1, 0x3c, 0xbf, 0x1a, 0x41, 0x5c,
+ 0x4e, 0xdf, 0xb2, 0x7c, 0x79, 0x3b, 0x7a, 0x62,
+ 0x3d, 0x4b, 0xc9, 0x9b, 0x2a, 0x2e, 0x7c, 0xa2,
+ 0xb1, 0x11, 0x98, 0xa7, 0x34, 0x1a, 0x00, 0xf3,
+ 0xd1, 0xbc, 0x18, 0x22, 0xba, 0x02, 0x56, 0x62,
+ 0x31, 0x10, 0x11, 0x6d, 0xe0, 0x54, 0x9d, 0x40,
+ 0x1f, 0x26, 0x80, 0x41, 0xca, 0x3f, 0x68, 0x0f,
+ 0x32, 0x1d, 0x0a, 0x8e, 0x79, 0xd8, 0xa4, 0x1b,
+ 0x29, 0x1c, 0x90, 0x8e, 0xc5, 0xe3, 0xb4, 0x91,
+ 0x37, 0x9a, 0x97, 0x86, 0x99, 0xd5, 0x09, 0xc5,
+ 0xbb, 0xa3, 0x3f, 0x21, 0x29, 0x82, 0x14, 0x5c,
+ 0xab, 0x25, 0xfb, 0xf2, 0x4f, 0x58, 0x26, 0xd4,
+ 0x83, 0xaa, 0x66, 0x89, 0x67, 0x7e, 0xc0, 0x49,
+ 0xe1, 0x11, 0x10, 0x7f, 0x7a, 0xda, 0x29, 0x04,
+ 0xff, 0xf0, 0xcb, 0x09, 0x7c, 0x9d, 0xfa, 0x03,
+ 0x6f, 0x81, 0x09, 0x31, 0x60, 0xfb, 0x08, 0xfa,
+ 0x74, 0xd3, 0x64, 0x44, 0x7c, 0x55, 0x85, 0xec,
+ 0x9c, 0x6e, 0x25, 0xb7, 0x6c, 0xc5, 0x37, 0xb6,
+ 0x83, 0x87, 0x72, 0x95, 0x8b, 0x9d, 0xe1, 0x69,
+ 0x5c, 0x31, 0x95, 0x42, 0xa6, 0x2c, 0xd1, 0x36,
+ 0x47, 0x1f, 0xec, 0x54, 0xab, 0xa2, 0x1c, 0xd8,
+ 0x00, 0xcc, 0xbc, 0x0d, 0x65, 0xe2, 0x67, 0xbf,
+ 0xbc, 0xea, 0xee, 0x9e, 0xe4, 0x36, 0x95, 0xbe,
+ 0x73, 0xd9, 0xa6, 0xd9, 0x0f, 0xa0, 0xcc, 0x82,
+ 0x76, 0x26, 0xad, 0x5b, 0x58, 0x6c, 0x4e, 0xab,
+ 0x29, 0x64, 0xd3, 0xd9, 0xa9, 0x08, 0x8c, 0x1d,
+ 0xa1, 0x4f, 0x80, 0xd8, 0x3f, 0x94, 0xfb, 0xd3,
+ 0x7b, 0xfc, 0xd1, 0x2b, 0xc3, 0x21, 0xeb, 0xe5,
+ 0x1c, 0x84, 0x23, 0x7f, 0x4b, 0xfa, 0xdb, 0x34,
+ 0x18, 0xa2, 0xc2, 0xe5, 0x13, 0xfe, 0x6c, 0x49,
+ 0x81, 0xd2, 0x73, 0xe7, 0xe2, 0xd7, 0xe4, 0x4f,
+ 0x4b, 0x08, 0x6e, 0xb1, 0x12, 0x22, 0x10, 0x9d,
+ 0xac, 0x51, 0x1e, 0x17, 0xd9, 0x8a, 0x0b, 0x42,
+ 0x88, 0x16, 0x81, 0x37, 0x7c, 0x6a, 0xf7, 0xef,
+ 0x2d, 0xe3, 0xd9, 0xf8, 0x5f, 0xe0, 0x53, 0x27,
+ 0x74, 0xb9, 0xe2, 0xd6, 0x1c, 0x80, 0x2c, 0x52,
+ 0x65
+};
+static const u8 dec_assoc009[] __initconst = {
+ 0x5a, 0x27, 0xff, 0xeb, 0xdf, 0x84, 0xb2, 0x9e,
+ 0xef
+};
+static const u8 dec_nonce009[] __initconst = {
+ 0xef, 0x2d, 0x63, 0xee, 0x6b, 0x80, 0x8b, 0x78
+};
+static const u8 dec_key009[] __initconst = {
+ 0xea, 0xbc, 0x56, 0x99, 0xe3, 0x50, 0xff, 0xc5,
+ 0xcc, 0x1a, 0xd7, 0xc1, 0x57, 0x72, 0xea, 0x86,
+ 0x5b, 0x89, 0x88, 0x61, 0x3d, 0x2f, 0x9b, 0xb2,
+ 0xe7, 0x9c, 0xec, 0x74, 0x6e, 0x3e, 0xf4, 0x3b
+};
+
+static const u8 dec_input010[] __initconst = {
+ 0xe5, 0x26, 0xa4, 0x3d, 0xbd, 0x33, 0xd0, 0x4b,
+ 0x6f, 0x05, 0xa7, 0x6e, 0x12, 0x7a, 0xd2, 0x74,
+ 0xa6, 0xdd, 0xbd, 0x95, 0xeb, 0xf9, 0xa4, 0xf1,
+ 0x59, 0x93, 0x91, 0x70, 0xd9, 0xfe, 0x9a, 0xcd,
+ 0x53, 0x1f, 0x3a, 0xab, 0xa6, 0x7c, 0x9f, 0xa6,
+ 0x9e, 0xbd, 0x99, 0xd9, 0xb5, 0x97, 0x44, 0xd5,
+ 0x14, 0x48, 0x4d, 0x9d, 0xc0, 0xd0, 0x05, 0x96,
+ 0xeb, 0x4c, 0x78, 0x55, 0x09, 0x08, 0x01, 0x02,
+ 0x30, 0x90, 0x7b, 0x96, 0x7a, 0x7b, 0x5f, 0x30,
+ 0x41, 0x24, 0xce, 0x68, 0x61, 0x49, 0x86, 0x57,
+ 0x82, 0xdd, 0x53, 0x1c, 0x51, 0x28, 0x2b, 0x53,
+ 0x6e, 0x2d, 0xc2, 0x20, 0x4c, 0xdd, 0x8f, 0x65,
+ 0x10, 0x20, 0x50, 0xdd, 0x9d, 0x50, 0xe5, 0x71,
+ 0x40, 0x53, 0x69, 0xfc, 0x77, 0x48, 0x11, 0xb9,
+ 0xde, 0xa4, 0x8d, 0x58, 0xe4, 0xa6, 0x1a, 0x18,
+ 0x47, 0x81, 0x7e, 0xfc, 0xdd, 0xf6, 0xef, 0xce,
+ 0x2f, 0x43, 0x68, 0xd6, 0x06, 0xe2, 0x74, 0x6a,
+ 0xad, 0x90, 0xf5, 0x37, 0xf3, 0x3d, 0x82, 0x69,
+ 0x40, 0xe9, 0x6b, 0xa7, 0x3d, 0xa8, 0x1e, 0xd2,
+ 0x02, 0x7c, 0xb7, 0x9b, 0xe4, 0xda, 0x8f, 0x95,
+ 0x06, 0xc5, 0xdf, 0x73, 0xa3, 0x20, 0x9a, 0x49,
+ 0xde, 0x9c, 0xbc, 0xee, 0x14, 0x3f, 0x81, 0x5e,
+ 0xf8, 0x3b, 0x59, 0x3c, 0xe1, 0x68, 0x12, 0x5a,
+ 0x3a, 0x76, 0x3a, 0x3f, 0xf7, 0x87, 0x33, 0x0a,
+ 0x01, 0xb8, 0xd4, 0xed, 0xb6, 0xbe, 0x94, 0x5e,
+ 0x70, 0x40, 0x56, 0x67, 0x1f, 0x50, 0x44, 0x19,
+ 0xce, 0x82, 0x70, 0x10, 0x87, 0x13, 0x20, 0x0b,
+ 0x4c, 0x5a, 0xb6, 0xf6, 0xa7, 0xae, 0x81, 0x75,
+ 0x01, 0x81, 0xe6, 0x4b, 0x57, 0x7c, 0xdd, 0x6d,
+ 0xf8, 0x1c, 0x29, 0x32, 0xf7, 0xda, 0x3c, 0x2d,
+ 0xf8, 0x9b, 0x25, 0x6e, 0x00, 0xb4, 0xf7, 0x2f,
+ 0xf7, 0x04, 0xf7, 0xa1, 0x56, 0xac, 0x4f, 0x1a,
+ 0x64, 0xb8, 0x47, 0x55, 0x18, 0x7b, 0x07, 0x4d,
+ 0xbd, 0x47, 0x24, 0x80, 0x5d, 0xa2, 0x70, 0xc5,
+ 0xdd, 0x8e, 0x82, 0xd4, 0xeb, 0xec, 0xb2, 0x0c,
+ 0x39, 0xd2, 0x97, 0xc1, 0xcb, 0xeb, 0xf4, 0x77,
+ 0x59, 0xb4, 0x87, 0xef, 0xcb, 0x43, 0x2d, 0x46,
+ 0x54, 0xd1, 0xa7, 0xd7, 0x15, 0x99, 0x0a, 0x43,
+ 0xa1, 0xe0, 0x99, 0x33, 0x71, 0xc1, 0xed, 0xfe,
+ 0x72, 0x46, 0x33, 0x8e, 0x91, 0x08, 0x9f, 0xc8,
+ 0x2e, 0xca, 0xfa, 0xdc, 0x59, 0xd5, 0xc3, 0x76,
+ 0x84, 0x9f, 0xa3, 0x37, 0x68, 0xc3, 0xf0, 0x47,
+ 0x2c, 0x68, 0xdb, 0x5e, 0xc3, 0x49, 0x4c, 0xe8,
+ 0x92, 0x85, 0xe2, 0x23, 0xd3, 0x3f, 0xad, 0x32,
+ 0xe5, 0x2b, 0x82, 0xd7, 0x8f, 0x99, 0x0a, 0x59,
+ 0x5c, 0x45, 0xd9, 0xb4, 0x51, 0x52, 0xc2, 0xae,
+ 0xbf, 0x80, 0xcf, 0xc9, 0xc9, 0x51, 0x24, 0x2a,
+ 0x3b, 0x3a, 0x4d, 0xae, 0xeb, 0xbd, 0x22, 0xc3,
+ 0x0e, 0x0f, 0x59, 0x25, 0x92, 0x17, 0xe9, 0x74,
+ 0xc7, 0x8b, 0x70, 0x70, 0x36, 0x55, 0x95, 0x75,
+ 0x4b, 0xad, 0x61, 0x2b, 0x09, 0xbc, 0x82, 0xf2,
+ 0x6e, 0x94, 0x43, 0xae, 0xc3, 0xd5, 0xcd, 0x8e,
+ 0xfe, 0x5b, 0x9a, 0x88, 0x43, 0x01, 0x75, 0xb2,
+ 0x23, 0x09, 0xf7, 0x89, 0x83, 0xe7, 0xfa, 0xf9,
+ 0xb4, 0x9b, 0xf8, 0xef, 0xbd, 0x1c, 0x92, 0xc1,
+ 0xda, 0x7e, 0xfe, 0x05, 0xba, 0x5a, 0xcd, 0x07,
+ 0x6a, 0x78, 0x9e, 0x5d, 0xfb, 0x11, 0x2f, 0x79,
+ 0x38, 0xb6, 0xc2, 0x5b, 0x6b, 0x51, 0xb4, 0x71,
+ 0xdd, 0xf7, 0x2a, 0xe4, 0xf4, 0x72, 0x76, 0xad,
+ 0xc2, 0xdd, 0x64, 0x5d, 0x79, 0xb6, 0xf5, 0x7a,
+ 0x77, 0x20, 0x05, 0x3d, 0x30, 0x06, 0xd4, 0x4c,
+ 0x0a, 0x2c, 0x98, 0x5a, 0xb9, 0xd4, 0x98, 0xa9,
+ 0x3f, 0xc6, 0x12, 0xea, 0x3b, 0x4b, 0xc5, 0x79,
+ 0x64, 0x63, 0x6b, 0x09, 0x54, 0x3b, 0x14, 0x27,
+ 0xba, 0x99, 0x80, 0xc8, 0x72, 0xa8, 0x12, 0x90,
+ 0x29, 0xba, 0x40, 0x54, 0x97, 0x2b, 0x7b, 0xfe,
+ 0xeb, 0xcd, 0x01, 0x05, 0x44, 0x72, 0xdb, 0x99,
+ 0xe4, 0x61, 0xc9, 0x69, 0xd6, 0xb9, 0x28, 0xd1,
+ 0x05, 0x3e, 0xf9, 0x0b, 0x49, 0x0a, 0x49, 0xe9,
+ 0x8d, 0x0e, 0xa7, 0x4a, 0x0f, 0xaf, 0x32, 0xd0,
+ 0xe0, 0xb2, 0x3a, 0x55, 0x58, 0xfe, 0x5c, 0x28,
+ 0x70, 0x51, 0x23, 0xb0, 0x7b, 0x6a, 0x5f, 0x1e,
+ 0xb8, 0x17, 0xd7, 0x94, 0x15, 0x8f, 0xee, 0x20,
+ 0xc7, 0x42, 0x25, 0x3e, 0x9a, 0x14, 0xd7, 0x60,
+ 0x72, 0x39, 0x47, 0x48, 0xa9, 0xfe, 0xdd, 0x47,
+ 0x0a, 0xb1, 0xe6, 0x60, 0x28, 0x8c, 0x11, 0x68,
+ 0xe1, 0xff, 0xd7, 0xce, 0xc8, 0xbe, 0xb3, 0xfe,
+ 0x27, 0x30, 0x09, 0x70, 0xd7, 0xfa, 0x02, 0x33,
+ 0x3a, 0x61, 0x2e, 0xc7, 0xff, 0xa4, 0x2a, 0xa8,
+ 0x6e, 0xb4, 0x79, 0x35, 0x6d, 0x4c, 0x1e, 0x38,
+ 0xf8, 0xee, 0xd4, 0x84, 0x4e, 0x6e, 0x28, 0xa7,
+ 0xce, 0xc8, 0xc1, 0xcf, 0x80, 0x05, 0xf3, 0x04,
+ 0xef, 0xc8, 0x18, 0x28, 0x2e, 0x8d, 0x5e, 0x0c,
+ 0xdf, 0xb8, 0x5f, 0x96, 0xe8, 0xc6, 0x9c, 0x2f,
+ 0xe5, 0xa6, 0x44, 0xd7, 0xe7, 0x99, 0x44, 0x0c,
+ 0xec, 0xd7, 0x05, 0x60, 0x97, 0xbb, 0x74, 0x77,
+ 0x58, 0xd5, 0xbb, 0x48, 0xde, 0x5a, 0xb2, 0x54,
+ 0x7f, 0x0e, 0x46, 0x70, 0x6a, 0x6f, 0x78, 0xa5,
+ 0x08, 0x89, 0x05, 0x4e, 0x7e, 0xa0, 0x69, 0xb4,
+ 0x40, 0x60, 0x55, 0x77, 0x75, 0x9b, 0x19, 0xf2,
+ 0xd5, 0x13, 0x80, 0x77, 0xf9, 0x4b, 0x3f, 0x1e,
+ 0xee, 0xe6, 0x76, 0x84, 0x7b, 0x8c, 0xe5, 0x27,
+ 0xa8, 0x0a, 0x91, 0x01, 0x68, 0x71, 0x8a, 0x3f,
+ 0x06, 0xab, 0xf6, 0xa9, 0xa5, 0xe6, 0x72, 0x92,
+ 0xe4, 0x67, 0xe2, 0xa2, 0x46, 0x35, 0x84, 0x55,
+ 0x7d, 0xca, 0xa8, 0x85, 0xd0, 0xf1, 0x3f, 0xbe,
+ 0xd7, 0x34, 0x64, 0xfc, 0xae, 0xe3, 0xe4, 0x04,
+ 0x9f, 0x66, 0x02, 0xb9, 0x88, 0x10, 0xd9, 0xc4,
+ 0x4c, 0x31, 0x43, 0x7a, 0x93, 0xe2, 0x9b, 0x56,
+ 0x43, 0x84, 0xdc, 0xdc, 0xde, 0x1d, 0xa4, 0x02,
+ 0x0e, 0xc2, 0xef, 0xc3, 0xf8, 0x78, 0xd1, 0xb2,
+ 0x6b, 0x63, 0x18, 0xc9, 0xa9, 0xe5, 0x72, 0xd8,
+ 0xf3, 0xb9, 0xd1, 0x8a, 0xc7, 0x1a, 0x02, 0x27,
+ 0x20, 0x77, 0x10, 0xe5, 0xc8, 0xd4, 0x4a, 0x47,
+ 0xe5, 0xdf, 0x5f, 0x01, 0xaa, 0xb0, 0xd4, 0x10,
+ 0xbb, 0x69, 0xe3, 0x36, 0xc8, 0xe1, 0x3d, 0x43,
+ 0xfb, 0x86, 0xcd, 0xcc, 0xbf, 0xf4, 0x88, 0xe0,
+ 0x20, 0xca, 0xb7, 0x1b, 0xf1, 0x2f, 0x5c, 0xee,
+ 0xd4, 0xd3, 0xa3, 0xcc, 0xa4, 0x1e, 0x1c, 0x47,
+ 0xfb, 0xbf, 0xfc, 0xa2, 0x41, 0x55, 0x9d, 0xf6,
+ 0x5a, 0x5e, 0x65, 0x32, 0x34, 0x7b, 0x52, 0x8d,
+ 0xd5, 0xd0, 0x20, 0x60, 0x03, 0xab, 0x3f, 0x8c,
+ 0xd4, 0x21, 0xea, 0x2a, 0xd9, 0xc4, 0xd0, 0xd3,
+ 0x65, 0xd8, 0x7a, 0x13, 0x28, 0x62, 0x32, 0x4b,
+ 0x2c, 0x87, 0x93, 0xa8, 0xb4, 0x52, 0x45, 0x09,
+ 0x44, 0xec, 0xec, 0xc3, 0x17, 0xdb, 0x9a, 0x4d,
+ 0x5c, 0xa9, 0x11, 0xd4, 0x7d, 0xaf, 0x9e, 0xf1,
+ 0x2d, 0xb2, 0x66, 0xc5, 0x1d, 0xed, 0xb7, 0xcd,
+ 0x0b, 0x25, 0x5e, 0x30, 0x47, 0x3f, 0x40, 0xf4,
+ 0xa1, 0xa0, 0x00, 0x94, 0x10, 0xc5, 0x6a, 0x63,
+ 0x1a, 0xd5, 0x88, 0x92, 0x8e, 0x82, 0x39, 0x87,
+ 0x3c, 0x78, 0x65, 0x58, 0x42, 0x75, 0x5b, 0xdd,
+ 0x77, 0x3e, 0x09, 0x4e, 0x76, 0x5b, 0xe6, 0x0e,
+ 0x4d, 0x38, 0xb2, 0xc0, 0xb8, 0x95, 0x01, 0x7a,
+ 0x10, 0xe0, 0xfb, 0x07, 0xf2, 0xab, 0x2d, 0x8c,
+ 0x32, 0xed, 0x2b, 0xc0, 0x46, 0xc2, 0xf5, 0x38,
+ 0x83, 0xf0, 0x17, 0xec, 0xc1, 0x20, 0x6a, 0x9a,
+ 0x0b, 0x00, 0xa0, 0x98, 0x22, 0x50, 0x23, 0xd5,
+ 0x80, 0x6b, 0xf6, 0x1f, 0xc3, 0xcc, 0x97, 0xc9,
+ 0x24, 0x9f, 0xf3, 0xaf, 0x43, 0x14, 0xd5, 0xa0
+};
+static const u8 dec_output010[] __initconst = {
+ 0x42, 0x93, 0xe4, 0xeb, 0x97, 0xb0, 0x57, 0xbf,
+ 0x1a, 0x8b, 0x1f, 0xe4, 0x5f, 0x36, 0x20, 0x3c,
+ 0xef, 0x0a, 0xa9, 0x48, 0x5f, 0x5f, 0x37, 0x22,
+ 0x3a, 0xde, 0xe3, 0xae, 0xbe, 0xad, 0x07, 0xcc,
+ 0xb1, 0xf6, 0xf5, 0xf9, 0x56, 0xdd, 0xe7, 0x16,
+ 0x1e, 0x7f, 0xdf, 0x7a, 0x9e, 0x75, 0xb7, 0xc7,
+ 0xbe, 0xbe, 0x8a, 0x36, 0x04, 0xc0, 0x10, 0xf4,
+ 0x95, 0x20, 0x03, 0xec, 0xdc, 0x05, 0xa1, 0x7d,
+ 0xc4, 0xa9, 0x2c, 0x82, 0xd0, 0xbc, 0x8b, 0xc5,
+ 0xc7, 0x45, 0x50, 0xf6, 0xa2, 0x1a, 0xb5, 0x46,
+ 0x3b, 0x73, 0x02, 0xa6, 0x83, 0x4b, 0x73, 0x82,
+ 0x58, 0x5e, 0x3b, 0x65, 0x2f, 0x0e, 0xfd, 0x2b,
+ 0x59, 0x16, 0xce, 0xa1, 0x60, 0x9c, 0xe8, 0x3a,
+ 0x99, 0xed, 0x8d, 0x5a, 0xcf, 0xf6, 0x83, 0xaf,
+ 0xba, 0xd7, 0x73, 0x73, 0x40, 0x97, 0x3d, 0xca,
+ 0xef, 0x07, 0x57, 0xe6, 0xd9, 0x70, 0x0e, 0x95,
+ 0xae, 0xa6, 0x8d, 0x04, 0xcc, 0xee, 0xf7, 0x09,
+ 0x31, 0x77, 0x12, 0xa3, 0x23, 0x97, 0x62, 0xb3,
+ 0x7b, 0x32, 0xfb, 0x80, 0x14, 0x48, 0x81, 0xc3,
+ 0xe5, 0xea, 0x91, 0x39, 0x52, 0x81, 0xa2, 0x4f,
+ 0xe4, 0xb3, 0x09, 0xff, 0xde, 0x5e, 0xe9, 0x58,
+ 0x84, 0x6e, 0xf9, 0x3d, 0xdf, 0x25, 0xea, 0xad,
+ 0xae, 0xe6, 0x9a, 0xd1, 0x89, 0x55, 0xd3, 0xde,
+ 0x6c, 0x52, 0xdb, 0x70, 0xfe, 0x37, 0xce, 0x44,
+ 0x0a, 0xa8, 0x25, 0x5f, 0x92, 0xc1, 0x33, 0x4a,
+ 0x4f, 0x9b, 0x62, 0x35, 0xff, 0xce, 0xc0, 0xa9,
+ 0x60, 0xce, 0x52, 0x00, 0x97, 0x51, 0x35, 0x26,
+ 0x2e, 0xb9, 0x36, 0xa9, 0x87, 0x6e, 0x1e, 0xcc,
+ 0x91, 0x78, 0x53, 0x98, 0x86, 0x5b, 0x9c, 0x74,
+ 0x7d, 0x88, 0x33, 0xe1, 0xdf, 0x37, 0x69, 0x2b,
+ 0xbb, 0xf1, 0x4d, 0xf4, 0xd1, 0xf1, 0x39, 0x93,
+ 0x17, 0x51, 0x19, 0xe3, 0x19, 0x1e, 0x76, 0x37,
+ 0x25, 0xfb, 0x09, 0x27, 0x6a, 0xab, 0x67, 0x6f,
+ 0x14, 0x12, 0x64, 0xe7, 0xc4, 0x07, 0xdf, 0x4d,
+ 0x17, 0xbb, 0x6d, 0xe0, 0xe9, 0xb9, 0xab, 0xca,
+ 0x10, 0x68, 0xaf, 0x7e, 0xb7, 0x33, 0x54, 0x73,
+ 0x07, 0x6e, 0xf7, 0x81, 0x97, 0x9c, 0x05, 0x6f,
+ 0x84, 0x5f, 0xd2, 0x42, 0xfb, 0x38, 0xcf, 0xd1,
+ 0x2f, 0x14, 0x30, 0x88, 0x98, 0x4d, 0x5a, 0xa9,
+ 0x76, 0xd5, 0x4f, 0x3e, 0x70, 0x6c, 0x85, 0x76,
+ 0xd7, 0x01, 0xa0, 0x1a, 0xc8, 0x4e, 0xaa, 0xac,
+ 0x78, 0xfe, 0x46, 0xde, 0x6a, 0x05, 0x46, 0xa7,
+ 0x43, 0x0c, 0xb9, 0xde, 0xb9, 0x68, 0xfb, 0xce,
+ 0x42, 0x99, 0x07, 0x4d, 0x0b, 0x3b, 0x5a, 0x30,
+ 0x35, 0xa8, 0xf9, 0x3a, 0x73, 0xef, 0x0f, 0xdb,
+ 0x1e, 0x16, 0x42, 0xc4, 0xba, 0xae, 0x58, 0xaa,
+ 0xf8, 0xe5, 0x75, 0x2f, 0x1b, 0x15, 0x5c, 0xfd,
+ 0x0a, 0x97, 0xd0, 0xe4, 0x37, 0x83, 0x61, 0x5f,
+ 0x43, 0xa6, 0xc7, 0x3f, 0x38, 0x59, 0xe6, 0xeb,
+ 0xa3, 0x90, 0xc3, 0xaa, 0xaa, 0x5a, 0xd3, 0x34,
+ 0xd4, 0x17, 0xc8, 0x65, 0x3e, 0x57, 0xbc, 0x5e,
+ 0xdd, 0x9e, 0xb7, 0xf0, 0x2e, 0x5b, 0xb2, 0x1f,
+ 0x8a, 0x08, 0x0d, 0x45, 0x91, 0x0b, 0x29, 0x53,
+ 0x4f, 0x4c, 0x5a, 0x73, 0x56, 0xfe, 0xaf, 0x41,
+ 0x01, 0x39, 0x0a, 0x24, 0x3c, 0x7e, 0xbe, 0x4e,
+ 0x53, 0xf3, 0xeb, 0x06, 0x66, 0x51, 0x28, 0x1d,
+ 0xbd, 0x41, 0x0a, 0x01, 0xab, 0x16, 0x47, 0x27,
+ 0x47, 0x47, 0xf7, 0xcb, 0x46, 0x0a, 0x70, 0x9e,
+ 0x01, 0x9c, 0x09, 0xe1, 0x2a, 0x00, 0x1a, 0xd8,
+ 0xd4, 0x79, 0x9d, 0x80, 0x15, 0x8e, 0x53, 0x2a,
+ 0x65, 0x83, 0x78, 0x3e, 0x03, 0x00, 0x07, 0x12,
+ 0x1f, 0x33, 0x3e, 0x7b, 0x13, 0x37, 0xf1, 0xc3,
+ 0xef, 0xb7, 0xc1, 0x20, 0x3c, 0x3e, 0x67, 0x66,
+ 0x5d, 0x88, 0xa7, 0x7d, 0x33, 0x50, 0x77, 0xb0,
+ 0x28, 0x8e, 0xe7, 0x2c, 0x2e, 0x7a, 0xf4, 0x3c,
+ 0x8d, 0x74, 0x83, 0xaf, 0x8e, 0x87, 0x0f, 0xe4,
+ 0x50, 0xff, 0x84, 0x5c, 0x47, 0x0c, 0x6a, 0x49,
+ 0xbf, 0x42, 0x86, 0x77, 0x15, 0x48, 0xa5, 0x90,
+ 0x5d, 0x93, 0xd6, 0x2a, 0x11, 0xd5, 0xd5, 0x11,
+ 0xaa, 0xce, 0xe7, 0x6f, 0xa5, 0xb0, 0x09, 0x2c,
+ 0x8d, 0xd3, 0x92, 0xf0, 0x5a, 0x2a, 0xda, 0x5b,
+ 0x1e, 0xd5, 0x9a, 0xc4, 0xc4, 0xf3, 0x49, 0x74,
+ 0x41, 0xca, 0xe8, 0xc1, 0xf8, 0x44, 0xd6, 0x3c,
+ 0xae, 0x6c, 0x1d, 0x9a, 0x30, 0x04, 0x4d, 0x27,
+ 0x0e, 0xb1, 0x5f, 0x59, 0xa2, 0x24, 0xe8, 0xe1,
+ 0x98, 0xc5, 0x6a, 0x4c, 0xfe, 0x41, 0xd2, 0x27,
+ 0x42, 0x52, 0xe1, 0xe9, 0x7d, 0x62, 0xe4, 0x88,
+ 0x0f, 0xad, 0xb2, 0x70, 0xcb, 0x9d, 0x4c, 0x27,
+ 0x2e, 0x76, 0x1e, 0x1a, 0x63, 0x65, 0xf5, 0x3b,
+ 0xf8, 0x57, 0x69, 0xeb, 0x5b, 0x38, 0x26, 0x39,
+ 0x33, 0x25, 0x45, 0x3e, 0x91, 0xb8, 0xd8, 0xc7,
+ 0xd5, 0x42, 0xc0, 0x22, 0x31, 0x74, 0xf4, 0xbc,
+ 0x0c, 0x23, 0xf1, 0xca, 0xc1, 0x8d, 0xd7, 0xbe,
+ 0xc9, 0x62, 0xe4, 0x08, 0x1a, 0xcf, 0x36, 0xd5,
+ 0xfe, 0x55, 0x21, 0x59, 0x91, 0x87, 0x87, 0xdf,
+ 0x06, 0xdb, 0xdf, 0x96, 0x45, 0x58, 0xda, 0x05,
+ 0xcd, 0x50, 0x4d, 0xd2, 0x7d, 0x05, 0x18, 0x73,
+ 0x6a, 0x8d, 0x11, 0x85, 0xa6, 0x88, 0xe8, 0xda,
+ 0xe6, 0x30, 0x33, 0xa4, 0x89, 0x31, 0x75, 0xbe,
+ 0x69, 0x43, 0x84, 0x43, 0x50, 0x87, 0xdd, 0x71,
+ 0x36, 0x83, 0xc3, 0x78, 0x74, 0x24, 0x0a, 0xed,
+ 0x7b, 0xdb, 0xa4, 0x24, 0x0b, 0xb9, 0x7e, 0x5d,
+ 0xff, 0xde, 0xb1, 0xef, 0x61, 0x5a, 0x45, 0x33,
+ 0xf6, 0x17, 0x07, 0x08, 0x98, 0x83, 0x92, 0x0f,
+ 0x23, 0x6d, 0xe6, 0xaa, 0x17, 0x54, 0xad, 0x6a,
+ 0xc8, 0xdb, 0x26, 0xbe, 0xb8, 0xb6, 0x08, 0xfa,
+ 0x68, 0xf1, 0xd7, 0x79, 0x6f, 0x18, 0xb4, 0x9e,
+ 0x2d, 0x3f, 0x1b, 0x64, 0xaf, 0x8d, 0x06, 0x0e,
+ 0x49, 0x28, 0xe0, 0x5d, 0x45, 0x68, 0x13, 0x87,
+ 0xfa, 0xde, 0x40, 0x7b, 0xd2, 0xc3, 0x94, 0xd5,
+ 0xe1, 0xd9, 0xc2, 0xaf, 0x55, 0x89, 0xeb, 0xb4,
+ 0x12, 0x59, 0xa8, 0xd4, 0xc5, 0x29, 0x66, 0x38,
+ 0xe6, 0xac, 0x22, 0x22, 0xd9, 0x64, 0x9b, 0x34,
+ 0x0a, 0x32, 0x9f, 0xc2, 0xbf, 0x17, 0x6c, 0x3f,
+ 0x71, 0x7a, 0x38, 0x6b, 0x98, 0xfb, 0x49, 0x36,
+ 0x89, 0xc9, 0xe2, 0xd6, 0xc7, 0x5d, 0xd0, 0x69,
+ 0x5f, 0x23, 0x35, 0xc9, 0x30, 0xe2, 0xfd, 0x44,
+ 0x58, 0x39, 0xd7, 0x97, 0xfb, 0x5c, 0x00, 0xd5,
+ 0x4f, 0x7a, 0x1a, 0x95, 0x8b, 0x62, 0x4b, 0xce,
+ 0xe5, 0x91, 0x21, 0x7b, 0x30, 0x00, 0xd6, 0xdd,
+ 0x6d, 0x02, 0x86, 0x49, 0x0f, 0x3c, 0x1a, 0x27,
+ 0x3c, 0xd3, 0x0e, 0x71, 0xf2, 0xff, 0xf5, 0x2f,
+ 0x87, 0xac, 0x67, 0x59, 0x81, 0xa3, 0xf7, 0xf8,
+ 0xd6, 0x11, 0x0c, 0x84, 0xa9, 0x03, 0xee, 0x2a,
+ 0xc4, 0xf3, 0x22, 0xab, 0x7c, 0xe2, 0x25, 0xf5,
+ 0x67, 0xa3, 0xe4, 0x11, 0xe0, 0x59, 0xb3, 0xca,
+ 0x87, 0xa0, 0xae, 0xc9, 0xa6, 0x62, 0x1b, 0x6e,
+ 0x4d, 0x02, 0x6b, 0x07, 0x9d, 0xfd, 0xd0, 0x92,
+ 0x06, 0xe1, 0xb2, 0x9a, 0x4a, 0x1f, 0x1f, 0x13,
+ 0x49, 0x99, 0x97, 0x08, 0xde, 0x7f, 0x98, 0xaf,
+ 0x51, 0x98, 0xee, 0x2c, 0xcb, 0xf0, 0x0b, 0xc6,
+ 0xb6, 0xb7, 0x2d, 0x9a, 0xb1, 0xac, 0xa6, 0xe3,
+ 0x15, 0x77, 0x9d, 0x6b, 0x1a, 0xe4, 0xfc, 0x8b,
+ 0xf2, 0x17, 0x59, 0x08, 0x04, 0x58, 0x81, 0x9d,
+ 0x1b, 0x1b, 0x69, 0x55, 0xc2, 0xb4, 0x3c, 0x1f,
+ 0x50, 0xf1, 0x7f, 0x77, 0x90, 0x4c, 0x66, 0x40,
+ 0x5a, 0xc0, 0x33, 0x1f, 0xcb, 0x05, 0x6d, 0x5c,
+ 0x06, 0x87, 0x52, 0xa2, 0x8f, 0x26, 0xd5, 0x4f
+};
+static const u8 dec_assoc010[] __initconst = {
+ 0xd2, 0xa1, 0x70, 0xdb, 0x7a, 0xf8, 0xfa, 0x27,
+ 0xba, 0x73, 0x0f, 0xbf, 0x3d, 0x1e, 0x82, 0xb2
+};
+static const u8 dec_nonce010[] __initconst = {
+ 0xdb, 0x92, 0x0f, 0x7f, 0x17, 0x54, 0x0c, 0x30
+};
+static const u8 dec_key010[] __initconst = {
+ 0x47, 0x11, 0xeb, 0x86, 0x2b, 0x2c, 0xab, 0x44,
+ 0x34, 0xda, 0x7f, 0x57, 0x03, 0x39, 0x0c, 0xaf,
+ 0x2c, 0x14, 0xfd, 0x65, 0x23, 0xe9, 0x8e, 0x74,
+ 0xd5, 0x08, 0x68, 0x08, 0xe7, 0xb4, 0x72, 0xd7
+};
+
+static const u8 dec_input011[] __initconst = {
+ 0x6a, 0xfc, 0x4b, 0x25, 0xdf, 0xc0, 0xe4, 0xe8,
+ 0x17, 0x4d, 0x4c, 0xc9, 0x7e, 0xde, 0x3a, 0xcc,
+ 0x3c, 0xba, 0x6a, 0x77, 0x47, 0xdb, 0xe3, 0x74,
+ 0x7a, 0x4d, 0x5f, 0x8d, 0x37, 0x55, 0x80, 0x73,
+ 0x90, 0x66, 0x5d, 0x3a, 0x7d, 0x5d, 0x86, 0x5e,
+ 0x8d, 0xfd, 0x83, 0xff, 0x4e, 0x74, 0x6f, 0xf9,
+ 0xe6, 0x70, 0x17, 0x70, 0x3e, 0x96, 0xa7, 0x7e,
+ 0xcb, 0xab, 0x8f, 0x58, 0x24, 0x9b, 0x01, 0xfd,
+ 0xcb, 0xe6, 0x4d, 0x9b, 0xf0, 0x88, 0x94, 0x57,
+ 0x66, 0xef, 0x72, 0x4c, 0x42, 0x6e, 0x16, 0x19,
+ 0x15, 0xea, 0x70, 0x5b, 0xac, 0x13, 0xdb, 0x9f,
+ 0x18, 0xe2, 0x3c, 0x26, 0x97, 0xbc, 0xdc, 0x45,
+ 0x8c, 0x6c, 0x24, 0x69, 0x9c, 0xf7, 0x65, 0x1e,
+ 0x18, 0x59, 0x31, 0x7c, 0xe4, 0x73, 0xbc, 0x39,
+ 0x62, 0xc6, 0x5c, 0x9f, 0xbf, 0xfa, 0x90, 0x03,
+ 0xc9, 0x72, 0x26, 0xb6, 0x1b, 0xc2, 0xb7, 0x3f,
+ 0xf2, 0x13, 0x77, 0xf2, 0x8d, 0xb9, 0x47, 0xd0,
+ 0x53, 0xdd, 0xc8, 0x91, 0x83, 0x8b, 0xb1, 0xce,
+ 0xa3, 0xfe, 0xcd, 0xd9, 0xdd, 0x92, 0x7b, 0xdb,
+ 0xb8, 0xfb, 0xc9, 0x2d, 0x01, 0x59, 0x39, 0x52,
+ 0xad, 0x1b, 0xec, 0xcf, 0xd7, 0x70, 0x13, 0x21,
+ 0xf5, 0x47, 0xaa, 0x18, 0x21, 0x5c, 0xc9, 0x9a,
+ 0xd2, 0x6b, 0x05, 0x9c, 0x01, 0xa1, 0xda, 0x35,
+ 0x5d, 0xb3, 0x70, 0xe6, 0xa9, 0x80, 0x8b, 0x91,
+ 0xb7, 0xb3, 0x5f, 0x24, 0x9a, 0xb7, 0xd1, 0x6b,
+ 0xa1, 0x1c, 0x50, 0xba, 0x49, 0xe0, 0xee, 0x2e,
+ 0x75, 0xac, 0x69, 0xc0, 0xeb, 0x03, 0xdd, 0x19,
+ 0xe5, 0xf6, 0x06, 0xdd, 0xc3, 0xd7, 0x2b, 0x07,
+ 0x07, 0x30, 0xa7, 0x19, 0x0c, 0xbf, 0xe6, 0x18,
+ 0xcc, 0xb1, 0x01, 0x11, 0x85, 0x77, 0x1d, 0x96,
+ 0xa7, 0xa3, 0x00, 0x84, 0x02, 0xa2, 0x83, 0x68,
+ 0xda, 0x17, 0x27, 0xc8, 0x7f, 0x23, 0xb7, 0xf4,
+ 0x13, 0x85, 0xcf, 0xdd, 0x7a, 0x7d, 0x24, 0x57,
+ 0xfe, 0x05, 0x93, 0xf5, 0x74, 0xce, 0xed, 0x0c,
+ 0x20, 0x98, 0x8d, 0x92, 0x30, 0xa1, 0x29, 0x23,
+ 0x1a, 0xa0, 0x4f, 0x69, 0x56, 0x4c, 0xe1, 0xc8,
+ 0xce, 0xf6, 0x9a, 0x0c, 0xa4, 0xfa, 0x04, 0xf6,
+ 0x62, 0x95, 0xf2, 0xfa, 0xc7, 0x40, 0x68, 0x40,
+ 0x8f, 0x41, 0xda, 0xb4, 0x26, 0x6f, 0x70, 0xab,
+ 0x40, 0x61, 0xa4, 0x0e, 0x75, 0xfb, 0x86, 0xeb,
+ 0x9d, 0x9a, 0x1f, 0xec, 0x76, 0x99, 0xe7, 0xea,
+ 0xaa, 0x1e, 0x2d, 0xb5, 0xd4, 0xa6, 0x1a, 0xb8,
+ 0x61, 0x0a, 0x1d, 0x16, 0x5b, 0x98, 0xc2, 0x31,
+ 0x40, 0xe7, 0x23, 0x1d, 0x66, 0x99, 0xc8, 0xc0,
+ 0xd7, 0xce, 0xf3, 0x57, 0x40, 0x04, 0x3f, 0xfc,
+ 0xea, 0xb3, 0xfc, 0xd2, 0xd3, 0x99, 0xa4, 0x94,
+ 0x69, 0xa0, 0xef, 0xd1, 0x85, 0xb3, 0xa6, 0xb1,
+ 0x28, 0xbf, 0x94, 0x67, 0x22, 0xc3, 0x36, 0x46,
+ 0xf8, 0xd2, 0x0f, 0x5f, 0xf4, 0x59, 0x80, 0xe6,
+ 0x2d, 0x43, 0x08, 0x7d, 0x19, 0x09, 0x97, 0xa7,
+ 0x4c, 0x3d, 0x8d, 0xba, 0x65, 0x62, 0xa3, 0x71,
+ 0x33, 0x29, 0x62, 0xdb, 0xc1, 0x33, 0x34, 0x1a,
+ 0x63, 0x33, 0x16, 0xb6, 0x64, 0x7e, 0xab, 0x33,
+ 0xf0, 0xe6, 0x26, 0x68, 0xba, 0x1d, 0x2e, 0x38,
+ 0x08, 0xe6, 0x02, 0xd3, 0x25, 0x2c, 0x47, 0x23,
+ 0x58, 0x34, 0x0f, 0x9d, 0x63, 0x4f, 0x63, 0xbb,
+ 0x7f, 0x3b, 0x34, 0x38, 0xa7, 0xb5, 0x8d, 0x65,
+ 0xd9, 0x9f, 0x79, 0x55, 0x3e, 0x4d, 0xe7, 0x73,
+ 0xd8, 0xf6, 0x98, 0x97, 0x84, 0x60, 0x9c, 0xc8,
+ 0xa9, 0x3c, 0xf6, 0xdc, 0x12, 0x5c, 0xe1, 0xbb,
+ 0x0b, 0x8b, 0x98, 0x9c, 0x9d, 0x26, 0x7c, 0x4a,
+ 0xe6, 0x46, 0x36, 0x58, 0x21, 0x4a, 0xee, 0xca,
+ 0xd7, 0x3b, 0xc2, 0x6c, 0x49, 0x2f, 0xe5, 0xd5,
+ 0x03, 0x59, 0x84, 0x53, 0xcb, 0xfe, 0x92, 0x71,
+ 0x2e, 0x7c, 0x21, 0xcc, 0x99, 0x85, 0x7f, 0xb8,
+ 0x74, 0x90, 0x13, 0x42, 0x3f, 0xe0, 0x6b, 0x1d,
+ 0xf2, 0x4d, 0x54, 0xd4, 0xfc, 0x3a, 0x05, 0xe6,
+ 0x74, 0xaf, 0xa6, 0xa0, 0x2a, 0x20, 0x23, 0x5d,
+ 0x34, 0x5c, 0xd9, 0x3e, 0x4e, 0xfa, 0x93, 0xe7,
+ 0xaa, 0xe9, 0x6f, 0x08, 0x43, 0x67, 0x41, 0xc5,
+ 0xad, 0xfb, 0x31, 0x95, 0x82, 0x73, 0x32, 0xd8,
+ 0xa6, 0xa3, 0xed, 0x0e, 0x2d, 0xf6, 0x5f, 0xfd,
+ 0x80, 0xa6, 0x7a, 0xe0, 0xdf, 0x78, 0x15, 0x29,
+ 0x74, 0x33, 0xd0, 0x9e, 0x83, 0x86, 0x72, 0x22,
+ 0x57, 0x29, 0xb9, 0x9e, 0x5d, 0xd3, 0x1a, 0xb5,
+ 0x96, 0x72, 0x41, 0x3d, 0xf1, 0x64, 0x43, 0x67,
+ 0xee, 0xaa, 0x5c, 0xd3, 0x9a, 0x96, 0x13, 0x11,
+ 0x5d, 0xf3, 0x0c, 0x87, 0x82, 0x1e, 0x41, 0x9e,
+ 0xd0, 0x27, 0xd7, 0x54, 0x3b, 0x67, 0x73, 0x09,
+ 0x91, 0xe9, 0xd5, 0x36, 0xa7, 0xb5, 0x55, 0xe4,
+ 0xf3, 0x21, 0x51, 0x49, 0x22, 0x07, 0x55, 0x4f,
+ 0x44, 0x4b, 0xd2, 0x15, 0x93, 0x17, 0x2a, 0xfa,
+ 0x4d, 0x4a, 0x57, 0xdb, 0x4c, 0xa6, 0xeb, 0xec,
+ 0x53, 0x25, 0x6c, 0x21, 0xed, 0x00, 0x4c, 0x3b,
+ 0xca, 0x14, 0x57, 0xa9, 0xd6, 0x6a, 0xcd, 0x8d,
+ 0x5e, 0x74, 0xac, 0x72, 0xc1, 0x97, 0xe5, 0x1b,
+ 0x45, 0x4e, 0xda, 0xfc, 0xcc, 0x40, 0xe8, 0x48,
+ 0x88, 0x0b, 0xa3, 0xe3, 0x8d, 0x83, 0x42, 0xc3,
+ 0x23, 0xfd, 0x68, 0xb5, 0x8e, 0xf1, 0x9d, 0x63,
+ 0x77, 0xe9, 0xa3, 0x8e, 0x8c, 0x26, 0x6b, 0xbd,
+ 0x72, 0x73, 0x35, 0x0c, 0x03, 0xf8, 0x43, 0x78,
+ 0x52, 0x71, 0x15, 0x1f, 0x71, 0x5d, 0x6e, 0xed,
+ 0xb9, 0xcc, 0x86, 0x30, 0xdb, 0x2b, 0xd3, 0x82,
+ 0x88, 0x23, 0x71, 0x90, 0x53, 0x5c, 0xa9, 0x2f,
+ 0x76, 0x01, 0xb7, 0x9a, 0xfe, 0x43, 0x55, 0xa3,
+ 0x04, 0x9b, 0x0e, 0xe4, 0x59, 0xdf, 0xc9, 0xe9,
+ 0xb1, 0xea, 0x29, 0x28, 0x3c, 0x5c, 0xae, 0x72,
+ 0x84, 0xb6, 0xc6, 0xeb, 0x0c, 0x27, 0x07, 0x74,
+ 0x90, 0x0d, 0x31, 0xb0, 0x00, 0x77, 0xe9, 0x40,
+ 0x70, 0x6f, 0x68, 0xa7, 0xfd, 0x06, 0xec, 0x4b,
+ 0xc0, 0xb7, 0xac, 0xbc, 0x33, 0xb7, 0x6d, 0x0a,
+ 0xbd, 0x12, 0x1b, 0x59, 0xcb, 0xdd, 0x32, 0xf5,
+ 0x1d, 0x94, 0x57, 0x76, 0x9e, 0x0c, 0x18, 0x98,
+ 0x71, 0xd7, 0x2a, 0xdb, 0x0b, 0x7b, 0xa7, 0x71,
+ 0xb7, 0x67, 0x81, 0x23, 0x96, 0xae, 0xb9, 0x7e,
+ 0x32, 0x43, 0x92, 0x8a, 0x19, 0xa0, 0xc4, 0xd4,
+ 0x3b, 0x57, 0xf9, 0x4a, 0x2c, 0xfb, 0x51, 0x46,
+ 0xbb, 0xcb, 0x5d, 0xb3, 0xef, 0x13, 0x93, 0x6e,
+ 0x68, 0x42, 0x54, 0x57, 0xd3, 0x6a, 0x3a, 0x8f,
+ 0x9d, 0x66, 0xbf, 0xbd, 0x36, 0x23, 0xf5, 0x93,
+ 0x83, 0x7b, 0x9c, 0xc0, 0xdd, 0xc5, 0x49, 0xc0,
+ 0x64, 0xed, 0x07, 0x12, 0xb3, 0xe6, 0xe4, 0xe5,
+ 0x38, 0x95, 0x23, 0xb1, 0xa0, 0x3b, 0x1a, 0x61,
+ 0xda, 0x17, 0xac, 0xc3, 0x58, 0xdd, 0x74, 0x64,
+ 0x22, 0x11, 0xe8, 0x32, 0x1d, 0x16, 0x93, 0x85,
+ 0x99, 0xa5, 0x9c, 0x34, 0x55, 0xb1, 0xe9, 0x20,
+ 0x72, 0xc9, 0x28, 0x7b, 0x79, 0x00, 0xa1, 0xa6,
+ 0xa3, 0x27, 0x40, 0x18, 0x8a, 0x54, 0xe0, 0xcc,
+ 0xe8, 0x4e, 0x8e, 0x43, 0x96, 0xe7, 0x3f, 0xc8,
+ 0xe9, 0xb2, 0xf9, 0xc9, 0xda, 0x04, 0x71, 0x50,
+ 0x47, 0xe4, 0xaa, 0xce, 0xa2, 0x30, 0xc8, 0xe4,
+ 0xac, 0xc7, 0x0d, 0x06, 0x2e, 0xe6, 0xe8, 0x80,
+ 0x36, 0x29, 0x9e, 0x01, 0xb8, 0xc3, 0xf0, 0xa0,
+ 0x5d, 0x7a, 0xca, 0x4d, 0xa0, 0x57, 0xbd, 0x2a,
+ 0x45, 0xa7, 0x7f, 0x9c, 0x93, 0x07, 0x8f, 0x35,
+ 0x67, 0x92, 0xe3, 0xe9, 0x7f, 0xa8, 0x61, 0x43,
+ 0x9e, 0x25, 0x4f, 0x33, 0x76, 0x13, 0x6e, 0x12,
+ 0xb9, 0xdd, 0xa4, 0x7c, 0x08, 0x9f, 0x7c, 0xe7,
+ 0x0a, 0x8d, 0x84, 0x06, 0xa4, 0x33, 0x17, 0x34,
+ 0x5e, 0x10, 0x7c, 0xc0, 0xa8, 0x3d, 0x1f, 0x42,
+ 0x20, 0x51, 0x65, 0x5d, 0x09, 0xc3, 0xaa, 0xc0,
+ 0xc8, 0x0d, 0xf0, 0x79, 0xbc, 0x20, 0x1b, 0x95,
+ 0xe7, 0x06, 0x7d, 0x47, 0x20, 0x03, 0x1a, 0x74,
+ 0xdd, 0xe2, 0xd4, 0xae, 0x38, 0x71, 0x9b, 0xf5,
+ 0x80, 0xec, 0x08, 0x4e, 0x56, 0xba, 0x76, 0x12,
+ 0x1a, 0xdf, 0x48, 0xf3, 0xae, 0xb3, 0xe6, 0xe6,
+ 0xbe, 0xc0, 0x91, 0x2e, 0x01, 0xb3, 0x01, 0x86,
+ 0xa2, 0xb9, 0x52, 0xd1, 0x21, 0xae, 0xd4, 0x97,
+ 0x1d, 0xef, 0x41, 0x12, 0x95, 0x3d, 0x48, 0x45,
+ 0x1c, 0x56, 0x32, 0x8f, 0xb8, 0x43, 0xbb, 0x19,
+ 0xf3, 0xca, 0xe9, 0xeb, 0x6d, 0x84, 0xbe, 0x86,
+ 0x06, 0xe2, 0x36, 0xb2, 0x62, 0x9d, 0xd3, 0x4c,
+ 0x48, 0x18, 0x54, 0x13, 0x4e, 0xcf, 0xfd, 0xba,
+ 0x84, 0xb9, 0x30, 0x53, 0xcf, 0xfb, 0xb9, 0x29,
+ 0x8f, 0xdc, 0x9f, 0xef, 0x60, 0x0b, 0x64, 0xf6,
+ 0x8b, 0xee, 0xa6, 0x91, 0xc2, 0x41, 0x6c, 0xf6,
+ 0xfa, 0x79, 0x67, 0x4b, 0xc1, 0x3f, 0xaf, 0x09,
+ 0x81, 0xd4, 0x5d, 0xcb, 0x09, 0xdf, 0x36, 0x31,
+ 0xc0, 0x14, 0x3c, 0x7c, 0x0e, 0x65, 0x95, 0x99,
+ 0x6d, 0xa3, 0xf4, 0xd7, 0x38, 0xee, 0x1a, 0x2b,
+ 0x37, 0xe2, 0xa4, 0x3b, 0x4b, 0xd0, 0x65, 0xca,
+ 0xf8, 0xc3, 0xe8, 0x15, 0x20, 0xef, 0xf2, 0x00,
+ 0xfd, 0x01, 0x09, 0xc5, 0xc8, 0x17, 0x04, 0x93,
+ 0xd0, 0x93, 0x03, 0x55, 0xc5, 0xfe, 0x32, 0xa3,
+ 0x3e, 0x28, 0x2d, 0x3b, 0x93, 0x8a, 0xcc, 0x07,
+ 0x72, 0x80, 0x8b, 0x74, 0x16, 0x24, 0xbb, 0xda,
+ 0x94, 0x39, 0x30, 0x8f, 0xb1, 0xcd, 0x4a, 0x90,
+ 0x92, 0x7c, 0x14, 0x8f, 0x95, 0x4e, 0xac, 0x9b,
+ 0xd8, 0x8f, 0x1a, 0x87, 0xa4, 0x32, 0x27, 0x8a,
+ 0xba, 0xf7, 0x41, 0xcf, 0x84, 0x37, 0x19, 0xe6,
+ 0x06, 0xf5, 0x0e, 0xcf, 0x36, 0xf5, 0x9e, 0x6c,
+ 0xde, 0xbc, 0xff, 0x64, 0x7e, 0x4e, 0x59, 0x57,
+ 0x48, 0xfe, 0x14, 0xf7, 0x9c, 0x93, 0x5d, 0x15,
+ 0xad, 0xcc, 0x11, 0xb1, 0x17, 0x18, 0xb2, 0x7e,
+ 0xcc, 0xab, 0xe9, 0xce, 0x7d, 0x77, 0x5b, 0x51,
+ 0x1b, 0x1e, 0x20, 0xa8, 0x32, 0x06, 0x0e, 0x75,
+ 0x93, 0xac, 0xdb, 0x35, 0x37, 0x1f, 0xe9, 0x19,
+ 0x1d, 0xb4, 0x71, 0x97, 0xd6, 0x4e, 0x2c, 0x08,
+ 0xa5, 0x13, 0xf9, 0x0e, 0x7e, 0x78, 0x6e, 0x14,
+ 0xe0, 0xa9, 0xb9, 0x96, 0x4c, 0x80, 0x82, 0xba,
+ 0x17, 0xb3, 0x9d, 0x69, 0xb0, 0x84, 0x46, 0xff,
+ 0xf9, 0x52, 0x79, 0x94, 0x58, 0x3a, 0x62, 0x90,
+ 0x15, 0x35, 0x71, 0x10, 0x37, 0xed, 0xa1, 0x8e,
+ 0x53, 0x6e, 0xf4, 0x26, 0x57, 0x93, 0x15, 0x93,
+ 0xf6, 0x81, 0x2c, 0x5a, 0x10, 0xda, 0x92, 0xad,
+ 0x2f, 0xdb, 0x28, 0x31, 0x2d, 0x55, 0x04, 0xd2,
+ 0x06, 0x28, 0x8c, 0x1e, 0xdc, 0xea, 0x54, 0xac,
+ 0xff, 0xb7, 0x6c, 0x30, 0x15, 0xd4, 0xb4, 0x0d,
+ 0x00, 0x93, 0x57, 0xdd, 0xd2, 0x07, 0x07, 0x06,
+ 0xd9, 0x43, 0x9b, 0xcd, 0x3a, 0xf4, 0x7d, 0x4c,
+ 0x36, 0x5d, 0x23, 0xa2, 0xcc, 0x57, 0x40, 0x91,
+ 0xe9, 0x2c, 0x2f, 0x2c, 0xd5, 0x30, 0x9b, 0x17,
+ 0xb0, 0xc9, 0xf7, 0xa7, 0x2f, 0xd1, 0x93, 0x20,
+ 0x6b, 0xc6, 0xc1, 0xe4, 0x6f, 0xcb, 0xd1, 0xe7,
+ 0x09, 0x0f, 0x9e, 0xdc, 0xaa, 0x9f, 0x2f, 0xdf,
+ 0x56, 0x9f, 0xd4, 0x33, 0x04, 0xaf, 0xd3, 0x6c,
+ 0x58, 0x61, 0xf0, 0x30, 0xec, 0xf2, 0x7f, 0xf2,
+ 0x9c, 0xdf, 0x39, 0xbb, 0x6f, 0xa2, 0x8c, 0x7e,
+ 0xc4, 0x22, 0x51, 0x71, 0xc0, 0x4d, 0x14, 0x1a,
+ 0xc4, 0xcd, 0x04, 0xd9, 0x87, 0x08, 0x50, 0x05,
+ 0xcc, 0xaf, 0xf6, 0xf0, 0x8f, 0x92, 0x54, 0x58,
+ 0xc2, 0xc7, 0x09, 0x7a, 0x59, 0x02, 0x05, 0xe8,
+ 0xb0, 0x86, 0xd9, 0xbf, 0x7b, 0x35, 0x51, 0x4d,
+ 0xaf, 0x08, 0x97, 0x2c, 0x65, 0xda, 0x2a, 0x71,
+ 0x3a, 0xa8, 0x51, 0xcc, 0xf2, 0x73, 0x27, 0xc3,
+ 0xfd, 0x62, 0xcf, 0xe3, 0xb2, 0xca, 0xcb, 0xbe,
+ 0x1a, 0x0a, 0xa1, 0x34, 0x7b, 0x77, 0xc4, 0x62,
+ 0x68, 0x78, 0x5f, 0x94, 0x07, 0x04, 0x65, 0x16,
+ 0x4b, 0x61, 0xcb, 0xff, 0x75, 0x26, 0x50, 0x66,
+ 0x1f, 0x6e, 0x93, 0xf8, 0xc5, 0x51, 0xeb, 0xa4,
+ 0x4a, 0x48, 0x68, 0x6b, 0xe2, 0x5e, 0x44, 0xb2,
+ 0x50, 0x2c, 0x6c, 0xae, 0x79, 0x4e, 0x66, 0x35,
+ 0x81, 0x50, 0xac, 0xbc, 0x3f, 0xb1, 0x0c, 0xf3,
+ 0x05, 0x3c, 0x4a, 0xa3, 0x6c, 0x2a, 0x79, 0xb4,
+ 0xb7, 0xab, 0xca, 0xc7, 0x9b, 0x8e, 0xcd, 0x5f,
+ 0x11, 0x03, 0xcb, 0x30, 0xa3, 0xab, 0xda, 0xfe,
+ 0x64, 0xb9, 0xbb, 0xd8, 0x5e, 0x3a, 0x1a, 0x56,
+ 0xe5, 0x05, 0x48, 0x90, 0x1e, 0x61, 0x69, 0x1b,
+ 0x22, 0xe6, 0x1a, 0x3c, 0x75, 0xad, 0x1f, 0x37,
+ 0x28, 0xdc, 0xe4, 0x6d, 0xbd, 0x42, 0xdc, 0xd3,
+ 0xc8, 0xb6, 0x1c, 0x48, 0xfe, 0x94, 0x77, 0x7f,
+ 0xbd, 0x62, 0xac, 0xa3, 0x47, 0x27, 0xcf, 0x5f,
+ 0xd9, 0xdb, 0xaf, 0xec, 0xf7, 0x5e, 0xc1, 0xb0,
+ 0x9d, 0x01, 0x26, 0x99, 0x7e, 0x8f, 0x03, 0x70,
+ 0xb5, 0x42, 0xbe, 0x67, 0x28, 0x1b, 0x7c, 0xbd,
+ 0x61, 0x21, 0x97, 0xcc, 0x5c, 0xe1, 0x97, 0x8f,
+ 0x8d, 0xde, 0x2b, 0xaa, 0xa7, 0x71, 0x1d, 0x1e,
+ 0x02, 0x73, 0x70, 0x58, 0x32, 0x5b, 0x1d, 0x67,
+ 0x3d, 0xe0, 0x74, 0x4f, 0x03, 0xf2, 0x70, 0x51,
+ 0x79, 0xf1, 0x61, 0x70, 0x15, 0x74, 0x9d, 0x23,
+ 0x89, 0xde, 0xac, 0xfd, 0xde, 0xd0, 0x1f, 0xc3,
+ 0x87, 0x44, 0x35, 0x4b, 0xe5, 0xb0, 0x60, 0xc5,
+ 0x22, 0xe4, 0x9e, 0xca, 0xeb, 0xd5, 0x3a, 0x09,
+ 0x45, 0xa4, 0xdb, 0xfa, 0x3f, 0xeb, 0x1b, 0xc7,
+ 0xc8, 0x14, 0x99, 0x51, 0x92, 0x10, 0xed, 0xed,
+ 0x28, 0xe0, 0xa1, 0xf8, 0x26, 0xcf, 0xcd, 0xcb,
+ 0x63, 0xa1, 0x3b, 0xe3, 0xdf, 0x7e, 0xfe, 0xa6,
+ 0xf0, 0x81, 0x9a, 0xbf, 0x55, 0xde, 0x54, 0xd5,
+ 0x56, 0x60, 0x98, 0x10, 0x68, 0xf4, 0x38, 0x96,
+ 0x8e, 0x6f, 0x1d, 0x44, 0x7f, 0xd6, 0x2f, 0xfe,
+ 0x55, 0xfb, 0x0c, 0x7e, 0x67, 0xe2, 0x61, 0x44,
+ 0xed, 0xf2, 0x35, 0x30, 0x5d, 0xe9, 0xc7, 0xd6,
+ 0x6d, 0xe0, 0xa0, 0xed, 0xf3, 0xfc, 0xd8, 0x3e,
+ 0x0a, 0x7b, 0xcd, 0xaf, 0x65, 0x68, 0x18, 0xc0,
+ 0xec, 0x04, 0x1c, 0x74, 0x6d, 0xe2, 0x6e, 0x79,
+ 0xd4, 0x11, 0x2b, 0x62, 0xd5, 0x27, 0xad, 0x4f,
+ 0x01, 0x59, 0x73, 0xcc, 0x6a, 0x53, 0xfb, 0x2d,
+ 0xd5, 0x4e, 0x99, 0x21, 0x65, 0x4d, 0xf5, 0x82,
+ 0xf7, 0xd8, 0x42, 0xce, 0x6f, 0x3d, 0x36, 0x47,
+ 0xf1, 0x05, 0x16, 0xe8, 0x1b, 0x6a, 0x8f, 0x93,
+ 0xf2, 0x8f, 0x37, 0x40, 0x12, 0x28, 0xa3, 0xe6,
+ 0xb9, 0x17, 0x4a, 0x1f, 0xb1, 0xd1, 0x66, 0x69,
+ 0x86, 0xc4, 0xfc, 0x97, 0xae, 0x3f, 0x8f, 0x1e,
+ 0x2b, 0xdf, 0xcd, 0xf9, 0x3c
+};
+static const u8 dec_output011[] __initconst = {
+ 0x7a, 0x57, 0xf2, 0xc7, 0x06, 0x3f, 0x50, 0x7b,
+ 0x36, 0x1a, 0x66, 0x5c, 0xb9, 0x0e, 0x5e, 0x3b,
+ 0x45, 0x60, 0xbe, 0x9a, 0x31, 0x9f, 0xff, 0x5d,
+ 0x66, 0x34, 0xb4, 0xdc, 0xfb, 0x9d, 0x8e, 0xee,
+ 0x6a, 0x33, 0xa4, 0x07, 0x3c, 0xf9, 0x4c, 0x30,
+ 0xa1, 0x24, 0x52, 0xf9, 0x50, 0x46, 0x88, 0x20,
+ 0x02, 0x32, 0x3a, 0x0e, 0x99, 0x63, 0xaf, 0x1f,
+ 0x15, 0x28, 0x2a, 0x05, 0xff, 0x57, 0x59, 0x5e,
+ 0x18, 0xa1, 0x1f, 0xd0, 0x92, 0x5c, 0x88, 0x66,
+ 0x1b, 0x00, 0x64, 0xa5, 0x93, 0x8d, 0x06, 0x46,
+ 0xb0, 0x64, 0x8b, 0x8b, 0xef, 0x99, 0x05, 0x35,
+ 0x85, 0xb3, 0xf3, 0x33, 0xbb, 0xec, 0x66, 0xb6,
+ 0x3d, 0x57, 0x42, 0xe3, 0xb4, 0xc6, 0xaa, 0xb0,
+ 0x41, 0x2a, 0xb9, 0x59, 0xa9, 0xf6, 0x3e, 0x15,
+ 0x26, 0x12, 0x03, 0x21, 0x4c, 0x74, 0x43, 0x13,
+ 0x2a, 0x03, 0x27, 0x09, 0xb4, 0xfb, 0xe7, 0xb7,
+ 0x40, 0xff, 0x5e, 0xce, 0x48, 0x9a, 0x60, 0xe3,
+ 0x8b, 0x80, 0x8c, 0x38, 0x2d, 0xcb, 0x93, 0x37,
+ 0x74, 0x05, 0x52, 0x6f, 0x73, 0x3e, 0xc3, 0xbc,
+ 0xca, 0x72, 0x0a, 0xeb, 0xf1, 0x3b, 0xa0, 0x95,
+ 0xdc, 0x8a, 0xc4, 0xa9, 0xdc, 0xca, 0x44, 0xd8,
+ 0x08, 0x63, 0x6a, 0x36, 0xd3, 0x3c, 0xb8, 0xac,
+ 0x46, 0x7d, 0xfd, 0xaa, 0xeb, 0x3e, 0x0f, 0x45,
+ 0x8f, 0x49, 0xda, 0x2b, 0xf2, 0x12, 0xbd, 0xaf,
+ 0x67, 0x8a, 0x63, 0x48, 0x4b, 0x55, 0x5f, 0x6d,
+ 0x8c, 0xb9, 0x76, 0x34, 0x84, 0xae, 0xc2, 0xfc,
+ 0x52, 0x64, 0x82, 0xf7, 0xb0, 0x06, 0xf0, 0x45,
+ 0x73, 0x12, 0x50, 0x30, 0x72, 0xea, 0x78, 0x9a,
+ 0xa8, 0xaf, 0xb5, 0xe3, 0xbb, 0x77, 0x52, 0xec,
+ 0x59, 0x84, 0xbf, 0x6b, 0x8f, 0xce, 0x86, 0x5e,
+ 0x1f, 0x23, 0xe9, 0xfb, 0x08, 0x86, 0xf7, 0x10,
+ 0xb9, 0xf2, 0x44, 0x96, 0x44, 0x63, 0xa9, 0xa8,
+ 0x78, 0x00, 0x23, 0xd6, 0xc7, 0xe7, 0x6e, 0x66,
+ 0x4f, 0xcc, 0xee, 0x15, 0xb3, 0xbd, 0x1d, 0xa0,
+ 0xe5, 0x9c, 0x1b, 0x24, 0x2c, 0x4d, 0x3c, 0x62,
+ 0x35, 0x9c, 0x88, 0x59, 0x09, 0xdd, 0x82, 0x1b,
+ 0xcf, 0x0a, 0x83, 0x6b, 0x3f, 0xae, 0x03, 0xc4,
+ 0xb4, 0xdd, 0x7e, 0x5b, 0x28, 0x76, 0x25, 0x96,
+ 0xd9, 0xc9, 0x9d, 0x5f, 0x86, 0xfa, 0xf6, 0xd7,
+ 0xd2, 0xe6, 0x76, 0x1d, 0x0f, 0xa1, 0xdc, 0x74,
+ 0x05, 0x1b, 0x1d, 0xe0, 0xcd, 0x16, 0xb0, 0xa8,
+ 0x8a, 0x34, 0x7b, 0x15, 0x11, 0x77, 0xe5, 0x7b,
+ 0x7e, 0x20, 0xf7, 0xda, 0x38, 0xda, 0xce, 0x70,
+ 0xe9, 0xf5, 0x6c, 0xd9, 0xbe, 0x0c, 0x4c, 0x95,
+ 0x4c, 0xc2, 0x9b, 0x34, 0x55, 0x55, 0xe1, 0xf3,
+ 0x46, 0x8e, 0x48, 0x74, 0x14, 0x4f, 0x9d, 0xc9,
+ 0xf5, 0xe8, 0x1a, 0xf0, 0x11, 0x4a, 0xc1, 0x8d,
+ 0xe0, 0x93, 0xa0, 0xbe, 0x09, 0x1c, 0x2b, 0x4e,
+ 0x0f, 0xb2, 0x87, 0x8b, 0x84, 0xfe, 0x92, 0x32,
+ 0x14, 0xd7, 0x93, 0xdf, 0xe7, 0x44, 0xbc, 0xc5,
+ 0xae, 0x53, 0x69, 0xd8, 0xb3, 0x79, 0x37, 0x80,
+ 0xe3, 0x17, 0x5c, 0xec, 0x53, 0x00, 0x9a, 0xe3,
+ 0x8e, 0xdc, 0x38, 0xb8, 0x66, 0xf0, 0xd3, 0xad,
+ 0x1d, 0x02, 0x96, 0x86, 0x3e, 0x9d, 0x3b, 0x5d,
+ 0xa5, 0x7f, 0x21, 0x10, 0xf1, 0x1f, 0x13, 0x20,
+ 0xf9, 0x57, 0x87, 0x20, 0xf5, 0x5f, 0xf1, 0x17,
+ 0x48, 0x0a, 0x51, 0x5a, 0xcd, 0x19, 0x03, 0xa6,
+ 0x5a, 0xd1, 0x12, 0x97, 0xe9, 0x48, 0xe2, 0x1d,
+ 0x83, 0x75, 0x50, 0xd9, 0x75, 0x7d, 0x6a, 0x82,
+ 0xa1, 0xf9, 0x4e, 0x54, 0x87, 0x89, 0xc9, 0x0c,
+ 0xb7, 0x5b, 0x6a, 0x91, 0xc1, 0x9c, 0xb2, 0xa9,
+ 0xdc, 0x9a, 0xa4, 0x49, 0x0a, 0x6d, 0x0d, 0xbb,
+ 0xde, 0x86, 0x44, 0xdd, 0x5d, 0x89, 0x2b, 0x96,
+ 0x0f, 0x23, 0x95, 0xad, 0xcc, 0xa2, 0xb3, 0xb9,
+ 0x7e, 0x74, 0x38, 0xba, 0x9f, 0x73, 0xae, 0x5f,
+ 0xf8, 0x68, 0xa2, 0xe0, 0xa9, 0xce, 0xbd, 0x40,
+ 0xd4, 0x4c, 0x6b, 0xd2, 0x56, 0x62, 0xb0, 0xcc,
+ 0x63, 0x7e, 0x5b, 0xd3, 0xae, 0xd1, 0x75, 0xce,
+ 0xbb, 0xb4, 0x5b, 0xa8, 0xf8, 0xb4, 0xac, 0x71,
+ 0x75, 0xaa, 0xc9, 0x9f, 0xbb, 0x6c, 0xad, 0x0f,
+ 0x55, 0x5d, 0xe8, 0x85, 0x7d, 0xf9, 0x21, 0x35,
+ 0xea, 0x92, 0x85, 0x2b, 0x00, 0xec, 0x84, 0x90,
+ 0x0a, 0x63, 0x96, 0xe4, 0x6b, 0xa9, 0x77, 0xb8,
+ 0x91, 0xf8, 0x46, 0x15, 0x72, 0x63, 0x70, 0x01,
+ 0x40, 0xa3, 0xa5, 0x76, 0x62, 0x2b, 0xbf, 0xf1,
+ 0xe5, 0x8d, 0x9f, 0xa3, 0xfa, 0x9b, 0x03, 0xbe,
+ 0xfe, 0x65, 0x6f, 0xa2, 0x29, 0x0d, 0x54, 0xb4,
+ 0x71, 0xce, 0xa9, 0xd6, 0x3d, 0x88, 0xf9, 0xaf,
+ 0x6b, 0xa8, 0x9e, 0xf4, 0x16, 0x96, 0x36, 0xb9,
+ 0x00, 0xdc, 0x10, 0xab, 0xb5, 0x08, 0x31, 0x1f,
+ 0x00, 0xb1, 0x3c, 0xd9, 0x38, 0x3e, 0xc6, 0x04,
+ 0xa7, 0x4e, 0xe8, 0xae, 0xed, 0x98, 0xc2, 0xf7,
+ 0xb9, 0x00, 0x5f, 0x8c, 0x60, 0xd1, 0xe5, 0x15,
+ 0xf7, 0xae, 0x1e, 0x84, 0x88, 0xd1, 0xf6, 0xbc,
+ 0x3a, 0x89, 0x35, 0x22, 0x83, 0x7c, 0xca, 0xf0,
+ 0x33, 0x82, 0x4c, 0x79, 0x3c, 0xfd, 0xb1, 0xae,
+ 0x52, 0x62, 0x55, 0xd2, 0x41, 0x60, 0xc6, 0xbb,
+ 0xfa, 0x0e, 0x59, 0xd6, 0xa8, 0xfe, 0x5d, 0xed,
+ 0x47, 0x3d, 0xe0, 0xea, 0x1f, 0x6e, 0x43, 0x51,
+ 0xec, 0x10, 0x52, 0x56, 0x77, 0x42, 0x6b, 0x52,
+ 0x87, 0xd8, 0xec, 0xe0, 0xaa, 0x76, 0xa5, 0x84,
+ 0x2a, 0x22, 0x24, 0xfd, 0x92, 0x40, 0x88, 0xd5,
+ 0x85, 0x1c, 0x1f, 0x6b, 0x47, 0xa0, 0xc4, 0xe4,
+ 0xef, 0xf4, 0xea, 0xd7, 0x59, 0xac, 0x2a, 0x9e,
+ 0x8c, 0xfa, 0x1f, 0x42, 0x08, 0xfe, 0x4f, 0x74,
+ 0xa0, 0x26, 0xf5, 0xb3, 0x84, 0xf6, 0x58, 0x5f,
+ 0x26, 0x66, 0x3e, 0xd7, 0xe4, 0x22, 0x91, 0x13,
+ 0xc8, 0xac, 0x25, 0x96, 0x23, 0xd8, 0x09, 0xea,
+ 0x45, 0x75, 0x23, 0xb8, 0x5f, 0xc2, 0x90, 0x8b,
+ 0x09, 0xc4, 0xfc, 0x47, 0x6c, 0x6d, 0x0a, 0xef,
+ 0x69, 0xa4, 0x38, 0x19, 0xcf, 0x7d, 0xf9, 0x09,
+ 0x73, 0x9b, 0x60, 0x5a, 0xf7, 0x37, 0xb5, 0xfe,
+ 0x9f, 0xe3, 0x2b, 0x4c, 0x0d, 0x6e, 0x19, 0xf1,
+ 0xd6, 0xc0, 0x70, 0xf3, 0x9d, 0x22, 0x3c, 0xf9,
+ 0x49, 0xce, 0x30, 0x8e, 0x44, 0xb5, 0x76, 0x15,
+ 0x8f, 0x52, 0xfd, 0xa5, 0x04, 0xb8, 0x55, 0x6a,
+ 0x36, 0x59, 0x7c, 0xc4, 0x48, 0xb8, 0xd7, 0xab,
+ 0x05, 0x66, 0xe9, 0x5e, 0x21, 0x6f, 0x6b, 0x36,
+ 0x29, 0xbb, 0xe9, 0xe3, 0xa2, 0x9a, 0xa8, 0xcd,
+ 0x55, 0x25, 0x11, 0xba, 0x5a, 0x58, 0xa0, 0xde,
+ 0xae, 0x19, 0x2a, 0x48, 0x5a, 0xff, 0x36, 0xcd,
+ 0x6d, 0x16, 0x7a, 0x73, 0x38, 0x46, 0xe5, 0x47,
+ 0x59, 0xc8, 0xa2, 0xf6, 0xe2, 0x6c, 0x83, 0xc5,
+ 0x36, 0x2c, 0x83, 0x7d, 0xb4, 0x01, 0x05, 0x69,
+ 0xe7, 0xaf, 0x5c, 0xc4, 0x64, 0x82, 0x12, 0x21,
+ 0xef, 0xf7, 0xd1, 0x7d, 0xb8, 0x8d, 0x8c, 0x98,
+ 0x7c, 0x5f, 0x7d, 0x92, 0x88, 0xb9, 0x94, 0x07,
+ 0x9c, 0xd8, 0xe9, 0x9c, 0x17, 0x38, 0xe3, 0x57,
+ 0x6c, 0xe0, 0xdc, 0xa5, 0x92, 0x42, 0xb3, 0xbd,
+ 0x50, 0xa2, 0x7e, 0xb5, 0xb1, 0x52, 0x72, 0x03,
+ 0x97, 0xd8, 0xaa, 0x9a, 0x1e, 0x75, 0x41, 0x11,
+ 0xa3, 0x4f, 0xcc, 0xd4, 0xe3, 0x73, 0xad, 0x96,
+ 0xdc, 0x47, 0x41, 0x9f, 0xb0, 0xbe, 0x79, 0x91,
+ 0xf5, 0xb6, 0x18, 0xfe, 0xc2, 0x83, 0x18, 0x7d,
+ 0x73, 0xd9, 0x4f, 0x83, 0x84, 0x03, 0xb3, 0xf0,
+ 0x77, 0x66, 0x3d, 0x83, 0x63, 0x2e, 0x2c, 0xf9,
+ 0xdd, 0xa6, 0x1f, 0x89, 0x82, 0xb8, 0x23, 0x42,
+ 0xeb, 0xe2, 0xca, 0x70, 0x82, 0x61, 0x41, 0x0a,
+ 0x6d, 0x5f, 0x75, 0xc5, 0xe2, 0xc4, 0x91, 0x18,
+ 0x44, 0x22, 0xfa, 0x34, 0x10, 0xf5, 0x20, 0xdc,
+ 0xb7, 0xdd, 0x2a, 0x20, 0x77, 0xf5, 0xf9, 0xce,
+ 0xdb, 0xa0, 0x0a, 0x52, 0x2a, 0x4e, 0xdd, 0xcc,
+ 0x97, 0xdf, 0x05, 0xe4, 0x5e, 0xb7, 0xaa, 0xf0,
+ 0xe2, 0x80, 0xff, 0xba, 0x1a, 0x0f, 0xac, 0xdf,
+ 0x02, 0x32, 0xe6, 0xf7, 0xc7, 0x17, 0x13, 0xb7,
+ 0xfc, 0x98, 0x48, 0x8c, 0x0d, 0x82, 0xc9, 0x80,
+ 0x7a, 0xe2, 0x0a, 0xc5, 0xb4, 0xde, 0x7c, 0x3c,
+ 0x79, 0x81, 0x0e, 0x28, 0x65, 0x79, 0x67, 0x82,
+ 0x69, 0x44, 0x66, 0x09, 0xf7, 0x16, 0x1a, 0xf9,
+ 0x7d, 0x80, 0xa1, 0x79, 0x14, 0xa9, 0xc8, 0x20,
+ 0xfb, 0xa2, 0x46, 0xbe, 0x08, 0x35, 0x17, 0x58,
+ 0xc1, 0x1a, 0xda, 0x2a, 0x6b, 0x2e, 0x1e, 0xe6,
+ 0x27, 0x55, 0x7b, 0x19, 0xe2, 0xfb, 0x64, 0xfc,
+ 0x5e, 0x15, 0x54, 0x3c, 0xe7, 0xc2, 0x11, 0x50,
+ 0x30, 0xb8, 0x72, 0x03, 0x0b, 0x1a, 0x9f, 0x86,
+ 0x27, 0x11, 0x5c, 0x06, 0x2b, 0xbd, 0x75, 0x1a,
+ 0x0a, 0xda, 0x01, 0xfa, 0x5c, 0x4a, 0xc1, 0x80,
+ 0x3a, 0x6e, 0x30, 0xc8, 0x2c, 0xeb, 0x56, 0xec,
+ 0x89, 0xfa, 0x35, 0x7b, 0xb2, 0xf0, 0x97, 0x08,
+ 0x86, 0x53, 0xbe, 0xbd, 0x40, 0x41, 0x38, 0x1c,
+ 0xb4, 0x8b, 0x79, 0x2e, 0x18, 0x96, 0x94, 0xde,
+ 0xe8, 0xca, 0xe5, 0x9f, 0x92, 0x9f, 0x15, 0x5d,
+ 0x56, 0x60, 0x5c, 0x09, 0xf9, 0x16, 0xf4, 0x17,
+ 0x0f, 0xf6, 0x4c, 0xda, 0xe6, 0x67, 0x89, 0x9f,
+ 0xca, 0x6c, 0xe7, 0x9b, 0x04, 0x62, 0x0e, 0x26,
+ 0xa6, 0x52, 0xbd, 0x29, 0xff, 0xc7, 0xa4, 0x96,
+ 0xe6, 0x6a, 0x02, 0xa5, 0x2e, 0x7b, 0xfe, 0x97,
+ 0x68, 0x3e, 0x2e, 0x5f, 0x3b, 0x0f, 0x36, 0xd6,
+ 0x98, 0x19, 0x59, 0x48, 0xd2, 0xc6, 0xe1, 0x55,
+ 0x1a, 0x6e, 0xd6, 0xed, 0x2c, 0xba, 0xc3, 0x9e,
+ 0x64, 0xc9, 0x95, 0x86, 0x35, 0x5e, 0x3e, 0x88,
+ 0x69, 0x99, 0x4b, 0xee, 0xbe, 0x9a, 0x99, 0xb5,
+ 0x6e, 0x58, 0xae, 0xdd, 0x22, 0xdb, 0xdd, 0x6b,
+ 0xfc, 0xaf, 0x90, 0xa3, 0x3d, 0xa4, 0xc1, 0x15,
+ 0x92, 0x18, 0x8d, 0xd2, 0x4b, 0x7b, 0x06, 0xd1,
+ 0x37, 0xb5, 0xe2, 0x7c, 0x2c, 0xf0, 0x25, 0xe4,
+ 0x94, 0x2a, 0xbd, 0xe3, 0x82, 0x70, 0x78, 0xa3,
+ 0x82, 0x10, 0x5a, 0x90, 0xd7, 0xa4, 0xfa, 0xaf,
+ 0x1a, 0x88, 0x59, 0xdc, 0x74, 0x12, 0xb4, 0x8e,
+ 0xd7, 0x19, 0x46, 0xf4, 0x84, 0x69, 0x9f, 0xbb,
+ 0x70, 0xa8, 0x4c, 0x52, 0x81, 0xa9, 0xff, 0x76,
+ 0x1c, 0xae, 0xd8, 0x11, 0x3d, 0x7f, 0x7d, 0xc5,
+ 0x12, 0x59, 0x28, 0x18, 0xc2, 0xa2, 0xb7, 0x1c,
+ 0x88, 0xf8, 0xd6, 0x1b, 0xa6, 0x7d, 0x9e, 0xde,
+ 0x29, 0xf8, 0xed, 0xff, 0xeb, 0x92, 0x24, 0x4f,
+ 0x05, 0xaa, 0xd9, 0x49, 0xba, 0x87, 0x59, 0x51,
+ 0xc9, 0x20, 0x5c, 0x9b, 0x74, 0xcf, 0x03, 0xd9,
+ 0x2d, 0x34, 0xc7, 0x5b, 0xa5, 0x40, 0xb2, 0x99,
+ 0xf5, 0xcb, 0xb4, 0xf6, 0xb7, 0x72, 0x4a, 0xd6,
+ 0xbd, 0xb0, 0xf3, 0x93, 0xe0, 0x1b, 0xa8, 0x04,
+ 0x1e, 0x35, 0xd4, 0x80, 0x20, 0xf4, 0x9c, 0x31,
+ 0x6b, 0x45, 0xb9, 0x15, 0xb0, 0x5e, 0xdd, 0x0a,
+ 0x33, 0x9c, 0x83, 0xcd, 0x58, 0x89, 0x50, 0x56,
+ 0xbb, 0x81, 0x00, 0x91, 0x32, 0xf3, 0x1b, 0x3e,
+ 0xcf, 0x45, 0xe1, 0xf9, 0xe1, 0x2c, 0x26, 0x78,
+ 0x93, 0x9a, 0x60, 0x46, 0xc9, 0xb5, 0x5e, 0x6a,
+ 0x28, 0x92, 0x87, 0x3f, 0x63, 0x7b, 0xdb, 0xf7,
+ 0xd0, 0x13, 0x9d, 0x32, 0x40, 0x5e, 0xcf, 0xfb,
+ 0x79, 0x68, 0x47, 0x4c, 0xfd, 0x01, 0x17, 0xe6,
+ 0x97, 0x93, 0x78, 0xbb, 0xa6, 0x27, 0xa3, 0xe8,
+ 0x1a, 0xe8, 0x94, 0x55, 0x7d, 0x08, 0xe5, 0xdc,
+ 0x66, 0xa3, 0x69, 0xc8, 0xca, 0xc5, 0xa1, 0x84,
+ 0x55, 0xde, 0x08, 0x91, 0x16, 0x3a, 0x0c, 0x86,
+ 0xab, 0x27, 0x2b, 0x64, 0x34, 0x02, 0x6c, 0x76,
+ 0x8b, 0xc6, 0xaf, 0xcc, 0xe1, 0xd6, 0x8c, 0x2a,
+ 0x18, 0x3d, 0xa6, 0x1b, 0x37, 0x75, 0x45, 0x73,
+ 0xc2, 0x75, 0xd7, 0x53, 0x78, 0x3a, 0xd6, 0xe8,
+ 0x29, 0xd2, 0x4a, 0xa8, 0x1e, 0x82, 0xf6, 0xb6,
+ 0x81, 0xde, 0x21, 0xed, 0x2b, 0x56, 0xbb, 0xf2,
+ 0xd0, 0x57, 0xc1, 0x7c, 0xd2, 0x6a, 0xd2, 0x56,
+ 0xf5, 0x13, 0x5f, 0x1c, 0x6a, 0x0b, 0x74, 0xfb,
+ 0xe9, 0xfe, 0x9e, 0xea, 0x95, 0xb2, 0x46, 0xab,
+ 0x0a, 0xfc, 0xfd, 0xf3, 0xbb, 0x04, 0x2b, 0x76,
+ 0x1b, 0xa4, 0x74, 0xb0, 0xc1, 0x78, 0xc3, 0x69,
+ 0xe2, 0xb0, 0x01, 0xe1, 0xde, 0x32, 0x4c, 0x8d,
+ 0x1a, 0xb3, 0x38, 0x08, 0xd5, 0xfc, 0x1f, 0xdc,
+ 0x0e, 0x2c, 0x9c, 0xb1, 0xa1, 0x63, 0x17, 0x22,
+ 0xf5, 0x6c, 0x93, 0x70, 0x74, 0x00, 0xf8, 0x39,
+ 0x01, 0x94, 0xd1, 0x32, 0x23, 0x56, 0x5d, 0xa6,
+ 0x02, 0x76, 0x76, 0x93, 0xce, 0x2f, 0x19, 0xe9,
+ 0x17, 0x52, 0xae, 0x6e, 0x2c, 0x6d, 0x61, 0x7f,
+ 0x3b, 0xaa, 0xe0, 0x52, 0x85, 0xc5, 0x65, 0xc1,
+ 0xbb, 0x8e, 0x5b, 0x21, 0xd5, 0xc9, 0x78, 0x83,
+ 0x07, 0x97, 0x4c, 0x62, 0x61, 0x41, 0xd4, 0xfc,
+ 0xc9, 0x39, 0xe3, 0x9b, 0xd0, 0xcc, 0x75, 0xc4,
+ 0x97, 0xe6, 0xdd, 0x2a, 0x5f, 0xa6, 0xe8, 0x59,
+ 0x6c, 0x98, 0xb9, 0x02, 0xe2, 0xa2, 0xd6, 0x68,
+ 0xee, 0x3b, 0x1d, 0xe3, 0x4d, 0x5b, 0x30, 0xef,
+ 0x03, 0xf2, 0xeb, 0x18, 0x57, 0x36, 0xe8, 0xa1,
+ 0xf4, 0x47, 0xfb, 0xcb, 0x8f, 0xcb, 0xc8, 0xf3,
+ 0x4f, 0x74, 0x9d, 0x9d, 0xb1, 0x8d, 0x14, 0x44,
+ 0xd9, 0x19, 0xb4, 0x54, 0x4f, 0x75, 0x19, 0x09,
+ 0xa0, 0x75, 0xbc, 0x3b, 0x82, 0xc6, 0x3f, 0xb8,
+ 0x83, 0x19, 0x6e, 0xd6, 0x37, 0xfe, 0x6e, 0x8a,
+ 0x4e, 0xe0, 0x4a, 0xab, 0x7b, 0xc8, 0xb4, 0x1d,
+ 0xf4, 0xed, 0x27, 0x03, 0x65, 0xa2, 0xa1, 0xae,
+ 0x11, 0xe7, 0x98, 0x78, 0x48, 0x91, 0xd2, 0xd2,
+ 0xd4, 0x23, 0x78, 0x50, 0xb1, 0x5b, 0x85, 0x10,
+ 0x8d, 0xca, 0x5f, 0x0f, 0x71, 0xae, 0x72, 0x9a,
+ 0xf6, 0x25, 0x19, 0x60, 0x06, 0xf7, 0x10, 0x34,
+ 0x18, 0x0d, 0xc9, 0x9f, 0x7b, 0x0c, 0x9b, 0x8f,
+ 0x91, 0x1b, 0x9f, 0xcd, 0x10, 0xee, 0x75, 0xf9,
+ 0x97, 0x66, 0xfc, 0x4d, 0x33, 0x6e, 0x28, 0x2b,
+ 0x92, 0x85, 0x4f, 0xab, 0x43, 0x8d, 0x8f, 0x7d,
+ 0x86, 0xa7, 0xc7, 0xd8, 0xd3, 0x0b, 0x8b, 0x57,
+ 0xb6, 0x1d, 0x95, 0x0d, 0xe9, 0xbc, 0xd9, 0x03,
+ 0xd9, 0x10, 0x19, 0xc3, 0x46, 0x63, 0x55, 0x87,
+ 0x61, 0x79, 0x6c, 0x95, 0x0e, 0x9c, 0xdd, 0xca,
+ 0xc3, 0xf3, 0x64, 0xf0, 0x7d, 0x76, 0xb7, 0x53,
+ 0x67, 0x2b, 0x1e, 0x44, 0x56, 0x81, 0xea, 0x8f,
+ 0x5c, 0x42, 0x16, 0xb8, 0x28, 0xeb, 0x1b, 0x61,
+ 0x10, 0x1e, 0xbf, 0xec, 0xa8
+};
+static const u8 dec_assoc011[] __initconst = {
+ 0xd6, 0x31, 0xda, 0x5d, 0x42, 0x5e, 0xd7
+};
+static const u8 dec_nonce011[] __initconst = {
+ 0xfd, 0x87, 0xd4, 0xd8, 0x62, 0xfd, 0xec, 0xaa
+};
+static const u8 dec_key011[] __initconst = {
+ 0x35, 0x4e, 0xb5, 0x70, 0x50, 0x42, 0x8a, 0x85,
+ 0xf2, 0xfb, 0xed, 0x7b, 0xd0, 0x9e, 0x97, 0xca,
+ 0xfa, 0x98, 0x66, 0x63, 0xee, 0x37, 0xcc, 0x52,
+ 0xfe, 0xd1, 0xdf, 0x95, 0x15, 0x34, 0x29, 0x38
+};
+
+static const u8 dec_input012[] __initconst = {
+ 0x52, 0x34, 0xb3, 0x65, 0x3b, 0xb7, 0xe5, 0xd3,
+ 0xab, 0x49, 0x17, 0x60, 0xd2, 0x52, 0x56, 0xdf,
+ 0xdf, 0x34, 0x56, 0x82, 0xe2, 0xbe, 0xe5, 0xe1,
+ 0x28, 0xd1, 0x4e, 0x5f, 0x4f, 0x01, 0x7d, 0x3f,
+ 0x99, 0x6b, 0x30, 0x6e, 0x1a, 0x7c, 0x4c, 0x8e,
+ 0x62, 0x81, 0xae, 0x86, 0x3f, 0x6b, 0xd0, 0xb5,
+ 0xa9, 0xcf, 0x50, 0xf1, 0x02, 0x12, 0xa0, 0x0b,
+ 0x24, 0xe9, 0xe6, 0x72, 0x89, 0x2c, 0x52, 0x1b,
+ 0x34, 0x38, 0xf8, 0x75, 0x5f, 0xa0, 0x74, 0xe2,
+ 0x99, 0xdd, 0xa6, 0x4b, 0x14, 0x50, 0x4e, 0xf1,
+ 0xbe, 0xd6, 0x9e, 0xdb, 0xb2, 0x24, 0x27, 0x74,
+ 0x12, 0x4a, 0x78, 0x78, 0x17, 0xa5, 0x58, 0x8e,
+ 0x2f, 0xf9, 0xf4, 0x8d, 0xee, 0x03, 0x88, 0xae,
+ 0xb8, 0x29, 0xa1, 0x2f, 0x4b, 0xee, 0x92, 0xbd,
+ 0x87, 0xb3, 0xce, 0x34, 0x21, 0x57, 0x46, 0x04,
+ 0x49, 0x0c, 0x80, 0xf2, 0x01, 0x13, 0xa1, 0x55,
+ 0xb3, 0xff, 0x44, 0x30, 0x3c, 0x1c, 0xd0, 0xef,
+ 0xbc, 0x18, 0x74, 0x26, 0xad, 0x41, 0x5b, 0x5b,
+ 0x3e, 0x9a, 0x7a, 0x46, 0x4f, 0x16, 0xd6, 0x74,
+ 0x5a, 0xb7, 0x3a, 0x28, 0x31, 0xd8, 0xae, 0x26,
+ 0xac, 0x50, 0x53, 0x86, 0xf2, 0x56, 0xd7, 0x3f,
+ 0x29, 0xbc, 0x45, 0x68, 0x8e, 0xcb, 0x98, 0x64,
+ 0xdd, 0xc9, 0xba, 0xb8, 0x4b, 0x7b, 0x82, 0xdd,
+ 0x14, 0xa7, 0xcb, 0x71, 0x72, 0x00, 0x5c, 0xad,
+ 0x7b, 0x6a, 0x89, 0xa4, 0x3d, 0xbf, 0xb5, 0x4b,
+ 0x3e, 0x7c, 0x5a, 0xcf, 0xb8, 0xa1, 0xc5, 0x6e,
+ 0xc8, 0xb6, 0x31, 0x57, 0x7b, 0xdf, 0xa5, 0x7e,
+ 0xb1, 0xd6, 0x42, 0x2a, 0x31, 0x36, 0xd1, 0xd0,
+ 0x3f, 0x7a, 0xe5, 0x94, 0xd6, 0x36, 0xa0, 0x6f,
+ 0xb7, 0x40, 0x7d, 0x37, 0xc6, 0x55, 0x7c, 0x50,
+ 0x40, 0x6d, 0x29, 0x89, 0xe3, 0x5a, 0xae, 0x97,
+ 0xe7, 0x44, 0x49, 0x6e, 0xbd, 0x81, 0x3d, 0x03,
+ 0x93, 0x06, 0x12, 0x06, 0xe2, 0x41, 0x12, 0x4a,
+ 0xf1, 0x6a, 0xa4, 0x58, 0xa2, 0xfb, 0xd2, 0x15,
+ 0xba, 0xc9, 0x79, 0xc9, 0xce, 0x5e, 0x13, 0xbb,
+ 0xf1, 0x09, 0x04, 0xcc, 0xfd, 0xe8, 0x51, 0x34,
+ 0x6a, 0xe8, 0x61, 0x88, 0xda, 0xed, 0x01, 0x47,
+ 0x84, 0xf5, 0x73, 0x25, 0xf9, 0x1c, 0x42, 0x86,
+ 0x07, 0xf3, 0x5b, 0x1a, 0x01, 0xb3, 0xeb, 0x24,
+ 0x32, 0x8d, 0xf6, 0xed, 0x7c, 0x4b, 0xeb, 0x3c,
+ 0x36, 0x42, 0x28, 0xdf, 0xdf, 0xb6, 0xbe, 0xd9,
+ 0x8c, 0x52, 0xd3, 0x2b, 0x08, 0x90, 0x8c, 0xe7,
+ 0x98, 0x31, 0xe2, 0x32, 0x8e, 0xfc, 0x11, 0x48,
+ 0x00, 0xa8, 0x6a, 0x42, 0x4a, 0x02, 0xc6, 0x4b,
+ 0x09, 0xf1, 0xe3, 0x49, 0xf3, 0x45, 0x1f, 0x0e,
+ 0xbc, 0x56, 0xe2, 0xe4, 0xdf, 0xfb, 0xeb, 0x61,
+ 0xfa, 0x24, 0xc1, 0x63, 0x75, 0xbb, 0x47, 0x75,
+ 0xaf, 0xe1, 0x53, 0x16, 0x96, 0x21, 0x85, 0x26,
+ 0x11, 0xb3, 0x76, 0xe3, 0x23, 0xa1, 0x6b, 0x74,
+ 0x37, 0xd0, 0xde, 0x06, 0x90, 0x71, 0x5d, 0x43,
+ 0x88, 0x9b, 0x00, 0x54, 0xa6, 0x75, 0x2f, 0xa1,
+ 0xc2, 0x0b, 0x73, 0x20, 0x1d, 0xb6, 0x21, 0x79,
+ 0x57, 0x3f, 0xfa, 0x09, 0xbe, 0x8a, 0x33, 0xc3,
+ 0x52, 0xf0, 0x1d, 0x82, 0x31, 0xd1, 0x55, 0xb5,
+ 0x6c, 0x99, 0x25, 0xcf, 0x5c, 0x32, 0xce, 0xe9,
+ 0x0d, 0xfa, 0x69, 0x2c, 0xd5, 0x0d, 0xc5, 0x6d,
+ 0x86, 0xd0, 0x0c, 0x3b, 0x06, 0x50, 0x79, 0xe8,
+ 0xc3, 0xae, 0x04, 0xe6, 0xcd, 0x51, 0xe4, 0x26,
+ 0x9b, 0x4f, 0x7e, 0xa6, 0x0f, 0xab, 0xd8, 0xe5,
+ 0xde, 0xa9, 0x00, 0x95, 0xbe, 0xa3, 0x9d, 0x5d,
+ 0xb2, 0x09, 0x70, 0x18, 0x1c, 0xf0, 0xac, 0x29,
+ 0x23, 0x02, 0x29, 0x28, 0xd2, 0x74, 0x35, 0x57,
+ 0x62, 0x0f, 0x24, 0xea, 0x5e, 0x33, 0xc2, 0x92,
+ 0xf3, 0x78, 0x4d, 0x30, 0x1e, 0xa1, 0x99, 0xa9,
+ 0x82, 0xb0, 0x42, 0x31, 0x8d, 0xad, 0x8a, 0xbc,
+ 0xfc, 0xd4, 0x57, 0x47, 0x3e, 0xb4, 0x50, 0xdd,
+ 0x6e, 0x2c, 0x80, 0x4d, 0x22, 0xf1, 0xfb, 0x57,
+ 0xc4, 0xdd, 0x17, 0xe1, 0x8a, 0x36, 0x4a, 0xb3,
+ 0x37, 0xca, 0xc9, 0x4e, 0xab, 0xd5, 0x69, 0xc4,
+ 0xf4, 0xbc, 0x0b, 0x3b, 0x44, 0x4b, 0x29, 0x9c,
+ 0xee, 0xd4, 0x35, 0x22, 0x21, 0xb0, 0x1f, 0x27,
+ 0x64, 0xa8, 0x51, 0x1b, 0xf0, 0x9f, 0x19, 0x5c,
+ 0xfb, 0x5a, 0x64, 0x74, 0x70, 0x45, 0x09, 0xf5,
+ 0x64, 0xfe, 0x1a, 0x2d, 0xc9, 0x14, 0x04, 0x14,
+ 0xcf, 0xd5, 0x7d, 0x60, 0xaf, 0x94, 0x39, 0x94,
+ 0xe2, 0x7d, 0x79, 0x82, 0xd0, 0x65, 0x3b, 0x6b,
+ 0x9c, 0x19, 0x84, 0xb4, 0x6d, 0xb3, 0x0c, 0x99,
+ 0xc0, 0x56, 0xa8, 0xbd, 0x73, 0xce, 0x05, 0x84,
+ 0x3e, 0x30, 0xaa, 0xc4, 0x9b, 0x1b, 0x04, 0x2a,
+ 0x9f, 0xd7, 0x43, 0x2b, 0x23, 0xdf, 0xbf, 0xaa,
+ 0xd5, 0xc2, 0x43, 0x2d, 0x70, 0xab, 0xdc, 0x75,
+ 0xad, 0xac, 0xf7, 0xc0, 0xbe, 0x67, 0xb2, 0x74,
+ 0xed, 0x67, 0x10, 0x4a, 0x92, 0x60, 0xc1, 0x40,
+ 0x50, 0x19, 0x8a, 0x8a, 0x8c, 0x09, 0x0e, 0x72,
+ 0xe1, 0x73, 0x5e, 0xe8, 0x41, 0x85, 0x63, 0x9f,
+ 0x3f, 0xd7, 0x7d, 0xc4, 0xfb, 0x22, 0x5d, 0x92,
+ 0x6c, 0xb3, 0x1e, 0xe2, 0x50, 0x2f, 0x82, 0xa8,
+ 0x28, 0xc0, 0xb5, 0xd7, 0x5f, 0x68, 0x0d, 0x2c,
+ 0x2d, 0xaf, 0x7e, 0xfa, 0x2e, 0x08, 0x0f, 0x1f,
+ 0x70, 0x9f, 0xe9, 0x19, 0x72, 0x55, 0xf8, 0xfb,
+ 0x51, 0xd2, 0x33, 0x5d, 0xa0, 0xd3, 0x2b, 0x0a,
+ 0x6c, 0xbc, 0x4e, 0xcf, 0x36, 0x4d, 0xdc, 0x3b,
+ 0xe9, 0x3e, 0x81, 0x7c, 0x61, 0xdb, 0x20, 0x2d,
+ 0x3a, 0xc3, 0xb3, 0x0c, 0x1e, 0x00, 0xb9, 0x7c,
+ 0xf5, 0xca, 0x10, 0x5f, 0x3a, 0x71, 0xb3, 0xe4,
+ 0x20, 0xdb, 0x0c, 0x2a, 0x98, 0x63, 0x45, 0x00,
+ 0x58, 0xf6, 0x68, 0xe4, 0x0b, 0xda, 0x13, 0x3b,
+ 0x60, 0x5c, 0x76, 0xdb, 0xb9, 0x97, 0x71, 0xe4,
+ 0xd9, 0xb7, 0xdb, 0xbd, 0x68, 0xc7, 0x84, 0x84,
+ 0xaa, 0x7c, 0x68, 0x62, 0x5e, 0x16, 0xfc, 0xba,
+ 0x72, 0xaa, 0x9a, 0xa9, 0xeb, 0x7c, 0x75, 0x47,
+ 0x97, 0x7e, 0xad, 0xe2, 0xd9, 0x91, 0xe8, 0xe4,
+ 0xa5, 0x31, 0xd7, 0x01, 0x8e, 0xa2, 0x11, 0x88,
+ 0x95, 0xb9, 0xf2, 0x9b, 0xd3, 0x7f, 0x1b, 0x81,
+ 0x22, 0xf7, 0x98, 0x60, 0x0a, 0x64, 0xa6, 0xc1,
+ 0xf6, 0x49, 0xc7, 0xe3, 0x07, 0x4d, 0x94, 0x7a,
+ 0xcf, 0x6e, 0x68, 0x0c, 0x1b, 0x3f, 0x6e, 0x2e,
+ 0xee, 0x92, 0xfa, 0x52, 0xb3, 0x59, 0xf8, 0xf1,
+ 0x8f, 0x6a, 0x66, 0xa3, 0x82, 0x76, 0x4a, 0x07,
+ 0x1a, 0xc7, 0xdd, 0xf5, 0xda, 0x9c, 0x3c, 0x24,
+ 0xbf, 0xfd, 0x42, 0xa1, 0x10, 0x64, 0x6a, 0x0f,
+ 0x89, 0xee, 0x36, 0xa5, 0xce, 0x99, 0x48, 0x6a,
+ 0xf0, 0x9f, 0x9e, 0x69, 0xa4, 0x40, 0x20, 0xe9,
+ 0x16, 0x15, 0xf7, 0xdb, 0x75, 0x02, 0xcb, 0xe9,
+ 0x73, 0x8b, 0x3b, 0x49, 0x2f, 0xf0, 0xaf, 0x51,
+ 0x06, 0x5c, 0xdf, 0x27, 0x27, 0x49, 0x6a, 0xd1,
+ 0xcc, 0xc7, 0xb5, 0x63, 0xb5, 0xfc, 0xb8, 0x5c,
+ 0x87, 0x7f, 0x84, 0xb4, 0xcc, 0x14, 0xa9, 0x53,
+ 0xda, 0xa4, 0x56, 0xf8, 0xb6, 0x1b, 0xcc, 0x40,
+ 0x27, 0x52, 0x06, 0x5a, 0x13, 0x81, 0xd7, 0x3a,
+ 0xd4, 0x3b, 0xfb, 0x49, 0x65, 0x31, 0x33, 0xb2,
+ 0xfa, 0xcd, 0xad, 0x58, 0x4e, 0x2b, 0xae, 0xd2,
+ 0x20, 0xfb, 0x1a, 0x48, 0xb4, 0x3f, 0x9a, 0xd8,
+ 0x7a, 0x35, 0x4a, 0xc8, 0xee, 0x88, 0x5e, 0x07,
+ 0x66, 0x54, 0xb9, 0xec, 0x9f, 0xa3, 0xe3, 0xb9,
+ 0x37, 0xaa, 0x49, 0x76, 0x31, 0xda, 0x74, 0x2d,
+ 0x3c, 0xa4, 0x65, 0x10, 0x32, 0x38, 0xf0, 0xde,
+ 0xd3, 0x99, 0x17, 0xaa, 0x71, 0xaa, 0x8f, 0x0f,
+ 0x8c, 0xaf, 0xa2, 0xf8, 0x5d, 0x64, 0xba, 0x1d,
+ 0xa3, 0xef, 0x96, 0x73, 0xe8, 0xa1, 0x02, 0x8d,
+ 0x0c, 0x6d, 0xb8, 0x06, 0x90, 0xb8, 0x08, 0x56,
+ 0x2c, 0xa7, 0x06, 0xc9, 0xc2, 0x38, 0xdb, 0x7c,
+ 0x63, 0xb1, 0x57, 0x8e, 0xea, 0x7c, 0x79, 0xf3,
+ 0x49, 0x1d, 0xfe, 0x9f, 0xf3, 0x6e, 0xb1, 0x1d,
+ 0xba, 0x19, 0x80, 0x1a, 0x0a, 0xd3, 0xb0, 0x26,
+ 0x21, 0x40, 0xb1, 0x7c, 0xf9, 0x4d, 0x8d, 0x10,
+ 0xc1, 0x7e, 0xf4, 0xf6, 0x3c, 0xa8, 0xfd, 0x7c,
+ 0xa3, 0x92, 0xb2, 0x0f, 0xaa, 0xcc, 0xa6, 0x11,
+ 0xfe, 0x04, 0xe3, 0xd1, 0x7a, 0x32, 0x89, 0xdf,
+ 0x0d, 0xc4, 0x8f, 0x79, 0x6b, 0xca, 0x16, 0x7c,
+ 0x6e, 0xf9, 0xad, 0x0f, 0xf6, 0xfe, 0x27, 0xdb,
+ 0xc4, 0x13, 0x70, 0xf1, 0x62, 0x1a, 0x4f, 0x79,
+ 0x40, 0xc9, 0x9b, 0x8b, 0x21, 0xea, 0x84, 0xfa,
+ 0xf5, 0xf1, 0x89, 0xce, 0xb7, 0x55, 0x0a, 0x80,
+ 0x39, 0x2f, 0x55, 0x36, 0x16, 0x9c, 0x7b, 0x08,
+ 0xbd, 0x87, 0x0d, 0xa5, 0x32, 0xf1, 0x52, 0x7c,
+ 0xe8, 0x55, 0x60, 0x5b, 0xd7, 0x69, 0xe4, 0xfc,
+ 0xfa, 0x12, 0x85, 0x96, 0xea, 0x50, 0x28, 0xab,
+ 0x8a, 0xf7, 0xbb, 0x0e, 0x53, 0x74, 0xca, 0xa6,
+ 0x27, 0x09, 0xc2, 0xb5, 0xde, 0x18, 0x14, 0xd9,
+ 0xea, 0xe5, 0x29, 0x1c, 0x40, 0x56, 0xcf, 0xd7,
+ 0xae, 0x05, 0x3f, 0x65, 0xaf, 0x05, 0x73, 0xe2,
+ 0x35, 0x96, 0x27, 0x07, 0x14, 0xc0, 0xad, 0x33,
+ 0xf1, 0xdc, 0x44, 0x7a, 0x89, 0x17, 0x77, 0xd2,
+ 0x9c, 0x58, 0x60, 0xf0, 0x3f, 0x7b, 0x2d, 0x2e,
+ 0x57, 0x95, 0x54, 0x87, 0xed, 0xf2, 0xc7, 0x4c,
+ 0xf0, 0xae, 0x56, 0x29, 0x19, 0x7d, 0x66, 0x4b,
+ 0x9b, 0x83, 0x84, 0x42, 0x3b, 0x01, 0x25, 0x66,
+ 0x8e, 0x02, 0xde, 0xb9, 0x83, 0x54, 0x19, 0xf6,
+ 0x9f, 0x79, 0x0d, 0x67, 0xc5, 0x1d, 0x7a, 0x44,
+ 0x02, 0x98, 0xa7, 0x16, 0x1c, 0x29, 0x0d, 0x74,
+ 0xff, 0x85, 0x40, 0x06, 0xef, 0x2c, 0xa9, 0xc6,
+ 0xf5, 0x53, 0x07, 0x06, 0xae, 0xe4, 0xfa, 0x5f,
+ 0xd8, 0x39, 0x4d, 0xf1, 0x9b, 0x6b, 0xd9, 0x24,
+ 0x84, 0xfe, 0x03, 0x4c, 0xb2, 0x3f, 0xdf, 0xa1,
+ 0x05, 0x9e, 0x50, 0x14, 0x5a, 0xd9, 0x1a, 0xa2,
+ 0xa7, 0xfa, 0xfa, 0x17, 0xf7, 0x78, 0xd6, 0xb5,
+ 0x92, 0x61, 0x91, 0xac, 0x36, 0xfa, 0x56, 0x0d,
+ 0x38, 0x32, 0x18, 0x85, 0x08, 0x58, 0x37, 0xf0,
+ 0x4b, 0xdb, 0x59, 0xe7, 0xa4, 0x34, 0xc0, 0x1b,
+ 0x01, 0xaf, 0x2d, 0xde, 0xa1, 0xaa, 0x5d, 0xd3,
+ 0xec, 0xe1, 0xd4, 0xf7, 0xe6, 0x54, 0x68, 0xf0,
+ 0x51, 0x97, 0xa7, 0x89, 0xea, 0x24, 0xad, 0xd3,
+ 0x6e, 0x47, 0x93, 0x8b, 0x4b, 0xb4, 0xf7, 0x1c,
+ 0x42, 0x06, 0x67, 0xe8, 0x99, 0xf6, 0xf5, 0x7b,
+ 0x85, 0xb5, 0x65, 0xb5, 0xb5, 0xd2, 0x37, 0xf5,
+ 0xf3, 0x02, 0xa6, 0x4d, 0x11, 0xa7, 0xdc, 0x51,
+ 0x09, 0x7f, 0xa0, 0xd8, 0x88, 0x1c, 0x13, 0x71,
+ 0xae, 0x9c, 0xb7, 0x7b, 0x34, 0xd6, 0x4e, 0x68,
+ 0x26, 0x83, 0x51, 0xaf, 0x1d, 0xee, 0x8b, 0xbb,
+ 0x69, 0x43, 0x2b, 0x9e, 0x8a, 0xbc, 0x02, 0x0e,
+ 0xa0, 0x1b, 0xe0, 0xa8, 0x5f, 0x6f, 0xaf, 0x1b,
+ 0x8f, 0xe7, 0x64, 0x71, 0x74, 0x11, 0x7e, 0xa8,
+ 0xd8, 0xf9, 0x97, 0x06, 0xc3, 0xb6, 0xfb, 0xfb,
+ 0xb7, 0x3d, 0x35, 0x9d, 0x3b, 0x52, 0xed, 0x54,
+ 0xca, 0xf4, 0x81, 0x01, 0x2d, 0x1b, 0xc3, 0xa7,
+ 0x00, 0x3d, 0x1a, 0x39, 0x54, 0xe1, 0xf6, 0xff,
+ 0xed, 0x6f, 0x0b, 0x5a, 0x68, 0xda, 0x58, 0xdd,
+ 0xa9, 0xcf, 0x5c, 0x4a, 0xe5, 0x09, 0x4e, 0xde,
+ 0x9d, 0xbc, 0x3e, 0xee, 0x5a, 0x00, 0x3b, 0x2c,
+ 0x87, 0x10, 0x65, 0x60, 0xdd, 0xd7, 0x56, 0xd1,
+ 0x4c, 0x64, 0x45, 0xe4, 0x21, 0xec, 0x78, 0xf8,
+ 0x25, 0x7a, 0x3e, 0x16, 0x5d, 0x09, 0x53, 0x14,
+ 0xbe, 0x4f, 0xae, 0x87, 0xd8, 0xd1, 0xaa, 0x3c,
+ 0xf6, 0x3e, 0xa4, 0x70, 0x8c, 0x5e, 0x70, 0xa4,
+ 0xb3, 0x6b, 0x66, 0x73, 0xd3, 0xbf, 0x31, 0x06,
+ 0x19, 0x62, 0x93, 0x15, 0xf2, 0x86, 0xe4, 0x52,
+ 0x7e, 0x53, 0x4c, 0x12, 0x38, 0xcc, 0x34, 0x7d,
+ 0x57, 0xf6, 0x42, 0x93, 0x8a, 0xc4, 0xee, 0x5c,
+ 0x8a, 0xe1, 0x52, 0x8f, 0x56, 0x64, 0xf6, 0xa6,
+ 0xd1, 0x91, 0x57, 0x70, 0xcd, 0x11, 0x76, 0xf5,
+ 0x59, 0x60, 0x60, 0x3c, 0xc1, 0xc3, 0x0b, 0x7f,
+ 0x58, 0x1a, 0x50, 0x91, 0xf1, 0x68, 0x8f, 0x6e,
+ 0x74, 0x74, 0xa8, 0x51, 0x0b, 0xf7, 0x7a, 0x98,
+ 0x37, 0xf2, 0x0a, 0x0e, 0xa4, 0x97, 0x04, 0xb8,
+ 0x9b, 0xfd, 0xa0, 0xea, 0xf7, 0x0d, 0xe1, 0xdb,
+ 0x03, 0xf0, 0x31, 0x29, 0xf8, 0xdd, 0x6b, 0x8b,
+ 0x5d, 0xd8, 0x59, 0xa9, 0x29, 0xcf, 0x9a, 0x79,
+ 0x89, 0x19, 0x63, 0x46, 0x09, 0x79, 0x6a, 0x11,
+ 0xda, 0x63, 0x68, 0x48, 0x77, 0x23, 0xfb, 0x7d,
+ 0x3a, 0x43, 0xcb, 0x02, 0x3b, 0x7a, 0x6d, 0x10,
+ 0x2a, 0x9e, 0xac, 0xf1, 0xd4, 0x19, 0xf8, 0x23,
+ 0x64, 0x1d, 0x2c, 0x5f, 0xf2, 0xb0, 0x5c, 0x23,
+ 0x27, 0xf7, 0x27, 0x30, 0x16, 0x37, 0xb1, 0x90,
+ 0xab, 0x38, 0xfb, 0x55, 0xcd, 0x78, 0x58, 0xd4,
+ 0x7d, 0x43, 0xf6, 0x45, 0x5e, 0x55, 0x8d, 0xb1,
+ 0x02, 0x65, 0x58, 0xb4, 0x13, 0x4b, 0x36, 0xf7,
+ 0xcc, 0xfe, 0x3d, 0x0b, 0x82, 0xe2, 0x12, 0x11,
+ 0xbb, 0xe6, 0xb8, 0x3a, 0x48, 0x71, 0xc7, 0x50,
+ 0x06, 0x16, 0x3a, 0xe6, 0x7c, 0x05, 0xc7, 0xc8,
+ 0x4d, 0x2f, 0x08, 0x6a, 0x17, 0x9a, 0x95, 0x97,
+ 0x50, 0x68, 0xdc, 0x28, 0x18, 0xc4, 0x61, 0x38,
+ 0xb9, 0xe0, 0x3e, 0x78, 0xdb, 0x29, 0xe0, 0x9f,
+ 0x52, 0xdd, 0xf8, 0x4f, 0x91, 0xc1, 0xd0, 0x33,
+ 0xa1, 0x7a, 0x8e, 0x30, 0x13, 0x82, 0x07, 0x9f,
+ 0xd3, 0x31, 0x0f, 0x23, 0xbe, 0x32, 0x5a, 0x75,
+ 0xcf, 0x96, 0xb2, 0xec, 0xb5, 0x32, 0xac, 0x21,
+ 0xd1, 0x82, 0x33, 0xd3, 0x15, 0x74, 0xbd, 0x90,
+ 0xf1, 0x2c, 0xe6, 0x5f, 0x8d, 0xe3, 0x02, 0xe8,
+ 0xe9, 0xc4, 0xca, 0x96, 0xeb, 0x0e, 0xbc, 0x91,
+ 0xf4, 0xb9, 0xea, 0xd9, 0x1b, 0x75, 0xbd, 0xe1,
+ 0xac, 0x2a, 0x05, 0x37, 0x52, 0x9b, 0x1b, 0x3f,
+ 0x5a, 0xdc, 0x21, 0xc3, 0x98, 0xbb, 0xaf, 0xa3,
+ 0xf2, 0x00, 0xbf, 0x0d, 0x30, 0x89, 0x05, 0xcc,
+ 0xa5, 0x76, 0xf5, 0x06, 0xf0, 0xc6, 0x54, 0x8a,
+ 0x5d, 0xd4, 0x1e, 0xc1, 0xf2, 0xce, 0xb0, 0x62,
+ 0xc8, 0xfc, 0x59, 0x42, 0x9a, 0x90, 0x60, 0x55,
+ 0xfe, 0x88, 0xa5, 0x8b, 0xb8, 0x33, 0x0c, 0x23,
+ 0x24, 0x0d, 0x15, 0x70, 0x37, 0x1e, 0x3d, 0xf6,
+ 0xd2, 0xea, 0x92, 0x10, 0xb2, 0xc4, 0x51, 0xac,
+ 0xf2, 0xac, 0xf3, 0x6b, 0x6c, 0xaa, 0xcf, 0x12,
+ 0xc5, 0x6c, 0x90, 0x50, 0xb5, 0x0c, 0xfc, 0x1a,
+ 0x15, 0x52, 0xe9, 0x26, 0xc6, 0x52, 0xa4, 0xe7,
+ 0x81, 0x69, 0xe1, 0xe7, 0x9e, 0x30, 0x01, 0xec,
+ 0x84, 0x89, 0xb2, 0x0d, 0x66, 0xdd, 0xce, 0x28,
+ 0x5c, 0xec, 0x98, 0x46, 0x68, 0x21, 0x9f, 0x88,
+ 0x3f, 0x1f, 0x42, 0x77, 0xce, 0xd0, 0x61, 0xd4,
+ 0x20, 0xa7, 0xff, 0x53, 0xad, 0x37, 0xd0, 0x17,
+ 0x35, 0xc9, 0xfc, 0xba, 0x0a, 0x78, 0x3f, 0xf2,
+ 0xcc, 0x86, 0x89, 0xe8, 0x4b, 0x3c, 0x48, 0x33,
+ 0x09, 0x7f, 0xc6, 0xc0, 0xdd, 0xb8, 0xfd, 0x7a,
+ 0x66, 0x66, 0x65, 0xeb, 0x47, 0xa7, 0x04, 0x28,
+ 0xa3, 0x19, 0x8e, 0xa9, 0xb1, 0x13, 0x67, 0x62,
+ 0x70, 0xcf, 0xd6
+};
+static const u8 dec_output012[] __initconst = {
+ 0x74, 0xa6, 0x3e, 0xe4, 0xb1, 0xcb, 0xaf, 0xb0,
+ 0x40, 0xe5, 0x0f, 0x9e, 0xf1, 0xf2, 0x89, 0xb5,
+ 0x42, 0x34, 0x8a, 0xa1, 0x03, 0xb7, 0xe9, 0x57,
+ 0x46, 0xbe, 0x20, 0xe4, 0x6e, 0xb0, 0xeb, 0xff,
+ 0xea, 0x07, 0x7e, 0xef, 0xe2, 0x55, 0x9f, 0xe5,
+ 0x78, 0x3a, 0xb7, 0x83, 0xc2, 0x18, 0x40, 0x7b,
+ 0xeb, 0xcd, 0x81, 0xfb, 0x90, 0x12, 0x9e, 0x46,
+ 0xa9, 0xd6, 0x4a, 0xba, 0xb0, 0x62, 0xdb, 0x6b,
+ 0x99, 0xc4, 0xdb, 0x54, 0x4b, 0xb8, 0xa5, 0x71,
+ 0xcb, 0xcd, 0x63, 0x32, 0x55, 0xfb, 0x31, 0xf0,
+ 0x38, 0xf5, 0xbe, 0x78, 0xe4, 0x45, 0xce, 0x1b,
+ 0x6a, 0x5b, 0x0e, 0xf4, 0x16, 0xe4, 0xb1, 0x3d,
+ 0xf6, 0x63, 0x7b, 0xa7, 0x0c, 0xde, 0x6f, 0x8f,
+ 0x74, 0xdf, 0xe0, 0x1e, 0x9d, 0xce, 0x8f, 0x24,
+ 0xef, 0x23, 0x35, 0x33, 0x7b, 0x83, 0x34, 0x23,
+ 0x58, 0x74, 0x14, 0x77, 0x1f, 0xc2, 0x4f, 0x4e,
+ 0xc6, 0x89, 0xf9, 0x52, 0x09, 0x37, 0x64, 0x14,
+ 0xc4, 0x01, 0x6b, 0x9d, 0x77, 0xe8, 0x90, 0x5d,
+ 0xa8, 0x4a, 0x2a, 0xef, 0x5c, 0x7f, 0xeb, 0xbb,
+ 0xb2, 0xc6, 0x93, 0x99, 0x66, 0xdc, 0x7f, 0xd4,
+ 0x9e, 0x2a, 0xca, 0x8d, 0xdb, 0xe7, 0x20, 0xcf,
+ 0xe4, 0x73, 0xae, 0x49, 0x7d, 0x64, 0x0f, 0x0e,
+ 0x28, 0x46, 0xa9, 0xa8, 0x32, 0xe4, 0x0e, 0xf6,
+ 0x51, 0x53, 0xb8, 0x3c, 0xb1, 0xff, 0xa3, 0x33,
+ 0x41, 0x75, 0xff, 0xf1, 0x6f, 0xf1, 0xfb, 0xbb,
+ 0x83, 0x7f, 0x06, 0x9b, 0xe7, 0x1b, 0x0a, 0xe0,
+ 0x5c, 0x33, 0x60, 0x5b, 0xdb, 0x5b, 0xed, 0xfe,
+ 0xa5, 0x16, 0x19, 0x72, 0xa3, 0x64, 0x23, 0x00,
+ 0x02, 0xc7, 0xf3, 0x6a, 0x81, 0x3e, 0x44, 0x1d,
+ 0x79, 0x15, 0x5f, 0x9a, 0xde, 0xe2, 0xfd, 0x1b,
+ 0x73, 0xc1, 0xbc, 0x23, 0xba, 0x31, 0xd2, 0x50,
+ 0xd5, 0xad, 0x7f, 0x74, 0xa7, 0xc9, 0xf8, 0x3e,
+ 0x2b, 0x26, 0x10, 0xf6, 0x03, 0x36, 0x74, 0xe4,
+ 0x0e, 0x6a, 0x72, 0xb7, 0x73, 0x0a, 0x42, 0x28,
+ 0xc2, 0xad, 0x5e, 0x03, 0xbe, 0xb8, 0x0b, 0xa8,
+ 0x5b, 0xd4, 0xb8, 0xba, 0x52, 0x89, 0xb1, 0x9b,
+ 0xc1, 0xc3, 0x65, 0x87, 0xed, 0xa5, 0xf4, 0x86,
+ 0xfd, 0x41, 0x80, 0x91, 0x27, 0x59, 0x53, 0x67,
+ 0x15, 0x78, 0x54, 0x8b, 0x2d, 0x3d, 0xc7, 0xff,
+ 0x02, 0x92, 0x07, 0x5f, 0x7a, 0x4b, 0x60, 0x59,
+ 0x3c, 0x6f, 0x5c, 0xd8, 0xec, 0x95, 0xd2, 0xfe,
+ 0xa0, 0x3b, 0xd8, 0x3f, 0xd1, 0x69, 0xa6, 0xd6,
+ 0x41, 0xb2, 0xf4, 0x4d, 0x12, 0xf4, 0x58, 0x3e,
+ 0x66, 0x64, 0x80, 0x31, 0x9b, 0xa8, 0x4c, 0x8b,
+ 0x07, 0xb2, 0xec, 0x66, 0x94, 0x66, 0x47, 0x50,
+ 0x50, 0x5f, 0x18, 0x0b, 0x0e, 0xd6, 0xc0, 0x39,
+ 0x21, 0x13, 0x9e, 0x33, 0xbc, 0x79, 0x36, 0x02,
+ 0x96, 0x70, 0xf0, 0x48, 0x67, 0x2f, 0x26, 0xe9,
+ 0x6d, 0x10, 0xbb, 0xd6, 0x3f, 0xd1, 0x64, 0x7a,
+ 0x2e, 0xbe, 0x0c, 0x61, 0xf0, 0x75, 0x42, 0x38,
+ 0x23, 0xb1, 0x9e, 0x9f, 0x7c, 0x67, 0x66, 0xd9,
+ 0x58, 0x9a, 0xf1, 0xbb, 0x41, 0x2a, 0x8d, 0x65,
+ 0x84, 0x94, 0xfc, 0xdc, 0x6a, 0x50, 0x64, 0xdb,
+ 0x56, 0x33, 0x76, 0x00, 0x10, 0xed, 0xbe, 0xd2,
+ 0x12, 0xf6, 0xf6, 0x1b, 0xa2, 0x16, 0xde, 0xae,
+ 0x31, 0x95, 0xdd, 0xb1, 0x08, 0x7e, 0x4e, 0xee,
+ 0xe7, 0xf9, 0xa5, 0xfb, 0x5b, 0x61, 0x43, 0x00,
+ 0x40, 0xf6, 0x7e, 0x02, 0x04, 0x32, 0x4e, 0x0c,
+ 0xe2, 0x66, 0x0d, 0xd7, 0x07, 0x98, 0x0e, 0xf8,
+ 0x72, 0x34, 0x6d, 0x95, 0x86, 0xd7, 0xcb, 0x31,
+ 0x54, 0x47, 0xd0, 0x38, 0x29, 0x9c, 0x5a, 0x68,
+ 0xd4, 0x87, 0x76, 0xc9, 0xe7, 0x7e, 0xe3, 0xf4,
+ 0x81, 0x6d, 0x18, 0xcb, 0xc9, 0x05, 0xaf, 0xa0,
+ 0xfb, 0x66, 0xf7, 0xf1, 0x1c, 0xc6, 0x14, 0x11,
+ 0x4f, 0x2b, 0x79, 0x42, 0x8b, 0xbc, 0xac, 0xe7,
+ 0x6c, 0xfe, 0x0f, 0x58, 0xe7, 0x7c, 0x78, 0x39,
+ 0x30, 0xb0, 0x66, 0x2c, 0x9b, 0x6d, 0x3a, 0xe1,
+ 0xcf, 0xc9, 0xa4, 0x0e, 0x6d, 0x6d, 0x8a, 0xa1,
+ 0x3a, 0xe7, 0x28, 0xd4, 0x78, 0x4c, 0xa6, 0xa2,
+ 0x2a, 0xa6, 0x03, 0x30, 0xd7, 0xa8, 0x25, 0x66,
+ 0x87, 0x2f, 0x69, 0x5c, 0x4e, 0xdd, 0xa5, 0x49,
+ 0x5d, 0x37, 0x4a, 0x59, 0xc4, 0xaf, 0x1f, 0xa2,
+ 0xe4, 0xf8, 0xa6, 0x12, 0x97, 0xd5, 0x79, 0xf5,
+ 0xe2, 0x4a, 0x2b, 0x5f, 0x61, 0xe4, 0x9e, 0xe3,
+ 0xee, 0xb8, 0xa7, 0x5b, 0x2f, 0xf4, 0x9e, 0x6c,
+ 0xfb, 0xd1, 0xc6, 0x56, 0x77, 0xba, 0x75, 0xaa,
+ 0x3d, 0x1a, 0xa8, 0x0b, 0xb3, 0x68, 0x24, 0x00,
+ 0x10, 0x7f, 0xfd, 0xd7, 0xa1, 0x8d, 0x83, 0x54,
+ 0x4f, 0x1f, 0xd8, 0x2a, 0xbe, 0x8a, 0x0c, 0x87,
+ 0xab, 0xa2, 0xde, 0xc3, 0x39, 0xbf, 0x09, 0x03,
+ 0xa5, 0xf3, 0x05, 0x28, 0xe1, 0xe1, 0xee, 0x39,
+ 0x70, 0x9c, 0xd8, 0x81, 0x12, 0x1e, 0x02, 0x40,
+ 0xd2, 0x6e, 0xf0, 0xeb, 0x1b, 0x3d, 0x22, 0xc6,
+ 0xe5, 0xe3, 0xb4, 0x5a, 0x98, 0xbb, 0xf0, 0x22,
+ 0x28, 0x8d, 0xe5, 0xd3, 0x16, 0x48, 0x24, 0xa5,
+ 0xe6, 0x66, 0x0c, 0xf9, 0x08, 0xf9, 0x7e, 0x1e,
+ 0xe1, 0x28, 0x26, 0x22, 0xc7, 0xc7, 0x0a, 0x32,
+ 0x47, 0xfa, 0xa3, 0xbe, 0x3c, 0xc4, 0xc5, 0x53,
+ 0x0a, 0xd5, 0x94, 0x4a, 0xd7, 0x93, 0xd8, 0x42,
+ 0x99, 0xb9, 0x0a, 0xdb, 0x56, 0xf7, 0xb9, 0x1c,
+ 0x53, 0x4f, 0xfa, 0xd3, 0x74, 0xad, 0xd9, 0x68,
+ 0xf1, 0x1b, 0xdf, 0x61, 0xc6, 0x5e, 0xa8, 0x48,
+ 0xfc, 0xd4, 0x4a, 0x4c, 0x3c, 0x32, 0xf7, 0x1c,
+ 0x96, 0x21, 0x9b, 0xf9, 0xa3, 0xcc, 0x5a, 0xce,
+ 0xd5, 0xd7, 0x08, 0x24, 0xf6, 0x1c, 0xfd, 0xdd,
+ 0x38, 0xc2, 0x32, 0xe9, 0xb8, 0xe7, 0xb6, 0xfa,
+ 0x9d, 0x45, 0x13, 0x2c, 0x83, 0xfd, 0x4a, 0x69,
+ 0x82, 0xcd, 0xdc, 0xb3, 0x76, 0x0c, 0x9e, 0xd8,
+ 0xf4, 0x1b, 0x45, 0x15, 0xb4, 0x97, 0xe7, 0x58,
+ 0x34, 0xe2, 0x03, 0x29, 0x5a, 0xbf, 0xb6, 0xe0,
+ 0x5d, 0x13, 0xd9, 0x2b, 0xb4, 0x80, 0xb2, 0x45,
+ 0x81, 0x6a, 0x2e, 0x6c, 0x89, 0x7d, 0xee, 0xbb,
+ 0x52, 0xdd, 0x1f, 0x18, 0xe7, 0x13, 0x6b, 0x33,
+ 0x0e, 0xea, 0x36, 0x92, 0x77, 0x7b, 0x6d, 0x9c,
+ 0x5a, 0x5f, 0x45, 0x7b, 0x7b, 0x35, 0x62, 0x23,
+ 0xd1, 0xbf, 0x0f, 0xd0, 0x08, 0x1b, 0x2b, 0x80,
+ 0x6b, 0x7e, 0xf1, 0x21, 0x47, 0xb0, 0x57, 0xd1,
+ 0x98, 0x72, 0x90, 0x34, 0x1c, 0x20, 0x04, 0xff,
+ 0x3d, 0x5c, 0xee, 0x0e, 0x57, 0x5f, 0x6f, 0x24,
+ 0x4e, 0x3c, 0xea, 0xfc, 0xa5, 0xa9, 0x83, 0xc9,
+ 0x61, 0xb4, 0x51, 0x24, 0xf8, 0x27, 0x5e, 0x46,
+ 0x8c, 0xb1, 0x53, 0x02, 0x96, 0x35, 0xba, 0xb8,
+ 0x4c, 0x71, 0xd3, 0x15, 0x59, 0x35, 0x22, 0x20,
+ 0xad, 0x03, 0x9f, 0x66, 0x44, 0x3b, 0x9c, 0x35,
+ 0x37, 0x1f, 0x9b, 0xbb, 0xf3, 0xdb, 0x35, 0x63,
+ 0x30, 0x64, 0xaa, 0xa2, 0x06, 0xa8, 0x5d, 0xbb,
+ 0xe1, 0x9f, 0x70, 0xec, 0x82, 0x11, 0x06, 0x36,
+ 0xec, 0x8b, 0x69, 0x66, 0x24, 0x44, 0xc9, 0x4a,
+ 0x57, 0xbb, 0x9b, 0x78, 0x13, 0xce, 0x9c, 0x0c,
+ 0xba, 0x92, 0x93, 0x63, 0xb8, 0xe2, 0x95, 0x0f,
+ 0x0f, 0x16, 0x39, 0x52, 0xfd, 0x3a, 0x6d, 0x02,
+ 0x4b, 0xdf, 0x13, 0xd3, 0x2a, 0x22, 0xb4, 0x03,
+ 0x7c, 0x54, 0x49, 0x96, 0x68, 0x54, 0x10, 0xfa,
+ 0xef, 0xaa, 0x6c, 0xe8, 0x22, 0xdc, 0x71, 0x16,
+ 0x13, 0x1a, 0xf6, 0x28, 0xe5, 0x6d, 0x77, 0x3d,
+ 0xcd, 0x30, 0x63, 0xb1, 0x70, 0x52, 0xa1, 0xc5,
+ 0x94, 0x5f, 0xcf, 0xe8, 0xb8, 0x26, 0x98, 0xf7,
+ 0x06, 0xa0, 0x0a, 0x70, 0xfa, 0x03, 0x80, 0xac,
+ 0xc1, 0xec, 0xd6, 0x4c, 0x54, 0xd7, 0xfe, 0x47,
+ 0xb6, 0x88, 0x4a, 0xf7, 0x71, 0x24, 0xee, 0xf3,
+ 0xd2, 0xc2, 0x4a, 0x7f, 0xfe, 0x61, 0xc7, 0x35,
+ 0xc9, 0x37, 0x67, 0xcb, 0x24, 0x35, 0xda, 0x7e,
+ 0xca, 0x5f, 0xf3, 0x8d, 0xd4, 0x13, 0x8e, 0xd6,
+ 0xcb, 0x4d, 0x53, 0x8f, 0x53, 0x1f, 0xc0, 0x74,
+ 0xf7, 0x53, 0xb9, 0x5e, 0x23, 0x37, 0xba, 0x6e,
+ 0xe3, 0x9d, 0x07, 0x55, 0x25, 0x7b, 0xe6, 0x2a,
+ 0x64, 0xd1, 0x32, 0xdd, 0x54, 0x1b, 0x4b, 0xc0,
+ 0xe1, 0xd7, 0x69, 0x58, 0xf8, 0x93, 0x29, 0xc4,
+ 0xdd, 0x23, 0x2f, 0xa5, 0xfc, 0x9d, 0x7e, 0xf8,
+ 0xd4, 0x90, 0xcd, 0x82, 0x55, 0xdc, 0x16, 0x16,
+ 0x9f, 0x07, 0x52, 0x9b, 0x9d, 0x25, 0xed, 0x32,
+ 0xc5, 0x7b, 0xdf, 0xf6, 0x83, 0x46, 0x3d, 0x65,
+ 0xb7, 0xef, 0x87, 0x7a, 0x12, 0x69, 0x8f, 0x06,
+ 0x7c, 0x51, 0x15, 0x4a, 0x08, 0xe8, 0xac, 0x9a,
+ 0x0c, 0x24, 0xa7, 0x27, 0xd8, 0x46, 0x2f, 0xe7,
+ 0x01, 0x0e, 0x1c, 0xc6, 0x91, 0xb0, 0x6e, 0x85,
+ 0x65, 0xf0, 0x29, 0x0d, 0x2e, 0x6b, 0x3b, 0xfb,
+ 0x4b, 0xdf, 0xe4, 0x80, 0x93, 0x03, 0x66, 0x46,
+ 0x3e, 0x8a, 0x6e, 0xf3, 0x5e, 0x4d, 0x62, 0x0e,
+ 0x49, 0x05, 0xaf, 0xd4, 0xf8, 0x21, 0x20, 0x61,
+ 0x1d, 0x39, 0x17, 0xf4, 0x61, 0x47, 0x95, 0xfb,
+ 0x15, 0x2e, 0xb3, 0x4f, 0xd0, 0x5d, 0xf5, 0x7d,
+ 0x40, 0xda, 0x90, 0x3c, 0x6b, 0xcb, 0x17, 0x00,
+ 0x13, 0x3b, 0x64, 0x34, 0x1b, 0xf0, 0xf2, 0xe5,
+ 0x3b, 0xb2, 0xc7, 0xd3, 0x5f, 0x3a, 0x44, 0xa6,
+ 0x9b, 0xb7, 0x78, 0x0e, 0x42, 0x5d, 0x4c, 0xc1,
+ 0xe9, 0xd2, 0xcb, 0xb7, 0x78, 0xd1, 0xfe, 0x9a,
+ 0xb5, 0x07, 0xe9, 0xe0, 0xbe, 0xe2, 0x8a, 0xa7,
+ 0x01, 0x83, 0x00, 0x8c, 0x5c, 0x08, 0xe6, 0x63,
+ 0x12, 0x92, 0xb7, 0xb7, 0xa6, 0x19, 0x7d, 0x38,
+ 0x13, 0x38, 0x92, 0x87, 0x24, 0xf9, 0x48, 0xb3,
+ 0x5e, 0x87, 0x6a, 0x40, 0x39, 0x5c, 0x3f, 0xed,
+ 0x8f, 0xee, 0xdb, 0x15, 0x82, 0x06, 0xda, 0x49,
+ 0x21, 0x2b, 0xb5, 0xbf, 0x32, 0x7c, 0x9f, 0x42,
+ 0x28, 0x63, 0xcf, 0xaf, 0x1e, 0xf8, 0xc6, 0xa0,
+ 0xd1, 0x02, 0x43, 0x57, 0x62, 0xec, 0x9b, 0x0f,
+ 0x01, 0x9e, 0x71, 0xd8, 0x87, 0x9d, 0x01, 0xc1,
+ 0x58, 0x77, 0xd9, 0xaf, 0xb1, 0x10, 0x7e, 0xdd,
+ 0xa6, 0x50, 0x96, 0xe5, 0xf0, 0x72, 0x00, 0x6d,
+ 0x4b, 0xf8, 0x2a, 0x8f, 0x19, 0xf3, 0x22, 0x88,
+ 0x11, 0x4a, 0x8b, 0x7c, 0xfd, 0xb7, 0xed, 0xe1,
+ 0xf6, 0x40, 0x39, 0xe0, 0xe9, 0xf6, 0x3d, 0x25,
+ 0xe6, 0x74, 0x3c, 0x58, 0x57, 0x7f, 0xe1, 0x22,
+ 0x96, 0x47, 0x31, 0x91, 0xba, 0x70, 0x85, 0x28,
+ 0x6b, 0x9f, 0x6e, 0x25, 0xac, 0x23, 0x66, 0x2f,
+ 0x29, 0x88, 0x28, 0xce, 0x8c, 0x5c, 0x88, 0x53,
+ 0xd1, 0x3b, 0xcc, 0x6a, 0x51, 0xb2, 0xe1, 0x28,
+ 0x3f, 0x91, 0xb4, 0x0d, 0x00, 0x3a, 0xe3, 0xf8,
+ 0xc3, 0x8f, 0xd7, 0x96, 0x62, 0x0e, 0x2e, 0xfc,
+ 0xc8, 0x6c, 0x77, 0xa6, 0x1d, 0x22, 0xc1, 0xb8,
+ 0xe6, 0x61, 0xd7, 0x67, 0x36, 0x13, 0x7b, 0xbb,
+ 0x9b, 0x59, 0x09, 0xa6, 0xdf, 0xf7, 0x6b, 0xa3,
+ 0x40, 0x1a, 0xf5, 0x4f, 0xb4, 0xda, 0xd3, 0xf3,
+ 0x81, 0x93, 0xc6, 0x18, 0xd9, 0x26, 0xee, 0xac,
+ 0xf0, 0xaa, 0xdf, 0xc5, 0x9c, 0xca, 0xc2, 0xa2,
+ 0xcc, 0x7b, 0x5c, 0x24, 0xb0, 0xbc, 0xd0, 0x6a,
+ 0x4d, 0x89, 0x09, 0xb8, 0x07, 0xfe, 0x87, 0xad,
+ 0x0a, 0xea, 0xb8, 0x42, 0xf9, 0x5e, 0xb3, 0x3e,
+ 0x36, 0x4c, 0xaf, 0x75, 0x9e, 0x1c, 0xeb, 0xbd,
+ 0xbc, 0xbb, 0x80, 0x40, 0xa7, 0x3a, 0x30, 0xbf,
+ 0xa8, 0x44, 0xf4, 0xeb, 0x38, 0xad, 0x29, 0xba,
+ 0x23, 0xed, 0x41, 0x0c, 0xea, 0xd2, 0xbb, 0x41,
+ 0x18, 0xd6, 0xb9, 0xba, 0x65, 0x2b, 0xa3, 0x91,
+ 0x6d, 0x1f, 0xa9, 0xf4, 0xd1, 0x25, 0x8d, 0x4d,
+ 0x38, 0xff, 0x64, 0xa0, 0xec, 0xde, 0xa6, 0xb6,
+ 0x79, 0xab, 0x8e, 0x33, 0x6c, 0x47, 0xde, 0xaf,
+ 0x94, 0xa4, 0xa5, 0x86, 0x77, 0x55, 0x09, 0x92,
+ 0x81, 0x31, 0x76, 0xc7, 0x34, 0x22, 0x89, 0x8e,
+ 0x3d, 0x26, 0x26, 0xd7, 0xfc, 0x1e, 0x16, 0x72,
+ 0x13, 0x33, 0x63, 0xd5, 0x22, 0xbe, 0xb8, 0x04,
+ 0x34, 0x84, 0x41, 0xbb, 0x80, 0xd0, 0x9f, 0x46,
+ 0x48, 0x07, 0xa7, 0xfc, 0x2b, 0x3a, 0x75, 0x55,
+ 0x8c, 0xc7, 0x6a, 0xbd, 0x7e, 0x46, 0x08, 0x84,
+ 0x0f, 0xd5, 0x74, 0xc0, 0x82, 0x8e, 0xaa, 0x61,
+ 0x05, 0x01, 0xb2, 0x47, 0x6e, 0x20, 0x6a, 0x2d,
+ 0x58, 0x70, 0x48, 0x32, 0xa7, 0x37, 0xd2, 0xb8,
+ 0x82, 0x1a, 0x51, 0xb9, 0x61, 0xdd, 0xfd, 0x9d,
+ 0x6b, 0x0e, 0x18, 0x97, 0xf8, 0x45, 0x5f, 0x87,
+ 0x10, 0xcf, 0x34, 0x72, 0x45, 0x26, 0x49, 0x70,
+ 0xe7, 0xa3, 0x78, 0xe0, 0x52, 0x89, 0x84, 0x94,
+ 0x83, 0x82, 0xc2, 0x69, 0x8f, 0xe3, 0xe1, 0x3f,
+ 0x60, 0x74, 0x88, 0xc4, 0xf7, 0x75, 0x2c, 0xfb,
+ 0xbd, 0xb6, 0xc4, 0x7e, 0x10, 0x0a, 0x6c, 0x90,
+ 0x04, 0x9e, 0xc3, 0x3f, 0x59, 0x7c, 0xce, 0x31,
+ 0x18, 0x60, 0x57, 0x73, 0x46, 0x94, 0x7d, 0x06,
+ 0xa0, 0x6d, 0x44, 0xec, 0xa2, 0x0a, 0x9e, 0x05,
+ 0x15, 0xef, 0xca, 0x5c, 0xbf, 0x00, 0xeb, 0xf7,
+ 0x3d, 0x32, 0xd4, 0xa5, 0xef, 0x49, 0x89, 0x5e,
+ 0x46, 0xb0, 0xa6, 0x63, 0x5b, 0x8a, 0x73, 0xae,
+ 0x6f, 0xd5, 0x9d, 0xf8, 0x4f, 0x40, 0xb5, 0xb2,
+ 0x6e, 0xd3, 0xb6, 0x01, 0xa9, 0x26, 0xa2, 0x21,
+ 0xcf, 0x33, 0x7a, 0x3a, 0xa4, 0x23, 0x13, 0xb0,
+ 0x69, 0x6a, 0xee, 0xce, 0xd8, 0x9d, 0x01, 0x1d,
+ 0x50, 0xc1, 0x30, 0x6c, 0xb1, 0xcd, 0xa0, 0xf0,
+ 0xf0, 0xa2, 0x64, 0x6f, 0xbb, 0xbf, 0x5e, 0xe6,
+ 0xab, 0x87, 0xb4, 0x0f, 0x4f, 0x15, 0xaf, 0xb5,
+ 0x25, 0xa1, 0xb2, 0xd0, 0x80, 0x2c, 0xfb, 0xf9,
+ 0xfe, 0xd2, 0x33, 0xbb, 0x76, 0xfe, 0x7c, 0xa8,
+ 0x66, 0xf7, 0xe7, 0x85, 0x9f, 0x1f, 0x85, 0x57,
+ 0x88, 0xe1, 0xe9, 0x63, 0xe4, 0xd8, 0x1c, 0xa1,
+ 0xfb, 0xda, 0x44, 0x05, 0x2e, 0x1d, 0x3a, 0x1c,
+ 0xff, 0xc8, 0x3b, 0xc0, 0xfe, 0xda, 0x22, 0x0b,
+ 0x43, 0xd6, 0x88, 0x39, 0x4c, 0x4a, 0xa6, 0x69,
+ 0x18, 0x93, 0x42, 0x4e, 0xb5, 0xcc, 0x66, 0x0d,
+ 0x09, 0xf8, 0x1e, 0x7c, 0xd3, 0x3c, 0x99, 0x0d,
+ 0x50, 0x1d, 0x62, 0xe9, 0x57, 0x06, 0xbf, 0x19,
+ 0x88, 0xdd, 0xad, 0x7b, 0x4f, 0xf9, 0xc7, 0x82,
+ 0x6d, 0x8d, 0xc8, 0xc4, 0xc5, 0x78, 0x17, 0x20,
+ 0x15, 0xc5, 0x52, 0x41, 0xcf, 0x5b, 0xd6, 0x7f,
+ 0x94, 0x02, 0x41, 0xe0, 0x40, 0x22, 0x03, 0x5e,
+ 0xd1, 0x53, 0xd4, 0x86, 0xd3, 0x2c, 0x9f, 0x0f,
+ 0x96, 0xe3, 0x6b, 0x9a, 0x76, 0x32, 0x06, 0x47,
+ 0x4b, 0x11, 0xb3, 0xdd, 0x03, 0x65, 0xbd, 0x9b,
+ 0x01, 0xda, 0x9c, 0xb9, 0x7e, 0x3f, 0x6a, 0xc4,
+ 0x7b, 0xea, 0xd4, 0x3c, 0xb9, 0xfb, 0x5c, 0x6b,
+ 0x64, 0x33, 0x52, 0xba, 0x64, 0x78, 0x8f, 0xa4,
+ 0xaf, 0x7a, 0x61, 0x8d, 0xbc, 0xc5, 0x73, 0xe9,
+ 0x6b, 0x58, 0x97, 0x4b, 0xbf, 0x63, 0x22, 0xd3,
+ 0x37, 0x02, 0x54, 0xc5, 0xb9, 0x16, 0x4a, 0xf0,
+ 0x19, 0xd8, 0x94, 0x57, 0xb8, 0x8a, 0xb3, 0x16,
+ 0x3b, 0xd0, 0x84, 0x8e, 0x67, 0xa6, 0xa3, 0x7d,
+ 0x78, 0xec, 0x00
+};
+static const u8 dec_assoc012[] __initconst = {
+ 0xb1, 0x69, 0x83, 0x87, 0x30, 0xaa, 0x5d, 0xb8,
+ 0x77, 0xe8, 0x21, 0xff, 0x06, 0x59, 0x35, 0xce,
+ 0x75, 0xfe, 0x38, 0xef, 0xb8, 0x91, 0x43, 0x8c,
+ 0xcf, 0x70, 0xdd, 0x0a, 0x68, 0xbf, 0xd4, 0xbc,
+ 0x16, 0x76, 0x99, 0x36, 0x1e, 0x58, 0x79, 0x5e,
+ 0xd4, 0x29, 0xf7, 0x33, 0x93, 0x48, 0xdb, 0x5f,
+ 0x01, 0xae, 0x9c, 0xb6, 0xe4, 0x88, 0x6d, 0x2b,
+ 0x76, 0x75, 0xe0, 0xf3, 0x74, 0xe2, 0xc9
+};
+static const u8 dec_nonce012[] __initconst = {
+ 0x05, 0xa3, 0x93, 0xed, 0x30, 0xc5, 0xa2, 0x06
+};
+static const u8 dec_key012[] __initconst = {
+ 0xb3, 0x35, 0x50, 0x03, 0x54, 0x2e, 0x40, 0x5e,
+ 0x8f, 0x59, 0x8e, 0xc5, 0x90, 0xd5, 0x27, 0x2d,
+ 0xba, 0x29, 0x2e, 0xcb, 0x1b, 0x70, 0x44, 0x1e,
+ 0x65, 0x91, 0x6e, 0x2a, 0x79, 0x22, 0xda, 0x64
+};
+
+static const u8 dec_input013[] __initconst = {
+ 0x52, 0x34, 0xb3, 0x65, 0x3b, 0xb7, 0xe5, 0xd3,
+ 0xab, 0x49, 0x17, 0x60, 0xd2, 0x52, 0x56, 0xdf,
+ 0xdf, 0x34, 0x56, 0x82, 0xe2, 0xbe, 0xe5, 0xe1,
+ 0x28, 0xd1, 0x4e, 0x5f, 0x4f, 0x01, 0x7d, 0x3f,
+ 0x99, 0x6b, 0x30, 0x6e, 0x1a, 0x7c, 0x4c, 0x8e,
+ 0x62, 0x81, 0xae, 0x86, 0x3f, 0x6b, 0xd0, 0xb5,
+ 0xa9, 0xcf, 0x50, 0xf1, 0x02, 0x12, 0xa0, 0x0b,
+ 0x24, 0xe9, 0xe6, 0x72, 0x89, 0x2c, 0x52, 0x1b,
+ 0x34, 0x38, 0xf8, 0x75, 0x5f, 0xa0, 0x74, 0xe2,
+ 0x99, 0xdd, 0xa6, 0x4b, 0x14, 0x50, 0x4e, 0xf1,
+ 0xbe, 0xd6, 0x9e, 0xdb, 0xb2, 0x24, 0x27, 0x74,
+ 0x12, 0x4a, 0x78, 0x78, 0x17, 0xa5, 0x58, 0x8e,
+ 0x2f, 0xf9, 0xf4, 0x8d, 0xee, 0x03, 0x88, 0xae,
+ 0xb8, 0x29, 0xa1, 0x2f, 0x4b, 0xee, 0x92, 0xbd,
+ 0x87, 0xb3, 0xce, 0x34, 0x21, 0x57, 0x46, 0x04,
+ 0x49, 0x0c, 0x80, 0xf2, 0x01, 0x13, 0xa1, 0x55,
+ 0xb3, 0xff, 0x44, 0x30, 0x3c, 0x1c, 0xd0, 0xef,
+ 0xbc, 0x18, 0x74, 0x26, 0xad, 0x41, 0x5b, 0x5b,
+ 0x3e, 0x9a, 0x7a, 0x46, 0x4f, 0x16, 0xd6, 0x74,
+ 0x5a, 0xb7, 0x3a, 0x28, 0x31, 0xd8, 0xae, 0x26,
+ 0xac, 0x50, 0x53, 0x86, 0xf2, 0x56, 0xd7, 0x3f,
+ 0x29, 0xbc, 0x45, 0x68, 0x8e, 0xcb, 0x98, 0x64,
+ 0xdd, 0xc9, 0xba, 0xb8, 0x4b, 0x7b, 0x82, 0xdd,
+ 0x14, 0xa7, 0xcb, 0x71, 0x72, 0x00, 0x5c, 0xad,
+ 0x7b, 0x6a, 0x89, 0xa4, 0x3d, 0xbf, 0xb5, 0x4b,
+ 0x3e, 0x7c, 0x5a, 0xcf, 0xb8, 0xa1, 0xc5, 0x6e,
+ 0xc8, 0xb6, 0x31, 0x57, 0x7b, 0xdf, 0xa5, 0x7e,
+ 0xb1, 0xd6, 0x42, 0x2a, 0x31, 0x36, 0xd1, 0xd0,
+ 0x3f, 0x7a, 0xe5, 0x94, 0xd6, 0x36, 0xa0, 0x6f,
+ 0xb7, 0x40, 0x7d, 0x37, 0xc6, 0x55, 0x7c, 0x50,
+ 0x40, 0x6d, 0x29, 0x89, 0xe3, 0x5a, 0xae, 0x97,
+ 0xe7, 0x44, 0x49, 0x6e, 0xbd, 0x81, 0x3d, 0x03,
+ 0x93, 0x06, 0x12, 0x06, 0xe2, 0x41, 0x12, 0x4a,
+ 0xf1, 0x6a, 0xa4, 0x58, 0xa2, 0xfb, 0xd2, 0x15,
+ 0xba, 0xc9, 0x79, 0xc9, 0xce, 0x5e, 0x13, 0xbb,
+ 0xf1, 0x09, 0x04, 0xcc, 0xfd, 0xe8, 0x51, 0x34,
+ 0x6a, 0xe8, 0x61, 0x88, 0xda, 0xed, 0x01, 0x47,
+ 0x84, 0xf5, 0x73, 0x25, 0xf9, 0x1c, 0x42, 0x86,
+ 0x07, 0xf3, 0x5b, 0x1a, 0x01, 0xb3, 0xeb, 0x24,
+ 0x32, 0x8d, 0xf6, 0xed, 0x7c, 0x4b, 0xeb, 0x3c,
+ 0x36, 0x42, 0x28, 0xdf, 0xdf, 0xb6, 0xbe, 0xd9,
+ 0x8c, 0x52, 0xd3, 0x2b, 0x08, 0x90, 0x8c, 0xe7,
+ 0x98, 0x31, 0xe2, 0x32, 0x8e, 0xfc, 0x11, 0x48,
+ 0x00, 0xa8, 0x6a, 0x42, 0x4a, 0x02, 0xc6, 0x4b,
+ 0x09, 0xf1, 0xe3, 0x49, 0xf3, 0x45, 0x1f, 0x0e,
+ 0xbc, 0x56, 0xe2, 0xe4, 0xdf, 0xfb, 0xeb, 0x61,
+ 0xfa, 0x24, 0xc1, 0x63, 0x75, 0xbb, 0x47, 0x75,
+ 0xaf, 0xe1, 0x53, 0x16, 0x96, 0x21, 0x85, 0x26,
+ 0x11, 0xb3, 0x76, 0xe3, 0x23, 0xa1, 0x6b, 0x74,
+ 0x37, 0xd0, 0xde, 0x06, 0x90, 0x71, 0x5d, 0x43,
+ 0x88, 0x9b, 0x00, 0x54, 0xa6, 0x75, 0x2f, 0xa1,
+ 0xc2, 0x0b, 0x73, 0x20, 0x1d, 0xb6, 0x21, 0x79,
+ 0x57, 0x3f, 0xfa, 0x09, 0xbe, 0x8a, 0x33, 0xc3,
+ 0x52, 0xf0, 0x1d, 0x82, 0x31, 0xd1, 0x55, 0xb5,
+ 0x6c, 0x99, 0x25, 0xcf, 0x5c, 0x32, 0xce, 0xe9,
+ 0x0d, 0xfa, 0x69, 0x2c, 0xd5, 0x0d, 0xc5, 0x6d,
+ 0x86, 0xd0, 0x0c, 0x3b, 0x06, 0x50, 0x79, 0xe8,
+ 0xc3, 0xae, 0x04, 0xe6, 0xcd, 0x51, 0xe4, 0x26,
+ 0x9b, 0x4f, 0x7e, 0xa6, 0x0f, 0xab, 0xd8, 0xe5,
+ 0xde, 0xa9, 0x00, 0x95, 0xbe, 0xa3, 0x9d, 0x5d,
+ 0xb2, 0x09, 0x70, 0x18, 0x1c, 0xf0, 0xac, 0x29,
+ 0x23, 0x02, 0x29, 0x28, 0xd2, 0x74, 0x35, 0x57,
+ 0x62, 0x0f, 0x24, 0xea, 0x5e, 0x33, 0xc2, 0x92,
+ 0xf3, 0x78, 0x4d, 0x30, 0x1e, 0xa1, 0x99, 0xa9,
+ 0x82, 0xb0, 0x42, 0x31, 0x8d, 0xad, 0x8a, 0xbc,
+ 0xfc, 0xd4, 0x57, 0x47, 0x3e, 0xb4, 0x50, 0xdd,
+ 0x6e, 0x2c, 0x80, 0x4d, 0x22, 0xf1, 0xfb, 0x57,
+ 0xc4, 0xdd, 0x17, 0xe1, 0x8a, 0x36, 0x4a, 0xb3,
+ 0x37, 0xca, 0xc9, 0x4e, 0xab, 0xd5, 0x69, 0xc4,
+ 0xf4, 0xbc, 0x0b, 0x3b, 0x44, 0x4b, 0x29, 0x9c,
+ 0xee, 0xd4, 0x35, 0x22, 0x21, 0xb0, 0x1f, 0x27,
+ 0x64, 0xa8, 0x51, 0x1b, 0xf0, 0x9f, 0x19, 0x5c,
+ 0xfb, 0x5a, 0x64, 0x74, 0x70, 0x45, 0x09, 0xf5,
+ 0x64, 0xfe, 0x1a, 0x2d, 0xc9, 0x14, 0x04, 0x14,
+ 0xcf, 0xd5, 0x7d, 0x60, 0xaf, 0x94, 0x39, 0x94,
+ 0xe2, 0x7d, 0x79, 0x82, 0xd0, 0x65, 0x3b, 0x6b,
+ 0x9c, 0x19, 0x84, 0xb4, 0x6d, 0xb3, 0x0c, 0x99,
+ 0xc0, 0x56, 0xa8, 0xbd, 0x73, 0xce, 0x05, 0x84,
+ 0x3e, 0x30, 0xaa, 0xc4, 0x9b, 0x1b, 0x04, 0x2a,
+ 0x9f, 0xd7, 0x43, 0x2b, 0x23, 0xdf, 0xbf, 0xaa,
+ 0xd5, 0xc2, 0x43, 0x2d, 0x70, 0xab, 0xdc, 0x75,
+ 0xad, 0xac, 0xf7, 0xc0, 0xbe, 0x67, 0xb2, 0x74,
+ 0xed, 0x67, 0x10, 0x4a, 0x92, 0x60, 0xc1, 0x40,
+ 0x50, 0x19, 0x8a, 0x8a, 0x8c, 0x09, 0x0e, 0x72,
+ 0xe1, 0x73, 0x5e, 0xe8, 0x41, 0x85, 0x63, 0x9f,
+ 0x3f, 0xd7, 0x7d, 0xc4, 0xfb, 0x22, 0x5d, 0x92,
+ 0x6c, 0xb3, 0x1e, 0xe2, 0x50, 0x2f, 0x82, 0xa8,
+ 0x28, 0xc0, 0xb5, 0xd7, 0x5f, 0x68, 0x0d, 0x2c,
+ 0x2d, 0xaf, 0x7e, 0xfa, 0x2e, 0x08, 0x0f, 0x1f,
+ 0x70, 0x9f, 0xe9, 0x19, 0x72, 0x55, 0xf8, 0xfb,
+ 0x51, 0xd2, 0x33, 0x5d, 0xa0, 0xd3, 0x2b, 0x0a,
+ 0x6c, 0xbc, 0x4e, 0xcf, 0x36, 0x4d, 0xdc, 0x3b,
+ 0xe9, 0x3e, 0x81, 0x7c, 0x61, 0xdb, 0x20, 0x2d,
+ 0x3a, 0xc3, 0xb3, 0x0c, 0x1e, 0x00, 0xb9, 0x7c,
+ 0xf5, 0xca, 0x10, 0x5f, 0x3a, 0x71, 0xb3, 0xe4,
+ 0x20, 0xdb, 0x0c, 0x2a, 0x98, 0x63, 0x45, 0x00,
+ 0x58, 0xf6, 0x68, 0xe4, 0x0b, 0xda, 0x13, 0x3b,
+ 0x60, 0x5c, 0x76, 0xdb, 0xb9, 0x97, 0x71, 0xe4,
+ 0xd9, 0xb7, 0xdb, 0xbd, 0x68, 0xc7, 0x84, 0x84,
+ 0xaa, 0x7c, 0x68, 0x62, 0x5e, 0x16, 0xfc, 0xba,
+ 0x72, 0xaa, 0x9a, 0xa9, 0xeb, 0x7c, 0x75, 0x47,
+ 0x97, 0x7e, 0xad, 0xe2, 0xd9, 0x91, 0xe8, 0xe4,
+ 0xa5, 0x31, 0xd7, 0x01, 0x8e, 0xa2, 0x11, 0x88,
+ 0x95, 0xb9, 0xf2, 0x9b, 0xd3, 0x7f, 0x1b, 0x81,
+ 0x22, 0xf7, 0x98, 0x60, 0x0a, 0x64, 0xa6, 0xc1,
+ 0xf6, 0x49, 0xc7, 0xe3, 0x07, 0x4d, 0x94, 0x7a,
+ 0xcf, 0x6e, 0x68, 0x0c, 0x1b, 0x3f, 0x6e, 0x2e,
+ 0xee, 0x92, 0xfa, 0x52, 0xb3, 0x59, 0xf8, 0xf1,
+ 0x8f, 0x6a, 0x66, 0xa3, 0x82, 0x76, 0x4a, 0x07,
+ 0x1a, 0xc7, 0xdd, 0xf5, 0xda, 0x9c, 0x3c, 0x24,
+ 0xbf, 0xfd, 0x42, 0xa1, 0x10, 0x64, 0x6a, 0x0f,
+ 0x89, 0xee, 0x36, 0xa5, 0xce, 0x99, 0x48, 0x6a,
+ 0xf0, 0x9f, 0x9e, 0x69, 0xa4, 0x40, 0x20, 0xe9,
+ 0x16, 0x15, 0xf7, 0xdb, 0x75, 0x02, 0xcb, 0xe9,
+ 0x73, 0x8b, 0x3b, 0x49, 0x2f, 0xf0, 0xaf, 0x51,
+ 0x06, 0x5c, 0xdf, 0x27, 0x27, 0x49, 0x6a, 0xd1,
+ 0xcc, 0xc7, 0xb5, 0x63, 0xb5, 0xfc, 0xb8, 0x5c,
+ 0x87, 0x7f, 0x84, 0xb4, 0xcc, 0x14, 0xa9, 0x53,
+ 0xda, 0xa4, 0x56, 0xf8, 0xb6, 0x1b, 0xcc, 0x40,
+ 0x27, 0x52, 0x06, 0x5a, 0x13, 0x81, 0xd7, 0x3a,
+ 0xd4, 0x3b, 0xfb, 0x49, 0x65, 0x31, 0x33, 0xb2,
+ 0xfa, 0xcd, 0xad, 0x58, 0x4e, 0x2b, 0xae, 0xd2,
+ 0x20, 0xfb, 0x1a, 0x48, 0xb4, 0x3f, 0x9a, 0xd8,
+ 0x7a, 0x35, 0x4a, 0xc8, 0xee, 0x88, 0x5e, 0x07,
+ 0x66, 0x54, 0xb9, 0xec, 0x9f, 0xa3, 0xe3, 0xb9,
+ 0x37, 0xaa, 0x49, 0x76, 0x31, 0xda, 0x74, 0x2d,
+ 0x3c, 0xa4, 0x65, 0x10, 0x32, 0x38, 0xf0, 0xde,
+ 0xd3, 0x99, 0x17, 0xaa, 0x71, 0xaa, 0x8f, 0x0f,
+ 0x8c, 0xaf, 0xa2, 0xf8, 0x5d, 0x64, 0xba, 0x1d,
+ 0xa3, 0xef, 0x96, 0x73, 0xe8, 0xa1, 0x02, 0x8d,
+ 0x0c, 0x6d, 0xb8, 0x06, 0x90, 0xb8, 0x08, 0x56,
+ 0x2c, 0xa7, 0x06, 0xc9, 0xc2, 0x38, 0xdb, 0x7c,
+ 0x63, 0xb1, 0x57, 0x8e, 0xea, 0x7c, 0x79, 0xf3,
+ 0x49, 0x1d, 0xfe, 0x9f, 0xf3, 0x6e, 0xb1, 0x1d,
+ 0xba, 0x19, 0x80, 0x1a, 0x0a, 0xd3, 0xb0, 0x26,
+ 0x21, 0x40, 0xb1, 0x7c, 0xf9, 0x4d, 0x8d, 0x10,
+ 0xc1, 0x7e, 0xf4, 0xf6, 0x3c, 0xa8, 0xfd, 0x7c,
+ 0xa3, 0x92, 0xb2, 0x0f, 0xaa, 0xcc, 0xa6, 0x11,
+ 0xfe, 0x04, 0xe3, 0xd1, 0x7a, 0x32, 0x89, 0xdf,
+ 0x0d, 0xc4, 0x8f, 0x79, 0x6b, 0xca, 0x16, 0x7c,
+ 0x6e, 0xf9, 0xad, 0x0f, 0xf6, 0xfe, 0x27, 0xdb,
+ 0xc4, 0x13, 0x70, 0xf1, 0x62, 0x1a, 0x4f, 0x79,
+ 0x40, 0xc9, 0x9b, 0x8b, 0x21, 0xea, 0x84, 0xfa,
+ 0xf5, 0xf1, 0x89, 0xce, 0xb7, 0x55, 0x0a, 0x80,
+ 0x39, 0x2f, 0x55, 0x36, 0x16, 0x9c, 0x7b, 0x08,
+ 0xbd, 0x87, 0x0d, 0xa5, 0x32, 0xf1, 0x52, 0x7c,
+ 0xe8, 0x55, 0x60, 0x5b, 0xd7, 0x69, 0xe4, 0xfc,
+ 0xfa, 0x12, 0x85, 0x96, 0xea, 0x50, 0x28, 0xab,
+ 0x8a, 0xf7, 0xbb, 0x0e, 0x53, 0x74, 0xca, 0xa6,
+ 0x27, 0x09, 0xc2, 0xb5, 0xde, 0x18, 0x14, 0xd9,
+ 0xea, 0xe5, 0x29, 0x1c, 0x40, 0x56, 0xcf, 0xd7,
+ 0xae, 0x05, 0x3f, 0x65, 0xaf, 0x05, 0x73, 0xe2,
+ 0x35, 0x96, 0x27, 0x07, 0x14, 0xc0, 0xad, 0x33,
+ 0xf1, 0xdc, 0x44, 0x7a, 0x89, 0x17, 0x77, 0xd2,
+ 0x9c, 0x58, 0x60, 0xf0, 0x3f, 0x7b, 0x2d, 0x2e,
+ 0x57, 0x95, 0x54, 0x87, 0xed, 0xf2, 0xc7, 0x4c,
+ 0xf0, 0xae, 0x56, 0x29, 0x19, 0x7d, 0x66, 0x4b,
+ 0x9b, 0x83, 0x84, 0x42, 0x3b, 0x01, 0x25, 0x66,
+ 0x8e, 0x02, 0xde, 0xb9, 0x83, 0x54, 0x19, 0xf6,
+ 0x9f, 0x79, 0x0d, 0x67, 0xc5, 0x1d, 0x7a, 0x44,
+ 0x02, 0x98, 0xa7, 0x16, 0x1c, 0x29, 0x0d, 0x74,
+ 0xff, 0x85, 0x40, 0x06, 0xef, 0x2c, 0xa9, 0xc6,
+ 0xf5, 0x53, 0x07, 0x06, 0xae, 0xe4, 0xfa, 0x5f,
+ 0xd8, 0x39, 0x4d, 0xf1, 0x9b, 0x6b, 0xd9, 0x24,
+ 0x84, 0xfe, 0x03, 0x4c, 0xb2, 0x3f, 0xdf, 0xa1,
+ 0x05, 0x9e, 0x50, 0x14, 0x5a, 0xd9, 0x1a, 0xa2,
+ 0xa7, 0xfa, 0xfa, 0x17, 0xf7, 0x78, 0xd6, 0xb5,
+ 0x92, 0x61, 0x91, 0xac, 0x36, 0xfa, 0x56, 0x0d,
+ 0x38, 0x32, 0x18, 0x85, 0x08, 0x58, 0x37, 0xf0,
+ 0x4b, 0xdb, 0x59, 0xe7, 0xa4, 0x34, 0xc0, 0x1b,
+ 0x01, 0xaf, 0x2d, 0xde, 0xa1, 0xaa, 0x5d, 0xd3,
+ 0xec, 0xe1, 0xd4, 0xf7, 0xe6, 0x54, 0x68, 0xf0,
+ 0x51, 0x97, 0xa7, 0x89, 0xea, 0x24, 0xad, 0xd3,
+ 0x6e, 0x47, 0x93, 0x8b, 0x4b, 0xb4, 0xf7, 0x1c,
+ 0x42, 0x06, 0x67, 0xe8, 0x99, 0xf6, 0xf5, 0x7b,
+ 0x85, 0xb5, 0x65, 0xb5, 0xb5, 0xd2, 0x37, 0xf5,
+ 0xf3, 0x02, 0xa6, 0x4d, 0x11, 0xa7, 0xdc, 0x51,
+ 0x09, 0x7f, 0xa0, 0xd8, 0x88, 0x1c, 0x13, 0x71,
+ 0xae, 0x9c, 0xb7, 0x7b, 0x34, 0xd6, 0x4e, 0x68,
+ 0x26, 0x83, 0x51, 0xaf, 0x1d, 0xee, 0x8b, 0xbb,
+ 0x69, 0x43, 0x2b, 0x9e, 0x8a, 0xbc, 0x02, 0x0e,
+ 0xa0, 0x1b, 0xe0, 0xa8, 0x5f, 0x6f, 0xaf, 0x1b,
+ 0x8f, 0xe7, 0x64, 0x71, 0x74, 0x11, 0x7e, 0xa8,
+ 0xd8, 0xf9, 0x97, 0x06, 0xc3, 0xb6, 0xfb, 0xfb,
+ 0xb7, 0x3d, 0x35, 0x9d, 0x3b, 0x52, 0xed, 0x54,
+ 0xca, 0xf4, 0x81, 0x01, 0x2d, 0x1b, 0xc3, 0xa7,
+ 0x00, 0x3d, 0x1a, 0x39, 0x54, 0xe1, 0xf6, 0xff,
+ 0xed, 0x6f, 0x0b, 0x5a, 0x68, 0xda, 0x58, 0xdd,
+ 0xa9, 0xcf, 0x5c, 0x4a, 0xe5, 0x09, 0x4e, 0xde,
+ 0x9d, 0xbc, 0x3e, 0xee, 0x5a, 0x00, 0x3b, 0x2c,
+ 0x87, 0x10, 0x65, 0x60, 0xdd, 0xd7, 0x56, 0xd1,
+ 0x4c, 0x64, 0x45, 0xe4, 0x21, 0xec, 0x78, 0xf8,
+ 0x25, 0x7a, 0x3e, 0x16, 0x5d, 0x09, 0x53, 0x14,
+ 0xbe, 0x4f, 0xae, 0x87, 0xd8, 0xd1, 0xaa, 0x3c,
+ 0xf6, 0x3e, 0xa4, 0x70, 0x8c, 0x5e, 0x70, 0xa4,
+ 0xb3, 0x6b, 0x66, 0x73, 0xd3, 0xbf, 0x31, 0x06,
+ 0x19, 0x62, 0x93, 0x15, 0xf2, 0x86, 0xe4, 0x52,
+ 0x7e, 0x53, 0x4c, 0x12, 0x38, 0xcc, 0x34, 0x7d,
+ 0x57, 0xf6, 0x42, 0x93, 0x8a, 0xc4, 0xee, 0x5c,
+ 0x8a, 0xe1, 0x52, 0x8f, 0x56, 0x64, 0xf6, 0xa6,
+ 0xd1, 0x91, 0x57, 0x70, 0xcd, 0x11, 0x76, 0xf5,
+ 0x59, 0x60, 0x60, 0x3c, 0xc1, 0xc3, 0x0b, 0x7f,
+ 0x58, 0x1a, 0x50, 0x91, 0xf1, 0x68, 0x8f, 0x6e,
+ 0x74, 0x74, 0xa8, 0x51, 0x0b, 0xf7, 0x7a, 0x98,
+ 0x37, 0xf2, 0x0a, 0x0e, 0xa4, 0x97, 0x04, 0xb8,
+ 0x9b, 0xfd, 0xa0, 0xea, 0xf7, 0x0d, 0xe1, 0xdb,
+ 0x03, 0xf0, 0x31, 0x29, 0xf8, 0xdd, 0x6b, 0x8b,
+ 0x5d, 0xd8, 0x59, 0xa9, 0x29, 0xcf, 0x9a, 0x79,
+ 0x89, 0x19, 0x63, 0x46, 0x09, 0x79, 0x6a, 0x11,
+ 0xda, 0x63, 0x68, 0x48, 0x77, 0x23, 0xfb, 0x7d,
+ 0x3a, 0x43, 0xcb, 0x02, 0x3b, 0x7a, 0x6d, 0x10,
+ 0x2a, 0x9e, 0xac, 0xf1, 0xd4, 0x19, 0xf8, 0x23,
+ 0x64, 0x1d, 0x2c, 0x5f, 0xf2, 0xb0, 0x5c, 0x23,
+ 0x27, 0xf7, 0x27, 0x30, 0x16, 0x37, 0xb1, 0x90,
+ 0xab, 0x38, 0xfb, 0x55, 0xcd, 0x78, 0x58, 0xd4,
+ 0x7d, 0x43, 0xf6, 0x45, 0x5e, 0x55, 0x8d, 0xb1,
+ 0x02, 0x65, 0x58, 0xb4, 0x13, 0x4b, 0x36, 0xf7,
+ 0xcc, 0xfe, 0x3d, 0x0b, 0x82, 0xe2, 0x12, 0x11,
+ 0xbb, 0xe6, 0xb8, 0x3a, 0x48, 0x71, 0xc7, 0x50,
+ 0x06, 0x16, 0x3a, 0xe6, 0x7c, 0x05, 0xc7, 0xc8,
+ 0x4d, 0x2f, 0x08, 0x6a, 0x17, 0x9a, 0x95, 0x97,
+ 0x50, 0x68, 0xdc, 0x28, 0x18, 0xc4, 0x61, 0x38,
+ 0xb9, 0xe0, 0x3e, 0x78, 0xdb, 0x29, 0xe0, 0x9f,
+ 0x52, 0xdd, 0xf8, 0x4f, 0x91, 0xc1, 0xd0, 0x33,
+ 0xa1, 0x7a, 0x8e, 0x30, 0x13, 0x82, 0x07, 0x9f,
+ 0xd3, 0x31, 0x0f, 0x23, 0xbe, 0x32, 0x5a, 0x75,
+ 0xcf, 0x96, 0xb2, 0xec, 0xb5, 0x32, 0xac, 0x21,
+ 0xd1, 0x82, 0x33, 0xd3, 0x15, 0x74, 0xbd, 0x90,
+ 0xf1, 0x2c, 0xe6, 0x5f, 0x8d, 0xe3, 0x02, 0xe8,
+ 0xe9, 0xc4, 0xca, 0x96, 0xeb, 0x0e, 0xbc, 0x91,
+ 0xf4, 0xb9, 0xea, 0xd9, 0x1b, 0x75, 0xbd, 0xe1,
+ 0xac, 0x2a, 0x05, 0x37, 0x52, 0x9b, 0x1b, 0x3f,
+ 0x5a, 0xdc, 0x21, 0xc3, 0x98, 0xbb, 0xaf, 0xa3,
+ 0xf2, 0x00, 0xbf, 0x0d, 0x30, 0x89, 0x05, 0xcc,
+ 0xa5, 0x76, 0xf5, 0x06, 0xf0, 0xc6, 0x54, 0x8a,
+ 0x5d, 0xd4, 0x1e, 0xc1, 0xf2, 0xce, 0xb0, 0x62,
+ 0xc8, 0xfc, 0x59, 0x42, 0x9a, 0x90, 0x60, 0x55,
+ 0xfe, 0x88, 0xa5, 0x8b, 0xb8, 0x33, 0x0c, 0x23,
+ 0x24, 0x0d, 0x15, 0x70, 0x37, 0x1e, 0x3d, 0xf6,
+ 0xd2, 0xea, 0x92, 0x10, 0xb2, 0xc4, 0x51, 0xac,
+ 0xf2, 0xac, 0xf3, 0x6b, 0x6c, 0xaa, 0xcf, 0x12,
+ 0xc5, 0x6c, 0x90, 0x50, 0xb5, 0x0c, 0xfc, 0x1a,
+ 0x15, 0x52, 0xe9, 0x26, 0xc6, 0x52, 0xa4, 0xe7,
+ 0x81, 0x69, 0xe1, 0xe7, 0x9e, 0x30, 0x01, 0xec,
+ 0x84, 0x89, 0xb2, 0x0d, 0x66, 0xdd, 0xce, 0x28,
+ 0x5c, 0xec, 0x98, 0x46, 0x68, 0x21, 0x9f, 0x88,
+ 0x3f, 0x1f, 0x42, 0x77, 0xce, 0xd0, 0x61, 0xd4,
+ 0x20, 0xa7, 0xff, 0x53, 0xad, 0x37, 0xd0, 0x17,
+ 0x35, 0xc9, 0xfc, 0xba, 0x0a, 0x78, 0x3f, 0xf2,
+ 0xcc, 0x86, 0x89, 0xe8, 0x4b, 0x3c, 0x48, 0x33,
+ 0x09, 0x7f, 0xc6, 0xc0, 0xdd, 0xb8, 0xfd, 0x7a,
+ 0x66, 0x66, 0x65, 0xeb, 0x47, 0xa7, 0x04, 0x28,
+ 0xa3, 0x19, 0x8e, 0xa9, 0xb1, 0x13, 0x67, 0x62,
+ 0x70, 0xcf, 0xd7
+};
+static const u8 dec_output013[] __initconst = {
+ 0x74, 0xa6, 0x3e, 0xe4, 0xb1, 0xcb, 0xaf, 0xb0,
+ 0x40, 0xe5, 0x0f, 0x9e, 0xf1, 0xf2, 0x89, 0xb5,
+ 0x42, 0x34, 0x8a, 0xa1, 0x03, 0xb7, 0xe9, 0x57,
+ 0x46, 0xbe, 0x20, 0xe4, 0x6e, 0xb0, 0xeb, 0xff,
+ 0xea, 0x07, 0x7e, 0xef, 0xe2, 0x55, 0x9f, 0xe5,
+ 0x78, 0x3a, 0xb7, 0x83, 0xc2, 0x18, 0x40, 0x7b,
+ 0xeb, 0xcd, 0x81, 0xfb, 0x90, 0x12, 0x9e, 0x46,
+ 0xa9, 0xd6, 0x4a, 0xba, 0xb0, 0x62, 0xdb, 0x6b,
+ 0x99, 0xc4, 0xdb, 0x54, 0x4b, 0xb8, 0xa5, 0x71,
+ 0xcb, 0xcd, 0x63, 0x32, 0x55, 0xfb, 0x31, 0xf0,
+ 0x38, 0xf5, 0xbe, 0x78, 0xe4, 0x45, 0xce, 0x1b,
+ 0x6a, 0x5b, 0x0e, 0xf4, 0x16, 0xe4, 0xb1, 0x3d,
+ 0xf6, 0x63, 0x7b, 0xa7, 0x0c, 0xde, 0x6f, 0x8f,
+ 0x74, 0xdf, 0xe0, 0x1e, 0x9d, 0xce, 0x8f, 0x24,
+ 0xef, 0x23, 0x35, 0x33, 0x7b, 0x83, 0x34, 0x23,
+ 0x58, 0x74, 0x14, 0x77, 0x1f, 0xc2, 0x4f, 0x4e,
+ 0xc6, 0x89, 0xf9, 0x52, 0x09, 0x37, 0x64, 0x14,
+ 0xc4, 0x01, 0x6b, 0x9d, 0x77, 0xe8, 0x90, 0x5d,
+ 0xa8, 0x4a, 0x2a, 0xef, 0x5c, 0x7f, 0xeb, 0xbb,
+ 0xb2, 0xc6, 0x93, 0x99, 0x66, 0xdc, 0x7f, 0xd4,
+ 0x9e, 0x2a, 0xca, 0x8d, 0xdb, 0xe7, 0x20, 0xcf,
+ 0xe4, 0x73, 0xae, 0x49, 0x7d, 0x64, 0x0f, 0x0e,
+ 0x28, 0x46, 0xa9, 0xa8, 0x32, 0xe4, 0x0e, 0xf6,
+ 0x51, 0x53, 0xb8, 0x3c, 0xb1, 0xff, 0xa3, 0x33,
+ 0x41, 0x75, 0xff, 0xf1, 0x6f, 0xf1, 0xfb, 0xbb,
+ 0x83, 0x7f, 0x06, 0x9b, 0xe7, 0x1b, 0x0a, 0xe0,
+ 0x5c, 0x33, 0x60, 0x5b, 0xdb, 0x5b, 0xed, 0xfe,
+ 0xa5, 0x16, 0x19, 0x72, 0xa3, 0x64, 0x23, 0x00,
+ 0x02, 0xc7, 0xf3, 0x6a, 0x81, 0x3e, 0x44, 0x1d,
+ 0x79, 0x15, 0x5f, 0x9a, 0xde, 0xe2, 0xfd, 0x1b,
+ 0x73, 0xc1, 0xbc, 0x23, 0xba, 0x31, 0xd2, 0x50,
+ 0xd5, 0xad, 0x7f, 0x74, 0xa7, 0xc9, 0xf8, 0x3e,
+ 0x2b, 0x26, 0x10, 0xf6, 0x03, 0x36, 0x74, 0xe4,
+ 0x0e, 0x6a, 0x72, 0xb7, 0x73, 0x0a, 0x42, 0x28,
+ 0xc2, 0xad, 0x5e, 0x03, 0xbe, 0xb8, 0x0b, 0xa8,
+ 0x5b, 0xd4, 0xb8, 0xba, 0x52, 0x89, 0xb1, 0x9b,
+ 0xc1, 0xc3, 0x65, 0x87, 0xed, 0xa5, 0xf4, 0x86,
+ 0xfd, 0x41, 0x80, 0x91, 0x27, 0x59, 0x53, 0x67,
+ 0x15, 0x78, 0x54, 0x8b, 0x2d, 0x3d, 0xc7, 0xff,
+ 0x02, 0x92, 0x07, 0x5f, 0x7a, 0x4b, 0x60, 0x59,
+ 0x3c, 0x6f, 0x5c, 0xd8, 0xec, 0x95, 0xd2, 0xfe,
+ 0xa0, 0x3b, 0xd8, 0x3f, 0xd1, 0x69, 0xa6, 0xd6,
+ 0x41, 0xb2, 0xf4, 0x4d, 0x12, 0xf4, 0x58, 0x3e,
+ 0x66, 0x64, 0x80, 0x31, 0x9b, 0xa8, 0x4c, 0x8b,
+ 0x07, 0xb2, 0xec, 0x66, 0x94, 0x66, 0x47, 0x50,
+ 0x50, 0x5f, 0x18, 0x0b, 0x0e, 0xd6, 0xc0, 0x39,
+ 0x21, 0x13, 0x9e, 0x33, 0xbc, 0x79, 0x36, 0x02,
+ 0x96, 0x70, 0xf0, 0x48, 0x67, 0x2f, 0x26, 0xe9,
+ 0x6d, 0x10, 0xbb, 0xd6, 0x3f, 0xd1, 0x64, 0x7a,
+ 0x2e, 0xbe, 0x0c, 0x61, 0xf0, 0x75, 0x42, 0x38,
+ 0x23, 0xb1, 0x9e, 0x9f, 0x7c, 0x67, 0x66, 0xd9,
+ 0x58, 0x9a, 0xf1, 0xbb, 0x41, 0x2a, 0x8d, 0x65,
+ 0x84, 0x94, 0xfc, 0xdc, 0x6a, 0x50, 0x64, 0xdb,
+ 0x56, 0x33, 0x76, 0x00, 0x10, 0xed, 0xbe, 0xd2,
+ 0x12, 0xf6, 0xf6, 0x1b, 0xa2, 0x16, 0xde, 0xae,
+ 0x31, 0x95, 0xdd, 0xb1, 0x08, 0x7e, 0x4e, 0xee,
+ 0xe7, 0xf9, 0xa5, 0xfb, 0x5b, 0x61, 0x43, 0x00,
+ 0x40, 0xf6, 0x7e, 0x02, 0x04, 0x32, 0x4e, 0x0c,
+ 0xe2, 0x66, 0x0d, 0xd7, 0x07, 0x98, 0x0e, 0xf8,
+ 0x72, 0x34, 0x6d, 0x95, 0x86, 0xd7, 0xcb, 0x31,
+ 0x54, 0x47, 0xd0, 0x38, 0x29, 0x9c, 0x5a, 0x68,
+ 0xd4, 0x87, 0x76, 0xc9, 0xe7, 0x7e, 0xe3, 0xf4,
+ 0x81, 0x6d, 0x18, 0xcb, 0xc9, 0x05, 0xaf, 0xa0,
+ 0xfb, 0x66, 0xf7, 0xf1, 0x1c, 0xc6, 0x14, 0x11,
+ 0x4f, 0x2b, 0x79, 0x42, 0x8b, 0xbc, 0xac, 0xe7,
+ 0x6c, 0xfe, 0x0f, 0x58, 0xe7, 0x7c, 0x78, 0x39,
+ 0x30, 0xb0, 0x66, 0x2c, 0x9b, 0x6d, 0x3a, 0xe1,
+ 0xcf, 0xc9, 0xa4, 0x0e, 0x6d, 0x6d, 0x8a, 0xa1,
+ 0x3a, 0xe7, 0x28, 0xd4, 0x78, 0x4c, 0xa6, 0xa2,
+ 0x2a, 0xa6, 0x03, 0x30, 0xd7, 0xa8, 0x25, 0x66,
+ 0x87, 0x2f, 0x69, 0x5c, 0x4e, 0xdd, 0xa5, 0x49,
+ 0x5d, 0x37, 0x4a, 0x59, 0xc4, 0xaf, 0x1f, 0xa2,
+ 0xe4, 0xf8, 0xa6, 0x12, 0x97, 0xd5, 0x79, 0xf5,
+ 0xe2, 0x4a, 0x2b, 0x5f, 0x61, 0xe4, 0x9e, 0xe3,
+ 0xee, 0xb8, 0xa7, 0x5b, 0x2f, 0xf4, 0x9e, 0x6c,
+ 0xfb, 0xd1, 0xc6, 0x56, 0x77, 0xba, 0x75, 0xaa,
+ 0x3d, 0x1a, 0xa8, 0x0b, 0xb3, 0x68, 0x24, 0x00,
+ 0x10, 0x7f, 0xfd, 0xd7, 0xa1, 0x8d, 0x83, 0x54,
+ 0x4f, 0x1f, 0xd8, 0x2a, 0xbe, 0x8a, 0x0c, 0x87,
+ 0xab, 0xa2, 0xde, 0xc3, 0x39, 0xbf, 0x09, 0x03,
+ 0xa5, 0xf3, 0x05, 0x28, 0xe1, 0xe1, 0xee, 0x39,
+ 0x70, 0x9c, 0xd8, 0x81, 0x12, 0x1e, 0x02, 0x40,
+ 0xd2, 0x6e, 0xf0, 0xeb, 0x1b, 0x3d, 0x22, 0xc6,
+ 0xe5, 0xe3, 0xb4, 0x5a, 0x98, 0xbb, 0xf0, 0x22,
+ 0x28, 0x8d, 0xe5, 0xd3, 0x16, 0x48, 0x24, 0xa5,
+ 0xe6, 0x66, 0x0c, 0xf9, 0x08, 0xf9, 0x7e, 0x1e,
+ 0xe1, 0x28, 0x26, 0x22, 0xc7, 0xc7, 0x0a, 0x32,
+ 0x47, 0xfa, 0xa3, 0xbe, 0x3c, 0xc4, 0xc5, 0x53,
+ 0x0a, 0xd5, 0x94, 0x4a, 0xd7, 0x93, 0xd8, 0x42,
+ 0x99, 0xb9, 0x0a, 0xdb, 0x56, 0xf7, 0xb9, 0x1c,
+ 0x53, 0x4f, 0xfa, 0xd3, 0x74, 0xad, 0xd9, 0x68,
+ 0xf1, 0x1b, 0xdf, 0x61, 0xc6, 0x5e, 0xa8, 0x48,
+ 0xfc, 0xd4, 0x4a, 0x4c, 0x3c, 0x32, 0xf7, 0x1c,
+ 0x96, 0x21, 0x9b, 0xf9, 0xa3, 0xcc, 0x5a, 0xce,
+ 0xd5, 0xd7, 0x08, 0x24, 0xf6, 0x1c, 0xfd, 0xdd,
+ 0x38, 0xc2, 0x32, 0xe9, 0xb8, 0xe7, 0xb6, 0xfa,
+ 0x9d, 0x45, 0x13, 0x2c, 0x83, 0xfd, 0x4a, 0x69,
+ 0x82, 0xcd, 0xdc, 0xb3, 0x76, 0x0c, 0x9e, 0xd8,
+ 0xf4, 0x1b, 0x45, 0x15, 0xb4, 0x97, 0xe7, 0x58,
+ 0x34, 0xe2, 0x03, 0x29, 0x5a, 0xbf, 0xb6, 0xe0,
+ 0x5d, 0x13, 0xd9, 0x2b, 0xb4, 0x80, 0xb2, 0x45,
+ 0x81, 0x6a, 0x2e, 0x6c, 0x89, 0x7d, 0xee, 0xbb,
+ 0x52, 0xdd, 0x1f, 0x18, 0xe7, 0x13, 0x6b, 0x33,
+ 0x0e, 0xea, 0x36, 0x92, 0x77, 0x7b, 0x6d, 0x9c,
+ 0x5a, 0x5f, 0x45, 0x7b, 0x7b, 0x35, 0x62, 0x23,
+ 0xd1, 0xbf, 0x0f, 0xd0, 0x08, 0x1b, 0x2b, 0x80,
+ 0x6b, 0x7e, 0xf1, 0x21, 0x47, 0xb0, 0x57, 0xd1,
+ 0x98, 0x72, 0x90, 0x34, 0x1c, 0x20, 0x04, 0xff,
+ 0x3d, 0x5c, 0xee, 0x0e, 0x57, 0x5f, 0x6f, 0x24,
+ 0x4e, 0x3c, 0xea, 0xfc, 0xa5, 0xa9, 0x83, 0xc9,
+ 0x61, 0xb4, 0x51, 0x24, 0xf8, 0x27, 0x5e, 0x46,
+ 0x8c, 0xb1, 0x53, 0x02, 0x96, 0x35, 0xba, 0xb8,
+ 0x4c, 0x71, 0xd3, 0x15, 0x59, 0x35, 0x22, 0x20,
+ 0xad, 0x03, 0x9f, 0x66, 0x44, 0x3b, 0x9c, 0x35,
+ 0x37, 0x1f, 0x9b, 0xbb, 0xf3, 0xdb, 0x35, 0x63,
+ 0x30, 0x64, 0xaa, 0xa2, 0x06, 0xa8, 0x5d, 0xbb,
+ 0xe1, 0x9f, 0x70, 0xec, 0x82, 0x11, 0x06, 0x36,
+ 0xec, 0x8b, 0x69, 0x66, 0x24, 0x44, 0xc9, 0x4a,
+ 0x57, 0xbb, 0x9b, 0x78, 0x13, 0xce, 0x9c, 0x0c,
+ 0xba, 0x92, 0x93, 0x63, 0xb8, 0xe2, 0x95, 0x0f,
+ 0x0f, 0x16, 0x39, 0x52, 0xfd, 0x3a, 0x6d, 0x02,
+ 0x4b, 0xdf, 0x13, 0xd3, 0x2a, 0x22, 0xb4, 0x03,
+ 0x7c, 0x54, 0x49, 0x96, 0x68, 0x54, 0x10, 0xfa,
+ 0xef, 0xaa, 0x6c, 0xe8, 0x22, 0xdc, 0x71, 0x16,
+ 0x13, 0x1a, 0xf6, 0x28, 0xe5, 0x6d, 0x77, 0x3d,
+ 0xcd, 0x30, 0x63, 0xb1, 0x70, 0x52, 0xa1, 0xc5,
+ 0x94, 0x5f, 0xcf, 0xe8, 0xb8, 0x26, 0x98, 0xf7,
+ 0x06, 0xa0, 0x0a, 0x70, 0xfa, 0x03, 0x80, 0xac,
+ 0xc1, 0xec, 0xd6, 0x4c, 0x54, 0xd7, 0xfe, 0x47,
+ 0xb6, 0x88, 0x4a, 0xf7, 0x71, 0x24, 0xee, 0xf3,
+ 0xd2, 0xc2, 0x4a, 0x7f, 0xfe, 0x61, 0xc7, 0x35,
+ 0xc9, 0x37, 0x67, 0xcb, 0x24, 0x35, 0xda, 0x7e,
+ 0xca, 0x5f, 0xf3, 0x8d, 0xd4, 0x13, 0x8e, 0xd6,
+ 0xcb, 0x4d, 0x53, 0x8f, 0x53, 0x1f, 0xc0, 0x74,
+ 0xf7, 0x53, 0xb9, 0x5e, 0x23, 0x37, 0xba, 0x6e,
+ 0xe3, 0x9d, 0x07, 0x55, 0x25, 0x7b, 0xe6, 0x2a,
+ 0x64, 0xd1, 0x32, 0xdd, 0x54, 0x1b, 0x4b, 0xc0,
+ 0xe1, 0xd7, 0x69, 0x58, 0xf8, 0x93, 0x29, 0xc4,
+ 0xdd, 0x23, 0x2f, 0xa5, 0xfc, 0x9d, 0x7e, 0xf8,
+ 0xd4, 0x90, 0xcd, 0x82, 0x55, 0xdc, 0x16, 0x16,
+ 0x9f, 0x07, 0x52, 0x9b, 0x9d, 0x25, 0xed, 0x32,
+ 0xc5, 0x7b, 0xdf, 0xf6, 0x83, 0x46, 0x3d, 0x65,
+ 0xb7, 0xef, 0x87, 0x7a, 0x12, 0x69, 0x8f, 0x06,
+ 0x7c, 0x51, 0x15, 0x4a, 0x08, 0xe8, 0xac, 0x9a,
+ 0x0c, 0x24, 0xa7, 0x27, 0xd8, 0x46, 0x2f, 0xe7,
+ 0x01, 0x0e, 0x1c, 0xc6, 0x91, 0xb0, 0x6e, 0x85,
+ 0x65, 0xf0, 0x29, 0x0d, 0x2e, 0x6b, 0x3b, 0xfb,
+ 0x4b, 0xdf, 0xe4, 0x80, 0x93, 0x03, 0x66, 0x46,
+ 0x3e, 0x8a, 0x6e, 0xf3, 0x5e, 0x4d, 0x62, 0x0e,
+ 0x49, 0x05, 0xaf, 0xd4, 0xf8, 0x21, 0x20, 0x61,
+ 0x1d, 0x39, 0x17, 0xf4, 0x61, 0x47, 0x95, 0xfb,
+ 0x15, 0x2e, 0xb3, 0x4f, 0xd0, 0x5d, 0xf5, 0x7d,
+ 0x40, 0xda, 0x90, 0x3c, 0x6b, 0xcb, 0x17, 0x00,
+ 0x13, 0x3b, 0x64, 0x34, 0x1b, 0xf0, 0xf2, 0xe5,
+ 0x3b, 0xb2, 0xc7, 0xd3, 0x5f, 0x3a, 0x44, 0xa6,
+ 0x9b, 0xb7, 0x78, 0x0e, 0x42, 0x5d, 0x4c, 0xc1,
+ 0xe9, 0xd2, 0xcb, 0xb7, 0x78, 0xd1, 0xfe, 0x9a,
+ 0xb5, 0x07, 0xe9, 0xe0, 0xbe, 0xe2, 0x8a, 0xa7,
+ 0x01, 0x83, 0x00, 0x8c, 0x5c, 0x08, 0xe6, 0x63,
+ 0x12, 0x92, 0xb7, 0xb7, 0xa6, 0x19, 0x7d, 0x38,
+ 0x13, 0x38, 0x92, 0x87, 0x24, 0xf9, 0x48, 0xb3,
+ 0x5e, 0x87, 0x6a, 0x40, 0x39, 0x5c, 0x3f, 0xed,
+ 0x8f, 0xee, 0xdb, 0x15, 0x82, 0x06, 0xda, 0x49,
+ 0x21, 0x2b, 0xb5, 0xbf, 0x32, 0x7c, 0x9f, 0x42,
+ 0x28, 0x63, 0xcf, 0xaf, 0x1e, 0xf8, 0xc6, 0xa0,
+ 0xd1, 0x02, 0x43, 0x57, 0x62, 0xec, 0x9b, 0x0f,
+ 0x01, 0x9e, 0x71, 0xd8, 0x87, 0x9d, 0x01, 0xc1,
+ 0x58, 0x77, 0xd9, 0xaf, 0xb1, 0x10, 0x7e, 0xdd,
+ 0xa6, 0x50, 0x96, 0xe5, 0xf0, 0x72, 0x00, 0x6d,
+ 0x4b, 0xf8, 0x2a, 0x8f, 0x19, 0xf3, 0x22, 0x88,
+ 0x11, 0x4a, 0x8b, 0x7c, 0xfd, 0xb7, 0xed, 0xe1,
+ 0xf6, 0x40, 0x39, 0xe0, 0xe9, 0xf6, 0x3d, 0x25,
+ 0xe6, 0x74, 0x3c, 0x58, 0x57, 0x7f, 0xe1, 0x22,
+ 0x96, 0x47, 0x31, 0x91, 0xba, 0x70, 0x85, 0x28,
+ 0x6b, 0x9f, 0x6e, 0x25, 0xac, 0x23, 0x66, 0x2f,
+ 0x29, 0x88, 0x28, 0xce, 0x8c, 0x5c, 0x88, 0x53,
+ 0xd1, 0x3b, 0xcc, 0x6a, 0x51, 0xb2, 0xe1, 0x28,
+ 0x3f, 0x91, 0xb4, 0x0d, 0x00, 0x3a, 0xe3, 0xf8,
+ 0xc3, 0x8f, 0xd7, 0x96, 0x62, 0x0e, 0x2e, 0xfc,
+ 0xc8, 0x6c, 0x77, 0xa6, 0x1d, 0x22, 0xc1, 0xb8,
+ 0xe6, 0x61, 0xd7, 0x67, 0x36, 0x13, 0x7b, 0xbb,
+ 0x9b, 0x59, 0x09, 0xa6, 0xdf, 0xf7, 0x6b, 0xa3,
+ 0x40, 0x1a, 0xf5, 0x4f, 0xb4, 0xda, 0xd3, 0xf3,
+ 0x81, 0x93, 0xc6, 0x18, 0xd9, 0x26, 0xee, 0xac,
+ 0xf0, 0xaa, 0xdf, 0xc5, 0x9c, 0xca, 0xc2, 0xa2,
+ 0xcc, 0x7b, 0x5c, 0x24, 0xb0, 0xbc, 0xd0, 0x6a,
+ 0x4d, 0x89, 0x09, 0xb8, 0x07, 0xfe, 0x87, 0xad,
+ 0x0a, 0xea, 0xb8, 0x42, 0xf9, 0x5e, 0xb3, 0x3e,
+ 0x36, 0x4c, 0xaf, 0x75, 0x9e, 0x1c, 0xeb, 0xbd,
+ 0xbc, 0xbb, 0x80, 0x40, 0xa7, 0x3a, 0x30, 0xbf,
+ 0xa8, 0x44, 0xf4, 0xeb, 0x38, 0xad, 0x29, 0xba,
+ 0x23, 0xed, 0x41, 0x0c, 0xea, 0xd2, 0xbb, 0x41,
+ 0x18, 0xd6, 0xb9, 0xba, 0x65, 0x2b, 0xa3, 0x91,
+ 0x6d, 0x1f, 0xa9, 0xf4, 0xd1, 0x25, 0x8d, 0x4d,
+ 0x38, 0xff, 0x64, 0xa0, 0xec, 0xde, 0xa6, 0xb6,
+ 0x79, 0xab, 0x8e, 0x33, 0x6c, 0x47, 0xde, 0xaf,
+ 0x94, 0xa4, 0xa5, 0x86, 0x77, 0x55, 0x09, 0x92,
+ 0x81, 0x31, 0x76, 0xc7, 0x34, 0x22, 0x89, 0x8e,
+ 0x3d, 0x26, 0x26, 0xd7, 0xfc, 0x1e, 0x16, 0x72,
+ 0x13, 0x33, 0x63, 0xd5, 0x22, 0xbe, 0xb8, 0x04,
+ 0x34, 0x84, 0x41, 0xbb, 0x80, 0xd0, 0x9f, 0x46,
+ 0x48, 0x07, 0xa7, 0xfc, 0x2b, 0x3a, 0x75, 0x55,
+ 0x8c, 0xc7, 0x6a, 0xbd, 0x7e, 0x46, 0x08, 0x84,
+ 0x0f, 0xd5, 0x74, 0xc0, 0x82, 0x8e, 0xaa, 0x61,
+ 0x05, 0x01, 0xb2, 0x47, 0x6e, 0x20, 0x6a, 0x2d,
+ 0x58, 0x70, 0x48, 0x32, 0xa7, 0x37, 0xd2, 0xb8,
+ 0x82, 0x1a, 0x51, 0xb9, 0x61, 0xdd, 0xfd, 0x9d,
+ 0x6b, 0x0e, 0x18, 0x97, 0xf8, 0x45, 0x5f, 0x87,
+ 0x10, 0xcf, 0x34, 0x72, 0x45, 0x26, 0x49, 0x70,
+ 0xe7, 0xa3, 0x78, 0xe0, 0x52, 0x89, 0x84, 0x94,
+ 0x83, 0x82, 0xc2, 0x69, 0x8f, 0xe3, 0xe1, 0x3f,
+ 0x60, 0x74, 0x88, 0xc4, 0xf7, 0x75, 0x2c, 0xfb,
+ 0xbd, 0xb6, 0xc4, 0x7e, 0x10, 0x0a, 0x6c, 0x90,
+ 0x04, 0x9e, 0xc3, 0x3f, 0x59, 0x7c, 0xce, 0x31,
+ 0x18, 0x60, 0x57, 0x73, 0x46, 0x94, 0x7d, 0x06,
+ 0xa0, 0x6d, 0x44, 0xec, 0xa2, 0x0a, 0x9e, 0x05,
+ 0x15, 0xef, 0xca, 0x5c, 0xbf, 0x00, 0xeb, 0xf7,
+ 0x3d, 0x32, 0xd4, 0xa5, 0xef, 0x49, 0x89, 0x5e,
+ 0x46, 0xb0, 0xa6, 0x63, 0x5b, 0x8a, 0x73, 0xae,
+ 0x6f, 0xd5, 0x9d, 0xf8, 0x4f, 0x40, 0xb5, 0xb2,
+ 0x6e, 0xd3, 0xb6, 0x01, 0xa9, 0x26, 0xa2, 0x21,
+ 0xcf, 0x33, 0x7a, 0x3a, 0xa4, 0x23, 0x13, 0xb0,
+ 0x69, 0x6a, 0xee, 0xce, 0xd8, 0x9d, 0x01, 0x1d,
+ 0x50, 0xc1, 0x30, 0x6c, 0xb1, 0xcd, 0xa0, 0xf0,
+ 0xf0, 0xa2, 0x64, 0x6f, 0xbb, 0xbf, 0x5e, 0xe6,
+ 0xab, 0x87, 0xb4, 0x0f, 0x4f, 0x15, 0xaf, 0xb5,
+ 0x25, 0xa1, 0xb2, 0xd0, 0x80, 0x2c, 0xfb, 0xf9,
+ 0xfe, 0xd2, 0x33, 0xbb, 0x76, 0xfe, 0x7c, 0xa8,
+ 0x66, 0xf7, 0xe7, 0x85, 0x9f, 0x1f, 0x85, 0x57,
+ 0x88, 0xe1, 0xe9, 0x63, 0xe4, 0xd8, 0x1c, 0xa1,
+ 0xfb, 0xda, 0x44, 0x05, 0x2e, 0x1d, 0x3a, 0x1c,
+ 0xff, 0xc8, 0x3b, 0xc0, 0xfe, 0xda, 0x22, 0x0b,
+ 0x43, 0xd6, 0x88, 0x39, 0x4c, 0x4a, 0xa6, 0x69,
+ 0x18, 0x93, 0x42, 0x4e, 0xb5, 0xcc, 0x66, 0x0d,
+ 0x09, 0xf8, 0x1e, 0x7c, 0xd3, 0x3c, 0x99, 0x0d,
+ 0x50, 0x1d, 0x62, 0xe9, 0x57, 0x06, 0xbf, 0x19,
+ 0x88, 0xdd, 0xad, 0x7b, 0x4f, 0xf9, 0xc7, 0x82,
+ 0x6d, 0x8d, 0xc8, 0xc4, 0xc5, 0x78, 0x17, 0x20,
+ 0x15, 0xc5, 0x52, 0x41, 0xcf, 0x5b, 0xd6, 0x7f,
+ 0x94, 0x02, 0x41, 0xe0, 0x40, 0x22, 0x03, 0x5e,
+ 0xd1, 0x53, 0xd4, 0x86, 0xd3, 0x2c, 0x9f, 0x0f,
+ 0x96, 0xe3, 0x6b, 0x9a, 0x76, 0x32, 0x06, 0x47,
+ 0x4b, 0x11, 0xb3, 0xdd, 0x03, 0x65, 0xbd, 0x9b,
+ 0x01, 0xda, 0x9c, 0xb9, 0x7e, 0x3f, 0x6a, 0xc4,
+ 0x7b, 0xea, 0xd4, 0x3c, 0xb9, 0xfb, 0x5c, 0x6b,
+ 0x64, 0x33, 0x52, 0xba, 0x64, 0x78, 0x8f, 0xa4,
+ 0xaf, 0x7a, 0x61, 0x8d, 0xbc, 0xc5, 0x73, 0xe9,
+ 0x6b, 0x58, 0x97, 0x4b, 0xbf, 0x63, 0x22, 0xd3,
+ 0x37, 0x02, 0x54, 0xc5, 0xb9, 0x16, 0x4a, 0xf0,
+ 0x19, 0xd8, 0x94, 0x57, 0xb8, 0x8a, 0xb3, 0x16,
+ 0x3b, 0xd0, 0x84, 0x8e, 0x67, 0xa6, 0xa3, 0x7d,
+ 0x78, 0xec, 0x00
+};
+static const u8 dec_assoc013[] __initconst = {
+ 0xb1, 0x69, 0x83, 0x87, 0x30, 0xaa, 0x5d, 0xb8,
+ 0x77, 0xe8, 0x21, 0xff, 0x06, 0x59, 0x35, 0xce,
+ 0x75, 0xfe, 0x38, 0xef, 0xb8, 0x91, 0x43, 0x8c,
+ 0xcf, 0x70, 0xdd, 0x0a, 0x68, 0xbf, 0xd4, 0xbc,
+ 0x16, 0x76, 0x99, 0x36, 0x1e, 0x58, 0x79, 0x5e,
+ 0xd4, 0x29, 0xf7, 0x33, 0x93, 0x48, 0xdb, 0x5f,
+ 0x01, 0xae, 0x9c, 0xb6, 0xe4, 0x88, 0x6d, 0x2b,
+ 0x76, 0x75, 0xe0, 0xf3, 0x74, 0xe2, 0xc9
+};
+static const u8 dec_nonce013[] __initconst = {
+ 0x05, 0xa3, 0x93, 0xed, 0x30, 0xc5, 0xa2, 0x06
+};
+static const u8 dec_key013[] __initconst = {
+ 0xb3, 0x35, 0x50, 0x03, 0x54, 0x2e, 0x40, 0x5e,
+ 0x8f, 0x59, 0x8e, 0xc5, 0x90, 0xd5, 0x27, 0x2d,
+ 0xba, 0x29, 0x2e, 0xcb, 0x1b, 0x70, 0x44, 0x1e,
+ 0x65, 0x91, 0x6e, 0x2a, 0x79, 0x22, 0xda, 0x64
+};
+
+static const struct chacha20poly1305_testvec
+chacha20poly1305_dec_vectors[] __initconst = {
+ { dec_input001, dec_output001, dec_assoc001, dec_nonce001, dec_key001,
+ sizeof(dec_input001), sizeof(dec_assoc001), sizeof(dec_nonce001) },
+ { dec_input002, dec_output002, dec_assoc002, dec_nonce002, dec_key002,
+ sizeof(dec_input002), sizeof(dec_assoc002), sizeof(dec_nonce002) },
+ { dec_input003, dec_output003, dec_assoc003, dec_nonce003, dec_key003,
+ sizeof(dec_input003), sizeof(dec_assoc003), sizeof(dec_nonce003) },
+ { dec_input004, dec_output004, dec_assoc004, dec_nonce004, dec_key004,
+ sizeof(dec_input004), sizeof(dec_assoc004), sizeof(dec_nonce004) },
+ { dec_input005, dec_output005, dec_assoc005, dec_nonce005, dec_key005,
+ sizeof(dec_input005), sizeof(dec_assoc005), sizeof(dec_nonce005) },
+ { dec_input006, dec_output006, dec_assoc006, dec_nonce006, dec_key006,
+ sizeof(dec_input006), sizeof(dec_assoc006), sizeof(dec_nonce006) },
+ { dec_input007, dec_output007, dec_assoc007, dec_nonce007, dec_key007,
+ sizeof(dec_input007), sizeof(dec_assoc007), sizeof(dec_nonce007) },
+ { dec_input008, dec_output008, dec_assoc008, dec_nonce008, dec_key008,
+ sizeof(dec_input008), sizeof(dec_assoc008), sizeof(dec_nonce008) },
+ { dec_input009, dec_output009, dec_assoc009, dec_nonce009, dec_key009,
+ sizeof(dec_input009), sizeof(dec_assoc009), sizeof(dec_nonce009) },
+ { dec_input010, dec_output010, dec_assoc010, dec_nonce010, dec_key010,
+ sizeof(dec_input010), sizeof(dec_assoc010), sizeof(dec_nonce010) },
+ { dec_input011, dec_output011, dec_assoc011, dec_nonce011, dec_key011,
+ sizeof(dec_input011), sizeof(dec_assoc011), sizeof(dec_nonce011) },
+ { dec_input012, dec_output012, dec_assoc012, dec_nonce012, dec_key012,
+ sizeof(dec_input012), sizeof(dec_assoc012), sizeof(dec_nonce012) },
+ { dec_input013, dec_output013, dec_assoc013, dec_nonce013, dec_key013,
+ sizeof(dec_input013), sizeof(dec_assoc013), sizeof(dec_nonce013),
+ true }
+};
+
+static const u8 xenc_input001[] __initconst = {
+ 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74,
+ 0x2d, 0x44, 0x72, 0x61, 0x66, 0x74, 0x73, 0x20,
+ 0x61, 0x72, 0x65, 0x20, 0x64, 0x72, 0x61, 0x66,
+ 0x74, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65,
+ 0x6e, 0x74, 0x73, 0x20, 0x76, 0x61, 0x6c, 0x69,
+ 0x64, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x61, 0x20,
+ 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x20,
+ 0x6f, 0x66, 0x20, 0x73, 0x69, 0x78, 0x20, 0x6d,
+ 0x6f, 0x6e, 0x74, 0x68, 0x73, 0x20, 0x61, 0x6e,
+ 0x64, 0x20, 0x6d, 0x61, 0x79, 0x20, 0x62, 0x65,
+ 0x20, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64,
+ 0x2c, 0x20, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63,
+ 0x65, 0x64, 0x2c, 0x20, 0x6f, 0x72, 0x20, 0x6f,
+ 0x62, 0x73, 0x6f, 0x6c, 0x65, 0x74, 0x65, 0x64,
+ 0x20, 0x62, 0x79, 0x20, 0x6f, 0x74, 0x68, 0x65,
+ 0x72, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65,
+ 0x6e, 0x74, 0x73, 0x20, 0x61, 0x74, 0x20, 0x61,
+ 0x6e, 0x79, 0x20, 0x74, 0x69, 0x6d, 0x65, 0x2e,
+ 0x20, 0x49, 0x74, 0x20, 0x69, 0x73, 0x20, 0x69,
+ 0x6e, 0x61, 0x70, 0x70, 0x72, 0x6f, 0x70, 0x72,
+ 0x69, 0x61, 0x74, 0x65, 0x20, 0x74, 0x6f, 0x20,
+ 0x75, 0x73, 0x65, 0x20, 0x49, 0x6e, 0x74, 0x65,
+ 0x72, 0x6e, 0x65, 0x74, 0x2d, 0x44, 0x72, 0x61,
+ 0x66, 0x74, 0x73, 0x20, 0x61, 0x73, 0x20, 0x72,
+ 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65,
+ 0x20, 0x6d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61,
+ 0x6c, 0x20, 0x6f, 0x72, 0x20, 0x74, 0x6f, 0x20,
+ 0x63, 0x69, 0x74, 0x65, 0x20, 0x74, 0x68, 0x65,
+ 0x6d, 0x20, 0x6f, 0x74, 0x68, 0x65, 0x72, 0x20,
+ 0x74, 0x68, 0x61, 0x6e, 0x20, 0x61, 0x73, 0x20,
+ 0x2f, 0xe2, 0x80, 0x9c, 0x77, 0x6f, 0x72, 0x6b,
+ 0x20, 0x69, 0x6e, 0x20, 0x70, 0x72, 0x6f, 0x67,
+ 0x72, 0x65, 0x73, 0x73, 0x2e, 0x2f, 0xe2, 0x80,
+ 0x9d
+};
+static const u8 xenc_output001[] __initconst = {
+ 0x1a, 0x6e, 0x3a, 0xd9, 0xfd, 0x41, 0x3f, 0x77,
+ 0x54, 0x72, 0x0a, 0x70, 0x9a, 0xa0, 0x29, 0x92,
+ 0x2e, 0xed, 0x93, 0xcf, 0x0f, 0x71, 0x88, 0x18,
+ 0x7a, 0x9d, 0x2d, 0x24, 0xe0, 0xf5, 0xea, 0x3d,
+ 0x55, 0x64, 0xd7, 0xad, 0x2a, 0x1a, 0x1f, 0x7e,
+ 0x86, 0x6d, 0xb0, 0xce, 0x80, 0x41, 0x72, 0x86,
+ 0x26, 0xee, 0x84, 0xd7, 0xef, 0x82, 0x9e, 0xe2,
+ 0x60, 0x9d, 0x5a, 0xfc, 0xf0, 0xe4, 0x19, 0x85,
+ 0xea, 0x09, 0xc6, 0xfb, 0xb3, 0xa9, 0x50, 0x09,
+ 0xec, 0x5e, 0x11, 0x90, 0xa1, 0xc5, 0x4e, 0x49,
+ 0xef, 0x50, 0xd8, 0x8f, 0xe0, 0x78, 0xd7, 0xfd,
+ 0xb9, 0x3b, 0xc9, 0xf2, 0x91, 0xc8, 0x25, 0xc8,
+ 0xa7, 0x63, 0x60, 0xce, 0x10, 0xcd, 0xc6, 0x7f,
+ 0xf8, 0x16, 0xf8, 0xe1, 0x0a, 0xd9, 0xde, 0x79,
+ 0x50, 0x33, 0xf2, 0x16, 0x0f, 0x17, 0xba, 0xb8,
+ 0x5d, 0xd8, 0xdf, 0x4e, 0x51, 0xa8, 0x39, 0xd0,
+ 0x85, 0xca, 0x46, 0x6a, 0x10, 0xa7, 0xa3, 0x88,
+ 0xef, 0x79, 0xb9, 0xf8, 0x24, 0xf3, 0xe0, 0x71,
+ 0x7b, 0x76, 0x28, 0x46, 0x3a, 0x3a, 0x1b, 0x91,
+ 0xb6, 0xd4, 0x3e, 0x23, 0xe5, 0x44, 0x15, 0xbf,
+ 0x60, 0x43, 0x9d, 0xa4, 0xbb, 0xd5, 0x5f, 0x89,
+ 0xeb, 0xef, 0x8e, 0xfd, 0xdd, 0xb4, 0x0d, 0x46,
+ 0xf0, 0x69, 0x23, 0x63, 0xae, 0x94, 0xf5, 0x5e,
+ 0xa5, 0xad, 0x13, 0x1c, 0x41, 0x76, 0xe6, 0x90,
+ 0xd6, 0x6d, 0xa2, 0x8f, 0x97, 0x4c, 0xa8, 0x0b,
+ 0xcf, 0x8d, 0x43, 0x2b, 0x9c, 0x9b, 0xc5, 0x58,
+ 0xa5, 0xb6, 0x95, 0x9a, 0xbf, 0x81, 0xc6, 0x54,
+ 0xc9, 0x66, 0x0c, 0xe5, 0x4f, 0x6a, 0x53, 0xa1,
+ 0xe5, 0x0c, 0xba, 0x31, 0xde, 0x34, 0x64, 0x73,
+ 0x8a, 0x3b, 0xbd, 0x92, 0x01, 0xdb, 0x71, 0x69,
+ 0xf3, 0x58, 0x99, 0xbc, 0xd1, 0xcb, 0x4a, 0x05,
+ 0xe2, 0x58, 0x9c, 0x25, 0x17, 0xcd, 0xdc, 0x83,
+ 0xb7, 0xff, 0xfb, 0x09, 0x61, 0xad, 0xbf, 0x13,
+ 0x5b, 0x5e, 0xed, 0x46, 0x82, 0x6f, 0x22, 0xd8,
+ 0x93, 0xa6, 0x85, 0x5b, 0x40, 0x39, 0x5c, 0xc5,
+ 0x9c
+};
+static const u8 xenc_assoc001[] __initconst = {
+ 0xf3, 0x33, 0x88, 0x86, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x4e, 0x91
+};
+static const u8 xenc_nonce001[] __initconst = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17
+};
+static const u8 xenc_key001[] __initconst = {
+ 0x1c, 0x92, 0x40, 0xa5, 0xeb, 0x55, 0xd3, 0x8a,
+ 0xf3, 0x33, 0x88, 0x86, 0x04, 0xf6, 0xb5, 0xf0,
+ 0x47, 0x39, 0x17, 0xc1, 0x40, 0x2b, 0x80, 0x09,
+ 0x9d, 0xca, 0x5c, 0xbc, 0x20, 0x70, 0x75, 0xc0
+};
+
+static const struct chacha20poly1305_testvec
+xchacha20poly1305_enc_vectors[] __initconst = {
+ { xenc_input001, xenc_output001, xenc_assoc001, xenc_nonce001, xenc_key001,
+ sizeof(xenc_input001), sizeof(xenc_assoc001), sizeof(xenc_nonce001) }
+};
+
+static const u8 xdec_input001[] __initconst = {
+ 0x1a, 0x6e, 0x3a, 0xd9, 0xfd, 0x41, 0x3f, 0x77,
+ 0x54, 0x72, 0x0a, 0x70, 0x9a, 0xa0, 0x29, 0x92,
+ 0x2e, 0xed, 0x93, 0xcf, 0x0f, 0x71, 0x88, 0x18,
+ 0x7a, 0x9d, 0x2d, 0x24, 0xe0, 0xf5, 0xea, 0x3d,
+ 0x55, 0x64, 0xd7, 0xad, 0x2a, 0x1a, 0x1f, 0x7e,
+ 0x86, 0x6d, 0xb0, 0xce, 0x80, 0x41, 0x72, 0x86,
+ 0x26, 0xee, 0x84, 0xd7, 0xef, 0x82, 0x9e, 0xe2,
+ 0x60, 0x9d, 0x5a, 0xfc, 0xf0, 0xe4, 0x19, 0x85,
+ 0xea, 0x09, 0xc6, 0xfb, 0xb3, 0xa9, 0x50, 0x09,
+ 0xec, 0x5e, 0x11, 0x90, 0xa1, 0xc5, 0x4e, 0x49,
+ 0xef, 0x50, 0xd8, 0x8f, 0xe0, 0x78, 0xd7, 0xfd,
+ 0xb9, 0x3b, 0xc9, 0xf2, 0x91, 0xc8, 0x25, 0xc8,
+ 0xa7, 0x63, 0x60, 0xce, 0x10, 0xcd, 0xc6, 0x7f,
+ 0xf8, 0x16, 0xf8, 0xe1, 0x0a, 0xd9, 0xde, 0x79,
+ 0x50, 0x33, 0xf2, 0x16, 0x0f, 0x17, 0xba, 0xb8,
+ 0x5d, 0xd8, 0xdf, 0x4e, 0x51, 0xa8, 0x39, 0xd0,
+ 0x85, 0xca, 0x46, 0x6a, 0x10, 0xa7, 0xa3, 0x88,
+ 0xef, 0x79, 0xb9, 0xf8, 0x24, 0xf3, 0xe0, 0x71,
+ 0x7b, 0x76, 0x28, 0x46, 0x3a, 0x3a, 0x1b, 0x91,
+ 0xb6, 0xd4, 0x3e, 0x23, 0xe5, 0x44, 0x15, 0xbf,
+ 0x60, 0x43, 0x9d, 0xa4, 0xbb, 0xd5, 0x5f, 0x89,
+ 0xeb, 0xef, 0x8e, 0xfd, 0xdd, 0xb4, 0x0d, 0x46,
+ 0xf0, 0x69, 0x23, 0x63, 0xae, 0x94, 0xf5, 0x5e,
+ 0xa5, 0xad, 0x13, 0x1c, 0x41, 0x76, 0xe6, 0x90,
+ 0xd6, 0x6d, 0xa2, 0x8f, 0x97, 0x4c, 0xa8, 0x0b,
+ 0xcf, 0x8d, 0x43, 0x2b, 0x9c, 0x9b, 0xc5, 0x58,
+ 0xa5, 0xb6, 0x95, 0x9a, 0xbf, 0x81, 0xc6, 0x54,
+ 0xc9, 0x66, 0x0c, 0xe5, 0x4f, 0x6a, 0x53, 0xa1,
+ 0xe5, 0x0c, 0xba, 0x31, 0xde, 0x34, 0x64, 0x73,
+ 0x8a, 0x3b, 0xbd, 0x92, 0x01, 0xdb, 0x71, 0x69,
+ 0xf3, 0x58, 0x99, 0xbc, 0xd1, 0xcb, 0x4a, 0x05,
+ 0xe2, 0x58, 0x9c, 0x25, 0x17, 0xcd, 0xdc, 0x83,
+ 0xb7, 0xff, 0xfb, 0x09, 0x61, 0xad, 0xbf, 0x13,
+ 0x5b, 0x5e, 0xed, 0x46, 0x82, 0x6f, 0x22, 0xd8,
+ 0x93, 0xa6, 0x85, 0x5b, 0x40, 0x39, 0x5c, 0xc5,
+ 0x9c
+};
+static const u8 xdec_output001[] __initconst = {
+ 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74,
+ 0x2d, 0x44, 0x72, 0x61, 0x66, 0x74, 0x73, 0x20,
+ 0x61, 0x72, 0x65, 0x20, 0x64, 0x72, 0x61, 0x66,
+ 0x74, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65,
+ 0x6e, 0x74, 0x73, 0x20, 0x76, 0x61, 0x6c, 0x69,
+ 0x64, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x61, 0x20,
+ 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x20,
+ 0x6f, 0x66, 0x20, 0x73, 0x69, 0x78, 0x20, 0x6d,
+ 0x6f, 0x6e, 0x74, 0x68, 0x73, 0x20, 0x61, 0x6e,
+ 0x64, 0x20, 0x6d, 0x61, 0x79, 0x20, 0x62, 0x65,
+ 0x20, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64,
+ 0x2c, 0x20, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63,
+ 0x65, 0x64, 0x2c, 0x20, 0x6f, 0x72, 0x20, 0x6f,
+ 0x62, 0x73, 0x6f, 0x6c, 0x65, 0x74, 0x65, 0x64,
+ 0x20, 0x62, 0x79, 0x20, 0x6f, 0x74, 0x68, 0x65,
+ 0x72, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65,
+ 0x6e, 0x74, 0x73, 0x20, 0x61, 0x74, 0x20, 0x61,
+ 0x6e, 0x79, 0x20, 0x74, 0x69, 0x6d, 0x65, 0x2e,
+ 0x20, 0x49, 0x74, 0x20, 0x69, 0x73, 0x20, 0x69,
+ 0x6e, 0x61, 0x70, 0x70, 0x72, 0x6f, 0x70, 0x72,
+ 0x69, 0x61, 0x74, 0x65, 0x20, 0x74, 0x6f, 0x20,
+ 0x75, 0x73, 0x65, 0x20, 0x49, 0x6e, 0x74, 0x65,
+ 0x72, 0x6e, 0x65, 0x74, 0x2d, 0x44, 0x72, 0x61,
+ 0x66, 0x74, 0x73, 0x20, 0x61, 0x73, 0x20, 0x72,
+ 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65,
+ 0x20, 0x6d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61,
+ 0x6c, 0x20, 0x6f, 0x72, 0x20, 0x74, 0x6f, 0x20,
+ 0x63, 0x69, 0x74, 0x65, 0x20, 0x74, 0x68, 0x65,
+ 0x6d, 0x20, 0x6f, 0x74, 0x68, 0x65, 0x72, 0x20,
+ 0x74, 0x68, 0x61, 0x6e, 0x20, 0x61, 0x73, 0x20,
+ 0x2f, 0xe2, 0x80, 0x9c, 0x77, 0x6f, 0x72, 0x6b,
+ 0x20, 0x69, 0x6e, 0x20, 0x70, 0x72, 0x6f, 0x67,
+ 0x72, 0x65, 0x73, 0x73, 0x2e, 0x2f, 0xe2, 0x80,
+ 0x9d
+};
+static const u8 xdec_assoc001[] __initconst = {
+ 0xf3, 0x33, 0x88, 0x86, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x4e, 0x91
+};
+static const u8 xdec_nonce001[] __initconst = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17
+};
+static const u8 xdec_key001[] __initconst = {
+ 0x1c, 0x92, 0x40, 0xa5, 0xeb, 0x55, 0xd3, 0x8a,
+ 0xf3, 0x33, 0x88, 0x86, 0x04, 0xf6, 0xb5, 0xf0,
+ 0x47, 0x39, 0x17, 0xc1, 0x40, 0x2b, 0x80, 0x09,
+ 0x9d, 0xca, 0x5c, 0xbc, 0x20, 0x70, 0x75, 0xc0
+};
+
+static const struct chacha20poly1305_testvec
+xchacha20poly1305_dec_vectors[] __initconst = {
+ { xdec_input001, xdec_output001, xdec_assoc001, xdec_nonce001, xdec_key001,
+ sizeof(xdec_input001), sizeof(xdec_assoc001), sizeof(xdec_nonce001) }
+};
+
+/* This is for the selftests only, since it is useful solely for testing the
+ * underlying primitives and their interactions.
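+ * It drives ChaCha20 with the RFC 8439 nonce layout (a 32-bit block counter
+ * followed by a 96-bit nonce), which the one-shot library API does not
+ * expose.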
+ */
+static void __init
+chacha20poly1305_encrypt_bignonce(u8 *dst, const u8 *src, const size_t src_len,
+ const u8 *ad, const size_t ad_len,
+ const u8 nonce[12],
+ const u8 key[CHACHA20POLY1305_KEY_SIZE])
+{
+ const u8 *pad0 = page_address(ZERO_PAGE(0));
+ struct poly1305_desc_ctx poly1305_state;
+ u32 chacha20_state[CHACHA_STATE_WORDS];
+ union {
+ u8 block0[POLY1305_KEY_SIZE];
+ __le64 lens[2];
+ } b = {{ 0 }};
+ u8 bottom_row[16] = { 0 };
+ u32 le_key[8];
+ int i;
+
+ memcpy(&bottom_row[4], nonce, 12);
+ for (i = 0; i < 8; ++i)
+ le_key[i] = get_unaligned_le32(key + sizeof(le_key[i]) * i);
+ chacha_init(chacha20_state, le_key, bottom_row);
+ chacha20_crypt(chacha20_state, b.block0, b.block0, sizeof(b.block0));
+ poly1305_init(&poly1305_state, b.block0);
+ poly1305_update(&poly1305_state, ad, ad_len);
+ poly1305_update(&poly1305_state, pad0, (0x10 - ad_len) & 0xf);
+ chacha20_crypt(chacha20_state, dst, src, src_len);
+ poly1305_update(&poly1305_state, dst, src_len);
+ poly1305_update(&poly1305_state, pad0, (0x10 - src_len) & 0xf);
+ b.lens[0] = cpu_to_le64(ad_len);
+ b.lens[1] = cpu_to_le64(src_len);
+ poly1305_update(&poly1305_state, (u8 *)b.lens, sizeof(b.lens));
+ poly1305_final(&poly1305_state, dst + src_len);
+}
+
+static void __init
+chacha20poly1305_selftest_encrypt(u8 *dst, const u8 *src, const size_t src_len,
+ const u8 *ad, const size_t ad_len,
+ const u8 *nonce, const size_t nonce_len,
+ const u8 key[CHACHA20POLY1305_KEY_SIZE])
+{
+ if (nonce_len == 8)
+ chacha20poly1305_encrypt(dst, src, src_len, ad, ad_len,
+ get_unaligned_le64(nonce), key);
+ else if (nonce_len == 12)
+ chacha20poly1305_encrypt_bignonce(dst, src, src_len, ad,
+ ad_len, nonce, key);
+ else
+ BUG();
+}
+
+static bool __init
+decryption_success(bool func_ret, bool expect_failure, int memcmp_result)
+{
+ if (expect_failure)
+ return !func_ret;
+ return func_ret && !memcmp_result;
+}
+
+bool __init chacha20poly1305_selftest(void)
+{
+ enum { MAXIMUM_TEST_BUFFER_LEN = 1UL << 12 };
+ size_t i, j, k, total_len;
+ u8 *computed_output = NULL, *input = NULL;
+ bool success = true, ret;
+ struct scatterlist sg_src[3];
+
+ computed_output = kmalloc(MAXIMUM_TEST_BUFFER_LEN, GFP_KERNEL);
+ input = kmalloc(MAXIMUM_TEST_BUFFER_LEN, GFP_KERNEL);
+ if (!computed_output || !input) {
+ pr_err("chacha20poly1305 self-test malloc: FAIL\n");
+ success = false;
+ goto out;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(chacha20poly1305_enc_vectors); ++i) {
+ memset(computed_output, 0, MAXIMUM_TEST_BUFFER_LEN);
+ chacha20poly1305_selftest_encrypt(computed_output,
+ chacha20poly1305_enc_vectors[i].input,
+ chacha20poly1305_enc_vectors[i].ilen,
+ chacha20poly1305_enc_vectors[i].assoc,
+ chacha20poly1305_enc_vectors[i].alen,
+ chacha20poly1305_enc_vectors[i].nonce,
+ chacha20poly1305_enc_vectors[i].nlen,
+ chacha20poly1305_enc_vectors[i].key);
+ if (memcmp(computed_output,
+ chacha20poly1305_enc_vectors[i].output,
+ chacha20poly1305_enc_vectors[i].ilen +
+ POLY1305_DIGEST_SIZE)) {
+ pr_err("chacha20poly1305 encryption self-test %zu: FAIL\n",
+ i + 1);
+ success = false;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(chacha20poly1305_enc_vectors); ++i) {
+ if (chacha20poly1305_enc_vectors[i].nlen != 8)
+ continue;
+ memcpy(computed_output, chacha20poly1305_enc_vectors[i].input,
+ chacha20poly1305_enc_vectors[i].ilen);
+ sg_init_one(sg_src, computed_output,
+ chacha20poly1305_enc_vectors[i].ilen + POLY1305_DIGEST_SIZE);
+ ret = chacha20poly1305_encrypt_sg_inplace(sg_src,
+ chacha20poly1305_enc_vectors[i].ilen,
+ chacha20poly1305_enc_vectors[i].assoc,
+ chacha20poly1305_enc_vectors[i].alen,
+ get_unaligned_le64(chacha20poly1305_enc_vectors[i].nonce),
+ chacha20poly1305_enc_vectors[i].key);
+ if (!ret || memcmp(computed_output,
+ chacha20poly1305_enc_vectors[i].output,
+ chacha20poly1305_enc_vectors[i].ilen +
+ POLY1305_DIGEST_SIZE)) {
+ pr_err("chacha20poly1305 sg encryption self-test %zu: FAIL\n",
+ i + 1);
+ success = false;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(chacha20poly1305_dec_vectors); ++i) {
+ memset(computed_output, 0, MAXIMUM_TEST_BUFFER_LEN);
+ ret = chacha20poly1305_decrypt(computed_output,
+ chacha20poly1305_dec_vectors[i].input,
+ chacha20poly1305_dec_vectors[i].ilen,
+ chacha20poly1305_dec_vectors[i].assoc,
+ chacha20poly1305_dec_vectors[i].alen,
+ get_unaligned_le64(chacha20poly1305_dec_vectors[i].nonce),
+ chacha20poly1305_dec_vectors[i].key);
+ if (!decryption_success(ret,
+ chacha20poly1305_dec_vectors[i].failure,
+ memcmp(computed_output,
+ chacha20poly1305_dec_vectors[i].output,
+ chacha20poly1305_dec_vectors[i].ilen -
+ POLY1305_DIGEST_SIZE))) {
+ pr_err("chacha20poly1305 decryption self-test %zu: FAIL\n",
+ i + 1);
+ success = false;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(chacha20poly1305_dec_vectors); ++i) {
+ memcpy(computed_output, chacha20poly1305_dec_vectors[i].input,
+ chacha20poly1305_dec_vectors[i].ilen);
+ sg_init_one(sg_src, computed_output,
+ chacha20poly1305_dec_vectors[i].ilen);
+ ret = chacha20poly1305_decrypt_sg_inplace(sg_src,
+ chacha20poly1305_dec_vectors[i].ilen,
+ chacha20poly1305_dec_vectors[i].assoc,
+ chacha20poly1305_dec_vectors[i].alen,
+ get_unaligned_le64(chacha20poly1305_dec_vectors[i].nonce),
+ chacha20poly1305_dec_vectors[i].key);
+ if (!decryption_success(ret,
+ chacha20poly1305_dec_vectors[i].failure,
+ memcmp(computed_output, chacha20poly1305_dec_vectors[i].output,
+ chacha20poly1305_dec_vectors[i].ilen -
+ POLY1305_DIGEST_SIZE))) {
+ pr_err("chacha20poly1305 sg decryption self-test %zu: FAIL\n",
+ i + 1);
+ success = false;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(xchacha20poly1305_enc_vectors); ++i) {
+ memset(computed_output, 0, MAXIMUM_TEST_BUFFER_LEN);
+ xchacha20poly1305_encrypt(computed_output,
+ xchacha20poly1305_enc_vectors[i].input,
+ xchacha20poly1305_enc_vectors[i].ilen,
+ xchacha20poly1305_enc_vectors[i].assoc,
+ xchacha20poly1305_enc_vectors[i].alen,
+ xchacha20poly1305_enc_vectors[i].nonce,
+ xchacha20poly1305_enc_vectors[i].key);
+ if (memcmp(computed_output,
+ xchacha20poly1305_enc_vectors[i].output,
+ xchacha20poly1305_enc_vectors[i].ilen +
+ POLY1305_DIGEST_SIZE)) {
+ pr_err("xchacha20poly1305 encryption self-test %zu: FAIL\n",
+ i + 1);
+ success = false;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(xchacha20poly1305_dec_vectors); ++i) {
+ memset(computed_output, 0, MAXIMUM_TEST_BUFFER_LEN);
+ ret = xchacha20poly1305_decrypt(computed_output,
+ xchacha20poly1305_dec_vectors[i].input,
+ xchacha20poly1305_dec_vectors[i].ilen,
+ xchacha20poly1305_dec_vectors[i].assoc,
+ xchacha20poly1305_dec_vectors[i].alen,
+ xchacha20poly1305_dec_vectors[i].nonce,
+ xchacha20poly1305_dec_vectors[i].key);
+ if (!decryption_success(ret,
+ xchacha20poly1305_dec_vectors[i].failure,
+ memcmp(computed_output,
+ xchacha20poly1305_dec_vectors[i].output,
+ xchacha20poly1305_dec_vectors[i].ilen -
+ POLY1305_DIGEST_SIZE))) {
+ pr_err("xchacha20poly1305 decryption self-test %zu: FAIL\n",
+ i + 1);
+ success = false;
+ }
+ }
+
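+	/* Exhaustively exercise every way of splitting each length across
+	 * three scatterlist chunks; the loop body is compiled out unless
+	 * DEBUG_CHACHA20POLY1305_SLOW_CHUNK_TEST is defined, since it is slow.
+	 */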
+ for (total_len = POLY1305_DIGEST_SIZE; IS_ENABLED(DEBUG_CHACHA20POLY1305_SLOW_CHUNK_TEST)
+ && total_len <= 1 << 10; ++total_len) {
+ for (i = 0; i <= total_len; ++i) {
+ for (j = i; j <= total_len; ++j) {
+ k = 0;
+ sg_init_table(sg_src, 3);
+ if (i)
+ sg_set_buf(&sg_src[k++], input, i);
+ if (j - i)
+ sg_set_buf(&sg_src[k++], input + i, j - i);
+ if (total_len - j)
+ sg_set_buf(&sg_src[k++], input + j, total_len - j);
+ sg_init_marker(sg_src, k);
+ memset(computed_output, 0, total_len);
+ memset(input, 0, total_len);
+
+ if (!chacha20poly1305_encrypt_sg_inplace(sg_src,
+ total_len - POLY1305_DIGEST_SIZE, NULL, 0,
+ 0, enc_key001))
+ goto chunkfail;
+ chacha20poly1305_encrypt(computed_output,
+ computed_output,
+ total_len - POLY1305_DIGEST_SIZE, NULL, 0, 0,
+ enc_key001);
+ if (memcmp(computed_output, input, total_len))
+ goto chunkfail;
+ if (!chacha20poly1305_decrypt(computed_output,
+ input, total_len, NULL, 0, 0, enc_key001))
+ goto chunkfail;
+ for (k = 0; k < total_len - POLY1305_DIGEST_SIZE; ++k) {
+ if (computed_output[k])
+ goto chunkfail;
+ }
+ if (!chacha20poly1305_decrypt_sg_inplace(sg_src,
+ total_len, NULL, 0, 0, enc_key001))
+ goto chunkfail;
+ for (k = 0; k < total_len - POLY1305_DIGEST_SIZE; ++k) {
+ if (input[k])
+ goto chunkfail;
+ }
+ continue;
+
+ chunkfail:
+ pr_err("chacha20poly1305 chunked self-test %zu/%zu/%zu: FAIL\n",
+ total_len, i, j);
+ success = false;
+ }
+
+ }
+ }
+
+out:
+ kfree(computed_output);
+ kfree(input);
+ return success;
+}
diff --git a/lib/crypto/chacha20poly1305.c b/lib/crypto/chacha20poly1305.c
new file mode 100644
index 000000000..fa6a9440f
--- /dev/null
+++ b/lib/crypto/chacha20poly1305.c
@@ -0,0 +1,373 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ *
+ * This is an implementation of the ChaCha20Poly1305 AEAD construction.
+ *
+ * Information: https://tools.ietf.org/html/rfc8439
+ */
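+
+/*
+ * A minimal usage sketch of the one-shot interface defined below (the buffer
+ * names and MSG_LEN are illustrative only):
+ *
+ *	u8 ct[MSG_LEN + POLY1305_DIGEST_SIZE];
+ *
+ *	chacha20poly1305_encrypt(ct, msg, MSG_LEN, ad, ad_len, nonce, key);
+ *	if (!chacha20poly1305_decrypt(msg, ct, sizeof(ct), ad, ad_len,
+ *				      nonce, key))
+ *		return -EBADMSG;	(tag mismatch, nothing was written)
+ *
+ * The ciphertext buffer needs POLY1305_DIGEST_SIZE (16) extra bytes for the
+ * appended tag, and a (key, nonce) pair must never be reused.
+ */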
+
+#include <crypto/algapi.h>
+#include <crypto/chacha20poly1305.h>
+#include <crypto/chacha.h>
+#include <crypto/poly1305.h>
+#include <crypto/scatterwalk.h>
+
+#include <asm/unaligned.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+
+#define CHACHA_KEY_WORDS (CHACHA_KEY_SIZE / sizeof(u32))
+
+static void chacha_load_key(u32 *k, const u8 *in)
+{
+ k[0] = get_unaligned_le32(in);
+ k[1] = get_unaligned_le32(in + 4);
+ k[2] = get_unaligned_le32(in + 8);
+ k[3] = get_unaligned_le32(in + 12);
+ k[4] = get_unaligned_le32(in + 16);
+ k[5] = get_unaligned_le32(in + 20);
+ k[6] = get_unaligned_le32(in + 24);
+ k[7] = get_unaligned_le32(in + 28);
+}
+
+static void xchacha_init(u32 *chacha_state, const u8 *key, const u8 *nonce)
+{
+ u32 k[CHACHA_KEY_WORDS];
+ u8 iv[CHACHA_IV_SIZE];
+
+ memset(iv, 0, 8);
+ memcpy(iv + 8, nonce + 16, 8);
+
+ chacha_load_key(k, key);
+
+ /* Compute the subkey given the original key and first 128 nonce bits */
+ chacha_init(chacha_state, k, nonce);
+ hchacha_block(chacha_state, k, 20);
+
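+	/* Reinitialize with the derived subkey, a zeroed 64-bit counter, and
+	 * the remaining 64 nonce bits (bytes 16..23).
+	 */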
+ chacha_init(chacha_state, k, iv);
+
+ memzero_explicit(k, sizeof(k));
+ memzero_explicit(iv, sizeof(iv));
+}
+
+static void
+__chacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len,
+ const u8 *ad, const size_t ad_len, u32 *chacha_state)
+{
+ const u8 *pad0 = page_address(ZERO_PAGE(0));
+ struct poly1305_desc_ctx poly1305_state;
+ union {
+ u8 block0[POLY1305_KEY_SIZE];
+ __le64 lens[2];
+ } b;
+
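+	/* The first 32 keystream bytes (block 0) form the one-time Poly1305
+	 * key; payload encryption then begins at block 1, as in RFC 8439.
+	 */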
+ chacha20_crypt(chacha_state, b.block0, pad0, sizeof(b.block0));
+ poly1305_init(&poly1305_state, b.block0);
+
+ poly1305_update(&poly1305_state, ad, ad_len);
+ if (ad_len & 0xf)
+ poly1305_update(&poly1305_state, pad0, 0x10 - (ad_len & 0xf));
+
+ chacha20_crypt(chacha_state, dst, src, src_len);
+
+ poly1305_update(&poly1305_state, dst, src_len);
+ if (src_len & 0xf)
+ poly1305_update(&poly1305_state, pad0, 0x10 - (src_len & 0xf));
+
+ b.lens[0] = cpu_to_le64(ad_len);
+ b.lens[1] = cpu_to_le64(src_len);
+ poly1305_update(&poly1305_state, (u8 *)b.lens, sizeof(b.lens));
+
+ poly1305_final(&poly1305_state, dst + src_len);
+
+ memzero_explicit(chacha_state, CHACHA_STATE_WORDS * sizeof(u32));
+ memzero_explicit(&b, sizeof(b));
+}
+
+void chacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len,
+ const u8 *ad, const size_t ad_len,
+ const u64 nonce,
+ const u8 key[CHACHA20POLY1305_KEY_SIZE])
+{
+ u32 chacha_state[CHACHA_STATE_WORDS];
+ u32 k[CHACHA_KEY_WORDS];
+ __le64 iv[2];
+
+ chacha_load_key(k, key);
+
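+	/* The ChaCha IV is a zeroed 64-bit block counter followed by the
+	 * little-endian 64-bit nonce.
+	 */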
+ iv[0] = 0;
+ iv[1] = cpu_to_le64(nonce);
+
+ chacha_init(chacha_state, k, (u8 *)iv);
+ __chacha20poly1305_encrypt(dst, src, src_len, ad, ad_len, chacha_state);
+
+ memzero_explicit(iv, sizeof(iv));
+ memzero_explicit(k, sizeof(k));
+}
+EXPORT_SYMBOL(chacha20poly1305_encrypt);
+
+void xchacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len,
+ const u8 *ad, const size_t ad_len,
+ const u8 nonce[XCHACHA20POLY1305_NONCE_SIZE],
+ const u8 key[CHACHA20POLY1305_KEY_SIZE])
+{
+ u32 chacha_state[CHACHA_STATE_WORDS];
+
+ xchacha_init(chacha_state, key, nonce);
+ __chacha20poly1305_encrypt(dst, src, src_len, ad, ad_len, chacha_state);
+}
+EXPORT_SYMBOL(xchacha20poly1305_encrypt);
+
+static bool
+__chacha20poly1305_decrypt(u8 *dst, const u8 *src, const size_t src_len,
+ const u8 *ad, const size_t ad_len, u32 *chacha_state)
+{
+ const u8 *pad0 = page_address(ZERO_PAGE(0));
+ struct poly1305_desc_ctx poly1305_state;
+ size_t dst_len;
+ int ret;
+ union {
+ u8 block0[POLY1305_KEY_SIZE];
+ u8 mac[POLY1305_DIGEST_SIZE];
+ __le64 lens[2];
+ } b;
+
+ if (unlikely(src_len < POLY1305_DIGEST_SIZE))
+ return false;
+
+ chacha20_crypt(chacha_state, b.block0, pad0, sizeof(b.block0));
+ poly1305_init(&poly1305_state, b.block0);
+
+ poly1305_update(&poly1305_state, ad, ad_len);
+ if (ad_len & 0xf)
+ poly1305_update(&poly1305_state, pad0, 0x10 - (ad_len & 0xf));
+
+ dst_len = src_len - POLY1305_DIGEST_SIZE;
+ poly1305_update(&poly1305_state, src, dst_len);
+ if (dst_len & 0xf)
+ poly1305_update(&poly1305_state, pad0, 0x10 - (dst_len & 0xf));
+
+ b.lens[0] = cpu_to_le64(ad_len);
+ b.lens[1] = cpu_to_le64(dst_len);
+ poly1305_update(&poly1305_state, (u8 *)b.lens, sizeof(b.lens));
+
+ poly1305_final(&poly1305_state, b.mac);
+
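+	/* Check the tag in constant time and decrypt only if it matches. */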
+ ret = crypto_memneq(b.mac, src + dst_len, POLY1305_DIGEST_SIZE);
+ if (likely(!ret))
+ chacha20_crypt(chacha_state, dst, src, dst_len);
+
+ memzero_explicit(&b, sizeof(b));
+
+ return !ret;
+}
+
+bool chacha20poly1305_decrypt(u8 *dst, const u8 *src, const size_t src_len,
+ const u8 *ad, const size_t ad_len,
+ const u64 nonce,
+ const u8 key[CHACHA20POLY1305_KEY_SIZE])
+{
+ u32 chacha_state[CHACHA_STATE_WORDS];
+ u32 k[CHACHA_KEY_WORDS];
+ __le64 iv[2];
+ bool ret;
+
+ chacha_load_key(k, key);
+
+ iv[0] = 0;
+ iv[1] = cpu_to_le64(nonce);
+
+ chacha_init(chacha_state, k, (u8 *)iv);
+ ret = __chacha20poly1305_decrypt(dst, src, src_len, ad, ad_len,
+ chacha_state);
+
+ memzero_explicit(chacha_state, sizeof(chacha_state));
+ memzero_explicit(iv, sizeof(iv));
+ memzero_explicit(k, sizeof(k));
+ return ret;
+}
+EXPORT_SYMBOL(chacha20poly1305_decrypt);
+
+bool xchacha20poly1305_decrypt(u8 *dst, const u8 *src, const size_t src_len,
+ const u8 *ad, const size_t ad_len,
+ const u8 nonce[XCHACHA20POLY1305_NONCE_SIZE],
+ const u8 key[CHACHA20POLY1305_KEY_SIZE])
+{
+ u32 chacha_state[CHACHA_STATE_WORDS];
+
+ xchacha_init(chacha_state, key, nonce);
+ return __chacha20poly1305_decrypt(dst, src, src_len, ad, ad_len,
+ chacha_state);
+}
+EXPORT_SYMBOL(xchacha20poly1305_decrypt);
+
+static
+bool chacha20poly1305_crypt_sg_inplace(struct scatterlist *src,
+ const size_t src_len,
+ const u8 *ad, const size_t ad_len,
+ const u64 nonce,
+ const u8 key[CHACHA20POLY1305_KEY_SIZE],
+ int encrypt)
+{
+ const u8 *pad0 = page_address(ZERO_PAGE(0));
+ struct poly1305_desc_ctx poly1305_state;
+ u32 chacha_state[CHACHA_STATE_WORDS];
+ struct sg_mapping_iter miter;
+ size_t partial = 0;
+ unsigned int flags;
+ bool ret = true;
+ int sl;
+ union {
+ struct {
+ u32 k[CHACHA_KEY_WORDS];
+ __le64 iv[2];
+ };
+ u8 block0[POLY1305_KEY_SIZE];
+ u8 chacha_stream[CHACHA_BLOCK_SIZE];
+ struct {
+ u8 mac[2][POLY1305_DIGEST_SIZE];
+ };
+ __le64 lens[2];
+ } b __aligned(16);
+
+ if (WARN_ON(src_len > INT_MAX))
+ return false;
+
+ chacha_load_key(b.k, key);
+
+ b.iv[0] = 0;
+ b.iv[1] = cpu_to_le64(nonce);
+
+ chacha_init(chacha_state, b.k, (u8 *)b.iv);
+ chacha20_crypt(chacha_state, b.block0, pad0, sizeof(b.block0));
+ poly1305_init(&poly1305_state, b.block0);
+
+ if (unlikely(ad_len)) {
+ poly1305_update(&poly1305_state, ad, ad_len);
+ if (ad_len & 0xf)
+ poly1305_update(&poly1305_state, pad0, 0x10 - (ad_len & 0xf));
+ }
+
+ flags = SG_MITER_TO_SG | SG_MITER_ATOMIC;
+
+ sg_miter_start(&miter, src, sg_nents(src), flags);
+
+ for (sl = src_len; sl > 0 && sg_miter_next(&miter); sl -= miter.length) {
+ u8 *addr = miter.addr;
+ size_t length = min_t(size_t, sl, miter.length);
+
+ if (!encrypt)
+ poly1305_update(&poly1305_state, addr, length);
+
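+		/* First use up keystream buffered from the previous
+		 * scatterlist entry.
+		 */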
+ if (unlikely(partial)) {
+ size_t l = min(length, CHACHA_BLOCK_SIZE - partial);
+
+ crypto_xor(addr, b.chacha_stream + partial, l);
+ partial = (partial + l) & (CHACHA_BLOCK_SIZE - 1);
+
+ addr += l;
+ length -= l;
+ }
+
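+		/* Crypt whole blocks in place; on the final entry, the tail
+		 * bytes as well.
+		 */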
+ if (likely(length >= CHACHA_BLOCK_SIZE || length == sl)) {
+ size_t l = length;
+
+ if (unlikely(length < sl))
+ l &= ~(CHACHA_BLOCK_SIZE - 1);
+ chacha20_crypt(chacha_state, addr, addr, l);
+ addr += l;
+ length -= l;
+ }
+
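+		/* Generate and buffer one keystream block for a tail that
+		 * continues into the next entry.
+		 */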
+ if (unlikely(length > 0)) {
+ chacha20_crypt(chacha_state, b.chacha_stream, pad0,
+ CHACHA_BLOCK_SIZE);
+ crypto_xor(addr, b.chacha_stream, length);
+ partial = length;
+ }
+
+ if (encrypt)
+ poly1305_update(&poly1305_state, miter.addr,
+ min_t(size_t, sl, miter.length));
+ }
+
+ if (src_len & 0xf)
+ poly1305_update(&poly1305_state, pad0, 0x10 - (src_len & 0xf));
+
+ b.lens[0] = cpu_to_le64(ad_len);
+ b.lens[1] = cpu_to_le64(src_len);
+ poly1305_update(&poly1305_state, (u8 *)b.lens, sizeof(b.lens));
+
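+	/* If the tag lies entirely within the last mapped entry, read or
+	 * write it in place while the mapping is still live.
+	 */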
+ if (likely(sl <= -POLY1305_DIGEST_SIZE)) {
+ if (encrypt) {
+ poly1305_final(&poly1305_state,
+ miter.addr + miter.length + sl);
+ ret = true;
+ } else {
+ poly1305_final(&poly1305_state, b.mac[0]);
+ ret = !crypto_memneq(b.mac[0],
+ miter.addr + miter.length + sl,
+ POLY1305_DIGEST_SIZE);
+ }
+ }
+
+ sg_miter_stop(&miter);
+
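+	/* Otherwise the tag straddles scatterlist entries, so bounce it
+	 * through b.mac via scatterwalk_map_and_copy().
+	 */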
+ if (unlikely(sl > -POLY1305_DIGEST_SIZE)) {
+ poly1305_final(&poly1305_state, b.mac[1]);
+ scatterwalk_map_and_copy(b.mac[encrypt], src, src_len,
+ sizeof(b.mac[1]), encrypt);
+ ret = encrypt ||
+ !crypto_memneq(b.mac[0], b.mac[1], POLY1305_DIGEST_SIZE);
+ }
+
+ memzero_explicit(chacha_state, sizeof(chacha_state));
+ memzero_explicit(&b, sizeof(b));
+
+ return ret;
+}
+
+bool chacha20poly1305_encrypt_sg_inplace(struct scatterlist *src, size_t src_len,
+ const u8 *ad, const size_t ad_len,
+ const u64 nonce,
+ const u8 key[CHACHA20POLY1305_KEY_SIZE])
+{
+ return chacha20poly1305_crypt_sg_inplace(src, src_len, ad, ad_len,
+ nonce, key, 1);
+}
+EXPORT_SYMBOL(chacha20poly1305_encrypt_sg_inplace);
+
+bool chacha20poly1305_decrypt_sg_inplace(struct scatterlist *src, size_t src_len,
+ const u8 *ad, const size_t ad_len,
+ const u64 nonce,
+ const u8 key[CHACHA20POLY1305_KEY_SIZE])
+{
+ if (unlikely(src_len < POLY1305_DIGEST_SIZE))
+ return false;
+
+ return chacha20poly1305_crypt_sg_inplace(src,
+ src_len - POLY1305_DIGEST_SIZE,
+ ad, ad_len, nonce, key, 0);
+}
+EXPORT_SYMBOL(chacha20poly1305_decrypt_sg_inplace);
+
+static int __init chacha20poly1305_init(void)
+{
+ if (!IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS) &&
+ WARN_ON(!chacha20poly1305_selftest()))
+ return -ENODEV;
+ return 0;
+}
+
+static void __exit chacha20poly1305_exit(void)
+{
+}
+
+module_init(chacha20poly1305_init);
+module_exit(chacha20poly1305_exit);
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("ChaCha20Poly1305 AEAD construction");
+MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>");
diff --git a/lib/crypto/curve25519-fiat32.c b/lib/crypto/curve25519-fiat32.c
new file mode 100644
index 000000000..2fde0ec33
--- /dev/null
+++ b/lib/crypto/curve25519-fiat32.c
@@ -0,0 +1,864 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright (C) 2015-2016 The fiat-crypto Authors.
+ * Copyright (C) 2018-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ *
+ * This is a machine-generated formally verified implementation of Curve25519
+ * ECDH from: <https://github.com/mit-plv/fiat-crypto>. Though originally
+ * machine generated, it has been tweaked to be suitable for use in the kernel.
+ * It is optimized for 32-bit machines and machines that cannot work efficiently
+ * with 128-bit integer types.
+ */
+
+#include <asm/unaligned.h>
+#include <crypto/curve25519.h>
+#include <linux/string.h>
+
+/* fe means field element. Here the field is \Z/(2^255-19). An element t,
+ * entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77
+ * t[3]+2^102 t[4]+...+2^230 t[9].
+ * fe limbs are bounded by 1.125*2^26, 1.125*2^25, 1.125*2^26, 1.125*2^25, etc.
+ * Multiplication and carrying produce fe from fe_loose.
+ */
+typedef struct fe { u32 v[10]; } fe;
+
+/* fe_loose limbs are bounded by 3.375*2^26, 3.375*2^25, 3.375*2^26, 3.375*2^25, etc.
+ * Addition and subtraction produce fe_loose from (fe, fe).
+ */
+typedef struct fe_loose { u32 v[10]; } fe_loose;
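+
+/* Thus limbs alternate between 26 and 25 bits ("radix 2^25.5"), which is why
+ * the carry/borrow helpers below come in paired 26- and 25-bit variants.
+ */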
+
+static __always_inline void fe_frombytes_impl(u32 h[10], const u8 *s)
+{
+ /* Ignores top bit of s. */
+ u32 a0 = get_unaligned_le32(s);
+ u32 a1 = get_unaligned_le32(s+4);
+ u32 a2 = get_unaligned_le32(s+8);
+ u32 a3 = get_unaligned_le32(s+12);
+ u32 a4 = get_unaligned_le32(s+16);
+ u32 a5 = get_unaligned_le32(s+20);
+ u32 a6 = get_unaligned_le32(s+24);
+ u32 a7 = get_unaligned_le32(s+28);
+ h[0] = a0&((1<<26)-1); /* 26 used, 32-26 left. 26 */
+ h[1] = (a0>>26) | ((a1&((1<<19)-1))<< 6); /* (32-26) + 19 = 6+19 = 25 */
+ h[2] = (a1>>19) | ((a2&((1<<13)-1))<<13); /* (32-19) + 13 = 13+13 = 26 */
+ h[3] = (a2>>13) | ((a3&((1<< 6)-1))<<19); /* (32-13) + 6 = 19+ 6 = 25 */
+ h[4] = (a3>> 6); /* (32- 6) = 26 */
+ h[5] = a4&((1<<25)-1); /* 25 */
+ h[6] = (a4>>25) | ((a5&((1<<19)-1))<< 7); /* (32-25) + 19 = 7+19 = 26 */
+ h[7] = (a5>>19) | ((a6&((1<<12)-1))<<13); /* (32-19) + 12 = 13+12 = 25 */
+ h[8] = (a6>>12) | ((a7&((1<< 6)-1))<<20); /* (32-12) + 6 = 20+ 6 = 26 */
+ h[9] = (a7>> 6)&((1<<25)-1); /* 25 */
+}
+
+static __always_inline void fe_frombytes(fe *h, const u8 *s)
+{
+ fe_frombytes_impl(h->v, s);
+}
+
+static __always_inline u8 /*bool*/
+addcarryx_u25(u8 /*bool*/ c, u32 a, u32 b, u32 *low)
+{
+ /* This function extracts 25 bits of result and 1 bit of carry
+ * (26 total), so a 32-bit intermediate is sufficient.
+ */
+ u32 x = a + b + c;
+ *low = x & ((1 << 25) - 1);
+ return (x >> 25) & 1;
+}
+
+static __always_inline u8 /*bool*/
+addcarryx_u26(u8 /*bool*/ c, u32 a, u32 b, u32 *low)
+{
+ /* This function extracts 26 bits of result and 1 bit of carry
+ * (27 total), so a 32-bit intermediate is sufficient.
+ */
+ u32 x = a + b + c;
+ *low = x & ((1 << 26) - 1);
+ return (x >> 26) & 1;
+}
+
+static __always_inline u8 /*bool*/
+subborrow_u25(u8 /*bool*/ c, u32 a, u32 b, u32 *low)
+{
+ /* This function extracts 25 bits of result and 1 bit of borrow
+ * (26 total), so a 32-bit intermediate is sufficient.
+ */
+ u32 x = a - b - c;
+ *low = x & ((1 << 25) - 1);
+ return x >> 31;
+}
+
+static __always_inline u8 /*bool*/
+subborrow_u26(u8 /*bool*/ c, u32 a, u32 b, u32 *low)
+{
+ /* This function extracts 26 bits of result and 1 bit of borrow
+	 * (27 total), so a 32-bit intermediate is sufficient.
+ */
+ u32 x = a - b - c;
+ *low = x & ((1 << 26) - 1);
+ return x >> 31;
+}
+
+static __always_inline u32 cmovznz32(u32 t, u32 z, u32 nz)
+{
+ t = -!!t; /* all set if nonzero, 0 if 0 */
+ return (t&nz) | ((~t)&z);
+}
+
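+/* Reduce to the canonical representative in [0, 2^255-19): subtract p
+ * limb-wise, then add p back under a constant-time mask derived from the
+ * final borrow.
+ */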
+static __always_inline void fe_freeze(u32 out[10], const u32 in1[10])
+{
+ { const u32 x17 = in1[9];
+ { const u32 x18 = in1[8];
+ { const u32 x16 = in1[7];
+ { const u32 x14 = in1[6];
+ { const u32 x12 = in1[5];
+ { const u32 x10 = in1[4];
+ { const u32 x8 = in1[3];
+ { const u32 x6 = in1[2];
+ { const u32 x4 = in1[1];
+ { const u32 x2 = in1[0];
+ { u32 x20; u8/*bool*/ x21 = subborrow_u26(0x0, x2, 0x3ffffed, &x20);
+ { u32 x23; u8/*bool*/ x24 = subborrow_u25(x21, x4, 0x1ffffff, &x23);
+ { u32 x26; u8/*bool*/ x27 = subborrow_u26(x24, x6, 0x3ffffff, &x26);
+ { u32 x29; u8/*bool*/ x30 = subborrow_u25(x27, x8, 0x1ffffff, &x29);
+ { u32 x32; u8/*bool*/ x33 = subborrow_u26(x30, x10, 0x3ffffff, &x32);
+ { u32 x35; u8/*bool*/ x36 = subborrow_u25(x33, x12, 0x1ffffff, &x35);
+ { u32 x38; u8/*bool*/ x39 = subborrow_u26(x36, x14, 0x3ffffff, &x38);
+ { u32 x41; u8/*bool*/ x42 = subborrow_u25(x39, x16, 0x1ffffff, &x41);
+ { u32 x44; u8/*bool*/ x45 = subborrow_u26(x42, x18, 0x3ffffff, &x44);
+ { u32 x47; u8/*bool*/ x48 = subborrow_u25(x45, x17, 0x1ffffff, &x47);
+ { u32 x49 = cmovznz32(x48, 0x0, 0xffffffff);
+ { u32 x50 = (x49 & 0x3ffffed);
+ { u32 x52; u8/*bool*/ x53 = addcarryx_u26(0x0, x20, x50, &x52);
+ { u32 x54 = (x49 & 0x1ffffff);
+ { u32 x56; u8/*bool*/ x57 = addcarryx_u25(x53, x23, x54, &x56);
+ { u32 x58 = (x49 & 0x3ffffff);
+ { u32 x60; u8/*bool*/ x61 = addcarryx_u26(x57, x26, x58, &x60);
+ { u32 x62 = (x49 & 0x1ffffff);
+ { u32 x64; u8/*bool*/ x65 = addcarryx_u25(x61, x29, x62, &x64);
+ { u32 x66 = (x49 & 0x3ffffff);
+ { u32 x68; u8/*bool*/ x69 = addcarryx_u26(x65, x32, x66, &x68);
+ { u32 x70 = (x49 & 0x1ffffff);
+ { u32 x72; u8/*bool*/ x73 = addcarryx_u25(x69, x35, x70, &x72);
+ { u32 x74 = (x49 & 0x3ffffff);
+ { u32 x76; u8/*bool*/ x77 = addcarryx_u26(x73, x38, x74, &x76);
+ { u32 x78 = (x49 & 0x1ffffff);
+ { u32 x80; u8/*bool*/ x81 = addcarryx_u25(x77, x41, x78, &x80);
+ { u32 x82 = (x49 & 0x3ffffff);
+ { u32 x84; u8/*bool*/ x85 = addcarryx_u26(x81, x44, x82, &x84);
+ { u32 x86 = (x49 & 0x1ffffff);
+ { u32 x88; addcarryx_u25(x85, x47, x86, &x88);
+ out[0] = x52;
+ out[1] = x56;
+ out[2] = x60;
+ out[3] = x64;
+ out[4] = x68;
+ out[5] = x72;
+ out[6] = x76;
+ out[7] = x80;
+ out[8] = x84;
+ out[9] = x88;
+ }}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}
+}
+
+static __always_inline void fe_tobytes(u8 s[32], const fe *f)
+{
+ u32 h[10];
+ fe_freeze(h, f->v);
+ s[0] = h[0] >> 0;
+ s[1] = h[0] >> 8;
+ s[2] = h[0] >> 16;
+ s[3] = (h[0] >> 24) | (h[1] << 2);
+ s[4] = h[1] >> 6;
+ s[5] = h[1] >> 14;
+ s[6] = (h[1] >> 22) | (h[2] << 3);
+ s[7] = h[2] >> 5;
+ s[8] = h[2] >> 13;
+ s[9] = (h[2] >> 21) | (h[3] << 5);
+ s[10] = h[3] >> 3;
+ s[11] = h[3] >> 11;
+ s[12] = (h[3] >> 19) | (h[4] << 6);
+ s[13] = h[4] >> 2;
+ s[14] = h[4] >> 10;
+ s[15] = h[4] >> 18;
+ s[16] = h[5] >> 0;
+ s[17] = h[5] >> 8;
+ s[18] = h[5] >> 16;
+ s[19] = (h[5] >> 24) | (h[6] << 1);
+ s[20] = h[6] >> 7;
+ s[21] = h[6] >> 15;
+ s[22] = (h[6] >> 23) | (h[7] << 3);
+ s[23] = h[7] >> 5;
+ s[24] = h[7] >> 13;
+ s[25] = (h[7] >> 21) | (h[8] << 4);
+ s[26] = h[8] >> 4;
+ s[27] = h[8] >> 12;
+ s[28] = (h[8] >> 20) | (h[9] << 6);
+ s[29] = h[9] >> 2;
+ s[30] = h[9] >> 10;
+ s[31] = h[9] >> 18;
+}
+
+/* h = f */
+static __always_inline void fe_copy(fe *h, const fe *f)
+{
+ memmove(h, f, sizeof(u32) * 10);
+}
+
+static __always_inline void fe_copy_lt(fe_loose *h, const fe *f)
+{
+ memmove(h, f, sizeof(u32) * 10);
+}
+
+/* h = 0 */
+static __always_inline void fe_0(fe *h)
+{
+ memset(h, 0, sizeof(u32) * 10);
+}
+
+/* h = 1 */
+static __always_inline void fe_1(fe *h)
+{
+ memset(h, 0, sizeof(u32) * 10);
+ h->v[0] = 1;
+}
+
+static noinline void fe_add_impl(u32 out[10], const u32 in1[10], const u32 in2[10])
+{
+ { const u32 x20 = in1[9];
+ { const u32 x21 = in1[8];
+ { const u32 x19 = in1[7];
+ { const u32 x17 = in1[6];
+ { const u32 x15 = in1[5];
+ { const u32 x13 = in1[4];
+ { const u32 x11 = in1[3];
+ { const u32 x9 = in1[2];
+ { const u32 x7 = in1[1];
+ { const u32 x5 = in1[0];
+ { const u32 x38 = in2[9];
+ { const u32 x39 = in2[8];
+ { const u32 x37 = in2[7];
+ { const u32 x35 = in2[6];
+ { const u32 x33 = in2[5];
+ { const u32 x31 = in2[4];
+ { const u32 x29 = in2[3];
+ { const u32 x27 = in2[2];
+ { const u32 x25 = in2[1];
+ { const u32 x23 = in2[0];
+ out[0] = (x5 + x23);
+ out[1] = (x7 + x25);
+ out[2] = (x9 + x27);
+ out[3] = (x11 + x29);
+ out[4] = (x13 + x31);
+ out[5] = (x15 + x33);
+ out[6] = (x17 + x35);
+ out[7] = (x19 + x37);
+ out[8] = (x21 + x39);
+ out[9] = (x20 + x38);
+ }}}}}}}}}}}}}}}}}}}}
+}
+
+/* h = f + g
+ * Can overlap h with f or g.
+ */
+static __always_inline void fe_add(fe_loose *h, const fe *f, const fe *g)
+{
+ fe_add_impl(h->v, f->v, g->v);
+}
+
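+/* The constants below are the limbs of 2*p in this radix, added before
+ * subtracting so that no limb underflows; the result is an fe_loose.
+ */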
+static noinline void fe_sub_impl(u32 out[10], const u32 in1[10], const u32 in2[10])
+{
+ { const u32 x20 = in1[9];
+ { const u32 x21 = in1[8];
+ { const u32 x19 = in1[7];
+ { const u32 x17 = in1[6];
+ { const u32 x15 = in1[5];
+ { const u32 x13 = in1[4];
+ { const u32 x11 = in1[3];
+ { const u32 x9 = in1[2];
+ { const u32 x7 = in1[1];
+ { const u32 x5 = in1[0];
+ { const u32 x38 = in2[9];
+ { const u32 x39 = in2[8];
+ { const u32 x37 = in2[7];
+ { const u32 x35 = in2[6];
+ { const u32 x33 = in2[5];
+ { const u32 x31 = in2[4];
+ { const u32 x29 = in2[3];
+ { const u32 x27 = in2[2];
+ { const u32 x25 = in2[1];
+ { const u32 x23 = in2[0];
+ out[0] = ((0x7ffffda + x5) - x23);
+ out[1] = ((0x3fffffe + x7) - x25);
+ out[2] = ((0x7fffffe + x9) - x27);
+ out[3] = ((0x3fffffe + x11) - x29);
+ out[4] = ((0x7fffffe + x13) - x31);
+ out[5] = ((0x3fffffe + x15) - x33);
+ out[6] = ((0x7fffffe + x17) - x35);
+ out[7] = ((0x3fffffe + x19) - x37);
+ out[8] = ((0x7fffffe + x21) - x39);
+ out[9] = ((0x3fffffe + x20) - x38);
+ }}}}}}}}}}}}}}}}}}}}
+}
+
+/* h = f - g
+ * Can overlap h with f or g.
+ */
+static __always_inline void fe_sub(fe_loose *h, const fe *f, const fe *g)
+{
+ fe_sub_impl(h->v, f->v, g->v);
+}
+
+static noinline void fe_mul_impl(u32 out[10], const u32 in1[10], const u32 in2[10])
+{
+ { const u32 x20 = in1[9];
+ { const u32 x21 = in1[8];
+ { const u32 x19 = in1[7];
+ { const u32 x17 = in1[6];
+ { const u32 x15 = in1[5];
+ { const u32 x13 = in1[4];
+ { const u32 x11 = in1[3];
+ { const u32 x9 = in1[2];
+ { const u32 x7 = in1[1];
+ { const u32 x5 = in1[0];
+ { const u32 x38 = in2[9];
+ { const u32 x39 = in2[8];
+ { const u32 x37 = in2[7];
+ { const u32 x35 = in2[6];
+ { const u32 x33 = in2[5];
+ { const u32 x31 = in2[4];
+ { const u32 x29 = in2[3];
+ { const u32 x27 = in2[2];
+ { const u32 x25 = in2[1];
+ { const u32 x23 = in2[0];
+ { u64 x40 = ((u64)x23 * x5);
+ { u64 x41 = (((u64)x23 * x7) + ((u64)x25 * x5));
+ { u64 x42 = ((((u64)(0x2 * x25) * x7) + ((u64)x23 * x9)) + ((u64)x27 * x5));
+ { u64 x43 = (((((u64)x25 * x9) + ((u64)x27 * x7)) + ((u64)x23 * x11)) + ((u64)x29 * x5));
+ { u64 x44 = (((((u64)x27 * x9) + (0x2 * (((u64)x25 * x11) + ((u64)x29 * x7)))) + ((u64)x23 * x13)) + ((u64)x31 * x5));
+ { u64 x45 = (((((((u64)x27 * x11) + ((u64)x29 * x9)) + ((u64)x25 * x13)) + ((u64)x31 * x7)) + ((u64)x23 * x15)) + ((u64)x33 * x5));
+ { u64 x46 = (((((0x2 * ((((u64)x29 * x11) + ((u64)x25 * x15)) + ((u64)x33 * x7))) + ((u64)x27 * x13)) + ((u64)x31 * x9)) + ((u64)x23 * x17)) + ((u64)x35 * x5));
+ { u64 x47 = (((((((((u64)x29 * x13) + ((u64)x31 * x11)) + ((u64)x27 * x15)) + ((u64)x33 * x9)) + ((u64)x25 * x17)) + ((u64)x35 * x7)) + ((u64)x23 * x19)) + ((u64)x37 * x5));
+ { u64 x48 = (((((((u64)x31 * x13) + (0x2 * (((((u64)x29 * x15) + ((u64)x33 * x11)) + ((u64)x25 * x19)) + ((u64)x37 * x7)))) + ((u64)x27 * x17)) + ((u64)x35 * x9)) + ((u64)x23 * x21)) + ((u64)x39 * x5));
+ { u64 x49 = (((((((((((u64)x31 * x15) + ((u64)x33 * x13)) + ((u64)x29 * x17)) + ((u64)x35 * x11)) + ((u64)x27 * x19)) + ((u64)x37 * x9)) + ((u64)x25 * x21)) + ((u64)x39 * x7)) + ((u64)x23 * x20)) + ((u64)x38 * x5));
+ { u64 x50 = (((((0x2 * ((((((u64)x33 * x15) + ((u64)x29 * x19)) + ((u64)x37 * x11)) + ((u64)x25 * x20)) + ((u64)x38 * x7))) + ((u64)x31 * x17)) + ((u64)x35 * x13)) + ((u64)x27 * x21)) + ((u64)x39 * x9));
+ { u64 x51 = (((((((((u64)x33 * x17) + ((u64)x35 * x15)) + ((u64)x31 * x19)) + ((u64)x37 * x13)) + ((u64)x29 * x21)) + ((u64)x39 * x11)) + ((u64)x27 * x20)) + ((u64)x38 * x9));
+ { u64 x52 = (((((u64)x35 * x17) + (0x2 * (((((u64)x33 * x19) + ((u64)x37 * x15)) + ((u64)x29 * x20)) + ((u64)x38 * x11)))) + ((u64)x31 * x21)) + ((u64)x39 * x13));
+ { u64 x53 = (((((((u64)x35 * x19) + ((u64)x37 * x17)) + ((u64)x33 * x21)) + ((u64)x39 * x15)) + ((u64)x31 * x20)) + ((u64)x38 * x13));
+ { u64 x54 = (((0x2 * ((((u64)x37 * x19) + ((u64)x33 * x20)) + ((u64)x38 * x15))) + ((u64)x35 * x21)) + ((u64)x39 * x17));
+ { u64 x55 = (((((u64)x37 * x21) + ((u64)x39 * x19)) + ((u64)x35 * x20)) + ((u64)x38 * x17));
+ { u64 x56 = (((u64)x39 * x21) + (0x2 * (((u64)x37 * x20) + ((u64)x38 * x19))));
+ { u64 x57 = (((u64)x39 * x20) + ((u64)x38 * x21));
+ { u64 x58 = ((u64)(0x2 * x38) * x20);
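+	/* Fold the high limbs back in: (x << 4) + (x << 1) + x = 19*x, using
+	 * 2^255 == 19 (mod p).
+	 */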
+ { u64 x59 = (x48 + (x58 << 0x4));
+ { u64 x60 = (x59 + (x58 << 0x1));
+ { u64 x61 = (x60 + x58);
+ { u64 x62 = (x47 + (x57 << 0x4));
+ { u64 x63 = (x62 + (x57 << 0x1));
+ { u64 x64 = (x63 + x57);
+ { u64 x65 = (x46 + (x56 << 0x4));
+ { u64 x66 = (x65 + (x56 << 0x1));
+ { u64 x67 = (x66 + x56);
+ { u64 x68 = (x45 + (x55 << 0x4));
+ { u64 x69 = (x68 + (x55 << 0x1));
+ { u64 x70 = (x69 + x55);
+ { u64 x71 = (x44 + (x54 << 0x4));
+ { u64 x72 = (x71 + (x54 << 0x1));
+ { u64 x73 = (x72 + x54);
+ { u64 x74 = (x43 + (x53 << 0x4));
+ { u64 x75 = (x74 + (x53 << 0x1));
+ { u64 x76 = (x75 + x53);
+ { u64 x77 = (x42 + (x52 << 0x4));
+ { u64 x78 = (x77 + (x52 << 0x1));
+ { u64 x79 = (x78 + x52);
+ { u64 x80 = (x41 + (x51 << 0x4));
+ { u64 x81 = (x80 + (x51 << 0x1));
+ { u64 x82 = (x81 + x51);
+ { u64 x83 = (x40 + (x50 << 0x4));
+ { u64 x84 = (x83 + (x50 << 0x1));
+ { u64 x85 = (x84 + x50);
+ { u64 x86 = (x85 >> 0x1a);
+ { u32 x87 = ((u32)x85 & 0x3ffffff);
+ { u64 x88 = (x86 + x82);
+ { u64 x89 = (x88 >> 0x19);
+ { u32 x90 = ((u32)x88 & 0x1ffffff);
+ { u64 x91 = (x89 + x79);
+ { u64 x92 = (x91 >> 0x1a);
+ { u32 x93 = ((u32)x91 & 0x3ffffff);
+ { u64 x94 = (x92 + x76);
+ { u64 x95 = (x94 >> 0x19);
+ { u32 x96 = ((u32)x94 & 0x1ffffff);
+ { u64 x97 = (x95 + x73);
+ { u64 x98 = (x97 >> 0x1a);
+ { u32 x99 = ((u32)x97 & 0x3ffffff);
+ { u64 x100 = (x98 + x70);
+ { u64 x101 = (x100 >> 0x19);
+ { u32 x102 = ((u32)x100 & 0x1ffffff);
+ { u64 x103 = (x101 + x67);
+ { u64 x104 = (x103 >> 0x1a);
+ { u32 x105 = ((u32)x103 & 0x3ffffff);
+ { u64 x106 = (x104 + x64);
+ { u64 x107 = (x106 >> 0x19);
+ { u32 x108 = ((u32)x106 & 0x1ffffff);
+ { u64 x109 = (x107 + x61);
+ { u64 x110 = (x109 >> 0x1a);
+ { u32 x111 = ((u32)x109 & 0x3ffffff);
+ { u64 x112 = (x110 + x49);
+ { u64 x113 = (x112 >> 0x19);
+ { u32 x114 = ((u32)x112 & 0x1ffffff);
+ { u64 x115 = (x87 + (0x13 * x113));
+ { u32 x116 = (u32) (x115 >> 0x1a);
+ { u32 x117 = ((u32)x115 & 0x3ffffff);
+ { u32 x118 = (x116 + x90);
+ { u32 x119 = (x118 >> 0x19);
+ { u32 x120 = (x118 & 0x1ffffff);
+ out[0] = x117;
+ out[1] = x120;
+ out[2] = (x119 + x93);
+ out[3] = x96;
+ out[4] = x99;
+ out[5] = x102;
+ out[6] = x105;
+ out[7] = x108;
+ out[8] = x111;
+ out[9] = x114;
+ }}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}
+}
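+
+/*
+ * Reduction notes for the sequences above: the limbs use radix 2^25.5
+ * (alternating 26- and 25-bit limbs), so a partial product landing at
+ * limb position 10 or higher wraps around to position i - 10 scaled
+ * by 19, because 2^255 == 19 (mod 2^255 - 19).  Each
+ * (x << 0x4) + (x << 0x1) + x sequence is that scaling written out:
+ * 16x + 2x + x == 19x.  The tail of the function is then one carry
+ * pass, alternating >> 0x1a with >> 0x19 to match the 26/25-bit limb
+ * widths, with the final carry folded back into limb 0 via another
+ * multiply by 0x13 (19).
+ */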
+
+static __always_inline void fe_mul_ttt(fe *h, const fe *f, const fe *g)
+{
+ fe_mul_impl(h->v, f->v, g->v);
+}
+
+static __always_inline void fe_mul_tlt(fe *h, const fe_loose *f, const fe *g)
+{
+ fe_mul_impl(h->v, f->v, g->v);
+}
+
+static __always_inline void
+fe_mul_tll(fe *h, const fe_loose *f, const fe_loose *g)
+{
+ fe_mul_impl(h->v, f->v, g->v);
+}
+
+static noinline void fe_sqr_impl(u32 out[10], const u32 in1[10])
+{
+ { const u32 x17 = in1[9];
+ { const u32 x18 = in1[8];
+ { const u32 x16 = in1[7];
+ { const u32 x14 = in1[6];
+ { const u32 x12 = in1[5];
+ { const u32 x10 = in1[4];
+ { const u32 x8 = in1[3];
+ { const u32 x6 = in1[2];
+ { const u32 x4 = in1[1];
+ { const u32 x2 = in1[0];
+ { u64 x19 = ((u64)x2 * x2);
+ { u64 x20 = ((u64)(0x2 * x2) * x4);
+ { u64 x21 = (0x2 * (((u64)x4 * x4) + ((u64)x2 * x6)));
+ { u64 x22 = (0x2 * (((u64)x4 * x6) + ((u64)x2 * x8)));
+ { u64 x23 = ((((u64)x6 * x6) + ((u64)(0x4 * x4) * x8)) + ((u64)(0x2 * x2) * x10));
+ { u64 x24 = (0x2 * ((((u64)x6 * x8) + ((u64)x4 * x10)) + ((u64)x2 * x12)));
+ { u64 x25 = (0x2 * (((((u64)x8 * x8) + ((u64)x6 * x10)) + ((u64)x2 * x14)) + ((u64)(0x2 * x4) * x12)));
+ { u64 x26 = (0x2 * (((((u64)x8 * x10) + ((u64)x6 * x12)) + ((u64)x4 * x14)) + ((u64)x2 * x16)));
+ { u64 x27 = (((u64)x10 * x10) + (0x2 * ((((u64)x6 * x14) + ((u64)x2 * x18)) + (0x2 * (((u64)x4 * x16) + ((u64)x8 * x12))))));
+ { u64 x28 = (0x2 * ((((((u64)x10 * x12) + ((u64)x8 * x14)) + ((u64)x6 * x16)) + ((u64)x4 * x18)) + ((u64)x2 * x17)));
+ { u64 x29 = (0x2 * (((((u64)x12 * x12) + ((u64)x10 * x14)) + ((u64)x6 * x18)) + (0x2 * (((u64)x8 * x16) + ((u64)x4 * x17)))));
+ { u64 x30 = (0x2 * (((((u64)x12 * x14) + ((u64)x10 * x16)) + ((u64)x8 * x18)) + ((u64)x6 * x17)));
+ { u64 x31 = (((u64)x14 * x14) + (0x2 * (((u64)x10 * x18) + (0x2 * (((u64)x12 * x16) + ((u64)x8 * x17))))));
+ { u64 x32 = (0x2 * ((((u64)x14 * x16) + ((u64)x12 * x18)) + ((u64)x10 * x17)));
+ { u64 x33 = (0x2 * ((((u64)x16 * x16) + ((u64)x14 * x18)) + ((u64)(0x2 * x12) * x17)));
+ { u64 x34 = (0x2 * (((u64)x16 * x18) + ((u64)x14 * x17)));
+ { u64 x35 = (((u64)x18 * x18) + ((u64)(0x4 * x16) * x17));
+ { u64 x36 = ((u64)(0x2 * x18) * x17);
+ { u64 x37 = ((u64)(0x2 * x17) * x17);
+ { u64 x38 = (x27 + (x37 << 0x4));
+ { u64 x39 = (x38 + (x37 << 0x1));
+ { u64 x40 = (x39 + x37);
+ { u64 x41 = (x26 + (x36 << 0x4));
+ { u64 x42 = (x41 + (x36 << 0x1));
+ { u64 x43 = (x42 + x36);
+ { u64 x44 = (x25 + (x35 << 0x4));
+ { u64 x45 = (x44 + (x35 << 0x1));
+ { u64 x46 = (x45 + x35);
+ { u64 x47 = (x24 + (x34 << 0x4));
+ { u64 x48 = (x47 + (x34 << 0x1));
+ { u64 x49 = (x48 + x34);
+ { u64 x50 = (x23 + (x33 << 0x4));
+ { u64 x51 = (x50 + (x33 << 0x1));
+ { u64 x52 = (x51 + x33);
+ { u64 x53 = (x22 + (x32 << 0x4));
+ { u64 x54 = (x53 + (x32 << 0x1));
+ { u64 x55 = (x54 + x32);
+ { u64 x56 = (x21 + (x31 << 0x4));
+ { u64 x57 = (x56 + (x31 << 0x1));
+ { u64 x58 = (x57 + x31);
+ { u64 x59 = (x20 + (x30 << 0x4));
+ { u64 x60 = (x59 + (x30 << 0x1));
+ { u64 x61 = (x60 + x30);
+ { u64 x62 = (x19 + (x29 << 0x4));
+ { u64 x63 = (x62 + (x29 << 0x1));
+ { u64 x64 = (x63 + x29);
+ { u64 x65 = (x64 >> 0x1a);
+ { u32 x66 = ((u32)x64 & 0x3ffffff);
+ { u64 x67 = (x65 + x61);
+ { u64 x68 = (x67 >> 0x19);
+ { u32 x69 = ((u32)x67 & 0x1ffffff);
+ { u64 x70 = (x68 + x58);
+ { u64 x71 = (x70 >> 0x1a);
+ { u32 x72 = ((u32)x70 & 0x3ffffff);
+ { u64 x73 = (x71 + x55);
+ { u64 x74 = (x73 >> 0x19);
+ { u32 x75 = ((u32)x73 & 0x1ffffff);
+ { u64 x76 = (x74 + x52);
+ { u64 x77 = (x76 >> 0x1a);
+ { u32 x78 = ((u32)x76 & 0x3ffffff);
+ { u64 x79 = (x77 + x49);
+ { u64 x80 = (x79 >> 0x19);
+ { u32 x81 = ((u32)x79 & 0x1ffffff);
+ { u64 x82 = (x80 + x46);
+ { u64 x83 = (x82 >> 0x1a);
+ { u32 x84 = ((u32)x82 & 0x3ffffff);
+ { u64 x85 = (x83 + x43);
+ { u64 x86 = (x85 >> 0x19);
+ { u32 x87 = ((u32)x85 & 0x1ffffff);
+ { u64 x88 = (x86 + x40);
+ { u64 x89 = (x88 >> 0x1a);
+ { u32 x90 = ((u32)x88 & 0x3ffffff);
+ { u64 x91 = (x89 + x28);
+ { u64 x92 = (x91 >> 0x19);
+ { u32 x93 = ((u32)x91 & 0x1ffffff);
+ { u64 x94 = (x66 + (0x13 * x92));
+ { u32 x95 = (u32) (x94 >> 0x1a);
+ { u32 x96 = ((u32)x94 & 0x3ffffff);
+ { u32 x97 = (x95 + x69);
+ { u32 x98 = (x97 >> 0x19);
+ { u32 x99 = (x97 & 0x1ffffff);
+ out[0] = x96;
+ out[1] = x99;
+ out[2] = (x98 + x72);
+ out[3] = x75;
+ out[4] = x78;
+ out[5] = x81;
+ out[6] = x84;
+ out[7] = x87;
+ out[8] = x90;
+ out[9] = x93;
+ }}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}
+}
+
+static __always_inline void fe_sq_tl(fe *h, const fe_loose *f)
+{
+ fe_sqr_impl(h->v, f->v);
+}
+
+static __always_inline void fe_sq_tt(fe *h, const fe *f)
+{
+ fe_sqr_impl(h->v, f->v);
+}
+
+static __always_inline void fe_loose_invert(fe *out, const fe_loose *z)
+{
+ fe t0;
+ fe t1;
+ fe t2;
+ fe t3;
+ int i;
+
+ fe_sq_tl(&t0, z);
+ fe_sq_tt(&t1, &t0);
+ for (i = 1; i < 2; ++i)
+ fe_sq_tt(&t1, &t1);
+ fe_mul_tlt(&t1, z, &t1);
+ fe_mul_ttt(&t0, &t0, &t1);
+ fe_sq_tt(&t2, &t0);
+ fe_mul_ttt(&t1, &t1, &t2);
+ fe_sq_tt(&t2, &t1);
+ for (i = 1; i < 5; ++i)
+ fe_sq_tt(&t2, &t2);
+ fe_mul_ttt(&t1, &t2, &t1);
+ fe_sq_tt(&t2, &t1);
+ for (i = 1; i < 10; ++i)
+ fe_sq_tt(&t2, &t2);
+ fe_mul_ttt(&t2, &t2, &t1);
+ fe_sq_tt(&t3, &t2);
+ for (i = 1; i < 20; ++i)
+ fe_sq_tt(&t3, &t3);
+ fe_mul_ttt(&t2, &t3, &t2);
+ fe_sq_tt(&t2, &t2);
+ for (i = 1; i < 10; ++i)
+ fe_sq_tt(&t2, &t2);
+ fe_mul_ttt(&t1, &t2, &t1);
+ fe_sq_tt(&t2, &t1);
+ for (i = 1; i < 50; ++i)
+ fe_sq_tt(&t2, &t2);
+ fe_mul_ttt(&t2, &t2, &t1);
+ fe_sq_tt(&t3, &t2);
+ for (i = 1; i < 100; ++i)
+ fe_sq_tt(&t3, &t3);
+ fe_mul_ttt(&t2, &t3, &t2);
+ fe_sq_tt(&t2, &t2);
+ for (i = 1; i < 50; ++i)
+ fe_sq_tt(&t2, &t2);
+ fe_mul_ttt(&t1, &t2, &t1);
+ fe_sq_tt(&t1, &t1);
+ for (i = 1; i < 5; ++i)
+ fe_sq_tt(&t1, &t1);
+ fe_mul_ttt(out, &t1, &t0);
+}
+
+static __always_inline void fe_invert(fe *out, const fe *z)
+{
+ fe_loose l;
+ fe_copy_lt(&l, z);
+ fe_loose_invert(out, &l);
+}
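+
+/*
+ * Both inversion helpers compute z^(p - 2) = z^(2^255 - 21), which is
+ * z^-1 by Fermat's little theorem since p = 2^255 - 19 is prime.  The
+ * fixed chain of 254 squarings and 11 multiplications runs in
+ * constant time regardless of z, and maps 0 to 0 -- the
+ * "fe_invert(0) = 0" precondition the ladder below relies on.
+ */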
+
+/* Replace (f,g) with (g,f) if b == 1;
+ * replace (f,g) with (f,g) if b == 0.
+ *
+ * Preconditions: b in {0,1}
+ */
+static noinline void fe_cswap(fe *f, fe *g, unsigned int b)
+{
+ unsigned i;
+ b = 0 - b;
+ for (i = 0; i < 10; i++) {
+ u32 x = f->v[i] ^ g->v[i];
+ x &= b;
+ f->v[i] ^= x;
+ g->v[i] ^= x;
+ }
+}
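+
+/*
+ * The swap is branch-free: "b = 0 - b" turns b in {0,1} into an
+ * all-zero or all-one mask, and each limb pair is conditionally
+ * exchanged with the XOR trick.  In isolation:
+ *
+ *   mask = 0 - b;          0x00000000 or 0xffffffff
+ *   x = (f ^ g) & mask;    0, or f ^ g
+ *   f ^= x;                f unchanged, or f = old g
+ *   g ^= x;                g unchanged, or g = old f
+ *
+ * Avoiding a data-dependent branch keeps the secret bit b away from
+ * the branch predictor and other timing side channels.
+ */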
+
+/* NOTE: based on fiat-crypto fe_mul, edited for in2=121666, 0, 0. */
+static __always_inline void fe_mul_121666_impl(u32 out[10], const u32 in1[10])
+{
+ { const u32 x20 = in1[9];
+ { const u32 x21 = in1[8];
+ { const u32 x19 = in1[7];
+ { const u32 x17 = in1[6];
+ { const u32 x15 = in1[5];
+ { const u32 x13 = in1[4];
+ { const u32 x11 = in1[3];
+ { const u32 x9 = in1[2];
+ { const u32 x7 = in1[1];
+ { const u32 x5 = in1[0];
+ { const u32 x38 = 0;
+ { const u32 x39 = 0;
+ { const u32 x37 = 0;
+ { const u32 x35 = 0;
+ { const u32 x33 = 0;
+ { const u32 x31 = 0;
+ { const u32 x29 = 0;
+ { const u32 x27 = 0;
+ { const u32 x25 = 0;
+ { const u32 x23 = 121666;
+ { u64 x40 = ((u64)x23 * x5);
+ { u64 x41 = (((u64)x23 * x7) + ((u64)x25 * x5));
+ { u64 x42 = ((((u64)(0x2 * x25) * x7) + ((u64)x23 * x9)) + ((u64)x27 * x5));
+ { u64 x43 = (((((u64)x25 * x9) + ((u64)x27 * x7)) + ((u64)x23 * x11)) + ((u64)x29 * x5));
+ { u64 x44 = (((((u64)x27 * x9) + (0x2 * (((u64)x25 * x11) + ((u64)x29 * x7)))) + ((u64)x23 * x13)) + ((u64)x31 * x5));
+ { u64 x45 = (((((((u64)x27 * x11) + ((u64)x29 * x9)) + ((u64)x25 * x13)) + ((u64)x31 * x7)) + ((u64)x23 * x15)) + ((u64)x33 * x5));
+ { u64 x46 = (((((0x2 * ((((u64)x29 * x11) + ((u64)x25 * x15)) + ((u64)x33 * x7))) + ((u64)x27 * x13)) + ((u64)x31 * x9)) + ((u64)x23 * x17)) + ((u64)x35 * x5));
+ { u64 x47 = (((((((((u64)x29 * x13) + ((u64)x31 * x11)) + ((u64)x27 * x15)) + ((u64)x33 * x9)) + ((u64)x25 * x17)) + ((u64)x35 * x7)) + ((u64)x23 * x19)) + ((u64)x37 * x5));
+ { u64 x48 = (((((((u64)x31 * x13) + (0x2 * (((((u64)x29 * x15) + ((u64)x33 * x11)) + ((u64)x25 * x19)) + ((u64)x37 * x7)))) + ((u64)x27 * x17)) + ((u64)x35 * x9)) + ((u64)x23 * x21)) + ((u64)x39 * x5));
+ { u64 x49 = (((((((((((u64)x31 * x15) + ((u64)x33 * x13)) + ((u64)x29 * x17)) + ((u64)x35 * x11)) + ((u64)x27 * x19)) + ((u64)x37 * x9)) + ((u64)x25 * x21)) + ((u64)x39 * x7)) + ((u64)x23 * x20)) + ((u64)x38 * x5));
+ { u64 x50 = (((((0x2 * ((((((u64)x33 * x15) + ((u64)x29 * x19)) + ((u64)x37 * x11)) + ((u64)x25 * x20)) + ((u64)x38 * x7))) + ((u64)x31 * x17)) + ((u64)x35 * x13)) + ((u64)x27 * x21)) + ((u64)x39 * x9));
+ { u64 x51 = (((((((((u64)x33 * x17) + ((u64)x35 * x15)) + ((u64)x31 * x19)) + ((u64)x37 * x13)) + ((u64)x29 * x21)) + ((u64)x39 * x11)) + ((u64)x27 * x20)) + ((u64)x38 * x9));
+ { u64 x52 = (((((u64)x35 * x17) + (0x2 * (((((u64)x33 * x19) + ((u64)x37 * x15)) + ((u64)x29 * x20)) + ((u64)x38 * x11)))) + ((u64)x31 * x21)) + ((u64)x39 * x13));
+ { u64 x53 = (((((((u64)x35 * x19) + ((u64)x37 * x17)) + ((u64)x33 * x21)) + ((u64)x39 * x15)) + ((u64)x31 * x20)) + ((u64)x38 * x13));
+ { u64 x54 = (((0x2 * ((((u64)x37 * x19) + ((u64)x33 * x20)) + ((u64)x38 * x15))) + ((u64)x35 * x21)) + ((u64)x39 * x17));
+ { u64 x55 = (((((u64)x37 * x21) + ((u64)x39 * x19)) + ((u64)x35 * x20)) + ((u64)x38 * x17));
+ { u64 x56 = (((u64)x39 * x21) + (0x2 * (((u64)x37 * x20) + ((u64)x38 * x19))));
+ { u64 x57 = (((u64)x39 * x20) + ((u64)x38 * x21));
+ { u64 x58 = ((u64)(0x2 * x38) * x20);
+ { u64 x59 = (x48 + (x58 << 0x4));
+ { u64 x60 = (x59 + (x58 << 0x1));
+ { u64 x61 = (x60 + x58);
+ { u64 x62 = (x47 + (x57 << 0x4));
+ { u64 x63 = (x62 + (x57 << 0x1));
+ { u64 x64 = (x63 + x57);
+ { u64 x65 = (x46 + (x56 << 0x4));
+ { u64 x66 = (x65 + (x56 << 0x1));
+ { u64 x67 = (x66 + x56);
+ { u64 x68 = (x45 + (x55 << 0x4));
+ { u64 x69 = (x68 + (x55 << 0x1));
+ { u64 x70 = (x69 + x55);
+ { u64 x71 = (x44 + (x54 << 0x4));
+ { u64 x72 = (x71 + (x54 << 0x1));
+ { u64 x73 = (x72 + x54);
+ { u64 x74 = (x43 + (x53 << 0x4));
+ { u64 x75 = (x74 + (x53 << 0x1));
+ { u64 x76 = (x75 + x53);
+ { u64 x77 = (x42 + (x52 << 0x4));
+ { u64 x78 = (x77 + (x52 << 0x1));
+ { u64 x79 = (x78 + x52);
+ { u64 x80 = (x41 + (x51 << 0x4));
+ { u64 x81 = (x80 + (x51 << 0x1));
+ { u64 x82 = (x81 + x51);
+ { u64 x83 = (x40 + (x50 << 0x4));
+ { u64 x84 = (x83 + (x50 << 0x1));
+ { u64 x85 = (x84 + x50);
+ { u64 x86 = (x85 >> 0x1a);
+ { u32 x87 = ((u32)x85 & 0x3ffffff);
+ { u64 x88 = (x86 + x82);
+ { u64 x89 = (x88 >> 0x19);
+ { u32 x90 = ((u32)x88 & 0x1ffffff);
+ { u64 x91 = (x89 + x79);
+ { u64 x92 = (x91 >> 0x1a);
+ { u32 x93 = ((u32)x91 & 0x3ffffff);
+ { u64 x94 = (x92 + x76);
+ { u64 x95 = (x94 >> 0x19);
+ { u32 x96 = ((u32)x94 & 0x1ffffff);
+ { u64 x97 = (x95 + x73);
+ { u64 x98 = (x97 >> 0x1a);
+ { u32 x99 = ((u32)x97 & 0x3ffffff);
+ { u64 x100 = (x98 + x70);
+ { u64 x101 = (x100 >> 0x19);
+ { u32 x102 = ((u32)x100 & 0x1ffffff);
+ { u64 x103 = (x101 + x67);
+ { u64 x104 = (x103 >> 0x1a);
+ { u32 x105 = ((u32)x103 & 0x3ffffff);
+ { u64 x106 = (x104 + x64);
+ { u64 x107 = (x106 >> 0x19);
+ { u32 x108 = ((u32)x106 & 0x1ffffff);
+ { u64 x109 = (x107 + x61);
+ { u64 x110 = (x109 >> 0x1a);
+ { u32 x111 = ((u32)x109 & 0x3ffffff);
+ { u64 x112 = (x110 + x49);
+ { u64 x113 = (x112 >> 0x19);
+ { u32 x114 = ((u32)x112 & 0x1ffffff);
+ { u64 x115 = (x87 + (0x13 * x113));
+ { u32 x116 = (u32) (x115 >> 0x1a);
+ { u32 x117 = ((u32)x115 & 0x3ffffff);
+ { u32 x118 = (x116 + x90);
+ { u32 x119 = (x118 >> 0x19);
+ { u32 x120 = (x118 & 0x1ffffff);
+ out[0] = x117;
+ out[1] = x120;
+ out[2] = (x119 + x93);
+ out[3] = x96;
+ out[4] = x99;
+ out[5] = x102;
+ out[6] = x105;
+ out[7] = x108;
+ out[8] = x111;
+ out[9] = x114;
+ }}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}
+}
+
+static __always_inline void fe_mul121666(fe *h, const fe_loose *f)
+{
+ fe_mul_121666_impl(h->v, f->v);
+}
+
+void curve25519_generic(u8 out[CURVE25519_KEY_SIZE],
+ const u8 scalar[CURVE25519_KEY_SIZE],
+ const u8 point[CURVE25519_KEY_SIZE])
+{
+ fe x1, x2, z2, x3, z3;
+ fe_loose x2l, z2l, x3l;
+ unsigned swap = 0;
+ int pos;
+ u8 e[32];
+
+ memcpy(e, scalar, 32);
+ curve25519_clamp_secret(e);
+
+ /* The following implementation was transcribed to Coq and proven to
+ * correspond to unary scalar multiplication in affine coordinates given
+ * that x1 != 0 is the x coordinate of some point on the curve. It was
+ * also checked in Coq that doing a ladderstep with x1 = x3 = 0 gives
+ * z2' = z3' = 0, and z2 = z3 = 0 gives z2' = z3' = 0. The statement was
+ * quantified over the underlying field, so it applies to Curve25519
+ * itself and the quadratic twist of Curve25519. It was not proven in
+ * Coq that prime-field arithmetic correctly simulates extension-field
+ * arithmetic on prime-field values. The decoding of the byte array
+ * representation of e was not considered.
+ *
+ * Specification of Montgomery curves in affine coordinates:
+ * <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Spec/MontgomeryCurve.v#L27>
+ *
+ * Proof that these form a group that is isomorphic to a Weierstrass
+ * curve:
+ * <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Curves/Montgomery/AffineProofs.v#L35>
+ *
+ * Coq transcription and correctness proof of the loop
+ * (where scalarbits=255):
+ * <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Curves/Montgomery/XZ.v#L118>
+ * <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Curves/Montgomery/XZProofs.v#L278>
+ * preconditions: 0 <= e < 2^255 (not necessarily e < order),
+ * fe_invert(0) = 0
+ */
+ fe_frombytes(&x1, point);
+ fe_1(&x2);
+ fe_0(&z2);
+ fe_copy(&x3, &x1);
+ fe_1(&z3);
+
+ for (pos = 254; pos >= 0; --pos) {
+ fe tmp0, tmp1;
+ fe_loose tmp0l, tmp1l;
+ /* loop invariant as of right before the test, for the case
+ * where x1 != 0:
+ * pos >= -1; if z2 = 0 then x2 is nonzero; if z3 = 0 then x3
+ * is nonzero
+ * let r := e >> (pos+1) in the following equalities of
+ * projective points:
+ * to_xz (r*P) === if swap then (x3, z3) else (x2, z2)
+ * to_xz ((r+1)*P) === if swap then (x2, z2) else (x3, z3)
+ * x1 is the nonzero x coordinate of the nonzero
+ * point (r*P-(r+1)*P)
+ */
+ unsigned b = 1 & (e[pos / 8] >> (pos & 7));
+ swap ^= b;
+ fe_cswap(&x2, &x3, swap);
+ fe_cswap(&z2, &z3, swap);
+ swap = b;
+ /* Coq transcription of ladderstep formula (called from
+ * transcribed loop):
+ * <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Curves/Montgomery/XZ.v#L89>
+ * <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Curves/Montgomery/XZProofs.v#L131>
+ * x1 != 0 <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Curves/Montgomery/XZProofs.v#L217>
+ * x1 = 0 <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Curves/Montgomery/XZProofs.v#L147>
+ */
+ fe_sub(&tmp0l, &x3, &z3);
+ fe_sub(&tmp1l, &x2, &z2);
+ fe_add(&x2l, &x2, &z2);
+ fe_add(&z2l, &x3, &z3);
+ fe_mul_tll(&z3, &tmp0l, &x2l);
+ fe_mul_tll(&z2, &z2l, &tmp1l);
+ fe_sq_tl(&tmp0, &tmp1l);
+ fe_sq_tl(&tmp1, &x2l);
+ fe_add(&x3l, &z3, &z2);
+ fe_sub(&z2l, &z3, &z2);
+ fe_mul_ttt(&x2, &tmp1, &tmp0);
+ fe_sub(&tmp1l, &tmp1, &tmp0);
+ fe_sq_tl(&z2, &z2l);
+ fe_mul121666(&z3, &tmp1l);
+ fe_sq_tl(&x3, &x3l);
+ fe_add(&tmp0l, &tmp0, &z3);
+ fe_mul_ttt(&z3, &x1, &z2);
+ fe_mul_tll(&z2, &tmp1l, &tmp0l);
+ }
+ /* here pos=-1, so r=e, so to_xz (e*P) === if swap then (x3, z3)
+ * else (x2, z2)
+ */
+ fe_cswap(&x2, &x3, swap);
+ fe_cswap(&z2, &z3, swap);
+
+ fe_invert(&z2, &z2);
+ fe_mul_ttt(&x2, &x2, &z2);
+ fe_tobytes(out, &x2);
+
+ memzero_explicit(&x1, sizeof(x1));
+ memzero_explicit(&x2, sizeof(x2));
+ memzero_explicit(&z2, sizeof(z2));
+ memzero_explicit(&x3, sizeof(x3));
+ memzero_explicit(&z3, sizeof(z3));
+ memzero_explicit(&x2l, sizeof(x2l));
+ memzero_explicit(&z2l, sizeof(z2l));
+ memzero_explicit(&x3l, sizeof(x3l));
+ memzero_explicit(&e, sizeof(e));
+}
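+
+/*
+ * Caller's-eye sketch (illustrative only: "my_secret" and "her_pub"
+ * are hypothetical buffers, not names defined here):
+ *
+ *   u8 pub[CURVE25519_KEY_SIZE], shared[CURVE25519_KEY_SIZE];
+ *
+ *   curve25519_generic(pub, my_secret, curve25519_base_point);
+ *   curve25519_generic(shared, my_secret, her_pub);
+ *
+ * In-tree users normally call the curve25519() wrapper from
+ * <crypto/curve25519.h> instead, which also rejects an all-zero
+ * shared secret.
+ */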
diff --git a/lib/crypto/curve25519-generic.c b/lib/crypto/curve25519-generic.c
new file mode 100644
index 000000000..de7c99172
--- /dev/null
+++ b/lib/crypto/curve25519-generic.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ *
+ * This is an implementation of the Curve25519 ECDH algorithm, using either
+ * a 32-bit implementation or a 64-bit implementation with 128-bit integers,
+ * depending on what is supported by the target compiler.
+ *
+ * Information: https://cr.yp.to/ecdh.html
+ */
+
+#include <crypto/curve25519.h>
+#include <linux/module.h>
+
+const u8 curve25519_null_point[CURVE25519_KEY_SIZE] __aligned(32) = { 0 };
+const u8 curve25519_base_point[CURVE25519_KEY_SIZE] __aligned(32) = { 9 };
+
+EXPORT_SYMBOL(curve25519_null_point);
+EXPORT_SYMBOL(curve25519_base_point);
+EXPORT_SYMBOL(curve25519_generic);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Curve25519 scalar multiplication");
+MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>");
diff --git a/lib/crypto/curve25519-hacl64.c b/lib/crypto/curve25519-hacl64.c
new file mode 100644
index 000000000..c40e5d913
--- /dev/null
+++ b/lib/crypto/curve25519-hacl64.c
@@ -0,0 +1,786 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright (C) 2016-2017 INRIA and Microsoft Corporation.
+ * Copyright (C) 2018-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ *
+ * This is a machine-generated formally verified implementation of Curve25519
+ * ECDH from: <https://github.com/mitls/hacl-star>. Though originally machine
+ * generated, it has been tweaked to be suitable for use in the kernel. It is
+ * optimized for 64-bit machines that can efficiently work with 128-bit
+ * integer types.
+ */
+
+#include <asm/unaligned.h>
+#include <crypto/curve25519.h>
+#include <linux/string.h>
+
+static __always_inline u64 u64_eq_mask(u64 a, u64 b)
+{
+ u64 x = a ^ b;
+ u64 minus_x = ~x + (u64)1U;
+ u64 x_or_minus_x = x | minus_x;
+ u64 xnx = x_or_minus_x >> (u32)63U;
+ u64 c = xnx - (u64)1U;
+ return c;
+}
+
+static __always_inline u64 u64_gte_mask(u64 a, u64 b)
+{
+ u64 x = a;
+ u64 y = b;
+ u64 x_xor_y = x ^ y;
+ u64 x_sub_y = x - y;
+ u64 x_sub_y_xor_y = x_sub_y ^ y;
+ u64 q = x_xor_y | x_sub_y_xor_y;
+ u64 x_xor_q = x ^ q;
+ u64 x_xor_q_ = x_xor_q >> (u32)63U;
+ u64 c = x_xor_q_ - (u64)1U;
+ return c;
+}
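+
+/*
+ * Both helpers build constant-time predicates: they return ~0ULL when
+ * the relation holds and 0 otherwise, without data-dependent
+ * branches.  In u64_eq_mask, x | -x has its top bit set exactly when
+ * x != 0, so xnx is 1 for a != b and 0 for a == b, and xnx - 1 gives
+ * the mask.  u64_gte_mask isolates the borrow bit that a - b would
+ * produce (set exactly when a < b) and turns it into a mask the same
+ * way.
+ */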
+
+static __always_inline void modulo_carry_top(u64 *b)
+{
+ u64 b4 = b[4];
+ u64 b0 = b[0];
+ u64 b4_ = b4 & 0x7ffffffffffffLLU;
+ u64 b0_ = b0 + 19 * (b4 >> 51);
+ b[4] = b4_;
+ b[0] = b0_;
+}
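+
+/*
+ * This file uses radix 2^51: five 51-bit limbs hold a 255-bit value.
+ * Since p = 2^255 - 19, we have 2^255 == 19 (mod p), so whatever
+ * overflows past bit 51 of the top limb can be folded into the bottom
+ * limb scaled by 19 -- exactly b[0] += 19 * (b[4] >> 51) above.
+ */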
+
+static __always_inline void fproduct_copy_from_wide_(u64 *output, u128 *input)
+{
+ {
+ u128 xi = input[0];
+ output[0] = ((u64)(xi));
+ }
+ {
+ u128 xi = input[1];
+ output[1] = ((u64)(xi));
+ }
+ {
+ u128 xi = input[2];
+ output[2] = ((u64)(xi));
+ }
+ {
+ u128 xi = input[3];
+ output[3] = ((u64)(xi));
+ }
+ {
+ u128 xi = input[4];
+ output[4] = ((u64)(xi));
+ }
+}
+
+static __always_inline void
+fproduct_sum_scalar_multiplication_(u128 *output, u64 *input, u64 s)
+{
+ output[0] += (u128)input[0] * s;
+ output[1] += (u128)input[1] * s;
+ output[2] += (u128)input[2] * s;
+ output[3] += (u128)input[3] * s;
+ output[4] += (u128)input[4] * s;
+}
+
+static __always_inline void fproduct_carry_wide_(u128 *tmp)
+{
+ {
+ u32 ctr = 0;
+ u128 tctr = tmp[ctr];
+ u128 tctrp1 = tmp[ctr + 1];
+ u64 r0 = ((u64)(tctr)) & 0x7ffffffffffffLLU;
+ u128 c = ((tctr) >> (51));
+ tmp[ctr] = ((u128)(r0));
+ tmp[ctr + 1] = ((tctrp1) + (c));
+ }
+ {
+ u32 ctr = 1;
+ u128 tctr = tmp[ctr];
+ u128 tctrp1 = tmp[ctr + 1];
+ u64 r0 = ((u64)(tctr)) & 0x7ffffffffffffLLU;
+ u128 c = ((tctr) >> (51));
+ tmp[ctr] = ((u128)(r0));
+ tmp[ctr + 1] = ((tctrp1) + (c));
+ }
+
+ {
+ u32 ctr = 2;
+ u128 tctr = tmp[ctr];
+ u128 tctrp1 = tmp[ctr + 1];
+ u64 r0 = ((u64)(tctr)) & 0x7ffffffffffffLLU;
+ u128 c = ((tctr) >> (51));
+ tmp[ctr] = ((u128)(r0));
+ tmp[ctr + 1] = ((tctrp1) + (c));
+ }
+ {
+ u32 ctr = 3;
+ u128 tctr = tmp[ctr];
+ u128 tctrp1 = tmp[ctr + 1];
+ u64 r0 = ((u64)(tctr)) & 0x7ffffffffffffLLU;
+ u128 c = ((tctr) >> (51));
+ tmp[ctr] = ((u128)(r0));
+ tmp[ctr + 1] = ((tctrp1) + (c));
+ }
+}
+
+static __always_inline void fmul_shift_reduce(u64 *output)
+{
+ u64 tmp = output[4];
+ u64 b0;
+ {
+ u32 ctr = 5 - 0 - 1;
+ u64 z = output[ctr - 1];
+ output[ctr] = z;
+ }
+ {
+ u32 ctr = 5 - 1 - 1;
+ u64 z = output[ctr - 1];
+ output[ctr] = z;
+ }
+ {
+ u32 ctr = 5 - 2 - 1;
+ u64 z = output[ctr - 1];
+ output[ctr] = z;
+ }
+ {
+ u32 ctr = 5 - 3 - 1;
+ u64 z = output[ctr - 1];
+ output[ctr] = z;
+ }
+ output[0] = tmp;
+ b0 = output[0];
+ output[0] = 19 * b0;
+}
+
+static __always_inline void fmul_mul_shift_reduce_(u128 *output, u64 *input,
+ u64 *input21)
+{
+ u32 i;
+ u64 input2i;
+ {
+ u64 input2i = input21[0];
+ fproduct_sum_scalar_multiplication_(output, input, input2i);
+ fmul_shift_reduce(input);
+ }
+ {
+ u64 input2i = input21[1];
+ fproduct_sum_scalar_multiplication_(output, input, input2i);
+ fmul_shift_reduce(input);
+ }
+ {
+ u64 input2i = input21[2];
+ fproduct_sum_scalar_multiplication_(output, input, input2i);
+ fmul_shift_reduce(input);
+ }
+ {
+ u64 input2i = input21[3];
+ fproduct_sum_scalar_multiplication_(output, input, input2i);
+ fmul_shift_reduce(input);
+ }
+ i = 4;
+ input2i = input21[i];
+ fproduct_sum_scalar_multiplication_(output, input, input2i);
+}
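+
+/*
+ * fmul_shift_reduce() multiplies its operand by 2^51 mod p in place:
+ * every limb moves up one position and the limb that falls off the
+ * top re-enters at the bottom scaled by 19 (2^255 == 19 mod p again).
+ * fmul_mul_shift_reduce_() therefore evaluates the full product
+ * Horner-style -- accumulate input * input2[i], shift input, repeat --
+ * so the whole multiplication fits in five u128 accumulators.
+ */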
+
+static __always_inline void fmul_fmul(u64 *output, u64 *input, u64 *input21)
+{
+ u64 tmp[5] = { input[0], input[1], input[2], input[3], input[4] };
+ {
+ u128 b4;
+ u128 b0;
+ u128 b4_;
+ u128 b0_;
+ u64 i0;
+ u64 i1;
+ u64 i0_;
+ u64 i1_;
+ u128 t[5] = { 0 };
+ fmul_mul_shift_reduce_(t, tmp, input21);
+ fproduct_carry_wide_(t);
+ b4 = t[4];
+ b0 = t[0];
+ b4_ = ((b4) & (((u128)(0x7ffffffffffffLLU))));
+ b0_ = ((b0) + (((u128)(19) * (((u64)(((b4) >> (51))))))));
+ t[4] = b4_;
+ t[0] = b0_;
+ fproduct_copy_from_wide_(output, t);
+ i0 = output[0];
+ i1 = output[1];
+ i0_ = i0 & 0x7ffffffffffffLLU;
+ i1_ = i1 + (i0 >> 51);
+ output[0] = i0_;
+ output[1] = i1_;
+ }
+}
+
+static __always_inline void fsquare_fsquare__(u128 *tmp, u64 *output)
+{
+ u64 r0 = output[0];
+ u64 r1 = output[1];
+ u64 r2 = output[2];
+ u64 r3 = output[3];
+ u64 r4 = output[4];
+ u64 d0 = r0 * 2;
+ u64 d1 = r1 * 2;
+ u64 d2 = r2 * 2 * 19;
+ u64 d419 = r4 * 19;
+ u64 d4 = d419 * 2;
+ u128 s0 = ((((((u128)(r0) * (r0))) + (((u128)(d4) * (r1))))) +
+ (((u128)(d2) * (r3))));
+ u128 s1 = ((((((u128)(d0) * (r1))) + (((u128)(d4) * (r2))))) +
+ (((u128)(r3 * 19) * (r3))));
+ u128 s2 = ((((((u128)(d0) * (r2))) + (((u128)(r1) * (r1))))) +
+ (((u128)(d4) * (r3))));
+ u128 s3 = ((((((u128)(d0) * (r3))) + (((u128)(d1) * (r2))))) +
+ (((u128)(r4) * (d419))));
+ u128 s4 = ((((((u128)(d0) * (r4))) + (((u128)(d1) * (r3))))) +
+ (((u128)(r2) * (r2))));
+ tmp[0] = s0;
+ tmp[1] = s1;
+ tmp[2] = s2;
+ tmp[3] = s3;
+ tmp[4] = s4;
+}
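+
+/*
+ * Schoolbook squaring specialised for radix 2^51: cross terms
+ * r_i * r_j occur twice, hence the doubled d0/d1/d4, and any term
+ * whose limb indices sum to 5 or more wraps around scaled by 19
+ * (d2 = 2 * 19 * r2, d419 = 19 * r4).  The five u128 accumulators
+ * s0..s4 hold the unreduced result for the carry pass that follows.
+ */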
+
+static __always_inline void fsquare_fsquare_(u128 *tmp, u64 *output)
+{
+ u128 b4;
+ u128 b0;
+ u128 b4_;
+ u128 b0_;
+ u64 i0;
+ u64 i1;
+ u64 i0_;
+ u64 i1_;
+ fsquare_fsquare__(tmp, output);
+ fproduct_carry_wide_(tmp);
+ b4 = tmp[4];
+ b0 = tmp[0];
+ b4_ = ((b4) & (((u128)(0x7ffffffffffffLLU))));
+ b0_ = ((b0) + (((u128)(19) * (((u64)(((b4) >> (51))))))));
+ tmp[4] = b4_;
+ tmp[0] = b0_;
+ fproduct_copy_from_wide_(output, tmp);
+ i0 = output[0];
+ i1 = output[1];
+ i0_ = i0 & 0x7ffffffffffffLLU;
+ i1_ = i1 + (i0 >> 51);
+ output[0] = i0_;
+ output[1] = i1_;
+}
+
+static __always_inline void fsquare_fsquare_times_(u64 *output, u128 *tmp,
+ u32 count1)
+{
+ u32 i;
+ fsquare_fsquare_(tmp, output);
+ for (i = 1; i < count1; ++i)
+ fsquare_fsquare_(tmp, output);
+}
+
+static __always_inline void fsquare_fsquare_times(u64 *output, u64 *input,
+ u32 count1)
+{
+ u128 t[5];
+ memcpy(output, input, 5 * sizeof(*input));
+ fsquare_fsquare_times_(output, t, count1);
+}
+
+static __always_inline void fsquare_fsquare_times_inplace(u64 *output,
+ u32 count1)
+{
+ u128 t[5];
+ fsquare_fsquare_times_(output, t, count1);
+}
+
+static __always_inline void crecip_crecip(u64 *out, u64 *z)
+{
+ u64 buf[20] = { 0 };
+ u64 *a0 = buf;
+ u64 *t00 = buf + 5;
+ u64 *b0 = buf + 10;
+ u64 *t01;
+ u64 *b1;
+ u64 *c0;
+ u64 *a;
+ u64 *t0;
+ u64 *b;
+ u64 *c;
+ fsquare_fsquare_times(a0, z, 1);
+ fsquare_fsquare_times(t00, a0, 2);
+ fmul_fmul(b0, t00, z);
+ fmul_fmul(a0, b0, a0);
+ fsquare_fsquare_times(t00, a0, 1);
+ fmul_fmul(b0, t00, b0);
+ fsquare_fsquare_times(t00, b0, 5);
+ t01 = buf + 5;
+ b1 = buf + 10;
+ c0 = buf + 15;
+ fmul_fmul(b1, t01, b1);
+ fsquare_fsquare_times(t01, b1, 10);
+ fmul_fmul(c0, t01, b1);
+ fsquare_fsquare_times(t01, c0, 20);
+ fmul_fmul(t01, t01, c0);
+ fsquare_fsquare_times_inplace(t01, 10);
+ fmul_fmul(b1, t01, b1);
+ fsquare_fsquare_times(t01, b1, 50);
+ a = buf;
+ t0 = buf + 5;
+ b = buf + 10;
+ c = buf + 15;
+ fmul_fmul(c, t0, b);
+ fsquare_fsquare_times(t0, c, 100);
+ fmul_fmul(t0, t0, c);
+ fsquare_fsquare_times_inplace(t0, 50);
+ fmul_fmul(t0, t0, b);
+ fsquare_fsquare_times_inplace(t0, 5);
+ fmul_fmul(out, t0, a);
+}
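+
+/*
+ * The same Fermat inversion as in the 32-bit code: out = z^(2^255-21)
+ * = z^(p - 2) = 1/z, computed with 254 squarings and 11
+ * multiplications over a fixed, secret-independent schedule, reusing
+ * one 20-limb scratch buffer for the four temporaries.
+ */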
+
+static __always_inline void fsum(u64 *a, u64 *b)
+{
+ a[0] += b[0];
+ a[1] += b[1];
+ a[2] += b[2];
+ a[3] += b[3];
+ a[4] += b[4];
+}
+
+static __always_inline void fdifference(u64 *a, u64 *b)
+{
+ u64 tmp[5] = { 0 };
+ u64 b0;
+ u64 b1;
+ u64 b2;
+ u64 b3;
+ u64 b4;
+ memcpy(tmp, b, 5 * sizeof(*b));
+ b0 = tmp[0];
+ b1 = tmp[1];
+ b2 = tmp[2];
+ b3 = tmp[3];
+ b4 = tmp[4];
+ tmp[0] = b0 + 0x3fffffffffff68LLU;
+ tmp[1] = b1 + 0x3ffffffffffff8LLU;
+ tmp[2] = b2 + 0x3ffffffffffff8LLU;
+ tmp[3] = b3 + 0x3ffffffffffff8LLU;
+ tmp[4] = b4 + 0x3ffffffffffff8LLU;
+ {
+ u64 xi = a[0];
+ u64 yi = tmp[0];
+ a[0] = yi - xi;
+ }
+ {
+ u64 xi = a[1];
+ u64 yi = tmp[1];
+ a[1] = yi - xi;
+ }
+ {
+ u64 xi = a[2];
+ u64 yi = tmp[2];
+ a[2] = yi - xi;
+ }
+ {
+ u64 xi = a[3];
+ u64 yi = tmp[3];
+ a[3] = yi - xi;
+ }
+ {
+ u64 xi = a[4];
+ u64 yi = tmp[4];
+ a[4] = yi - xi;
+ }
+}
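+
+/*
+ * a = b - a is computed as a = (b + 8p) - a in limb form: the magic
+ * constants are 8 * (2^51 - 19) for limb 0 and 8 * (2^51 - 1) for the
+ * others, i.e. a redundant encoding of 8p.  Adding a multiple of p
+ * changes nothing mod p but keeps every per-limb subtraction
+ * non-negative, so no borrow can propagate.
+ */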
+
+static __always_inline void fscalar(u64 *output, u64 *b, u64 s)
+{
+ u128 tmp[5];
+ u128 b4;
+ u128 b0;
+ u128 b4_;
+ u128 b0_;
+ {
+ u64 xi = b[0];
+ tmp[0] = ((u128)(xi) * (s));
+ }
+ {
+ u64 xi = b[1];
+ tmp[1] = ((u128)(xi) * (s));
+ }
+ {
+ u64 xi = b[2];
+ tmp[2] = ((u128)(xi) * (s));
+ }
+ {
+ u64 xi = b[3];
+ tmp[3] = ((u128)(xi) * (s));
+ }
+ {
+ u64 xi = b[4];
+ tmp[4] = ((u128)(xi) * (s));
+ }
+ fproduct_carry_wide_(tmp);
+ b4 = tmp[4];
+ b0 = tmp[0];
+ b4_ = ((b4) & (((u128)(0x7ffffffffffffLLU))));
+ b0_ = ((b0) + (((u128)(19) * (((u64)(((b4) >> (51))))))));
+ tmp[4] = b4_;
+ tmp[0] = b0_;
+ fproduct_copy_from_wide_(output, tmp);
+}
+
+static __always_inline void fmul(u64 *output, u64 *a, u64 *b)
+{
+ fmul_fmul(output, a, b);
+}
+
+static __always_inline void crecip(u64 *output, u64 *input)
+{
+ crecip_crecip(output, input);
+}
+
+static __always_inline void point_swap_conditional_step(u64 *a, u64 *b,
+ u64 swap1, u32 ctr)
+{
+ u32 i = ctr - 1;
+ u64 ai = a[i];
+ u64 bi = b[i];
+ u64 x = swap1 & (ai ^ bi);
+ u64 ai1 = ai ^ x;
+ u64 bi1 = bi ^ x;
+ a[i] = ai1;
+ b[i] = bi1;
+}
+
+static __always_inline void point_swap_conditional5(u64 *a, u64 *b, u64 swap1)
+{
+ point_swap_conditional_step(a, b, swap1, 5);
+ point_swap_conditional_step(a, b, swap1, 4);
+ point_swap_conditional_step(a, b, swap1, 3);
+ point_swap_conditional_step(a, b, swap1, 2);
+ point_swap_conditional_step(a, b, swap1, 1);
+}
+
+static __always_inline void point_swap_conditional(u64 *a, u64 *b, u64 iswap)
+{
+ u64 swap1 = 0 - iswap;
+ point_swap_conditional5(a, b, swap1);
+ point_swap_conditional5(a + 5, b + 5, swap1);
+}
+
+static __always_inline void point_copy(u64 *output, u64 *input)
+{
+ memcpy(output, input, 5 * sizeof(*input));
+ memcpy(output + 5, input + 5, 5 * sizeof(*input));
+}
+
+static __always_inline void addanddouble_fmonty(u64 *pp, u64 *ppq, u64 *p,
+ u64 *pq, u64 *qmqp)
+{
+ u64 *qx = qmqp;
+ u64 *x2 = pp;
+ u64 *z2 = pp + 5;
+ u64 *x3 = ppq;
+ u64 *z3 = ppq + 5;
+ u64 *x = p;
+ u64 *z = p + 5;
+ u64 *xprime = pq;
+ u64 *zprime = pq + 5;
+ u64 buf[40] = { 0 };
+ u64 *origx = buf;
+ u64 *origxprime0 = buf + 5;
+ u64 *xxprime0;
+ u64 *zzprime0;
+ u64 *origxprime;
+ xxprime0 = buf + 25;
+ zzprime0 = buf + 30;
+ memcpy(origx, x, 5 * sizeof(*x));
+ fsum(x, z);
+ fdifference(z, origx);
+ memcpy(origxprime0, xprime, 5 * sizeof(*xprime));
+ fsum(xprime, zprime);
+ fdifference(zprime, origxprime0);
+ fmul(xxprime0, xprime, z);
+ fmul(zzprime0, x, zprime);
+ origxprime = buf + 5;
+ {
+ u64 *xx0;
+ u64 *zz0;
+ u64 *xxprime;
+ u64 *zzprime;
+ u64 *zzzprime;
+ xx0 = buf + 15;
+ zz0 = buf + 20;
+ xxprime = buf + 25;
+ zzprime = buf + 30;
+ zzzprime = buf + 35;
+ memcpy(origxprime, xxprime, 5 * sizeof(*xxprime));
+ fsum(xxprime, zzprime);
+ fdifference(zzprime, origxprime);
+ fsquare_fsquare_times(x3, xxprime, 1);
+ fsquare_fsquare_times(zzzprime, zzprime, 1);
+ fmul(z3, zzzprime, qx);
+ fsquare_fsquare_times(xx0, x, 1);
+ fsquare_fsquare_times(zz0, z, 1);
+ {
+ u64 *zzz;
+ u64 *xx;
+ u64 *zz;
+ u64 scalar;
+ zzz = buf + 10;
+ xx = buf + 15;
+ zz = buf + 20;
+ fmul(x2, xx, zz);
+ fdifference(zz, xx);
+ scalar = 121665;
+ fscalar(zzz, zz, scalar);
+ fsum(zzz, xx);
+ fmul(z2, zzz, zz);
+ }
+ }
+}
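+
+/*
+ * One combined x-only double-and-add step of the Montgomery ladder,
+ * matching the RFC 7748 formulas with A = x2 + z2, B = x2 - z2,
+ * C = x3 + z3, D = x3 - z3, E = A^2 - B^2 and a24 = 121665:
+ *
+ *   x2' = A^2 * B^2
+ *   z2' = E * (A^2 + a24 * E)
+ *   x3' = (D*A + C*B)^2
+ *   z3' = x1 * (D*A - C*B)^2
+ *
+ * qmqp holds x1, the x coordinate of the fixed difference point
+ * P - Q, which never changes during the ladder.
+ */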
+
+static __always_inline void
+ladder_smallloop_cmult_small_loop_step(u64 *nq, u64 *nqpq, u64 *nq2, u64 *nqpq2,
+ u64 *q, u8 byt)
+{
+ u64 bit0 = (u64)(byt >> 7);
+ u64 bit;
+ point_swap_conditional(nq, nqpq, bit0);
+ addanddouble_fmonty(nq2, nqpq2, nq, nqpq, q);
+ bit = (u64)(byt >> 7);
+ point_swap_conditional(nq2, nqpq2, bit);
+}
+
+static __always_inline void
+ladder_smallloop_cmult_small_loop_double_step(u64 *nq, u64 *nqpq, u64 *nq2,
+ u64 *nqpq2, u64 *q, u8 byt)
+{
+ u8 byt1;
+ ladder_smallloop_cmult_small_loop_step(nq, nqpq, nq2, nqpq2, q, byt);
+ byt1 = byt << 1;
+ ladder_smallloop_cmult_small_loop_step(nq2, nqpq2, nq, nqpq, q, byt1);
+}
+
+static __always_inline void
+ladder_smallloop_cmult_small_loop(u64 *nq, u64 *nqpq, u64 *nq2, u64 *nqpq2,
+ u64 *q, u8 byt, u32 i)
+{
+ while (i--) {
+ ladder_smallloop_cmult_small_loop_double_step(nq, nqpq, nq2,
+ nqpq2, q, byt);
+ byt <<= 2;
+ }
+}
+
+static __always_inline void ladder_bigloop_cmult_big_loop(u8 *n1, u64 *nq,
+ u64 *nqpq, u64 *nq2,
+ u64 *nqpq2, u64 *q,
+ u32 i)
+{
+ while (i--) {
+ u8 byte = n1[i];
+ ladder_smallloop_cmult_small_loop(nq, nqpq, nq2, nqpq2, q,
+ byte, 4);
+ }
+}
+
+static void ladder_cmult(u64 *result, u8 *n1, u64 *q)
+{
+ u64 point_buf[40] = { 0 };
+ u64 *nq = point_buf;
+ u64 *nqpq = point_buf + 10;
+ u64 *nq2 = point_buf + 20;
+ u64 *nqpq2 = point_buf + 30;
+ point_copy(nqpq, q);
+ nq[0] = 1;
+ ladder_bigloop_cmult_big_loop(n1, nq, nqpq, nq2, nqpq2, q, 32);
+ point_copy(result, nq);
+}
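+
+/*
+ * The ladder consumes the 256-bit clamped scalar most-significant
+ * byte first: the big loop takes one byte per iteration (n1[31] down
+ * to n1[0]), and the small loop runs four double-steps per byte, each
+ * double-step eating two bits as byt shifts left.  Every bit thus
+ * costs exactly one fmonty call bracketed by conditional swaps,
+ * independent of the bit's value.
+ */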
+
+static __always_inline void format_fexpand(u64 *output, const u8 *input)
+{
+ const u8 *x00 = input + 6;
+ const u8 *x01 = input + 12;
+ const u8 *x02 = input + 19;
+ const u8 *x0 = input + 24;
+ u64 i0, i1, i2, i3, i4, output0, output1, output2, output3, output4;
+ i0 = get_unaligned_le64(input);
+ i1 = get_unaligned_le64(x00);
+ i2 = get_unaligned_le64(x01);
+ i3 = get_unaligned_le64(x02);
+ i4 = get_unaligned_le64(x0);
+ output0 = i0 & 0x7ffffffffffffLLU;
+ output1 = i1 >> 3 & 0x7ffffffffffffLLU;
+ output2 = i2 >> 6 & 0x7ffffffffffffLLU;
+ output3 = i3 >> 1 & 0x7ffffffffffffLLU;
+ output4 = i4 >> 12 & 0x7ffffffffffffLLU;
+ output[0] = output0;
+ output[1] = output1;
+ output[2] = output2;
+ output[3] = output3;
+ output[4] = output4;
+}
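+
+/*
+ * Unpack a 32-byte little-endian value into five 51-bit limbs using
+ * overlapping unaligned 64-bit loads.  Limb i starts at bit 51 * i,
+ * giving the (offset, shift) pairs (0,0), (6,3), (12,6), (19,1) and
+ * (24,12); e.g. bit 153 = byte 19, bit 1, hence (19,1).
+ */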
+
+static __always_inline void format_fcontract_first_carry_pass(u64 *input)
+{
+ u64 t0 = input[0];
+ u64 t1 = input[1];
+ u64 t2 = input[2];
+ u64 t3 = input[3];
+ u64 t4 = input[4];
+ u64 t1_ = t1 + (t0 >> 51);
+ u64 t0_ = t0 & 0x7ffffffffffffLLU;
+ u64 t2_ = t2 + (t1_ >> 51);
+ u64 t1__ = t1_ & 0x7ffffffffffffLLU;
+ u64 t3_ = t3 + (t2_ >> 51);
+ u64 t2__ = t2_ & 0x7ffffffffffffLLU;
+ u64 t4_ = t4 + (t3_ >> 51);
+ u64 t3__ = t3_ & 0x7ffffffffffffLLU;
+ input[0] = t0_;
+ input[1] = t1__;
+ input[2] = t2__;
+ input[3] = t3__;
+ input[4] = t4_;
+}
+
+static __always_inline void format_fcontract_first_carry_full(u64 *input)
+{
+ format_fcontract_first_carry_pass(input);
+ modulo_carry_top(input);
+}
+
+static __always_inline void format_fcontract_second_carry_pass(u64 *input)
+{
+ u64 t0 = input[0];
+ u64 t1 = input[1];
+ u64 t2 = input[2];
+ u64 t3 = input[3];
+ u64 t4 = input[4];
+ u64 t1_ = t1 + (t0 >> 51);
+ u64 t0_ = t0 & 0x7ffffffffffffLLU;
+ u64 t2_ = t2 + (t1_ >> 51);
+ u64 t1__ = t1_ & 0x7ffffffffffffLLU;
+ u64 t3_ = t3 + (t2_ >> 51);
+ u64 t2__ = t2_ & 0x7ffffffffffffLLU;
+ u64 t4_ = t4 + (t3_ >> 51);
+ u64 t3__ = t3_ & 0x7ffffffffffffLLU;
+ input[0] = t0_;
+ input[1] = t1__;
+ input[2] = t2__;
+ input[3] = t3__;
+ input[4] = t4_;
+}
+
+static __always_inline void format_fcontract_second_carry_full(u64 *input)
+{
+ u64 i0;
+ u64 i1;
+ u64 i0_;
+ u64 i1_;
+ format_fcontract_second_carry_pass(input);
+ modulo_carry_top(input);
+ i0 = input[0];
+ i1 = input[1];
+ i0_ = i0 & 0x7ffffffffffffLLU;
+ i1_ = i1 + (i0 >> 51);
+ input[0] = i0_;
+ input[1] = i1_;
+}
+
+static __always_inline void format_fcontract_trim(u64 *input)
+{
+ u64 a0 = input[0];
+ u64 a1 = input[1];
+ u64 a2 = input[2];
+ u64 a3 = input[3];
+ u64 a4 = input[4];
+ u64 mask0 = u64_gte_mask(a0, 0x7ffffffffffedLLU);
+ u64 mask1 = u64_eq_mask(a1, 0x7ffffffffffffLLU);
+ u64 mask2 = u64_eq_mask(a2, 0x7ffffffffffffLLU);
+ u64 mask3 = u64_eq_mask(a3, 0x7ffffffffffffLLU);
+ u64 mask4 = u64_eq_mask(a4, 0x7ffffffffffffLLU);
+ u64 mask = (((mask0 & mask1) & mask2) & mask3) & mask4;
+ u64 a0_ = a0 - (0x7ffffffffffedLLU & mask);
+ u64 a1_ = a1 - (0x7ffffffffffffLLU & mask);
+ u64 a2_ = a2 - (0x7ffffffffffffLLU & mask);
+ u64 a3_ = a3 - (0x7ffffffffffffLLU & mask);
+ u64 a4_ = a4 - (0x7ffffffffffffLLU & mask);
+ input[0] = a0_;
+ input[1] = a1_;
+ input[2] = a2_;
+ input[3] = a3_;
+ input[4] = a4_;
+}
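+
+/*
+ * Final conditional subtraction: after the carry passes the value is
+ * below 2p, so it is non-canonical only when a0 >= 2^51 - 19 and
+ * a1..a4 all equal 2^51 - 1, i.e. when the value is >= p.  mask is ~0
+ * exactly in that case, and p is then subtracted branchlessly.
+ */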
+
+static __always_inline void format_fcontract_store(u8 *output, u64 *input)
+{
+ u64 t0 = input[0];
+ u64 t1 = input[1];
+ u64 t2 = input[2];
+ u64 t3 = input[3];
+ u64 t4 = input[4];
+ u64 o0 = t1 << 51 | t0;
+ u64 o1 = t2 << 38 | t1 >> 13;
+ u64 o2 = t3 << 25 | t2 >> 26;
+ u64 o3 = t4 << 12 | t3 >> 39;
+ u8 *b0 = output;
+ u8 *b1 = output + 8;
+ u8 *b2 = output + 16;
+ u8 *b3 = output + 24;
+ put_unaligned_le64(o0, b0);
+ put_unaligned_le64(o1, b1);
+ put_unaligned_le64(o2, b2);
+ put_unaligned_le64(o3, b3);
+}
+
+static __always_inline void format_fcontract(u8 *output, u64 *input)
+{
+ format_fcontract_first_carry_full(input);
+ format_fcontract_second_carry_full(input);
+ format_fcontract_trim(input);
+ format_fcontract_store(output, input);
+}
+
+static __always_inline void format_scalar_of_point(u8 *scalar, u64 *point)
+{
+ u64 *x = point;
+ u64 *z = point + 5;
+ u64 buf[10] __aligned(32) = { 0 };
+ u64 *zmone = buf;
+ u64 *sc = buf + 5;
+ crecip(zmone, z);
+ fmul(sc, x, zmone);
+ format_fcontract(scalar, sc);
+}
+
+void curve25519_generic(u8 mypublic[CURVE25519_KEY_SIZE],
+ const u8 secret[CURVE25519_KEY_SIZE],
+ const u8 basepoint[CURVE25519_KEY_SIZE])
+{
+ u64 buf0[10] __aligned(32) = { 0 };
+ u64 *x0 = buf0;
+ u64 *z = buf0 + 5;
+ u64 *q;
+ format_fexpand(x0, basepoint);
+ z[0] = 1;
+ q = buf0;
+ {
+ u8 e[32] __aligned(32) = { 0 };
+ u8 *scalar;
+ memcpy(e, secret, 32);
+ curve25519_clamp_secret(e);
+ scalar = e;
+ {
+ u64 buf[15] = { 0 };
+ u64 *nq = buf;
+ u64 *x = nq;
+ x[0] = 1;
+ ladder_cmult(nq, scalar, q);
+ format_scalar_of_point(mypublic, nq);
+ memzero_explicit(buf, sizeof(buf));
+ }
+ memzero_explicit(e, sizeof(e));
+ }
+ memzero_explicit(buf0, sizeof(buf0));
+}
diff --git a/lib/crypto/curve25519-selftest.c b/lib/crypto/curve25519-selftest.c
new file mode 100644
index 000000000..c85e85381
--- /dev/null
+++ b/lib/crypto/curve25519-selftest.c
@@ -0,0 +1,1321 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#include <crypto/curve25519.h>
+
+struct curve25519_test_vector {
+ u8 private[CURVE25519_KEY_SIZE];
+ u8 public[CURVE25519_KEY_SIZE];
+ u8 result[CURVE25519_KEY_SIZE];
+ bool valid;
+};
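+
+/*
+ * Sketch of how one vector is exercised (illustrative; "vec" is a
+ * hypothetical pointer -- the real harness lives further down in this
+ * file):
+ *
+ *   u8 out[CURVE25519_KEY_SIZE];
+ *   bool ret = curve25519(out, vec->private, vec->public);
+ *
+ * A vector passes when ret == vec->valid and out matches vec->result;
+ * curve25519() reports false when the shared secret comes out
+ * all-zero, which is what the .valid = false entries encode.
+ */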
+static const struct curve25519_test_vector curve25519_test_vectors[] __initconst = {
+ {
+ .private = { 0x77, 0x07, 0x6d, 0x0a, 0x73, 0x18, 0xa5, 0x7d,
+ 0x3c, 0x16, 0xc1, 0x72, 0x51, 0xb2, 0x66, 0x45,
+ 0xdf, 0x4c, 0x2f, 0x87, 0xeb, 0xc0, 0x99, 0x2a,
+ 0xb1, 0x77, 0xfb, 0xa5, 0x1d, 0xb9, 0x2c, 0x2a },
+ .public = { 0xde, 0x9e, 0xdb, 0x7d, 0x7b, 0x7d, 0xc1, 0xb4,
+ 0xd3, 0x5b, 0x61, 0xc2, 0xec, 0xe4, 0x35, 0x37,
+ 0x3f, 0x83, 0x43, 0xc8, 0x5b, 0x78, 0x67, 0x4d,
+ 0xad, 0xfc, 0x7e, 0x14, 0x6f, 0x88, 0x2b, 0x4f },
+ .result = { 0x4a, 0x5d, 0x9d, 0x5b, 0xa4, 0xce, 0x2d, 0xe1,
+ 0x72, 0x8e, 0x3b, 0xf4, 0x80, 0x35, 0x0f, 0x25,
+ 0xe0, 0x7e, 0x21, 0xc9, 0x47, 0xd1, 0x9e, 0x33,
+ 0x76, 0xf0, 0x9b, 0x3c, 0x1e, 0x16, 0x17, 0x42 },
+ .valid = true
+ },
+ {
+ .private = { 0x5d, 0xab, 0x08, 0x7e, 0x62, 0x4a, 0x8a, 0x4b,
+ 0x79, 0xe1, 0x7f, 0x8b, 0x83, 0x80, 0x0e, 0xe6,
+ 0x6f, 0x3b, 0xb1, 0x29, 0x26, 0x18, 0xb6, 0xfd,
+ 0x1c, 0x2f, 0x8b, 0x27, 0xff, 0x88, 0xe0, 0xeb },
+ .public = { 0x85, 0x20, 0xf0, 0x09, 0x89, 0x30, 0xa7, 0x54,
+ 0x74, 0x8b, 0x7d, 0xdc, 0xb4, 0x3e, 0xf7, 0x5a,
+ 0x0d, 0xbf, 0x3a, 0x0d, 0x26, 0x38, 0x1a, 0xf4,
+ 0xeb, 0xa4, 0xa9, 0x8e, 0xaa, 0x9b, 0x4e, 0x6a },
+ .result = { 0x4a, 0x5d, 0x9d, 0x5b, 0xa4, 0xce, 0x2d, 0xe1,
+ 0x72, 0x8e, 0x3b, 0xf4, 0x80, 0x35, 0x0f, 0x25,
+ 0xe0, 0x7e, 0x21, 0xc9, 0x47, 0xd1, 0x9e, 0x33,
+ 0x76, 0xf0, 0x9b, 0x3c, 0x1e, 0x16, 0x17, 0x42 },
+ .valid = true
+ },
+ {
+ .private = { 1 },
+ .public = { 0x25, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .result = { 0x3c, 0x77, 0x77, 0xca, 0xf9, 0x97, 0xb2, 0x64,
+ 0x41, 0x60, 0x77, 0x66, 0x5b, 0x4e, 0x22, 0x9d,
+ 0x0b, 0x95, 0x48, 0xdc, 0x0c, 0xd8, 0x19, 0x98,
+ 0xdd, 0xcd, 0xc5, 0xc8, 0x53, 0x3c, 0x79, 0x7f },
+ .valid = true
+ },
+ {
+ .private = { 1 },
+ .public = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ .result = { 0xb3, 0x2d, 0x13, 0x62, 0xc2, 0x48, 0xd6, 0x2f,
+ 0xe6, 0x26, 0x19, 0xcf, 0xf0, 0x4d, 0xd4, 0x3d,
+ 0xb7, 0x3f, 0xfc, 0x1b, 0x63, 0x08, 0xed, 0xe3,
+ 0x0b, 0x78, 0xd8, 0x73, 0x80, 0xf1, 0xe8, 0x34 },
+ .valid = true
+ },
+ {
+ .private = { 0xa5, 0x46, 0xe3, 0x6b, 0xf0, 0x52, 0x7c, 0x9d,
+ 0x3b, 0x16, 0x15, 0x4b, 0x82, 0x46, 0x5e, 0xdd,
+ 0x62, 0x14, 0x4c, 0x0a, 0xc1, 0xfc, 0x5a, 0x18,
+ 0x50, 0x6a, 0x22, 0x44, 0xba, 0x44, 0x9a, 0xc4 },
+ .public = { 0xe6, 0xdb, 0x68, 0x67, 0x58, 0x30, 0x30, 0xdb,
+ 0x35, 0x94, 0xc1, 0xa4, 0x24, 0xb1, 0x5f, 0x7c,
+ 0x72, 0x66, 0x24, 0xec, 0x26, 0xb3, 0x35, 0x3b,
+ 0x10, 0xa9, 0x03, 0xa6, 0xd0, 0xab, 0x1c, 0x4c },
+ .result = { 0xc3, 0xda, 0x55, 0x37, 0x9d, 0xe9, 0xc6, 0x90,
+ 0x8e, 0x94, 0xea, 0x4d, 0xf2, 0x8d, 0x08, 0x4f,
+ 0x32, 0xec, 0xcf, 0x03, 0x49, 0x1c, 0x71, 0xf7,
+ 0x54, 0xb4, 0x07, 0x55, 0x77, 0xa2, 0x85, 0x52 },
+ .valid = true
+ },
+ {
+ .private = { 1, 2, 3, 4 },
+ .public = { 0 },
+ .result = { 0 },
+ .valid = false
+ },
+ {
+ .private = { 2, 4, 6, 8 },
+ .public = { 0xe0, 0xeb, 0x7a, 0x7c, 0x3b, 0x41, 0xb8, 0xae,
+ 0x16, 0x56, 0xe3, 0xfa, 0xf1, 0x9f, 0xc4, 0x6a,
+ 0xda, 0x09, 0x8d, 0xeb, 0x9c, 0x32, 0xb1, 0xfd,
+ 0x86, 0x62, 0x05, 0x16, 0x5f, 0x49, 0xb8 },
+ .result = { 0 },
+ .valid = false
+ },
+ {
+ .private = { 0xff, 0xff, 0xff, 0xff, 0x0a, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ .public = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0x0a, 0x00, 0xfb, 0x9f },
+ .result = { 0x77, 0x52, 0xb6, 0x18, 0xc1, 0x2d, 0x48, 0xd2,
+ 0xc6, 0x93, 0x46, 0x83, 0x81, 0x7c, 0xc6, 0x57,
+ 0xf3, 0x31, 0x03, 0x19, 0x49, 0x48, 0x20, 0x05,
+ 0x42, 0x2b, 0x4e, 0xae, 0x8d, 0x1d, 0x43, 0x23 },
+ .valid = true
+ },
+ {
+ .private = { 0x8e, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .public = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8e, 0x06 },
+ .result = { 0x5a, 0xdf, 0xaa, 0x25, 0x86, 0x8e, 0x32, 0x3d,
+ 0xae, 0x49, 0x62, 0xc1, 0x01, 0x5c, 0xb3, 0x12,
+ 0xe1, 0xc5, 0xc7, 0x9e, 0x95, 0x3f, 0x03, 0x99,
+ 0xb0, 0xba, 0x16, 0x22, 0xf3, 0xb6, 0xf7, 0x0c },
+ .valid = true
+ },
+ /* wycheproof - normal case */
+ {
+ .private = { 0x48, 0x52, 0x83, 0x4d, 0x9d, 0x6b, 0x77, 0xda,
+ 0xde, 0xab, 0xaa, 0xf2, 0xe1, 0x1d, 0xca, 0x66,
+ 0xd1, 0x9f, 0xe7, 0x49, 0x93, 0xa7, 0xbe, 0xc3,
+ 0x6c, 0x6e, 0x16, 0xa0, 0x98, 0x3f, 0xea, 0xba },
+ .public = { 0x9c, 0x64, 0x7d, 0x9a, 0xe5, 0x89, 0xb9, 0xf5,
+ 0x8f, 0xdc, 0x3c, 0xa4, 0x94, 0x7e, 0xfb, 0xc9,
+ 0x15, 0xc4, 0xb2, 0xe0, 0x8e, 0x74, 0x4a, 0x0e,
+ 0xdf, 0x46, 0x9d, 0xac, 0x59, 0xc8, 0xf8, 0x5a },
+ .result = { 0x87, 0xb7, 0xf2, 0x12, 0xb6, 0x27, 0xf7, 0xa5,
+ 0x4c, 0xa5, 0xe0, 0xbc, 0xda, 0xdd, 0xd5, 0x38,
+ 0x9d, 0x9d, 0xe6, 0x15, 0x6c, 0xdb, 0xcf, 0x8e,
+ 0xbe, 0x14, 0xff, 0xbc, 0xfb, 0x43, 0x65, 0x51 },
+ .valid = true
+ },
+ /* wycheproof - public key on twist */
+ {
+ .private = { 0x58, 0x8c, 0x06, 0x1a, 0x50, 0x80, 0x4a, 0xc4,
+ 0x88, 0xad, 0x77, 0x4a, 0xc7, 0x16, 0xc3, 0xf5,
+ 0xba, 0x71, 0x4b, 0x27, 0x12, 0xe0, 0x48, 0x49,
+ 0x13, 0x79, 0xa5, 0x00, 0x21, 0x19, 0x98, 0xa8 },
+ .public = { 0x63, 0xaa, 0x40, 0xc6, 0xe3, 0x83, 0x46, 0xc5,
+ 0xca, 0xf2, 0x3a, 0x6d, 0xf0, 0xa5, 0xe6, 0xc8,
+ 0x08, 0x89, 0xa0, 0x86, 0x47, 0xe5, 0x51, 0xb3,
+ 0x56, 0x34, 0x49, 0xbe, 0xfc, 0xfc, 0x97, 0x33 },
+ .result = { 0xb1, 0xa7, 0x07, 0x51, 0x94, 0x95, 0xff, 0xff,
+ 0xb2, 0x98, 0xff, 0x94, 0x17, 0x16, 0xb0, 0x6d,
+ 0xfa, 0xb8, 0x7c, 0xf8, 0xd9, 0x11, 0x23, 0xfe,
+ 0x2b, 0xe9, 0xa2, 0x33, 0xdd, 0xa2, 0x22, 0x12 },
+ .valid = true
+ },
+ /* wycheproof - public key on twist */
+ {
+ .private = { 0xb0, 0x5b, 0xfd, 0x32, 0xe5, 0x53, 0x25, 0xd9,
+ 0xfd, 0x64, 0x8c, 0xb3, 0x02, 0x84, 0x80, 0x39,
+ 0x00, 0x0b, 0x39, 0x0e, 0x44, 0xd5, 0x21, 0xe5,
+ 0x8a, 0xab, 0x3b, 0x29, 0xa6, 0x96, 0x0b, 0xa8 },
+ .public = { 0x0f, 0x83, 0xc3, 0x6f, 0xde, 0xd9, 0xd3, 0x2f,
+ 0xad, 0xf4, 0xef, 0xa3, 0xae, 0x93, 0xa9, 0x0b,
+ 0xb5, 0xcf, 0xa6, 0x68, 0x93, 0xbc, 0x41, 0x2c,
+ 0x43, 0xfa, 0x72, 0x87, 0xdb, 0xb9, 0x97, 0x79 },
+ .result = { 0x67, 0xdd, 0x4a, 0x6e, 0x16, 0x55, 0x33, 0x53,
+ 0x4c, 0x0e, 0x3f, 0x17, 0x2e, 0x4a, 0xb8, 0x57,
+ 0x6b, 0xca, 0x92, 0x3a, 0x5f, 0x07, 0xb2, 0xc0,
+ 0x69, 0xb4, 0xc3, 0x10, 0xff, 0x2e, 0x93, 0x5b },
+ .valid = true
+ },
+ /* wycheproof - public key on twist */
+ {
+ .private = { 0x70, 0xe3, 0x4b, 0xcb, 0xe1, 0xf4, 0x7f, 0xbc,
+ 0x0f, 0xdd, 0xfd, 0x7c, 0x1e, 0x1a, 0xa5, 0x3d,
+ 0x57, 0xbf, 0xe0, 0xf6, 0x6d, 0x24, 0x30, 0x67,
+ 0xb4, 0x24, 0xbb, 0x62, 0x10, 0xbe, 0xd1, 0x9c },
+ .public = { 0x0b, 0x82, 0x11, 0xa2, 0xb6, 0x04, 0x90, 0x97,
+ 0xf6, 0x87, 0x1c, 0x6c, 0x05, 0x2d, 0x3c, 0x5f,
+ 0xc1, 0xba, 0x17, 0xda, 0x9e, 0x32, 0xae, 0x45,
+ 0x84, 0x03, 0xb0, 0x5b, 0xb2, 0x83, 0x09, 0x2a },
+ .result = { 0x4a, 0x06, 0x38, 0xcf, 0xaa, 0x9e, 0xf1, 0x93,
+ 0x3b, 0x47, 0xf8, 0x93, 0x92, 0x96, 0xa6, 0xb2,
+ 0x5b, 0xe5, 0x41, 0xef, 0x7f, 0x70, 0xe8, 0x44,
+ 0xc0, 0xbc, 0xc0, 0x0b, 0x13, 0x4d, 0xe6, 0x4a },
+ .valid = true
+ },
+ /* wycheproof - public key on twist */
+ {
+ .private = { 0x68, 0xc1, 0xf3, 0xa6, 0x53, 0xa4, 0xcd, 0xb1,
+ 0xd3, 0x7b, 0xba, 0x94, 0x73, 0x8f, 0x8b, 0x95,
+ 0x7a, 0x57, 0xbe, 0xb2, 0x4d, 0x64, 0x6e, 0x99,
+ 0x4d, 0xc2, 0x9a, 0x27, 0x6a, 0xad, 0x45, 0x8d },
+ .public = { 0x34, 0x3a, 0xc2, 0x0a, 0x3b, 0x9c, 0x6a, 0x27,
+ 0xb1, 0x00, 0x81, 0x76, 0x50, 0x9a, 0xd3, 0x07,
+ 0x35, 0x85, 0x6e, 0xc1, 0xc8, 0xd8, 0xfc, 0xae,
+ 0x13, 0x91, 0x2d, 0x08, 0xd1, 0x52, 0xf4, 0x6c },
+ .result = { 0x39, 0x94, 0x91, 0xfc, 0xe8, 0xdf, 0xab, 0x73,
+ 0xb4, 0xf9, 0xf6, 0x11, 0xde, 0x8e, 0xa0, 0xb2,
+ 0x7b, 0x28, 0xf8, 0x59, 0x94, 0x25, 0x0b, 0x0f,
+ 0x47, 0x5d, 0x58, 0x5d, 0x04, 0x2a, 0xc2, 0x07 },
+ .valid = true
+ },
+ /* wycheproof - public key on twist */
+ {
+ .private = { 0xd8, 0x77, 0xb2, 0x6d, 0x06, 0xdf, 0xf9, 0xd9,
+ 0xf7, 0xfd, 0x4c, 0x5b, 0x37, 0x69, 0xf8, 0xcd,
+ 0xd5, 0xb3, 0x05, 0x16, 0xa5, 0xab, 0x80, 0x6b,
+ 0xe3, 0x24, 0xff, 0x3e, 0xb6, 0x9e, 0xa0, 0xb2 },
+ .public = { 0xfa, 0x69, 0x5f, 0xc7, 0xbe, 0x8d, 0x1b, 0xe5,
+ 0xbf, 0x70, 0x48, 0x98, 0xf3, 0x88, 0xc4, 0x52,
+ 0xba, 0xfd, 0xd3, 0xb8, 0xea, 0xe8, 0x05, 0xf8,
+ 0x68, 0x1a, 0x8d, 0x15, 0xc2, 0xd4, 0xe1, 0x42 },
+ .result = { 0x2c, 0x4f, 0xe1, 0x1d, 0x49, 0x0a, 0x53, 0x86,
+ 0x17, 0x76, 0xb1, 0x3b, 0x43, 0x54, 0xab, 0xd4,
+ 0xcf, 0x5a, 0x97, 0x69, 0x9d, 0xb6, 0xe6, 0xc6,
+ 0x8c, 0x16, 0x26, 0xd0, 0x76, 0x62, 0xf7, 0x58 },
+ .valid = true
+ },
+ /* wycheproof - public key = 0 */
+ {
+ .private = { 0x20, 0x74, 0x94, 0x03, 0x8f, 0x2b, 0xb8, 0x11,
+ 0xd4, 0x78, 0x05, 0xbc, 0xdf, 0x04, 0xa2, 0xac,
+ 0x58, 0x5a, 0xda, 0x7f, 0x2f, 0x23, 0x38, 0x9b,
+ 0xfd, 0x46, 0x58, 0xf9, 0xdd, 0xd4, 0xde, 0xbc },
+ .public = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .valid = false
+ },
+ /* wycheproof - public key = 1 */
+ {
+ .private = { 0x20, 0x2e, 0x89, 0x72, 0xb6, 0x1c, 0x7e, 0x61,
+ 0x93, 0x0e, 0xb9, 0x45, 0x0b, 0x50, 0x70, 0xea,
+ 0xe1, 0xc6, 0x70, 0x47, 0x56, 0x85, 0x54, 0x1f,
+ 0x04, 0x76, 0x21, 0x7e, 0x48, 0x18, 0xcf, 0xab },
+ .public = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .valid = false
+ },
+ /* wycheproof - edge case on twist */
+ {
+ .private = { 0x38, 0xdd, 0xe9, 0xf3, 0xe7, 0xb7, 0x99, 0x04,
+ 0x5f, 0x9a, 0xc3, 0x79, 0x3d, 0x4a, 0x92, 0x77,
+ 0xda, 0xde, 0xad, 0xc4, 0x1b, 0xec, 0x02, 0x90,
+ 0xf8, 0x1f, 0x74, 0x4f, 0x73, 0x77, 0x5f, 0x84 },
+ .public = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .result = { 0x9a, 0x2c, 0xfe, 0x84, 0xff, 0x9c, 0x4a, 0x97,
+ 0x39, 0x62, 0x5c, 0xae, 0x4a, 0x3b, 0x82, 0xa9,
+ 0x06, 0x87, 0x7a, 0x44, 0x19, 0x46, 0xf8, 0xd7,
+ 0xb3, 0xd7, 0x95, 0xfe, 0x8f, 0x5d, 0x16, 0x39 },
+ .valid = true
+ },
+ /* wycheproof - edge case on twist */
+ {
+ .private = { 0x98, 0x57, 0xa9, 0x14, 0xe3, 0xc2, 0x90, 0x36,
+ 0xfd, 0x9a, 0x44, 0x2b, 0xa5, 0x26, 0xb5, 0xcd,
+ 0xcd, 0xf2, 0x82, 0x16, 0x15, 0x3e, 0x63, 0x6c,
+ 0x10, 0x67, 0x7a, 0xca, 0xb6, 0xbd, 0x6a, 0xa5 },
+ .public = { 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .result = { 0x4d, 0xa4, 0xe0, 0xaa, 0x07, 0x2c, 0x23, 0x2e,
+ 0xe2, 0xf0, 0xfa, 0x4e, 0x51, 0x9a, 0xe5, 0x0b,
+ 0x52, 0xc1, 0xed, 0xd0, 0x8a, 0x53, 0x4d, 0x4e,
+ 0xf3, 0x46, 0xc2, 0xe1, 0x06, 0xd2, 0x1d, 0x60 },
+ .valid = true
+ },
+ /* wycheproof - edge case on twist */
+ {
+ .private = { 0x48, 0xe2, 0x13, 0x0d, 0x72, 0x33, 0x05, 0xed,
+ 0x05, 0xe6, 0xe5, 0x89, 0x4d, 0x39, 0x8a, 0x5e,
+ 0x33, 0x36, 0x7a, 0x8c, 0x6a, 0xac, 0x8f, 0xcd,
+ 0xf0, 0xa8, 0x8e, 0x4b, 0x42, 0x82, 0x0d, 0xb7 },
+ .public = { 0xff, 0xff, 0xff, 0x03, 0x00, 0x00, 0xf8, 0xff,
+ 0xff, 0x1f, 0x00, 0x00, 0xc0, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0xfe, 0xff, 0xff, 0x07, 0x00,
+ 0x00, 0xf0, 0xff, 0xff, 0x3f, 0x00, 0x00, 0x00 },
+ .result = { 0x9e, 0xd1, 0x0c, 0x53, 0x74, 0x7f, 0x64, 0x7f,
+ 0x82, 0xf4, 0x51, 0x25, 0xd3, 0xde, 0x15, 0xa1,
+ 0xe6, 0xb8, 0x24, 0x49, 0x6a, 0xb4, 0x04, 0x10,
+ 0xff, 0xcc, 0x3c, 0xfe, 0x95, 0x76, 0x0f, 0x3b },
+ .valid = true
+ },
+ /* wycheproof - edge case on twist */
+ {
+ .private = { 0x28, 0xf4, 0x10, 0x11, 0x69, 0x18, 0x51, 0xb3,
+ 0xa6, 0x2b, 0x64, 0x15, 0x53, 0xb3, 0x0d, 0x0d,
+ 0xfd, 0xdc, 0xb8, 0xff, 0xfc, 0xf5, 0x37, 0x00,
+ 0xa7, 0xbe, 0x2f, 0x6a, 0x87, 0x2e, 0x9f, 0xb0 },
+ .public = { 0x00, 0x00, 0x00, 0xfc, 0xff, 0xff, 0x07, 0x00,
+ 0x00, 0xe0, 0xff, 0xff, 0x3f, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0x01, 0x00, 0x00, 0xf8, 0xff,
+ 0xff, 0x0f, 0x00, 0x00, 0xc0, 0xff, 0xff, 0x7f },
+ .result = { 0xcf, 0x72, 0xb4, 0xaa, 0x6a, 0xa1, 0xc9, 0xf8,
+ 0x94, 0xf4, 0x16, 0x5b, 0x86, 0x10, 0x9a, 0xa4,
+ 0x68, 0x51, 0x76, 0x48, 0xe1, 0xf0, 0xcc, 0x70,
+ 0xe1, 0xab, 0x08, 0x46, 0x01, 0x76, 0x50, 0x6b },
+ .valid = true
+ },
+ /* wycheproof - edge case on twist */
+ {
+ .private = { 0x18, 0xa9, 0x3b, 0x64, 0x99, 0xb9, 0xf6, 0xb3,
+ 0x22, 0x5c, 0xa0, 0x2f, 0xef, 0x41, 0x0e, 0x0a,
+ 0xde, 0xc2, 0x35, 0x32, 0x32, 0x1d, 0x2d, 0x8e,
+ 0xf1, 0xa6, 0xd6, 0x02, 0xa8, 0xc6, 0x5b, 0x83 },
+ .public = { 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0x7f },
+ .result = { 0x5d, 0x50, 0xb6, 0x28, 0x36, 0xbb, 0x69, 0x57,
+ 0x94, 0x10, 0x38, 0x6c, 0xf7, 0xbb, 0x81, 0x1c,
+ 0x14, 0xbf, 0x85, 0xb1, 0xc7, 0xb1, 0x7e, 0x59,
+ 0x24, 0xc7, 0xff, 0xea, 0x91, 0xef, 0x9e, 0x12 },
+ .valid = true
+ },
+ /* wycheproof - edge case on twist */
+ {
+ .private = { 0xc0, 0x1d, 0x13, 0x05, 0xa1, 0x33, 0x8a, 0x1f,
+ 0xca, 0xc2, 0xba, 0x7e, 0x2e, 0x03, 0x2b, 0x42,
+ 0x7e, 0x0b, 0x04, 0x90, 0x31, 0x65, 0xac, 0xa9,
+ 0x57, 0xd8, 0xd0, 0x55, 0x3d, 0x87, 0x17, 0xb0 },
+ .public = { 0xea, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
+ .result = { 0x19, 0x23, 0x0e, 0xb1, 0x48, 0xd5, 0xd6, 0x7c,
+ 0x3c, 0x22, 0xab, 0x1d, 0xae, 0xff, 0x80, 0xa5,
+ 0x7e, 0xae, 0x42, 0x65, 0xce, 0x28, 0x72, 0x65,
+ 0x7b, 0x2c, 0x80, 0x99, 0xfc, 0x69, 0x8e, 0x50 },
+ .valid = true
+ },
+ /* wycheproof - edge case for public key */
+ {
+ .private = { 0x38, 0x6f, 0x7f, 0x16, 0xc5, 0x07, 0x31, 0xd6,
+ 0x4f, 0x82, 0xe6, 0xa1, 0x70, 0xb1, 0x42, 0xa4,
+ 0xe3, 0x4f, 0x31, 0xfd, 0x77, 0x68, 0xfc, 0xb8,
+ 0x90, 0x29, 0x25, 0xe7, 0xd1, 0xe2, 0x1a, 0xbe },
+ .public = { 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .result = { 0x0f, 0xca, 0xb5, 0xd8, 0x42, 0xa0, 0x78, 0xd7,
+ 0xa7, 0x1f, 0xc5, 0x9b, 0x57, 0xbf, 0xb4, 0xca,
+ 0x0b, 0xe6, 0x87, 0x3b, 0x49, 0xdc, 0xdb, 0x9f,
+ 0x44, 0xe1, 0x4a, 0xe8, 0xfb, 0xdf, 0xa5, 0x42 },
+ .valid = true
+ },
+ /* wycheproof - edge case for public key */
+ {
+ .private = { 0xe0, 0x23, 0xa2, 0x89, 0xbd, 0x5e, 0x90, 0xfa,
+ 0x28, 0x04, 0xdd, 0xc0, 0x19, 0xa0, 0x5e, 0xf3,
+ 0xe7, 0x9d, 0x43, 0x4b, 0xb6, 0xea, 0x2f, 0x52,
+ 0x2e, 0xcb, 0x64, 0x3a, 0x75, 0x29, 0x6e, 0x95 },
+ .public = { 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 },
+ .result = { 0x54, 0xce, 0x8f, 0x22, 0x75, 0xc0, 0x77, 0xe3,
+ 0xb1, 0x30, 0x6a, 0x39, 0x39, 0xc5, 0xe0, 0x3e,
+ 0xef, 0x6b, 0xbb, 0x88, 0x06, 0x05, 0x44, 0x75,
+ 0x8d, 0x9f, 0xef, 0x59, 0xb0, 0xbc, 0x3e, 0x4f },
+ .valid = true
+ },
+ /* wycheproof - edge case for public key */
+ {
+ .private = { 0x68, 0xf0, 0x10, 0xd6, 0x2e, 0xe8, 0xd9, 0x26,
+ 0x05, 0x3a, 0x36, 0x1c, 0x3a, 0x75, 0xc6, 0xea,
+ 0x4e, 0xbd, 0xc8, 0x60, 0x6a, 0xb2, 0x85, 0x00,
+ 0x3a, 0x6f, 0x8f, 0x40, 0x76, 0xb0, 0x1e, 0x83 },
+ .public = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x03 },
+ .result = { 0xf1, 0x36, 0x77, 0x5c, 0x5b, 0xeb, 0x0a, 0xf8,
+ 0x11, 0x0a, 0xf1, 0x0b, 0x20, 0x37, 0x23, 0x32,
+ 0x04, 0x3c, 0xab, 0x75, 0x24, 0x19, 0x67, 0x87,
+ 0x75, 0xa2, 0x23, 0xdf, 0x57, 0xc9, 0xd3, 0x0d },
+ .valid = true
+ },
+ /* wycheproof - edge case for public key */
+ {
+ .private = { 0x58, 0xeb, 0xcb, 0x35, 0xb0, 0xf8, 0x84, 0x5c,
+ 0xaf, 0x1e, 0xc6, 0x30, 0xf9, 0x65, 0x76, 0xb6,
+ 0x2c, 0x4b, 0x7b, 0x6c, 0x36, 0xb2, 0x9d, 0xeb,
+ 0x2c, 0xb0, 0x08, 0x46, 0x51, 0x75, 0x5c, 0x96 },
+ .public = { 0xff, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xfb, 0xff,
+ 0xff, 0xdf, 0xff, 0xff, 0xdf, 0xff, 0xff, 0xff,
+ 0xfe, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf7, 0xff,
+ 0xff, 0xf7, 0xff, 0xff, 0xbf, 0xff, 0xff, 0x3f },
+ .result = { 0xbf, 0x9a, 0xff, 0xd0, 0x6b, 0x84, 0x40, 0x85,
+ 0x58, 0x64, 0x60, 0x96, 0x2e, 0xf2, 0x14, 0x6f,
+ 0xf3, 0xd4, 0x53, 0x3d, 0x94, 0x44, 0xaa, 0xb0,
+ 0x06, 0xeb, 0x88, 0xcc, 0x30, 0x54, 0x40, 0x7d },
+ .valid = true
+ },
+ /* wycheproof - edge case for public key */
+ {
+ .private = { 0x18, 0x8c, 0x4b, 0xc5, 0xb9, 0xc4, 0x4b, 0x38,
+ 0xbb, 0x65, 0x8b, 0x9b, 0x2a, 0xe8, 0x2d, 0x5b,
+ 0x01, 0x01, 0x5e, 0x09, 0x31, 0x84, 0xb1, 0x7c,
+ 0xb7, 0x86, 0x35, 0x03, 0xa7, 0x83, 0xe1, 0xbb },
+ .public = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f },
+ .result = { 0xd4, 0x80, 0xde, 0x04, 0xf6, 0x99, 0xcb, 0x3b,
+ 0xe0, 0x68, 0x4a, 0x9c, 0xc2, 0xe3, 0x12, 0x81,
+ 0xea, 0x0b, 0xc5, 0xa9, 0xdc, 0xc1, 0x57, 0xd3,
+ 0xd2, 0x01, 0x58, 0xd4, 0x6c, 0xa5, 0x24, 0x6d },
+ .valid = true
+ },
+ /* wycheproof - edge case for public key */
+ {
+ .private = { 0xe0, 0x6c, 0x11, 0xbb, 0x2e, 0x13, 0xce, 0x3d,
+ 0xc7, 0x67, 0x3f, 0x67, 0xf5, 0x48, 0x22, 0x42,
+ 0x90, 0x94, 0x23, 0xa9, 0xae, 0x95, 0xee, 0x98,
+ 0x6a, 0x98, 0x8d, 0x98, 0xfa, 0xee, 0x23, 0xa2 },
+ .public = { 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0x7f,
+ 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0x7f },
+ .result = { 0x4c, 0x44, 0x01, 0xcc, 0xe6, 0xb5, 0x1e, 0x4c,
+ 0xb1, 0x8f, 0x27, 0x90, 0x24, 0x6c, 0x9b, 0xf9,
+ 0x14, 0xdb, 0x66, 0x77, 0x50, 0xa1, 0xcb, 0x89,
+ 0x06, 0x90, 0x92, 0xaf, 0x07, 0x29, 0x22, 0x76 },
+ .valid = true
+ },
+ /* wycheproof - edge case for public key */
+ {
+ .private = { 0xc0, 0x65, 0x8c, 0x46, 0xdd, 0xe1, 0x81, 0x29,
+ 0x29, 0x38, 0x77, 0x53, 0x5b, 0x11, 0x62, 0xb6,
+ 0xf9, 0xf5, 0x41, 0x4a, 0x23, 0xcf, 0x4d, 0x2c,
+ 0xbc, 0x14, 0x0a, 0x4d, 0x99, 0xda, 0x2b, 0x8f },
+ .public = { 0xeb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
+ .result = { 0x57, 0x8b, 0xa8, 0xcc, 0x2d, 0xbd, 0xc5, 0x75,
+ 0xaf, 0xcf, 0x9d, 0xf2, 0xb3, 0xee, 0x61, 0x89,
+ 0xf5, 0x33, 0x7d, 0x68, 0x54, 0xc7, 0x9b, 0x4c,
+ 0xe1, 0x65, 0xea, 0x12, 0x29, 0x3b, 0x3a, 0x0f },
+ .valid = true
+ },
+ /* wycheproof - public key with low order */
+ {
+ .private = { 0x10, 0x25, 0x5c, 0x92, 0x30, 0xa9, 0x7a, 0x30,
+ 0xa4, 0x58, 0xca, 0x28, 0x4a, 0x62, 0x96, 0x69,
+ 0x29, 0x3a, 0x31, 0x89, 0x0c, 0xda, 0x9d, 0x14,
+ 0x7f, 0xeb, 0xc7, 0xd1, 0xe2, 0x2d, 0x6b, 0xb1 },
+ .public = { 0xe0, 0xeb, 0x7a, 0x7c, 0x3b, 0x41, 0xb8, 0xae,
+ 0x16, 0x56, 0xe3, 0xfa, 0xf1, 0x9f, 0xc4, 0x6a,
+ 0xda, 0x09, 0x8d, 0xeb, 0x9c, 0x32, 0xb1, 0xfd,
+ 0x86, 0x62, 0x05, 0x16, 0x5f, 0x49, 0xb8, 0x00 },
+ .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .valid = false
+ },
+ /* wycheproof - public key with low order */
+ {
+ .private = { 0x78, 0xf1, 0xe8, 0xed, 0xf1, 0x44, 0x81, 0xb3,
+ 0x89, 0x44, 0x8d, 0xac, 0x8f, 0x59, 0xc7, 0x0b,
+ 0x03, 0x8e, 0x7c, 0xf9, 0x2e, 0xf2, 0xc7, 0xef,
+ 0xf5, 0x7a, 0x72, 0x46, 0x6e, 0x11, 0x52, 0x96 },
+ .public = { 0x5f, 0x9c, 0x95, 0xbc, 0xa3, 0x50, 0x8c, 0x24,
+ 0xb1, 0xd0, 0xb1, 0x55, 0x9c, 0x83, 0xef, 0x5b,
+ 0x04, 0x44, 0x5c, 0xc4, 0x58, 0x1c, 0x8e, 0x86,
+ 0xd8, 0x22, 0x4e, 0xdd, 0xd0, 0x9f, 0x11, 0x57 },
+ .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .valid = false
+ },
+ /* wycheproof - public key with low order */
+ {
+ .private = { 0xa0, 0xa0, 0x5a, 0x3e, 0x8f, 0x9f, 0x44, 0x20,
+ 0x4d, 0x5f, 0x80, 0x59, 0xa9, 0x4a, 0xc7, 0xdf,
+ 0xc3, 0x9a, 0x49, 0xac, 0x01, 0x6d, 0xd7, 0x43,
+ 0xdb, 0xfa, 0x43, 0xc5, 0xd6, 0x71, 0xfd, 0x88 },
+ .public = { 0xec, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
+ .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .valid = false
+ },
+ /* wycheproof - public key with low order */
+ {
+ .private = { 0xd0, 0xdb, 0xb3, 0xed, 0x19, 0x06, 0x66, 0x3f,
+ 0x15, 0x42, 0x0a, 0xf3, 0x1f, 0x4e, 0xaf, 0x65,
+ 0x09, 0xd9, 0xa9, 0x94, 0x97, 0x23, 0x50, 0x06,
+ 0x05, 0xad, 0x7c, 0x1c, 0x6e, 0x74, 0x50, 0xa9 },
+ .public = { 0xed, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
+ .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .valid = false
+ },
+ /* wycheproof - public key with low order */
+ {
+ .private = { 0xc0, 0xb1, 0xd0, 0xeb, 0x22, 0xb2, 0x44, 0xfe,
+ 0x32, 0x91, 0x14, 0x00, 0x72, 0xcd, 0xd9, 0xd9,
+ 0x89, 0xb5, 0xf0, 0xec, 0xd9, 0x6c, 0x10, 0x0f,
+ 0xeb, 0x5b, 0xca, 0x24, 0x1c, 0x1d, 0x9f, 0x8f },
+ .public = { 0xee, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
+ .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .valid = false
+ },
+ /* wycheproof - public key with low order */
+ {
+ .private = { 0x48, 0x0b, 0xf4, 0x5f, 0x59, 0x49, 0x42, 0xa8,
+ 0xbc, 0x0f, 0x33, 0x53, 0xc6, 0xe8, 0xb8, 0x85,
+ 0x3d, 0x77, 0xf3, 0x51, 0xf1, 0xc2, 0xca, 0x6c,
+ 0x2d, 0x1a, 0xbf, 0x8a, 0x00, 0xb4, 0x22, 0x9c },
+ .public = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80 },
+ .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .valid = false
+ },
+ /* wycheproof - public key with low order */
+ {
+ .private = { 0x30, 0xf9, 0x93, 0xfc, 0xf8, 0x51, 0x4f, 0xc8,
+ 0x9b, 0xd8, 0xdb, 0x14, 0xcd, 0x43, 0xba, 0x0d,
+ 0x4b, 0x25, 0x30, 0xe7, 0x3c, 0x42, 0x76, 0xa0,
+ 0x5e, 0x1b, 0x14, 0x5d, 0x42, 0x0c, 0xed, 0xb4 },
+ .public = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80 },
+ .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .valid = false
+ },
+ /* wycheproof - public key with low order */
+ {
+ .private = { 0xc0, 0x49, 0x74, 0xb7, 0x58, 0x38, 0x0e, 0x2a,
+ 0x5b, 0x5d, 0xf6, 0xeb, 0x09, 0xbb, 0x2f, 0x6b,
+ 0x34, 0x34, 0xf9, 0x82, 0x72, 0x2a, 0x8e, 0x67,
+ 0x6d, 0x3d, 0xa2, 0x51, 0xd1, 0xb3, 0xde, 0x83 },
+ .public = { 0xe0, 0xeb, 0x7a, 0x7c, 0x3b, 0x41, 0xb8, 0xae,
+ 0x16, 0x56, 0xe3, 0xfa, 0xf1, 0x9f, 0xc4, 0x6a,
+ 0xda, 0x09, 0x8d, 0xeb, 0x9c, 0x32, 0xb1, 0xfd,
+ 0x86, 0x62, 0x05, 0x16, 0x5f, 0x49, 0xb8, 0x80 },
+ .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .valid = false
+ },
+ /* wycheproof - public key with low order */
+ {
+ .private = { 0x50, 0x2a, 0x31, 0x37, 0x3d, 0xb3, 0x24, 0x46,
+ 0x84, 0x2f, 0xe5, 0xad, 0xd3, 0xe0, 0x24, 0x02,
+ 0x2e, 0xa5, 0x4f, 0x27, 0x41, 0x82, 0xaf, 0xc3,
+ 0xd9, 0xf1, 0xbb, 0x3d, 0x39, 0x53, 0x4e, 0xb5 },
+ .public = { 0x5f, 0x9c, 0x95, 0xbc, 0xa3, 0x50, 0x8c, 0x24,
+ 0xb1, 0xd0, 0xb1, 0x55, 0x9c, 0x83, 0xef, 0x5b,
+ 0x04, 0x44, 0x5c, 0xc4, 0x58, 0x1c, 0x8e, 0x86,
+ 0xd8, 0x22, 0x4e, 0xdd, 0xd0, 0x9f, 0x11, 0xd7 },
+ .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .valid = false
+ },
+ /* wycheproof - public key with low order */
+ {
+ .private = { 0x90, 0xfa, 0x64, 0x17, 0xb0, 0xe3, 0x70, 0x30,
+ 0xfd, 0x6e, 0x43, 0xef, 0xf2, 0xab, 0xae, 0xf1,
+ 0x4c, 0x67, 0x93, 0x11, 0x7a, 0x03, 0x9c, 0xf6,
+ 0x21, 0x31, 0x8b, 0xa9, 0x0f, 0x4e, 0x98, 0xbe },
+ .public = { 0xec, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .valid = false
+ },
+ /* wycheproof - public key with low order */
+ {
+ .private = { 0x78, 0xad, 0x3f, 0x26, 0x02, 0x7f, 0x1c, 0x9f,
+ 0xdd, 0x97, 0x5a, 0x16, 0x13, 0xb9, 0x47, 0x77,
+ 0x9b, 0xad, 0x2c, 0xf2, 0xb7, 0x41, 0xad, 0xe0,
+ 0x18, 0x40, 0x88, 0x5a, 0x30, 0xbb, 0x97, 0x9c },
+ .public = { 0xed, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .valid = false
+ },
+ /* wycheproof - public key with low order */
+ {
+ .private = { 0x98, 0xe2, 0x3d, 0xe7, 0xb1, 0xe0, 0x92, 0x6e,
+ 0xd9, 0xc8, 0x7e, 0x7b, 0x14, 0xba, 0xf5, 0x5f,
+ 0x49, 0x7a, 0x1d, 0x70, 0x96, 0xf9, 0x39, 0x77,
+ 0x68, 0x0e, 0x44, 0xdc, 0x1c, 0x7b, 0x7b, 0x8b },
+ .public = { 0xee, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .valid = false
+ },
+ /* wycheproof - public key >= p */
+ {
+ .private = { 0xf0, 0x1e, 0x48, 0xda, 0xfa, 0xc9, 0xd7, 0xbc,
+ 0xf5, 0x89, 0xcb, 0xc3, 0x82, 0xc8, 0x78, 0xd1,
+ 0x8b, 0xda, 0x35, 0x50, 0x58, 0x9f, 0xfb, 0x5d,
+ 0x50, 0xb5, 0x23, 0xbe, 0xbe, 0x32, 0x9d, 0xae },
+ .public = { 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
+ .result = { 0xbd, 0x36, 0xa0, 0x79, 0x0e, 0xb8, 0x83, 0x09,
+ 0x8c, 0x98, 0x8b, 0x21, 0x78, 0x67, 0x73, 0xde,
+ 0x0b, 0x3a, 0x4d, 0xf1, 0x62, 0x28, 0x2c, 0xf1,
+ 0x10, 0xde, 0x18, 0xdd, 0x48, 0x4c, 0xe7, 0x4b },
+ .valid = true
+ },
+ /* wycheproof - public key >= p */
+ {
+ .private = { 0x28, 0x87, 0x96, 0xbc, 0x5a, 0xff, 0x4b, 0x81,
+ 0xa3, 0x75, 0x01, 0x75, 0x7b, 0xc0, 0x75, 0x3a,
+ 0x3c, 0x21, 0x96, 0x47, 0x90, 0xd3, 0x86, 0x99,
+ 0x30, 0x8d, 0xeb, 0xc1, 0x7a, 0x6e, 0xaf, 0x8d },
+ .public = { 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
+ .result = { 0xb4, 0xe0, 0xdd, 0x76, 0xda, 0x7b, 0x07, 0x17,
+ 0x28, 0xb6, 0x1f, 0x85, 0x67, 0x71, 0xaa, 0x35,
+ 0x6e, 0x57, 0xed, 0xa7, 0x8a, 0x5b, 0x16, 0x55,
+ 0xcc, 0x38, 0x20, 0xfb, 0x5f, 0x85, 0x4c, 0x5c },
+ .valid = true
+ },
+ /* wycheproof - public key >= p */
+ {
+ .private = { 0x98, 0xdf, 0x84, 0x5f, 0x66, 0x51, 0xbf, 0x11,
+ 0x38, 0x22, 0x1f, 0x11, 0x90, 0x41, 0xf7, 0x2b,
+ 0x6d, 0xbc, 0x3c, 0x4a, 0xce, 0x71, 0x43, 0xd9,
+ 0x9f, 0xd5, 0x5a, 0xd8, 0x67, 0x48, 0x0d, 0xa8 },
+ .public = { 0xf1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
+ .result = { 0x6f, 0xdf, 0x6c, 0x37, 0x61, 0x1d, 0xbd, 0x53,
+ 0x04, 0xdc, 0x0f, 0x2e, 0xb7, 0xc9, 0x51, 0x7e,
+ 0xb3, 0xc5, 0x0e, 0x12, 0xfd, 0x05, 0x0a, 0xc6,
+ 0xde, 0xc2, 0x70, 0x71, 0xd4, 0xbf, 0xc0, 0x34 },
+ .valid = true
+ },
+ /* wycheproof - public key >= p */
+ {
+ .private = { 0xf0, 0x94, 0x98, 0xe4, 0x6f, 0x02, 0xf8, 0x78,
+ 0x82, 0x9e, 0x78, 0xb8, 0x03, 0xd3, 0x16, 0xa2,
+ 0xed, 0x69, 0x5d, 0x04, 0x98, 0xa0, 0x8a, 0xbd,
+ 0xf8, 0x27, 0x69, 0x30, 0xe2, 0x4e, 0xdc, 0xb0 },
+ .public = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
+ .result = { 0x4c, 0x8f, 0xc4, 0xb1, 0xc6, 0xab, 0x88, 0xfb,
+ 0x21, 0xf1, 0x8f, 0x6d, 0x4c, 0x81, 0x02, 0x40,
+ 0xd4, 0xe9, 0x46, 0x51, 0xba, 0x44, 0xf7, 0xa2,
+ 0xc8, 0x63, 0xce, 0xc7, 0xdc, 0x56, 0x60, 0x2d },
+ .valid = true
+ },
+ /* wycheproof - public key >= p */
+ {
+ .private = { 0x18, 0x13, 0xc1, 0x0a, 0x5c, 0x7f, 0x21, 0xf9,
+ 0x6e, 0x17, 0xf2, 0x88, 0xc0, 0xcc, 0x37, 0x60,
+ 0x7c, 0x04, 0xc5, 0xf5, 0xae, 0xa2, 0xdb, 0x13,
+ 0x4f, 0x9e, 0x2f, 0xfc, 0x66, 0xbd, 0x9d, 0xb8 },
+ .public = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80 },
+ .result = { 0x1c, 0xd0, 0xb2, 0x82, 0x67, 0xdc, 0x54, 0x1c,
+ 0x64, 0x2d, 0x6d, 0x7d, 0xca, 0x44, 0xa8, 0xb3,
+ 0x8a, 0x63, 0x73, 0x6e, 0xef, 0x5c, 0x4e, 0x65,
+ 0x01, 0xff, 0xbb, 0xb1, 0x78, 0x0c, 0x03, 0x3c },
+ .valid = true
+ },
+ /* wycheproof - public key >= p */
+ {
+ .private = { 0x78, 0x57, 0xfb, 0x80, 0x86, 0x53, 0x64, 0x5a,
+ 0x0b, 0xeb, 0x13, 0x8a, 0x64, 0xf5, 0xf4, 0xd7,
+ 0x33, 0xa4, 0x5e, 0xa8, 0x4c, 0x3c, 0xda, 0x11,
+ 0xa9, 0xc0, 0x6f, 0x7e, 0x71, 0x39, 0x14, 0x9e },
+ .public = { 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80 },
+ .result = { 0x87, 0x55, 0xbe, 0x01, 0xc6, 0x0a, 0x7e, 0x82,
+ 0x5c, 0xff, 0x3e, 0x0e, 0x78, 0xcb, 0x3a, 0xa4,
+ 0x33, 0x38, 0x61, 0x51, 0x6a, 0xa5, 0x9b, 0x1c,
+ 0x51, 0xa8, 0xb2, 0xa5, 0x43, 0xdf, 0xa8, 0x22 },
+ .valid = true
+ },
+ /* wycheproof - public key >= p */
+ {
+ .private = { 0xe0, 0x3a, 0xa8, 0x42, 0xe2, 0xab, 0xc5, 0x6e,
+ 0x81, 0xe8, 0x7b, 0x8b, 0x9f, 0x41, 0x7b, 0x2a,
+ 0x1e, 0x59, 0x13, 0xc7, 0x23, 0xee, 0xd2, 0x8d,
+ 0x75, 0x2f, 0x8d, 0x47, 0xa5, 0x9f, 0x49, 0x8f },
+ .public = { 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80 },
+ .result = { 0x54, 0xc9, 0xa1, 0xed, 0x95, 0xe5, 0x46, 0xd2,
+ 0x78, 0x22, 0xa3, 0x60, 0x93, 0x1d, 0xda, 0x60,
+ 0xa1, 0xdf, 0x04, 0x9d, 0xa6, 0xf9, 0x04, 0x25,
+ 0x3c, 0x06, 0x12, 0xbb, 0xdc, 0x08, 0x74, 0x76 },
+ .valid = true
+ },
+ /* wycheproof - public key >= p */
+ {
+ .private = { 0xf8, 0xf7, 0x07, 0xb7, 0x99, 0x9b, 0x18, 0xcb,
+ 0x0d, 0x6b, 0x96, 0x12, 0x4f, 0x20, 0x45, 0x97,
+ 0x2c, 0xa2, 0x74, 0xbf, 0xc1, 0x54, 0xad, 0x0c,
+ 0x87, 0x03, 0x8c, 0x24, 0xc6, 0xd0, 0xd4, 0xb2 },
+ .public = { 0xda, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ .result = { 0xcc, 0x1f, 0x40, 0xd7, 0x43, 0xcd, 0xc2, 0x23,
+ 0x0e, 0x10, 0x43, 0xda, 0xba, 0x8b, 0x75, 0xe8,
+ 0x10, 0xf1, 0xfb, 0xab, 0x7f, 0x25, 0x52, 0x69,
+ 0xbd, 0x9e, 0xbb, 0x29, 0xe6, 0xbf, 0x49, 0x4f },
+ .valid = true
+ },
+ /* wycheproof - public key >= p */
+ {
+ .private = { 0xa0, 0x34, 0xf6, 0x84, 0xfa, 0x63, 0x1e, 0x1a,
+ 0x34, 0x81, 0x18, 0xc1, 0xce, 0x4c, 0x98, 0x23,
+ 0x1f, 0x2d, 0x9e, 0xec, 0x9b, 0xa5, 0x36, 0x5b,
+ 0x4a, 0x05, 0xd6, 0x9a, 0x78, 0x5b, 0x07, 0x96 },
+ .public = { 0xdb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ .result = { 0x54, 0x99, 0x8e, 0xe4, 0x3a, 0x5b, 0x00, 0x7b,
+ 0xf4, 0x99, 0xf0, 0x78, 0xe7, 0x36, 0x52, 0x44,
+ 0x00, 0xa8, 0xb5, 0xc7, 0xe9, 0xb9, 0xb4, 0x37,
+ 0x71, 0x74, 0x8c, 0x7c, 0xdf, 0x88, 0x04, 0x12 },
+ .valid = true
+ },
+ /* wycheproof - public key >= p */
+ {
+ .private = { 0x30, 0xb6, 0xc6, 0xa0, 0xf2, 0xff, 0xa6, 0x80,
+ 0x76, 0x8f, 0x99, 0x2b, 0xa8, 0x9e, 0x15, 0x2d,
+ 0x5b, 0xc9, 0x89, 0x3d, 0x38, 0xc9, 0x11, 0x9b,
+ 0xe4, 0xf7, 0x67, 0xbf, 0xab, 0x6e, 0x0c, 0xa5 },
+ .public = { 0xdc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ .result = { 0xea, 0xd9, 0xb3, 0x8e, 0xfd, 0xd7, 0x23, 0x63,
+ 0x79, 0x34, 0xe5, 0x5a, 0xb7, 0x17, 0xa7, 0xae,
+ 0x09, 0xeb, 0x86, 0xa2, 0x1d, 0xc3, 0x6a, 0x3f,
+ 0xee, 0xb8, 0x8b, 0x75, 0x9e, 0x39, 0x1e, 0x09 },
+ .valid = true
+ },
+ /* wycheproof - public key >= p */
+ {
+ .private = { 0x90, 0x1b, 0x9d, 0xcf, 0x88, 0x1e, 0x01, 0xe0,
+ 0x27, 0x57, 0x50, 0x35, 0xd4, 0x0b, 0x43, 0xbd,
+ 0xc1, 0xc5, 0x24, 0x2e, 0x03, 0x08, 0x47, 0x49,
+ 0x5b, 0x0c, 0x72, 0x86, 0x46, 0x9b, 0x65, 0x91 },
+ .public = { 0xea, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ .result = { 0x60, 0x2f, 0xf4, 0x07, 0x89, 0xb5, 0x4b, 0x41,
+ 0x80, 0x59, 0x15, 0xfe, 0x2a, 0x62, 0x21, 0xf0,
+ 0x7a, 0x50, 0xff, 0xc2, 0xc3, 0xfc, 0x94, 0xcf,
+ 0x61, 0xf1, 0x3d, 0x79, 0x04, 0xe8, 0x8e, 0x0e },
+ .valid = true
+ },
+ /* wycheproof - public key >= p */
+ {
+ .private = { 0x80, 0x46, 0x67, 0x7c, 0x28, 0xfd, 0x82, 0xc9,
+ 0xa1, 0xbd, 0xb7, 0x1a, 0x1a, 0x1a, 0x34, 0xfa,
+ 0xba, 0x12, 0x25, 0xe2, 0x50, 0x7f, 0xe3, 0xf5,
+ 0x4d, 0x10, 0xbd, 0x5b, 0x0d, 0x86, 0x5f, 0x8e },
+ .public = { 0xeb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ .result = { 0xe0, 0x0a, 0xe8, 0xb1, 0x43, 0x47, 0x12, 0x47,
+ 0xba, 0x24, 0xf1, 0x2c, 0x88, 0x55, 0x36, 0xc3,
+ 0xcb, 0x98, 0x1b, 0x58, 0xe1, 0xe5, 0x6b, 0x2b,
+ 0xaf, 0x35, 0xc1, 0x2a, 0xe1, 0xf7, 0x9c, 0x26 },
+ .valid = true
+ },
+ /* wycheproof - public key >= p */
+ {
+ .private = { 0x60, 0x2f, 0x7e, 0x2f, 0x68, 0xa8, 0x46, 0xb8,
+ 0x2c, 0xc2, 0x69, 0xb1, 0xd4, 0x8e, 0x93, 0x98,
+ 0x86, 0xae, 0x54, 0xfd, 0x63, 0x6c, 0x1f, 0xe0,
+ 0x74, 0xd7, 0x10, 0x12, 0x7d, 0x47, 0x24, 0x91 },
+ .public = { 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ .result = { 0x98, 0xcb, 0x9b, 0x50, 0xdd, 0x3f, 0xc2, 0xb0,
+ 0xd4, 0xf2, 0xd2, 0xbf, 0x7c, 0x5c, 0xfd, 0xd1,
+ 0x0c, 0x8f, 0xcd, 0x31, 0xfc, 0x40, 0xaf, 0x1a,
+ 0xd4, 0x4f, 0x47, 0xc1, 0x31, 0x37, 0x63, 0x62 },
+ .valid = true
+ },
+ /* wycheproof - public key >= p */
+ {
+ .private = { 0x60, 0x88, 0x7b, 0x3d, 0xc7, 0x24, 0x43, 0x02,
+ 0x6e, 0xbe, 0xdb, 0xbb, 0xb7, 0x06, 0x65, 0xf4,
+ 0x2b, 0x87, 0xad, 0xd1, 0x44, 0x0e, 0x77, 0x68,
+ 0xfb, 0xd7, 0xe8, 0xe2, 0xce, 0x5f, 0x63, 0x9d },
+ .public = { 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ .result = { 0x38, 0xd6, 0x30, 0x4c, 0x4a, 0x7e, 0x6d, 0x9f,
+ 0x79, 0x59, 0x33, 0x4f, 0xb5, 0x24, 0x5b, 0xd2,
+ 0xc7, 0x54, 0x52, 0x5d, 0x4c, 0x91, 0xdb, 0x95,
+ 0x02, 0x06, 0x92, 0x62, 0x34, 0xc1, 0xf6, 0x33 },
+ .valid = true
+ },
+ /* wycheproof - public key >= p */
+ {
+ .private = { 0x78, 0xd3, 0x1d, 0xfa, 0x85, 0x44, 0x97, 0xd7,
+ 0x2d, 0x8d, 0xef, 0x8a, 0x1b, 0x7f, 0xb0, 0x06,
+ 0xce, 0xc2, 0xd8, 0xc4, 0x92, 0x46, 0x47, 0xc9,
+ 0x38, 0x14, 0xae, 0x56, 0xfa, 0xed, 0xa4, 0x95 },
+ .public = { 0xf1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ .result = { 0x78, 0x6c, 0xd5, 0x49, 0x96, 0xf0, 0x14, 0xa5,
+ 0xa0, 0x31, 0xec, 0x14, 0xdb, 0x81, 0x2e, 0xd0,
+ 0x83, 0x55, 0x06, 0x1f, 0xdb, 0x5d, 0xe6, 0x80,
+ 0xa8, 0x00, 0xac, 0x52, 0x1f, 0x31, 0x8e, 0x23 },
+ .valid = true
+ },
+ /* wycheproof - public key >= p */
+ {
+ .private = { 0xc0, 0x4c, 0x5b, 0xae, 0xfa, 0x83, 0x02, 0xdd,
+ 0xde, 0xd6, 0xa4, 0xbb, 0x95, 0x77, 0x61, 0xb4,
+ 0xeb, 0x97, 0xae, 0xfa, 0x4f, 0xc3, 0xb8, 0x04,
+ 0x30, 0x85, 0xf9, 0x6a, 0x56, 0x59, 0xb3, 0xa5 },
+ .public = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ .result = { 0x29, 0xae, 0x8b, 0xc7, 0x3e, 0x9b, 0x10, 0xa0,
+ 0x8b, 0x4f, 0x68, 0x1c, 0x43, 0xc3, 0xe0, 0xac,
+ 0x1a, 0x17, 0x1d, 0x31, 0xb3, 0x8f, 0x1a, 0x48,
+ 0xef, 0xba, 0x29, 0xae, 0x63, 0x9e, 0xa1, 0x34 },
+ .valid = true
+ },
+ /* wycheproof - RFC 7748 */
+ {
+ .private = { 0xa0, 0x46, 0xe3, 0x6b, 0xf0, 0x52, 0x7c, 0x9d,
+ 0x3b, 0x16, 0x15, 0x4b, 0x82, 0x46, 0x5e, 0xdd,
+ 0x62, 0x14, 0x4c, 0x0a, 0xc1, 0xfc, 0x5a, 0x18,
+ 0x50, 0x6a, 0x22, 0x44, 0xba, 0x44, 0x9a, 0x44 },
+ .public = { 0xe6, 0xdb, 0x68, 0x67, 0x58, 0x30, 0x30, 0xdb,
+ 0x35, 0x94, 0xc1, 0xa4, 0x24, 0xb1, 0x5f, 0x7c,
+ 0x72, 0x66, 0x24, 0xec, 0x26, 0xb3, 0x35, 0x3b,
+ 0x10, 0xa9, 0x03, 0xa6, 0xd0, 0xab, 0x1c, 0x4c },
+ .result = { 0xc3, 0xda, 0x55, 0x37, 0x9d, 0xe9, 0xc6, 0x90,
+ 0x8e, 0x94, 0xea, 0x4d, 0xf2, 0x8d, 0x08, 0x4f,
+ 0x32, 0xec, 0xcf, 0x03, 0x49, 0x1c, 0x71, 0xf7,
+ 0x54, 0xb4, 0x07, 0x55, 0x77, 0xa2, 0x85, 0x52 },
+ .valid = true
+ },
+ /* wycheproof - RFC 7748 */
+ {
+ .private = { 0x48, 0x66, 0xe9, 0xd4, 0xd1, 0xb4, 0x67, 0x3c,
+ 0x5a, 0xd2, 0x26, 0x91, 0x95, 0x7d, 0x6a, 0xf5,
+ 0xc1, 0x1b, 0x64, 0x21, 0xe0, 0xea, 0x01, 0xd4,
+ 0x2c, 0xa4, 0x16, 0x9e, 0x79, 0x18, 0xba, 0x4d },
+ .public = { 0xe5, 0x21, 0x0f, 0x12, 0x78, 0x68, 0x11, 0xd3,
+ 0xf4, 0xb7, 0x95, 0x9d, 0x05, 0x38, 0xae, 0x2c,
+ 0x31, 0xdb, 0xe7, 0x10, 0x6f, 0xc0, 0x3c, 0x3e,
+ 0xfc, 0x4c, 0xd5, 0x49, 0xc7, 0x15, 0xa4, 0x13 },
+ .result = { 0x95, 0xcb, 0xde, 0x94, 0x76, 0xe8, 0x90, 0x7d,
+ 0x7a, 0xad, 0xe4, 0x5c, 0xb4, 0xb8, 0x73, 0xf8,
+ 0x8b, 0x59, 0x5a, 0x68, 0x79, 0x9f, 0xa1, 0x52,
+ 0xe6, 0xf8, 0xf7, 0x64, 0x7a, 0xac, 0x79, 0x57 },
+ .valid = true
+ },
+ /* wycheproof - edge case for shared secret */
+ {
+ .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
+ .public = { 0x0a, 0xb4, 0xe7, 0x63, 0x80, 0xd8, 0x4d, 0xde,
+ 0x4f, 0x68, 0x33, 0xc5, 0x8f, 0x2a, 0x9f, 0xb8,
+ 0xf8, 0x3b, 0xb0, 0x16, 0x9b, 0x17, 0x2b, 0xe4,
+ 0xb6, 0xe0, 0x59, 0x28, 0x87, 0x74, 0x1a, 0x36 },
+ .result = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .valid = true
+ },
+ /* wycheproof - edge case for shared secret */
+ {
+ .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
+ .public = { 0x89, 0xe1, 0x0d, 0x57, 0x01, 0xb4, 0x33, 0x7d,
+ 0x2d, 0x03, 0x21, 0x81, 0x53, 0x8b, 0x10, 0x64,
+ 0xbd, 0x40, 0x84, 0x40, 0x1c, 0xec, 0xa1, 0xfd,
+ 0x12, 0x66, 0x3a, 0x19, 0x59, 0x38, 0x80, 0x00 },
+ .result = { 0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .valid = true
+ },
+ /* wycheproof - edge case for shared secret */
+ {
+ .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
+ .public = { 0x2b, 0x55, 0xd3, 0xaa, 0x4a, 0x8f, 0x80, 0xc8,
+ 0xc0, 0xb2, 0xae, 0x5f, 0x93, 0x3e, 0x85, 0xaf,
+ 0x49, 0xbe, 0xac, 0x36, 0xc2, 0xfa, 0x73, 0x94,
+ 0xba, 0xb7, 0x6c, 0x89, 0x33, 0xf8, 0xf8, 0x1d },
+ .result = { 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .valid = true
+ },
+ /* wycheproof - edge case for shared secret */
+ {
+ .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
+ .public = { 0x63, 0xe5, 0xb1, 0xfe, 0x96, 0x01, 0xfe, 0x84,
+ 0x38, 0x5d, 0x88, 0x66, 0xb0, 0x42, 0x12, 0x62,
+ 0xf7, 0x8f, 0xbf, 0xa5, 0xaf, 0xf9, 0x58, 0x5e,
+ 0x62, 0x66, 0x79, 0xb1, 0x85, 0x47, 0xd9, 0x59 },
+ .result = { 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f },
+ .valid = true
+ },
+ /* wycheproof - edge case for shared secret */
+ {
+ .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
+ .public = { 0xe4, 0x28, 0xf3, 0xda, 0xc1, 0x78, 0x09, 0xf8,
+ 0x27, 0xa5, 0x22, 0xce, 0x32, 0x35, 0x50, 0x58,
+ 0xd0, 0x73, 0x69, 0x36, 0x4a, 0xa7, 0x89, 0x02,
+ 0xee, 0x10, 0x13, 0x9b, 0x9f, 0x9d, 0xd6, 0x53 },
+ .result = { 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f },
+ .valid = true
+ },
+ /* wycheproof - edge case for shared secret */
+ {
+ .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
+ .public = { 0xb3, 0xb5, 0x0e, 0x3e, 0xd3, 0xa4, 0x07, 0xb9,
+ 0x5d, 0xe9, 0x42, 0xef, 0x74, 0x57, 0x5b, 0x5a,
+ 0xb8, 0xa1, 0x0c, 0x09, 0xee, 0x10, 0x35, 0x44,
+ 0xd6, 0x0b, 0xdf, 0xed, 0x81, 0x38, 0xab, 0x2b },
+ .result = { 0xf9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f },
+ .valid = true
+ },
+ /* wycheproof - edge case for shared secret */
+ {
+ .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
+ .public = { 0x21, 0x3f, 0xff, 0xe9, 0x3d, 0x5e, 0xa8, 0xcd,
+ 0x24, 0x2e, 0x46, 0x28, 0x44, 0x02, 0x99, 0x22,
+ 0xc4, 0x3c, 0x77, 0xc9, 0xe3, 0xe4, 0x2f, 0x56,
+ 0x2f, 0x48, 0x5d, 0x24, 0xc5, 0x01, 0xa2, 0x0b },
+ .result = { 0xf3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f },
+ .valid = true
+ },
+ /* wycheproof - edge case for shared secret */
+ {
+ .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
+ .public = { 0x91, 0xb2, 0x32, 0xa1, 0x78, 0xb3, 0xcd, 0x53,
+ 0x09, 0x32, 0x44, 0x1e, 0x61, 0x39, 0x41, 0x8f,
+ 0x72, 0x17, 0x22, 0x92, 0xf1, 0xda, 0x4c, 0x18,
+ 0x34, 0xfc, 0x5e, 0xbf, 0xef, 0xb5, 0x1e, 0x3f },
+ .result = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x03 },
+ .valid = true
+ },
+ /* wycheproof - edge case for shared secret */
+ {
+ .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
+ .public = { 0x04, 0x5c, 0x6e, 0x11, 0xc5, 0xd3, 0x32, 0x55,
+ 0x6c, 0x78, 0x22, 0xfe, 0x94, 0xeb, 0xf8, 0x9b,
+ 0x56, 0xa3, 0x87, 0x8d, 0xc2, 0x7c, 0xa0, 0x79,
+ 0x10, 0x30, 0x58, 0x84, 0x9f, 0xab, 0xcb, 0x4f },
+ .result = { 0xe5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
+ .valid = true
+ },
+ /* wycheproof - edge case for shared secret */
+ {
+ .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
+ .public = { 0x1c, 0xa2, 0x19, 0x0b, 0x71, 0x16, 0x35, 0x39,
+ 0x06, 0x3c, 0x35, 0x77, 0x3b, 0xda, 0x0c, 0x9c,
+ 0x92, 0x8e, 0x91, 0x36, 0xf0, 0x62, 0x0a, 0xeb,
+ 0x09, 0x3f, 0x09, 0x91, 0x97, 0xb7, 0xf7, 0x4e },
+ .result = { 0xe3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
+ .valid = true
+ },
+ /* wycheproof - edge case for shared secret */
+ {
+ .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
+ .public = { 0xf7, 0x6e, 0x90, 0x10, 0xac, 0x33, 0xc5, 0x04,
+ 0x3b, 0x2d, 0x3b, 0x76, 0xa8, 0x42, 0x17, 0x10,
+ 0x00, 0xc4, 0x91, 0x62, 0x22, 0xe9, 0xe8, 0x58,
+ 0x97, 0xa0, 0xae, 0xc7, 0xf6, 0x35, 0x0b, 0x3c },
+ .result = { 0xdd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
+ .valid = true
+ },
+ /* wycheproof - edge case for shared secret */
+ {
+ .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
+ .public = { 0xbb, 0x72, 0x68, 0x8d, 0x8f, 0x8a, 0xa7, 0xa3,
+ 0x9c, 0xd6, 0x06, 0x0c, 0xd5, 0xc8, 0x09, 0x3c,
+ 0xde, 0xc6, 0xfe, 0x34, 0x19, 0x37, 0xc3, 0x88,
+ 0x6a, 0x99, 0x34, 0x6c, 0xd0, 0x7f, 0xaa, 0x55 },
+ .result = { 0xdb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
+ .valid = true
+ },
+ /* wycheproof - edge case for shared secret */
+ {
+ .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
+ .public = { 0x88, 0xfd, 0xde, 0xa1, 0x93, 0x39, 0x1c, 0x6a,
+ 0x59, 0x33, 0xef, 0x9b, 0x71, 0x90, 0x15, 0x49,
+ 0x44, 0x72, 0x05, 0xaa, 0xe9, 0xda, 0x92, 0x8a,
+ 0x6b, 0x91, 0xa3, 0x52, 0xba, 0x10, 0xf4, 0x1f },
+ .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02 },
+ .valid = true
+ },
+ /* wycheproof - edge case for shared secret */
+ {
+ .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
+ .public = { 0x30, 0x3b, 0x39, 0x2f, 0x15, 0x31, 0x16, 0xca,
+ 0xd9, 0xcc, 0x68, 0x2a, 0x00, 0xcc, 0xc4, 0x4c,
+ 0x95, 0xff, 0x0d, 0x3b, 0xbe, 0x56, 0x8b, 0xeb,
+ 0x6c, 0x4e, 0x73, 0x9b, 0xaf, 0xdc, 0x2c, 0x68 },
+ .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00 },
+ .valid = true
+ },
+ /* wycheproof - checking for overflow */
+ {
+ .private = { 0xc8, 0x17, 0x24, 0x70, 0x40, 0x00, 0xb2, 0x6d,
+ 0x31, 0x70, 0x3c, 0xc9, 0x7e, 0x3a, 0x37, 0x8d,
+ 0x56, 0xfa, 0xd8, 0x21, 0x93, 0x61, 0xc8, 0x8c,
+ 0xca, 0x8b, 0xd7, 0xc5, 0x71, 0x9b, 0x12, 0xb2 },
+ .public = { 0xfd, 0x30, 0x0a, 0xeb, 0x40, 0xe1, 0xfa, 0x58,
+ 0x25, 0x18, 0x41, 0x2b, 0x49, 0xb2, 0x08, 0xa7,
+ 0x84, 0x2b, 0x1e, 0x1f, 0x05, 0x6a, 0x04, 0x01,
+ 0x78, 0xea, 0x41, 0x41, 0x53, 0x4f, 0x65, 0x2d },
+ .result = { 0xb7, 0x34, 0x10, 0x5d, 0xc2, 0x57, 0x58, 0x5d,
+ 0x73, 0xb5, 0x66, 0xcc, 0xb7, 0x6f, 0x06, 0x27,
+ 0x95, 0xcc, 0xbe, 0xc8, 0x91, 0x28, 0xe5, 0x2b,
+ 0x02, 0xf3, 0xe5, 0x96, 0x39, 0xf1, 0x3c, 0x46 },
+ .valid = true
+ },
+ /* wycheproof - checking for overflow */
+ {
+ .private = { 0xc8, 0x17, 0x24, 0x70, 0x40, 0x00, 0xb2, 0x6d,
+ 0x31, 0x70, 0x3c, 0xc9, 0x7e, 0x3a, 0x37, 0x8d,
+ 0x56, 0xfa, 0xd8, 0x21, 0x93, 0x61, 0xc8, 0x8c,
+ 0xca, 0x8b, 0xd7, 0xc5, 0x71, 0x9b, 0x12, 0xb2 },
+ .public = { 0xc8, 0xef, 0x79, 0xb5, 0x14, 0xd7, 0x68, 0x26,
+ 0x77, 0xbc, 0x79, 0x31, 0xe0, 0x6e, 0xe5, 0xc2,
+ 0x7c, 0x9b, 0x39, 0x2b, 0x4a, 0xe9, 0x48, 0x44,
+ 0x73, 0xf5, 0x54, 0xe6, 0x67, 0x8e, 0xcc, 0x2e },
+ .result = { 0x64, 0x7a, 0x46, 0xb6, 0xfc, 0x3f, 0x40, 0xd6,
+ 0x21, 0x41, 0xee, 0x3c, 0xee, 0x70, 0x6b, 0x4d,
+ 0x7a, 0x92, 0x71, 0x59, 0x3a, 0x7b, 0x14, 0x3e,
+ 0x8e, 0x2e, 0x22, 0x79, 0x88, 0x3e, 0x45, 0x50 },
+ .valid = true
+ },
+ /* wycheproof - checking for overflow */
+ {
+ .private = { 0xc8, 0x17, 0x24, 0x70, 0x40, 0x00, 0xb2, 0x6d,
+ 0x31, 0x70, 0x3c, 0xc9, 0x7e, 0x3a, 0x37, 0x8d,
+ 0x56, 0xfa, 0xd8, 0x21, 0x93, 0x61, 0xc8, 0x8c,
+ 0xca, 0x8b, 0xd7, 0xc5, 0x71, 0x9b, 0x12, 0xb2 },
+ .public = { 0x64, 0xae, 0xac, 0x25, 0x04, 0x14, 0x48, 0x61,
+ 0x53, 0x2b, 0x7b, 0xbc, 0xb6, 0xc8, 0x7d, 0x67,
+ 0xdd, 0x4c, 0x1f, 0x07, 0xeb, 0xc2, 0xe0, 0x6e,
+ 0xff, 0xb9, 0x5a, 0xec, 0xc6, 0x17, 0x0b, 0x2c },
+ .result = { 0x4f, 0xf0, 0x3d, 0x5f, 0xb4, 0x3c, 0xd8, 0x65,
+ 0x7a, 0x3c, 0xf3, 0x7c, 0x13, 0x8c, 0xad, 0xce,
+ 0xcc, 0xe5, 0x09, 0xe4, 0xeb, 0xa0, 0x89, 0xd0,
+ 0xef, 0x40, 0xb4, 0xe4, 0xfb, 0x94, 0x61, 0x55 },
+ .valid = true
+ },
+ /* wycheproof - checking for overflow */
+ {
+ .private = { 0xc8, 0x17, 0x24, 0x70, 0x40, 0x00, 0xb2, 0x6d,
+ 0x31, 0x70, 0x3c, 0xc9, 0x7e, 0x3a, 0x37, 0x8d,
+ 0x56, 0xfa, 0xd8, 0x21, 0x93, 0x61, 0xc8, 0x8c,
+ 0xca, 0x8b, 0xd7, 0xc5, 0x71, 0x9b, 0x12, 0xb2 },
+ .public = { 0xbf, 0x68, 0xe3, 0x5e, 0x9b, 0xdb, 0x7e, 0xee,
+ 0x1b, 0x50, 0x57, 0x02, 0x21, 0x86, 0x0f, 0x5d,
+ 0xcd, 0xad, 0x8a, 0xcb, 0xab, 0x03, 0x1b, 0x14,
+ 0x97, 0x4c, 0xc4, 0x90, 0x13, 0xc4, 0x98, 0x31 },
+ .result = { 0x21, 0xce, 0xe5, 0x2e, 0xfd, 0xbc, 0x81, 0x2e,
+ 0x1d, 0x02, 0x1a, 0x4a, 0xf1, 0xe1, 0xd8, 0xbc,
+ 0x4d, 0xb3, 0xc4, 0x00, 0xe4, 0xd2, 0xa2, 0xc5,
+ 0x6a, 0x39, 0x26, 0xdb, 0x4d, 0x99, 0xc6, 0x5b },
+ .valid = true
+ },
+ /* wycheproof - checking for overflow */
+ {
+ .private = { 0xc8, 0x17, 0x24, 0x70, 0x40, 0x00, 0xb2, 0x6d,
+ 0x31, 0x70, 0x3c, 0xc9, 0x7e, 0x3a, 0x37, 0x8d,
+ 0x56, 0xfa, 0xd8, 0x21, 0x93, 0x61, 0xc8, 0x8c,
+ 0xca, 0x8b, 0xd7, 0xc5, 0x71, 0x9b, 0x12, 0xb2 },
+ .public = { 0x53, 0x47, 0xc4, 0x91, 0x33, 0x1a, 0x64, 0xb4,
+ 0x3d, 0xdc, 0x68, 0x30, 0x34, 0xe6, 0x77, 0xf5,
+ 0x3d, 0xc3, 0x2b, 0x52, 0xa5, 0x2a, 0x57, 0x7c,
+ 0x15, 0xa8, 0x3b, 0xf2, 0x98, 0xe9, 0x9f, 0x19 },
+ .result = { 0x18, 0xcb, 0x89, 0xe4, 0xe2, 0x0c, 0x0c, 0x2b,
+ 0xd3, 0x24, 0x30, 0x52, 0x45, 0x26, 0x6c, 0x93,
+ 0x27, 0x69, 0x0b, 0xbe, 0x79, 0xac, 0xb8, 0x8f,
+ 0x5b, 0x8f, 0xb3, 0xf7, 0x4e, 0xca, 0x3e, 0x52 },
+ .valid = true
+ },
+ /* wycheproof - private key == -1 (mod order) */
+ {
+ .private = { 0xa0, 0x23, 0xcd, 0xd0, 0x83, 0xef, 0x5b, 0xb8,
+ 0x2f, 0x10, 0xd6, 0x2e, 0x59, 0xe1, 0x5a, 0x68,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50 },
+ .public = { 0x25, 0x8e, 0x04, 0x52, 0x3b, 0x8d, 0x25, 0x3e,
+ 0xe6, 0x57, 0x19, 0xfc, 0x69, 0x06, 0xc6, 0x57,
+ 0x19, 0x2d, 0x80, 0x71, 0x7e, 0xdc, 0x82, 0x8f,
+ 0xa0, 0xaf, 0x21, 0x68, 0x6e, 0x2f, 0xaa, 0x75 },
+ .result = { 0x25, 0x8e, 0x04, 0x52, 0x3b, 0x8d, 0x25, 0x3e,
+ 0xe6, 0x57, 0x19, 0xfc, 0x69, 0x06, 0xc6, 0x57,
+ 0x19, 0x2d, 0x80, 0x71, 0x7e, 0xdc, 0x82, 0x8f,
+ 0xa0, 0xaf, 0x21, 0x68, 0x6e, 0x2f, 0xaa, 0x75 },
+ .valid = true
+ },
+ /* wycheproof - private key == 1 (mod order) on twist */
+ {
+ .private = { 0x58, 0x08, 0x3d, 0xd2, 0x61, 0xad, 0x91, 0xef,
+ 0xf9, 0x52, 0x32, 0x2e, 0xc8, 0x24, 0xc6, 0x82,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x5f },
+ .public = { 0x2e, 0xae, 0x5e, 0xc3, 0xdd, 0x49, 0x4e, 0x9f,
+ 0x2d, 0x37, 0xd2, 0x58, 0xf8, 0x73, 0xa8, 0xe6,
+ 0xe9, 0xd0, 0xdb, 0xd1, 0xe3, 0x83, 0xef, 0x64,
+ 0xd9, 0x8b, 0xb9, 0x1b, 0x3e, 0x0b, 0xe0, 0x35 },
+ .result = { 0x2e, 0xae, 0x5e, 0xc3, 0xdd, 0x49, 0x4e, 0x9f,
+ 0x2d, 0x37, 0xd2, 0x58, 0xf8, 0x73, 0xa8, 0xe6,
+ 0xe9, 0xd0, 0xdb, 0xd1, 0xe3, 0x83, 0xef, 0x64,
+ 0xd9, 0x8b, 0xb9, 0x1b, 0x3e, 0x0b, 0xe0, 0x35 },
+ .valid = true
+ }
+};
+
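+/*
+ * Every vector above is checked two ways: curve25519()'s return value
+ * must match .valid (false for the low-order public keys, which must
+ * not yield a usable secret), and the 32-byte output must equal
+ * .result exactly; the all-zero .result entries pair with
+ * .valid = false.
+ */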
+bool __init curve25519_selftest(void)
+{
+ bool success = true, ret, ret2;
+ size_t i = 0, j;
+ u8 in[CURVE25519_KEY_SIZE];
+ u8 out[CURVE25519_KEY_SIZE], out2[CURVE25519_KEY_SIZE],
+ out3[CURVE25519_KEY_SIZE];
+
+ for (i = 0; i < ARRAY_SIZE(curve25519_test_vectors); ++i) {
+ memset(out, 0, CURVE25519_KEY_SIZE);
+ ret = curve25519(out, curve25519_test_vectors[i].private,
+ curve25519_test_vectors[i].public);
+ if (ret != curve25519_test_vectors[i].valid ||
+ memcmp(out, curve25519_test_vectors[i].result,
+ CURVE25519_KEY_SIZE)) {
+ pr_err("curve25519 self-test %zu: FAIL\n", i + 1);
+ success = false;
+ }
+ }
+
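+	/*
+	 * Cross-check the three basepoint code paths against each other
+	 * with random scalars; { 9 } is the canonical little-endian
+	 * encoding of the Curve25519 base point u = 9 (RFC 7748).
+	 */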
+ for (i = 0; i < 5; ++i) {
+ get_random_bytes(in, sizeof(in));
+ ret = curve25519_generate_public(out, in);
+ ret2 = curve25519(out2, in, (u8[CURVE25519_KEY_SIZE]){ 9 });
+ curve25519_generic(out3, in, (u8[CURVE25519_KEY_SIZE]){ 9 });
+ if (ret != ret2 ||
+ memcmp(out, out2, CURVE25519_KEY_SIZE) ||
+ memcmp(out, out3, CURVE25519_KEY_SIZE)) {
+ pr_err("curve25519 basepoint self-test %zu: FAIL: input - 0x",
+ i + 1);
+ for (j = CURVE25519_KEY_SIZE; j-- > 0;)
+ printk(KERN_CONT "%02x", in[j]);
+ printk(KERN_CONT "\n");
+ success = false;
+ }
+ }
+
+ return success;
+}
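
As a reference for readers, here is a minimal sketch of how the two entry
points exercised by this self-test combine into an ECDH key agreement
(assuming <crypto/curve25519.h>, <linux/random.h> and <linux/string.h> are
included). The alice_*/bob_* names and the wrapper function are illustrative
only; real callers must also zeroize secrets and treat a false return as a
rejected (low-order) peer public key:

static bool curve25519_ecdh_sketch(void)
{
	u8 alice_secret[CURVE25519_KEY_SIZE], alice_public[CURVE25519_KEY_SIZE];
	u8 bob_secret[CURVE25519_KEY_SIZE], bob_public[CURVE25519_KEY_SIZE];
	u8 shared_a[CURVE25519_KEY_SIZE], shared_b[CURVE25519_KEY_SIZE];

	/* Each side draws a random scalar and derives its public key. */
	get_random_bytes(alice_secret, sizeof(alice_secret));
	get_random_bytes(bob_secret, sizeof(bob_secret));
	if (!curve25519_generate_public(alice_public, alice_secret) ||
	    !curve25519_generate_public(bob_public, bob_secret))
		return false;

	/* Each side multiplies its own scalar by the peer's public point. */
	if (!curve25519(shared_a, alice_secret, bob_public) ||
	    !curve25519(shared_b, bob_secret, alice_public))
		return false;

	/* Both sides now hold the same 32-byte shared secret. */
	return !memcmp(shared_a, shared_b, CURVE25519_KEY_SIZE);
}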
diff --git a/lib/crypto/curve25519.c b/lib/crypto/curve25519.c
new file mode 100644
index 000000000..064b352c6
--- /dev/null
+++ b/lib/crypto/curve25519.c
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ *
+ * This is an implementation of the Curve25519 ECDH algorithm, using either
+ * a 32-bit implementation or a 64-bit implementation with 128-bit integers,
+ * depending on what is supported by the target compiler.
+ *
+ * Information: https://cr.yp.to/ecdh.html
+ */
+
+#include <crypto/curve25519.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+static int __init curve25519_init(void)
+{
+ if (!IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS) &&
+ WARN_ON(!curve25519_selftest()))
+ return -ENODEV;
+ return 0;
+}
+
+static void __exit curve25519_exit(void)
+{
+}
+
+module_init(curve25519_init);
+module_exit(curve25519_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Curve25519 scalar multiplication");
+MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>");
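
A side note on the module boilerplate above: the empty curve25519_exit() is
deliberate. A kernel module that registers no exit handler cannot be
unloaded, so the stub keeps the library removable even though it has no
teardown work to do.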
diff --git a/lib/crypto/des.c b/lib/crypto/des.c
new file mode 100644
index 000000000..ef5bb8822
--- /dev/null
+++ b/lib/crypto/des.c
@@ -0,0 +1,902 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Cryptographic API.
+ *
+ * DES & Triple DES EDE Cipher Algorithms.
+ *
+ * Copyright (c) 2005 Dag Arne Osvik <da@osvik.no>
+ */
+
+#include <linux/bitops.h>
+#include <linux/compiler.h>
+#include <linux/crypto.h>
+#include <linux/errno.h>
+#include <linux/fips.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include <asm/unaligned.h>
+
+#include <crypto/des.h>
+#include <crypto/internal/des.h>
+
+#define ROL(x, r) ((x) = rol32((x), (r)))
+#define ROR(x, r) ((x) = ror32((x), (r)))
+
+/* Lookup tables for key expansion */
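+/*
+ * pc1[] folds DES Permuted Choice 1 into byte-at-a-time lookups, rs[]
+ * encodes the per-round left rotations of the two 28-bit key halves,
+ * and pc2[] compresses the rotated halves into the 48-bit round
+ * subkeys (Permuted Choice 2). The exact bit packing of each table is
+ * specific to the key-schedule code later in this file.
+ */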
+
+static const u8 pc1[256] = {
+ 0x00, 0x00, 0x40, 0x04, 0x10, 0x10, 0x50, 0x14,
+ 0x04, 0x40, 0x44, 0x44, 0x14, 0x50, 0x54, 0x54,
+ 0x02, 0x02, 0x42, 0x06, 0x12, 0x12, 0x52, 0x16,
+ 0x06, 0x42, 0x46, 0x46, 0x16, 0x52, 0x56, 0x56,
+ 0x80, 0x08, 0xc0, 0x0c, 0x90, 0x18, 0xd0, 0x1c,
+ 0x84, 0x48, 0xc4, 0x4c, 0x94, 0x58, 0xd4, 0x5c,
+ 0x82, 0x0a, 0xc2, 0x0e, 0x92, 0x1a, 0xd2, 0x1e,
+ 0x86, 0x4a, 0xc6, 0x4e, 0x96, 0x5a, 0xd6, 0x5e,
+ 0x20, 0x20, 0x60, 0x24, 0x30, 0x30, 0x70, 0x34,
+ 0x24, 0x60, 0x64, 0x64, 0x34, 0x70, 0x74, 0x74,
+ 0x22, 0x22, 0x62, 0x26, 0x32, 0x32, 0x72, 0x36,
+ 0x26, 0x62, 0x66, 0x66, 0x36, 0x72, 0x76, 0x76,
+ 0xa0, 0x28, 0xe0, 0x2c, 0xb0, 0x38, 0xf0, 0x3c,
+ 0xa4, 0x68, 0xe4, 0x6c, 0xb4, 0x78, 0xf4, 0x7c,
+ 0xa2, 0x2a, 0xe2, 0x2e, 0xb2, 0x3a, 0xf2, 0x3e,
+ 0xa6, 0x6a, 0xe6, 0x6e, 0xb6, 0x7a, 0xf6, 0x7e,
+ 0x08, 0x80, 0x48, 0x84, 0x18, 0x90, 0x58, 0x94,
+ 0x0c, 0xc0, 0x4c, 0xc4, 0x1c, 0xd0, 0x5c, 0xd4,
+ 0x0a, 0x82, 0x4a, 0x86, 0x1a, 0x92, 0x5a, 0x96,
+ 0x0e, 0xc2, 0x4e, 0xc6, 0x1e, 0xd2, 0x5e, 0xd6,
+ 0x88, 0x88, 0xc8, 0x8c, 0x98, 0x98, 0xd8, 0x9c,
+ 0x8c, 0xc8, 0xcc, 0xcc, 0x9c, 0xd8, 0xdc, 0xdc,
+ 0x8a, 0x8a, 0xca, 0x8e, 0x9a, 0x9a, 0xda, 0x9e,
+ 0x8e, 0xca, 0xce, 0xce, 0x9e, 0xda, 0xde, 0xde,
+ 0x28, 0xa0, 0x68, 0xa4, 0x38, 0xb0, 0x78, 0xb4,
+ 0x2c, 0xe0, 0x6c, 0xe4, 0x3c, 0xf0, 0x7c, 0xf4,
+ 0x2a, 0xa2, 0x6a, 0xa6, 0x3a, 0xb2, 0x7a, 0xb6,
+ 0x2e, 0xe2, 0x6e, 0xe6, 0x3e, 0xf2, 0x7e, 0xf6,
+ 0xa8, 0xa8, 0xe8, 0xac, 0xb8, 0xb8, 0xf8, 0xbc,
+ 0xac, 0xe8, 0xec, 0xec, 0xbc, 0xf8, 0xfc, 0xfc,
+ 0xaa, 0xaa, 0xea, 0xae, 0xba, 0xba, 0xfa, 0xbe,
+ 0xae, 0xea, 0xee, 0xee, 0xbe, 0xfa, 0xfe, 0xfe
+};
+
+static const u8 rs[256] = {
+ 0x00, 0x00, 0x80, 0x80, 0x02, 0x02, 0x82, 0x82,
+ 0x04, 0x04, 0x84, 0x84, 0x06, 0x06, 0x86, 0x86,
+ 0x08, 0x08, 0x88, 0x88, 0x0a, 0x0a, 0x8a, 0x8a,
+ 0x0c, 0x0c, 0x8c, 0x8c, 0x0e, 0x0e, 0x8e, 0x8e,
+ 0x10, 0x10, 0x90, 0x90, 0x12, 0x12, 0x92, 0x92,
+ 0x14, 0x14, 0x94, 0x94, 0x16, 0x16, 0x96, 0x96,
+ 0x18, 0x18, 0x98, 0x98, 0x1a, 0x1a, 0x9a, 0x9a,
+ 0x1c, 0x1c, 0x9c, 0x9c, 0x1e, 0x1e, 0x9e, 0x9e,
+ 0x20, 0x20, 0xa0, 0xa0, 0x22, 0x22, 0xa2, 0xa2,
+ 0x24, 0x24, 0xa4, 0xa4, 0x26, 0x26, 0xa6, 0xa6,
+ 0x28, 0x28, 0xa8, 0xa8, 0x2a, 0x2a, 0xaa, 0xaa,
+ 0x2c, 0x2c, 0xac, 0xac, 0x2e, 0x2e, 0xae, 0xae,
+ 0x30, 0x30, 0xb0, 0xb0, 0x32, 0x32, 0xb2, 0xb2,
+ 0x34, 0x34, 0xb4, 0xb4, 0x36, 0x36, 0xb6, 0xb6,
+ 0x38, 0x38, 0xb8, 0xb8, 0x3a, 0x3a, 0xba, 0xba,
+ 0x3c, 0x3c, 0xbc, 0xbc, 0x3e, 0x3e, 0xbe, 0xbe,
+ 0x40, 0x40, 0xc0, 0xc0, 0x42, 0x42, 0xc2, 0xc2,
+ 0x44, 0x44, 0xc4, 0xc4, 0x46, 0x46, 0xc6, 0xc6,
+ 0x48, 0x48, 0xc8, 0xc8, 0x4a, 0x4a, 0xca, 0xca,
+ 0x4c, 0x4c, 0xcc, 0xcc, 0x4e, 0x4e, 0xce, 0xce,
+ 0x50, 0x50, 0xd0, 0xd0, 0x52, 0x52, 0xd2, 0xd2,
+ 0x54, 0x54, 0xd4, 0xd4, 0x56, 0x56, 0xd6, 0xd6,
+ 0x58, 0x58, 0xd8, 0xd8, 0x5a, 0x5a, 0xda, 0xda,
+ 0x5c, 0x5c, 0xdc, 0xdc, 0x5e, 0x5e, 0xde, 0xde,
+ 0x60, 0x60, 0xe0, 0xe0, 0x62, 0x62, 0xe2, 0xe2,
+ 0x64, 0x64, 0xe4, 0xe4, 0x66, 0x66, 0xe6, 0xe6,
+ 0x68, 0x68, 0xe8, 0xe8, 0x6a, 0x6a, 0xea, 0xea,
+ 0x6c, 0x6c, 0xec, 0xec, 0x6e, 0x6e, 0xee, 0xee,
+ 0x70, 0x70, 0xf0, 0xf0, 0x72, 0x72, 0xf2, 0xf2,
+ 0x74, 0x74, 0xf4, 0xf4, 0x76, 0x76, 0xf6, 0xf6,
+ 0x78, 0x78, 0xf8, 0xf8, 0x7a, 0x7a, 0xfa, 0xfa,
+ 0x7c, 0x7c, 0xfc, 0xfc, 0x7e, 0x7e, 0xfe, 0xfe
+};
+
+static const u32 pc2[1024] = {
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00040000, 0x00000000, 0x04000000, 0x00100000,
+ 0x00400000, 0x00000008, 0x00000800, 0x40000000,
+ 0x00440000, 0x00000008, 0x04000800, 0x40100000,
+ 0x00000400, 0x00000020, 0x08000000, 0x00000100,
+ 0x00040400, 0x00000020, 0x0c000000, 0x00100100,
+ 0x00400400, 0x00000028, 0x08000800, 0x40000100,
+ 0x00440400, 0x00000028, 0x0c000800, 0x40100100,
+ 0x80000000, 0x00000010, 0x00000000, 0x00800000,
+ 0x80040000, 0x00000010, 0x04000000, 0x00900000,
+ 0x80400000, 0x00000018, 0x00000800, 0x40800000,
+ 0x80440000, 0x00000018, 0x04000800, 0x40900000,
+ 0x80000400, 0x00000030, 0x08000000, 0x00800100,
+ 0x80040400, 0x00000030, 0x0c000000, 0x00900100,
+ 0x80400400, 0x00000038, 0x08000800, 0x40800100,
+ 0x80440400, 0x00000038, 0x0c000800, 0x40900100,
+ 0x10000000, 0x00000000, 0x00200000, 0x00001000,
+ 0x10040000, 0x00000000, 0x04200000, 0x00101000,
+ 0x10400000, 0x00000008, 0x00200800, 0x40001000,
+ 0x10440000, 0x00000008, 0x04200800, 0x40101000,
+ 0x10000400, 0x00000020, 0x08200000, 0x00001100,
+ 0x10040400, 0x00000020, 0x0c200000, 0x00101100,
+ 0x10400400, 0x00000028, 0x08200800, 0x40001100,
+ 0x10440400, 0x00000028, 0x0c200800, 0x40101100,
+ 0x90000000, 0x00000010, 0x00200000, 0x00801000,
+ 0x90040000, 0x00000010, 0x04200000, 0x00901000,
+ 0x90400000, 0x00000018, 0x00200800, 0x40801000,
+ 0x90440000, 0x00000018, 0x04200800, 0x40901000,
+ 0x90000400, 0x00000030, 0x08200000, 0x00801100,
+ 0x90040400, 0x00000030, 0x0c200000, 0x00901100,
+ 0x90400400, 0x00000038, 0x08200800, 0x40801100,
+ 0x90440400, 0x00000038, 0x0c200800, 0x40901100,
+ 0x00000200, 0x00080000, 0x00000000, 0x00000004,
+ 0x00040200, 0x00080000, 0x04000000, 0x00100004,
+ 0x00400200, 0x00080008, 0x00000800, 0x40000004,
+ 0x00440200, 0x00080008, 0x04000800, 0x40100004,
+ 0x00000600, 0x00080020, 0x08000000, 0x00000104,
+ 0x00040600, 0x00080020, 0x0c000000, 0x00100104,
+ 0x00400600, 0x00080028, 0x08000800, 0x40000104,
+ 0x00440600, 0x00080028, 0x0c000800, 0x40100104,
+ 0x80000200, 0x00080010, 0x00000000, 0x00800004,
+ 0x80040200, 0x00080010, 0x04000000, 0x00900004,
+ 0x80400200, 0x00080018, 0x00000800, 0x40800004,
+ 0x80440200, 0x00080018, 0x04000800, 0x40900004,
+ 0x80000600, 0x00080030, 0x08000000, 0x00800104,
+ 0x80040600, 0x00080030, 0x0c000000, 0x00900104,
+ 0x80400600, 0x00080038, 0x08000800, 0x40800104,
+ 0x80440600, 0x00080038, 0x0c000800, 0x40900104,
+ 0x10000200, 0x00080000, 0x00200000, 0x00001004,
+ 0x10040200, 0x00080000, 0x04200000, 0x00101004,
+ 0x10400200, 0x00080008, 0x00200800, 0x40001004,
+ 0x10440200, 0x00080008, 0x04200800, 0x40101004,
+ 0x10000600, 0x00080020, 0x08200000, 0x00001104,
+ 0x10040600, 0x00080020, 0x0c200000, 0x00101104,
+ 0x10400600, 0x00080028, 0x08200800, 0x40001104,
+ 0x10440600, 0x00080028, 0x0c200800, 0x40101104,
+ 0x90000200, 0x00080010, 0x00200000, 0x00801004,
+ 0x90040200, 0x00080010, 0x04200000, 0x00901004,
+ 0x90400200, 0x00080018, 0x00200800, 0x40801004,
+ 0x90440200, 0x00080018, 0x04200800, 0x40901004,
+ 0x90000600, 0x00080030, 0x08200000, 0x00801104,
+ 0x90040600, 0x00080030, 0x0c200000, 0x00901104,
+ 0x90400600, 0x00080038, 0x08200800, 0x40801104,
+ 0x90440600, 0x00080038, 0x0c200800, 0x40901104,
+ 0x00000002, 0x00002000, 0x20000000, 0x00000001,
+ 0x00040002, 0x00002000, 0x24000000, 0x00100001,
+ 0x00400002, 0x00002008, 0x20000800, 0x40000001,
+ 0x00440002, 0x00002008, 0x24000800, 0x40100001,
+ 0x00000402, 0x00002020, 0x28000000, 0x00000101,
+ 0x00040402, 0x00002020, 0x2c000000, 0x00100101,
+ 0x00400402, 0x00002028, 0x28000800, 0x40000101,
+ 0x00440402, 0x00002028, 0x2c000800, 0x40100101,
+ 0x80000002, 0x00002010, 0x20000000, 0x00800001,
+ 0x80040002, 0x00002010, 0x24000000, 0x00900001,
+ 0x80400002, 0x00002018, 0x20000800, 0x40800001,
+ 0x80440002, 0x00002018, 0x24000800, 0x40900001,
+ 0x80000402, 0x00002030, 0x28000000, 0x00800101,
+ 0x80040402, 0x00002030, 0x2c000000, 0x00900101,
+ 0x80400402, 0x00002038, 0x28000800, 0x40800101,
+ 0x80440402, 0x00002038, 0x2c000800, 0x40900101,
+ 0x10000002, 0x00002000, 0x20200000, 0x00001001,
+ 0x10040002, 0x00002000, 0x24200000, 0x00101001,
+ 0x10400002, 0x00002008, 0x20200800, 0x40001001,
+ 0x10440002, 0x00002008, 0x24200800, 0x40101001,
+ 0x10000402, 0x00002020, 0x28200000, 0x00001101,
+ 0x10040402, 0x00002020, 0x2c200000, 0x00101101,
+ 0x10400402, 0x00002028, 0x28200800, 0x40001101,
+ 0x10440402, 0x00002028, 0x2c200800, 0x40101101,
+ 0x90000002, 0x00002010, 0x20200000, 0x00801001,
+ 0x90040002, 0x00002010, 0x24200000, 0x00901001,
+ 0x90400002, 0x00002018, 0x20200800, 0x40801001,
+ 0x90440002, 0x00002018, 0x24200800, 0x40901001,
+ 0x90000402, 0x00002030, 0x28200000, 0x00801101,
+ 0x90040402, 0x00002030, 0x2c200000, 0x00901101,
+ 0x90400402, 0x00002038, 0x28200800, 0x40801101,
+ 0x90440402, 0x00002038, 0x2c200800, 0x40901101,
+ 0x00000202, 0x00082000, 0x20000000, 0x00000005,
+ 0x00040202, 0x00082000, 0x24000000, 0x00100005,
+ 0x00400202, 0x00082008, 0x20000800, 0x40000005,
+ 0x00440202, 0x00082008, 0x24000800, 0x40100005,
+ 0x00000602, 0x00082020, 0x28000000, 0x00000105,
+ 0x00040602, 0x00082020, 0x2c000000, 0x00100105,
+ 0x00400602, 0x00082028, 0x28000800, 0x40000105,
+ 0x00440602, 0x00082028, 0x2c000800, 0x40100105,
+ 0x80000202, 0x00082010, 0x20000000, 0x00800005,
+ 0x80040202, 0x00082010, 0x24000000, 0x00900005,
+ 0x80400202, 0x00082018, 0x20000800, 0x40800005,
+ 0x80440202, 0x00082018, 0x24000800, 0x40900005,
+ 0x80000602, 0x00082030, 0x28000000, 0x00800105,
+ 0x80040602, 0x00082030, 0x2c000000, 0x00900105,
+ 0x80400602, 0x00082038, 0x28000800, 0x40800105,
+ 0x80440602, 0x00082038, 0x2c000800, 0x40900105,
+ 0x10000202, 0x00082000, 0x20200000, 0x00001005,
+ 0x10040202, 0x00082000, 0x24200000, 0x00101005,
+ 0x10400202, 0x00082008, 0x20200800, 0x40001005,
+ 0x10440202, 0x00082008, 0x24200800, 0x40101005,
+ 0x10000602, 0x00082020, 0x28200000, 0x00001105,
+ 0x10040602, 0x00082020, 0x2c200000, 0x00101105,
+ 0x10400602, 0x00082028, 0x28200800, 0x40001105,
+ 0x10440602, 0x00082028, 0x2c200800, 0x40101105,
+ 0x90000202, 0x00082010, 0x20200000, 0x00801005,
+ 0x90040202, 0x00082010, 0x24200000, 0x00901005,
+ 0x90400202, 0x00082018, 0x20200800, 0x40801005,
+ 0x90440202, 0x00082018, 0x24200800, 0x40901005,
+ 0x90000602, 0x00082030, 0x28200000, 0x00801105,
+ 0x90040602, 0x00082030, 0x2c200000, 0x00901105,
+ 0x90400602, 0x00082038, 0x28200800, 0x40801105,
+ 0x90440602, 0x00082038, 0x2c200800, 0x40901105,
+
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000008, 0x00080000, 0x10000000,
+ 0x02000000, 0x00000000, 0x00000080, 0x00001000,
+ 0x02000000, 0x00000008, 0x00080080, 0x10001000,
+ 0x00004000, 0x00000000, 0x00000040, 0x00040000,
+ 0x00004000, 0x00000008, 0x00080040, 0x10040000,
+ 0x02004000, 0x00000000, 0x000000c0, 0x00041000,
+ 0x02004000, 0x00000008, 0x000800c0, 0x10041000,
+ 0x00020000, 0x00008000, 0x08000000, 0x00200000,
+ 0x00020000, 0x00008008, 0x08080000, 0x10200000,
+ 0x02020000, 0x00008000, 0x08000080, 0x00201000,
+ 0x02020000, 0x00008008, 0x08080080, 0x10201000,
+ 0x00024000, 0x00008000, 0x08000040, 0x00240000,
+ 0x00024000, 0x00008008, 0x08080040, 0x10240000,
+ 0x02024000, 0x00008000, 0x080000c0, 0x00241000,
+ 0x02024000, 0x00008008, 0x080800c0, 0x10241000,
+ 0x00000000, 0x01000000, 0x00002000, 0x00000020,
+ 0x00000000, 0x01000008, 0x00082000, 0x10000020,
+ 0x02000000, 0x01000000, 0x00002080, 0x00001020,
+ 0x02000000, 0x01000008, 0x00082080, 0x10001020,
+ 0x00004000, 0x01000000, 0x00002040, 0x00040020,
+ 0x00004000, 0x01000008, 0x00082040, 0x10040020,
+ 0x02004000, 0x01000000, 0x000020c0, 0x00041020,
+ 0x02004000, 0x01000008, 0x000820c0, 0x10041020,
+ 0x00020000, 0x01008000, 0x08002000, 0x00200020,
+ 0x00020000, 0x01008008, 0x08082000, 0x10200020,
+ 0x02020000, 0x01008000, 0x08002080, 0x00201020,
+ 0x02020000, 0x01008008, 0x08082080, 0x10201020,
+ 0x00024000, 0x01008000, 0x08002040, 0x00240020,
+ 0x00024000, 0x01008008, 0x08082040, 0x10240020,
+ 0x02024000, 0x01008000, 0x080020c0, 0x00241020,
+ 0x02024000, 0x01008008, 0x080820c0, 0x10241020,
+ 0x00000400, 0x04000000, 0x00100000, 0x00000004,
+ 0x00000400, 0x04000008, 0x00180000, 0x10000004,
+ 0x02000400, 0x04000000, 0x00100080, 0x00001004,
+ 0x02000400, 0x04000008, 0x00180080, 0x10001004,
+ 0x00004400, 0x04000000, 0x00100040, 0x00040004,
+ 0x00004400, 0x04000008, 0x00180040, 0x10040004,
+ 0x02004400, 0x04000000, 0x001000c0, 0x00041004,
+ 0x02004400, 0x04000008, 0x001800c0, 0x10041004,
+ 0x00020400, 0x04008000, 0x08100000, 0x00200004,
+ 0x00020400, 0x04008008, 0x08180000, 0x10200004,
+ 0x02020400, 0x04008000, 0x08100080, 0x00201004,
+ 0x02020400, 0x04008008, 0x08180080, 0x10201004,
+ 0x00024400, 0x04008000, 0x08100040, 0x00240004,
+ 0x00024400, 0x04008008, 0x08180040, 0x10240004,
+ 0x02024400, 0x04008000, 0x081000c0, 0x00241004,
+ 0x02024400, 0x04008008, 0x081800c0, 0x10241004,
+ 0x00000400, 0x05000000, 0x00102000, 0x00000024,
+ 0x00000400, 0x05000008, 0x00182000, 0x10000024,
+ 0x02000400, 0x05000000, 0x00102080, 0x00001024,
+ 0x02000400, 0x05000008, 0x00182080, 0x10001024,
+ 0x00004400, 0x05000000, 0x00102040, 0x00040024,
+ 0x00004400, 0x05000008, 0x00182040, 0x10040024,
+ 0x02004400, 0x05000000, 0x001020c0, 0x00041024,
+ 0x02004400, 0x05000008, 0x001820c0, 0x10041024,
+ 0x00020400, 0x05008000, 0x08102000, 0x00200024,
+ 0x00020400, 0x05008008, 0x08182000, 0x10200024,
+ 0x02020400, 0x05008000, 0x08102080, 0x00201024,
+ 0x02020400, 0x05008008, 0x08182080, 0x10201024,
+ 0x00024400, 0x05008000, 0x08102040, 0x00240024,
+ 0x00024400, 0x05008008, 0x08182040, 0x10240024,
+ 0x02024400, 0x05008000, 0x081020c0, 0x00241024,
+ 0x02024400, 0x05008008, 0x081820c0, 0x10241024,
+ 0x00000800, 0x00010000, 0x20000000, 0x00000010,
+ 0x00000800, 0x00010008, 0x20080000, 0x10000010,
+ 0x02000800, 0x00010000, 0x20000080, 0x00001010,
+ 0x02000800, 0x00010008, 0x20080080, 0x10001010,
+ 0x00004800, 0x00010000, 0x20000040, 0x00040010,
+ 0x00004800, 0x00010008, 0x20080040, 0x10040010,
+ 0x02004800, 0x00010000, 0x200000c0, 0x00041010,
+ 0x02004800, 0x00010008, 0x200800c0, 0x10041010,
+ 0x00020800, 0x00018000, 0x28000000, 0x00200010,
+ 0x00020800, 0x00018008, 0x28080000, 0x10200010,
+ 0x02020800, 0x00018000, 0x28000080, 0x00201010,
+ 0x02020800, 0x00018008, 0x28080080, 0x10201010,
+ 0x00024800, 0x00018000, 0x28000040, 0x00240010,
+ 0x00024800, 0x00018008, 0x28080040, 0x10240010,
+ 0x02024800, 0x00018000, 0x280000c0, 0x00241010,
+ 0x02024800, 0x00018008, 0x280800c0, 0x10241010,
+ 0x00000800, 0x01010000, 0x20002000, 0x00000030,
+ 0x00000800, 0x01010008, 0x20082000, 0x10000030,
+ 0x02000800, 0x01010000, 0x20002080, 0x00001030,
+ 0x02000800, 0x01010008, 0x20082080, 0x10001030,
+ 0x00004800, 0x01010000, 0x20002040, 0x00040030,
+ 0x00004800, 0x01010008, 0x20082040, 0x10040030,
+ 0x02004800, 0x01010000, 0x200020c0, 0x00041030,
+ 0x02004800, 0x01010008, 0x200820c0, 0x10041030,
+ 0x00020800, 0x01018000, 0x28002000, 0x00200030,
+ 0x00020800, 0x01018008, 0x28082000, 0x10200030,
+ 0x02020800, 0x01018000, 0x28002080, 0x00201030,
+ 0x02020800, 0x01018008, 0x28082080, 0x10201030,
+ 0x00024800, 0x01018000, 0x28002040, 0x00240030,
+ 0x00024800, 0x01018008, 0x28082040, 0x10240030,
+ 0x02024800, 0x01018000, 0x280020c0, 0x00241030,
+ 0x02024800, 0x01018008, 0x280820c0, 0x10241030,
+ 0x00000c00, 0x04010000, 0x20100000, 0x00000014,
+ 0x00000c00, 0x04010008, 0x20180000, 0x10000014,
+ 0x02000c00, 0x04010000, 0x20100080, 0x00001014,
+ 0x02000c00, 0x04010008, 0x20180080, 0x10001014,
+ 0x00004c00, 0x04010000, 0x20100040, 0x00040014,
+ 0x00004c00, 0x04010008, 0x20180040, 0x10040014,
+ 0x02004c00, 0x04010000, 0x201000c0, 0x00041014,
+ 0x02004c00, 0x04010008, 0x201800c0, 0x10041014,
+ 0x00020c00, 0x04018000, 0x28100000, 0x00200014,
+ 0x00020c00, 0x04018008, 0x28180000, 0x10200014,
+ 0x02020c00, 0x04018000, 0x28100080, 0x00201014,
+ 0x02020c00, 0x04018008, 0x28180080, 0x10201014,
+ 0x00024c00, 0x04018000, 0x28100040, 0x00240014,
+ 0x00024c00, 0x04018008, 0x28180040, 0x10240014,
+ 0x02024c00, 0x04018000, 0x281000c0, 0x00241014,
+ 0x02024c00, 0x04018008, 0x281800c0, 0x10241014,
+ 0x00000c00, 0x05010000, 0x20102000, 0x00000034,
+ 0x00000c00, 0x05010008, 0x20182000, 0x10000034,
+ 0x02000c00, 0x05010000, 0x20102080, 0x00001034,
+ 0x02000c00, 0x05010008, 0x20182080, 0x10001034,
+ 0x00004c00, 0x05010000, 0x20102040, 0x00040034,
+ 0x00004c00, 0x05010008, 0x20182040, 0x10040034,
+ 0x02004c00, 0x05010000, 0x201020c0, 0x00041034,
+ 0x02004c00, 0x05010008, 0x201820c0, 0x10041034,
+ 0x00020c00, 0x05018000, 0x28102000, 0x00200034,
+ 0x00020c00, 0x05018008, 0x28182000, 0x10200034,
+ 0x02020c00, 0x05018000, 0x28102080, 0x00201034,
+ 0x02020c00, 0x05018008, 0x28182080, 0x10201034,
+ 0x00024c00, 0x05018000, 0x28102040, 0x00240034,
+ 0x00024c00, 0x05018008, 0x28182040, 0x10240034,
+ 0x02024c00, 0x05018000, 0x281020c0, 0x00241034,
+ 0x02024c00, 0x05018008, 0x281820c0, 0x10241034
+};
+
+/* S-box lookup tables */
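+/*
+ * Each S-box is stored as 64 32-bit words: the classic 4-bit outputs
+ * are pre-routed through the P permutation, so the round function
+ * reduces to eight 6-bit-indexed lookups whose results are combined,
+ * with no separate bit-permutation step.
+ */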
+
+static const u32 S1[64] = {
+ 0x01010400, 0x00000000, 0x00010000, 0x01010404,
+ 0x01010004, 0x00010404, 0x00000004, 0x00010000,
+ 0x00000400, 0x01010400, 0x01010404, 0x00000400,
+ 0x01000404, 0x01010004, 0x01000000, 0x00000004,
+ 0x00000404, 0x01000400, 0x01000400, 0x00010400,
+ 0x00010400, 0x01010000, 0x01010000, 0x01000404,
+ 0x00010004, 0x01000004, 0x01000004, 0x00010004,
+ 0x00000000, 0x00000404, 0x00010404, 0x01000000,
+ 0x00010000, 0x01010404, 0x00000004, 0x01010000,
+ 0x01010400, 0x01000000, 0x01000000, 0x00000400,
+ 0x01010004, 0x00010000, 0x00010400, 0x01000004,
+ 0x00000400, 0x00000004, 0x01000404, 0x00010404,
+ 0x01010404, 0x00010004, 0x01010000, 0x01000404,
+ 0x01000004, 0x00000404, 0x00010404, 0x01010400,
+ 0x00000404, 0x01000400, 0x01000400, 0x00000000,
+ 0x00010004, 0x00010400, 0x00000000, 0x01010004
+};
+
+static const u32 S2[64] = {
+ 0x80108020, 0x80008000, 0x00008000, 0x00108020,
+ 0x00100000, 0x00000020, 0x80100020, 0x80008020,
+ 0x80000020, 0x80108020, 0x80108000, 0x80000000,
+ 0x80008000, 0x00100000, 0x00000020, 0x80100020,
+ 0x00108000, 0x00100020, 0x80008020, 0x00000000,
+ 0x80000000, 0x00008000, 0x00108020, 0x80100000,
+ 0x00100020, 0x80000020, 0x00000000, 0x00108000,
+ 0x00008020, 0x80108000, 0x80100000, 0x00008020,
+ 0x00000000, 0x00108020, 0x80100020, 0x00100000,
+ 0x80008020, 0x80100000, 0x80108000, 0x00008000,
+ 0x80100000, 0x80008000, 0x00000020, 0x80108020,
+ 0x00108020, 0x00000020, 0x00008000, 0x80000000,
+ 0x00008020, 0x80108000, 0x00100000, 0x80000020,
+ 0x00100020, 0x80008020, 0x80000020, 0x00100020,
+ 0x00108000, 0x00000000, 0x80008000, 0x00008020,
+ 0x80000000, 0x80100020, 0x80108020, 0x00108000
+};
+
+static const u32 S3[64] = {
+ 0x00000208, 0x08020200, 0x00000000, 0x08020008,
+ 0x08000200, 0x00000000, 0x00020208, 0x08000200,
+ 0x00020008, 0x08000008, 0x08000008, 0x00020000,
+ 0x08020208, 0x00020008, 0x08020000, 0x00000208,
+ 0x08000000, 0x00000008, 0x08020200, 0x00000200,
+ 0x00020200, 0x08020000, 0x08020008, 0x00020208,
+ 0x08000208, 0x00020200, 0x00020000, 0x08000208,
+ 0x00000008, 0x08020208, 0x00000200, 0x08000000,
+ 0x08020200, 0x08000000, 0x00020008, 0x00000208,
+ 0x00020000, 0x08020200, 0x08000200, 0x00000000,
+ 0x00000200, 0x00020008, 0x08020208, 0x08000200,
+ 0x08000008, 0x00000200, 0x00000000, 0x08020008,
+ 0x08000208, 0x00020000, 0x08000000, 0x08020208,
+ 0x00000008, 0x00020208, 0x00020200, 0x08000008,
+ 0x08020000, 0x08000208, 0x00000208, 0x08020000,
+ 0x00020208, 0x00000008, 0x08020008, 0x00020200
+};
+
+static const u32 S4[64] = {
+ 0x00802001, 0x00002081, 0x00002081, 0x00000080,
+ 0x00802080, 0x00800081, 0x00800001, 0x00002001,
+ 0x00000000, 0x00802000, 0x00802000, 0x00802081,
+ 0x00000081, 0x00000000, 0x00800080, 0x00800001,
+ 0x00000001, 0x00002000, 0x00800000, 0x00802001,
+ 0x00000080, 0x00800000, 0x00002001, 0x00002080,
+ 0x00800081, 0x00000001, 0x00002080, 0x00800080,
+ 0x00002000, 0x00802080, 0x00802081, 0x00000081,
+ 0x00800080, 0x00800001, 0x00802000, 0x00802081,
+ 0x00000081, 0x00000000, 0x00000000, 0x00802000,
+ 0x00002080, 0x00800080, 0x00800081, 0x00000001,
+ 0x00802001, 0x00002081, 0x00002081, 0x00000080,
+ 0x00802081, 0x00000081, 0x00000001, 0x00002000,
+ 0x00800001, 0x00002001, 0x00802080, 0x00800081,
+ 0x00002001, 0x00002080, 0x00800000, 0x00802001,
+ 0x00000080, 0x00800000, 0x00002000, 0x00802080
+};
+
+static const u32 S5[64] = {
+ 0x00000100, 0x02080100, 0x02080000, 0x42000100,
+ 0x00080000, 0x00000100, 0x40000000, 0x02080000,
+ 0x40080100, 0x00080000, 0x02000100, 0x40080100,
+ 0x42000100, 0x42080000, 0x00080100, 0x40000000,
+ 0x02000000, 0x40080000, 0x40080000, 0x00000000,
+ 0x40000100, 0x42080100, 0x42080100, 0x02000100,
+ 0x42080000, 0x40000100, 0x00000000, 0x42000000,
+ 0x02080100, 0x02000000, 0x42000000, 0x00080100,
+ 0x00080000, 0x42000100, 0x00000100, 0x02000000,
+ 0x40000000, 0x02080000, 0x42000100, 0x40080100,
+ 0x02000100, 0x40000000, 0x42080000, 0x02080100,
+ 0x40080100, 0x00000100, 0x02000000, 0x42080000,
+ 0x42080100, 0x00080100, 0x42000000, 0x42080100,
+ 0x02080000, 0x00000000, 0x40080000, 0x42000000,
+ 0x00080100, 0x02000100, 0x40000100, 0x00080000,
+ 0x00000000, 0x40080000, 0x02080100, 0x40000100
+};
+
+static const u32 S6[64] = {
+ 0x20000010, 0x20400000, 0x00004000, 0x20404010,
+ 0x20400000, 0x00000010, 0x20404010, 0x00400000,
+ 0x20004000, 0x00404010, 0x00400000, 0x20000010,
+ 0x00400010, 0x20004000, 0x20000000, 0x00004010,
+ 0x00000000, 0x00400010, 0x20004010, 0x00004000,
+ 0x00404000, 0x20004010, 0x00000010, 0x20400010,
+ 0x20400010, 0x00000000, 0x00404010, 0x20404000,
+ 0x00004010, 0x00404000, 0x20404000, 0x20000000,
+ 0x20004000, 0x00000010, 0x20400010, 0x00404000,
+ 0x20404010, 0x00400000, 0x00004010, 0x20000010,
+ 0x00400000, 0x20004000, 0x20000000, 0x00004010,
+ 0x20000010, 0x20404010, 0x00404000, 0x20400000,
+ 0x00404010, 0x20404000, 0x00000000, 0x20400010,
+ 0x00000010, 0x00004000, 0x20400000, 0x00404010,
+ 0x00004000, 0x00400010, 0x20004010, 0x00000000,
+ 0x20404000, 0x20000000, 0x00400010, 0x20004010
+};
+
+static const u32 S7[64] = {
+ 0x00200000, 0x04200002, 0x04000802, 0x00000000,
+ 0x00000800, 0x04000802, 0x00200802, 0x04200800,
+ 0x04200802, 0x00200000, 0x00000000, 0x04000002,
+ 0x00000002, 0x04000000, 0x04200002, 0x00000802,
+ 0x04000800, 0x00200802, 0x00200002, 0x04000800,
+ 0x04000002, 0x04200000, 0x04200800, 0x00200002,
+ 0x04200000, 0x00000800, 0x00000802, 0x04200802,
+ 0x00200800, 0x00000002, 0x04000000, 0x00200800,
+ 0x04000000, 0x00200800, 0x00200000, 0x04000802,
+ 0x04000802, 0x04200002, 0x04200002, 0x00000002,
+ 0x00200002, 0x04000000, 0x04000800, 0x00200000,
+ 0x04200800, 0x00000802, 0x00200802, 0x04200800,
+ 0x00000802, 0x04000002, 0x04200802, 0x04200000,
+ 0x00200800, 0x00000000, 0x00000002, 0x04200802,
+ 0x00000000, 0x00200802, 0x04200000, 0x00000800,
+ 0x04000002, 0x04000800, 0x00000800, 0x00200002
+};
+
+static const u32 S8[64] = {
+ 0x10001040, 0x00001000, 0x00040000, 0x10041040,
+ 0x10000000, 0x10001040, 0x00000040, 0x10000000,
+ 0x00040040, 0x10040000, 0x10041040, 0x00041000,
+ 0x10041000, 0x00041040, 0x00001000, 0x00000040,
+ 0x10040000, 0x10000040, 0x10001000, 0x00001040,
+ 0x00041000, 0x00040040, 0x10040040, 0x10041000,
+ 0x00001040, 0x00000000, 0x00000000, 0x10040040,
+ 0x10000040, 0x10001000, 0x00041040, 0x00040000,
+ 0x00041040, 0x00040000, 0x10041000, 0x00001000,
+ 0x00000040, 0x10040040, 0x00001000, 0x00041040,
+ 0x10001000, 0x00000040, 0x10000040, 0x10040000,
+ 0x10040040, 0x10000000, 0x00040000, 0x10001040,
+ 0x00000000, 0x10041040, 0x00040040, 0x10000040,
+ 0x10040000, 0x10001000, 0x10001040, 0x00000000,
+ 0x10041040, 0x00041000, 0x00041000, 0x00001040,
+ 0x00001040, 0x00040040, 0x10000000, 0x10041000
+};
+
+/* Encryption components: IP, FP, and round function */
+
+#define IP(L, R, T) \
+ ROL(R, 4); \
+ T = L; \
+ L ^= R; \
+ L &= 0xf0f0f0f0; \
+ R ^= L; \
+ L ^= T; \
+ ROL(R, 12); \
+ T = L; \
+ L ^= R; \
+ L &= 0xffff0000; \
+ R ^= L; \
+ L ^= T; \
+ ROR(R, 14); \
+ T = L; \
+ L ^= R; \
+ L &= 0xcccccccc; \
+ R ^= L; \
+ L ^= T; \
+ ROL(R, 6); \
+ T = L; \
+ L ^= R; \
+ L &= 0xff00ff00; \
+ R ^= L; \
+ L ^= T; \
+ ROR(R, 7); \
+ T = L; \
+ L ^= R; \
+ L &= 0xaaaaaaaa; \
+ R ^= L; \
+ L ^= T; \
+ ROL(L, 1);
+
+#define FP(L, R, T) \
+ ROR(L, 1); \
+ T = L; \
+ L ^= R; \
+ L &= 0xaaaaaaaa; \
+ R ^= L; \
+ L ^= T; \
+ ROL(R, 7); \
+ T = L; \
+ L ^= R; \
+ L &= 0xff00ff00; \
+ R ^= L; \
+ L ^= T; \
+ ROR(R, 6); \
+ T = L; \
+ L ^= R; \
+ L &= 0xcccccccc; \
+ R ^= L; \
+ L ^= T; \
+ ROL(R, 14); \
+ T = L; \
+ L ^= R; \
+ L &= 0xffff0000; \
+ R ^= L; \
+ L ^= T; \
+ ROR(R, 12); \
+ T = L; \
+ L ^= R; \
+ L &= 0xf0f0f0f0; \
+ R ^= L; \
+ L ^= T; \
+ ROR(R, 4);
+
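The IP and FP macros above implement DES's initial and final permutations as
a sequence of rotates and masked bit swaps rather than table lookups; FP runs
the same steps in reverse order to undo IP. Each five-statement group
(T = L; L ^= R; L &= mask; R ^= L; L ^= T) exchanges exactly the bits
selected by 'mask' between the two words, with the adjacent rotate lining the
bit groups up first. A minimal sketch of that primitive, for illustration
only:

    /* Exchange the bits of *l and *r selected by mask; equivalent to
     * one T = L; L ^= R; L &= mask; R ^= L; L ^= T group above.
     */
    static inline void masked_swap(u32 *l, u32 *r, u32 mask)
    {
            u32 t = (*l ^ *r) & mask;

            *l ^= t;
            *r ^= t;
    }
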
+#define ROUND(L, R, A, B, K, d) \
+ B = K[0]; A = K[1]; K += d; \
+ B ^= R; A ^= R; \
+ B &= 0x3f3f3f3f; ROR(A, 4); \
+ L ^= S8[0xff & B]; A &= 0x3f3f3f3f; \
+ L ^= S6[0xff & (B >> 8)]; B >>= 16; \
+ L ^= S7[0xff & A]; \
+ L ^= S5[0xff & (A >> 8)]; A >>= 16; \
+ L ^= S4[0xff & B]; \
+ L ^= S2[0xff & (B >> 8)]; \
+ L ^= S3[0xff & A]; \
+ L ^= S1[0xff & (A >> 8)];
+
+/*
+ * PC2 lookup tables are organized as 2 consecutive sets of 4 interleaved
+ * tables of 128 elements. One set is for C_i and the other for D_i, while
+ * the 4 interleaved tables correspond to four 7-bit subsets of C_i or D_i.
+ *
+ * After PC1 each of the variables a,b,c,d contains a 7 bit subset of C_i
+ * or D_i in bits 7-1 (bit 0 being the least significant).
+ */
+
+#define T1(x) pt[2 * (x) + 0]
+#define T2(x) pt[2 * (x) + 1]
+#define T3(x) pt[2 * (x) + 2]
+#define T4(x) pt[2 * (x) + 3]
+
+#define DES_PC2(a, b, c, d) (T4(d) | T3(c) | T2(b) | T1(a))
+
+/*
+ * Encryption key expansion
+ *
+ * RFC2451: Weak key checks SHOULD be performed.
+ *
+ * FIPS 74:
+ *
+ * Keys having duals are keys which produce all zeros, all ones, or
+ * alternating zero-one patterns in the C and D registers after Permuted
+ * Choice 1 has operated on the key.
+ *
+ */
+static unsigned long des_ekey(u32 *pe, const u8 *k)
+{
+ /* K&R: long is at least 32 bits */
+ unsigned long a, b, c, d, w;
+ const u32 *pt = pc2;
+
+ d = k[4]; d &= 0x0e; d <<= 4; d |= k[0] & 0x1e; d = pc1[d];
+ c = k[5]; c &= 0x0e; c <<= 4; c |= k[1] & 0x1e; c = pc1[c];
+ b = k[6]; b &= 0x0e; b <<= 4; b |= k[2] & 0x1e; b = pc1[b];
+ a = k[7]; a &= 0x0e; a <<= 4; a |= k[3] & 0x1e; a = pc1[a];
+
+ pe[15 * 2 + 0] = DES_PC2(a, b, c, d); d = rs[d];
+ pe[14 * 2 + 0] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
+ pe[13 * 2 + 0] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
+ pe[12 * 2 + 0] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
+ pe[11 * 2 + 0] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
+ pe[10 * 2 + 0] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
+ pe[ 9 * 2 + 0] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
+ pe[ 8 * 2 + 0] = DES_PC2(d, a, b, c); c = rs[c];
+ pe[ 7 * 2 + 0] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
+ pe[ 6 * 2 + 0] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
+ pe[ 5 * 2 + 0] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
+ pe[ 4 * 2 + 0] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
+ pe[ 3 * 2 + 0] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
+ pe[ 2 * 2 + 0] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
+ pe[ 1 * 2 + 0] = DES_PC2(c, d, a, b); b = rs[b];
+ pe[ 0 * 2 + 0] = DES_PC2(b, c, d, a);
+
+ /* Check if first half is weak */
+ w = (a ^ c) | (b ^ d) | (rs[a] ^ c) | (b ^ rs[d]);
+
+ /* Skip to next table set */
+ pt += 512;
+
+ d = k[0]; d &= 0xe0; d >>= 4; d |= k[4] & 0xf0; d = pc1[d + 1];
+ c = k[1]; c &= 0xe0; c >>= 4; c |= k[5] & 0xf0; c = pc1[c + 1];
+ b = k[2]; b &= 0xe0; b >>= 4; b |= k[6] & 0xf0; b = pc1[b + 1];
+ a = k[3]; a &= 0xe0; a >>= 4; a |= k[7] & 0xf0; a = pc1[a + 1];
+
+ /* Check if second half is weak */
+ w |= (a ^ c) | (b ^ d) | (rs[a] ^ c) | (b ^ rs[d]);
+
+ pe[15 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d];
+ pe[14 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
+ pe[13 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
+ pe[12 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
+ pe[11 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
+ pe[10 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
+ pe[ 9 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
+ pe[ 8 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c];
+ pe[ 7 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
+ pe[ 6 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
+ pe[ 5 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
+ pe[ 4 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
+ pe[ 3 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
+ pe[ 2 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
+ pe[ 1 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b];
+ pe[ 0 * 2 + 1] = DES_PC2(b, c, d, a);
+
+ /* Fixup: 2413 5768 -> 1357 2468 */
+ for (d = 0; d < 16; ++d) {
+ a = pe[2 * d];
+ b = pe[2 * d + 1];
+ c = a ^ b;
+ c &= 0xffff0000;
+ a ^= c;
+ b ^= c;
+ ROL(b, 18);
+ pe[2 * d] = a;
+ pe[2 * d + 1] = b;
+ }
+
+ /* Zero if weak key */
+ return w;
+}
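
The value w computed above collects per-half differences that all vanish
when the C and D registers fall into the degenerate all-zero, all-one or
alternating patterns quoted from FIPS 74 in the comment above, i.e. when the
key has a dual. The four classic DES weak keys (ignoring parity bits:
01 01 01 01 01 01 01 01, fe fe fe fe fe fe fe fe, e0 e0 e0 e0 f1 f1 f1 f1
and 1f 1f 1f 1f 0e 0e 0e 0e) are the canonical examples; des_expand_key()
below maps w == 0 to -ENOKEY.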
+
+int des_expand_key(struct des_ctx *ctx, const u8 *key, unsigned int keylen)
+{
+ if (keylen != DES_KEY_SIZE)
+ return -EINVAL;
+
+ return des_ekey(ctx->expkey, key) ? 0 : -ENOKEY;
+}
+EXPORT_SYMBOL_GPL(des_expand_key);
+
+/*
+ * Decryption key expansion
+ *
+ * No weak key checking is performed, as this is only used by triple DES
+ *
+ */
+static void dkey(u32 *pe, const u8 *k)
+{
+ /* K&R: long is at least 32 bits */
+ unsigned long a, b, c, d;
+ const u32 *pt = pc2;
+
+ d = k[4]; d &= 0x0e; d <<= 4; d |= k[0] & 0x1e; d = pc1[d];
+ c = k[5]; c &= 0x0e; c <<= 4; c |= k[1] & 0x1e; c = pc1[c];
+ b = k[6]; b &= 0x0e; b <<= 4; b |= k[2] & 0x1e; b = pc1[b];
+ a = k[7]; a &= 0x0e; a <<= 4; a |= k[3] & 0x1e; a = pc1[a];
+
+ pe[ 0 * 2] = DES_PC2(a, b, c, d); d = rs[d];
+ pe[ 1 * 2] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
+ pe[ 2 * 2] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
+ pe[ 3 * 2] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
+ pe[ 4 * 2] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
+ pe[ 5 * 2] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
+ pe[ 6 * 2] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
+ pe[ 7 * 2] = DES_PC2(d, a, b, c); c = rs[c];
+ pe[ 8 * 2] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
+ pe[ 9 * 2] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
+ pe[10 * 2] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
+ pe[11 * 2] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
+ pe[12 * 2] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
+ pe[13 * 2] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
+ pe[14 * 2] = DES_PC2(c, d, a, b); b = rs[b];
+ pe[15 * 2] = DES_PC2(b, c, d, a);
+
+ /* Skip to next table set */
+ pt += 512;
+
+ d = k[0]; d &= 0xe0; d >>= 4; d |= k[4] & 0xf0; d = pc1[d + 1];
+ c = k[1]; c &= 0xe0; c >>= 4; c |= k[5] & 0xf0; c = pc1[c + 1];
+ b = k[2]; b &= 0xe0; b >>= 4; b |= k[6] & 0xf0; b = pc1[b + 1];
+ a = k[3]; a &= 0xe0; a >>= 4; a |= k[7] & 0xf0; a = pc1[a + 1];
+
+ pe[ 0 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d];
+ pe[ 1 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
+ pe[ 2 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
+ pe[ 3 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
+ pe[ 4 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
+ pe[ 5 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
+ pe[ 6 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
+ pe[ 7 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c];
+ pe[ 8 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
+ pe[ 9 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
+ pe[10 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
+ pe[11 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
+ pe[12 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
+ pe[13 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
+ pe[14 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b];
+ pe[15 * 2 + 1] = DES_PC2(b, c, d, a);
+
+ /* Fixup: 2413 5768 -> 1357 2468 */
+ for (d = 0; d < 16; ++d) {
+ a = pe[2 * d];
+ b = pe[2 * d + 1];
+ c = a ^ b;
+ c &= 0xffff0000;
+ a ^= c;
+ b ^= c;
+ ROL(b, 18);
+ pe[2 * d] = a;
+ pe[2 * d + 1] = b;
+ }
+}
+
+void des_encrypt(const struct des_ctx *ctx, u8 *dst, const u8 *src)
+{
+ const u32 *K = ctx->expkey;
+ u32 L, R, A, B;
+ int i;
+
+ L = get_unaligned_le32(src);
+ R = get_unaligned_le32(src + 4);
+
+ IP(L, R, A);
+ for (i = 0; i < 8; i++) {
+ ROUND(L, R, A, B, K, 2);
+ ROUND(R, L, A, B, K, 2);
+ }
+ FP(R, L, A);
+
+ put_unaligned_le32(R, dst);
+ put_unaligned_le32(L, dst + 4);
+}
+EXPORT_SYMBOL_GPL(des_encrypt);
+
+void des_decrypt(const struct des_ctx *ctx, u8 *dst, const u8 *src)
+{
+ const u32 *K = ctx->expkey + DES_EXPKEY_WORDS - 2;
+ u32 L, R, A, B;
+ int i;
+
+ L = get_unaligned_le32(src);
+ R = get_unaligned_le32(src + 4);
+
+ IP(L, R, A);
+ for (i = 0; i < 8; i++) {
+ ROUND(L, R, A, B, K, -2);
+ ROUND(R, L, A, B, K, -2);
+ }
+ FP(R, L, A);
+
+ put_unaligned_le32(R, dst);
+ put_unaligned_le32(L, dst + 4);
+}
+EXPORT_SYMBOL_GPL(des_decrypt);
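
Together, des_expand_key(), des_encrypt() and des_decrypt() form the whole
single-DES library interface from <crypto/des.h>. A minimal roundtrip
sketch (the wrapper name and buffers are illustrative):

    static int des_roundtrip(const u8 key[DES_KEY_SIZE],
                             u8 block[DES_BLOCK_SIZE])
    {
            struct des_ctx ctx;
            int err;

            err = des_expand_key(&ctx, key, DES_KEY_SIZE);
            if (err)
                    return err;     /* -EINVAL: bad length, -ENOKEY: weak key */

            des_encrypt(&ctx, block, block);        /* in-place is fine */
            des_decrypt(&ctx, block, block);        /* restores the input */
            return 0;
    }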
+
+int des3_ede_expand_key(struct des3_ede_ctx *ctx, const u8 *key,
+ unsigned int keylen)
+{
+ u32 *pe = ctx->expkey;
+ int err;
+
+ if (keylen != DES3_EDE_KEY_SIZE)
+ return -EINVAL;
+
+ err = des3_ede_verify_key(key, keylen, true);
+ if (err && err != -ENOKEY)
+ return err;
+
+ des_ekey(pe, key); pe += DES_EXPKEY_WORDS; key += DES_KEY_SIZE;
+ dkey(pe, key); pe += DES_EXPKEY_WORDS; key += DES_KEY_SIZE;
+ des_ekey(pe, key);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(des3_ede_expand_key);
+
+void des3_ede_encrypt(const struct des3_ede_ctx *dctx, u8 *dst, const u8 *src)
+{
+ const u32 *K = dctx->expkey;
+ u32 L, R, A, B;
+ int i;
+
+ L = get_unaligned_le32(src);
+ R = get_unaligned_le32(src + 4);
+
+ IP(L, R, A);
+ for (i = 0; i < 8; i++) {
+ ROUND(L, R, A, B, K, 2);
+ ROUND(R, L, A, B, K, 2);
+ }
+ for (i = 0; i < 8; i++) {
+ ROUND(R, L, A, B, K, 2);
+ ROUND(L, R, A, B, K, 2);
+ }
+ for (i = 0; i < 8; i++) {
+ ROUND(L, R, A, B, K, 2);
+ ROUND(R, L, A, B, K, 2);
+ }
+ FP(R, L, A);
+
+ put_unaligned_le32(R, dst);
+ put_unaligned_le32(L, dst + 4);
+}
+EXPORT_SYMBOL_GPL(des3_ede_encrypt);
+
+void des3_ede_decrypt(const struct des3_ede_ctx *dctx, u8 *dst, const u8 *src)
+{
+ const u32 *K = dctx->expkey + DES3_EDE_EXPKEY_WORDS - 2;
+ u32 L, R, A, B;
+ int i;
+
+ L = get_unaligned_le32(src);
+ R = get_unaligned_le32(src + 4);
+
+ IP(L, R, A);
+ for (i = 0; i < 8; i++) {
+ ROUND(L, R, A, B, K, -2);
+ ROUND(R, L, A, B, K, -2);
+ }
+ for (i = 0; i < 8; i++) {
+ ROUND(R, L, A, B, K, -2);
+ ROUND(L, R, A, B, K, -2);
+ }
+ for (i = 0; i < 8; i++) {
+ ROUND(L, R, A, B, K, -2);
+ ROUND(R, L, A, B, K, -2);
+ }
+ FP(R, L, A);
+
+ put_unaligned_le32(R, dst);
+ put_unaligned_le32(L, dst + 4);
+}
+EXPORT_SYMBOL_GPL(des3_ede_decrypt);
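
Note the error contract visible in des3_ede_expand_key() above: when
des3_ede_verify_key() reports -ENOKEY, the schedule is still expanded and
-ENOKEY is passed through, leaving the weak-key policy to the caller. A
hedged caller sketch (the helper name and allow_weak parameter are
illustrative):

    static int des3_setkey(struct des3_ede_ctx *ctx, const u8 *key,
                           unsigned int keylen, bool allow_weak)
    {
            int err = des3_ede_expand_key(ctx, key, keylen);

            if (err == -ENOKEY && allow_weak)
                    return 0;       /* schedule was expanded despite the flag */
            return err;             /* 0, -EINVAL (length) or -ENOKEY */
    }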
+
+MODULE_LICENSE("GPL");
diff --git a/lib/crypto/gf128mul.c b/lib/crypto/gf128mul.c
new file mode 100644
index 000000000..8f8c45e0c
--- /dev/null
+++ b/lib/crypto/gf128mul.c
@@ -0,0 +1,436 @@
+/* gf128mul.c - GF(2^128) multiplication functions
+ *
+ * Copyright (c) 2003, Dr Brian Gladman, Worcester, UK.
+ * Copyright (c) 2006, Rik Snel <rsnel@cube.dyndns.org>
+ *
+ * Based on Dr Brian Gladman's (GPL'd) work published at
+ * http://gladman.plushost.co.uk/oldsite/cryptography_technology/index.php
+ * See the original copyright notice below.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+/*
+ ---------------------------------------------------------------------------
+ Copyright (c) 2003, Dr Brian Gladman, Worcester, UK. All rights reserved.
+
+ LICENSE TERMS
+
+ The free distribution and use of this software in both source and binary
+ form is allowed (with or without changes) provided that:
+
+ 1. distributions of this source code include the above copyright
+ notice, this list of conditions and the following disclaimer;
+
+ 2. distributions in binary form include the above copyright
+ notice, this list of conditions and the following disclaimer
+ in the documentation and/or other associated materials;
+
+ 3. the copyright holder's name is not used to endorse products
+ built using this software without specific written permission.
+
+ ALTERNATIVELY, provided that this notice is retained in full, this product
+ may be distributed under the terms of the GNU General Public License (GPL),
+ in which case the provisions of the GPL apply INSTEAD OF those given above.
+
+ DISCLAIMER
+
+ This software is provided 'as is' with no explicit or implied warranties
+ in respect of its properties, including, but not limited to, correctness
+ and/or fitness for purpose.
+ ---------------------------------------------------------------------------
+ Issue 31/01/2006
+
+ This file provides fast multiplication in GF(2^128) as required by several
+ cryptographic authentication modes
+*/
+
+#include <crypto/gf128mul.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#define gf128mul_dat(q) { \
+ q(0x00), q(0x01), q(0x02), q(0x03), q(0x04), q(0x05), q(0x06), q(0x07),\
+ q(0x08), q(0x09), q(0x0a), q(0x0b), q(0x0c), q(0x0d), q(0x0e), q(0x0f),\
+ q(0x10), q(0x11), q(0x12), q(0x13), q(0x14), q(0x15), q(0x16), q(0x17),\
+ q(0x18), q(0x19), q(0x1a), q(0x1b), q(0x1c), q(0x1d), q(0x1e), q(0x1f),\
+ q(0x20), q(0x21), q(0x22), q(0x23), q(0x24), q(0x25), q(0x26), q(0x27),\
+ q(0x28), q(0x29), q(0x2a), q(0x2b), q(0x2c), q(0x2d), q(0x2e), q(0x2f),\
+ q(0x30), q(0x31), q(0x32), q(0x33), q(0x34), q(0x35), q(0x36), q(0x37),\
+ q(0x38), q(0x39), q(0x3a), q(0x3b), q(0x3c), q(0x3d), q(0x3e), q(0x3f),\
+ q(0x40), q(0x41), q(0x42), q(0x43), q(0x44), q(0x45), q(0x46), q(0x47),\
+ q(0x48), q(0x49), q(0x4a), q(0x4b), q(0x4c), q(0x4d), q(0x4e), q(0x4f),\
+ q(0x50), q(0x51), q(0x52), q(0x53), q(0x54), q(0x55), q(0x56), q(0x57),\
+ q(0x58), q(0x59), q(0x5a), q(0x5b), q(0x5c), q(0x5d), q(0x5e), q(0x5f),\
+ q(0x60), q(0x61), q(0x62), q(0x63), q(0x64), q(0x65), q(0x66), q(0x67),\
+ q(0x68), q(0x69), q(0x6a), q(0x6b), q(0x6c), q(0x6d), q(0x6e), q(0x6f),\
+ q(0x70), q(0x71), q(0x72), q(0x73), q(0x74), q(0x75), q(0x76), q(0x77),\
+ q(0x78), q(0x79), q(0x7a), q(0x7b), q(0x7c), q(0x7d), q(0x7e), q(0x7f),\
+ q(0x80), q(0x81), q(0x82), q(0x83), q(0x84), q(0x85), q(0x86), q(0x87),\
+ q(0x88), q(0x89), q(0x8a), q(0x8b), q(0x8c), q(0x8d), q(0x8e), q(0x8f),\
+ q(0x90), q(0x91), q(0x92), q(0x93), q(0x94), q(0x95), q(0x96), q(0x97),\
+ q(0x98), q(0x99), q(0x9a), q(0x9b), q(0x9c), q(0x9d), q(0x9e), q(0x9f),\
+ q(0xa0), q(0xa1), q(0xa2), q(0xa3), q(0xa4), q(0xa5), q(0xa6), q(0xa7),\
+ q(0xa8), q(0xa9), q(0xaa), q(0xab), q(0xac), q(0xad), q(0xae), q(0xaf),\
+ q(0xb0), q(0xb1), q(0xb2), q(0xb3), q(0xb4), q(0xb5), q(0xb6), q(0xb7),\
+ q(0xb8), q(0xb9), q(0xba), q(0xbb), q(0xbc), q(0xbd), q(0xbe), q(0xbf),\
+ q(0xc0), q(0xc1), q(0xc2), q(0xc3), q(0xc4), q(0xc5), q(0xc6), q(0xc7),\
+ q(0xc8), q(0xc9), q(0xca), q(0xcb), q(0xcc), q(0xcd), q(0xce), q(0xcf),\
+ q(0xd0), q(0xd1), q(0xd2), q(0xd3), q(0xd4), q(0xd5), q(0xd6), q(0xd7),\
+ q(0xd8), q(0xd9), q(0xda), q(0xdb), q(0xdc), q(0xdd), q(0xde), q(0xdf),\
+ q(0xe0), q(0xe1), q(0xe2), q(0xe3), q(0xe4), q(0xe5), q(0xe6), q(0xe7),\
+ q(0xe8), q(0xe9), q(0xea), q(0xeb), q(0xec), q(0xed), q(0xee), q(0xef),\
+ q(0xf0), q(0xf1), q(0xf2), q(0xf3), q(0xf4), q(0xf5), q(0xf6), q(0xf7),\
+ q(0xf8), q(0xf9), q(0xfa), q(0xfb), q(0xfc), q(0xfd), q(0xfe), q(0xff) \
+}
+
+/*
+ * Given a value i in 0..255 as the byte overflow when a field element
+ * in GF(2^128) is multiplied by x^8, the following macro returns the
+ * 16-bit value that must be XOR-ed into the low-degree end of the
+ * product to reduce it modulo the polynomial x^128 + x^7 + x^2 + x + 1.
+ *
+ * There are two versions of the macro, and hence two tables: one for
+ * the "be" convention where the highest-order bit is the coefficient of
+ * the highest-degree polynomial term, and one for the "le" convention
+ * where the highest-order bit is the coefficient of the lowest-degree
+ * polynomial term. In both cases the values are stored in CPU byte
+ * endianness such that the coefficients are ordered consistently across
+ * bytes, i.e. in the "be" table bits 15..0 of the stored value
+ * correspond to the coefficients of x^15..x^0, and in the "le" table
+ * bits 15..0 correspond to the coefficients of x^0..x^15.
+ *
+ * Therefore, provided that the appropriate byte endianness conversions
+ * are done by the multiplication functions (and these must be in place
+ * anyway to support both little endian and big endian CPUs), the "be"
+ * table can be used for multiplications of both "bbe" and "ble"
+ * elements, and the "le" table can be used for multiplications of both
+ * "lle" and "lbe" elements.
+ */
+
+#define xda_be(i) ( \
+ (i & 0x80 ? 0x4380 : 0) ^ (i & 0x40 ? 0x21c0 : 0) ^ \
+ (i & 0x20 ? 0x10e0 : 0) ^ (i & 0x10 ? 0x0870 : 0) ^ \
+ (i & 0x08 ? 0x0438 : 0) ^ (i & 0x04 ? 0x021c : 0) ^ \
+ (i & 0x02 ? 0x010e : 0) ^ (i & 0x01 ? 0x0087 : 0) \
+)
+
+#define xda_le(i) ( \
+ (i & 0x80 ? 0xe100 : 0) ^ (i & 0x40 ? 0x7080 : 0) ^ \
+ (i & 0x20 ? 0x3840 : 0) ^ (i & 0x10 ? 0x1c20 : 0) ^ \
+ (i & 0x08 ? 0x0e10 : 0) ^ (i & 0x04 ? 0x0708 : 0) ^ \
+ (i & 0x02 ? 0x0384 : 0) ^ (i & 0x01 ? 0x01c2 : 0) \
+)
+
+static const u16 gf128mul_table_le[256] = gf128mul_dat(xda_le);
+static const u16 gf128mul_table_be[256] = gf128mul_dat(xda_be);
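
The table entries can be checked against the reduction rule directly: an
overflow bit of weight k contributes x^(128+k) = (x^7+x^2+x+1) * x^k, and
x^7+x^2+x+1 is 0x0087 in the "be" bit convention described above (0xe100 in
the "le" convention). A reference reimplementation of xda_be(), for
illustration only:

    /* Every entry of gf128mul_table_be[] equals xda_be_ref() of its
     * index; not part of the module, just a sanity-check sketch.
     */
    static u16 xda_be_ref(u8 i)
    {
            u16 r = 0;
            int k;

            for (k = 0; k < 8; k++)
                    if (i & (1 << k))
                            r ^= 0x0087 << k;   /* (x^7+x^2+x+1) * x^k */
            return r;
    }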
+
+/*
+ * The following functions multiply a field element by x^8 in
+ * the polynomial field representation. They use 64-bit word operations
+ * to gain speed but compensate for machine endianness and hence work
+ * correctly on both styles of machine.
+ */
+
+static void gf128mul_x8_lle(be128 *x)
+{
+ u64 a = be64_to_cpu(x->a);
+ u64 b = be64_to_cpu(x->b);
+ u64 _tt = gf128mul_table_le[b & 0xff];
+
+ x->b = cpu_to_be64((b >> 8) | (a << 56));
+ x->a = cpu_to_be64((a >> 8) ^ (_tt << 48));
+}
+
+/* time invariant version of gf128mul_x8_lle */
+static void gf128mul_x8_lle_ti(be128 *x)
+{
+ u64 a = be64_to_cpu(x->a);
+ u64 b = be64_to_cpu(x->b);
+ u64 _tt = xda_le(b & 0xff); /* avoid table lookup */
+
+ x->b = cpu_to_be64((b >> 8) | (a << 56));
+ x->a = cpu_to_be64((a >> 8) ^ (_tt << 48));
+}
+
+static void gf128mul_x8_bbe(be128 *x)
+{
+ u64 a = be64_to_cpu(x->a);
+ u64 b = be64_to_cpu(x->b);
+ u64 _tt = gf128mul_table_be[a >> 56];
+
+ x->a = cpu_to_be64((a << 8) | (b >> 56));
+ x->b = cpu_to_be64((b << 8) ^ _tt);
+}
+
+void gf128mul_x8_ble(le128 *r, const le128 *x)
+{
+ u64 a = le64_to_cpu(x->a);
+ u64 b = le64_to_cpu(x->b);
+ u64 _tt = gf128mul_table_be[a >> 56];
+
+ r->a = cpu_to_le64((a << 8) | (b >> 56));
+ r->b = cpu_to_le64((b << 8) ^ _tt);
+}
+EXPORT_SYMBOL(gf128mul_x8_ble);
+
+void gf128mul_lle(be128 *r, const be128 *b)
+{
+ /*
+ * The p array should be aligned to twice the size of its element type,
+ * so that every even/odd pair is guaranteed to share a cacheline
+ * (assuming a cacheline size of 32 bytes or more, which is by far the
+ * most common). This ensures that each be128_xor() call in the loop
+ * takes the same amount of time regardless of the value of 'ch', which
+ * is derived from function parameter 'b', which is commonly used as a
+ * key, e.g., for GHASH. The odd array elements are all set to zero,
+ * making each be128_xor() a NOP if its associated bit in 'ch' is not
+ * set, and this is equivalent to calling be128_xor() conditionally.
+ * This approach aims to avoid leaking information about such keys
+ * through execution time variances.
+ *
+ * Unfortunately, __aligned(16) or higher does not work on x86 for
+ * variables on the stack so we need to perform the alignment by hand.
+ */
+ be128 array[16 + 3] = {};
+ be128 *p = PTR_ALIGN(&array[0], 2 * sizeof(be128));
+ int i;
+
+ p[0] = *r;
+ for (i = 0; i < 7; ++i)
+ gf128mul_x_lle(&p[2 * i + 2], &p[2 * i]);
+
+ memset(r, 0, sizeof(*r));
+ for (i = 0;;) {
+ u8 ch = ((u8 *)b)[15 - i];
+
+ be128_xor(r, r, &p[ 0 + !(ch & 0x80)]);
+ be128_xor(r, r, &p[ 2 + !(ch & 0x40)]);
+ be128_xor(r, r, &p[ 4 + !(ch & 0x20)]);
+ be128_xor(r, r, &p[ 6 + !(ch & 0x10)]);
+ be128_xor(r, r, &p[ 8 + !(ch & 0x08)]);
+ be128_xor(r, r, &p[10 + !(ch & 0x04)]);
+ be128_xor(r, r, &p[12 + !(ch & 0x02)]);
+ be128_xor(r, r, &p[14 + !(ch & 0x01)]);
+
+ if (++i >= 16)
+ break;
+
+ gf128mul_x8_lle_ti(r); /* use the time invariant version */
+ }
+}
+EXPORT_SYMBOL(gf128mul_lle);
+
+void gf128mul_bbe(be128 *r, const be128 *b)
+{
+ be128 p[8];
+ int i;
+
+ p[0] = *r;
+ for (i = 0; i < 7; ++i)
+ gf128mul_x_bbe(&p[i + 1], &p[i]);
+
+ memset(r, 0, sizeof(*r));
+ for (i = 0;;) {
+ u8 ch = ((u8 *)b)[i];
+
+ if (ch & 0x80)
+ be128_xor(r, r, &p[7]);
+ if (ch & 0x40)
+ be128_xor(r, r, &p[6]);
+ if (ch & 0x20)
+ be128_xor(r, r, &p[5]);
+ if (ch & 0x10)
+ be128_xor(r, r, &p[4]);
+ if (ch & 0x08)
+ be128_xor(r, r, &p[3]);
+ if (ch & 0x04)
+ be128_xor(r, r, &p[2]);
+ if (ch & 0x02)
+ be128_xor(r, r, &p[1]);
+ if (ch & 0x01)
+ be128_xor(r, r, &p[0]);
+
+ if (++i >= 16)
+ break;
+
+ gf128mul_x8_bbe(r);
+ }
+}
+EXPORT_SYMBOL(gf128mul_bbe);
+
+/*
+ * This version uses 64 kbytes of table space.
+ *
+ * A 16 byte buffer has to be multiplied by a 16 byte key value in
+ * GF(2^128). If we consider a GF(2^128) value in the buffer's lowest
+ * byte, we can construct a table of the 256 16 byte values that result
+ * from the 256 values of this byte. This requires 4096 bytes. But we
+ * also need tables for each of the 16 higher bytes in the buffer as
+ * well, which makes 64 kbytes in total.
+ *
+ * In other words:
+ *	t[0][BYTE] contains g * BYTE
+ *	t[1][BYTE] contains g * x^8 * BYTE
+ *	...
+ *	t[15][BYTE] contains g * x^120 * BYTE
+ */
+struct gf128mul_64k *gf128mul_init_64k_bbe(const be128 *g)
+{
+ struct gf128mul_64k *t;
+ int i, j, k;
+
+ t = kzalloc(sizeof(*t), GFP_KERNEL);
+ if (!t)
+ goto out;
+
+ for (i = 0; i < 16; i++) {
+ t->t[i] = kzalloc(sizeof(*t->t[i]), GFP_KERNEL);
+ if (!t->t[i]) {
+ gf128mul_free_64k(t);
+ t = NULL;
+ goto out;
+ }
+ }
+
+ t->t[0]->t[1] = *g;
+ for (j = 1; j <= 64; j <<= 1)
+ gf128mul_x_bbe(&t->t[0]->t[j + j], &t->t[0]->t[j]);
+
+ for (i = 0;;) {
+ for (j = 2; j < 256; j += j)
+ for (k = 1; k < j; ++k)
+ be128_xor(&t->t[i]->t[j + k],
+ &t->t[i]->t[j], &t->t[i]->t[k]);
+
+ if (++i >= 16)
+ break;
+
+ for (j = 128; j > 0; j >>= 1) {
+ t->t[i]->t[j] = t->t[i - 1]->t[j];
+ gf128mul_x8_bbe(&t->t[i]->t[j]);
+ }
+ }
+
+out:
+ return t;
+}
+EXPORT_SYMBOL(gf128mul_init_64k_bbe);
+
+void gf128mul_free_64k(struct gf128mul_64k *t)
+{
+ int i;
+
+ for (i = 0; i < 16; i++)
+ kfree_sensitive(t->t[i]);
+ kfree_sensitive(t);
+}
+EXPORT_SYMBOL(gf128mul_free_64k);
+
+void gf128mul_64k_bbe(be128 *a, const struct gf128mul_64k *t)
+{
+ u8 *ap = (u8 *)a;
+ be128 r[1];
+ int i;
+
+ *r = t->t[0]->t[ap[15]];
+ for (i = 1; i < 16; ++i)
+ be128_xor(r, r, &t->t[i]->t[ap[15 - i]]);
+ *a = *r;
+}
+EXPORT_SYMBOL(gf128mul_64k_bbe);
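
A short usage sketch for the 64k variant (kernel context; the wrapper name
is illustrative). The table is precomputed once per multiplier g and must be
released with gf128mul_free_64k(), which zeroizes the key-dependent tables
via kfree_sensitive():

    static int gf128_mul_by_g(be128 *x, const be128 *g)
    {
            struct gf128mul_64k *t = gf128mul_init_64k_bbe(g);

            if (!t)
                    return -ENOMEM;
            gf128mul_64k_bbe(x, t);     /* *x = *x * g in GF(2^128), bbe */
            gf128mul_free_64k(t);
            return 0;
    }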
+
+/*
+ * This version uses 4 kbytes of table space.
+ *
+ * A 16 byte buffer has to be multiplied by a 16 byte key value in
+ * GF(2^128). If we consider a GF(2^128) value in a single byte, we can
+ * construct a table of the 256 16 byte values that result from the 256
+ * values of this byte; this requires 4096 bytes. If we take the
+ * highest byte in the buffer and use this table to get the result, we
+ * then have to multiply by x^120 to get the final value. For the next
+ * highest byte the result has to be multiplied by x^112 and so on. But
+ * we can do this by accumulating the result in an accumulator starting
+ * with the result for the top byte: we repeatedly multiply the
+ * accumulator value by x^8 and then add in (i.e. xor) the 16 bytes of
+ * the next lower byte in the buffer, stopping when we reach the lowest
+ * byte.
+ */
+struct gf128mul_4k *gf128mul_init_4k_lle(const be128 *g)
+{
+ struct gf128mul_4k *t;
+ int j, k;
+
+ t = kzalloc(sizeof(*t), GFP_KERNEL);
+ if (!t)
+ goto out;
+
+ t->t[128] = *g;
+ for (j = 64; j > 0; j >>= 1)
+ gf128mul_x_lle(&t->t[j], &t->t[j+j]);
+
+ for (j = 2; j < 256; j += j)
+ for (k = 1; k < j; ++k)
+ be128_xor(&t->t[j + k], &t->t[j], &t->t[k]);
+
+out:
+ return t;
+}
+EXPORT_SYMBOL(gf128mul_init_4k_lle);
+
+struct gf128mul_4k *gf128mul_init_4k_bbe(const be128 *g)
+{
+ struct gf128mul_4k *t;
+ int j, k;
+
+ t = kzalloc(sizeof(*t), GFP_KERNEL);
+ if (!t)
+ goto out;
+
+ t->t[1] = *g;
+ for (j = 1; j <= 64; j <<= 1)
+ gf128mul_x_bbe(&t->t[j + j], &t->t[j]);
+
+ for (j = 2; j < 256; j += j)
+ for (k = 1; k < j; ++k)
+ be128_xor(&t->t[j + k], &t->t[j], &t->t[k]);
+
+out:
+ return t;
+}
+EXPORT_SYMBOL(gf128mul_init_4k_bbe);
+
+void gf128mul_4k_lle(be128 *a, const struct gf128mul_4k *t)
+{
+ u8 *ap = (u8 *)a;
+ be128 r[1];
+ int i = 15;
+
+ *r = t->t[ap[15]];
+ while (i--) {
+ gf128mul_x8_lle(r);
+ be128_xor(r, r, &t->t[ap[i]]);
+ }
+ *a = *r;
+}
+EXPORT_SYMBOL(gf128mul_4k_lle);
+
+void gf128mul_4k_bbe(be128 *a, const struct gf128mul_4k *t)
+{
+ u8 *ap = (u8 *)a;
+ be128 r[1];
+ int i = 0;
+
+ *r = t->t[ap[0]];
+ while (++i < 16) {
+ gf128mul_x8_bbe(r);
+ be128_xor(r, r, &t->t[ap[i]]);
+ }
+ *a = *r;
+}
+EXPORT_SYMBOL(gf128mul_4k_bbe);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Functions for multiplying elements of GF(2^128)");
diff --git a/lib/crypto/libchacha.c b/lib/crypto/libchacha.c
new file mode 100644
index 000000000..dabc3acca
--- /dev/null
+++ b/lib/crypto/libchacha.c
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * The ChaCha stream cipher (RFC7539)
+ *
+ * Copyright (C) 2015 Martin Willi
+ */
+
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/module.h>
+
+#include <crypto/algapi.h> // for crypto_xor_cpy
+#include <crypto/chacha.h>
+
+void chacha_crypt_generic(u32 *state, u8 *dst, const u8 *src,
+ unsigned int bytes, int nrounds)
+{
+ /* aligned to potentially speed up crypto_xor() */
+ u8 stream[CHACHA_BLOCK_SIZE] __aligned(sizeof(long));
+
+ while (bytes >= CHACHA_BLOCK_SIZE) {
+ chacha_block_generic(state, stream, nrounds);
+ crypto_xor_cpy(dst, src, stream, CHACHA_BLOCK_SIZE);
+ bytes -= CHACHA_BLOCK_SIZE;
+ dst += CHACHA_BLOCK_SIZE;
+ src += CHACHA_BLOCK_SIZE;
+ }
+ if (bytes) {
+ chacha_block_generic(state, stream, nrounds);
+ crypto_xor_cpy(dst, src, stream, bytes);
+ }
+}
+EXPORT_SYMBOL(chacha_crypt_generic);
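
A caller-side sketch (names illustrative; assumes the chacha_init() helper
and CHACHA_* constants from <crypto/chacha.h>). The state holds the block
counter, so a single initialized state may be fed through several calls
provided every call but the last covers a multiple of CHACHA_BLOCK_SIZE:

    static void chacha20_xor(const u32 key[8], const u8 iv[16],
                             u8 *dst, const u8 *src, unsigned int len)
    {
            u32 state[CHACHA_STATE_WORDS];

            chacha_init(state, key, iv); /* iv: 32-bit counter || 96-bit nonce */
            chacha_crypt_generic(state, dst, src, len, 20);
    }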
+
+MODULE_LICENSE("GPL");
diff --git a/lib/crypto/memneq.c b/lib/crypto/memneq.c
new file mode 100644
index 000000000..243d8677c
--- /dev/null
+++ b/lib/crypto/memneq.c
@@ -0,0 +1,173 @@
+/*
+ * Constant-time equality testing of memory regions.
+ *
+ * Authors:
+ *
+ * James Yonan <james@openvpn.net>
+ * Daniel Borkmann <dborkman@redhat.com>
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2013 OpenVPN Technologies, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2013 OpenVPN Technologies, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of OpenVPN Technologies nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm/unaligned.h>
+#include <crypto/algapi.h>
+#include <linux/module.h>
+
+/* Generic path for arbitrary size */
+static inline unsigned long
+__crypto_memneq_generic(const void *a, const void *b, size_t size)
+{
+ unsigned long neq = 0;
+
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ while (size >= sizeof(unsigned long)) {
+ neq |= get_unaligned((unsigned long *)a) ^
+ get_unaligned((unsigned long *)b);
+ OPTIMIZER_HIDE_VAR(neq);
+ a += sizeof(unsigned long);
+ b += sizeof(unsigned long);
+ size -= sizeof(unsigned long);
+ }
+#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
+ while (size > 0) {
+ neq |= *(unsigned char *)a ^ *(unsigned char *)b;
+ OPTIMIZER_HIDE_VAR(neq);
+ a += 1;
+ b += 1;
+ size -= 1;
+ }
+ return neq;
+}
+
+/* Loop-free fast-path for frequently used 16-byte size */
+static inline unsigned long __crypto_memneq_16(const void *a, const void *b)
+{
+ unsigned long neq = 0;
+
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ if (sizeof(unsigned long) == 8) {
+ neq |= get_unaligned((unsigned long *)a) ^
+ get_unaligned((unsigned long *)b);
+ OPTIMIZER_HIDE_VAR(neq);
+ neq |= get_unaligned((unsigned long *)(a + 8)) ^
+ get_unaligned((unsigned long *)(b + 8));
+ OPTIMIZER_HIDE_VAR(neq);
+ } else if (sizeof(unsigned int) == 4) {
+ neq |= get_unaligned((unsigned int *)a) ^
+ get_unaligned((unsigned int *)b);
+ OPTIMIZER_HIDE_VAR(neq);
+ neq |= get_unaligned((unsigned int *)(a + 4)) ^
+ get_unaligned((unsigned int *)(b + 4));
+ OPTIMIZER_HIDE_VAR(neq);
+ neq |= get_unaligned((unsigned int *)(a + 8)) ^
+ get_unaligned((unsigned int *)(b + 8));
+ OPTIMIZER_HIDE_VAR(neq);
+ neq |= get_unaligned((unsigned int *)(a + 12)) ^
+ get_unaligned((unsigned int *)(b + 12));
+ OPTIMIZER_HIDE_VAR(neq);
+ } else
+#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
+ {
+ neq |= *(unsigned char *)(a) ^ *(unsigned char *)(b);
+ OPTIMIZER_HIDE_VAR(neq);
+ neq |= *(unsigned char *)(a+1) ^ *(unsigned char *)(b+1);
+ OPTIMIZER_HIDE_VAR(neq);
+ neq |= *(unsigned char *)(a+2) ^ *(unsigned char *)(b+2);
+ OPTIMIZER_HIDE_VAR(neq);
+ neq |= *(unsigned char *)(a+3) ^ *(unsigned char *)(b+3);
+ OPTIMIZER_HIDE_VAR(neq);
+ neq |= *(unsigned char *)(a+4) ^ *(unsigned char *)(b+4);
+ OPTIMIZER_HIDE_VAR(neq);
+ neq |= *(unsigned char *)(a+5) ^ *(unsigned char *)(b+5);
+ OPTIMIZER_HIDE_VAR(neq);
+ neq |= *(unsigned char *)(a+6) ^ *(unsigned char *)(b+6);
+ OPTIMIZER_HIDE_VAR(neq);
+ neq |= *(unsigned char *)(a+7) ^ *(unsigned char *)(b+7);
+ OPTIMIZER_HIDE_VAR(neq);
+ neq |= *(unsigned char *)(a+8) ^ *(unsigned char *)(b+8);
+ OPTIMIZER_HIDE_VAR(neq);
+ neq |= *(unsigned char *)(a+9) ^ *(unsigned char *)(b+9);
+ OPTIMIZER_HIDE_VAR(neq);
+ neq |= *(unsigned char *)(a+10) ^ *(unsigned char *)(b+10);
+ OPTIMIZER_HIDE_VAR(neq);
+ neq |= *(unsigned char *)(a+11) ^ *(unsigned char *)(b+11);
+ OPTIMIZER_HIDE_VAR(neq);
+ neq |= *(unsigned char *)(a+12) ^ *(unsigned char *)(b+12);
+ OPTIMIZER_HIDE_VAR(neq);
+ neq |= *(unsigned char *)(a+13) ^ *(unsigned char *)(b+13);
+ OPTIMIZER_HIDE_VAR(neq);
+ neq |= *(unsigned char *)(a+14) ^ *(unsigned char *)(b+14);
+ OPTIMIZER_HIDE_VAR(neq);
+ neq |= *(unsigned char *)(a+15) ^ *(unsigned char *)(b+15);
+ OPTIMIZER_HIDE_VAR(neq);
+ }
+
+ return neq;
+}
+
+/* Compare two areas of memory without leaking timing information,
+ * and with special optimizations for common sizes. Users should
+ * not call this function directly, but should instead use
+ * crypto_memneq defined in crypto/algapi.h.
+ */
+noinline unsigned long __crypto_memneq(const void *a, const void *b,
+ size_t size)
+{
+ switch (size) {
+ case 16:
+ return __crypto_memneq_16(a, b);
+ default:
+ return __crypto_memneq_generic(a, b, size);
+ }
+}
+EXPORT_SYMBOL(__crypto_memneq);
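
Typical use goes through the crypto_memneq() wrapper this symbol backs
(declared in <crypto/algapi.h> here): compare an authentication tag in full
and branch only on the final result.

    static int verify_tag(const u8 *computed, const u8 *received,
                          unsigned int len)
    {
            /* Non-zero iff the buffers differ; the time taken does not
             * depend on where (or whether) they differ.
             */
            return crypto_memneq(computed, received, len) ? -EBADMSG : 0;
    }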
diff --git a/lib/crypto/mpi/Makefile b/lib/crypto/mpi/Makefile
new file mode 100644
index 000000000..6e6ef9a34
--- /dev/null
+++ b/lib/crypto/mpi/Makefile
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# MPI multiprecision maths library (from gpg)
+#
+
+obj-$(CONFIG_MPILIB) = mpi.o
+
+mpi-y = \
+ generic_mpih-lshift.o \
+ generic_mpih-mul1.o \
+ generic_mpih-mul2.o \
+ generic_mpih-mul3.o \
+ generic_mpih-rshift.o \
+ generic_mpih-sub1.o \
+ generic_mpih-add1.o \
+ ec.o \
+ mpicoder.o \
+ mpi-add.o \
+ mpi-bit.o \
+ mpi-cmp.o \
+ mpi-sub-ui.o \
+ mpi-div.o \
+ mpi-inv.o \
+ mpi-mod.o \
+ mpi-mul.o \
+ mpih-cmp.o \
+ mpih-div.o \
+ mpih-mul.o \
+ mpi-pow.o \
+ mpiutil.o
diff --git a/lib/crypto/mpi/ec.c b/lib/crypto/mpi/ec.c
new file mode 100644
index 000000000..e16dca1e2
--- /dev/null
+++ b/lib/crypto/mpi/ec.c
@@ -0,0 +1,1509 @@
+/* ec.c - Elliptic Curve functions
+ * Copyright (C) 2007 Free Software Foundation, Inc.
+ * Copyright (C) 2013 g10 Code GmbH
+ *
+ * This file is part of Libgcrypt.
+ *
+ * Libgcrypt is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * Libgcrypt is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "mpi-internal.h"
+#include "longlong.h"
+
+#define point_init(a) mpi_point_init((a))
+#define point_free(a) mpi_point_free_parts((a))
+
+#define log_error(fmt, ...) pr_err(fmt, ##__VA_ARGS__)
+#define log_fatal(fmt, ...) pr_err(fmt, ##__VA_ARGS__)
+
+#define DIM(v) (sizeof(v)/sizeof((v)[0]))
+
+
+/* Create a new point object. NBITS gives the size in bits of one
+ * coordinate; it is only used to pre-allocate some resources and
+ * might also be passed as 0 to use a default value.
+ */
+MPI_POINT mpi_point_new(unsigned int nbits)
+{
+ MPI_POINT p;
+
+ (void)nbits; /* Currently not used. */
+
+ p = kmalloc(sizeof(*p), GFP_KERNEL);
+ if (p)
+ mpi_point_init(p);
+ return p;
+}
+EXPORT_SYMBOL_GPL(mpi_point_new);
+
+/* Release the point object P. P may be NULL. */
+void mpi_point_release(MPI_POINT p)
+{
+ if (p) {
+ mpi_point_free_parts(p);
+ kfree(p);
+ }
+}
+EXPORT_SYMBOL_GPL(mpi_point_release);
+
+/* Initialize the fields of a point object. gcry_mpi_point_free_parts
+ * may be used to release the fields.
+ */
+void mpi_point_init(MPI_POINT p)
+{
+ p->x = mpi_new(0);
+ p->y = mpi_new(0);
+ p->z = mpi_new(0);
+}
+EXPORT_SYMBOL_GPL(mpi_point_init);
+
+/* Release the parts of a point object. */
+void mpi_point_free_parts(MPI_POINT p)
+{
+ mpi_free(p->x); p->x = NULL;
+ mpi_free(p->y); p->y = NULL;
+ mpi_free(p->z); p->z = NULL;
+}
+EXPORT_SYMBOL_GPL(mpi_point_free_parts);
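
The four helpers above cover the whole lifecycle of a point object; a brief
sketch (kernel context, illustrative):

    static int point_demo(void)
    {
            MPI_POINT q = mpi_point_new(0); /* x, y, z set to fresh MPIs */

            if (!q)
                    return -ENOMEM;
            /* ... use q with the mpi_ec_* routines below ... */
            mpi_point_release(q);   /* frees the parts, then q itself */
            return 0;
    }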
+
+/* Set the value from S into D. */
+static void point_set(MPI_POINT d, MPI_POINT s)
+{
+ mpi_set(d->x, s->x);
+ mpi_set(d->y, s->y);
+ mpi_set(d->z, s->z);
+}
+
+static void point_resize(MPI_POINT p, struct mpi_ec_ctx *ctx)
+{
+ size_t nlimbs = ctx->p->nlimbs;
+
+ mpi_resize(p->x, nlimbs);
+ p->x->nlimbs = nlimbs;
+ mpi_resize(p->z, nlimbs);
+ p->z->nlimbs = nlimbs;
+
+ if (ctx->model != MPI_EC_MONTGOMERY) {
+ mpi_resize(p->y, nlimbs);
+ p->y->nlimbs = nlimbs;
+ }
+}
+
+static void point_swap_cond(MPI_POINT d, MPI_POINT s, unsigned long swap,
+ struct mpi_ec_ctx *ctx)
+{
+ mpi_swap_cond(d->x, s->x, swap);
+ if (ctx->model != MPI_EC_MONTGOMERY)
+ mpi_swap_cond(d->y, s->y, swap);
+ mpi_swap_cond(d->z, s->z, swap);
+}
+
+
+/* W = W mod P. */
+static void ec_mod(MPI w, struct mpi_ec_ctx *ec)
+{
+ if (ec->t.p_barrett)
+ mpi_mod_barrett(w, w, ec->t.p_barrett);
+ else
+ mpi_mod(w, w, ec->p);
+}
+
+static void ec_addm(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx)
+{
+ mpi_add(w, u, v);
+ ec_mod(w, ctx);
+}
+
+static void ec_subm(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ec)
+{
+ mpi_sub(w, u, v);
+ while (w->sign)
+ mpi_add(w, w, ec->p);
+ /*ec_mod(w, ec);*/
+}
+
+static void ec_mulm(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx)
+{
+ mpi_mul(w, u, v);
+ ec_mod(w, ctx);
+}
+
+/* W = 2 * U mod P. */
+static void ec_mul2(MPI w, MPI u, struct mpi_ec_ctx *ctx)
+{
+ mpi_lshift(w, u, 1);
+ ec_mod(w, ctx);
+}
+
+static void ec_powm(MPI w, const MPI b, const MPI e,
+ struct mpi_ec_ctx *ctx)
+{
+ mpi_powm(w, b, e, ctx->p);
+ /* mpi_abs(w); */
+}
+
+/* Shortcut for
+ * ec_powm(B, B, mpi_const(MPI_C_TWO), ctx);
+ * for easier optimization.
+ */
+static void ec_pow2(MPI w, const MPI b, struct mpi_ec_ctx *ctx)
+{
+ /* Using mpi_mul is slightly faster (at least on amd64). */
+ /* mpi_powm(w, b, mpi_const(MPI_C_TWO), ctx->p); */
+ ec_mulm(w, b, b, ctx);
+}
+
+/* Shortcut for
+ * ec_powm(B, B, mpi_const(MPI_C_THREE), ctx);
+ * for easier optimization.
+ */
+static void ec_pow3(MPI w, const MPI b, struct mpi_ec_ctx *ctx)
+{
+ mpi_powm(w, b, mpi_const(MPI_C_THREE), ctx->p);
+}
+
+static void ec_invm(MPI x, MPI a, struct mpi_ec_ctx *ctx)
+{
+ if (!mpi_invm(x, a, ctx->p))
+ log_error("ec_invm: inverse does not exist:\n");
+}
+
+static void mpih_set_cond(mpi_ptr_t wp, mpi_ptr_t up,
+ mpi_size_t usize, unsigned long set)
+{
+ mpi_size_t i;
+ mpi_limb_t mask = ((mpi_limb_t)0) - set;
+ mpi_limb_t x;
+
+ for (i = 0; i < usize; i++) {
+ x = mask & (wp[i] ^ up[i]);
+ wp[i] = wp[i] ^ x;
+ }
+}
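
mpih_set_cond() is a limb-wise form of the standard branch-free select: with
mask = 0 - set, the mask is all-ones when set == 1 and zero when set == 0,
so no key-dependent branch is taken. In scalar form:

    static unsigned long select_cond(unsigned long w, unsigned long u,
                                     unsigned long set)
    {
            unsigned long mask = 0UL - set;

            return w ^ (mask & (w ^ u));    /* set ? u : w */
    }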
+
+/* Routines for 2^255 - 19. */
+
+#define LIMB_SIZE_25519 ((256+BITS_PER_MPI_LIMB-1)/BITS_PER_MPI_LIMB)
+
+static void ec_addm_25519(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx)
+{
+ mpi_ptr_t wp, up, vp;
+ mpi_size_t wsize = LIMB_SIZE_25519;
+ mpi_limb_t n[LIMB_SIZE_25519];
+ mpi_limb_t borrow;
+
+ if (w->nlimbs != wsize || u->nlimbs != wsize || v->nlimbs != wsize)
+ log_bug("addm_25519: different sizes\n");
+
+ memset(n, 0, sizeof(n));
+ up = u->d;
+ vp = v->d;
+ wp = w->d;
+
+ mpihelp_add_n(wp, up, vp, wsize);
+ borrow = mpihelp_sub_n(wp, wp, ctx->p->d, wsize);
+ mpih_set_cond(n, ctx->p->d, wsize, (borrow != 0UL));
+ mpihelp_add_n(wp, wp, n, wsize);
+ wp[LIMB_SIZE_25519-1] &= ~((mpi_limb_t)1 << (255 % BITS_PER_MPI_LIMB));
+}
+
+static void ec_subm_25519(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx)
+{
+ mpi_ptr_t wp, up, vp;
+ mpi_size_t wsize = LIMB_SIZE_25519;
+ mpi_limb_t n[LIMB_SIZE_25519];
+ mpi_limb_t borrow;
+
+ if (w->nlimbs != wsize || u->nlimbs != wsize || v->nlimbs != wsize)
+ log_bug("subm_25519: different sizes\n");
+
+ memset(n, 0, sizeof(n));
+ up = u->d;
+ vp = v->d;
+ wp = w->d;
+
+ borrow = mpihelp_sub_n(wp, up, vp, wsize);
+ mpih_set_cond(n, ctx->p->d, wsize, (borrow != 0UL));
+ mpihelp_add_n(wp, wp, n, wsize);
+ wp[LIMB_SIZE_25519-1] &= ~((mpi_limb_t)1 << (255 % BITS_PER_MPI_LIMB));
+}
+
+static void ec_mulm_25519(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx)
+{
+ mpi_ptr_t wp, up, vp;
+ mpi_size_t wsize = LIMB_SIZE_25519;
+ mpi_limb_t n[LIMB_SIZE_25519*2];
+ mpi_limb_t m[LIMB_SIZE_25519+1];
+ mpi_limb_t cy;
+ int msb;
+
+ (void)ctx;
+ if (w->nlimbs != wsize || u->nlimbs != wsize || v->nlimbs != wsize)
+ log_bug("mulm_25519: different sizes\n");
+
+ up = u->d;
+ vp = v->d;
+ wp = w->d;
+
+ mpihelp_mul_n(n, up, vp, wsize);
+ memcpy(wp, n, wsize * BYTES_PER_MPI_LIMB);
+ wp[LIMB_SIZE_25519-1] &= ~((mpi_limb_t)1 << (255 % BITS_PER_MPI_LIMB));
+
+ memcpy(m, n+LIMB_SIZE_25519-1, (wsize+1) * BYTES_PER_MPI_LIMB);
+ mpihelp_rshift(m, m, LIMB_SIZE_25519+1, (255 % BITS_PER_MPI_LIMB));
+
+ memcpy(n, m, wsize * BYTES_PER_MPI_LIMB);
+ cy = mpihelp_lshift(m, m, LIMB_SIZE_25519, 4);
+ m[LIMB_SIZE_25519] = cy;
+ cy = mpihelp_add_n(m, m, n, wsize);
+ m[LIMB_SIZE_25519] += cy;
+ cy = mpihelp_add_n(m, m, n, wsize);
+ m[LIMB_SIZE_25519] += cy;
+ cy = mpihelp_add_n(m, m, n, wsize);
+ m[LIMB_SIZE_25519] += cy;
+
+ cy = mpihelp_add_n(wp, wp, m, wsize);
+ m[LIMB_SIZE_25519] += cy;
+
+ memset(m, 0, wsize * BYTES_PER_MPI_LIMB);
+ msb = (wp[LIMB_SIZE_25519-1] >> (255 % BITS_PER_MPI_LIMB));
+ m[0] = (m[LIMB_SIZE_25519] * 2 + msb) * 19;
+ wp[LIMB_SIZE_25519-1] &= ~((mpi_limb_t)1 << (255 % BITS_PER_MPI_LIMB));
+ mpihelp_add_n(wp, wp, m, wsize);
+
+ m[0] = 0;
+ cy = mpihelp_sub_n(wp, wp, ctx->p->d, wsize);
+ mpih_set_cond(m, ctx->p->d, wsize, (cy != 0UL));
+ mpihelp_add_n(wp, wp, m, wsize);
+}
+
+static void ec_mul2_25519(MPI w, MPI u, struct mpi_ec_ctx *ctx)
+{
+ ec_addm_25519(w, u, u, ctx);
+}
+
+static void ec_pow2_25519(MPI w, const MPI b, struct mpi_ec_ctx *ctx)
+{
+ ec_mulm_25519(w, b, b, ctx);
+}
+
+/* Routines for 2^448 - 2^224 - 1. */
+
+#define LIMB_SIZE_448 ((448+BITS_PER_MPI_LIMB-1)/BITS_PER_MPI_LIMB)
+#define LIMB_SIZE_HALF_448 ((LIMB_SIZE_448+1)/2)
+
+static void ec_addm_448(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx)
+{
+ mpi_ptr_t wp, up, vp;
+ mpi_size_t wsize = LIMB_SIZE_448;
+ mpi_limb_t n[LIMB_SIZE_448];
+ mpi_limb_t cy;
+
+ if (w->nlimbs != wsize || u->nlimbs != wsize || v->nlimbs != wsize)
+ log_bug("addm_448: different sizes\n");
+
+ memset(n, 0, sizeof(n));
+ up = u->d;
+ vp = v->d;
+ wp = w->d;
+
+ cy = mpihelp_add_n(wp, up, vp, wsize);
+ mpih_set_cond(n, ctx->p->d, wsize, (cy != 0UL));
+ mpihelp_sub_n(wp, wp, n, wsize);
+}
+
+static void ec_subm_448(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx)
+{
+ mpi_ptr_t wp, up, vp;
+ mpi_size_t wsize = LIMB_SIZE_448;
+ mpi_limb_t n[LIMB_SIZE_448];
+ mpi_limb_t borrow;
+
+ if (w->nlimbs != wsize || u->nlimbs != wsize || v->nlimbs != wsize)
+ log_bug("subm_448: different sizes\n");
+
+ memset(n, 0, sizeof(n));
+ up = u->d;
+ vp = v->d;
+ wp = w->d;
+
+ borrow = mpihelp_sub_n(wp, up, vp, wsize);
+ mpih_set_cond(n, ctx->p->d, wsize, (borrow != 0UL));
+ mpihelp_add_n(wp, wp, n, wsize);
+}
+
+static void ec_mulm_448(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx)
+{
+ mpi_ptr_t wp, up, vp;
+ mpi_size_t wsize = LIMB_SIZE_448;
+ mpi_limb_t n[LIMB_SIZE_448*2];
+ mpi_limb_t a2[LIMB_SIZE_HALF_448];
+ mpi_limb_t a3[LIMB_SIZE_HALF_448];
+ mpi_limb_t b0[LIMB_SIZE_HALF_448];
+ mpi_limb_t b1[LIMB_SIZE_HALF_448];
+ mpi_limb_t cy;
+ int i;
+#if (LIMB_SIZE_HALF_448 > LIMB_SIZE_448/2)
+ mpi_limb_t b1_rest, a3_rest;
+#endif
+
+ if (w->nlimbs != wsize || u->nlimbs != wsize || v->nlimbs != wsize)
+ log_bug("mulm_448: different sizes\n");
+
+ up = u->d;
+ vp = v->d;
+ wp = w->d;
+
+ mpihelp_mul_n(n, up, vp, wsize);
+
+ for (i = 0; i < (wsize + 1) / 2; i++) {
+ b0[i] = n[i];
+ b1[i] = n[i+wsize/2];
+ a2[i] = n[i+wsize];
+ a3[i] = n[i+wsize+wsize/2];
+ }
+
+#if (LIMB_SIZE_HALF_448 > LIMB_SIZE_448/2)
+ b0[LIMB_SIZE_HALF_448-1] &= ((mpi_limb_t)1UL << 32)-1;
+ a2[LIMB_SIZE_HALF_448-1] &= ((mpi_limb_t)1UL << 32)-1;
+
+ b1_rest = 0;
+ a3_rest = 0;
+
+ for (i = (wsize + 1) / 2 - 1; i >= 0; i--) {
+ mpi_limb_t b1v, a3v;
+ b1v = b1[i];
+ a3v = a3[i];
+ b1[i] = (b1_rest << 32) | (b1v >> 32);
+ a3[i] = (a3_rest << 32) | (a3v >> 32);
+ b1_rest = b1v & (((mpi_limb_t)1UL << 32)-1);
+ a3_rest = a3v & (((mpi_limb_t)1UL << 32)-1);
+ }
+#endif
+
+ cy = mpihelp_add_n(b0, b0, a2, LIMB_SIZE_HALF_448);
+ cy += mpihelp_add_n(b0, b0, a3, LIMB_SIZE_HALF_448);
+ for (i = 0; i < (wsize + 1) / 2; i++)
+ wp[i] = b0[i];
+#if (LIMB_SIZE_HALF_448 > LIMB_SIZE_448/2)
+ wp[LIMB_SIZE_HALF_448-1] &= (((mpi_limb_t)1UL << 32)-1);
+#endif
+
+#if (LIMB_SIZE_HALF_448 > LIMB_SIZE_448/2)
+ cy = b0[LIMB_SIZE_HALF_448-1] >> 32;
+#endif
+
+ cy = mpihelp_add_1(b1, b1, LIMB_SIZE_HALF_448, cy);
+ cy += mpihelp_add_n(b1, b1, a2, LIMB_SIZE_HALF_448);
+ cy += mpihelp_add_n(b1, b1, a3, LIMB_SIZE_HALF_448);
+ cy += mpihelp_add_n(b1, b1, a3, LIMB_SIZE_HALF_448);
+#if (LIMB_SIZE_HALF_448 > LIMB_SIZE_448/2)
+ b1_rest = 0;
+ for (i = (wsize + 1) / 2 - 1; i >= 0; i--) {
+ mpi_limb_t b1v = b1[i];
+ b1[i] = (b1_rest << 32) | (b1v >> 32);
+ b1_rest = b1v & (((mpi_limb_t)1UL << 32)-1);
+ }
+ wp[LIMB_SIZE_HALF_448-1] |= (b1_rest << 32);
+#endif
+ for (i = 0; i < wsize / 2; i++)
+ wp[i+(wsize + 1) / 2] = b1[i];
+
+#if (LIMB_SIZE_HALF_448 > LIMB_SIZE_448/2)
+ cy = b1[LIMB_SIZE_HALF_448-1];
+#endif
+
+ memset(n, 0, wsize * BYTES_PER_MPI_LIMB);
+
+#if (LIMB_SIZE_HALF_448 > LIMB_SIZE_448/2)
+ n[LIMB_SIZE_HALF_448-1] = cy << 32;
+#else
+ n[LIMB_SIZE_HALF_448] = cy;
+#endif
+ n[0] = cy;
+ mpihelp_add_n(wp, wp, n, wsize);
+
+ memset(n, 0, wsize * BYTES_PER_MPI_LIMB);
+ cy = mpihelp_sub_n(wp, wp, ctx->p->d, wsize);
+ mpih_set_cond(n, ctx->p->d, wsize, (cy != 0UL));
+ mpihelp_add_n(wp, wp, n, wsize);
+}
+
+static void ec_mul2_448(MPI w, MPI u, struct mpi_ec_ctx *ctx)
+{
+ ec_addm_448(w, u, u, ctx);
+}
+
+static void ec_pow2_448(MPI w, const MPI b, struct mpi_ec_ctx *ctx)
+{
+ ec_mulm_448(w, b, b, ctx);
+}
+
+struct field_table {
+ const char *p;
+
+ /* computation routines for the field. */
+ void (*addm)(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx);
+ void (*subm)(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx);
+ void (*mulm)(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx);
+ void (*mul2)(MPI w, MPI u, struct mpi_ec_ctx *ctx);
+ void (*pow2)(MPI w, const MPI b, struct mpi_ec_ctx *ctx);
+};
+
+static const struct field_table field_table[] = {
+ {
+ "0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFED",
+ ec_addm_25519,
+ ec_subm_25519,
+ ec_mulm_25519,
+ ec_mul2_25519,
+ ec_pow2_25519
+ },
+ {
+ "0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFE"
+ "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF",
+ ec_addm_448,
+ ec_subm_448,
+ ec_mulm_448,
+ ec_mul2_448,
+ ec_pow2_448
+ },
+ { NULL, NULL, NULL, NULL, NULL, NULL },
+};
+
+/* Force recomputation of all helper variables. */
+static void mpi_ec_get_reset(struct mpi_ec_ctx *ec)
+{
+ ec->t.valid.a_is_pminus3 = 0;
+ ec->t.valid.two_inv_p = 0;
+}
+
+/* Accessor for helper variable. */
+static int ec_get_a_is_pminus3(struct mpi_ec_ctx *ec)
+{
+ MPI tmp;
+
+ if (!ec->t.valid.a_is_pminus3) {
+ ec->t.valid.a_is_pminus3 = 1;
+ tmp = mpi_alloc_like(ec->p);
+ mpi_sub_ui(tmp, ec->p, 3);
+ ec->t.a_is_pminus3 = !mpi_cmp(ec->a, tmp);
+ mpi_free(tmp);
+ }
+
+ return ec->t.a_is_pminus3;
+}
+
+/* Accessor for helper variable. */
+static MPI ec_get_two_inv_p(struct mpi_ec_ctx *ec)
+{
+ if (!ec->t.valid.two_inv_p) {
+ ec->t.valid.two_inv_p = 1;
+ if (!ec->t.two_inv_p)
+ ec->t.two_inv_p = mpi_alloc(0);
+ ec_invm(ec->t.two_inv_p, mpi_const(MPI_C_TWO), ec);
+ }
+ return ec->t.two_inv_p;
+}
+
+static const char *const curve25519_bad_points[] = {
+ "0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffed",
+ "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "0x0000000000000000000000000000000000000000000000000000000000000001",
+ "0x00b8495f16056286fdb1329ceb8d09da6ac49ff1fae35616aeb8413b7c7aebe0",
+ "0x57119fd0dd4e22d8868e1c58c45c44045bef839c55b1d0b1248c50a3bc959c5f",
+ "0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffec",
+ "0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffee",
+ NULL
+};
+
+static const char *const curve448_bad_points[] = {
+ "0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffe"
+ "ffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ "0x00000000000000000000000000000000000000000000000000000000"
+ "00000000000000000000000000000000000000000000000000000000",
+ "0x00000000000000000000000000000000000000000000000000000000"
+ "00000000000000000000000000000000000000000000000000000001",
+ "0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffe"
+ "fffffffffffffffffffffffffffffffffffffffffffffffffffffffe",
+ "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
+ "00000000000000000000000000000000000000000000000000000000",
+ NULL
+};
+
+static const char *const *bad_points_table[] = {
+ curve25519_bad_points,
+ curve448_bad_points,
+};
+
+static void mpi_ec_coefficient_normalize(MPI a, MPI p)
+{
+ if (a->sign) {
+ mpi_resize(a, p->nlimbs);
+ mpihelp_sub_n(a->d, p->d, a->d, p->nlimbs);
+ a->nlimbs = p->nlimbs;
+ a->sign = 0;
+ }
+}
+
+/* This function initializes a context for an elliptic curve over the
+ * field GF(p). P is the prime specifying this field, A is the first
+ * coefficient. CTX is expected to be zeroized.
+ */
+void mpi_ec_init(struct mpi_ec_ctx *ctx, enum gcry_mpi_ec_models model,
+ enum ecc_dialects dialect,
+ int flags, MPI p, MPI a, MPI b)
+{
+ int i;
+ static int use_barrett = -1 /* TODO: 1 or -1 */;
+
+ mpi_ec_coefficient_normalize(a, p);
+ mpi_ec_coefficient_normalize(b, p);
+
+ /* Fixme: Do we want to check some constraints? e.g. a < p */
+
+ ctx->model = model;
+ ctx->dialect = dialect;
+ ctx->flags = flags;
+ if (dialect == ECC_DIALECT_ED25519)
+ ctx->nbits = 256;
+ else
+ ctx->nbits = mpi_get_nbits(p);
+ ctx->p = mpi_copy(p);
+ ctx->a = mpi_copy(a);
+ ctx->b = mpi_copy(b);
+
+ ctx->d = NULL;
+ ctx->t.two_inv_p = NULL;
+
+ ctx->t.p_barrett = use_barrett > 0 ? mpi_barrett_init(ctx->p, 0) : NULL;
+
+ mpi_ec_get_reset(ctx);
+
+ if (model == MPI_EC_MONTGOMERY) {
+ for (i = 0; i < DIM(bad_points_table); i++) {
+ MPI p_candidate = mpi_scanval(bad_points_table[i][0]);
+ int match_p = !mpi_cmp(ctx->p, p_candidate);
+ int j;
+
+ mpi_free(p_candidate);
+ if (!match_p)
+ continue;
+
+ for (j = 0; j < DIM(ctx->t.scratch) && bad_points_table[i][j]; j++)
+ ctx->t.scratch[j] = mpi_scanval(bad_points_table[i][j]);
+ }
+ } else {
+ /* Allocate scratch variables. */
+ for (i = 0; i < DIM(ctx->t.scratch); i++)
+ ctx->t.scratch[i] = mpi_alloc_like(ctx->p);
+ }
+
+ ctx->addm = ec_addm;
+ ctx->subm = ec_subm;
+ ctx->mulm = ec_mulm;
+ ctx->mul2 = ec_mul2;
+ ctx->pow2 = ec_pow2;
+
+ for (i = 0; field_table[i].p; i++) {
+ MPI f_p;
+
+ f_p = mpi_scanval(field_table[i].p);
+ if (!f_p)
+ break;
+
+ if (!mpi_cmp(p, f_p)) {
+ ctx->addm = field_table[i].addm;
+ ctx->subm = field_table[i].subm;
+ ctx->mulm = field_table[i].mulm;
+ ctx->mul2 = field_table[i].mul2;
+ ctx->pow2 = field_table[i].pow2;
+ mpi_free(f_p);
+
+ mpi_resize(ctx->a, ctx->p->nlimbs);
+ ctx->a->nlimbs = ctx->p->nlimbs;
+
+ mpi_resize(ctx->b, ctx->p->nlimbs);
+ ctx->b->nlimbs = ctx->p->nlimbs;
+
+ for (i = 0; i < DIM(ctx->t.scratch) && ctx->t.scratch[i]; i++)
+ ctx->t.scratch[i]->nlimbs = ctx->p->nlimbs;
+
+ break;
+ }
+
+ mpi_free(f_p);
+ }
+}
+EXPORT_SYMBOL_GPL(mpi_ec_init);
+
+void mpi_ec_deinit(struct mpi_ec_ctx *ctx)
+{
+ int i;
+
+ mpi_barrett_free(ctx->t.p_barrett);
+
+ /* Domain parameter. */
+ mpi_free(ctx->p);
+ mpi_free(ctx->a);
+ mpi_free(ctx->b);
+ mpi_point_release(ctx->G);
+ mpi_free(ctx->n);
+
+ /* The key. */
+ mpi_point_release(ctx->Q);
+ mpi_free(ctx->d);
+
+ /* Private data of ec.c. */
+ mpi_free(ctx->t.two_inv_p);
+
+ for (i = 0; i < DIM(ctx->t.scratch); i++)
+ mpi_free(ctx->t.scratch[i]);
+}
+EXPORT_SYMBOL_GPL(mpi_ec_deinit);
+
+/* Compute the affine coordinates from the projective coordinates in
+ * POINT. Set them into X and Y. If one coordinate is not required,
+ * X or Y may be passed as NULL. CTX is the usual context. Returns: 0
+ * on success or !0 if POINT is at infinity.
+ */
+int mpi_ec_get_affine(MPI x, MPI y, MPI_POINT point, struct mpi_ec_ctx *ctx)
+{
+ if (!mpi_cmp_ui(point->z, 0))
+ return -1;
+
+ switch (ctx->model) {
+ case MPI_EC_WEIERSTRASS: /* Using Jacobian coordinates. */
+ {
+ MPI z1, z2, z3;
+
+ z1 = mpi_new(0);
+ z2 = mpi_new(0);
+ ec_invm(z1, point->z, ctx); /* z1 = z^(-1) mod p */
+ ec_mulm(z2, z1, z1, ctx); /* z2 = z^(-2) mod p */
+
+ if (x)
+ ec_mulm(x, point->x, z2, ctx);
+
+ if (y) {
+ z3 = mpi_new(0);
+ ec_mulm(z3, z2, z1, ctx); /* z3 = z^(-3) mod p */
+ ec_mulm(y, point->y, z3, ctx);
+ mpi_free(z3);
+ }
+
+ mpi_free(z2);
+ mpi_free(z1);
+ }
+ return 0;
+
+ case MPI_EC_MONTGOMERY:
+ {
+ if (x)
+ mpi_set(x, point->x);
+
+ if (y) {
+ log_fatal("%s: Getting Y-coordinate on %s is not supported\n",
+ "mpi_ec_get_affine", "Montgomery");
+ return -1;
+ }
+ }
+ return 0;
+
+ case MPI_EC_EDWARDS:
+ {
+ MPI z;
+
+ z = mpi_new(0);
+ ec_invm(z, point->z, ctx);
+
+ mpi_resize(z, ctx->p->nlimbs);
+ z->nlimbs = ctx->p->nlimbs;
+
+ if (x) {
+ mpi_resize(x, ctx->p->nlimbs);
+ x->nlimbs = ctx->p->nlimbs;
+ ctx->mulm(x, point->x, z, ctx);
+ }
+ if (y) {
+ mpi_resize(y, ctx->p->nlimbs);
+ y->nlimbs = ctx->p->nlimbs;
+ ctx->mulm(y, point->y, z, ctx);
+ }
+
+ mpi_free(z);
+ }
+ return 0;
+
+ default:
+ return -1;
+ }
+}
+EXPORT_SYMBOL_GPL(mpi_ec_get_affine);
+
+/* RESULT = 2 * POINT (Weierstrass version). */
+static void dup_point_weierstrass(MPI_POINT result,
+ MPI_POINT point, struct mpi_ec_ctx *ctx)
+{
+#define x3 (result->x)
+#define y3 (result->y)
+#define z3 (result->z)
+#define t1 (ctx->t.scratch[0])
+#define t2 (ctx->t.scratch[1])
+#define t3 (ctx->t.scratch[2])
+#define l1 (ctx->t.scratch[3])
+#define l2 (ctx->t.scratch[4])
+#define l3 (ctx->t.scratch[5])
+
+ if (!mpi_cmp_ui(point->y, 0) || !mpi_cmp_ui(point->z, 0)) {
+ /* P_y == 0 || P_z == 0 => [1:1:0] */
+ mpi_set_ui(x3, 1);
+ mpi_set_ui(y3, 1);
+ mpi_set_ui(z3, 0);
+ } else {
+ if (ec_get_a_is_pminus3(ctx)) {
+ /* Use the faster case. */
+ /* L1 = 3(X - Z^2)(X + Z^2) */
+ /* T1: used for Z^2. */
+ /* T2: used for the right term. */
+ ec_pow2(t1, point->z, ctx);
+ ec_subm(l1, point->x, t1, ctx);
+ ec_mulm(l1, l1, mpi_const(MPI_C_THREE), ctx);
+ ec_addm(t2, point->x, t1, ctx);
+ ec_mulm(l1, l1, t2, ctx);
+ } else {
+ /* Standard case. */
+ /* L1 = 3X^2 + aZ^4 */
+ /* T1: used for aZ^4. */
+ ec_pow2(l1, point->x, ctx);
+ ec_mulm(l1, l1, mpi_const(MPI_C_THREE), ctx);
+ ec_powm(t1, point->z, mpi_const(MPI_C_FOUR), ctx);
+ ec_mulm(t1, t1, ctx->a, ctx);
+ ec_addm(l1, l1, t1, ctx);
+ }
+ /* Z3 = 2YZ */
+ ec_mulm(z3, point->y, point->z, ctx);
+ ec_mul2(z3, z3, ctx);
+
+ /* L2 = 4XY^2 */
+ /* T2: used for Y2; required later. */
+ ec_pow2(t2, point->y, ctx);
+ ec_mulm(l2, t2, point->x, ctx);
+ ec_mulm(l2, l2, mpi_const(MPI_C_FOUR), ctx);
+
+ /* X3 = L1^2 - 2L2 */
+ /* T1: used for L2^2. */
+ ec_pow2(x3, l1, ctx);
+ ec_mul2(t1, l2, ctx);
+ ec_subm(x3, x3, t1, ctx);
+
+ /* L3 = 8Y^4 */
+ /* T2: taken from above. */
+ ec_pow2(t2, t2, ctx);
+ ec_mulm(l3, t2, mpi_const(MPI_C_EIGHT), ctx);
+
+ /* Y3 = L1(L2 - X3) - L3 */
+ ec_subm(y3, l2, x3, ctx);
+ ec_mulm(y3, y3, l1, ctx);
+ ec_subm(y3, y3, l3, ctx);
+ }
+
+#undef x3
+#undef y3
+#undef z3
+#undef t1
+#undef t2
+#undef t3
+#undef l1
+#undef l2
+#undef l3
+}
+
+/* RESULT = 2 * POINT (Montgomery version). */
+static void dup_point_montgomery(MPI_POINT result,
+ MPI_POINT point, struct mpi_ec_ctx *ctx)
+{
+ (void)result;
+ (void)point;
+ (void)ctx;
+ log_fatal("%s: %s not yet supported\n",
+ "mpi_ec_dup_point", "Montgomery");
+}
+
+/* RESULT = 2 * POINT (Twisted Edwards version). */
+static void dup_point_edwards(MPI_POINT result,
+ MPI_POINT point, struct mpi_ec_ctx *ctx)
+{
+#define X1 (point->x)
+#define Y1 (point->y)
+#define Z1 (point->z)
+#define X3 (result->x)
+#define Y3 (result->y)
+#define Z3 (result->z)
+#define B (ctx->t.scratch[0])
+#define C (ctx->t.scratch[1])
+#define D (ctx->t.scratch[2])
+#define E (ctx->t.scratch[3])
+#define F (ctx->t.scratch[4])
+#define H (ctx->t.scratch[5])
+#define J (ctx->t.scratch[6])
+
+ /* Compute: (X_3 : Y_3 : Z_3) = 2( X_1 : Y_1 : Z_1 ) */
+
+ /* B = (X_1 + Y_1)^2 */
+ ctx->addm(B, X1, Y1, ctx);
+ ctx->pow2(B, B, ctx);
+
+ /* C = X_1^2 */
+ /* D = Y_1^2 */
+ ctx->pow2(C, X1, ctx);
+ ctx->pow2(D, Y1, ctx);
+
+ /* E = aC */
+ if (ctx->dialect == ECC_DIALECT_ED25519)
+ ctx->subm(E, ctx->p, C, ctx);
+ else
+ ctx->mulm(E, ctx->a, C, ctx);
+
+ /* F = E + D */
+ ctx->addm(F, E, D, ctx);
+
+ /* H = Z_1^2 */
+ ctx->pow2(H, Z1, ctx);
+
+ /* J = F - 2H */
+ ctx->mul2(J, H, ctx);
+ ctx->subm(J, F, J, ctx);
+
+ /* X_3 = (B - C - D) · J */
+ ctx->subm(X3, B, C, ctx);
+ ctx->subm(X3, X3, D, ctx);
+ ctx->mulm(X3, X3, J, ctx);
+
+ /* Y_3 = F · (E - D) */
+ ctx->subm(Y3, E, D, ctx);
+ ctx->mulm(Y3, Y3, F, ctx);
+
+ /* Z_3 = F · J */
+ ctx->mulm(Z3, F, J, ctx);
+
+#undef X1
+#undef Y1
+#undef Z1
+#undef X3
+#undef Y3
+#undef Z3
+#undef B
+#undef C
+#undef D
+#undef E
+#undef F
+#undef H
+#undef J
+}
+
+/* RESULT = 2 * POINT */
+static void
+mpi_ec_dup_point(MPI_POINT result, MPI_POINT point, struct mpi_ec_ctx *ctx)
+{
+ switch (ctx->model) {
+ case MPI_EC_WEIERSTRASS:
+ dup_point_weierstrass(result, point, ctx);
+ break;
+ case MPI_EC_MONTGOMERY:
+ dup_point_montgomery(result, point, ctx);
+ break;
+ case MPI_EC_EDWARDS:
+ dup_point_edwards(result, point, ctx);
+ break;
+ }
+}
+
+/* RESULT = P1 + P2 (Weierstrass version).*/
+static void add_points_weierstrass(MPI_POINT result,
+ MPI_POINT p1, MPI_POINT p2,
+ struct mpi_ec_ctx *ctx)
+{
+#define x1 (p1->x)
+#define y1 (p1->y)
+#define z1 (p1->z)
+#define x2 (p2->x)
+#define y2 (p2->y)
+#define z2 (p2->z)
+#define x3 (result->x)
+#define y3 (result->y)
+#define z3 (result->z)
+#define l1 (ctx->t.scratch[0])
+#define l2 (ctx->t.scratch[1])
+#define l3 (ctx->t.scratch[2])
+#define l4 (ctx->t.scratch[3])
+#define l5 (ctx->t.scratch[4])
+#define l6 (ctx->t.scratch[5])
+#define l7 (ctx->t.scratch[6])
+#define l8 (ctx->t.scratch[7])
+#define l9 (ctx->t.scratch[8])
+#define t1 (ctx->t.scratch[9])
+#define t2 (ctx->t.scratch[10])
+
+ if ((!mpi_cmp(x1, x2)) && (!mpi_cmp(y1, y2)) && (!mpi_cmp(z1, z2))) {
+ /* Same point; need to call the duplicate function. */
+ mpi_ec_dup_point(result, p1, ctx);
+ } else if (!mpi_cmp_ui(z1, 0)) {
+ /* P1 is at infinity. */
+ mpi_set(x3, p2->x);
+ mpi_set(y3, p2->y);
+ mpi_set(z3, p2->z);
+ } else if (!mpi_cmp_ui(z2, 0)) {
+ /* P2 is at infinity. */
+ mpi_set(x3, p1->x);
+ mpi_set(y3, p1->y);
+ mpi_set(z3, p1->z);
+ } else {
+ int z1_is_one = !mpi_cmp_ui(z1, 1);
+ int z2_is_one = !mpi_cmp_ui(z2, 1);
+
+ /* l1 = x1 z2^2 */
+ /* l2 = x2 z1^2 */
+ if (z2_is_one)
+ mpi_set(l1, x1);
+ else {
+ ec_pow2(l1, z2, ctx);
+ ec_mulm(l1, l1, x1, ctx);
+ }
+ if (z1_is_one)
+ mpi_set(l2, x2);
+ else {
+ ec_pow2(l2, z1, ctx);
+ ec_mulm(l2, l2, x2, ctx);
+ }
+ /* l3 = l1 - l2 */
+ ec_subm(l3, l1, l2, ctx);
+ /* l4 = y1 z2^3 */
+ ec_powm(l4, z2, mpi_const(MPI_C_THREE), ctx);
+ ec_mulm(l4, l4, y1, ctx);
+ /* l5 = y2 z1^3 */
+ ec_powm(l5, z1, mpi_const(MPI_C_THREE), ctx);
+ ec_mulm(l5, l5, y2, ctx);
+ /* l6 = l4 - l5 */
+ ec_subm(l6, l4, l5, ctx);
+
+ if (!mpi_cmp_ui(l3, 0)) {
+ if (!mpi_cmp_ui(l6, 0)) {
+ /* P1 and P2 are the same - use duplicate function. */
+ mpi_ec_dup_point(result, p1, ctx);
+ } else {
+ /* P1 is the inverse of P2. */
+ mpi_set_ui(x3, 1);
+ mpi_set_ui(y3, 1);
+ mpi_set_ui(z3, 0);
+ }
+ } else {
+ /* l7 = l1 + l2 */
+ ec_addm(l7, l1, l2, ctx);
+ /* l8 = l4 + l5 */
+ ec_addm(l8, l4, l5, ctx);
+ /* z3 = z1 z2 l3 */
+ ec_mulm(z3, z1, z2, ctx);
+ ec_mulm(z3, z3, l3, ctx);
+ /* x3 = l6^2 - l7 l3^2 */
+ ec_pow2(t1, l6, ctx);
+ ec_pow2(t2, l3, ctx);
+ ec_mulm(t2, t2, l7, ctx);
+ ec_subm(x3, t1, t2, ctx);
+ /* l9 = l7 l3^2 - 2 x3 */
+ ec_mul2(t1, x3, ctx);
+ ec_subm(l9, t2, t1, ctx);
+ /* y3 = (l9 l6 - l8 l3^3)/2 */
+ ec_mulm(l9, l9, l6, ctx);
+ ec_powm(t1, l3, mpi_const(MPI_C_THREE), ctx); /* fixme: Use saved value*/
+ ec_mulm(t1, t1, l8, ctx);
+ ec_subm(y3, l9, t1, ctx);
+ ec_mulm(y3, y3, ec_get_two_inv_p(ctx), ctx);
+ }
+ }
+
+#undef x1
+#undef y1
+#undef z1
+#undef x2
+#undef y2
+#undef z2
+#undef x3
+#undef y3
+#undef z3
+#undef l1
+#undef l2
+#undef l3
+#undef l4
+#undef l5
+#undef l6
+#undef l7
+#undef l8
+#undef l9
+#undef t1
+#undef t2
+}
+
+/* RESULT = P1 + P2 (Montgomery version).*/
+static void add_points_montgomery(MPI_POINT result,
+ MPI_POINT p1, MPI_POINT p2,
+ struct mpi_ec_ctx *ctx)
+{
+ (void)result;
+ (void)p1;
+ (void)p2;
+ (void)ctx;
+ log_fatal("%s: %s not yet supported\n",
+ "mpi_ec_add_points", "Montgomery");
+}
+
+/* RESULT = P1 + P2 (Twisted Edwards version).*/
+static void add_points_edwards(MPI_POINT result,
+ MPI_POINT p1, MPI_POINT p2,
+ struct mpi_ec_ctx *ctx)
+{
+#define X1 (p1->x)
+#define Y1 (p1->y)
+#define Z1 (p1->z)
+#define X2 (p2->x)
+#define Y2 (p2->y)
+#define Z2 (p2->z)
+#define X3 (result->x)
+#define Y3 (result->y)
+#define Z3 (result->z)
+#define A (ctx->t.scratch[0])
+#define B (ctx->t.scratch[1])
+#define C (ctx->t.scratch[2])
+#define D (ctx->t.scratch[3])
+#define E (ctx->t.scratch[4])
+#define F (ctx->t.scratch[5])
+#define G (ctx->t.scratch[6])
+#define tmp (ctx->t.scratch[7])
+
+ point_resize(result, ctx);
+
+	/* Compute: (X_3 : Y_3 : Z_3) = (X_1 : Y_1 : Z_1) + (X_2 : Y_2 : Z_2) */
+
+ /* A = Z1 · Z2 */
+ ctx->mulm(A, Z1, Z2, ctx);
+
+ /* B = A^2 */
+ ctx->pow2(B, A, ctx);
+
+ /* C = X1 · X2 */
+ ctx->mulm(C, X1, X2, ctx);
+
+ /* D = Y1 · Y2 */
+ ctx->mulm(D, Y1, Y2, ctx);
+
+ /* E = d · C · D */
+ ctx->mulm(E, ctx->b, C, ctx);
+ ctx->mulm(E, E, D, ctx);
+
+ /* F = B - E */
+ ctx->subm(F, B, E, ctx);
+
+ /* G = B + E */
+ ctx->addm(G, B, E, ctx);
+
+ /* X_3 = A · F · ((X_1 + Y_1) · (X_2 + Y_2) - C - D) */
+ ctx->addm(tmp, X1, Y1, ctx);
+ ctx->addm(X3, X2, Y2, ctx);
+ ctx->mulm(X3, X3, tmp, ctx);
+ ctx->subm(X3, X3, C, ctx);
+ ctx->subm(X3, X3, D, ctx);
+ ctx->mulm(X3, X3, F, ctx);
+ ctx->mulm(X3, X3, A, ctx);
+
+ /* Y_3 = A · G · (D - aC) */
+ if (ctx->dialect == ECC_DIALECT_ED25519) {
+ ctx->addm(Y3, D, C, ctx);
+ } else {
+ ctx->mulm(Y3, ctx->a, C, ctx);
+ ctx->subm(Y3, D, Y3, ctx);
+ }
+ ctx->mulm(Y3, Y3, G, ctx);
+ ctx->mulm(Y3, Y3, A, ctx);
+
+ /* Z_3 = F · G */
+ ctx->mulm(Z3, F, G, ctx);
+
+#undef X1
+#undef Y1
+#undef Z1
+#undef X2
+#undef Y2
+#undef Z2
+#undef X3
+#undef Y3
+#undef Z3
+#undef A
+#undef B
+#undef C
+#undef D
+#undef E
+#undef F
+#undef G
+#undef tmp
+}
+
+/* Compute a step of the Montgomery ladder (only the X and Z coordinates
+ * of the points are used).
+ * Inputs: P1, P2, and the x-coordinate of DIF = P1 - P2.
+ * Outputs: PRD = 2 * P1 and SUM = P1 + P2.
+ */
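+/* (Each ladder step preserves the invariant SUM - PRD = DIF, which is why
+ * only the x-coordinate of the fixed difference point is needed.)
+ */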
+static void montgomery_ladder(MPI_POINT prd, MPI_POINT sum,
+ MPI_POINT p1, MPI_POINT p2, MPI dif_x,
+ struct mpi_ec_ctx *ctx)
+{
+ ctx->addm(sum->x, p2->x, p2->z, ctx);
+ ctx->subm(p2->z, p2->x, p2->z, ctx);
+ ctx->addm(prd->x, p1->x, p1->z, ctx);
+ ctx->subm(p1->z, p1->x, p1->z, ctx);
+ ctx->mulm(p2->x, p1->z, sum->x, ctx);
+ ctx->mulm(p2->z, prd->x, p2->z, ctx);
+ ctx->pow2(p1->x, prd->x, ctx);
+ ctx->pow2(p1->z, p1->z, ctx);
+ ctx->addm(sum->x, p2->x, p2->z, ctx);
+ ctx->subm(p2->z, p2->x, p2->z, ctx);
+ ctx->mulm(prd->x, p1->x, p1->z, ctx);
+ ctx->subm(p1->z, p1->x, p1->z, ctx);
+ ctx->pow2(sum->x, sum->x, ctx);
+ ctx->pow2(sum->z, p2->z, ctx);
+ ctx->mulm(prd->z, p1->z, ctx->a, ctx); /* CTX->A: (a-2)/4 */
+ ctx->mulm(sum->z, sum->z, dif_x, ctx);
+ ctx->addm(prd->z, p1->x, prd->z, ctx);
+ ctx->mulm(prd->z, prd->z, p1->z, ctx);
+}
+
+/* RESULT = P1 + P2 */
+void mpi_ec_add_points(MPI_POINT result,
+ MPI_POINT p1, MPI_POINT p2,
+ struct mpi_ec_ctx *ctx)
+{
+ switch (ctx->model) {
+ case MPI_EC_WEIERSTRASS:
+ add_points_weierstrass(result, p1, p2, ctx);
+ break;
+ case MPI_EC_MONTGOMERY:
+ add_points_montgomery(result, p1, p2, ctx);
+ break;
+ case MPI_EC_EDWARDS:
+ add_points_edwards(result, p1, p2, ctx);
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(mpi_ec_add_points);
+
+/* Scalar point multiplication - the main function for ECC. It takes
+ * an integer SCALAR and a POINT as well as the usual context CTX.
+ * RESULT will be set to the resulting point.
+ */
+void mpi_ec_mul_point(MPI_POINT result,
+ MPI scalar, MPI_POINT point,
+ struct mpi_ec_ctx *ctx)
+{
+ MPI x1, y1, z1, k, h, yy;
+ unsigned int i, loops;
+ struct gcry_mpi_point p1, p2, p1inv;
+
+ if (ctx->model == MPI_EC_EDWARDS) {
+ /* Simple left to right binary method. Algorithm 3.27 from
+ * {author={Hankerson, Darrel and Menezes, Alfred J. and Vanstone, Scott},
+ * title = {Guide to Elliptic Curve Cryptography},
+ * year = {2003}, isbn = {038795273X},
+ * url = {http://www.cacr.math.uwaterloo.ca/ecc/},
+ * publisher = {Springer-Verlag New York, Inc.}}
+ */
+ unsigned int nbits;
+ int j;
+
+ if (mpi_cmp(scalar, ctx->p) >= 0)
+ nbits = mpi_get_nbits(scalar);
+ else
+ nbits = mpi_get_nbits(ctx->p);
+
+ mpi_set_ui(result->x, 0);
+ mpi_set_ui(result->y, 1);
+ mpi_set_ui(result->z, 1);
+
+		point_resize(result, ctx);
+		point_resize(point, ctx);
+
+ for (j = nbits-1; j >= 0; j--) {
+ mpi_ec_dup_point(result, result, ctx);
+ if (mpi_test_bit(scalar, j))
+ mpi_ec_add_points(result, result, point, ctx);
+ }
+ return;
+ } else if (ctx->model == MPI_EC_MONTGOMERY) {
+ unsigned int nbits;
+ int j;
+ struct gcry_mpi_point p1_, p2_;
+ MPI_POINT q1, q2, prd, sum;
+ unsigned long sw;
+ mpi_size_t rsize;
+
+ /* Compute scalar point multiplication with Montgomery Ladder.
+ * Note that we don't use Y-coordinate in the points at all.
+ * RESULT->Y will be filled by zero.
+ */
+
+ nbits = mpi_get_nbits(scalar);
+ point_init(&p1);
+ point_init(&p2);
+ point_init(&p1_);
+ point_init(&p2_);
+ mpi_set_ui(p1.x, 1);
+ mpi_free(p2.x);
+ p2.x = mpi_copy(point->x);
+ mpi_set_ui(p2.z, 1);
+
+ point_resize(&p1, ctx);
+ point_resize(&p2, ctx);
+ point_resize(&p1_, ctx);
+ point_resize(&p2_, ctx);
+
+ mpi_resize(point->x, ctx->p->nlimbs);
+ point->x->nlimbs = ctx->p->nlimbs;
+
+ q1 = &p1;
+ q2 = &p2;
+ prd = &p1_;
+ sum = &p2_;
+
+ for (j = nbits-1; j >= 0; j--) {
+ MPI_POINT t;
+
+ sw = mpi_test_bit(scalar, j);
+ point_swap_cond(q1, q2, sw, ctx);
+ montgomery_ladder(prd, sum, q1, q2, point->x, ctx);
+ point_swap_cond(prd, sum, sw, ctx);
+ t = q1; q1 = prd; prd = t;
+ t = q2; q2 = sum; sum = t;
+ }
+
+ mpi_clear(result->y);
+ sw = (nbits & 1);
+ point_swap_cond(&p1, &p1_, sw, ctx);
+
+ rsize = p1.z->nlimbs;
+ MPN_NORMALIZE(p1.z->d, rsize);
+ if (rsize == 0) {
+ mpi_set_ui(result->x, 1);
+ mpi_set_ui(result->z, 0);
+ } else {
+ z1 = mpi_new(0);
+ ec_invm(z1, p1.z, ctx);
+ ec_mulm(result->x, p1.x, z1, ctx);
+ mpi_set_ui(result->z, 1);
+ mpi_free(z1);
+ }
+
+ point_free(&p1);
+ point_free(&p2);
+ point_free(&p1_);
+ point_free(&p2_);
+ return;
+ }
+
+ x1 = mpi_alloc_like(ctx->p);
+ y1 = mpi_alloc_like(ctx->p);
+ h = mpi_alloc_like(ctx->p);
+ k = mpi_copy(scalar);
+ yy = mpi_copy(point->y);
+
+ if (mpi_has_sign(k)) {
+ k->sign = 0;
+ ec_invm(yy, yy, ctx);
+ }
+
+ if (!mpi_cmp_ui(point->z, 1)) {
+ mpi_set(x1, point->x);
+ mpi_set(y1, yy);
+ } else {
+ MPI z2, z3;
+
+ z2 = mpi_alloc_like(ctx->p);
+ z3 = mpi_alloc_like(ctx->p);
+ ec_mulm(z2, point->z, point->z, ctx);
+ ec_mulm(z3, point->z, z2, ctx);
+ ec_invm(z2, z2, ctx);
+ ec_mulm(x1, point->x, z2, ctx);
+ ec_invm(z3, z3, ctx);
+ ec_mulm(y1, yy, z3, ctx);
+ mpi_free(z2);
+ mpi_free(z3);
+ }
+ z1 = mpi_copy(mpi_const(MPI_C_ONE));
+
+ mpi_mul(h, k, mpi_const(MPI_C_THREE)); /* h = 3k */
+ loops = mpi_get_nbits(h);
+ if (loops < 2) {
+ /* If SCALAR is zero, the above mpi_mul sets H to zero and thus
+	 * LOOPS will be zero. To avoid an underflow of I in the main
+	 * loop we set LOOPS to 2 and the result to (0,0,0).
+ */
+ loops = 2;
+ mpi_clear(result->x);
+ mpi_clear(result->y);
+ mpi_clear(result->z);
+ } else {
+ mpi_set(result->x, point->x);
+ mpi_set(result->y, yy);
+ mpi_set(result->z, point->z);
+ }
+ mpi_free(yy); yy = NULL;
+
+ p1.x = x1; x1 = NULL;
+ p1.y = y1; y1 = NULL;
+ p1.z = z1; z1 = NULL;
+ point_init(&p2);
+ point_init(&p1inv);
+
+ /* Invert point: y = p - y mod p */
+ point_set(&p1inv, &p1);
+ ec_subm(p1inv.y, ctx->p, p1inv.y, ctx);
+
+ for (i = loops-2; i > 0; i--) {
+ mpi_ec_dup_point(result, result, ctx);
+ if (mpi_test_bit(h, i) == 1 && mpi_test_bit(k, i) == 0) {
+ point_set(&p2, result);
+ mpi_ec_add_points(result, &p2, &p1, ctx);
+ }
+ if (mpi_test_bit(h, i) == 0 && mpi_test_bit(k, i) == 1) {
+ point_set(&p2, result);
+ mpi_ec_add_points(result, &p2, &p1inv, ctx);
+ }
+ }
+
+ point_free(&p1);
+ point_free(&p2);
+ point_free(&p1inv);
+ mpi_free(h);
+ mpi_free(k);
+}
+EXPORT_SYMBOL_GPL(mpi_ec_mul_point);
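+
+/* Sketch of typical use (illustrative only; mpi_point_new() is the point
+ * allocator declared alongside these helpers): computing Q = [d]G.
+ *
+ *   MPI_POINT Q = mpi_point_new(0);
+ *
+ *   mpi_ec_mul_point(Q, d, G, ctx);
+ *   (Q now holds d*G in the model's projective coordinates; use
+ *    mpi_ec_get_affine() to recover affine x and y.)
+ */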
+
+/* Return true if POINT is on the curve described by CTX. */
+int mpi_ec_curve_point(MPI_POINT point, struct mpi_ec_ctx *ctx)
+{
+ int res = 0;
+ MPI x, y, w;
+
+ x = mpi_new(0);
+ y = mpi_new(0);
+ w = mpi_new(0);
+
+ /* Check that the point is in range. This needs to be done here and
+ * not after conversion to affine coordinates.
+ */
+ if (mpi_cmpabs(point->x, ctx->p) >= 0)
+ goto leave;
+ if (mpi_cmpabs(point->y, ctx->p) >= 0)
+ goto leave;
+ if (mpi_cmpabs(point->z, ctx->p) >= 0)
+ goto leave;
+
+ switch (ctx->model) {
+ case MPI_EC_WEIERSTRASS:
+ {
+ MPI xxx;
+
+ if (mpi_ec_get_affine(x, y, point, ctx))
+ goto leave;
+
+ xxx = mpi_new(0);
+
+ /* y^2 == x^3 + a·x + b */
+ ec_pow2(y, y, ctx);
+
+ ec_pow3(xxx, x, ctx);
+ ec_mulm(w, ctx->a, x, ctx);
+ ec_addm(w, w, ctx->b, ctx);
+ ec_addm(w, w, xxx, ctx);
+
+ if (!mpi_cmp(y, w))
+ res = 1;
+
+ mpi_free(xxx);
+ }
+ break;
+
+ case MPI_EC_MONTGOMERY:
+ {
+#define xx y
+		/* With a Montgomery curve, only the X-coordinate is valid. */
+ if (mpi_ec_get_affine(x, NULL, point, ctx))
+ goto leave;
+
+ /* The equation is: b * y^2 == x^3 + a · x^2 + x */
+ /* We check if right hand is quadratic residue or not by
+ * Euler's criterion.
+ */
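+		/* Concretely: for a prime p and w not divisible by p,
+		 * w^((p-1)/2) == 1 (mod p) iff w is a quadratic residue.
+		 */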
+ /* CTX->A has (a-2)/4 and CTX->B has b^-1 */
+ ec_mulm(w, ctx->a, mpi_const(MPI_C_FOUR), ctx);
+ ec_addm(w, w, mpi_const(MPI_C_TWO), ctx);
+ ec_mulm(w, w, x, ctx);
+ ec_pow2(xx, x, ctx);
+ ec_addm(w, w, xx, ctx);
+ ec_addm(w, w, mpi_const(MPI_C_ONE), ctx);
+ ec_mulm(w, w, x, ctx);
+ ec_mulm(w, w, ctx->b, ctx);
+#undef xx
+		/* Compute Euler's criterion: w^((p-1)/2) */
+#define p_minus1 y
+ ec_subm(p_minus1, ctx->p, mpi_const(MPI_C_ONE), ctx);
+ mpi_rshift(p_minus1, p_minus1, 1);
+ ec_powm(w, w, p_minus1, ctx);
+
+ res = !mpi_cmp_ui(w, 1);
+#undef p_minus1
+ }
+ break;
+
+ case MPI_EC_EDWARDS:
+ {
+ if (mpi_ec_get_affine(x, y, point, ctx))
+ goto leave;
+
+ mpi_resize(w, ctx->p->nlimbs);
+ w->nlimbs = ctx->p->nlimbs;
+
+ /* a · x^2 + y^2 - 1 - b · x^2 · y^2 == 0 */
+ ctx->pow2(x, x, ctx);
+ ctx->pow2(y, y, ctx);
+ if (ctx->dialect == ECC_DIALECT_ED25519)
+ ctx->subm(w, ctx->p, x, ctx);
+ else
+ ctx->mulm(w, ctx->a, x, ctx);
+ ctx->addm(w, w, y, ctx);
+ ctx->mulm(x, x, y, ctx);
+ ctx->mulm(x, x, ctx->b, ctx);
+ ctx->subm(w, w, x, ctx);
+ if (!mpi_cmp_ui(w, 1))
+ res = 1;
+ }
+ break;
+ }
+
+leave:
+ mpi_free(w);
+ mpi_free(x);
+ mpi_free(y);
+
+ return res;
+}
+EXPORT_SYMBOL_GPL(mpi_ec_curve_point);
diff --git a/lib/crypto/mpi/generic_mpih-add1.c b/lib/crypto/mpi/generic_mpih-add1.c
new file mode 100644
index 000000000..299308b54
--- /dev/null
+++ b/lib/crypto/mpi/generic_mpih-add1.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* mpihelp-add_1.c - MPI helper functions
+ * Copyright (C) 1994, 1996, 1997, 1998,
+ * 2000 Free Software Foundation, Inc.
+ *
+ * This file is part of GnuPG.
+ *
+ * Note: This code is heavily based on the GNU MP Library.
+ * Actually it's the same code with only minor changes in the
+ * way the data is stored; this is to support the abstraction
+ * of an optional secure memory allocation which may be used
+ * to avoid revealing of sensitive data due to paging etc.
+ * The GNU MP Library itself is published under the LGPL;
+ * however I decided to publish this code under the plain GPL.
+ */
+
+#include "mpi-internal.h"
+#include "longlong.h"
+
+mpi_limb_t
+mpihelp_add_n(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
+ mpi_ptr_t s2_ptr, mpi_size_t size)
+{
+ mpi_limb_t x, y, cy;
+ mpi_size_t j;
+
+ /* The loop counter and index J goes from -SIZE to -1. This way
+ the loop becomes faster. */
+ j = -size;
+
+ /* Offset the base pointers to compensate for the negative indices. */
+ s1_ptr -= j;
+ s2_ptr -= j;
+ res_ptr -= j;
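+
+	/* After this offsetting, s1_ptr[j], s2_ptr[j] and res_ptr[j] scan
+	 * limbs 0..size-1, while the loop test "++j" is a plain compare
+	 * against zero.
+	 */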
+
+ cy = 0;
+ do {
+ y = s2_ptr[j];
+ x = s1_ptr[j];
+ y += cy; /* add previous carry to one addend */
+ cy = y < cy; /* get out carry from that addition */
+ y += x; /* add other addend */
+ cy += y < x; /* get out carry from that add, combine */
+ res_ptr[j] = y;
+ } while (++j);
+
+ return cy;
+}
diff --git a/lib/crypto/mpi/generic_mpih-lshift.c b/lib/crypto/mpi/generic_mpih-lshift.c
new file mode 100644
index 000000000..7b21f5938
--- /dev/null
+++ b/lib/crypto/mpi/generic_mpih-lshift.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* mpihelp-lshift.c - MPI helper functions
+ * Copyright (C) 1994, 1996, 1998, 2001 Free Software Foundation, Inc.
+ *
+ * This file is part of GnuPG.
+ *
+ * Note: This code is heavily based on the GNU MP Library.
+ * Actually it's the same code with only minor changes in the
+ * way the data is stored; this is to support the abstraction
+ * of an optional secure memory allocation which may be used
+ * to avoid revealing of sensitive data due to paging etc.
+ * The GNU MP Library itself is published under the LGPL;
+ * however I decided to publish this code under the plain GPL.
+ */
+
+#include "mpi-internal.h"
+
+/* Shift U (pointed to by UP and USIZE digits long) CNT bits to the left
+ * and store the USIZE least significant digits of the result at WP.
+ * Return the bits shifted out from the most significant digit.
+ *
+ * Argument constraints:
+ * 1. 0 < CNT < BITS_PER_MPI_LIMB
+ * 2. If the result is to be written over the input, WP must be >= UP.
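+ *
+ * Worked example (32-bit limbs): with USIZE == 1, CNT == 4 and the single
+ * input limb 0xF0000001, the stored result limb is 0x00000010 and the
+ * returned (shifted-out) bits are 0xF.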
+ */
+
+mpi_limb_t
+mpihelp_lshift(mpi_ptr_t wp, mpi_ptr_t up, mpi_size_t usize, unsigned int cnt)
+{
+ mpi_limb_t high_limb, low_limb;
+ unsigned sh_1, sh_2;
+ mpi_size_t i;
+ mpi_limb_t retval;
+
+ sh_1 = cnt;
+ wp += 1;
+ sh_2 = BITS_PER_MPI_LIMB - sh_1;
+ i = usize - 1;
+ low_limb = up[i];
+ retval = low_limb >> sh_2;
+ high_limb = low_limb;
+ while (--i >= 0) {
+ low_limb = up[i];
+ wp[i] = (high_limb << sh_1) | (low_limb >> sh_2);
+ high_limb = low_limb;
+ }
+ wp[i] = high_limb << sh_1;
+
+ return retval;
+}
diff --git a/lib/crypto/mpi/generic_mpih-mul1.c b/lib/crypto/mpi/generic_mpih-mul1.c
new file mode 100644
index 000000000..e020e61d4
--- /dev/null
+++ b/lib/crypto/mpi/generic_mpih-mul1.c
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* mpihelp-mul_1.c - MPI helper functions
+ * Copyright (C) 1994, 1996, 1997, 1998, 2001 Free Software Foundation, Inc.
+ *
+ * This file is part of GnuPG.
+ *
+ * Note: This code is heavily based on the GNU MP Library.
+ * Actually it's the same code with only minor changes in the
+ * way the data is stored; this is to support the abstraction
+ * of an optional secure memory allocation which may be used
+ * to avoid revealing of sensitive data due to paging etc.
+ * The GNU MP Library itself is published under the LGPL;
+ * however I decided to publish this code under the plain GPL.
+ */
+
+#include "mpi-internal.h"
+#include "longlong.h"
+
+mpi_limb_t
+mpihelp_mul_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_size_t s1_size,
+ mpi_limb_t s2_limb)
+{
+ mpi_limb_t cy_limb;
+ mpi_size_t j;
+ mpi_limb_t prod_high, prod_low;
+
+ /* The loop counter and index J goes from -S1_SIZE to -1. This way
+ * the loop becomes faster. */
+ j = -s1_size;
+
+ /* Offset the base pointers to compensate for the negative indices. */
+ s1_ptr -= j;
+ res_ptr -= j;
+
+ cy_limb = 0;
+ do {
+ umul_ppmm(prod_high, prod_low, s1_ptr[j], s2_limb);
+ prod_low += cy_limb;
+ cy_limb = (prod_low < cy_limb ? 1 : 0) + prod_high;
+ res_ptr[j] = prod_low;
+ } while (++j);
+
+ return cy_limb;
+}
diff --git a/lib/crypto/mpi/generic_mpih-mul2.c b/lib/crypto/mpi/generic_mpih-mul2.c
new file mode 100644
index 000000000..9484d8528
--- /dev/null
+++ b/lib/crypto/mpi/generic_mpih-mul2.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* mpihelp-mul_2.c - MPI helper functions
+ * Copyright (C) 1994, 1996, 1997, 1998, 2001 Free Software Foundation, Inc.
+ *
+ * This file is part of GnuPG.
+ *
+ * Note: This code is heavily based on the GNU MP Library.
+ * Actually it's the same code with only minor changes in the
+ * way the data is stored; this is to support the abstraction
+ * of an optional secure memory allocation which may be used
+ * to avoid revealing of sensitive data due to paging etc.
+ * The GNU MP Library itself is published under the LGPL;
+ * however I decided to publish this code under the plain GPL.
+ */
+
+#include "mpi-internal.h"
+#include "longlong.h"
+
+mpi_limb_t
+mpihelp_addmul_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
+ mpi_size_t s1_size, mpi_limb_t s2_limb)
+{
+ mpi_limb_t cy_limb;
+ mpi_size_t j;
+ mpi_limb_t prod_high, prod_low;
+ mpi_limb_t x;
+
+ /* The loop counter and index J goes from -SIZE to -1. This way
+ * the loop becomes faster. */
+ j = -s1_size;
+ res_ptr -= j;
+ s1_ptr -= j;
+
+ cy_limb = 0;
+ do {
+ umul_ppmm(prod_high, prod_low, s1_ptr[j], s2_limb);
+
+ prod_low += cy_limb;
+ cy_limb = (prod_low < cy_limb ? 1 : 0) + prod_high;
+
+ x = res_ptr[j];
+ prod_low = x + prod_low;
+ cy_limb += prod_low < x ? 1 : 0;
+ res_ptr[j] = prod_low;
+ } while (++j);
+ return cy_limb;
+}
diff --git a/lib/crypto/mpi/generic_mpih-mul3.c b/lib/crypto/mpi/generic_mpih-mul3.c
new file mode 100644
index 000000000..ccdbab412
--- /dev/null
+++ b/lib/crypto/mpi/generic_mpih-mul3.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* mpihelp-mul_3.c - MPI helper functions
+ * Copyright (C) 1994, 1996, 1997, 1998, 2001 Free Software Foundation, Inc.
+ *
+ * This file is part of GnuPG.
+ *
+ * Note: This code is heavily based on the GNU MP Library.
+ * Actually it's the same code with only minor changes in the
+ * way the data is stored; this is to support the abstraction
+ * of an optional secure memory allocation which may be used
+ * to avoid revealing of sensitive data due to paging etc.
+ * The GNU MP Library itself is published under the LGPL;
+ * however I decided to publish this code under the plain GPL.
+ */
+
+#include "mpi-internal.h"
+#include "longlong.h"
+
+mpi_limb_t
+mpihelp_submul_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
+ mpi_size_t s1_size, mpi_limb_t s2_limb)
+{
+ mpi_limb_t cy_limb;
+ mpi_size_t j;
+ mpi_limb_t prod_high, prod_low;
+ mpi_limb_t x;
+
+ /* The loop counter and index J goes from -SIZE to -1. This way
+ * the loop becomes faster. */
+ j = -s1_size;
+ res_ptr -= j;
+ s1_ptr -= j;
+
+ cy_limb = 0;
+ do {
+ umul_ppmm(prod_high, prod_low, s1_ptr[j], s2_limb);
+
+ prod_low += cy_limb;
+ cy_limb = (prod_low < cy_limb ? 1 : 0) + prod_high;
+
+ x = res_ptr[j];
+ prod_low = x - prod_low;
+ cy_limb += prod_low > x ? 1 : 0;
+ res_ptr[j] = prod_low;
+ } while (++j);
+
+ return cy_limb;
+}
diff --git a/lib/crypto/mpi/generic_mpih-rshift.c b/lib/crypto/mpi/generic_mpih-rshift.c
new file mode 100644
index 000000000..e07bc69aa
--- /dev/null
+++ b/lib/crypto/mpi/generic_mpih-rshift.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* mpih-rshift.c - MPI helper functions
+ * Copyright (C) 1994, 1996, 1998, 1999,
+ * 2000, 2001 Free Software Foundation, Inc.
+ *
+ * This file is part of GNUPG
+ *
+ * Note: This code is heavily based on the GNU MP Library.
+ * Actually it's the same code with only minor changes in the
+ * way the data is stored; this is to support the abstraction
+ * of an optional secure memory allocation which may be used
+ * to avoid revealing of sensitive data due to paging etc.
+ * The GNU MP Library itself is published under the LGPL;
+ * however I decided to publish this code under the plain GPL.
+ */
+
+#include "mpi-internal.h"
+
+/* Shift U (pointed to by UP and USIZE limbs long) CNT bits to the right
+ * and store the USIZE least significant limbs of the result at WP.
+ * The bits shifted out to the right are returned.
+ *
+ * Argument constraints:
+ * 1. 0 < CNT < BITS_PER_MPI_LIMB
+ * 2. If the result is to be written over the input, WP must be <= UP.
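+ *
+ * Worked example (32-bit limbs): with USIZE == 1, CNT == 4 and the single
+ * input limb 0x1000000F, the stored result limb is 0x01000000 and the
+ * returned (shifted-out) bits are 0xF0000000.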
+ */
+
+mpi_limb_t
+mpihelp_rshift(mpi_ptr_t wp, mpi_ptr_t up, mpi_size_t usize, unsigned cnt)
+{
+ mpi_limb_t high_limb, low_limb;
+ unsigned sh_1, sh_2;
+ mpi_size_t i;
+ mpi_limb_t retval;
+
+ sh_1 = cnt;
+ wp -= 1;
+ sh_2 = BITS_PER_MPI_LIMB - sh_1;
+ high_limb = up[0];
+ retval = high_limb << sh_2;
+ low_limb = high_limb;
+ for (i = 1; i < usize; i++) {
+ high_limb = up[i];
+ wp[i] = (low_limb >> sh_1) | (high_limb << sh_2);
+ low_limb = high_limb;
+ }
+ wp[i] = low_limb >> sh_1;
+
+ return retval;
+}
diff --git a/lib/crypto/mpi/generic_mpih-sub1.c b/lib/crypto/mpi/generic_mpih-sub1.c
new file mode 100644
index 000000000..eea4382aa
--- /dev/null
+++ b/lib/crypto/mpi/generic_mpih-sub1.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* mpihelp-sub_1.c - MPI helper functions
+ * Copyright (C) 1994, 1996, 1997, 1998, 2001 Free Software Foundation, Inc.
+ *
+ * This file is part of GnuPG.
+ *
+ * Note: This code is heavily based on the GNU MP Library.
+ * Actually it's the same code with only minor changes in the
+ * way the data is stored; this is to support the abstraction
+ * of an optional secure memory allocation which may be used
+ * to avoid revealing of sensitive data due to paging etc.
+ * The GNU MP Library itself is published under the LGPL;
+ * however I decided to publish this code under the plain GPL.
+ */
+
+#include "mpi-internal.h"
+#include "longlong.h"
+
+mpi_limb_t
+mpihelp_sub_n(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
+ mpi_ptr_t s2_ptr, mpi_size_t size)
+{
+ mpi_limb_t x, y, cy;
+ mpi_size_t j;
+
+ /* The loop counter and index J goes from -SIZE to -1. This way
+ the loop becomes faster. */
+ j = -size;
+
+ /* Offset the base pointers to compensate for the negative indices. */
+ s1_ptr -= j;
+ s2_ptr -= j;
+ res_ptr -= j;
+
+ cy = 0;
+ do {
+ y = s2_ptr[j];
+ x = s1_ptr[j];
+ y += cy; /* add previous carry to subtrahend */
+ cy = y < cy; /* get out carry from that addition */
+ y = x - y; /* main subtract */
+ cy += y > x; /* get out carry from the subtract, combine */
+ res_ptr[j] = y;
+ } while (++j);
+
+ return cy;
+}
diff --git a/lib/crypto/mpi/longlong.h b/lib/crypto/mpi/longlong.h
new file mode 100644
index 000000000..b6fa1d08f
--- /dev/null
+++ b/lib/crypto/mpi/longlong.h
@@ -0,0 +1,1361 @@
+/* longlong.h -- definitions for mixed size 32/64 bit arithmetic.
+ * Note: I added some stuff for use with gnupg
+ *
+ * Copyright (C) 1991, 1992, 1993, 1994, 1996, 1998,
+ * 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Library General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
+ * License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public License
+ * along with this file; see the file COPYING.LIB. If not, write to
+ * the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA 02111-1307, USA. */
+
+#include <linux/count_zeros.h>
+
+/* You have to define the following before including this file:
+ *
+ * UWtype -- An unsigned type, default type for operations (typically a "word")
+ * UHWtype -- An unsigned type, at least half the size of UWtype.
+ * UDWtype -- An unsigned type, at least twice as large as UWtype
+ * W_TYPE_SIZE -- size in bits of UWtype
+ *
+ * SItype, USItype -- Signed and unsigned 32 bit types.
+ * DItype, UDItype -- Signed and unsigned 64 bit types.
+ *
+ * On a 32 bit machine UWtype should typically be USItype;
+ * on a 64 bit machine, UWtype should typically be UDItype.
+*/
+
+#define __BITS4 (W_TYPE_SIZE / 4)
+#define __ll_B ((UWtype) 1 << (W_TYPE_SIZE / 2))
+#define __ll_lowpart(t) ((UWtype) (t) & (__ll_B - 1))
+#define __ll_highpart(t) ((UWtype) (t) >> (W_TYPE_SIZE / 2))
+
+/* This is used to make sure that no undesirable sharing takes place between
+ * different libraries that use this file. */
+#ifndef __MPN
+#define __MPN(x) __##x
+#endif
+
+/* Define auxiliary asm macros.
+ *
+ * 1) umul_ppmm(high_prod, low_prod, multiplier, multiplicand) multiplies two
+ * UWtype integers MULTIPLIER and MULTIPLICAND, and generates a two UWtype
+ * word product in HIGH_PROD and LOW_PROD.
+ *
+ * 2) __umulsidi3(a,b) multiplies two UWtype integers A and B, and returns a
+ * UDWtype product. This is just a variant of umul_ppmm.
+ *
+ * 3) udiv_qrnnd(quotient, remainder, high_numerator, low_numerator,
+ * denominator) divides a UDWtype, composed by the UWtype integers
+ * HIGH_NUMERATOR and LOW_NUMERATOR, by DENOMINATOR and places the quotient
+ * in QUOTIENT and the remainder in REMAINDER. HIGH_NUMERATOR must be less
+ * than DENOMINATOR for correct operation. If, in addition, the most
+ * significant bit of DENOMINATOR is required to be 1, then the pre-processor
+ * symbol UDIV_NEEDS_NORMALIZATION is defined to 1.
+ *
+ * 4) sdiv_qrnnd(quotient, remainder, high_numerator, low_numerator,
+ * denominator). Like udiv_qrnnd but the numbers are signed. The quotient
+ * is rounded towards 0.
+ *
+ * 5) count_leading_zeros(count, x) counts the number of zero-bits from the
+ * msb to the first non-zero bit in the UWtype X. This is the number of
+ * steps X needs to be shifted left to set the msb. Undefined for X == 0,
+ * unless the symbol COUNT_LEADING_ZEROS_0 is defined to some value.
+ *
+ * 6) count_trailing_zeros(count, x) like count_leading_zeros, but counts
+ * from the least significant end.
+ *
+ * 7) add_ssaaaa(high_sum, low_sum, high_addend_1, low_addend_1,
+ * high_addend_2, low_addend_2) adds two UWtype integers, composed by
+ * HIGH_ADDEND_1 and LOW_ADDEND_1, and HIGH_ADDEND_2 and LOW_ADDEND_2
+ * respectively. The result is placed in HIGH_SUM and LOW_SUM. Overflow
+ * (i.e. carry out) is not stored anywhere, and is lost.
+ *
+ * 8) sub_ddmmss(high_difference, low_difference, high_minuend, low_minuend,
+ * high_subtrahend, low_subtrahend) subtracts two two-word UWtype integers,
+ * composed by HIGH_MINUEND_1 and LOW_MINUEND_1, and HIGH_SUBTRAHEND_2 and
+ * LOW_SUBTRAHEND_2 respectively. The result is placed in HIGH_DIFFERENCE
+ * and LOW_DIFFERENCE. Overflow (i.e. carry out) is not stored anywhere,
+ * and is lost.
+ *
+ * If any of these macros are left undefined for a particular CPU,
+ * C macros are used. */
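+
+/* As a concrete reading of (1): after umul_ppmm(hi, lo, a, b) the identity
+ *   ((UDWtype)hi << W_TYPE_SIZE) + lo == (UDWtype)a * (UDWtype)b
+ * holds, i.e. hi:lo is the full double-width product.
+ */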
+
+/* The CPUs come in alphabetical order below.
+ *
+ * Please add support for more CPUs here, or improve the current support
+ * for the CPUs below! */
+
+#if defined(__GNUC__) && !defined(NO_ASM)
+
+/* We sometimes need to clobber "cc" with gcc2, but that would not be
+ understood by gcc1. Use cpp to avoid major code duplication. */
+#if __GNUC__ < 2
+#define __CLOBBER_CC
+#define __AND_CLOBBER_CC
+#else /* __GNUC__ >= 2 */
+#define __CLOBBER_CC : "cc"
+#define __AND_CLOBBER_CC , "cc"
+#endif /* __GNUC__ < 2 */
+
+/***************************************
+ ************** A29K *****************
+ ***************************************/
+#if (defined(__a29k__) || defined(_AM29K)) && W_TYPE_SIZE == 32
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("add %1,%4,%5\n" \
+ "addc %0,%2,%3" \
+ : "=r" ((USItype)(sh)), \
+ "=&r" ((USItype)(sl)) \
+ : "%r" ((USItype)(ah)), \
+ "rI" ((USItype)(bh)), \
+ "%r" ((USItype)(al)), \
+ "rI" ((USItype)(bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("sub %1,%4,%5\n" \
+ "subc %0,%2,%3" \
+ : "=r" ((USItype)(sh)), \
+ "=&r" ((USItype)(sl)) \
+ : "r" ((USItype)(ah)), \
+ "rI" ((USItype)(bh)), \
+ "r" ((USItype)(al)), \
+ "rI" ((USItype)(bl)))
+#define umul_ppmm(xh, xl, m0, m1) \
+do { \
+ USItype __m0 = (m0), __m1 = (m1); \
+ __asm__ ("multiplu %0,%1,%2" \
+ : "=r" ((USItype)(xl)) \
+ : "r" (__m0), \
+ "r" (__m1)); \
+ __asm__ ("multmu %0,%1,%2" \
+ : "=r" ((USItype)(xh)) \
+ : "r" (__m0), \
+ "r" (__m1)); \
+} while (0)
+#define udiv_qrnnd(q, r, n1, n0, d) \
+ __asm__ ("dividu %0,%3,%4" \
+ : "=r" ((USItype)(q)), \
+ "=q" ((USItype)(r)) \
+ : "1" ((USItype)(n1)), \
+ "r" ((USItype)(n0)), \
+ "r" ((USItype)(d)))
+#endif /* __a29k__ */
+
+#if defined(__alpha) && W_TYPE_SIZE == 64
+#define umul_ppmm(ph, pl, m0, m1) \
+do { \
+ UDItype __m0 = (m0), __m1 = (m1); \
+ (ph) = __builtin_alpha_umulh(__m0, __m1); \
+ (pl) = __m0 * __m1; \
+} while (0)
+#define UMUL_TIME 46
+#ifndef LONGLONG_STANDALONE
+#define udiv_qrnnd(q, r, n1, n0, d) \
+do { UDItype __r; \
+ (q) = __udiv_qrnnd(&__r, (n1), (n0), (d)); \
+ (r) = __r; \
+} while (0)
+extern UDItype __udiv_qrnnd(UDItype *, UDItype, UDItype, UDItype);
+#define UDIV_TIME 220
+#endif /* LONGLONG_STANDALONE */
+#endif /* __alpha */
+
+/***************************************
+ ************** ARM ******************
+ ***************************************/
+#if defined(__arm__) && W_TYPE_SIZE == 32
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("adds %1, %4, %5\n" \
+ "adc %0, %2, %3" \
+ : "=r" (sh), \
+ "=&r" (sl) \
+ : "%r" ((USItype)(ah)), \
+ "rI" ((USItype)(bh)), \
+ "%r" ((USItype)(al)), \
+ "rI" ((USItype)(bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("subs %1, %4, %5\n" \
+ "sbc %0, %2, %3" \
+ : "=r" (sh), \
+ "=&r" (sl) \
+ : "r" ((USItype)(ah)), \
+ "rI" ((USItype)(bh)), \
+ "r" ((USItype)(al)), \
+ "rI" ((USItype)(bl)))
+#if defined __ARM_ARCH_2__ || defined __ARM_ARCH_3__
+#define umul_ppmm(xh, xl, a, b) \
+ __asm__ ("@ Inlined umul_ppmm\n" \
+ "mov %|r0, %2, lsr #16 @ AAAA\n" \
+ "mov %|r2, %3, lsr #16 @ BBBB\n" \
+ "bic %|r1, %2, %|r0, lsl #16 @ aaaa\n" \
+ "bic %0, %3, %|r2, lsl #16 @ bbbb\n" \
+ "mul %1, %|r1, %|r2 @ aaaa * BBBB\n" \
+ "mul %|r2, %|r0, %|r2 @ AAAA * BBBB\n" \
+ "mul %|r1, %0, %|r1 @ aaaa * bbbb\n" \
+ "mul %0, %|r0, %0 @ AAAA * bbbb\n" \
+ "adds %|r0, %1, %0 @ central sum\n" \
+ "addcs %|r2, %|r2, #65536\n" \
+ "adds %1, %|r1, %|r0, lsl #16\n" \
+ "adc %0, %|r2, %|r0, lsr #16" \
+ : "=&r" (xh), \
+ "=r" (xl) \
+ : "r" ((USItype)(a)), \
+ "r" ((USItype)(b)) \
+ : "r0", "r1", "r2")
+#else
+#define umul_ppmm(xh, xl, a, b) \
+ __asm__ ("@ Inlined umul_ppmm\n" \
+ "umull %1, %0, %2, %3" \
+ : "=&r" (xh), \
+ "=&r" (xl) \
+ : "r" ((USItype)(a)), \
+ "r" ((USItype)(b)) \
+ : "r0", "r1")
+#endif
+#define UMUL_TIME 20
+#define UDIV_TIME 100
+#endif /* __arm__ */
+
+/***************************************
+ ************** CLIPPER **************
+ ***************************************/
+#if defined(__clipper__) && W_TYPE_SIZE == 32
+#define umul_ppmm(w1, w0, u, v) \
+ ({union {UDItype __ll; \
+ struct {USItype __l, __h; } __i; \
+ } __xx; \
+ __asm__ ("mulwux %2,%0" \
+ : "=r" (__xx.__ll) \
+ : "%0" ((USItype)(u)), \
+ "r" ((USItype)(v))); \
+ (w1) = __xx.__i.__h; (w0) = __xx.__i.__l; })
+#define smul_ppmm(w1, w0, u, v) \
+ ({union {DItype __ll; \
+ struct {SItype __l, __h; } __i; \
+ } __xx; \
+ __asm__ ("mulwx %2,%0" \
+ : "=r" (__xx.__ll) \
+ : "%0" ((SItype)(u)), \
+ "r" ((SItype)(v))); \
+ (w1) = __xx.__i.__h; (w0) = __xx.__i.__l; })
+#define __umulsidi3(u, v) \
+ ({UDItype __w; \
+ __asm__ ("mulwux %2,%0" \
+ : "=r" (__w) \
+ : "%0" ((USItype)(u)), \
+ "r" ((USItype)(v))); \
+ __w; })
+#endif /* __clipper__ */
+
+/***************************************
+ ************** GMICRO ***************
+ ***************************************/
+#if defined(__gmicro__) && W_TYPE_SIZE == 32
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("add.w %5,%1\n" \
+ "addx %3,%0" \
+ : "=g" ((USItype)(sh)), \
+ "=&g" ((USItype)(sl)) \
+ : "%0" ((USItype)(ah)), \
+ "g" ((USItype)(bh)), \
+ "%1" ((USItype)(al)), \
+ "g" ((USItype)(bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("sub.w %5,%1\n" \
+ "subx %3,%0" \
+ : "=g" ((USItype)(sh)), \
+ "=&g" ((USItype)(sl)) \
+ : "0" ((USItype)(ah)), \
+ "g" ((USItype)(bh)), \
+ "1" ((USItype)(al)), \
+ "g" ((USItype)(bl)))
+#define umul_ppmm(ph, pl, m0, m1) \
+ __asm__ ("mulx %3,%0,%1" \
+ : "=g" ((USItype)(ph)), \
+ "=r" ((USItype)(pl)) \
+ : "%0" ((USItype)(m0)), \
+ "g" ((USItype)(m1)))
+#define udiv_qrnnd(q, r, nh, nl, d) \
+ __asm__ ("divx %4,%0,%1" \
+ : "=g" ((USItype)(q)), \
+ "=r" ((USItype)(r)) \
+ : "1" ((USItype)(nh)), \
+ "0" ((USItype)(nl)), \
+ "g" ((USItype)(d)))
+#endif
+
+/***************************************
+ ************** HPPA *****************
+ ***************************************/
+#if defined(__hppa) && W_TYPE_SIZE == 32
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("add %4,%5,%1\n" \
+ "addc %2,%3,%0" \
+ : "=r" ((USItype)(sh)), \
+ "=&r" ((USItype)(sl)) \
+ : "%rM" ((USItype)(ah)), \
+ "rM" ((USItype)(bh)), \
+ "%rM" ((USItype)(al)), \
+ "rM" ((USItype)(bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("sub %4,%5,%1\n" \
+ "subb %2,%3,%0" \
+ : "=r" ((USItype)(sh)), \
+ "=&r" ((USItype)(sl)) \
+ : "rM" ((USItype)(ah)), \
+ "rM" ((USItype)(bh)), \
+ "rM" ((USItype)(al)), \
+ "rM" ((USItype)(bl)))
+#if 0 && defined(_PA_RISC1_1)
+/* xmpyu uses floating point register which is not allowed in Linux kernel. */
+#define umul_ppmm(wh, wl, u, v) \
+do { \
+ union {UDItype __ll; \
+ struct {USItype __h, __l; } __i; \
+ } __xx; \
+ __asm__ ("xmpyu %1,%2,%0" \
+ : "=*f" (__xx.__ll) \
+ : "*f" ((USItype)(u)), \
+ "*f" ((USItype)(v))); \
+ (wh) = __xx.__i.__h; \
+ (wl) = __xx.__i.__l; \
+} while (0)
+#define UMUL_TIME 8
+#define UDIV_TIME 60
+#else
+#define UMUL_TIME 40
+#define UDIV_TIME 80
+#endif
+#if 0 /* #ifndef LONGLONG_STANDALONE */
+#define udiv_qrnnd(q, r, n1, n0, d) \
+do { USItype __r; \
+ (q) = __udiv_qrnnd(&__r, (n1), (n0), (d)); \
+ (r) = __r; \
+} while (0)
+extern USItype __udiv_qrnnd();
+#endif /* LONGLONG_STANDALONE */
+#endif /* hppa */
+
+/***************************************
+ ************** I370 *****************
+ ***************************************/
+#if (defined(__i370__) || defined(__mvs__)) && W_TYPE_SIZE == 32
+#define umul_ppmm(xh, xl, m0, m1) \
+do { \
+ union {UDItype __ll; \
+ struct {USItype __h, __l; } __i; \
+ } __xx; \
+ USItype __m0 = (m0), __m1 = (m1); \
+ __asm__ ("mr %0,%3" \
+ : "=r" (__xx.__i.__h), \
+ "=r" (__xx.__i.__l) \
+ : "%1" (__m0), \
+ "r" (__m1)); \
+ (xh) = __xx.__i.__h; (xl) = __xx.__i.__l; \
+ (xh) += ((((SItype) __m0 >> 31) & __m1) \
+ + (((SItype) __m1 >> 31) & __m0)); \
+} while (0)
+#define smul_ppmm(xh, xl, m0, m1) \
+do { \
+ union {DItype __ll; \
+ struct {USItype __h, __l; } __i; \
+ } __xx; \
+ __asm__ ("mr %0,%3" \
+ : "=r" (__xx.__i.__h), \
+ "=r" (__xx.__i.__l) \
+ : "%1" (m0), \
+ "r" (m1)); \
+ (xh) = __xx.__i.__h; (xl) = __xx.__i.__l; \
+} while (0)
+#define sdiv_qrnnd(q, r, n1, n0, d) \
+do { \
+ union {DItype __ll; \
+ struct {USItype __h, __l; } __i; \
+ } __xx; \
+ __xx.__i.__h = n1; __xx.__i.__l = n0; \
+ __asm__ ("dr %0,%2" \
+ : "=r" (__xx.__ll) \
+ : "0" (__xx.__ll), "r" (d)); \
+ (q) = __xx.__i.__l; (r) = __xx.__i.__h; \
+} while (0)
+#endif
+
+/***************************************
+ ************** I386 *****************
+ ***************************************/
+#undef __i386__
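+/* Note: __i386__ is undefined just above, so the x86 asm versions below are
+ * normally compiled out and the generic C fallbacks are used instead.
+ */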
+#if (defined(__i386__) || defined(__i486__)) && W_TYPE_SIZE == 32
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("addl %5,%1\n" \
+ "adcl %3,%0" \
+ : "=r" (sh), \
+ "=&r" (sl) \
+ : "%0" ((USItype)(ah)), \
+ "g" ((USItype)(bh)), \
+ "%1" ((USItype)(al)), \
+ "g" ((USItype)(bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("subl %5,%1\n" \
+ "sbbl %3,%0" \
+ : "=r" (sh), \
+ "=&r" (sl) \
+ : "0" ((USItype)(ah)), \
+ "g" ((USItype)(bh)), \
+ "1" ((USItype)(al)), \
+ "g" ((USItype)(bl)))
+#define umul_ppmm(w1, w0, u, v) \
+ __asm__ ("mull %3" \
+ : "=a" (w0), \
+ "=d" (w1) \
+ : "%0" ((USItype)(u)), \
+ "rm" ((USItype)(v)))
+#define udiv_qrnnd(q, r, n1, n0, d) \
+ __asm__ ("divl %4" \
+ : "=a" (q), \
+ "=d" (r) \
+ : "0" ((USItype)(n0)), \
+ "1" ((USItype)(n1)), \
+ "rm" ((USItype)(d)))
+#ifndef UMUL_TIME
+#define UMUL_TIME 40
+#endif
+#ifndef UDIV_TIME
+#define UDIV_TIME 40
+#endif
+#endif /* 80x86 */
+
+/***************************************
+ ************** I860 *****************
+ ***************************************/
+#if defined(__i860__) && W_TYPE_SIZE == 32
+#define rshift_rhlc(r, h, l, c) \
+ __asm__ ("shr %3,r0,r0\n" \
+ "shrd %1,%2,%0" \
+ "=r" (r) : "r" (h), "r" (l), "rn" (c))
+#endif /* i860 */
+
+/***************************************
+ ************** I960 *****************
+ ***************************************/
+#if defined(__i960__) && W_TYPE_SIZE == 32
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("cmpo 1,0\n" \
+ "addc %5,%4,%1\n" \
+ "addc %3,%2,%0" \
+ : "=r" ((USItype)(sh)), \
+ "=&r" ((USItype)(sl)) \
+ : "%dI" ((USItype)(ah)), \
+ "dI" ((USItype)(bh)), \
+ "%dI" ((USItype)(al)), \
+ "dI" ((USItype)(bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("cmpo 0,0\n" \
+ "subc %5,%4,%1\n" \
+ "subc %3,%2,%0" \
+ : "=r" ((USItype)(sh)), \
+ "=&r" ((USItype)(sl)) \
+ : "dI" ((USItype)(ah)), \
+ "dI" ((USItype)(bh)), \
+ "dI" ((USItype)(al)), \
+ "dI" ((USItype)(bl)))
+#define umul_ppmm(w1, w0, u, v) \
+ ({union {UDItype __ll; \
+ struct {USItype __l, __h; } __i; \
+ } __xx; \
+ __asm__ ("emul %2,%1,%0" \
+ : "=d" (__xx.__ll) \
+ : "%dI" ((USItype)(u)), \
+ "dI" ((USItype)(v))); \
+ (w1) = __xx.__i.__h; (w0) = __xx.__i.__l; })
+#define __umulsidi3(u, v) \
+ ({UDItype __w; \
+ __asm__ ("emul %2,%1,%0" \
+ : "=d" (__w) \
+ : "%dI" ((USItype)(u)), \
+ "dI" ((USItype)(v))); \
+ __w; })
+#define udiv_qrnnd(q, r, nh, nl, d) \
+do { \
+ union {UDItype __ll; \
+ struct {USItype __l, __h; } __i; \
+	} __nn, __rq; \
+ __nn.__i.__h = (nh); __nn.__i.__l = (nl); \
+ __asm__ ("ediv %d,%n,%0" \
+ : "=d" (__rq.__ll) \
+ : "dI" (__nn.__ll), \
+ "dI" ((USItype)(d))); \
+ (r) = __rq.__i.__l; (q) = __rq.__i.__h; \
+} while (0)
+#if defined(__i960mx) /* what is the proper symbol to test??? */
+#define rshift_rhlc(r, h, l, c) \
+do { \
+ union {UDItype __ll; \
+ struct {USItype __l, __h; } __i; \
+ } __nn; \
+ __nn.__i.__h = (h); __nn.__i.__l = (l); \
+ __asm__ ("shre %2,%1,%0" \
+ : "=d" (r) : "dI" (__nn.__ll), "dI" (c)); \
+} while (0)
+#endif /* i960mx */
+#endif /* i960 */
+
+/***************************************
+ ************** 68000 ****************
+ ***************************************/
+#if (defined(__mc68000__) || defined(__mc68020__) || defined(__NeXT__) || defined(mc68020)) && W_TYPE_SIZE == 32
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("add%.l %5,%1\n" \
+ "addx%.l %3,%0" \
+ : "=d" ((USItype)(sh)), \
+ "=&d" ((USItype)(sl)) \
+ : "%0" ((USItype)(ah)), \
+ "d" ((USItype)(bh)), \
+ "%1" ((USItype)(al)), \
+ "g" ((USItype)(bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("sub%.l %5,%1\n" \
+ "subx%.l %3,%0" \
+ : "=d" ((USItype)(sh)), \
+ "=&d" ((USItype)(sl)) \
+ : "0" ((USItype)(ah)), \
+ "d" ((USItype)(bh)), \
+ "1" ((USItype)(al)), \
+ "g" ((USItype)(bl)))
+#if (defined(__mc68020__) || defined(__NeXT__) || defined(mc68020))
+#define umul_ppmm(w1, w0, u, v) \
+ __asm__ ("mulu%.l %3,%1:%0" \
+ : "=d" ((USItype)(w0)), \
+ "=d" ((USItype)(w1)) \
+ : "%0" ((USItype)(u)), \
+ "dmi" ((USItype)(v)))
+#define UMUL_TIME 45
+#define udiv_qrnnd(q, r, n1, n0, d) \
+ __asm__ ("divu%.l %4,%1:%0" \
+ : "=d" ((USItype)(q)), \
+ "=d" ((USItype)(r)) \
+ : "0" ((USItype)(n0)), \
+ "1" ((USItype)(n1)), \
+ "dmi" ((USItype)(d)))
+#define UDIV_TIME 90
+#define sdiv_qrnnd(q, r, n1, n0, d) \
+ __asm__ ("divs%.l %4,%1:%0" \
+ : "=d" ((USItype)(q)), \
+ "=d" ((USItype)(r)) \
+ : "0" ((USItype)(n0)), \
+ "1" ((USItype)(n1)), \
+ "dmi" ((USItype)(d)))
+#else /* not mc68020 */
+#define umul_ppmm(xh, xl, a, b) \
+do { USItype __umul_tmp1, __umul_tmp2; \
+ __asm__ ("| Inlined umul_ppmm\n" \
+ "move%.l %5,%3\n" \
+ "move%.l %2,%0\n" \
+ "move%.w %3,%1\n" \
+ "swap %3\n" \
+ "swap %0\n" \
+ "mulu %2,%1\n" \
+ "mulu %3,%0\n" \
+ "mulu %2,%3\n" \
+ "swap %2\n" \
+ "mulu %5,%2\n" \
+ "add%.l %3,%2\n" \
+ "jcc 1f\n" \
+ "add%.l %#0x10000,%0\n" \
+ "1: move%.l %2,%3\n" \
+ "clr%.w %2\n" \
+ "swap %2\n" \
+ "swap %3\n" \
+ "clr%.w %3\n" \
+ "add%.l %3,%1\n" \
+ "addx%.l %2,%0\n" \
+ "| End inlined umul_ppmm" \
+ : "=&d" ((USItype)(xh)), "=&d" ((USItype)(xl)), \
+ "=d" (__umul_tmp1), "=&d" (__umul_tmp2) \
+ : "%2" ((USItype)(a)), "d" ((USItype)(b))); \
+} while (0)
+#define UMUL_TIME 100
+#define UDIV_TIME 400
+#endif /* not mc68020 */
+#endif /* mc68000 */
+
+/***************************************
+ ************** 88000 ****************
+ ***************************************/
+#if defined(__m88000__) && W_TYPE_SIZE == 32
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("addu.co %1,%r4,%r5\n" \
+ "addu.ci %0,%r2,%r3" \
+ : "=r" ((USItype)(sh)), \
+ "=&r" ((USItype)(sl)) \
+ : "%rJ" ((USItype)(ah)), \
+ "rJ" ((USItype)(bh)), \
+ "%rJ" ((USItype)(al)), \
+ "rJ" ((USItype)(bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("subu.co %1,%r4,%r5\n" \
+ "subu.ci %0,%r2,%r3" \
+ : "=r" ((USItype)(sh)), \
+ "=&r" ((USItype)(sl)) \
+ : "rJ" ((USItype)(ah)), \
+ "rJ" ((USItype)(bh)), \
+ "rJ" ((USItype)(al)), \
+ "rJ" ((USItype)(bl)))
+#if defined(__m88110__)
+#define umul_ppmm(wh, wl, u, v) \
+do { \
+ union {UDItype __ll; \
+ struct {USItype __h, __l; } __i; \
+ } __x; \
+ __asm__ ("mulu.d %0,%1,%2" : "=r" (__x.__ll) : "r" (u), "r" (v)); \
+ (wh) = __x.__i.__h; \
+ (wl) = __x.__i.__l; \
+} while (0)
+#define udiv_qrnnd(q, r, n1, n0, d) \
+ ({union {UDItype __ll; \
+ struct {USItype __h, __l; } __i; \
+ } __x, __q; \
+ __x.__i.__h = (n1); __x.__i.__l = (n0); \
+ __asm__ ("divu.d %0,%1,%2" \
+ : "=r" (__q.__ll) : "r" (__x.__ll), "r" (d)); \
+ (r) = (n0) - __q.__l * (d); (q) = __q.__l; })
+#define UMUL_TIME 5
+#define UDIV_TIME 25
+#else
+#define UMUL_TIME 17
+#define UDIV_TIME 150
+#endif /* __m88110__ */
+#endif /* __m88000__ */
+
+/***************************************
+ ************** MIPS *****************
+ ***************************************/
+#if defined(__mips__) && W_TYPE_SIZE == 32
+#define umul_ppmm(w1, w0, u, v) \
+do { \
+ UDItype __ll = (UDItype)(u) * (v); \
+ w1 = __ll >> 32; \
+ w0 = __ll; \
+} while (0)
+#define UMUL_TIME 10
+#define UDIV_TIME 100
+#endif /* __mips__ */
+
+/***************************************
+ ************** MIPS/64 **************
+ ***************************************/
+#if (defined(__mips) && __mips >= 3) && W_TYPE_SIZE == 64
+#if defined(__mips_isa_rev) && __mips_isa_rev >= 6 && defined(CONFIG_CC_IS_GCC)
+/*
+ * GCC ends up emitting a __multi3 intrinsic call for MIPS64r6 with the plain C
+ * code below, so we special case MIPS64r6 until the compiler can do better.
+ */
+#define umul_ppmm(w1, w0, u, v) \
+do { \
+ __asm__ ("dmulu %0,%1,%2" \
+ : "=d" ((UDItype)(w0)) \
+ : "d" ((UDItype)(u)), \
+ "d" ((UDItype)(v))); \
+ __asm__ ("dmuhu %0,%1,%2" \
+ : "=d" ((UDItype)(w1)) \
+ : "d" ((UDItype)(u)), \
+ "d" ((UDItype)(v))); \
+} while (0)
+#else
+#define umul_ppmm(w1, w0, u, v) \
+do { \
+ typedef unsigned int __ll_UTItype __attribute__((mode(TI))); \
+ __ll_UTItype __ll = (__ll_UTItype)(u) * (v); \
+ w1 = __ll >> 64; \
+ w0 = __ll; \
+} while (0)
+#endif
+#define UMUL_TIME 20
+#define UDIV_TIME 140
+#endif /* __mips__ */
+
+/***************************************
+ ************** 32000 ****************
+ ***************************************/
+#if defined(__ns32000__) && W_TYPE_SIZE == 32
+#define umul_ppmm(w1, w0, u, v) \
+ ({union {UDItype __ll; \
+ struct {USItype __l, __h; } __i; \
+ } __xx; \
+ __asm__ ("meid %2,%0" \
+ : "=g" (__xx.__ll) \
+ : "%0" ((USItype)(u)), \
+ "g" ((USItype)(v))); \
+ (w1) = __xx.__i.__h; (w0) = __xx.__i.__l; })
+#define __umulsidi3(u, v) \
+ ({UDItype __w; \
+ __asm__ ("meid %2,%0" \
+ : "=g" (__w) \
+ : "%0" ((USItype)(u)), \
+ "g" ((USItype)(v))); \
+ __w; })
+#define udiv_qrnnd(q, r, n1, n0, d) \
+ ({union {UDItype __ll; \
+ struct {USItype __l, __h; } __i; \
+ } __xx; \
+ __xx.__i.__h = (n1); __xx.__i.__l = (n0); \
+ __asm__ ("deid %2,%0" \
+ : "=g" (__xx.__ll) \
+ : "0" (__xx.__ll), \
+ "g" ((USItype)(d))); \
+ (r) = __xx.__i.__l; (q) = __xx.__i.__h; })
+#endif /* __ns32000__ */
+
+/***************************************
+ ************** PPC ******************
+ ***************************************/
+#if (defined(_ARCH_PPC) || defined(_IBMR2)) && W_TYPE_SIZE == 32
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+do { \
+ if (__builtin_constant_p(bh) && (bh) == 0) \
+ __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{aze|addze} %0,%2" \
+ : "=r" (sh), \
+ "=&r" (sl) \
+ : "%r" ((USItype)(ah)), \
+ "%r" ((USItype)(al)), \
+ "rI" ((USItype)(bl))); \
+ else if (__builtin_constant_p(bh) && (bh) == ~(USItype) 0) \
+ __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{ame|addme} %0,%2" \
+ : "=r" (sh), \
+ "=&r" (sl) \
+ : "%r" ((USItype)(ah)), \
+ "%r" ((USItype)(al)), \
+ "rI" ((USItype)(bl))); \
+ else \
+ __asm__ ("{a%I5|add%I5c} %1,%4,%5\n\t{ae|adde} %0,%2,%3" \
+ : "=r" (sh), \
+ "=&r" (sl) \
+ : "%r" ((USItype)(ah)), \
+ "r" ((USItype)(bh)), \
+ "%r" ((USItype)(al)), \
+ "rI" ((USItype)(bl))); \
+} while (0)
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+do { \
+ if (__builtin_constant_p(ah) && (ah) == 0) \
+ __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfze|subfze} %0,%2" \
+ : "=r" (sh), \
+ "=&r" (sl) \
+ : "r" ((USItype)(bh)), \
+ "rI" ((USItype)(al)), \
+ "r" ((USItype)(bl))); \
+ else if (__builtin_constant_p(ah) && (ah) == ~(USItype) 0) \
+ __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfme|subfme} %0,%2" \
+ : "=r" (sh), \
+ "=&r" (sl) \
+ : "r" ((USItype)(bh)), \
+ "rI" ((USItype)(al)), \
+ "r" ((USItype)(bl))); \
+ else if (__builtin_constant_p(bh) && (bh) == 0) \
+ __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{ame|addme} %0,%2" \
+ : "=r" (sh), \
+ "=&r" (sl) \
+ : "r" ((USItype)(ah)), \
+ "rI" ((USItype)(al)), \
+ "r" ((USItype)(bl))); \
+ else if (__builtin_constant_p(bh) && (bh) == ~(USItype) 0) \
+ __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{aze|addze} %0,%2" \
+ : "=r" (sh), \
+ "=&r" (sl) \
+ : "r" ((USItype)(ah)), \
+ "rI" ((USItype)(al)), \
+ "r" ((USItype)(bl))); \
+ else \
+ __asm__ ("{sf%I4|subf%I4c} %1,%5,%4\n\t{sfe|subfe} %0,%3,%2" \
+ : "=r" (sh), \
+ "=&r" (sl) \
+ : "r" ((USItype)(ah)), \
+ "r" ((USItype)(bh)), \
+ "rI" ((USItype)(al)), \
+ "r" ((USItype)(bl))); \
+} while (0)
+#if defined(_ARCH_PPC)
+#define umul_ppmm(ph, pl, m0, m1) \
+do { \
+ USItype __m0 = (m0), __m1 = (m1); \
+ __asm__ ("mulhwu %0,%1,%2" \
+ : "=r" (ph) \
+ : "%r" (__m0), \
+ "r" (__m1)); \
+ (pl) = __m0 * __m1; \
+} while (0)
+#define UMUL_TIME 15
+#define smul_ppmm(ph, pl, m0, m1) \
+do { \
+ SItype __m0 = (m0), __m1 = (m1); \
+ __asm__ ("mulhw %0,%1,%2" \
+ : "=r" ((SItype) ph) \
+ : "%r" (__m0), \
+ "r" (__m1)); \
+ (pl) = __m0 * __m1; \
+} while (0)
+#define SMUL_TIME 14
+#define UDIV_TIME 120
+#else
+#define umul_ppmm(xh, xl, m0, m1) \
+do { \
+ USItype __m0 = (m0), __m1 = (m1); \
+ __asm__ ("mul %0,%2,%3" \
+ : "=r" ((USItype)(xh)), \
+ "=q" ((USItype)(xl)) \
+ : "r" (__m0), \
+ "r" (__m1)); \
+ (xh) += ((((SItype) __m0 >> 31) & __m1) \
+ + (((SItype) __m1 >> 31) & __m0)); \
+} while (0)
+#define UMUL_TIME 8
+#define smul_ppmm(xh, xl, m0, m1) \
+ __asm__ ("mul %0,%2,%3" \
+ : "=r" ((SItype)(xh)), \
+ "=q" ((SItype)(xl)) \
+ : "r" (m0), \
+ "r" (m1))
+#define SMUL_TIME 4
+#define sdiv_qrnnd(q, r, nh, nl, d) \
+ __asm__ ("div %0,%2,%4" \
+ : "=r" ((SItype)(q)), "=q" ((SItype)(r)) \
+ : "r" ((SItype)(nh)), "1" ((SItype)(nl)), "r" ((SItype)(d)))
+#define UDIV_TIME 100
+#endif
+#endif /* Power architecture variants. */
+
+/***************************************
+ ************** PYR ******************
+ ***************************************/
+#if defined(__pyr__) && W_TYPE_SIZE == 32
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("addw %5,%1\n" \
+ "addwc %3,%0" \
+ : "=r" ((USItype)(sh)), \
+ "=&r" ((USItype)(sl)) \
+ : "%0" ((USItype)(ah)), \
+ "g" ((USItype)(bh)), \
+ "%1" ((USItype)(al)), \
+ "g" ((USItype)(bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("subw %5,%1\n" \
+ "subwb %3,%0" \
+ : "=r" ((USItype)(sh)), \
+ "=&r" ((USItype)(sl)) \
+ : "0" ((USItype)(ah)), \
+ "g" ((USItype)(bh)), \
+ "1" ((USItype)(al)), \
+ "g" ((USItype)(bl)))
+ /* This insn works on Pyramids with AP, XP, or MI CPUs, but not with SP. */
+#define umul_ppmm(w1, w0, u, v) \
+ ({union {UDItype __ll; \
+ struct {USItype __h, __l; } __i; \
+ } __xx; \
+ __asm__ ("movw %1,%R0\n" \
+ "uemul %2,%0" \
+ : "=&r" (__xx.__ll) \
+ : "g" ((USItype) (u)), \
+ "g" ((USItype)(v))); \
+ (w1) = __xx.__i.__h; (w0) = __xx.__i.__l; })
+#endif /* __pyr__ */
+
+/***************************************
+ ************** RT/ROMP **************
+ ***************************************/
+#if defined(__ibm032__) /* RT/ROMP */ && W_TYPE_SIZE == 32
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("a %1,%5\n" \
+ "ae %0,%3" \
+ : "=r" ((USItype)(sh)), \
+ "=&r" ((USItype)(sl)) \
+ : "%0" ((USItype)(ah)), \
+ "r" ((USItype)(bh)), \
+ "%1" ((USItype)(al)), \
+ "r" ((USItype)(bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("s %1,%5\n" \
+ "se %0,%3" \
+ : "=r" ((USItype)(sh)), \
+ "=&r" ((USItype)(sl)) \
+ : "0" ((USItype)(ah)), \
+ "r" ((USItype)(bh)), \
+ "1" ((USItype)(al)), \
+ "r" ((USItype)(bl)))
+#define umul_ppmm(ph, pl, m0, m1) \
+do { \
+ USItype __m0 = (m0), __m1 = (m1); \
+ __asm__ ( \
+ "s r2,r2\n" \
+ "mts r10,%2\n" \
+ "m r2,%3\n" \
+ "m r2,%3\n" \
+ "m r2,%3\n" \
+ "m r2,%3\n" \
+ "m r2,%3\n" \
+ "m r2,%3\n" \
+ "m r2,%3\n" \
+ "m r2,%3\n" \
+ "m r2,%3\n" \
+ "m r2,%3\n" \
+ "m r2,%3\n" \
+ "m r2,%3\n" \
+ "m r2,%3\n" \
+ "m r2,%3\n" \
+ "m r2,%3\n" \
+ "m r2,%3\n" \
+ "cas %0,r2,r0\n" \
+ "mfs r10,%1" \
+ : "=r" ((USItype)(ph)), \
+ "=r" ((USItype)(pl)) \
+ : "%r" (__m0), \
+ "r" (__m1) \
+ : "r2"); \
+ (ph) += ((((SItype) __m0 >> 31) & __m1) \
+ + (((SItype) __m1 >> 31) & __m0)); \
+} while (0)
+#define UMUL_TIME 20
+#define UDIV_TIME 200
+#endif /* RT/ROMP */
+
+/***************************************
+ ************** SH2 ******************
+ ***************************************/
+#if (defined(__sh2__) || defined(__sh3__) || defined(__SH4__)) \
+ && W_TYPE_SIZE == 32
+#define umul_ppmm(w1, w0, u, v) \
+ __asm__ ( \
+ "dmulu.l %2,%3\n" \
+ "sts macl,%1\n" \
+ "sts mach,%0" \
+ : "=r" ((USItype)(w1)), \
+ "=r" ((USItype)(w0)) \
+ : "r" ((USItype)(u)), \
+ "r" ((USItype)(v)) \
+ : "macl", "mach")
+#define UMUL_TIME 5
+#endif
+
+/***************************************
+ ************** SPARC ****************
+ ***************************************/
+#if defined(__sparc__) && W_TYPE_SIZE == 32
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("addcc %r4,%5,%1\n" \
+ "addx %r2,%3,%0" \
+ : "=r" ((USItype)(sh)), \
+ "=&r" ((USItype)(sl)) \
+ : "%rJ" ((USItype)(ah)), \
+ "rI" ((USItype)(bh)), \
+ "%rJ" ((USItype)(al)), \
+ "rI" ((USItype)(bl)) \
+ __CLOBBER_CC)
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("subcc %r4,%5,%1\n" \
+ "subx %r2,%3,%0" \
+ : "=r" ((USItype)(sh)), \
+ "=&r" ((USItype)(sl)) \
+ : "rJ" ((USItype)(ah)), \
+ "rI" ((USItype)(bh)), \
+ "rJ" ((USItype)(al)), \
+ "rI" ((USItype)(bl)) \
+ __CLOBBER_CC)
+#if defined(__sparc_v8__)
+/* Don't match immediate range because 1) it is not often useful and
+   2) the 'I' constraint thinks of the range as a 13 bit signed interval,
+ while we want to match a 13 bit interval, sign extended to 32 bits,
+ but INTERPRETED AS UNSIGNED. */
+#define umul_ppmm(w1, w0, u, v) \
+ __asm__ ("umul %2,%3,%1;rd %%y,%0" \
+ : "=r" ((USItype)(w1)), \
+ "=r" ((USItype)(w0)) \
+ : "r" ((USItype)(u)), \
+ "r" ((USItype)(v)))
+#define UMUL_TIME 5
+#ifndef SUPERSPARC /* SuperSPARC's udiv only handles 53 bit dividends */
+#define udiv_qrnnd(q, r, n1, n0, d) \
+do { \
+ USItype __q; \
+ __asm__ ("mov %1,%%y;nop;nop;nop;udiv %2,%3,%0" \
+ : "=r" ((USItype)(__q)) \
+ : "r" ((USItype)(n1)), \
+ "r" ((USItype)(n0)), \
+ "r" ((USItype)(d))); \
+ (r) = (n0) - __q * (d); \
+ (q) = __q; \
+} while (0)
+#define UDIV_TIME 25
+#endif /* SUPERSPARC */
+#else /* ! __sparc_v8__ */
+#if defined(__sparclite__)
+/* This has hardware multiply but not divide. It also has two additional
+   instructions, scan (ffs from the high bit) and divscc. */
+#define umul_ppmm(w1, w0, u, v) \
+ __asm__ ("umul %2,%3,%1;rd %%y,%0" \
+ : "=r" ((USItype)(w1)), \
+ "=r" ((USItype)(w0)) \
+ : "r" ((USItype)(u)), \
+ "r" ((USItype)(v)))
+#define UMUL_TIME 5
+#define udiv_qrnnd(q, r, n1, n0, d) \
+ __asm__ ("! Inlined udiv_qrnnd\n" \
+ "wr %%g0,%2,%%y ! Not a delayed write for sparclite\n" \
+ "tst %%g0\n" \
+ "divscc %3,%4,%%g1\n" \
+ "divscc %%g1,%4,%%g1\n" \
+ "divscc %%g1,%4,%%g1\n" \
+ "divscc %%g1,%4,%%g1\n" \
+ "divscc %%g1,%4,%%g1\n" \
+ "divscc %%g1,%4,%%g1\n" \
+ "divscc %%g1,%4,%%g1\n" \
+ "divscc %%g1,%4,%%g1\n" \
+ "divscc %%g1,%4,%%g1\n" \
+ "divscc %%g1,%4,%%g1\n" \
+ "divscc %%g1,%4,%%g1\n" \
+ "divscc %%g1,%4,%%g1\n" \
+ "divscc %%g1,%4,%%g1\n" \
+ "divscc %%g1,%4,%%g1\n" \
+ "divscc %%g1,%4,%%g1\n" \
+ "divscc %%g1,%4,%%g1\n" \
+ "divscc %%g1,%4,%%g1\n" \
+ "divscc %%g1,%4,%%g1\n" \
+ "divscc %%g1,%4,%%g1\n" \
+ "divscc %%g1,%4,%%g1\n" \
+ "divscc %%g1,%4,%%g1\n" \
+ "divscc %%g1,%4,%%g1\n" \
+ "divscc %%g1,%4,%%g1\n" \
+ "divscc %%g1,%4,%%g1\n" \
+ "divscc %%g1,%4,%%g1\n" \
+ "divscc %%g1,%4,%%g1\n" \
+ "divscc %%g1,%4,%%g1\n" \
+ "divscc %%g1,%4,%%g1\n" \
+ "divscc %%g1,%4,%%g1\n" \
+ "divscc %%g1,%4,%%g1\n" \
+ "divscc %%g1,%4,%%g1\n" \
+ "divscc %%g1,%4,%0\n" \
+ "rd %%y,%1\n" \
+ "bl,a 1f\n" \
+ "add %1,%4,%1\n" \
+ "1: ! End of inline udiv_qrnnd" \
+ : "=r" ((USItype)(q)), \
+ "=r" ((USItype)(r)) \
+ : "r" ((USItype)(n1)), \
+ "r" ((USItype)(n0)), \
+ "rI" ((USItype)(d)) \
+ : "%g1" __AND_CLOBBER_CC)
+#define UDIV_TIME 37
+#endif /* __sparclite__ */
+#endif /* __sparc_v8__ */
+ /* Default to sparc v7 versions of umul_ppmm and udiv_qrnnd. */
+#ifndef umul_ppmm
+#define umul_ppmm(w1, w0, u, v) \
+ __asm__ ("! Inlined umul_ppmm\n" \
+ "wr %%g0,%2,%%y ! SPARC has 0-3 delay insn after a wr\n" \
+ "sra %3,31,%%g2 ! Don't move this insn\n" \
+ "and %2,%%g2,%%g2 ! Don't move this insn\n" \
+ "andcc %%g0,0,%%g1 ! Don't move this insn\n" \
+ "mulscc %%g1,%3,%%g1\n" \
+ "mulscc %%g1,%3,%%g1\n" \
+ "mulscc %%g1,%3,%%g1\n" \
+ "mulscc %%g1,%3,%%g1\n" \
+ "mulscc %%g1,%3,%%g1\n" \
+ "mulscc %%g1,%3,%%g1\n" \
+ "mulscc %%g1,%3,%%g1\n" \
+ "mulscc %%g1,%3,%%g1\n" \
+ "mulscc %%g1,%3,%%g1\n" \
+ "mulscc %%g1,%3,%%g1\n" \
+ "mulscc %%g1,%3,%%g1\n" \
+ "mulscc %%g1,%3,%%g1\n" \
+ "mulscc %%g1,%3,%%g1\n" \
+ "mulscc %%g1,%3,%%g1\n" \
+ "mulscc %%g1,%3,%%g1\n" \
+ "mulscc %%g1,%3,%%g1\n" \
+ "mulscc %%g1,%3,%%g1\n" \
+ "mulscc %%g1,%3,%%g1\n" \
+ "mulscc %%g1,%3,%%g1\n" \
+ "mulscc %%g1,%3,%%g1\n" \
+ "mulscc %%g1,%3,%%g1\n" \
+ "mulscc %%g1,%3,%%g1\n" \
+ "mulscc %%g1,%3,%%g1\n" \
+ "mulscc %%g1,%3,%%g1\n" \
+ "mulscc %%g1,%3,%%g1\n" \
+ "mulscc %%g1,%3,%%g1\n" \
+ "mulscc %%g1,%3,%%g1\n" \
+ "mulscc %%g1,%3,%%g1\n" \
+ "mulscc %%g1,%3,%%g1\n" \
+ "mulscc %%g1,%3,%%g1\n" \
+ "mulscc %%g1,%3,%%g1\n" \
+ "mulscc %%g1,%3,%%g1\n" \
+ "mulscc %%g1,0,%%g1\n" \
+ "add %%g1,%%g2,%0\n" \
+ "rd %%y,%1" \
+ : "=r" ((USItype)(w1)), \
+ "=r" ((USItype)(w0)) \
+ : "%rI" ((USItype)(u)), \
+ "r" ((USItype)(v)) \
+ : "%g1", "%g2" __AND_CLOBBER_CC)
+#define UMUL_TIME 39 /* 39 instructions */
+/* It's quite necessary to add this much assembler for the sparc.
+ The default udiv_qrnnd (in C) is more than 10 times slower! */
+#define udiv_qrnnd(q, r, n1, n0, d) \
+ __asm__ ("! Inlined udiv_qrnnd\n\t" \
+ "mov 32,%%g1\n\t" \
+ "subcc %1,%2,%%g0\n\t" \
+ "1: bcs 5f\n\t" \
+ "addxcc %0,%0,%0 ! shift n1n0 and a q-bit in lsb\n\t" \
+ "sub %1,%2,%1 ! this kills msb of n\n\t" \
+ "addx %1,%1,%1 ! so this can't give carry\n\t" \
+ "subcc %%g1,1,%%g1\n\t" \
+ "2: bne 1b\n\t" \
+ "subcc %1,%2,%%g0\n\t" \
+ "bcs 3f\n\t" \
+ "addxcc %0,%0,%0 ! shift n1n0 and a q-bit in lsb\n\t" \
+ "b 3f\n\t" \
+ "sub %1,%2,%1 ! this kills msb of n\n\t" \
+ "4: sub %1,%2,%1\n\t" \
+ "5: addxcc %1,%1,%1\n\t" \
+ "bcc 2b\n\t" \
+ "subcc %%g1,1,%%g1\n\t" \
+ "! Got carry from n. Subtract next step to cancel this carry.\n\t" \
+ "bne 4b\n\t" \
+ "addcc %0,%0,%0 ! shift n1n0 and a 0-bit in lsb\n\t" \
+ "sub %1,%2,%1\n\t" \
+ "3: xnor %0,0,%0\n\t" \
+ "! End of inline udiv_qrnnd\n" \
+ : "=&r" ((USItype)(q)), \
+ "=&r" ((USItype)(r)) \
+ : "r" ((USItype)(d)), \
+ "1" ((USItype)(n1)), \
+ "0" ((USItype)(n0)) : "%g1", "cc")
+#define UDIV_TIME (3+7*32) /* 7 instructions/iteration. 32 iterations. */
+#endif
+#endif /* __sparc__ */
+
+/***************************************
+ ************** VAX ******************
+ ***************************************/
+#if defined(__vax__) && W_TYPE_SIZE == 32
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("addl2 %5,%1\n" \
+ "adwc %3,%0" \
+ : "=g" ((USItype)(sh)), \
+ "=&g" ((USItype)(sl)) \
+ : "%0" ((USItype)(ah)), \
+ "g" ((USItype)(bh)), \
+ "%1" ((USItype)(al)), \
+ "g" ((USItype)(bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("subl2 %5,%1\n" \
+ "sbwc %3,%0" \
+ : "=g" ((USItype)(sh)), \
+ "=&g" ((USItype)(sl)) \
+ : "0" ((USItype)(ah)), \
+ "g" ((USItype)(bh)), \
+ "1" ((USItype)(al)), \
+ "g" ((USItype)(bl)))
+#define umul_ppmm(xh, xl, m0, m1) \
+do { \
+ union {UDItype __ll; \
+ struct {USItype __l, __h; } __i; \
+ } __xx; \
+ USItype __m0 = (m0), __m1 = (m1); \
+ __asm__ ("emul %1,%2,$0,%0" \
+ : "=g" (__xx.__ll) \
+ : "g" (__m0), \
+ "g" (__m1)); \
+ (xh) = __xx.__i.__h; (xl) = __xx.__i.__l; \
+ (xh) += ((((SItype) __m0 >> 31) & __m1) \
+ + (((SItype) __m1 >> 31) & __m0)); \
+} while (0)
+#define sdiv_qrnnd(q, r, n1, n0, d) \
+do { \
+ union {DItype __ll; \
+ struct {SItype __l, __h; } __i; \
+ } __xx; \
+ __xx.__i.__h = n1; __xx.__i.__l = n0; \
+ __asm__ ("ediv %3,%2,%0,%1" \
+ : "=g" (q), "=g" (r) \
+ : "g" (__xx.__ll), "g" (d)); \
+} while (0)
+#endif /* __vax__ */
+
+/***************************************
+ ************** Z8000 ****************
+ ***************************************/
+#if defined(__z8000__) && W_TYPE_SIZE == 16
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("add %H1,%H5\n\tadc %H0,%H3" \
+ : "=r" ((unsigned int)(sh)), \
+ "=&r" ((unsigned int)(sl)) \
+ : "%0" ((unsigned int)(ah)), \
+ "r" ((unsigned int)(bh)), \
+ "%1" ((unsigned int)(al)), \
+ "rQR" ((unsigned int)(bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("sub %H1,%H5\n\tsbc %H0,%H3" \
+ : "=r" ((unsigned int)(sh)), \
+ "=&r" ((unsigned int)(sl)) \
+ : "0" ((unsigned int)(ah)), \
+ "r" ((unsigned int)(bh)), \
+ "1" ((unsigned int)(al)), \
+ "rQR" ((unsigned int)(bl)))
+#define umul_ppmm(xh, xl, m0, m1) \
+do { \
+ union {long int __ll; \
+ struct {unsigned int __h, __l; } __i; \
+ } __xx; \
+ unsigned int __m0 = (m0), __m1 = (m1); \
+ __asm__ ("mult %S0,%H3" \
+ : "=r" (__xx.__i.__h), \
+ "=r" (__xx.__i.__l) \
+ : "%1" (__m0), \
+ "rQR" (__m1)); \
+ (xh) = __xx.__i.__h; (xl) = __xx.__i.__l; \
+ (xh) += ((((signed int) __m0 >> 15) & __m1) \
+ + (((signed int) __m1 >> 15) & __m0)); \
+} while (0)
+#endif /* __z8000__ */
+
+#endif /* __GNUC__ */
+
+/***************************************
+ *********** Generic Versions ********
+ ***************************************/
+#if !defined(umul_ppmm) && defined(__umulsidi3)
+#define umul_ppmm(ph, pl, m0, m1) \
+{ \
+ UDWtype __ll = __umulsidi3(m0, m1); \
+ ph = (UWtype) (__ll >> W_TYPE_SIZE); \
+ pl = (UWtype) __ll; \
+}
+#endif
+
+#if !defined(__umulsidi3)
+#define __umulsidi3(u, v) \
+ ({UWtype __hi, __lo; \
+ umul_ppmm(__hi, __lo, u, v); \
+ ((UDWtype) __hi << W_TYPE_SIZE) | __lo; })
+#endif
+
+ /* If this machine has no inline assembler, use C macros. */
+
+#if !defined(add_ssaaaa)
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+do { \
+ UWtype __x; \
+ __x = (al) + (bl); \
+ (sh) = (ah) + (bh) + (__x < (al)); \
+ (sl) = __x; \
+} while (0)
+#endif
+
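+/* A worked sketch of the carry detection in the generic add_ssaaaa()
+ * below: the low-word sum wraps modulo 2^W_TYPE_SIZE, so the result is
+ * smaller than an operand exactly when a carry occurred. With
+ * hypothetical values and 32-bit limbs:
+ *
+ *     UWtype sh, sl;
+ *     add_ssaaaa(sh, sl, 0x1, 0xffffffff, 0x0, 0x1);
+ *     // __x = 0xffffffff + 0x1 wraps to 0x0; 0x0 < 0xffffffff,
+ *     // so the carry is added in: sh == 0x2, sl == 0x0
+ */
+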
+#if !defined(sub_ddmmss)
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+do { \
+ UWtype __x; \
+ __x = (al) - (bl); \
+ (sh) = (ah) - (bh) - (__x > (al)); \
+ (sl) = __x; \
+} while (0)
+#endif
+
+#if !defined(umul_ppmm)
+#define umul_ppmm(w1, w0, u, v) \
+do { \
+ UWtype __x0, __x1, __x2, __x3; \
+ UHWtype __ul, __vl, __uh, __vh; \
+ UWtype __u = (u), __v = (v); \
+ \
+ __ul = __ll_lowpart(__u); \
+ __uh = __ll_highpart(__u); \
+ __vl = __ll_lowpart(__v); \
+ __vh = __ll_highpart(__v); \
+ \
+ __x0 = (UWtype) __ul * __vl; \
+ __x1 = (UWtype) __ul * __vh; \
+ __x2 = (UWtype) __uh * __vl; \
+ __x3 = (UWtype) __uh * __vh; \
+ \
+ __x1 += __ll_highpart(__x0);/* this can't give carry */ \
+ __x1 += __x2; /* but this indeed can */ \
+ if (__x1 < __x2) /* did we get it? */ \
+ __x3 += __ll_B; /* yes, add it in the proper pos. */ \
+ \
+ (w1) = __x3 + __ll_highpart(__x1); \
+ (w0) = (__ll_lowpart(__x1) << W_TYPE_SIZE/2) + __ll_lowpart(__x0); \
+} while (0)
+#endif
+
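+/* A short worked sketch of the schoolbook split used by the generic
+ * umul_ppmm() above, assuming W_TYPE_SIZE == 32 so that __ll_B == 2^16:
+ * writing u = uh*2^16 + ul and v = vh*2^16 + vl gives
+ *
+ *     u*v = (uh*vh)*2^32 + (ul*vh + uh*vl)*2^16 + ul*vl
+ *         =  __x3 *2^32 + (__x1 + __x2) *2^16 + __x0
+ *
+ * The only carry that can be lost is the one from __x1 + __x2, which is
+ * why it is tested explicitly and folded back into __x3 as __ll_B.
+ */
+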
+#if !defined(smul_ppmm)
+#define smul_ppmm(w1, w0, u, v) \
+do { \
+ UWtype __w1; \
+ UWtype __m0 = (u), __m1 = (v); \
+ umul_ppmm(__w1, w0, __m0, __m1); \
+ (w1) = __w1 - (-(__m0 >> (W_TYPE_SIZE - 1)) & __m1) \
+ - (-(__m1 >> (W_TYPE_SIZE - 1)) & __m0); \
+} while (0)
+#endif
+
+ /* Define this unconditionally, so it can be used for debugging. */
+#define __udiv_qrnnd_c(q, r, n1, n0, d) \
+do { \
+ UWtype __d1, __d0, __q1, __q0, __r1, __r0, __m; \
+ __d1 = __ll_highpart(d); \
+ __d0 = __ll_lowpart(d); \
+ \
+ __r1 = (n1) % __d1; \
+ __q1 = (n1) / __d1; \
+ __m = (UWtype) __q1 * __d0; \
+ __r1 = __r1 * __ll_B | __ll_highpart(n0); \
+ if (__r1 < __m) { \
+ __q1--, __r1 += (d); \
+ if (__r1 >= (d)) /* i.e. we didn't get carry when adding to __r1 */ \
+ if (__r1 < __m) \
+ __q1--, __r1 += (d); \
+ } \
+ __r1 -= __m; \
+ \
+ __r0 = __r1 % __d1; \
+ __q0 = __r1 / __d1; \
+ __m = (UWtype) __q0 * __d0; \
+ __r0 = __r0 * __ll_B | __ll_lowpart(n0); \
+ if (__r0 < __m) { \
+ __q0--, __r0 += (d); \
+ if (__r0 >= (d)) \
+ if (__r0 < __m) \
+ __q0--, __r0 += (d); \
+ } \
+ __r0 -= __m; \
+ \
+ (q) = (UWtype) __q1 * __ll_B | __q0; \
+ (r) = __r0; \
+} while (0)
+
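+/* Sketch of the idea in __udiv_qrnnd_c() above: the divisor is split
+ * into half-limbs __d1:__d0 and the quotient is formed as two
+ * base-__ll_B digits __q1 and __q0, each first estimated from a
+ * division by __d1 alone and then corrected downward at most twice;
+ * this is classic two-digit schoolbook division built from single-limb
+ * hardware divides. Note that it requires a normalized divisor (most
+ * significant bit of D set), which is why UDIV_NEEDS_NORMALIZATION is
+ * set to 1 when it is used.
+ */
+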
+/* If the processor has no udiv_qrnnd but sdiv_qrnnd, go through
+ __udiv_w_sdiv (defined in libgcc or elsewhere). */
+#if !defined(udiv_qrnnd) && defined(sdiv_qrnnd)
+#define udiv_qrnnd(q, r, nh, nl, d) \
+do { \
+ UWtype __r; \
+ (q) = __MPN(udiv_w_sdiv) (&__r, nh, nl, d); \
+ (r) = __r; \
+} while (0)
+#endif
+
+ /* If udiv_qrnnd was not defined for this processor, use __udiv_qrnnd_c. */
+#if !defined(udiv_qrnnd)
+#define UDIV_NEEDS_NORMALIZATION 1
+#define udiv_qrnnd __udiv_qrnnd_c
+#endif
+
+#ifndef UDIV_NEEDS_NORMALIZATION
+#define UDIV_NEEDS_NORMALIZATION 0
+#endif
diff --git a/lib/crypto/mpi/mpi-add.c b/lib/crypto/mpi/mpi-add.c
new file mode 100644
index 000000000..9056fc516
--- /dev/null
+++ b/lib/crypto/mpi/mpi-add.c
@@ -0,0 +1,155 @@
+/* mpi-add.c - MPI functions
+ * Copyright (C) 1994, 1996, 1998, 2001, 2002,
+ * 2003 Free Software Foundation, Inc.
+ *
+ * This file is part of Libgcrypt.
+ *
+ * Note: This code is heavily based on the GNU MP Library.
+ * Actually it's the same code with only minor changes in the
+ * way the data is stored; this is to support the abstraction
+ * of an optional secure memory allocation which may be used
+ * to avoid revealing of sensitive data due to paging etc.
+ */
+
+#include "mpi-internal.h"
+
+/****************
+ * Add the unsigned integer V to the mpi-integer U and store the
+ * result in W. U and V may be the same.
+ */
+void mpi_add_ui(MPI w, MPI u, unsigned long v)
+{
+ mpi_ptr_t wp, up;
+ mpi_size_t usize, wsize;
+ int usign, wsign;
+
+ usize = u->nlimbs;
+ usign = u->sign;
+ wsign = 0;
+
+ /* If there is not enough space for W (and a possible carry), enlarge it. */
+ wsize = usize + 1;
+ if (w->alloced < wsize)
+ mpi_resize(w, wsize);
+
+ /* These must be after realloc (U may be the same as W). */
+ up = u->d;
+ wp = w->d;
+
+ if (!usize) { /* simple */
+ wp[0] = v;
+ wsize = v ? 1:0;
+ } else if (!usign) { /* mpi is not negative */
+ mpi_limb_t cy;
+ cy = mpihelp_add_1(wp, up, usize, v);
+ wp[usize] = cy;
+ wsize = usize + cy;
+ } else {
+ /* The signs are different. Need exact comparison to determine
+ * which operand to subtract from which.
+ */
+ if (usize == 1 && up[0] < v) {
+ wp[0] = v - up[0];
+ wsize = 1;
+ } else {
+ mpihelp_sub_1(wp, up, usize, v);
+ /* Size can decrease by at most one limb. */
+ wsize = usize - (wp[usize-1] == 0);
+ wsign = 1;
+ }
+ }
+
+ w->nlimbs = wsize;
+ w->sign = wsign;
+}
+
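+/* A minimal usage sketch (hypothetical values, using the mpi_*()
+ * helpers declared in <linux/mpi.h>):
+ *
+ *     MPI w = mpi_alloc(0);
+ *     MPI u = mpi_alloc_set_ui(5);
+ *     u->sign = 1;            // u = -5
+ *     mpi_add_ui(w, u, 2);    // w = -3, via the subtraction branch above
+ *     mpi_free(u);
+ *     mpi_free(w);
+ */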
+
+void mpi_add(MPI w, MPI u, MPI v)
+{
+ mpi_ptr_t wp, up, vp;
+ mpi_size_t usize, vsize, wsize;
+ int usign, vsign, wsign;
+
+ if (u->nlimbs < v->nlimbs) { /* Swap U and V. */
+ usize = v->nlimbs;
+ usign = v->sign;
+ vsize = u->nlimbs;
+ vsign = u->sign;
+ wsize = usize + 1;
+ RESIZE_IF_NEEDED(w, wsize);
+ /* These must be after realloc (u or v may be the same as w). */
+ up = v->d;
+ vp = u->d;
+ } else {
+ usize = u->nlimbs;
+ usign = u->sign;
+ vsize = v->nlimbs;
+ vsign = v->sign;
+ wsize = usize + 1;
+ RESIZE_IF_NEEDED(w, wsize);
+ /* These must be after realloc (u or v may be the same as w). */
+ up = u->d;
+ vp = v->d;
+ }
+ wp = w->d;
+ wsign = 0;
+
+ if (!vsize) { /* simple */
+ MPN_COPY(wp, up, usize);
+ wsize = usize;
+ wsign = usign;
+ } else if (usign != vsign) { /* different sign */
+ /* This test is right since USIZE >= VSIZE */
+ if (usize != vsize) {
+ mpihelp_sub(wp, up, usize, vp, vsize);
+ wsize = usize;
+ MPN_NORMALIZE(wp, wsize);
+ wsign = usign;
+ } else if (mpihelp_cmp(up, vp, usize) < 0) {
+ mpihelp_sub_n(wp, vp, up, usize);
+ wsize = usize;
+ MPN_NORMALIZE(wp, wsize);
+ if (!usign)
+ wsign = 1;
+ } else {
+ mpihelp_sub_n(wp, up, vp, usize);
+ wsize = usize;
+ MPN_NORMALIZE(wp, wsize);
+ if (usign)
+ wsign = 1;
+ }
+ } else { /* U and V have same sign. Add them. */
+ mpi_limb_t cy = mpihelp_add(wp, up, usize, vp, vsize);
+ wp[usize] = cy;
+ wsize = usize + cy;
+ if (usign)
+ wsign = 1;
+ }
+
+ w->nlimbs = wsize;
+ w->sign = wsign;
+}
+EXPORT_SYMBOL_GPL(mpi_add);
+
+void mpi_sub(MPI w, MPI u, MPI v)
+{
+ MPI vv = mpi_copy(v);
+ vv->sign = !vv->sign;
+ mpi_add(w, u, vv);
+ mpi_free(vv);
+}
+EXPORT_SYMBOL_GPL(mpi_sub);
+
+void mpi_addm(MPI w, MPI u, MPI v, MPI m)
+{
+ mpi_add(w, u, v);
+ mpi_mod(w, w, m);
+}
+EXPORT_SYMBOL_GPL(mpi_addm);
+
+void mpi_subm(MPI w, MPI u, MPI v, MPI m)
+{
+ mpi_sub(w, u, v);
+ mpi_mod(w, w, m);
+}
+EXPORT_SYMBOL_GPL(mpi_subm);
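+
+/* Usage sketch: modular add built from the primitives above
+ * (hypothetical values):
+ *
+ *     // w = (7 + 8) mod 10 == 5
+ *     MPI u = mpi_alloc_set_ui(7), v = mpi_alloc_set_ui(8);
+ *     MPI m = mpi_alloc_set_ui(10), w = mpi_alloc(0);
+ *     mpi_addm(w, u, v, m);
+ */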
diff --git a/lib/crypto/mpi/mpi-bit.c b/lib/crypto/mpi/mpi-bit.c
new file mode 100644
index 000000000..070ba784c
--- /dev/null
+++ b/lib/crypto/mpi/mpi-bit.c
@@ -0,0 +1,308 @@
+/* mpi-bit.c - MPI bit level functions
+ * Copyright (C) 1998, 1999 Free Software Foundation, Inc.
+ *
+ * This file is part of GnuPG.
+ *
+ * GnuPG is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * GnuPG is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ */
+
+#include "mpi-internal.h"
+#include "longlong.h"
+
+#define A_LIMB_1 ((mpi_limb_t) 1)
+
+/****************
+ * Sometimes we have MSL (most significant limbs) which are 0;
+ * this is for some reasons not good, so this function removes them.
+ */
+void mpi_normalize(MPI a)
+{
+ for (; a->nlimbs && !a->d[a->nlimbs - 1]; a->nlimbs--)
+ ;
+}
+EXPORT_SYMBOL_GPL(mpi_normalize);
+
+/****************
+ * Return the number of bits in A.
+ */
+unsigned mpi_get_nbits(MPI a)
+{
+ unsigned n;
+
+ mpi_normalize(a);
+
+ if (a->nlimbs) {
+ mpi_limb_t alimb = a->d[a->nlimbs - 1];
+ if (alimb)
+ n = count_leading_zeros(alimb);
+ else
+ n = BITS_PER_MPI_LIMB;
+ n = BITS_PER_MPI_LIMB - n + (a->nlimbs - 1) * BITS_PER_MPI_LIMB;
+ } else
+ n = 0;
+ return n;
+}
+EXPORT_SYMBOL_GPL(mpi_get_nbits);
+
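+/* A small worked example: for a one-limb MPI holding 0x100 with 64-bit
+ * limbs, count_leading_zeros() yields 55, so mpi_get_nbits() returns
+ * 64 - 55 + 0 == 9, i.e. the index of the highest set bit plus one.
+ */
+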
+/****************
+ * Test whether bit N is set.
+ */
+int mpi_test_bit(MPI a, unsigned int n)
+{
+ unsigned int limbno, bitno;
+ mpi_limb_t limb;
+
+ limbno = n / BITS_PER_MPI_LIMB;
+ bitno = n % BITS_PER_MPI_LIMB;
+
+ if (limbno >= a->nlimbs)
+ return 0; /* beyond the allocated limbs: the bit reads as 0 */
+ limb = a->d[limbno];
+ return (limb & (A_LIMB_1 << bitno)) ? 1 : 0;
+}
+EXPORT_SYMBOL_GPL(mpi_test_bit);
+
+/****************
+ * Set bit N of A.
+ */
+void mpi_set_bit(MPI a, unsigned int n)
+{
+ unsigned int i, limbno, bitno;
+
+ limbno = n / BITS_PER_MPI_LIMB;
+ bitno = n % BITS_PER_MPI_LIMB;
+
+ if (limbno >= a->nlimbs) {
+ for (i = a->nlimbs; i < a->alloced; i++)
+ a->d[i] = 0;
+ mpi_resize(a, limbno+1);
+ a->nlimbs = limbno+1;
+ }
+ a->d[limbno] |= (A_LIMB_1<<bitno);
+}
+
+/****************
+ * Set bit N of A and clear all bits above it.
+ */
+void mpi_set_highbit(MPI a, unsigned int n)
+{
+ unsigned int i, limbno, bitno;
+
+ limbno = n / BITS_PER_MPI_LIMB;
+ bitno = n % BITS_PER_MPI_LIMB;
+
+ if (limbno >= a->nlimbs) {
+ for (i = a->nlimbs; i < a->alloced; i++)
+ a->d[i] = 0;
+ mpi_resize(a, limbno+1);
+ a->nlimbs = limbno+1;
+ }
+ a->d[limbno] |= (A_LIMB_1<<bitno);
+ for (bitno++; bitno < BITS_PER_MPI_LIMB; bitno++)
+ a->d[limbno] &= ~(A_LIMB_1 << bitno);
+ a->nlimbs = limbno+1;
+}
+EXPORT_SYMBOL_GPL(mpi_set_highbit);
+
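+/* Sketch: mpi_set_highbit(a, 7) forces A into the range [2^7, 2^8) by
+ * setting bit 7 and clearing everything above it; e.g. a hypothetical
+ * value 0x1f3 becomes 0xf3.
+ */
+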
+/****************
+ * Clear bit N of A and all bits above it.
+ */
+void mpi_clear_highbit(MPI a, unsigned int n)
+{
+ unsigned int limbno, bitno;
+
+ limbno = n / BITS_PER_MPI_LIMB;
+ bitno = n % BITS_PER_MPI_LIMB;
+
+ if (limbno >= a->nlimbs)
+ return; /* not allocated, therefore no need to clear bits :-) */
+
+ for ( ; bitno < BITS_PER_MPI_LIMB; bitno++)
+ a->d[limbno] &= ~(A_LIMB_1 << bitno);
+ a->nlimbs = limbno+1;
+}
+
+/****************
+ * Clear bit N of A.
+ */
+void mpi_clear_bit(MPI a, unsigned int n)
+{
+ unsigned int limbno, bitno;
+
+ limbno = n / BITS_PER_MPI_LIMB;
+ bitno = n % BITS_PER_MPI_LIMB;
+
+ if (limbno >= a->nlimbs)
+ return; /* Nothing to clear, the bit lies beyond the allocated limbs. */
+ a->d[limbno] &= ~(A_LIMB_1 << bitno);
+}
+EXPORT_SYMBOL_GPL(mpi_clear_bit);
+
+
+/****************
+ * Shift A by COUNT limbs to the right
+ * This is used only within the MPI library
+ */
+void mpi_rshift_limbs(MPI a, unsigned int count)
+{
+ mpi_ptr_t ap = a->d;
+ mpi_size_t n = a->nlimbs;
+ unsigned int i;
+
+ if (count >= n) {
+ a->nlimbs = 0;
+ return;
+ }
+
+ for (i = 0; i < n - count; i++)
+ ap[i] = ap[i+count];
+ ap[i] = 0;
+ a->nlimbs -= count;
+}
+
+/*
+ * Shift A by N bits to the right.
+ */
+void mpi_rshift(MPI x, MPI a, unsigned int n)
+{
+ mpi_size_t xsize;
+ unsigned int i;
+ unsigned int nlimbs = (n/BITS_PER_MPI_LIMB);
+ unsigned int nbits = (n%BITS_PER_MPI_LIMB);
+
+ if (x == a) {
+ /* In-place operation. */
+ if (nlimbs >= x->nlimbs) {
+ x->nlimbs = 0;
+ return;
+ }
+
+ if (nlimbs) {
+ for (i = 0; i < x->nlimbs - nlimbs; i++)
+ x->d[i] = x->d[i+nlimbs];
+ x->d[i] = 0;
+ x->nlimbs -= nlimbs;
+ }
+ if (x->nlimbs && nbits)
+ mpihelp_rshift(x->d, x->d, x->nlimbs, nbits);
+ } else if (nlimbs) {
+ /* Copy and shift by more or equal bits than in a limb. */
+ xsize = a->nlimbs;
+ x->sign = a->sign;
+ RESIZE_IF_NEEDED(x, xsize);
+ x->nlimbs = xsize;
+ for (i = 0; i < a->nlimbs; i++)
+ x->d[i] = a->d[i];
+ x->nlimbs = i;
+
+ if (nlimbs >= x->nlimbs) {
+ x->nlimbs = 0;
+ return;
+ }
+
+ if (nlimbs) {
+ for (i = 0; i < x->nlimbs - nlimbs; i++)
+ x->d[i] = x->d[i+nlimbs];
+ x->d[i] = 0;
+ x->nlimbs -= nlimbs;
+ }
+
+ if (x->nlimbs && nbits)
+ mpihelp_rshift(x->d, x->d, x->nlimbs, nbits);
+ } else {
+ /* Copy and shift by less than bits in a limb. */
+ xsize = a->nlimbs;
+ x->sign = a->sign;
+ RESIZE_IF_NEEDED(x, xsize);
+ x->nlimbs = xsize;
+
+ if (xsize) {
+ if (nbits)
+ mpihelp_rshift(x->d, a->d, x->nlimbs, nbits);
+ else {
+ /* The rshift helper function is not specified for
+ * NBITS==0, thus we do a plain copy here.
+ */
+ for (i = 0; i < x->nlimbs; i++)
+ x->d[i] = a->d[i];
+ }
+ }
+ }
+ MPN_NORMALIZE(x->d, x->nlimbs);
+}
+EXPORT_SYMBOL_GPL(mpi_rshift);
+
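+/* Usage sketch (hypothetical values):
+ *
+ *     mpi_rshift(a, a, 4);    // in place: a = 0x1234 becomes 0x123
+ *     mpi_rshift(x, a, 4);    // into x: x = a >> 4, a unchanged
+ */
+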
+/****************
+ * Shift A by COUNT limbs to the left
+ * This is used only within the MPI library
+ */
+void mpi_lshift_limbs(MPI a, unsigned int count)
+{
+ mpi_ptr_t ap;
+ int n = a->nlimbs;
+ int i;
+
+ if (!count || !n)
+ return;
+
+ RESIZE_IF_NEEDED(a, n+count);
+
+ ap = a->d;
+ for (i = n-1; i >= 0; i--)
+ ap[i+count] = ap[i];
+ for (i = 0; i < count; i++)
+ ap[i] = 0;
+ a->nlimbs += count;
+}
+
+/*
+ * Shift A by N bits to the left.
+ */
+void mpi_lshift(MPI x, MPI a, unsigned int n)
+{
+ unsigned int nlimbs = (n/BITS_PER_MPI_LIMB);
+ unsigned int nbits = (n%BITS_PER_MPI_LIMB);
+
+ if (x == a && !n)
+ return; /* In-place shift with an amount of zero. */
+
+ if (x != a) {
+ /* Copy A to X. */
+ unsigned int alimbs = a->nlimbs;
+ int asign = a->sign;
+ mpi_ptr_t xp, ap;
+
+ RESIZE_IF_NEEDED(x, alimbs+nlimbs+1);
+ xp = x->d;
+ ap = a->d;
+ MPN_COPY(xp, ap, alimbs);
+ x->nlimbs = alimbs;
+ x->flags = a->flags;
+ x->sign = asign;
+ }
+
+ if (nlimbs && !nbits) {
+ /* Shift a full number of limbs. */
+ mpi_lshift_limbs(x, nlimbs);
+ } else if (n) {
+ /* We use a very dumb approach: shift left by the number of
+ * limbs plus one and then fix it up with an rshift.
+ */
+ mpi_lshift_limbs(x, nlimbs+1);
+ mpi_rshift(x, x, BITS_PER_MPI_LIMB - nbits);
+ }
+
+ MPN_NORMALIZE(x->d, x->nlimbs);
+}
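+
+/* Worked sketch of the fix-up above: for n == 70 with 64-bit limbs,
+ * nlimbs == 1 and nbits == 6, so the value is first moved left by two
+ * whole limbs (128 bits) and then shifted right by 64 - 6 == 58 bits,
+ * for a net left shift of exactly 70.
+ */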
diff --git a/lib/crypto/mpi/mpi-cmp.c b/lib/crypto/mpi/mpi-cmp.c
new file mode 100644
index 000000000..0835b6213
--- /dev/null
+++ b/lib/crypto/mpi/mpi-cmp.c
@@ -0,0 +1,98 @@
+/* mpi-cmp.c - MPI functions
+ * Copyright (C) 1998, 1999 Free Software Foundation, Inc.
+ *
+ * This file is part of GnuPG.
+ *
+ * GnuPG is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * GnuPG is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ */
+
+#include "mpi-internal.h"
+
+int mpi_cmp_ui(MPI u, unsigned long v)
+{
+ mpi_limb_t limb = v;
+
+ mpi_normalize(u);
+ if (u->nlimbs == 0) {
+ if (v == 0)
+ return 0;
+ else
+ return -1;
+ }
+ if (u->sign)
+ return -1;
+ if (u->nlimbs > 1)
+ return 1;
+
+ if (u->d[0] == limb)
+ return 0;
+ else if (u->d[0] > limb)
+ return 1;
+ else
+ return -1;
+}
+EXPORT_SYMBOL_GPL(mpi_cmp_ui);
+
+static int do_mpi_cmp(MPI u, MPI v, int absmode)
+{
+ mpi_size_t usize;
+ mpi_size_t vsize;
+ int usign;
+ int vsign;
+ int cmp;
+
+ mpi_normalize(u);
+ mpi_normalize(v);
+
+ usize = u->nlimbs;
+ vsize = v->nlimbs;
+ usign = absmode ? 0 : u->sign;
+ vsign = absmode ? 0 : v->sign;
+
+ /* Compare sign bits. */
+
+ if (!usign && vsign)
+ return 1;
+ if (usign && !vsign)
+ return -1;
+
+ /* U and V are either both positive or both negative. */
+
+ if (usize != vsize && !usign && !vsign)
+ return usize - vsize;
+ if (usize != vsize && usign && vsign)
+ return vsize - usize;
+ if (!usize)
+ return 0;
+ cmp = mpihelp_cmp(u->d, v->d, usize);
+ if (!cmp)
+ return 0;
+ if ((cmp < 0?1:0) == (usign?1:0))
+ return 1;
+
+ return -1;
+}
+
+int mpi_cmp(MPI u, MPI v)
+{
+ return do_mpi_cmp(u, v, 0);
+}
+EXPORT_SYMBOL_GPL(mpi_cmp);
+
+int mpi_cmpabs(MPI u, MPI v)
+{
+ return do_mpi_cmp(u, v, 1);
+}
+EXPORT_SYMBOL_GPL(mpi_cmpabs);
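+
+/* Usage sketch: as with memcmp(), only the sign of the result matters:
+ *
+ *     if (mpi_cmp(u, v) < 0)          // u < v, signs respected
+ *     if (mpi_cmpabs(u, v) == 0)      // |u| == |v|
+ *     if (!mpi_cmp_ui(u, 0))          // u == 0
+ */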
diff --git a/lib/crypto/mpi/mpi-div.c b/lib/crypto/mpi/mpi-div.c
new file mode 100644
index 000000000..45beab8b9
--- /dev/null
+++ b/lib/crypto/mpi/mpi-div.c
@@ -0,0 +1,234 @@
+/* mpi-div.c - MPI functions
+ * Copyright (C) 1994, 1996, 1998, 2001, 2002,
+ * 2003 Free Software Foundation, Inc.
+ *
+ * This file is part of Libgcrypt.
+ *
+ * Note: This code is heavily based on the GNU MP Library.
+ * Actually it's the same code with only minor changes in the
+ * way the data is stored; this is to support the abstraction
+ * of an optional secure memory allocation which may be used
+ * to avoid revealing of sensitive data due to paging etc.
+ */
+
+#include "mpi-internal.h"
+#include "longlong.h"
+
+void mpi_tdiv_qr(MPI quot, MPI rem, MPI num, MPI den);
+void mpi_fdiv_qr(MPI quot, MPI rem, MPI dividend, MPI divisor);
+
+void mpi_fdiv_r(MPI rem, MPI dividend, MPI divisor)
+{
+ int divisor_sign = divisor->sign;
+ MPI temp_divisor = NULL;
+
+ /* We need the original value of the divisor after the remainder has been
+ * preliminarily calculated. We have to copy it to temporary space if it's
+ * the same variable as REM.
+ */
+ if (rem == divisor) {
+ temp_divisor = mpi_copy(divisor);
+ divisor = temp_divisor;
+ }
+
+ mpi_tdiv_r(rem, dividend, divisor);
+
+ if (((divisor_sign?1:0) ^ (dividend->sign?1:0)) && rem->nlimbs)
+ mpi_add(rem, rem, divisor);
+
+ if (temp_divisor)
+ mpi_free(temp_divisor);
+}
+
+void mpi_fdiv_q(MPI quot, MPI dividend, MPI divisor)
+{
+ MPI tmp = mpi_alloc(mpi_get_nlimbs(quot));
+ mpi_fdiv_qr(quot, tmp, dividend, divisor);
+ mpi_free(tmp);
+}
+
+void mpi_fdiv_qr(MPI quot, MPI rem, MPI dividend, MPI divisor)
+{
+ int divisor_sign = divisor->sign;
+ MPI temp_divisor = NULL;
+
+ if (quot == divisor || rem == divisor) {
+ temp_divisor = mpi_copy(divisor);
+ divisor = temp_divisor;
+ }
+
+ mpi_tdiv_qr(quot, rem, dividend, divisor);
+
+ if ((divisor_sign ^ dividend->sign) && rem->nlimbs) {
+ mpi_sub_ui(quot, quot, 1);
+ mpi_add(rem, rem, divisor);
+ }
+
+ if (temp_divisor)
+ mpi_free(temp_divisor);
+}
+
+/* If den == quot, den needs temporary storage.
+ * If den == rem, den needs temporary storage.
+ * If num == quot, num needs temporary storage.
+ * If den has temporary storage, it can be normalized while being copied,
+ * i.e. no extra storage should be allocated.
+ */
+
+void mpi_tdiv_r(MPI rem, MPI num, MPI den)
+{
+ mpi_tdiv_qr(NULL, rem, num, den);
+}
+
+void mpi_tdiv_qr(MPI quot, MPI rem, MPI num, MPI den)
+{
+ mpi_ptr_t np, dp;
+ mpi_ptr_t qp, rp;
+ mpi_size_t nsize = num->nlimbs;
+ mpi_size_t dsize = den->nlimbs;
+ mpi_size_t qsize, rsize;
+ mpi_size_t sign_remainder = num->sign;
+ mpi_size_t sign_quotient = num->sign ^ den->sign;
+ unsigned int normalization_steps;
+ mpi_limb_t q_limb;
+ mpi_ptr_t marker[5];
+ int markidx = 0;
+
+ /* Ensure there is enough space for quotient and remainder.
+ * We need space for an extra limb in the remainder, because it's
+ * up-shifted (normalized) below.
+ */
+ rsize = nsize + 1;
+ mpi_resize(rem, rsize);
+
+ qsize = rsize - dsize; /* qsize cannot be bigger than this. */
+ if (qsize <= 0) {
+ if (num != rem) {
+ rem->nlimbs = num->nlimbs;
+ rem->sign = num->sign;
+ MPN_COPY(rem->d, num->d, nsize);
+ }
+ if (quot) {
+ /* This needs to follow the assignment to rem, in case the
+ * numerator and quotient are the same.
+ */
+ quot->nlimbs = 0;
+ quot->sign = 0;
+ }
+ return;
+ }
+
+ if (quot)
+ mpi_resize(quot, qsize);
+
+ /* Read pointers here, when reallocation is finished. */
+ np = num->d;
+ dp = den->d;
+ rp = rem->d;
+
+ /* Optimize division by a single-limb divisor. */
+ if (dsize == 1) {
+ mpi_limb_t rlimb;
+ if (quot) {
+ qp = quot->d;
+ rlimb = mpihelp_divmod_1(qp, np, nsize, dp[0]);
+ qsize -= qp[qsize - 1] == 0;
+ quot->nlimbs = qsize;
+ quot->sign = sign_quotient;
+ } else
+ rlimb = mpihelp_mod_1(np, nsize, dp[0]);
+ rp[0] = rlimb;
+ rsize = rlimb != 0?1:0;
+ rem->nlimbs = rsize;
+ rem->sign = sign_remainder;
+ return;
+ }
+
+
+ if (quot) {
+ qp = quot->d;
+ /* Make sure QP and NP point to different objects. Otherwise the
+ * numerator would be gradually overwritten by the quotient limbs.
+ */
+ if (qp == np) { /* Copy NP object to temporary space. */
+ np = marker[markidx++] = mpi_alloc_limb_space(nsize);
+ MPN_COPY(np, qp, nsize);
+ }
+ } else /* Put quotient at top of remainder. */
+ qp = rp + dsize;
+
+ normalization_steps = count_leading_zeros(dp[dsize - 1]);
+
+ /* Normalize the denominator, i.e. make its most significant bit set by
+ * shifting it NORMALIZATION_STEPS bits to the left. Also shift the
+ * numerator the same number of steps (to keep the quotient the same!).
+ */
+ if (normalization_steps) {
+ mpi_ptr_t tp;
+ mpi_limb_t nlimb;
+
+ /* Shift up the denominator setting the most significant bit of
+ * the most significant word. Use temporary storage not to clobber
+ * the original contents of the denominator.
+ */
+ tp = marker[markidx++] = mpi_alloc_limb_space(dsize);
+ mpihelp_lshift(tp, dp, dsize, normalization_steps);
+ dp = tp;
+
+ /* Shift up the numerator, possibly introducing a new most
+ * significant word. Move the shifted numerator in the remainder
+ * meanwhile.
+ */
+ nlimb = mpihelp_lshift(rp, np, nsize, normalization_steps);
+ if (nlimb) {
+ rp[nsize] = nlimb;
+ rsize = nsize + 1;
+ } else
+ rsize = nsize;
+ } else {
+ /* The denominator is already normalized, as required. Copy it to
+ * temporary space if it overlaps with the quotient or remainder.
+ */
+ if (dp == rp || (quot && (dp == qp))) {
+ mpi_ptr_t tp;
+
+ tp = marker[markidx++] = mpi_alloc_limb_space(dsize);
+ MPN_COPY(tp, dp, dsize);
+ dp = tp;
+ }
+
+ /* Move the numerator to the remainder. */
+ if (rp != np)
+ MPN_COPY(rp, np, nsize);
+
+ rsize = nsize;
+ }
+
+ q_limb = mpihelp_divrem(qp, 0, rp, rsize, dp, dsize);
+
+ if (quot) {
+ qsize = rsize - dsize;
+ if (q_limb) {
+ qp[qsize] = q_limb;
+ qsize += 1;
+ }
+
+ quot->nlimbs = qsize;
+ quot->sign = sign_quotient;
+ }
+
+ rsize = dsize;
+ MPN_NORMALIZE(rp, rsize);
+
+ if (normalization_steps && rsize) {
+ mpihelp_rshift(rp, rp, rsize, normalization_steps);
+ rsize -= rp[rsize - 1] == 0?1:0;
+ }
+
+ rem->nlimbs = rsize;
+ rem->sign = sign_remainder;
+ while (markidx) {
+ markidx--;
+ mpi_free_limb_space(marker[markidx]);
+ }
+}
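+
+/* Sketch of the normalization step above: for a two-limb divisor whose
+ * most significant limb is 0x3 (64-bit limbs), both operands are
+ * shifted left by count_leading_zeros(0x3) == 62 bits, so
+ * mpihelp_divrem() sees a divisor with its top bit set; the quotient is
+ * unaffected and the remainder is shifted back down at the end.
+ */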
diff --git a/lib/crypto/mpi/mpi-inline.h b/lib/crypto/mpi/mpi-inline.h
new file mode 100644
index 000000000..980b6b940
--- /dev/null
+++ b/lib/crypto/mpi/mpi-inline.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* mpi-inline.h - Internal to the Multi Precision Integers
+ * Copyright (C) 1994, 1996, 1998, 1999 Free Software Foundation, Inc.
+ *
+ * This file is part of GnuPG.
+ *
+ * Note: This code is heavily based on the GNU MP Library.
+ * Actually it's the same code with only minor changes in the
+ * way the data is stored; this is to support the abstraction
+ * of an optional secure memory allocation which may be used
+ * to avoid revealing of sensitive data due to paging etc.
+ * The GNU MP Library itself is published under the LGPL;
+ * however I decided to publish this code under the plain GPL.
+ */
+
+#ifndef G10_MPI_INLINE_H
+#define G10_MPI_INLINE_H
+
+#ifndef G10_MPI_INLINE_DECL
+#define G10_MPI_INLINE_DECL static inline
+#endif
+
+G10_MPI_INLINE_DECL mpi_limb_t
+mpihelp_add_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
+ mpi_size_t s1_size, mpi_limb_t s2_limb)
+{
+ mpi_limb_t x;
+
+ x = *s1_ptr++;
+ s2_limb += x;
+ *res_ptr++ = s2_limb;
+ if (s2_limb < x) { /* sum is less than the left operand: handle carry */
+ while (--s1_size) {
+ x = *s1_ptr++ + 1; /* add carry */
+ *res_ptr++ = x; /* and store */
+ if (x) /* not 0 (no overflow): we can stop */
+ goto leave;
+ }
+ return 1; /* return carry (size of s1 too small) */
+ }
+
+leave:
+ if (res_ptr != s1_ptr) { /* not the same variable */
+ mpi_size_t i; /* copy the rest */
+ for (i = 0; i < s1_size - 1; i++)
+ res_ptr[i] = s1_ptr[i];
+ }
+ return 0; /* no carry */
+}
+
+G10_MPI_INLINE_DECL mpi_limb_t
+mpihelp_add(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_size_t s1_size,
+ mpi_ptr_t s2_ptr, mpi_size_t s2_size)
+{
+ mpi_limb_t cy = 0;
+
+ if (s2_size)
+ cy = mpihelp_add_n(res_ptr, s1_ptr, s2_ptr, s2_size);
+
+ if (s1_size - s2_size)
+ cy = mpihelp_add_1(res_ptr + s2_size, s1_ptr + s2_size,
+ s1_size - s2_size, cy);
+ return cy;
+}
+
+G10_MPI_INLINE_DECL mpi_limb_t
+mpihelp_sub_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
+ mpi_size_t s1_size, mpi_limb_t s2_limb)
+{
+ mpi_limb_t x;
+
+ x = *s1_ptr++;
+ s2_limb = x - s2_limb;
+ *res_ptr++ = s2_limb;
+ if (s2_limb > x) {
+ while (--s1_size) {
+ x = *s1_ptr++;
+ *res_ptr++ = x - 1;
+ if (x)
+ goto leave;
+ }
+ return 1;
+ }
+
+leave:
+ if (res_ptr != s1_ptr) {
+ mpi_size_t i;
+ for (i = 0; i < s1_size - 1; i++)
+ res_ptr[i] = s1_ptr[i];
+ }
+ return 0;
+}
+
+G10_MPI_INLINE_DECL mpi_limb_t
+mpihelp_sub(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_size_t s1_size,
+ mpi_ptr_t s2_ptr, mpi_size_t s2_size)
+{
+ mpi_limb_t cy = 0;
+
+ if (s2_size)
+ cy = mpihelp_sub_n(res_ptr, s1_ptr, s2_ptr, s2_size);
+
+ if (s1_size - s2_size)
+ cy = mpihelp_sub_1(res_ptr + s2_size, s1_ptr + s2_size,
+ s1_size - s2_size, cy);
+ return cy;
+}
+
+#endif /*G10_MPI_INLINE_H */
diff --git a/lib/crypto/mpi/mpi-internal.h b/lib/crypto/mpi/mpi-internal.h
new file mode 100644
index 000000000..554002182
--- /dev/null
+++ b/lib/crypto/mpi/mpi-internal.h
@@ -0,0 +1,232 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* mpi-internal.h - Internal to the Multi Precision Integers
+ * Copyright (C) 1994, 1996 Free Software Foundation, Inc.
+ * Copyright (C) 1998, 2000 Free Software Foundation, Inc.
+ *
+ * This file is part of GnuPG.
+ *
+ * Note: This code is heavily based on the GNU MP Library.
+ * Actually it's the same code with only minor changes in the
+ * way the data is stored; this is to support the abstraction
+ * of an optional secure memory allocation which may be used
+ * to avoid revealing of sensitive data due to paging etc.
+ * The GNU MP Library itself is published under the LGPL;
+ * however I decided to publish this code under the plain GPL.
+ */
+
+#ifndef G10_MPI_INTERNAL_H
+#define G10_MPI_INTERNAL_H
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/mpi.h>
+#include <linux/errno.h>
+
+#define log_debug printk
+#define log_bug printk
+
+#define assert(x) \
+ do { \
+ if (!x) \
+ log_bug("failed assertion\n"); \
+ } while (0);
+
+/* If KARATSUBA_THRESHOLD is not already defined, define it to a
+ * value which is good on most machines. */
+
+/* tested 4, 16, 32 and 64, where 16 gave the best performance when
+ * checking a 768 and a 1024 bit ElGamal signature.
+ * (wk 22.12.97) */
+#ifndef KARATSUBA_THRESHOLD
+#define KARATSUBA_THRESHOLD 16
+#endif
+
+/* The code can't handle KARATSUBA_THRESHOLD smaller than 2. */
+#if KARATSUBA_THRESHOLD < 2
+#undef KARATSUBA_THRESHOLD
+#define KARATSUBA_THRESHOLD 2
+#endif
+
+typedef mpi_limb_t *mpi_ptr_t; /* pointer to a limb */
+typedef int mpi_size_t; /* (must be a signed type) */
+
+#define RESIZE_IF_NEEDED(a, b) \
+ do { \
+ if ((a)->alloced < (b)) \
+ mpi_resize((a), (b)); \
+ } while (0)
+
+/* Copy N limbs from S to D. */
+#define MPN_COPY(d, s, n) \
+ do { \
+ mpi_size_t _i; \
+ for (_i = 0; _i < (n); _i++) \
+ (d)[_i] = (s)[_i]; \
+ } while (0)
+
+#define MPN_COPY_INCR(d, s, n) \
+ do { \
+ mpi_size_t _i; \
+ for (_i = 0; _i < (n); _i++) \
+ (d)[_i] = (s)[_i]; \
+ } while (0)
+
+
+#define MPN_COPY_DECR(d, s, n) \
+ do { \
+ mpi_size_t _i; \
+ for (_i = (n)-1; _i >= 0; _i--) \
+ (d)[_i] = (s)[_i]; \
+ } while (0)
+
+/* Zero N limbs at D */
+#define MPN_ZERO(d, n) \
+ do { \
+ int _i; \
+ for (_i = 0; _i < (n); _i++) \
+ (d)[_i] = 0; \
+ } while (0)
+
+#define MPN_NORMALIZE(d, n) \
+ do { \
+ while ((n) > 0) { \
+ if ((d)[(n)-1]) \
+ break; \
+ (n)--; \
+ } \
+ } while (0)
+
+#define MPN_MUL_N_RECURSE(prodp, up, vp, size, tspace) \
+ do { \
+ if ((size) < KARATSUBA_THRESHOLD) \
+ mul_n_basecase(prodp, up, vp, size); \
+ else \
+ mul_n(prodp, up, vp, size, tspace); \
+ } while (0);
+
+/* Divide the two-limb number in (NH,,NL) by D, with DI being the largest
+ * limb not larger than (2**(2*BITS_PER_MP_LIMB))/D - (2**BITS_PER_MP_LIMB).
+ * If this would yield overflow, DI should be the largest possible number
+ * (i.e., only ones). For correct operation, the most significant bit of D
+ * has to be set. Put the quotient in Q and the remainder in R.
+ */
+#define UDIV_QRNND_PREINV(q, r, nh, nl, d, di) \
+ do { \
+ mpi_limb_t _ql __maybe_unused; \
+ mpi_limb_t _q, _r; \
+ mpi_limb_t _xh, _xl; \
+ umul_ppmm(_q, _ql, (nh), (di)); \
+ _q += (nh); /* DI is 2**BITS_PER_MPI_LIMB too small */ \
+ umul_ppmm(_xh, _xl, _q, (d)); \
+ sub_ddmmss(_xh, _r, (nh), (nl), _xh, _xl); \
+ if (_xh) { \
+ sub_ddmmss(_xh, _r, _xh, _r, 0, (d)); \
+ _q++; \
+ if (_xh) { \
+ sub_ddmmss(_xh, _r, _xh, _r, 0, (d)); \
+ _q++; \
+ } \
+ } \
+ if (_r >= (d)) { \
+ _r -= (d); \
+ _q++; \
+ } \
+ (r) = _r; \
+ (q) = _q; \
+ } while (0)
+
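+/* Sketch of the idea behind UDIV_QRNND_PREINV: with the precomputed
+ * DI = floor(b^2 / D) - b, where b = 2^BITS_PER_MPI_LIMB, the estimate
+ * _q = nh + floor(nh * di / b) falls at most a few below the true
+ * quotient, and the conditional corrections above repair the deficit.
+ */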
+
+/*-- mpiutil.c --*/
+mpi_ptr_t mpi_alloc_limb_space(unsigned nlimbs);
+void mpi_free_limb_space(mpi_ptr_t a);
+void mpi_assign_limb_space(MPI a, mpi_ptr_t ap, unsigned nlimbs);
+
+static inline mpi_limb_t mpihelp_add_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
+ mpi_size_t s1_size, mpi_limb_t s2_limb);
+mpi_limb_t mpihelp_add_n(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
+ mpi_ptr_t s2_ptr, mpi_size_t size);
+static inline mpi_limb_t mpihelp_add(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_size_t s1_size,
+ mpi_ptr_t s2_ptr, mpi_size_t s2_size);
+
+static inline mpi_limb_t mpihelp_sub_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
+ mpi_size_t s1_size, mpi_limb_t s2_limb);
+mpi_limb_t mpihelp_sub_n(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
+ mpi_ptr_t s2_ptr, mpi_size_t size);
+static inline mpi_limb_t mpihelp_sub(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_size_t s1_size,
+ mpi_ptr_t s2_ptr, mpi_size_t s2_size);
+
+/*-- mpih-cmp.c --*/
+int mpihelp_cmp(mpi_ptr_t op1_ptr, mpi_ptr_t op2_ptr, mpi_size_t size);
+
+/*-- mpih-mul.c --*/
+
+struct karatsuba_ctx {
+ struct karatsuba_ctx *next;
+ mpi_ptr_t tspace;
+ mpi_size_t tspace_size;
+ mpi_ptr_t tp;
+ mpi_size_t tp_size;
+};
+
+void mpihelp_release_karatsuba_ctx(struct karatsuba_ctx *ctx);
+
+mpi_limb_t mpihelp_addmul_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
+ mpi_size_t s1_size, mpi_limb_t s2_limb);
+mpi_limb_t mpihelp_submul_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
+ mpi_size_t s1_size, mpi_limb_t s2_limb);
+int mpihelp_mul(mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t usize,
+ mpi_ptr_t vp, mpi_size_t vsize, mpi_limb_t *_result);
+void mpih_sqr_n_basecase(mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t size);
+void mpih_sqr_n(mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t size,
+ mpi_ptr_t tspace);
+void mpihelp_mul_n(mpi_ptr_t prodp,
+ mpi_ptr_t up, mpi_ptr_t vp, mpi_size_t size);
+
+int mpihelp_mul_karatsuba_case(mpi_ptr_t prodp,
+ mpi_ptr_t up, mpi_size_t usize,
+ mpi_ptr_t vp, mpi_size_t vsize,
+ struct karatsuba_ctx *ctx);
+
+/*-- generic_mpih-mul1.c --*/
+mpi_limb_t mpihelp_mul_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
+ mpi_size_t s1_size, mpi_limb_t s2_limb);
+
+/*-- mpih-div.c --*/
+mpi_limb_t mpihelp_mod_1(mpi_ptr_t dividend_ptr, mpi_size_t dividend_size,
+ mpi_limb_t divisor_limb);
+mpi_limb_t mpihelp_divrem(mpi_ptr_t qp, mpi_size_t qextra_limbs,
+ mpi_ptr_t np, mpi_size_t nsize,
+ mpi_ptr_t dp, mpi_size_t dsize);
+mpi_limb_t mpihelp_divmod_1(mpi_ptr_t quot_ptr,
+ mpi_ptr_t dividend_ptr, mpi_size_t dividend_size,
+ mpi_limb_t divisor_limb);
+
+/*-- generic_mpih-[lr]shift.c --*/
+mpi_limb_t mpihelp_lshift(mpi_ptr_t wp, mpi_ptr_t up, mpi_size_t usize,
+ unsigned cnt);
+mpi_limb_t mpihelp_rshift(mpi_ptr_t wp, mpi_ptr_t up, mpi_size_t usize,
+ unsigned cnt);
+
+/* Define stuff for longlong.h. */
+#define W_TYPE_SIZE BITS_PER_MPI_LIMB
+typedef mpi_limb_t UWtype;
+typedef unsigned int UHWtype;
+#if defined(__GNUC__)
+typedef unsigned int UQItype __attribute__ ((mode(QI)));
+typedef int SItype __attribute__ ((mode(SI)));
+typedef unsigned int USItype __attribute__ ((mode(SI)));
+typedef int DItype __attribute__ ((mode(DI)));
+typedef unsigned int UDItype __attribute__ ((mode(DI)));
+#else
+typedef unsigned char UQItype;
+typedef long SItype;
+typedef unsigned long USItype;
+#endif
+
+#ifdef __GNUC__
+#include "mpi-inline.h"
+#endif
+
+#endif /*G10_MPI_INTERNAL_H */
diff --git a/lib/crypto/mpi/mpi-inv.c b/lib/crypto/mpi/mpi-inv.c
new file mode 100644
index 000000000..61e37d18f
--- /dev/null
+++ b/lib/crypto/mpi/mpi-inv.c
@@ -0,0 +1,143 @@
+/* mpi-inv.c - MPI functions
+ * Copyright (C) 1998, 2001, 2002, 2003 Free Software Foundation, Inc.
+ *
+ * This file is part of Libgcrypt.
+ *
+ * Libgcrypt is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * Libgcrypt is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "mpi-internal.h"
+
+/****************
+ * Calculate the multiplicative inverse X of A mod N
+ * That is: Find the solution x for
+ * 1 = (a*x) mod n
+ */
+int mpi_invm(MPI x, MPI a, MPI n)
+{
+ /* Extended Euclid's algorithm (See TAOCP Vol II, 4.5.2, Alg X)
+ * modified according to Michael Penk's solution for Exercise 35
+ * with further enhancement
+ */
+ MPI u, v, u1, u2 = NULL, u3, v1, v2 = NULL, v3, t1, t2 = NULL, t3;
+ unsigned int k;
+ int sign;
+ int odd;
+
+ if (!mpi_cmp_ui(a, 0))
+ return 0; /* Inverse does not exist. */
+ if (!mpi_cmp_ui(n, 1))
+ return 0; /* Inverse does not exist. */
+
+ u = mpi_copy(a);
+ v = mpi_copy(n);
+
+ for (k = 0; !mpi_test_bit(u, 0) && !mpi_test_bit(v, 0); k++) {
+ mpi_rshift(u, u, 1);
+ mpi_rshift(v, v, 1);
+ }
+ odd = mpi_test_bit(v, 0);
+
+ u1 = mpi_alloc_set_ui(1);
+ if (!odd)
+ u2 = mpi_alloc_set_ui(0);
+ u3 = mpi_copy(u);
+ v1 = mpi_copy(v);
+ if (!odd) {
+ v2 = mpi_alloc(mpi_get_nlimbs(u));
+ mpi_sub(v2, u1, u); /* U is used as const 1 */
+ }
+ v3 = mpi_copy(v);
+ if (mpi_test_bit(u, 0)) { /* u is odd */
+ t1 = mpi_alloc_set_ui(0);
+ if (!odd) {
+ t2 = mpi_alloc_set_ui(1);
+ t2->sign = 1;
+ }
+ t3 = mpi_copy(v);
+ t3->sign = !t3->sign;
+ goto Y4;
+ } else {
+ t1 = mpi_alloc_set_ui(1);
+ if (!odd)
+ t2 = mpi_alloc_set_ui(0);
+ t3 = mpi_copy(u);
+ }
+
+ do {
+ do {
+ if (!odd) {
+ if (mpi_test_bit(t1, 0) || mpi_test_bit(t2, 0)) {
+ /* one is odd */
+ mpi_add(t1, t1, v);
+ mpi_sub(t2, t2, u);
+ }
+ mpi_rshift(t1, t1, 1);
+ mpi_rshift(t2, t2, 1);
+ mpi_rshift(t3, t3, 1);
+ } else {
+ if (mpi_test_bit(t1, 0))
+ mpi_add(t1, t1, v);
+ mpi_rshift(t1, t1, 1);
+ mpi_rshift(t3, t3, 1);
+ }
+Y4:
+ ;
+ } while (!mpi_test_bit(t3, 0)); /* while t3 is even */
+
+ if (!t3->sign) {
+ mpi_set(u1, t1);
+ if (!odd)
+ mpi_set(u2, t2);
+ mpi_set(u3, t3);
+ } else {
+ mpi_sub(v1, v, t1);
+ sign = u->sign; u->sign = !u->sign;
+ if (!odd)
+ mpi_sub(v2, u, t2);
+ u->sign = sign;
+ sign = t3->sign; t3->sign = !t3->sign;
+ mpi_set(v3, t3);
+ t3->sign = sign;
+ }
+ mpi_sub(t1, u1, v1);
+ if (!odd)
+ mpi_sub(t2, u2, v2);
+ mpi_sub(t3, u3, v3);
+ if (t1->sign) {
+ mpi_add(t1, t1, v);
+ if (!odd)
+ mpi_sub(t2, t2, u);
+ }
+ } while (mpi_cmp_ui(t3, 0)); /* while t3 != 0 */
+ /* mpi_lshift( u3, k ); */
+ mpi_set(x, u1);
+
+ mpi_free(u1);
+ mpi_free(v1);
+ mpi_free(t1);
+ if (!odd) {
+ mpi_free(u2);
+ mpi_free(v2);
+ mpi_free(t2);
+ }
+ mpi_free(u3);
+ mpi_free(v3);
+ mpi_free(t3);
+
+ mpi_free(u);
+ mpi_free(v);
+ return 1;
+}
+EXPORT_SYMBOL_GPL(mpi_invm);
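+
+/* Usage sketch (hypothetical values): since 3 * 7 == 21 == 1 (mod 10),
+ *
+ *     MPI x = mpi_alloc(0);
+ *     if (mpi_invm(x, a, n))  // with a == 3, n == 10
+ *         ...                 // x now holds 7
+ */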
diff --git a/lib/crypto/mpi/mpi-mod.c b/lib/crypto/mpi/mpi-mod.c
new file mode 100644
index 000000000..54fcc0156
--- /dev/null
+++ b/lib/crypto/mpi/mpi-mod.c
@@ -0,0 +1,157 @@
+/* mpi-mod.c - Modular reduction
+ * Copyright (C) 1998, 1999, 2001, 2002, 2003,
+ * 2007 Free Software Foundation, Inc.
+ *
+ * This file is part of Libgcrypt.
+ */
+
+
+#include "mpi-internal.h"
+#include "longlong.h"
+
+/* Context used with Barrett reduction. */
+struct barrett_ctx_s {
+ MPI m; /* The modulus - may not be modified. */
+ int m_copied; /* If true, M needs to be released. */
+ int k;
+ MPI y;
+ MPI r1; /* Helper MPI. */
+ MPI r2; /* Helper MPI. */
+ MPI r3; /* Helper MPI allocated on demand. */
+};
+
+
+
+void mpi_mod(MPI rem, MPI dividend, MPI divisor)
+{
+ mpi_fdiv_r(rem, dividend, divisor);
+}
+
+/* This function returns a new context for Barrett based operations on
+ * the modulus M. This context needs to be released using
+ * mpi_barrett_free(). If COPY is true, M is copied into the context
+ * and the caller may change M afterwards. If COPY is false, M may not
+ * be changed until mpi_barrett_free() has been called.
+ */
+mpi_barrett_t mpi_barrett_init(MPI m, int copy)
+{
+ mpi_barrett_t ctx;
+ MPI tmp;
+
+ mpi_normalize(m);
+ ctx = kcalloc(1, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return NULL;
+
+ if (copy) {
+ ctx->m = mpi_copy(m);
+ ctx->m_copied = 1;
+ } else
+ ctx->m = m;
+
+ ctx->k = mpi_get_nlimbs(m);
+ tmp = mpi_alloc(ctx->k + 1);
+
+ /* Barrett precalculation: y = floor(b^(2k) / m). */
+ mpi_set_ui(tmp, 1);
+ mpi_lshift_limbs(tmp, 2 * ctx->k);
+ mpi_fdiv_q(tmp, tmp, m);
+
+ ctx->y = tmp;
+ ctx->r1 = mpi_alloc(2 * ctx->k + 1);
+ ctx->r2 = mpi_alloc(2 * ctx->k + 1);
+
+ return ctx;
+}
+
+void mpi_barrett_free(mpi_barrett_t ctx)
+{
+ if (ctx) {
+ mpi_free(ctx->y);
+ mpi_free(ctx->r1);
+ mpi_free(ctx->r2);
+ if (ctx->r3)
+ mpi_free(ctx->r3);
+ if (ctx->m_copied)
+ mpi_free(ctx->m);
+ kfree(ctx);
+ }
+}
+
+
+/* R = X mod M
+ *
+ * Using Barrett reduction. Before using this function
+ * mpi_barrett_init() must have been called to do the
+ * precalculations. CTX is the context created by that precalculation
+ * and also conveys M. If the Barrett reduction cannot be done, a
+ * straightforward reduction method is used instead.
+ *
+ * We assume that these conditions are met:
+ * Input: x =(x_2k-1 ...x_0)_b
+ * m =(m_k-1 ....m_0)_b with m_k-1 != 0
+ * Output: r = x mod m
+ */
+void mpi_mod_barrett(MPI r, MPI x, mpi_barrett_t ctx)
+{
+ MPI m = ctx->m;
+ int k = ctx->k;
+ MPI y = ctx->y;
+ MPI r1 = ctx->r1;
+ MPI r2 = ctx->r2;
+ int sign;
+
+ mpi_normalize(x);
+ if (mpi_get_nlimbs(x) > 2*k) {
+ mpi_mod(r, x, m);
+ return;
+ }
+
+ sign = x->sign;
+ x->sign = 0;
+
+ /* 1. q1 = floor( x / b^(k-1) )
+ * q2 = q1 * y
+ * q3 = floor( q2 / b^(k+1) )
+ * Actually, we don't need q1..q3 separately; we can work directly on r2
+ */
+ mpi_set(r2, x);
+ mpi_rshift_limbs(r2, k-1);
+ mpi_mul(r2, r2, y);
+ mpi_rshift_limbs(r2, k+1);
+
+ /* 2. r1 = x mod b^(k+1)
+ * r2 = q3 * m mod b^(k+1)
+ * r = r1 - r2
+ * 3. if r < 0 then r = r + b^(k+1)
+ */
+ mpi_set(r1, x);
+ if (r1->nlimbs > k+1) /* Quick modulo operation. */
+ r1->nlimbs = k+1;
+ mpi_mul(r2, r2, m);
+ if (r2->nlimbs > k+1) /* Quick modulo operation. */
+ r2->nlimbs = k+1;
+ mpi_sub(r, r1, r2);
+
+ if (mpi_has_sign(r)) {
+ if (!ctx->r3) {
+ ctx->r3 = mpi_alloc(k + 2);
+ mpi_set_ui(ctx->r3, 1);
+ mpi_lshift_limbs(ctx->r3, k + 1);
+ }
+ mpi_add(r, r, ctx->r3);
+ }
+
+ /* 4. while r >= m do r = r - m */
+ while (mpi_cmp(r, m) >= 0)
+ mpi_sub(r, r, m);
+
+ x->sign = sign;
+}
+
+
+void mpi_mul_barrett(MPI w, MPI u, MPI v, mpi_barrett_t ctx)
+{
+ mpi_mul(w, u, v);
+ mpi_mod_barrett(w, w, ctx);
+}
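+
+/* Usage sketch: repeated reductions against one modulus amortize the
+ * Barrett precomputation (hypothetical values):
+ *
+ *     mpi_barrett_t ctx = mpi_barrett_init(m, 0);  // m must stay fixed
+ *     mpi_mul_barrett(w, u, v, ctx);               // w = u * v mod m
+ *     mpi_barrett_free(ctx);
+ */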
diff --git a/lib/crypto/mpi/mpi-mul.c b/lib/crypto/mpi/mpi-mul.c
new file mode 100644
index 000000000..7f4eda856
--- /dev/null
+++ b/lib/crypto/mpi/mpi-mul.c
@@ -0,0 +1,92 @@
+/* mpi-mul.c - MPI functions
+ * Copyright (C) 1994, 1996, 1998, 2001, 2002,
+ * 2003 Free Software Foundation, Inc.
+ *
+ * This file is part of Libgcrypt.
+ *
+ * Note: This code is heavily based on the GNU MP Library.
+ * Actually it's the same code with only minor changes in the
+ * way the data is stored; this is to support the abstraction
+ * of an optional secure memory allocation which may be used
+ * to avoid revealing of sensitive data due to paging etc.
+ */
+
+#include "mpi-internal.h"
+
+void mpi_mul(MPI w, MPI u, MPI v)
+{
+ mpi_size_t usize, vsize, wsize;
+ mpi_ptr_t up, vp, wp;
+ mpi_limb_t cy;
+ int usign, vsign, sign_product;
+ int assign_wp = 0;
+ mpi_ptr_t tmp_limb = NULL;
+
+ if (u->nlimbs < v->nlimbs) {
+ /* Swap U and V. */
+ usize = v->nlimbs;
+ usign = v->sign;
+ up = v->d;
+ vsize = u->nlimbs;
+ vsign = u->sign;
+ vp = u->d;
+ } else {
+ usize = u->nlimbs;
+ usign = u->sign;
+ up = u->d;
+ vsize = v->nlimbs;
+ vsign = v->sign;
+ vp = v->d;
+ }
+ sign_product = usign ^ vsign;
+ wp = w->d;
+
+ /* Ensure W has space enough to store the result. */
+ wsize = usize + vsize;
+ if (w->alloced < wsize) {
+ if (wp == up || wp == vp) {
+ wp = mpi_alloc_limb_space(wsize);
+ assign_wp = 1;
+ } else {
+ mpi_resize(w, wsize);
+ wp = w->d;
+ }
+ } else { /* Make U and V not overlap with W. */
+ if (wp == up) {
+ /* W and U are identical. Allocate temporary space for U. */
+ up = tmp_limb = mpi_alloc_limb_space(usize);
+ /* Is V identical too? Keep it identical with U. */
+ if (wp == vp)
+ vp = up;
+ /* Copy to the temporary space. */
+ MPN_COPY(up, wp, usize);
+ } else if (wp == vp) {
+ /* W and V are identical. Allocate temporary space for V. */
+ vp = tmp_limb = mpi_alloc_limb_space(vsize);
+ /* Copy to the temporary space. */
+ MPN_COPY(vp, wp, vsize);
+ }
+ }
+
+ if (!vsize)
+ wsize = 0;
+ else {
+ mpihelp_mul(wp, up, usize, vp, vsize, &cy);
+ wsize -= cy ? 0:1;
+ }
+
+ if (assign_wp)
+ mpi_assign_limb_space(w, wp, wsize);
+ w->nlimbs = wsize;
+ w->sign = sign_product;
+ if (tmp_limb)
+ mpi_free_limb_space(tmp_limb);
+}
+EXPORT_SYMBOL_GPL(mpi_mul);
+
+void mpi_mulm(MPI w, MPI u, MPI v, MPI m)
+{
+ mpi_mul(w, u, v);
+ mpi_tdiv_r(w, w, m);
+}
+EXPORT_SYMBOL_GPL(mpi_mulm);
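+
+/* Note the asymmetry with mpi-add.c: mpi_mulm() reduces with the
+ * truncating mpi_tdiv_r(), while mpi_addm()/mpi_subm() use the flooring
+ * mpi_mod(); for a negative product the two results differ by one
+ * multiple of m.
+ */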
diff --git a/lib/crypto/mpi/mpi-pow.c b/lib/crypto/mpi/mpi-pow.c
new file mode 100644
index 000000000..2fd7a46d5
--- /dev/null
+++ b/lib/crypto/mpi/mpi-pow.c
@@ -0,0 +1,314 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* mpi-pow.c - MPI functions
+ * Copyright (C) 1994, 1996, 1998, 2000 Free Software Foundation, Inc.
+ *
+ * This file is part of GnuPG.
+ *
+ * Note: This code is heavily based on the GNU MP Library.
+ * Actually it's the same code with only minor changes in the
+ * way the data is stored; this is to support the abstraction
+ * of an optional secure memory allocation which may be used
+ * to avoid revealing of sensitive data due to paging etc.
+ * The GNU MP Library itself is published under the LGPL;
+ * however I decided to publish this code under the plain GPL.
+ */
+
+#include <linux/sched.h>
+#include <linux/string.h>
+#include "mpi-internal.h"
+#include "longlong.h"
+
+/****************
+ * RES = BASE ^ EXP mod MOD
+ */
+int mpi_powm(MPI res, MPI base, MPI exp, MPI mod)
+{
+ mpi_ptr_t mp_marker = NULL, bp_marker = NULL, ep_marker = NULL;
+ struct karatsuba_ctx karactx = {};
+ mpi_ptr_t xp_marker = NULL;
+ mpi_ptr_t tspace = NULL;
+ mpi_ptr_t rp, ep, mp, bp;
+ mpi_size_t esize, msize, bsize, rsize;
+ int msign, bsign, rsign;
+ mpi_size_t size;
+ int mod_shift_cnt;
+ int negative_result;
+ int assign_rp = 0;
+ mpi_size_t tsize = 0; /* to avoid compiler warning */
+ /* fixme: we should check that the warning is void */
+ int rc = -ENOMEM;
+
+ esize = exp->nlimbs;
+ msize = mod->nlimbs;
+ size = 2 * msize;
+ msign = mod->sign;
+
+ rp = res->d;
+ ep = exp->d;
+
+ if (!msize)
+ return -EINVAL;
+
+ if (!esize) {
+ /* Exponent is zero, result is 1 mod MOD, i.e., 1 or 0
+ * depending on whether MOD equals 1. */
+ res->nlimbs = (msize == 1 && mod->d[0] == 1) ? 0 : 1;
+ if (res->nlimbs) {
+ if (mpi_resize(res, 1) < 0)
+ goto enomem;
+ rp = res->d;
+ rp[0] = 1;
+ }
+ res->sign = 0;
+ goto leave;
+ }
+
+ /* Normalize MOD (i.e. make its most significant bit set) as required by
+ * mpn_divrem. This will make the intermediate values in the calculation
+ * slightly larger, but the correct result is obtained after a final
+ * reduction using the original MOD value. */
+ mp = mp_marker = mpi_alloc_limb_space(msize);
+ if (!mp)
+ goto enomem;
+ mod_shift_cnt = count_leading_zeros(mod->d[msize - 1]);
+ if (mod_shift_cnt)
+ mpihelp_lshift(mp, mod->d, msize, mod_shift_cnt);
+ else
+ MPN_COPY(mp, mod->d, msize);
+
+ bsize = base->nlimbs;
+ bsign = base->sign;
+ if (bsize > msize) { /* The base is larger than the modulus. Reduce it. */
+ /* Allocate (BSIZE + 1) with space for remainder and quotient.
+ * (The quotient is (bsize - msize + 1) limbs.) */
+ bp = bp_marker = mpi_alloc_limb_space(bsize + 1);
+ if (!bp)
+ goto enomem;
+ MPN_COPY(bp, base->d, bsize);
+ /* We don't care about the quotient, store it above the remainder,
+ * at BP + MSIZE. */
+ mpihelp_divrem(bp + msize, 0, bp, bsize, mp, msize);
+ bsize = msize;
+ /* Canonicalize the base, since we are going to multiply with it
+ * quite a few times. */
+ MPN_NORMALIZE(bp, bsize);
+ } else
+ bp = base->d;
+
+ if (!bsize) {
+ res->nlimbs = 0;
+ res->sign = 0;
+ goto leave;
+ }
+
+ if (res->alloced < size) {
+ /* We have to allocate more space for RES. If any of the input
+ * parameters are identical to RES, defer deallocation of the old
+ * space. */
+ if (rp == ep || rp == mp || rp == bp) {
+ rp = mpi_alloc_limb_space(size);
+ if (!rp)
+ goto enomem;
+ assign_rp = 1;
+ } else {
+ if (mpi_resize(res, size) < 0)
+ goto enomem;
+ rp = res->d;
+ }
+ } else { /* Make BASE, EXP and MOD not overlap with RES. */
+ if (rp == bp) {
+ /* RES and BASE are identical. Allocate temp. space for BASE. */
+ BUG_ON(bp_marker);
+ bp = bp_marker = mpi_alloc_limb_space(bsize);
+ if (!bp)
+ goto enomem;
+ MPN_COPY(bp, rp, bsize);
+ }
+ if (rp == ep) {
+ /* RES and EXP are identical. Allocate temp. space for EXP. */
+ ep = ep_marker = mpi_alloc_limb_space(esize);
+ if (!ep)
+ goto enomem;
+ MPN_COPY(ep, rp, esize);
+ }
+ if (rp == mp) {
+ /* RES and MOD are identical. Allocate temporary space for MOD. */
+ BUG_ON(mp_marker);
+ mp = mp_marker = mpi_alloc_limb_space(msize);
+ if (!mp)
+ goto enomem;
+ MPN_COPY(mp, rp, msize);
+ }
+ }
+
+ MPN_COPY(rp, bp, bsize);
+ rsize = bsize;
+ rsign = bsign;
+
+ {
+ mpi_size_t i;
+ mpi_ptr_t xp;
+ int c;
+ mpi_limb_t e;
+ mpi_limb_t carry_limb;
+
+ xp = xp_marker = mpi_alloc_limb_space(2 * (msize + 1));
+ if (!xp)
+ goto enomem;
+
+ negative_result = (ep[0] & 1) && base->sign;
+
+ i = esize - 1;
+ e = ep[i];
+ c = count_leading_zeros(e);
+ e = (e << c) << 1; /* shift the exp bits to the left, lose msb */
+ c = BITS_PER_MPI_LIMB - 1 - c;
+
+ /* Main loop.
+ *
+ * Make the result be pointed to alternately by XP and RP. This
+ * helps us avoid block copying, which would otherwise be necessary
+ * with the overlap restrictions of mpihelp_divmod. With 50% probability
+ * the result after this loop will be in the area originally pointed
+ * to by RP (==RES->d), and with 50% probability in the area originally
+ * pointed to by XP.
+ */
+
+ for (;;) {
+ while (c) {
+ mpi_ptr_t tp;
+ mpi_size_t xsize;
+
+ /*if (mpihelp_mul_n(xp, rp, rp, rsize) < 0) goto enomem */
+ if (rsize < KARATSUBA_THRESHOLD)
+ mpih_sqr_n_basecase(xp, rp, rsize);
+ else {
+ if (!tspace) {
+ tsize = 2 * rsize;
+ tspace =
+ mpi_alloc_limb_space(tsize);
+ if (!tspace)
+ goto enomem;
+ } else if (tsize < (2 * rsize)) {
+ mpi_free_limb_space(tspace);
+ tsize = 2 * rsize;
+ tspace =
+ mpi_alloc_limb_space(tsize);
+ if (!tspace)
+ goto enomem;
+ }
+ mpih_sqr_n(xp, rp, rsize, tspace);
+ }
+
+ xsize = 2 * rsize;
+ if (xsize > msize) {
+ mpihelp_divrem(xp + msize, 0, xp, xsize,
+ mp, msize);
+ xsize = msize;
+ }
+
+ tp = rp;
+ rp = xp;
+ xp = tp;
+ rsize = xsize;
+
+ if ((mpi_limb_signed_t) e < 0) {
+ /*mpihelp_mul( xp, rp, rsize, bp, bsize ); */
+ if (bsize < KARATSUBA_THRESHOLD) {
+ mpi_limb_t tmp;
+ if (mpihelp_mul
+ (xp, rp, rsize, bp, bsize,
+ &tmp) < 0)
+ goto enomem;
+ } else {
+ if (mpihelp_mul_karatsuba_case
+ (xp, rp, rsize, bp, bsize,
+ &karactx) < 0)
+ goto enomem;
+ }
+
+ xsize = rsize + bsize;
+ if (xsize > msize) {
+ mpihelp_divrem(xp + msize, 0,
+ xp, xsize, mp,
+ msize);
+ xsize = msize;
+ }
+
+ tp = rp;
+ rp = xp;
+ xp = tp;
+ rsize = xsize;
+ }
+ e <<= 1;
+ c--;
+ cond_resched();
+ }
+
+ i--;
+ if (i < 0)
+ break;
+ e = ep[i];
+ c = BITS_PER_MPI_LIMB;
+ }
+
+ /* We shifted MOD, the modulo reduction argument, left MOD_SHIFT_CNT
+ * steps. Adjust the result by reducing it with the original MOD.
+ *
+ * Also make sure the result is put in RES->d (where it already
+ * might be, see above).
+ */
+ if (mod_shift_cnt) {
+ carry_limb =
+ mpihelp_lshift(res->d, rp, rsize, mod_shift_cnt);
+ rp = res->d;
+ if (carry_limb) {
+ rp[rsize] = carry_limb;
+ rsize++;
+ }
+ } else {
+ MPN_COPY(res->d, rp, rsize);
+ rp = res->d;
+ }
+
+ if (rsize >= msize) {
+ mpihelp_divrem(rp + msize, 0, rp, rsize, mp, msize);
+ rsize = msize;
+ }
+
+		/* Undo the normalization shift of MOD, then strip any
+		 * leading zero limbs from the result. */
+ if (mod_shift_cnt)
+ mpihelp_rshift(rp, rp, rsize, mod_shift_cnt);
+ MPN_NORMALIZE(rp, rsize);
+ }
+
+ if (negative_result && rsize) {
+ if (mod_shift_cnt)
+ mpihelp_rshift(mp, mp, msize, mod_shift_cnt);
+ mpihelp_sub(rp, mp, msize, rp, rsize);
+ rsize = msize;
+ rsign = msign;
+ MPN_NORMALIZE(rp, rsize);
+ }
+ res->nlimbs = rsize;
+ res->sign = rsign;
+
+leave:
+ rc = 0;
+enomem:
+ mpihelp_release_karatsuba_ctx(&karactx);
+ if (assign_rp)
+ mpi_assign_limb_space(res, rp, size);
+ if (mp_marker)
+ mpi_free_limb_space(mp_marker);
+ if (bp_marker)
+ mpi_free_limb_space(bp_marker);
+ if (ep_marker)
+ mpi_free_limb_space(ep_marker);
+ if (xp_marker)
+ mpi_free_limb_space(xp_marker);
+ if (tspace)
+ mpi_free_limb_space(tspace);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(mpi_powm);
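
The main loop above is textbook left-to-right binary exponentiation (square-and-multiply), with a reduction by the shifted modulus after every step so intermediate values never exceed 2*msize limbs. A minimal userspace sketch of the same control flow on machine words may help; the helper name and the 32-bit restriction are illustrative assumptions, not part of this file:

#include <stdint.h>

/* Left-to-right square-and-multiply, the bit-scanning pattern of the
 * mpi_powm main loop, on single words. Assumes 1 < m < 2^32 so the
 * 64-bit intermediates cannot overflow.
 */
static uint32_t powm_u32(uint32_t base, uint32_t exp, uint32_t m)
{
	uint64_t r = 1;
	uint64_t b = base % m;
	int i;

	for (i = 31; i >= 0; i--) {
		r = (r * r) % m;		/* square for every exponent bit */
		if ((exp >> i) & 1)
			r = (r * b) % m;	/* multiply when the bit is set */
	}
	return (uint32_t)r;
}

The XP/RP pointer swapping in the real code plays the role of the plain assignments here: each squaring writes into the scratch buffer, which then becomes the new result buffer, avoiding a block copy per iteration.
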
diff --git a/lib/crypto/mpi/mpi-sub-ui.c b/lib/crypto/mpi/mpi-sub-ui.c
new file mode 100644
index 000000000..b41b082b5
--- /dev/null
+++ b/lib/crypto/mpi/mpi-sub-ui.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* mpi-sub-ui.c - Subtract an unsigned integer from an MPI.
+ *
+ * Copyright 1991, 1993, 1994, 1996, 1999-2002, 2004, 2012, 2013, 2015
+ * Free Software Foundation, Inc.
+ *
+ * This file was based on the GNU MP Library source file:
+ * https://gmplib.org/repo/gmp-6.2/file/510b83519d1c/mpz/aors_ui.h
+ *
+ * The GNU MP Library is free software; you can redistribute it and/or modify
+ * it under the terms of either:
+ *
+ * * the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 3 of the License, or (at your
+ * option) any later version.
+ *
+ * or
+ *
+ * * the GNU General Public License as published by the Free Software
+ * Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * or both in parallel, as here.
+ *
+ * The GNU MP Library is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received copies of the GNU General Public License and the
+ * GNU Lesser General Public License along with the GNU MP Library. If not,
+ * see https://www.gnu.org/licenses/.
+ */
+
+#include "mpi-internal.h"
+
+int mpi_sub_ui(MPI w, MPI u, unsigned long vval)
+{
+ if (u->nlimbs == 0) {
+ if (mpi_resize(w, 1) < 0)
+ return -ENOMEM;
+ w->d[0] = vval;
+ w->nlimbs = (vval != 0);
+ w->sign = (vval != 0);
+ return 0;
+ }
+
+	/* If there is not enough space for W (and a possible carry), grow it. */
+ if (mpi_resize(w, u->nlimbs + 1))
+ return -ENOMEM;
+
+ if (u->sign) {
+ mpi_limb_t cy;
+
+ cy = mpihelp_add_1(w->d, u->d, u->nlimbs, (mpi_limb_t) vval);
+ w->d[u->nlimbs] = cy;
+ w->nlimbs = u->nlimbs + cy;
+ w->sign = 1;
+ } else {
+ /* The signs are different. Need exact comparison to determine
+ * which operand to subtract from which.
+ */
+ if (u->nlimbs == 1 && u->d[0] < vval) {
+ w->d[0] = vval - u->d[0];
+ w->nlimbs = 1;
+ w->sign = 1;
+ } else {
+ mpihelp_sub_1(w->d, u->d, u->nlimbs, (mpi_limb_t) vval);
+ /* Size can decrease with at most one limb. */
+ w->nlimbs = (u->nlimbs - (w->d[u->nlimbs - 1] == 0));
+ w->sign = 0;
+ }
+ }
+
+ mpi_normalize(w);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mpi_sub_ui);
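
mpi_sub_ui() is a case analysis on the sign-magnitude representation: for negative u the magnitudes add; for non-negative u they subtract, with the roles swapped when u < vval. The same case split on plain integers, as a hedged model (the names are invented for illustration):

/* Sign-magnitude model of w = u - v for v >= 0, mirroring the
 * branches of mpi_sub_ui: usign/umag stand in for an MPI's sign
 * flag and limb magnitude.
 */
static void sub_ui_model(int usign, unsigned long umag, unsigned long v,
			 int *wsign, unsigned long *wmag)
{
	if (usign) {			/* u negative: -(|u| + v) */
		*wmag = umag + v;
		*wsign = 1;
	} else if (umag < v) {		/* u too small: result negative */
		*wmag = v - umag;
		*wsign = 1;
	} else {			/* plain subtraction */
		*wmag = umag - v;
		*wsign = 0;
	}
	if (!*wmag)			/* normalize: zero is unsigned */
		*wsign = 0;
}
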
diff --git a/lib/crypto/mpi/mpicoder.c b/lib/crypto/mpi/mpicoder.c
new file mode 100644
index 000000000..3cb6bd148
--- /dev/null
+++ b/lib/crypto/mpi/mpicoder.c
@@ -0,0 +1,752 @@
+/* mpicoder.c - Coder for the external representation of MPIs
+ * Copyright (C) 1998, 1999 Free Software Foundation, Inc.
+ *
+ * This file is part of GnuPG.
+ *
+ * GnuPG is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * GnuPG is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ */
+
+#include <linux/bitops.h>
+#include <linux/count_zeros.h>
+#include <linux/byteorder/generic.h>
+#include <linux/scatterlist.h>
+#include <linux/string.h>
+#include "mpi-internal.h"
+
+#define MAX_EXTERN_SCAN_BYTES (16*1024*1024)
+#define MAX_EXTERN_MPI_BITS 16384
+
+/**
+ * mpi_read_raw_data - Read a raw byte stream as a positive integer
+ * @xbuffer: The data to read
+ * @nbytes: The amount of data to read
+ */
+MPI mpi_read_raw_data(const void *xbuffer, size_t nbytes)
+{
+ const uint8_t *buffer = xbuffer;
+ int i, j;
+ unsigned nbits, nlimbs;
+ mpi_limb_t a;
+ MPI val = NULL;
+
+ while (nbytes > 0 && buffer[0] == 0) {
+ buffer++;
+ nbytes--;
+ }
+
+ nbits = nbytes * 8;
+ if (nbits > MAX_EXTERN_MPI_BITS) {
+ pr_info("MPI: mpi too large (%u bits)\n", nbits);
+ return NULL;
+ }
+ if (nbytes > 0)
+ nbits -= count_leading_zeros(buffer[0]) - (BITS_PER_LONG - 8);
+
+ nlimbs = DIV_ROUND_UP(nbytes, BYTES_PER_MPI_LIMB);
+ val = mpi_alloc(nlimbs);
+ if (!val)
+ return NULL;
+ val->nbits = nbits;
+ val->sign = 0;
+ val->nlimbs = nlimbs;
+
+ if (nbytes > 0) {
+ i = BYTES_PER_MPI_LIMB - nbytes % BYTES_PER_MPI_LIMB;
+ i %= BYTES_PER_MPI_LIMB;
+ for (j = nlimbs; j > 0; j--) {
+ a = 0;
+ for (; i < BYTES_PER_MPI_LIMB; i++) {
+ a <<= 8;
+ a |= *buffer++;
+ }
+ i = 0;
+ val->d[j - 1] = a;
+ }
+ }
+ return val;
+}
+EXPORT_SYMBOL_GPL(mpi_read_raw_data);
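
The read loop above fills limbs most significant first, using the byte offset i to leave the partial top limb short. The packing step in isolation, for 4-byte limbs (a standalone sketch; the function name is hypothetical):

#include <stdint.h>
#include <stddef.h>

/* Pack a big-endian byte string into 32-bit limbs, least significant
 * limb first, the layout mpi_read_raw_data produces. out must hold
 * (nbytes + 3) / 4 entries; returns the limb count.
 */
static size_t pack_be_bytes(uint32_t *out, const uint8_t *in, size_t nbytes)
{
	size_t nlimbs = (nbytes + 3) / 4;
	size_t i = (4 - nbytes % 4) % 4;	/* bytes missing in top limb */
	size_t j;
	uint32_t a;

	for (j = nlimbs; j > 0; j--) {
		a = 0;
		for (; i < 4; i++)
			a = (a << 8) | *in++;
		i = 0;
		out[j - 1] = a;
	}
	return nlimbs;
}
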
+
+MPI mpi_read_from_buffer(const void *xbuffer, unsigned *ret_nread)
+{
+ const uint8_t *buffer = xbuffer;
+ unsigned int nbits, nbytes;
+ MPI val;
+
+ if (*ret_nread < 2)
+ return ERR_PTR(-EINVAL);
+ nbits = buffer[0] << 8 | buffer[1];
+
+ if (nbits > MAX_EXTERN_MPI_BITS) {
+ pr_info("MPI: mpi too large (%u bits)\n", nbits);
+ return ERR_PTR(-EINVAL);
+ }
+
+ nbytes = DIV_ROUND_UP(nbits, 8);
+ if (nbytes + 2 > *ret_nread) {
+ pr_info("MPI: mpi larger than buffer nbytes=%u ret_nread=%u\n",
+ nbytes, *ret_nread);
+ return ERR_PTR(-EINVAL);
+ }
+
+ val = mpi_read_raw_data(buffer + 2, nbytes);
+ if (!val)
+ return ERR_PTR(-ENOMEM);
+
+ *ret_nread = nbytes + 2;
+ return val;
+}
+EXPORT_SYMBOL_GPL(mpi_read_from_buffer);
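
mpi_read_from_buffer() parses the OpenPGP-style MPI wire format: a 16-bit big-endian bit count followed by DIV_ROUND_UP(nbits, 8) magnitude bytes. The matching encoder can be sketched as follows (hypothetical helper; assumes the magnitude has no leading zero byte):

#include <stdint.h>
#include <stddef.h>

/* Frame a big-endian magnitude as <2-byte bit count><bytes>, the
 * layout mpi_read_from_buffer expects. mag[0] must be nonzero when
 * nbytes > 0. Returns the number of bytes written to dst.
 */
static size_t mpi_frame(uint8_t *dst, const uint8_t *mag, size_t nbytes)
{
	unsigned int nbits = 0;
	size_t i;

	if (nbytes && mag[0]) {
		uint8_t top = mag[0];

		nbits = 8 * nbytes;
		while (!(top & 0x80)) {		/* trim leading zero bits */
			top <<= 1;
			nbits--;
		}
	}
	dst[0] = nbits >> 8;
	dst[1] = nbits & 0xff;
	for (i = 0; i < nbytes; i++)
		dst[2 + i] = mag[i];
	return nbytes + 2;
}
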
+
+/****************
+ * Fill the mpi VAL from the hex string in STR.
+ */
+int mpi_fromstr(MPI val, const char *str)
+{
+ int sign = 0;
+ int prepend_zero = 0;
+ int i, j, c, c1, c2;
+ unsigned int nbits, nbytes, nlimbs;
+ mpi_limb_t a;
+
+ if (*str == '-') {
+ sign = 1;
+ str++;
+ }
+
+ /* Skip optional hex prefix. */
+ if (*str == '0' && str[1] == 'x')
+ str += 2;
+
+ nbits = strlen(str);
+ if (nbits > MAX_EXTERN_SCAN_BYTES) {
+ mpi_clear(val);
+ return -EINVAL;
+ }
+ nbits *= 4;
+ if ((nbits % 8))
+ prepend_zero = 1;
+
+ nbytes = (nbits+7) / 8;
+ nlimbs = (nbytes+BYTES_PER_MPI_LIMB-1) / BYTES_PER_MPI_LIMB;
+
+ if (val->alloced < nlimbs)
+ mpi_resize(val, nlimbs);
+
+ i = BYTES_PER_MPI_LIMB - (nbytes % BYTES_PER_MPI_LIMB);
+ i %= BYTES_PER_MPI_LIMB;
+ j = val->nlimbs = nlimbs;
+ val->sign = sign;
+ for (; j > 0; j--) {
+ a = 0;
+ for (; i < BYTES_PER_MPI_LIMB; i++) {
+ if (prepend_zero) {
+ c1 = '0';
+ prepend_zero = 0;
+ } else
+ c1 = *str++;
+
+ if (!c1) {
+ mpi_clear(val);
+ return -EINVAL;
+ }
+ c2 = *str++;
+ if (!c2) {
+ mpi_clear(val);
+ return -EINVAL;
+ }
+ if (c1 >= '0' && c1 <= '9')
+ c = c1 - '0';
+ else if (c1 >= 'a' && c1 <= 'f')
+ c = c1 - 'a' + 10;
+ else if (c1 >= 'A' && c1 <= 'F')
+ c = c1 - 'A' + 10;
+ else {
+ mpi_clear(val);
+ return -EINVAL;
+ }
+ c <<= 4;
+ if (c2 >= '0' && c2 <= '9')
+ c |= c2 - '0';
+ else if (c2 >= 'a' && c2 <= 'f')
+ c |= c2 - 'a' + 10;
+ else if (c2 >= 'A' && c2 <= 'F')
+ c |= c2 - 'A' + 10;
+ else {
+ mpi_clear(val);
+ return -EINVAL;
+ }
+ a <<= 8;
+ a |= c;
+ }
+ i = 0;
+ val->d[j-1] = a;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mpi_fromstr);
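
The bulk of mpi_fromstr() is the hex-pair decoding that the long if/else chains perform inline. Factored out, the digit logic is just (sketch; the helper names are invented here):

/* Decode one hex digit: 0-15, or -1 on a bad character. Accepts the
 * same characters as the inline tests in mpi_fromstr.
 */
static int hexval(int c)
{
	if (c >= '0' && c <= '9')
		return c - '0';
	if (c >= 'a' && c <= 'f')
		return c - 'a' + 10;
	if (c >= 'A' && c <= 'F')
		return c - 'A' + 10;
	return -1;
}

/* Combine two digits into one byte, as "c = c1 << 4 | c2" above. */
static int hexbyte(int c1, int c2)
{
	int hi = hexval(c1), lo = hexval(c2);

	return (hi < 0 || lo < 0) ? -1 : (hi << 4) | lo;
}
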
+
+MPI mpi_scanval(const char *string)
+{
+ MPI a;
+
+ a = mpi_alloc(0);
+ if (!a)
+ return NULL;
+
+ if (mpi_fromstr(a, string)) {
+ mpi_free(a);
+ return NULL;
+ }
+ mpi_normalize(a);
+ return a;
+}
+EXPORT_SYMBOL_GPL(mpi_scanval);
+
+static int count_lzeros(MPI a)
+{
+ mpi_limb_t alimb;
+ int i, lzeros = 0;
+
+ for (i = a->nlimbs - 1; i >= 0; i--) {
+ alimb = a->d[i];
+ if (alimb == 0) {
+ lzeros += sizeof(mpi_limb_t);
+ } else {
+ lzeros += count_leading_zeros(alimb) / 8;
+ break;
+ }
+ }
+ return lzeros;
+}
+
+/**
+ * mpi_read_buffer() - read MPI to a buffer provided by user (msb first)
+ *
+ * @a: a multi precision integer
+ * @buf: buffer to which the output will be written to. Needs to be at
+ * least mpi_get_size(a) long.
+ * @buf_len: size of the buf.
+ * @nbytes: receives the actual length of the data written on success and
+ * the data to-be-written on -EOVERFLOW in case buf_len was too
+ * small.
+ * @sign: if not NULL, it will be set to the sign of a.
+ *
+ * Return: 0 on success or error code in case of error
+ */
+int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes,
+ int *sign)
+{
+ uint8_t *p;
+#if BYTES_PER_MPI_LIMB == 4
+ __be32 alimb;
+#elif BYTES_PER_MPI_LIMB == 8
+ __be64 alimb;
+#else
+#error please implement for this limb size.
+#endif
+ unsigned int n = mpi_get_size(a);
+ int i, lzeros;
+
+ if (!buf || !nbytes)
+ return -EINVAL;
+
+ if (sign)
+ *sign = a->sign;
+
+ lzeros = count_lzeros(a);
+
+ if (buf_len < n - lzeros) {
+ *nbytes = n - lzeros;
+ return -EOVERFLOW;
+ }
+
+ p = buf;
+ *nbytes = n - lzeros;
+
+ for (i = a->nlimbs - 1 - lzeros / BYTES_PER_MPI_LIMB,
+ lzeros %= BYTES_PER_MPI_LIMB;
+ i >= 0; i--) {
+#if BYTES_PER_MPI_LIMB == 4
+ alimb = cpu_to_be32(a->d[i]);
+#elif BYTES_PER_MPI_LIMB == 8
+ alimb = cpu_to_be64(a->d[i]);
+#else
+#error please implement for this limb size.
+#endif
+ memcpy(p, (u8 *)&alimb + lzeros, BYTES_PER_MPI_LIMB - lzeros);
+ p += BYTES_PER_MPI_LIMB - lzeros;
+ lzeros = 0;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mpi_read_buffer);
+
+/*
+ * mpi_get_buffer() - Returns an allocated buffer with the MPI (msb first).
+ * The caller must free the returned buffer.
+ * If the value of A is zero, a buffer is still returned, with *nbytes
+ * set to zero.
+ *
+ * @a: a multi precision integer.
+ * @nbytes: receives the length of this buffer.
+ * @a:		a multi precision integer.
+ * @nbytes:	receives the length of this buffer.
+ * @sign:	if not NULL, it will be set to the sign of a.
+ *
+ * Return: Pointer to MPI buffer or NULL on error
+ */
+void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign)
+{
+ uint8_t *buf;
+ unsigned int n;
+ int ret;
+
+ if (!nbytes)
+ return NULL;
+
+ n = mpi_get_size(a);
+
+ if (!n)
+ n++;
+
+ buf = kmalloc(n, GFP_KERNEL);
+
+ if (!buf)
+ return NULL;
+
+ ret = mpi_read_buffer(a, buf, n, nbytes, sign);
+
+ if (ret) {
+ kfree(buf);
+ return NULL;
+ }
+ return buf;
+}
+EXPORT_SYMBOL_GPL(mpi_get_buffer);
+
+/**
+ * mpi_write_to_sgl() - Function exporting an MPI to an sgl (msb first)
+ *
+ * This function works in the same way as mpi_read_buffer(), but it
+ * takes an sgl instead of a u8 *buf.
+ *
+ * @a: a multi precision integer
+ * @sgl: scatterlist to write to. Needs to be at least
+ * mpi_get_size(a) long.
+ * @nbytes: the number of bytes to write. Leading bytes will be
+ * filled with zero.
+ * @sign: if not NULL, it will be set to the sign of a.
+ *
+ * Return: 0 on success or error code in case of error
+ */
+int mpi_write_to_sgl(MPI a, struct scatterlist *sgl, unsigned nbytes,
+ int *sign)
+{
+ u8 *p, *p2;
+#if BYTES_PER_MPI_LIMB == 4
+ __be32 alimb;
+#elif BYTES_PER_MPI_LIMB == 8
+ __be64 alimb;
+#else
+#error please implement for this limb size.
+#endif
+ unsigned int n = mpi_get_size(a);
+ struct sg_mapping_iter miter;
+ int i, x, buf_len;
+ int nents;
+
+ if (sign)
+ *sign = a->sign;
+
+ if (nbytes < n)
+ return -EOVERFLOW;
+
+ nents = sg_nents_for_len(sgl, nbytes);
+ if (nents < 0)
+ return -EINVAL;
+
+ sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC | SG_MITER_TO_SG);
+ sg_miter_next(&miter);
+ buf_len = miter.length;
+ p2 = miter.addr;
+
+ while (nbytes > n) {
+ i = min_t(unsigned, nbytes - n, buf_len);
+ memset(p2, 0, i);
+ p2 += i;
+ nbytes -= i;
+
+ buf_len -= i;
+ if (!buf_len) {
+ sg_miter_next(&miter);
+ buf_len = miter.length;
+ p2 = miter.addr;
+ }
+ }
+
+ for (i = a->nlimbs - 1; i >= 0; i--) {
+#if BYTES_PER_MPI_LIMB == 4
+ alimb = a->d[i] ? cpu_to_be32(a->d[i]) : 0;
+#elif BYTES_PER_MPI_LIMB == 8
+ alimb = a->d[i] ? cpu_to_be64(a->d[i]) : 0;
+#else
+#error please implement for this limb size.
+#endif
+ p = (u8 *)&alimb;
+
+ for (x = 0; x < sizeof(alimb); x++) {
+ *p2++ = *p++;
+ if (!--buf_len) {
+ sg_miter_next(&miter);
+ buf_len = miter.length;
+ p2 = miter.addr;
+ }
+ }
+ }
+
+ sg_miter_stop(&miter);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mpi_write_to_sgl);
+
+/*
+ * mpi_read_raw_from_sgl() - Function allocates an MPI and populates it with
+ * data from the sgl
+ *
+ * This function works in the same way as mpi_read_raw_data(), but it
+ * takes an sgl instead of a void *buffer, i.e. it allocates
+ * a new MPI and reads the content of the sgl into the MPI.
+ *
+ * @sgl: scatterlist to read from
+ * @nbytes: number of bytes to read
+ *
+ * Return: Pointer to a new MPI or NULL on error
+ */
+MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int nbytes)
+{
+ struct sg_mapping_iter miter;
+ unsigned int nbits, nlimbs;
+ int x, j, z, lzeros, ents;
+ unsigned int len;
+ const u8 *buff;
+ mpi_limb_t a;
+ MPI val = NULL;
+
+ ents = sg_nents_for_len(sgl, nbytes);
+ if (ents < 0)
+ return NULL;
+
+ sg_miter_start(&miter, sgl, ents, SG_MITER_ATOMIC | SG_MITER_FROM_SG);
+
+ lzeros = 0;
+ len = 0;
+ while (nbytes > 0) {
+ while (len && !*buff) {
+ lzeros++;
+ len--;
+ buff++;
+ }
+
+ if (len && *buff)
+ break;
+
+ sg_miter_next(&miter);
+ buff = miter.addr;
+ len = miter.length;
+
+ nbytes -= lzeros;
+ lzeros = 0;
+ }
+
+ miter.consumed = lzeros;
+
+ nbytes -= lzeros;
+ nbits = nbytes * 8;
+ if (nbits > MAX_EXTERN_MPI_BITS) {
+ sg_miter_stop(&miter);
+ pr_info("MPI: mpi too large (%u bits)\n", nbits);
+ return NULL;
+ }
+
+ if (nbytes > 0)
+ nbits -= count_leading_zeros(*buff) - (BITS_PER_LONG - 8);
+
+ sg_miter_stop(&miter);
+
+ nlimbs = DIV_ROUND_UP(nbytes, BYTES_PER_MPI_LIMB);
+ val = mpi_alloc(nlimbs);
+ if (!val)
+ return NULL;
+
+ val->nbits = nbits;
+ val->sign = 0;
+ val->nlimbs = nlimbs;
+
+ if (nbytes == 0)
+ return val;
+
+ j = nlimbs - 1;
+ a = 0;
+ z = BYTES_PER_MPI_LIMB - nbytes % BYTES_PER_MPI_LIMB;
+ z %= BYTES_PER_MPI_LIMB;
+
+ while (sg_miter_next(&miter)) {
+ buff = miter.addr;
+ len = min_t(unsigned, miter.length, nbytes);
+ nbytes -= len;
+
+ for (x = 0; x < len; x++) {
+ a <<= 8;
+ a |= *buff++;
+ if (((z + x + 1) % BYTES_PER_MPI_LIMB) == 0) {
+ val->d[j--] = a;
+ a = 0;
+ }
+ }
+ z += x;
+ }
+
+ return val;
+}
+EXPORT_SYMBOL_GPL(mpi_read_raw_from_sgl);
+
+/* Perform a two's complement operation on buffer P of size N bytes. */
+static void twocompl(unsigned char *p, unsigned int n)
+{
+ int i;
+
+ for (i = n-1; i >= 0 && !p[i]; i--)
+ ;
+ if (i >= 0) {
+ if ((p[i] & 0x01))
+ p[i] = (((p[i] ^ 0xfe) | 0x01) & 0xff);
+ else if ((p[i] & 0x02))
+ p[i] = (((p[i] ^ 0xfc) | 0x02) & 0xfe);
+ else if ((p[i] & 0x04))
+ p[i] = (((p[i] ^ 0xf8) | 0x04) & 0xfc);
+ else if ((p[i] & 0x08))
+ p[i] = (((p[i] ^ 0xf0) | 0x08) & 0xf8);
+ else if ((p[i] & 0x10))
+ p[i] = (((p[i] ^ 0xe0) | 0x10) & 0xf0);
+ else if ((p[i] & 0x20))
+ p[i] = (((p[i] ^ 0xc0) | 0x20) & 0xe0);
+ else if ((p[i] & 0x40))
+ p[i] = (((p[i] ^ 0x80) | 0x40) & 0xc0);
+ else
+ p[i] = 0x80;
+
+ for (i--; i >= 0; i--)
+ p[i] ^= 0xff;
+ }
+}
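
twocompl() negates the buffer in place; since the buffer is msb-first, p[n-1] is the least significant byte, so the function finds the least significant nonzero byte, negates it via the branch table, and complements every byte above it. An equivalent but more transparent formulation is a plain invert-and-increment with carry (sketch; the helper name is invented):

#include <stdint.h>
#include <stddef.h>

/* Negate an msb-first byte buffer in place: p = (~p) + 1 over the
 * whole n-byte value, which is what twocompl() computes with its
 * branch table.
 */
static void twocompl_simple(uint8_t *p, size_t n)
{
	unsigned int carry = 1;
	size_t i;

	for (i = n; i-- > 0; ) {	/* least significant byte first */
		unsigned int v = (uint8_t)~p[i] + carry;

		p[i] = (uint8_t)v;
		carry = v >> 8;
	}
}
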
+
+int mpi_print(enum gcry_mpi_format format, unsigned char *buffer,
+ size_t buflen, size_t *nwritten, MPI a)
+{
+ unsigned int nbits = mpi_get_nbits(a);
+ size_t len;
+ size_t dummy_nwritten;
+ int negative;
+
+ if (!nwritten)
+ nwritten = &dummy_nwritten;
+
+	/* Libgcrypt does not always take care to clear the sign if the value
+	 * is 0. For printing this is a bit of a surprise, in particular
+	 * because some of the formats don't support negative numbers but
+	 * should still be able to print a zero. Thus we need this extra test
+	 * for a negative number.
+ */
+ if (a->sign && mpi_cmp_ui(a, 0))
+ negative = 1;
+ else
+ negative = 0;
+
+ len = buflen;
+ *nwritten = 0;
+ if (format == GCRYMPI_FMT_STD) {
+ unsigned char *tmp;
+ int extra = 0;
+ unsigned int n;
+
+ tmp = mpi_get_buffer(a, &n, NULL);
+ if (!tmp)
+ return -EINVAL;
+
+ if (negative) {
+ twocompl(tmp, n);
+ if (!(*tmp & 0x80)) {
+ /* Need to extend the sign. */
+ n++;
+ extra = 2;
+ }
+ } else if (n && (*tmp & 0x80)) {
+ /* Positive but the high bit of the returned buffer is set.
+ * Thus we need to print an extra leading 0x00 so that the
+ * output is interpreted as a positive number.
+ */
+ n++;
+ extra = 1;
+ }
+
+ if (buffer && n > len) {
+ /* The provided buffer is too short. */
+ kfree(tmp);
+ return -E2BIG;
+ }
+ if (buffer) {
+ unsigned char *s = buffer;
+
+ if (extra == 1)
+ *s++ = 0;
+ else if (extra)
+ *s++ = 0xff;
+ memcpy(s, tmp, n-!!extra);
+ }
+ kfree(tmp);
+ *nwritten = n;
+ return 0;
+ } else if (format == GCRYMPI_FMT_USG) {
+ unsigned int n = (nbits + 7)/8;
+
+ /* Note: We ignore the sign for this format. */
+ /* FIXME: for performance reasons we should put this into
+ * mpi_aprint because we can then use the buffer directly.
+ */
+
+ if (buffer && n > len)
+ return -E2BIG;
+ if (buffer) {
+ unsigned char *tmp;
+
+ tmp = mpi_get_buffer(a, &n, NULL);
+ if (!tmp)
+ return -EINVAL;
+ memcpy(buffer, tmp, n);
+ kfree(tmp);
+ }
+ *nwritten = n;
+ return 0;
+ } else if (format == GCRYMPI_FMT_PGP) {
+ unsigned int n = (nbits + 7)/8;
+
+ /* The PGP format can only handle unsigned integers. */
+ if (negative)
+ return -EINVAL;
+
+ if (buffer && n+2 > len)
+ return -E2BIG;
+
+ if (buffer) {
+ unsigned char *tmp;
+ unsigned char *s = buffer;
+
+ s[0] = nbits >> 8;
+ s[1] = nbits;
+
+ tmp = mpi_get_buffer(a, &n, NULL);
+ if (!tmp)
+ return -EINVAL;
+ memcpy(s+2, tmp, n);
+ kfree(tmp);
+ }
+ *nwritten = n+2;
+ return 0;
+ } else if (format == GCRYMPI_FMT_SSH) {
+ unsigned char *tmp;
+ int extra = 0;
+ unsigned int n;
+
+ tmp = mpi_get_buffer(a, &n, NULL);
+ if (!tmp)
+ return -EINVAL;
+
+ if (negative) {
+ twocompl(tmp, n);
+ if (!(*tmp & 0x80)) {
+ /* Need to extend the sign. */
+ n++;
+ extra = 2;
+ }
+ } else if (n && (*tmp & 0x80)) {
+ n++;
+ extra = 1;
+ }
+
+ if (buffer && n+4 > len) {
+ kfree(tmp);
+ return -E2BIG;
+ }
+
+ if (buffer) {
+ unsigned char *s = buffer;
+
+ *s++ = n >> 24;
+ *s++ = n >> 16;
+ *s++ = n >> 8;
+ *s++ = n;
+ if (extra == 1)
+ *s++ = 0;
+ else if (extra)
+ *s++ = 0xff;
+ memcpy(s, tmp, n-!!extra);
+ }
+ kfree(tmp);
+ *nwritten = 4+n;
+ return 0;
+ } else if (format == GCRYMPI_FMT_HEX) {
+ unsigned char *tmp;
+ int i;
+ int extra = 0;
+ unsigned int n = 0;
+
+ tmp = mpi_get_buffer(a, &n, NULL);
+ if (!tmp)
+ return -EINVAL;
+ if (!n || (*tmp & 0x80))
+ extra = 2;
+
+ if (buffer && 2*n + extra + negative + 1 > len) {
+ kfree(tmp);
+ return -E2BIG;
+ }
+ if (buffer) {
+ unsigned char *s = buffer;
+
+ if (negative)
+ *s++ = '-';
+ if (extra) {
+ *s++ = '0';
+ *s++ = '0';
+ }
+
+ for (i = 0; i < n; i++) {
+ unsigned int c = tmp[i];
+
+ *s++ = (c >> 4) < 10 ? '0'+(c>>4) : 'A'+(c>>4)-10;
+ c &= 15;
+ *s++ = c < 10 ? '0'+c : 'A'+c-10;
+ }
+ *s++ = 0;
+ *nwritten = s - buffer;
+ } else {
+ *nwritten = 2*n + extra + negative + 1;
+ }
+ kfree(tmp);
+ return 0;
+ } else
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(mpi_print);
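
For GCRYMPI_FMT_STD and GCRYMPI_FMT_SSH the framing rule above boils down to: a positive value whose top bit is set gets a leading 0x00, and a negated value whose top bit came out clear gets a leading 0xff, so the sign survives the two's complement encoding. The positive case in isolation (hedged sketch, hypothetical helper):

#include <stdint.h>
#include <stddef.h>

/* STD-format framing of a non-negative magnitude: a 0x00 sign byte
 * is prepended exactly when the first magnitude byte has its top bit
 * set, so the result cannot be misread as negative. Returns the
 * framed length.
 */
static size_t std_frame_positive(uint8_t *dst, const uint8_t *mag, size_t n)
{
	size_t i, k = 0;

	if (n && (mag[0] & 0x80))
		dst[k++] = 0x00;	/* explicit positive sign byte */
	for (i = 0; i < n; i++)
		dst[k++] = mag[i];
	return k;
}
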
diff --git a/lib/crypto/mpi/mpih-cmp.c b/lib/crypto/mpi/mpih-cmp.c
new file mode 100644
index 000000000..f23709114
--- /dev/null
+++ b/lib/crypto/mpi/mpih-cmp.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* mpih-cmp.c - MPI helper functions
+ * Copyright (C) 1994, 1996 Free Software Foundation, Inc.
+ * Copyright (C) 1998, 1999, 2000, 2001 Free Software Foundation, Inc.
+ *
+ * This file is part of GnuPG.
+ *
+ * Note: This code is heavily based on the GNU MP Library.
+ * Actually it's the same code with only minor changes in the
+ * way the data is stored; this is to support the abstraction
+ * of an optional secure memory allocation which may be used
+ * to avoid revealing of sensitive data due to paging etc.
+ * The GNU MP Library itself is published under the LGPL;
+ * however I decided to publish this code under the plain GPL.
+ */
+
+#include "mpi-internal.h"
+
+/****************
+ * Compare OP1_PTR/OP1_SIZE with OP2_PTR/OP2_SIZE.
+ * There are no restrictions on the relative sizes of
+ * the two arguments.
+ * Return 1 if OP1 > OP2, 0 if they are equal, and -1 if OP1 < OP2.
+ */
+int mpihelp_cmp(mpi_ptr_t op1_ptr, mpi_ptr_t op2_ptr, mpi_size_t size)
+{
+ mpi_size_t i;
+ mpi_limb_t op1_word, op2_word;
+
+ for (i = size - 1; i >= 0; i--) {
+ op1_word = op1_ptr[i];
+ op2_word = op2_ptr[i];
+ if (op1_word != op2_word)
+ goto diff;
+ }
+ return 0;
+
+diff:
+ /* This can *not* be simplified to
+	 *	 op1_word - op2_word
+ * since that expression might give signed overflow. */
+ return (op1_word > op2_word) ? 1 : -1;
+}
diff --git a/lib/crypto/mpi/mpih-div.c b/lib/crypto/mpi/mpih-div.c
new file mode 100644
index 000000000..be70ee2e4
--- /dev/null
+++ b/lib/crypto/mpi/mpih-div.c
@@ -0,0 +1,517 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* mpihelp-div.c - MPI helper functions
+ * Copyright (C) 1994, 1996 Free Software Foundation, Inc.
+ * Copyright (C) 1998, 1999 Free Software Foundation, Inc.
+ *
+ * This file is part of GnuPG.
+ *
+ * Note: This code is heavily based on the GNU MP Library.
+ * Actually it's the same code with only minor changes in the
+ * way the data is stored; this is to support the abstraction
+ * of an optional secure memory allocation which may be used
+ * to avoid revealing of sensitive data due to paging etc.
+ * The GNU MP Library itself is published under the LGPL;
+ * however I decided to publish this code under the plain GPL.
+ */
+
+#include "mpi-internal.h"
+#include "longlong.h"
+
+#ifndef UMUL_TIME
+#define UMUL_TIME 1
+#endif
+#ifndef UDIV_TIME
+#define UDIV_TIME UMUL_TIME
+#endif
+
+
+mpi_limb_t
+mpihelp_mod_1(mpi_ptr_t dividend_ptr, mpi_size_t dividend_size,
+ mpi_limb_t divisor_limb)
+{
+ mpi_size_t i;
+ mpi_limb_t n1, n0, r;
+ mpi_limb_t dummy __maybe_unused;
+
+ /* Botch: Should this be handled at all? Rely on callers? */
+ if (!dividend_size)
+ return 0;
+
+ /* If multiplication is much faster than division, and the
+ * dividend is large, pre-invert the divisor, and use
+ * only multiplications in the inner loop.
+ *
+ * This test should be read:
+ * Does it ever help to use udiv_qrnnd_preinv?
+ * && Does what we save compensate for the inversion overhead?
+ */
+ if (UDIV_TIME > (2 * UMUL_TIME + 6)
+ && (UDIV_TIME - (2 * UMUL_TIME + 6)) * dividend_size > UDIV_TIME) {
+ int normalization_steps;
+
+ normalization_steps = count_leading_zeros(divisor_limb);
+ if (normalization_steps) {
+ mpi_limb_t divisor_limb_inverted;
+
+ divisor_limb <<= normalization_steps;
+
+ /* Compute (2**2N - 2**N * DIVISOR_LIMB) / DIVISOR_LIMB. The
+ * result is a (N+1)-bit approximation to 1/DIVISOR_LIMB, with the
+ * most significant bit (with weight 2**N) implicit.
+ *
+ * Special case for DIVISOR_LIMB == 100...000.
+ */
+ if (!(divisor_limb << 1))
+ divisor_limb_inverted = ~(mpi_limb_t)0;
+ else
+ udiv_qrnnd(divisor_limb_inverted, dummy,
+ -divisor_limb, 0, divisor_limb);
+
+ n1 = dividend_ptr[dividend_size - 1];
+ r = n1 >> (BITS_PER_MPI_LIMB - normalization_steps);
+
+ /* Possible optimization:
+ * if (r == 0
+ * && divisor_limb > ((n1 << normalization_steps)
+ * | (dividend_ptr[dividend_size - 2] >> ...)))
+ * ...one division less...
+ */
+ for (i = dividend_size - 2; i >= 0; i--) {
+ n0 = dividend_ptr[i];
+ UDIV_QRNND_PREINV(dummy, r, r,
+ ((n1 << normalization_steps)
+ | (n0 >> (BITS_PER_MPI_LIMB - normalization_steps))),
+ divisor_limb, divisor_limb_inverted);
+ n1 = n0;
+ }
+ UDIV_QRNND_PREINV(dummy, r, r,
+ n1 << normalization_steps,
+ divisor_limb, divisor_limb_inverted);
+ return r >> normalization_steps;
+ } else {
+ mpi_limb_t divisor_limb_inverted;
+
+ /* Compute (2**2N - 2**N * DIVISOR_LIMB) / DIVISOR_LIMB. The
+ * result is a (N+1)-bit approximation to 1/DIVISOR_LIMB, with the
+ * most significant bit (with weight 2**N) implicit.
+ *
+ * Special case for DIVISOR_LIMB == 100...000.
+ */
+ if (!(divisor_limb << 1))
+ divisor_limb_inverted = ~(mpi_limb_t)0;
+ else
+ udiv_qrnnd(divisor_limb_inverted, dummy,
+ -divisor_limb, 0, divisor_limb);
+
+ i = dividend_size - 1;
+ r = dividend_ptr[i];
+
+ if (r >= divisor_limb)
+ r = 0;
+ else
+ i--;
+
+ for ( ; i >= 0; i--) {
+ n0 = dividend_ptr[i];
+ UDIV_QRNND_PREINV(dummy, r, r,
+ n0, divisor_limb, divisor_limb_inverted);
+ }
+ return r;
+ }
+ } else {
+ if (UDIV_NEEDS_NORMALIZATION) {
+ int normalization_steps;
+
+ normalization_steps = count_leading_zeros(divisor_limb);
+ if (normalization_steps) {
+ divisor_limb <<= normalization_steps;
+
+ n1 = dividend_ptr[dividend_size - 1];
+ r = n1 >> (BITS_PER_MPI_LIMB - normalization_steps);
+
+ /* Possible optimization:
+ * if (r == 0
+ * && divisor_limb > ((n1 << normalization_steps)
+ * | (dividend_ptr[dividend_size - 2] >> ...)))
+ * ...one division less...
+ */
+ for (i = dividend_size - 2; i >= 0; i--) {
+ n0 = dividend_ptr[i];
+ udiv_qrnnd(dummy, r, r,
+ ((n1 << normalization_steps)
+ | (n0 >> (BITS_PER_MPI_LIMB - normalization_steps))),
+ divisor_limb);
+ n1 = n0;
+ }
+ udiv_qrnnd(dummy, r, r,
+ n1 << normalization_steps,
+ divisor_limb);
+ return r >> normalization_steps;
+ }
+ }
+ /* No normalization needed, either because udiv_qrnnd doesn't require
+ * it, or because DIVISOR_LIMB is already normalized.
+ */
+ i = dividend_size - 1;
+ r = dividend_ptr[i];
+
+ if (r >= divisor_limb)
+ r = 0;
+ else
+ i--;
+
+ for (; i >= 0; i--) {
+ n0 = dividend_ptr[i];
+ udiv_qrnnd(dummy, r, r, n0, divisor_limb);
+ }
+ return r;
+ }
+}
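
Stripped of the pre-inversion machinery, both branches of mpihelp_mod_1() perform the same schoolbook reduction: keep a running remainder and fold in one dividend limb per step with a two-limb-by-one-limb division. With 32-bit limbs and a 64-bit intermediate standing in for udiv_qrnnd(), the structure is (sketch; the function name is illustrative):

#include <stdint.h>
#include <stddef.h>

/* Remainder of an n-limb little-endian number modulo a single limb,
 * the operation mpihelp_mod_1 implements. d must be nonzero; r < d
 * throughout, so (r << 32) | limb always fits in 64 bits.
 */
static uint32_t mod_1(const uint32_t *limbs, size_t n, uint32_t d)
{
	uint64_t r = 0;
	size_t i;

	for (i = n; i-- > 0; ) {
		r = (r << 32) | limbs[i];	/* bring down next limb */
		r %= d;
	}
	return (uint32_t)r;
}

The UDIV_QRNND_PREINV path reaches the same remainders but replaces each hardware division with multiplications by a precomputed approximate reciprocal of the divisor, which pays off when division is much slower than multiplication.
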
+
+/* Divide num (NP/NSIZE) by den (DP/DSIZE) and write
+ * the NSIZE-DSIZE least significant quotient limbs at QP
+ * and the DSIZE long remainder at NP. If QEXTRA_LIMBS is
+ * non-zero, generate that many fraction bits and append them after the
+ * other quotient limbs.
+ * Return the most significant limb of the quotient, this is always 0 or 1.
+ *
+ * Preconditions:
+ * 0. NSIZE >= DSIZE.
+ * 1. The most significant bit of the divisor must be set.
+ * 2. QP must either not overlap with the input operands at all, or
+ * QP + DSIZE >= NP must hold true. (This means that it's
+ * possible to put the quotient in the high part of NUM, right after the
+ *	remainder in NUM.)
+ * 3. NSIZE >= DSIZE, even if QEXTRA_LIMBS is non-zero.
+ */
+
+mpi_limb_t
+mpihelp_divrem(mpi_ptr_t qp, mpi_size_t qextra_limbs,
+ mpi_ptr_t np, mpi_size_t nsize, mpi_ptr_t dp, mpi_size_t dsize)
+{
+ mpi_limb_t most_significant_q_limb = 0;
+
+ switch (dsize) {
+ case 0:
+ /* We are asked to divide by zero, so go ahead and do it! (To make
+ the compiler not remove this statement, return the value.) */
+ /*
+ * existing clients of this function have been modified
+ * not to call it with dsize == 0, so this should not happen
+ */
+ return 1 / dsize;
+
+ case 1:
+ {
+ mpi_size_t i;
+ mpi_limb_t n1;
+ mpi_limb_t d;
+
+ d = dp[0];
+ n1 = np[nsize - 1];
+
+ if (n1 >= d) {
+ n1 -= d;
+ most_significant_q_limb = 1;
+ }
+
+ qp += qextra_limbs;
+ for (i = nsize - 2; i >= 0; i--)
+ udiv_qrnnd(qp[i], n1, n1, np[i], d);
+ qp -= qextra_limbs;
+
+ for (i = qextra_limbs - 1; i >= 0; i--)
+ udiv_qrnnd(qp[i], n1, n1, 0, d);
+
+ np[0] = n1;
+ }
+ break;
+
+ case 2:
+ {
+ mpi_size_t i;
+ mpi_limb_t n1, n0, n2;
+ mpi_limb_t d1, d0;
+
+ np += nsize - 2;
+ d1 = dp[1];
+ d0 = dp[0];
+ n1 = np[1];
+ n0 = np[0];
+
+ if (n1 >= d1 && (n1 > d1 || n0 >= d0)) {
+ sub_ddmmss(n1, n0, n1, n0, d1, d0);
+ most_significant_q_limb = 1;
+ }
+
+ for (i = qextra_limbs + nsize - 2 - 1; i >= 0; i--) {
+ mpi_limb_t q;
+ mpi_limb_t r;
+
+ if (i >= qextra_limbs)
+ np--;
+ else
+ np[0] = 0;
+
+ if (n1 == d1) {
+ /* Q should be either 111..111 or 111..110. Need special
+ * treatment of this rare case as normal division would
+ * give overflow. */
+ q = ~(mpi_limb_t) 0;
+
+ r = n0 + d1;
+ if (r < d1) { /* Carry in the addition? */
+ add_ssaaaa(n1, n0, r - d0,
+ np[0], 0, d0);
+ qp[i] = q;
+ continue;
+ }
+ n1 = d0 - (d0 != 0 ? 1 : 0);
+ n0 = -d0;
+ } else {
+ udiv_qrnnd(q, r, n1, n0, d1);
+ umul_ppmm(n1, n0, d0, q);
+ }
+
+ n2 = np[0];
+q_test:
+ if (n1 > r || (n1 == r && n0 > n2)) {
+ /* The estimated Q was too large. */
+ q--;
+ sub_ddmmss(n1, n0, n1, n0, 0, d0);
+ r += d1;
+ if (r >= d1) /* If not carry, test Q again. */
+ goto q_test;
+ }
+
+ qp[i] = q;
+ sub_ddmmss(n1, n0, r, n2, n1, n0);
+ }
+ np[1] = n1;
+ np[0] = n0;
+ }
+ break;
+
+ default:
+ {
+ mpi_size_t i;
+ mpi_limb_t dX, d1, n0;
+
+ np += nsize - dsize;
+ dX = dp[dsize - 1];
+ d1 = dp[dsize - 2];
+ n0 = np[dsize - 1];
+
+ if (n0 >= dX) {
+ if (n0 > dX
+ || mpihelp_cmp(np, dp, dsize - 1) >= 0) {
+ mpihelp_sub_n(np, np, dp, dsize);
+ n0 = np[dsize - 1];
+ most_significant_q_limb = 1;
+ }
+ }
+
+ for (i = qextra_limbs + nsize - dsize - 1; i >= 0; i--) {
+ mpi_limb_t q;
+ mpi_limb_t n1, n2;
+ mpi_limb_t cy_limb;
+
+ if (i >= qextra_limbs) {
+ np--;
+ n2 = np[dsize];
+ } else {
+ n2 = np[dsize - 1];
+ MPN_COPY_DECR(np + 1, np, dsize - 1);
+ np[0] = 0;
+ }
+
+ if (n0 == dX) {
+ /* This might over-estimate q, but it's probably not worth
+ * the extra code here to find out. */
+ q = ~(mpi_limb_t) 0;
+ } else {
+ mpi_limb_t r;
+
+ udiv_qrnnd(q, r, n0, np[dsize - 1], dX);
+ umul_ppmm(n1, n0, d1, q);
+
+ while (n1 > r
+ || (n1 == r
+ && n0 > np[dsize - 2])) {
+ q--;
+ r += dX;
+ if (r < dX) /* I.e. "carry in previous addition?" */
+ break;
+ n1 -= n0 < d1;
+ n0 -= d1;
+ }
+ }
+
+ /* Possible optimization: We already have (q * n0) and (1 * n1)
+ * after the calculation of q. Taking advantage of that, we
+ * could make this loop make two iterations less. */
+ cy_limb = mpihelp_submul_1(np, dp, dsize, q);
+
+ if (n2 != cy_limb) {
+ mpihelp_add_n(np, np, dp, dsize);
+ q--;
+ }
+
+ qp[i] = q;
+ n0 = np[dsize - 1];
+ }
+ }
+ }
+
+ return most_significant_q_limb;
+}
+
+/****************
+ * Divide (DIVIDEND_PTR,,DIVIDEND_SIZE) by DIVISOR_LIMB.
+ * Write DIVIDEND_SIZE limbs of quotient at QUOT_PTR.
+ * Return the single-limb remainder.
+ * There are no constraints on the value of the divisor.
+ *
+ * QUOT_PTR and DIVIDEND_PTR might point to the same limb.
+ */
+
+mpi_limb_t
+mpihelp_divmod_1(mpi_ptr_t quot_ptr,
+ mpi_ptr_t dividend_ptr, mpi_size_t dividend_size,
+ mpi_limb_t divisor_limb)
+{
+ mpi_size_t i;
+ mpi_limb_t n1, n0, r;
+ mpi_limb_t dummy __maybe_unused;
+
+ if (!dividend_size)
+ return 0;
+
+ /* If multiplication is much faster than division, and the
+ * dividend is large, pre-invert the divisor, and use
+ * only multiplications in the inner loop.
+ *
+ * This test should be read:
+ * Does it ever help to use udiv_qrnnd_preinv?
+ * && Does what we save compensate for the inversion overhead?
+ */
+ if (UDIV_TIME > (2 * UMUL_TIME + 6)
+ && (UDIV_TIME - (2 * UMUL_TIME + 6)) * dividend_size > UDIV_TIME) {
+ int normalization_steps;
+
+ normalization_steps = count_leading_zeros(divisor_limb);
+ if (normalization_steps) {
+ mpi_limb_t divisor_limb_inverted;
+
+ divisor_limb <<= normalization_steps;
+
+ /* Compute (2**2N - 2**N * DIVISOR_LIMB) / DIVISOR_LIMB. The
+ * result is a (N+1)-bit approximation to 1/DIVISOR_LIMB, with the
+ * most significant bit (with weight 2**N) implicit.
+ */
+ /* Special case for DIVISOR_LIMB == 100...000. */
+ if (!(divisor_limb << 1))
+ divisor_limb_inverted = ~(mpi_limb_t)0;
+ else
+ udiv_qrnnd(divisor_limb_inverted, dummy,
+ -divisor_limb, 0, divisor_limb);
+
+ n1 = dividend_ptr[dividend_size - 1];
+ r = n1 >> (BITS_PER_MPI_LIMB - normalization_steps);
+
+ /* Possible optimization:
+ * if (r == 0
+ * && divisor_limb > ((n1 << normalization_steps)
+ * | (dividend_ptr[dividend_size - 2] >> ...)))
+ * ...one division less...
+ */
+ for (i = dividend_size - 2; i >= 0; i--) {
+ n0 = dividend_ptr[i];
+ UDIV_QRNND_PREINV(quot_ptr[i + 1], r, r,
+ ((n1 << normalization_steps)
+ | (n0 >> (BITS_PER_MPI_LIMB - normalization_steps))),
+ divisor_limb, divisor_limb_inverted);
+ n1 = n0;
+ }
+ UDIV_QRNND_PREINV(quot_ptr[0], r, r,
+ n1 << normalization_steps,
+ divisor_limb, divisor_limb_inverted);
+ return r >> normalization_steps;
+ } else {
+ mpi_limb_t divisor_limb_inverted;
+
+ /* Compute (2**2N - 2**N * DIVISOR_LIMB) / DIVISOR_LIMB. The
+ * result is a (N+1)-bit approximation to 1/DIVISOR_LIMB, with the
+ * most significant bit (with weight 2**N) implicit.
+ */
+ /* Special case for DIVISOR_LIMB == 100...000. */
+ if (!(divisor_limb << 1))
+ divisor_limb_inverted = ~(mpi_limb_t) 0;
+ else
+ udiv_qrnnd(divisor_limb_inverted, dummy,
+ -divisor_limb, 0, divisor_limb);
+
+ i = dividend_size - 1;
+ r = dividend_ptr[i];
+
+ if (r >= divisor_limb)
+ r = 0;
+ else
+ quot_ptr[i--] = 0;
+
+ for ( ; i >= 0; i--) {
+ n0 = dividend_ptr[i];
+ UDIV_QRNND_PREINV(quot_ptr[i], r, r,
+ n0, divisor_limb, divisor_limb_inverted);
+ }
+ return r;
+ }
+ } else {
+ if (UDIV_NEEDS_NORMALIZATION) {
+ int normalization_steps;
+
+ normalization_steps = count_leading_zeros(divisor_limb);
+ if (normalization_steps) {
+ divisor_limb <<= normalization_steps;
+
+ n1 = dividend_ptr[dividend_size - 1];
+ r = n1 >> (BITS_PER_MPI_LIMB - normalization_steps);
+
+ /* Possible optimization:
+ * if (r == 0
+ * && divisor_limb > ((n1 << normalization_steps)
+ * | (dividend_ptr[dividend_size - 2] >> ...)))
+ * ...one division less...
+ */
+ for (i = dividend_size - 2; i >= 0; i--) {
+ n0 = dividend_ptr[i];
+ udiv_qrnnd(quot_ptr[i + 1], r, r,
+ ((n1 << normalization_steps)
+ | (n0 >> (BITS_PER_MPI_LIMB - normalization_steps))),
+ divisor_limb);
+ n1 = n0;
+ }
+ udiv_qrnnd(quot_ptr[0], r, r,
+ n1 << normalization_steps,
+ divisor_limb);
+ return r >> normalization_steps;
+ }
+ }
+ /* No normalization needed, either because udiv_qrnnd doesn't require
+ * it, or because DIVISOR_LIMB is already normalized.
+ */
+ i = dividend_size - 1;
+ r = dividend_ptr[i];
+
+ if (r >= divisor_limb)
+ r = 0;
+ else
+ quot_ptr[i--] = 0;
+
+ for (; i >= 0; i--) {
+ n0 = dividend_ptr[i];
+ udiv_qrnnd(quot_ptr[i], r, r, n0, divisor_limb);
+ }
+ return r;
+ }
+}
diff --git a/lib/crypto/mpi/mpih-mul.c b/lib/crypto/mpi/mpih-mul.c
new file mode 100644
index 000000000..e5f1c84e3
--- /dev/null
+++ b/lib/crypto/mpi/mpih-mul.c
@@ -0,0 +1,509 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* mpihelp-mul.c - MPI helper functions
+ * Copyright (C) 1994, 1996, 1998, 1999,
+ * 2000 Free Software Foundation, Inc.
+ *
+ * This file is part of GnuPG.
+ *
+ * Note: This code is heavily based on the GNU MP Library.
+ * Actually it's the same code with only minor changes in the
+ * way the data is stored; this is to support the abstraction
+ * of an optional secure memory allocation which may be used
+ * to avoid revealing of sensitive data due to paging etc.
+ * The GNU MP Library itself is published under the LGPL;
+ * however I decided to publish this code under the plain GPL.
+ */
+
+#include <linux/string.h>
+#include "mpi-internal.h"
+#include "longlong.h"
+
+#define MPN_MUL_N_RECURSE(prodp, up, vp, size, tspace) \
+ do { \
+ if ((size) < KARATSUBA_THRESHOLD) \
+ mul_n_basecase(prodp, up, vp, size); \
+ else \
+ mul_n(prodp, up, vp, size, tspace); \
+	} while (0)
+
+#define MPN_SQR_N_RECURSE(prodp, up, size, tspace) \
+ do { \
+ if ((size) < KARATSUBA_THRESHOLD) \
+ mpih_sqr_n_basecase(prodp, up, size); \
+ else \
+ mpih_sqr_n(prodp, up, size, tspace); \
+	} while (0)
+
+/* Multiply the natural numbers u (pointed to by UP) and v (pointed to by VP),
+ * both with SIZE limbs, and store the result at PRODP. 2 * SIZE limbs are
+ * always stored. Return the most significant limb.
+ *
+ * Argument constraints:
+ * 1. PRODP != UP and PRODP != VP, i.e. the destination
+ * must be distinct from the multiplier and the multiplicand.
+ *
+ *
+ * Handle simple cases with traditional multiplication.
+ *
+ * This is the most critical code of multiplication. All multiplies rely
+ * on this, both small and huge. Small ones arrive here immediately. Huge
+ * ones arrive here as this is the base case for Karatsuba's recursive
+ * algorithm below.
+ */
+
+static mpi_limb_t
+mul_n_basecase(mpi_ptr_t prodp, mpi_ptr_t up, mpi_ptr_t vp, mpi_size_t size)
+{
+ mpi_size_t i;
+ mpi_limb_t cy;
+ mpi_limb_t v_limb;
+
+ /* Multiply by the first limb in V separately, as the result can be
+ * stored (not added) to PROD. We also avoid a loop for zeroing. */
+ v_limb = vp[0];
+ if (v_limb <= 1) {
+ if (v_limb == 1)
+ MPN_COPY(prodp, up, size);
+ else
+ MPN_ZERO(prodp, size);
+ cy = 0;
+ } else
+ cy = mpihelp_mul_1(prodp, up, size, v_limb);
+
+ prodp[size] = cy;
+ prodp++;
+
+ /* For each iteration in the outer loop, multiply one limb from
+ * U with one limb from V, and add it to PROD. */
+ for (i = 1; i < size; i++) {
+ v_limb = vp[i];
+ if (v_limb <= 1) {
+ cy = 0;
+ if (v_limb == 1)
+ cy = mpihelp_add_n(prodp, prodp, up, size);
+ } else
+ cy = mpihelp_addmul_1(prodp, up, size, v_limb);
+
+ prodp[size] = cy;
+ prodp++;
+ }
+
+ return cy;
+}
+
+static void
+mul_n(mpi_ptr_t prodp, mpi_ptr_t up, mpi_ptr_t vp,
+ mpi_size_t size, mpi_ptr_t tspace)
+{
+ if (size & 1) {
+ /* The size is odd, and the code below doesn't handle that.
+ * Multiply the least significant (size - 1) limbs with a recursive
+ * call, and handle the most significant limb of S1 and S2
+ * separately.
+ * A slightly faster way to do this would be to make the Karatsuba
+ * code below behave as if the size were even, and let it check for
+ * odd size in the end. I.e., in essence move this code to the end.
+ * Doing so would save us a recursive call, and potentially make the
+ * stack grow a lot less.
+ */
+ mpi_size_t esize = size - 1; /* even size */
+ mpi_limb_t cy_limb;
+
+ MPN_MUL_N_RECURSE(prodp, up, vp, esize, tspace);
+ cy_limb = mpihelp_addmul_1(prodp + esize, up, esize, vp[esize]);
+ prodp[esize + esize] = cy_limb;
+ cy_limb = mpihelp_addmul_1(prodp + esize, vp, size, up[esize]);
+ prodp[esize + size] = cy_limb;
+ } else {
+ /* Anatolij Alekseevich Karatsuba's divide-and-conquer algorithm.
+ *
+ * Split U in two pieces, U1 and U0, such that
+ * U = U0 + U1*(B**n),
+ * and V in V1 and V0, such that
+ * V = V0 + V1*(B**n).
+ *
+ * UV is then computed recursively using the identity
+ *
+		 *	UV = (B^2n + B^n)*U1*V1 + B^n*(U1-U0)*(V0-V1)
+		 *	     + (B^n + 1)*U0*V0
+		 *
+		 * where B = 2**BITS_PER_MPI_LIMB.
+ */
+ mpi_size_t hsize = size >> 1;
+ mpi_limb_t cy;
+ int negflg;
+
+ /* Product H. ________________ ________________
+ * |_____U1 x V1____||____U0 x V0_____|
+ * Put result in upper part of PROD and pass low part of TSPACE
+ * as new TSPACE.
+ */
+ MPN_MUL_N_RECURSE(prodp + size, up + hsize, vp + hsize, hsize,
+ tspace);
+
+ /* Product M. ________________
+ * |_(U1-U0)(V0-V1)_|
+ */
+ if (mpihelp_cmp(up + hsize, up, hsize) >= 0) {
+ mpihelp_sub_n(prodp, up + hsize, up, hsize);
+ negflg = 0;
+ } else {
+ mpihelp_sub_n(prodp, up, up + hsize, hsize);
+ negflg = 1;
+ }
+ if (mpihelp_cmp(vp + hsize, vp, hsize) >= 0) {
+ mpihelp_sub_n(prodp + hsize, vp + hsize, vp, hsize);
+ negflg ^= 1;
+ } else {
+ mpihelp_sub_n(prodp + hsize, vp, vp + hsize, hsize);
+ /* No change of NEGFLG. */
+ }
+ /* Read temporary operands from low part of PROD.
+ * Put result in low part of TSPACE using upper part of TSPACE
+ * as new TSPACE.
+ */
+ MPN_MUL_N_RECURSE(tspace, prodp, prodp + hsize, hsize,
+ tspace + size);
+
+ /* Add/copy product H. */
+ MPN_COPY(prodp + hsize, prodp + size, hsize);
+ cy = mpihelp_add_n(prodp + size, prodp + size,
+ prodp + size + hsize, hsize);
+
+ /* Add product M (if NEGFLG M is a negative number) */
+ if (negflg)
+ cy -=
+ mpihelp_sub_n(prodp + hsize, prodp + hsize, tspace,
+ size);
+ else
+ cy +=
+ mpihelp_add_n(prodp + hsize, prodp + hsize, tspace,
+ size);
+
+ /* Product L. ________________ ________________
+ * |________________||____U0 x V0_____|
+ * Read temporary operands from low part of PROD.
+ * Put result in low part of TSPACE using upper part of TSPACE
+ * as new TSPACE.
+ */
+ MPN_MUL_N_RECURSE(tspace, up, vp, hsize, tspace + size);
+
+ /* Add/copy Product L (twice) */
+
+ cy += mpihelp_add_n(prodp + hsize, prodp + hsize, tspace, size);
+ if (cy)
+ mpihelp_add_1(prodp + hsize + size,
+ prodp + hsize + size, hsize, cy);
+
+ MPN_COPY(prodp, tspace, hsize);
+ cy = mpihelp_add_n(prodp + hsize, prodp + hsize, tspace + hsize,
+ hsize);
+ if (cy)
+ mpihelp_add_1(prodp + size, prodp + size, size, 1);
+ }
+}
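
The identity used by mul_n() can be exercised at the smallest useful size: one level of Karatsuba on the 32-bit halves of 64-bit operands, producing a full 128-bit product from three 32x32 multiplies instead of four. This sketch is self-contained and only illustrates the identity; the function name is invented:

#include <stdint.h>

/* One level of Karatsuba: UV = B^2*H + B*(H + L + M) + L with
 * B = 2^32, H = U1*V1, L = U0*V0, M = (U1-U0)*(V0-V1). M can be
 * negative, so it is carried in sign-magnitude form, as mul_n()
 * does with its negflg.
 */
static void karatsuba64(uint64_t u, uint64_t v, uint64_t *hi, uint64_t *lo)
{
	uint32_t u1 = (uint32_t)(u >> 32), u0 = (uint32_t)u;
	uint32_t v1 = (uint32_t)(v >> 32), v0 = (uint32_t)v;
	uint64_t H = (uint64_t)u1 * v1;
	uint64_t L = (uint64_t)u0 * v0;
	uint64_t du, dv, M, mid_lo, mid_hi;
	int neg = 0;

	if (u1 >= u0) {
		du = u1 - u0;
	} else {
		du = u0 - u1;
		neg ^= 1;
	}
	if (v0 >= v1) {
		dv = v0 - v1;
	} else {
		dv = v1 - v0;
		neg ^= 1;
	}
	M = du * dv;			/* |(U1-U0)*(V0-V1)| */

	mid_lo = H + L;			/* middle term H + L +/- M, */
	mid_hi = mid_lo < H;		/* kept as a 128-bit pair   */
	if (neg) {
		mid_hi -= mid_lo < M;	/* middle >= 0 always holds */
		mid_lo -= M;
	} else {
		mid_lo += M;
		mid_hi += mid_lo < M;
	}

	/* assemble H*2^64 + middle*2^32 + L with explicit carries */
	*lo = L + (mid_lo << 32);
	*hi = H + (mid_hi << 32) + (mid_lo >> 32) + (*lo < L);
}
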
+
+void mpih_sqr_n_basecase(mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t size)
+{
+ mpi_size_t i;
+ mpi_limb_t cy_limb;
+ mpi_limb_t v_limb;
+
+ /* Multiply by the first limb in V separately, as the result can be
+ * stored (not added) to PROD. We also avoid a loop for zeroing. */
+ v_limb = up[0];
+ if (v_limb <= 1) {
+ if (v_limb == 1)
+ MPN_COPY(prodp, up, size);
+ else
+ MPN_ZERO(prodp, size);
+ cy_limb = 0;
+ } else
+ cy_limb = mpihelp_mul_1(prodp, up, size, v_limb);
+
+ prodp[size] = cy_limb;
+ prodp++;
+
+ /* For each iteration in the outer loop, multiply one limb from
+ * U with one limb from V, and add it to PROD. */
+ for (i = 1; i < size; i++) {
+ v_limb = up[i];
+ if (v_limb <= 1) {
+ cy_limb = 0;
+ if (v_limb == 1)
+ cy_limb = mpihelp_add_n(prodp, prodp, up, size);
+ } else
+ cy_limb = mpihelp_addmul_1(prodp, up, size, v_limb);
+
+ prodp[size] = cy_limb;
+ prodp++;
+ }
+}
+
+void
+mpih_sqr_n(mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t size, mpi_ptr_t tspace)
+{
+ if (size & 1) {
+ /* The size is odd, and the code below doesn't handle that.
+ * Multiply the least significant (size - 1) limbs with a recursive
+ * call, and handle the most significant limb of S1 and S2
+ * separately.
+ * A slightly faster way to do this would be to make the Karatsuba
+ * code below behave as if the size were even, and let it check for
+ * odd size in the end. I.e., in essence move this code to the end.
+ * Doing so would save us a recursive call, and potentially make the
+ * stack grow a lot less.
+ */
+ mpi_size_t esize = size - 1; /* even size */
+ mpi_limb_t cy_limb;
+
+ MPN_SQR_N_RECURSE(prodp, up, esize, tspace);
+ cy_limb = mpihelp_addmul_1(prodp + esize, up, esize, up[esize]);
+ prodp[esize + esize] = cy_limb;
+ cy_limb = mpihelp_addmul_1(prodp + esize, up, size, up[esize]);
+
+ prodp[esize + size] = cy_limb;
+ } else {
+ mpi_size_t hsize = size >> 1;
+ mpi_limb_t cy;
+
+ /* Product H. ________________ ________________
+ * |_____U1 x U1____||____U0 x U0_____|
+ * Put result in upper part of PROD and pass low part of TSPACE
+ * as new TSPACE.
+ */
+ MPN_SQR_N_RECURSE(prodp + size, up + hsize, hsize, tspace);
+
+ /* Product M. ________________
+ * |_(U1-U0)(U0-U1)_|
+ */
+ if (mpihelp_cmp(up + hsize, up, hsize) >= 0)
+ mpihelp_sub_n(prodp, up + hsize, up, hsize);
+ else
+ mpihelp_sub_n(prodp, up, up + hsize, hsize);
+
+ /* Read temporary operands from low part of PROD.
+ * Put result in low part of TSPACE using upper part of TSPACE
+ * as new TSPACE. */
+ MPN_SQR_N_RECURSE(tspace, prodp, hsize, tspace + size);
+
+ /* Add/copy product H */
+ MPN_COPY(prodp + hsize, prodp + size, hsize);
+ cy = mpihelp_add_n(prodp + size, prodp + size,
+ prodp + size + hsize, hsize);
+
+ /* Add product M (if NEGFLG M is a negative number). */
+ cy -= mpihelp_sub_n(prodp + hsize, prodp + hsize, tspace, size);
+
+ /* Product L. ________________ ________________
+ * |________________||____U0 x U0_____|
+ * Read temporary operands from low part of PROD.
+ * Put result in low part of TSPACE using upper part of TSPACE
+ * as new TSPACE. */
+ MPN_SQR_N_RECURSE(tspace, up, hsize, tspace + size);
+
+ /* Add/copy Product L (twice). */
+ cy += mpihelp_add_n(prodp + hsize, prodp + hsize, tspace, size);
+ if (cy)
+ mpihelp_add_1(prodp + hsize + size,
+ prodp + hsize + size, hsize, cy);
+
+ MPN_COPY(prodp, tspace, hsize);
+ cy = mpihelp_add_n(prodp + hsize, prodp + hsize, tspace + hsize,
+ hsize);
+ if (cy)
+ mpihelp_add_1(prodp + size, prodp + size, size, 1);
+ }
+}
+
+
+void mpihelp_mul_n(mpi_ptr_t prodp,
+ mpi_ptr_t up, mpi_ptr_t vp, mpi_size_t size)
+{
+ if (up == vp) {
+ if (size < KARATSUBA_THRESHOLD)
+ mpih_sqr_n_basecase(prodp, up, size);
+ else {
+ mpi_ptr_t tspace;
+ tspace = mpi_alloc_limb_space(2 * size);
+ mpih_sqr_n(prodp, up, size, tspace);
+ mpi_free_limb_space(tspace);
+ }
+ } else {
+ if (size < KARATSUBA_THRESHOLD)
+ mul_n_basecase(prodp, up, vp, size);
+ else {
+ mpi_ptr_t tspace;
+ tspace = mpi_alloc_limb_space(2 * size);
+ mul_n(prodp, up, vp, size, tspace);
+ mpi_free_limb_space(tspace);
+ }
+ }
+}
+
+int
+mpihelp_mul_karatsuba_case(mpi_ptr_t prodp,
+ mpi_ptr_t up, mpi_size_t usize,
+ mpi_ptr_t vp, mpi_size_t vsize,
+ struct karatsuba_ctx *ctx)
+{
+ mpi_limb_t cy;
+
+ if (!ctx->tspace || ctx->tspace_size < vsize) {
+ if (ctx->tspace)
+ mpi_free_limb_space(ctx->tspace);
+ ctx->tspace = mpi_alloc_limb_space(2 * vsize);
+ if (!ctx->tspace)
+ return -ENOMEM;
+ ctx->tspace_size = vsize;
+ }
+
+ MPN_MUL_N_RECURSE(prodp, up, vp, vsize, ctx->tspace);
+
+ prodp += vsize;
+ up += vsize;
+ usize -= vsize;
+ if (usize >= vsize) {
+ if (!ctx->tp || ctx->tp_size < vsize) {
+ if (ctx->tp)
+ mpi_free_limb_space(ctx->tp);
+ ctx->tp = mpi_alloc_limb_space(2 * vsize);
+ if (!ctx->tp) {
+ if (ctx->tspace)
+ mpi_free_limb_space(ctx->tspace);
+ ctx->tspace = NULL;
+ return -ENOMEM;
+ }
+ ctx->tp_size = vsize;
+ }
+
+ do {
+ MPN_MUL_N_RECURSE(ctx->tp, up, vp, vsize, ctx->tspace);
+ cy = mpihelp_add_n(prodp, prodp, ctx->tp, vsize);
+ mpihelp_add_1(prodp + vsize, ctx->tp + vsize, vsize,
+ cy);
+ prodp += vsize;
+ up += vsize;
+ usize -= vsize;
+ } while (usize >= vsize);
+ }
+
+ if (usize) {
+ if (usize < KARATSUBA_THRESHOLD) {
+ mpi_limb_t tmp;
+ if (mpihelp_mul(ctx->tspace, vp, vsize, up, usize, &tmp)
+ < 0)
+ return -ENOMEM;
+ } else {
+ if (!ctx->next) {
+ ctx->next = kzalloc(sizeof *ctx, GFP_KERNEL);
+ if (!ctx->next)
+ return -ENOMEM;
+ }
+ if (mpihelp_mul_karatsuba_case(ctx->tspace,
+ vp, vsize,
+ up, usize,
+ ctx->next) < 0)
+ return -ENOMEM;
+ }
+
+ cy = mpihelp_add_n(prodp, prodp, ctx->tspace, vsize);
+ mpihelp_add_1(prodp + vsize, ctx->tspace + vsize, usize, cy);
+ }
+
+ return 0;
+}
+
+void mpihelp_release_karatsuba_ctx(struct karatsuba_ctx *ctx)
+{
+ struct karatsuba_ctx *ctx2;
+
+ if (ctx->tp)
+ mpi_free_limb_space(ctx->tp);
+ if (ctx->tspace)
+ mpi_free_limb_space(ctx->tspace);
+ for (ctx = ctx->next; ctx; ctx = ctx2) {
+ ctx2 = ctx->next;
+ if (ctx->tp)
+ mpi_free_limb_space(ctx->tp);
+ if (ctx->tspace)
+ mpi_free_limb_space(ctx->tspace);
+ kfree(ctx);
+ }
+}
+
+/* Multiply the natural numbers u (pointed to by UP, with USIZE limbs)
+ * and v (pointed to by VP, with VSIZE limbs), and store the result at
+ * PRODP. USIZE + VSIZE limbs are always stored, even though the most
+ * significant limb may be zero when the input operands are normalized.
+ * Return the most significant limb of the result.
+ *
+ * NOTE: The space pointed to by PRODP is overwritten before the code is
+ * finished reading U and V, so overlapping operands are an error.
+ *
+ * Argument constraints:
+ * 1. USIZE >= VSIZE.
+ * 2. PRODP != UP and PRODP != VP, i.e. the destination
+ * must be distinct from the multiplier and the multiplicand.
+ */
+
+int
+mpihelp_mul(mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t usize,
+ mpi_ptr_t vp, mpi_size_t vsize, mpi_limb_t *_result)
+{
+ mpi_ptr_t prod_endp = prodp + usize + vsize - 1;
+ mpi_limb_t cy;
+ struct karatsuba_ctx ctx;
+
+ if (vsize < KARATSUBA_THRESHOLD) {
+ mpi_size_t i;
+ mpi_limb_t v_limb;
+
+ if (!vsize) {
+ *_result = 0;
+ return 0;
+ }
+
+ /* Multiply by the first limb in V separately, as the result can be
+ * stored (not added) to PROD. We also avoid a loop for zeroing. */
+ v_limb = vp[0];
+ if (v_limb <= 1) {
+ if (v_limb == 1)
+ MPN_COPY(prodp, up, usize);
+ else
+ MPN_ZERO(prodp, usize);
+ cy = 0;
+ } else
+ cy = mpihelp_mul_1(prodp, up, usize, v_limb);
+
+ prodp[usize] = cy;
+ prodp++;
+
+ /* For each iteration in the outer loop, multiply one limb from
+ * U with one limb from V, and add it to PROD. */
+ for (i = 1; i < vsize; i++) {
+ v_limb = vp[i];
+ if (v_limb <= 1) {
+ cy = 0;
+ if (v_limb == 1)
+ cy = mpihelp_add_n(prodp, prodp, up,
+ usize);
+ } else
+ cy = mpihelp_addmul_1(prodp, up, usize, v_limb);
+
+ prodp[usize] = cy;
+ prodp++;
+ }
+
+ *_result = cy;
+ return 0;
+ }
+
+ memset(&ctx, 0, sizeof ctx);
+ if (mpihelp_mul_karatsuba_case(prodp, up, usize, vp, vsize, &ctx) < 0)
+ return -ENOMEM;
+ mpihelp_release_karatsuba_ctx(&ctx);
+ *_result = *prod_endp;
+ return 0;
+}
diff --git a/lib/crypto/mpi/mpiutil.c b/lib/crypto/mpi/mpiutil.c
new file mode 100644
index 000000000..aa8c46544
--- /dev/null
+++ b/lib/crypto/mpi/mpiutil.c
@@ -0,0 +1,330 @@
+/* mpiutil.c - Utility functions for MPI
+ * Copyright (C) 1998, 1999 Free Software Foundation, Inc.
+ *
+ * This file is part of GnuPG.
+ *
+ * GnuPG is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * GnuPG is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ */
+
+#include "mpi-internal.h"
+
+/* Constants allocated right away at startup. */
+static MPI constants[MPI_NUMBER_OF_CONSTANTS];
+
+/* Initialize the MPI subsystem. This is called early and allows us to
+ * do some initialization without having to worry about threading issues.
+ */
+static int __init mpi_init(void)
+{
+ int idx;
+ unsigned long value;
+
+ for (idx = 0; idx < MPI_NUMBER_OF_CONSTANTS; idx++) {
+ switch (idx) {
+ case MPI_C_ZERO:
+ value = 0;
+ break;
+ case MPI_C_ONE:
+ value = 1;
+ break;
+ case MPI_C_TWO:
+ value = 2;
+ break;
+ case MPI_C_THREE:
+ value = 3;
+ break;
+ case MPI_C_FOUR:
+ value = 4;
+ break;
+ case MPI_C_EIGHT:
+ value = 8;
+ break;
+ default:
+ pr_err("MPI: invalid mpi_const selector %d\n", idx);
+ return -EFAULT;
+ }
+		constants[idx] = mpi_alloc_set_ui(value);
+		if (!constants[idx])
+			return -ENOMEM;
+		constants[idx]->flags = (16|32);
+ }
+
+ return 0;
+}
+postcore_initcall(mpi_init);
+
+/* Return a constant MPI described by NO which is one of the
+ * MPI_C_xxx macros. There is no need to copy this returned value; it
+ * may be used directly.
+ */
+MPI mpi_const(enum gcry_mpi_constants no)
+{
+	if ((int)no < 0 || no >= MPI_NUMBER_OF_CONSTANTS)
+ pr_err("MPI: invalid mpi_const selector %d\n", no);
+ if (!constants[no])
+ pr_err("MPI: MPI subsystem not initialized\n");
+ return constants[no];
+}
+EXPORT_SYMBOL_GPL(mpi_const);
+
+/****************
+ * Note: It was a bad idea to use the number of limbs to allocate
+ * because on an alpha the limbs are large but we normally need
+ * integers of n bits - so we should change this to bits (or bytes).
+ *
+ * But mpi_alloc is used in a lot of places :-)
+ */
+MPI mpi_alloc(unsigned nlimbs)
+{
+ MPI a;
+
+ a = kmalloc(sizeof *a, GFP_KERNEL);
+ if (!a)
+ return a;
+
+ if (nlimbs) {
+ a->d = mpi_alloc_limb_space(nlimbs);
+ if (!a->d) {
+ kfree(a);
+ return NULL;
+ }
+ } else {
+ a->d = NULL;
+ }
+
+ a->alloced = nlimbs;
+ a->nlimbs = 0;
+ a->sign = 0;
+ a->flags = 0;
+ a->nbits = 0;
+ return a;
+}
+EXPORT_SYMBOL_GPL(mpi_alloc);
+
+mpi_ptr_t mpi_alloc_limb_space(unsigned nlimbs)
+{
+ size_t len = nlimbs * sizeof(mpi_limb_t);
+
+ if (!len)
+ return NULL;
+
+ return kmalloc(len, GFP_KERNEL);
+}
+
+void mpi_free_limb_space(mpi_ptr_t a)
+{
+ if (!a)
+ return;
+
+ kfree_sensitive(a);
+}
+
+void mpi_assign_limb_space(MPI a, mpi_ptr_t ap, unsigned nlimbs)
+{
+ mpi_free_limb_space(a->d);
+ a->d = ap;
+ a->alloced = nlimbs;
+}
+
+/****************
+ * Resize the limb array of A to NLIMBS. The additional space is
+ * cleared (set to 0) by the kcalloc() below.
+ */
+int mpi_resize(MPI a, unsigned nlimbs)
+{
+ void *p;
+
+ if (nlimbs <= a->alloced)
+ return 0; /* no need to do it */
+
+ if (a->d) {
+ p = kcalloc(nlimbs, sizeof(mpi_limb_t), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+ memcpy(p, a->d, a->alloced * sizeof(mpi_limb_t));
+ kfree_sensitive(a->d);
+ a->d = p;
+ } else {
+ a->d = kcalloc(nlimbs, sizeof(mpi_limb_t), GFP_KERNEL);
+ if (!a->d)
+ return -ENOMEM;
+ }
+ a->alloced = nlimbs;
+ return 0;
+}
+
+void mpi_clear(MPI a)
+{
+ if (!a)
+ return;
+ a->nlimbs = 0;
+ a->flags = 0;
+}
+EXPORT_SYMBOL_GPL(mpi_clear);
+
+void mpi_free(MPI a)
+{
+ if (!a)
+ return;
+
+ if (a->flags & 4)
+ kfree_sensitive(a->d);
+ else
+ mpi_free_limb_space(a->d);
+
+ if (a->flags & ~7)
+ pr_info("invalid flag value in mpi\n");
+ kfree(a);
+}
+EXPORT_SYMBOL_GPL(mpi_free);
+
+/****************
+ * Note: This copy function should not interpret the MPI
+ * but copy it transparently.
+ */
+MPI mpi_copy(MPI a)
+{
+ int i;
+ MPI b;
+
+ if (a) {
+		b = mpi_alloc(a->nlimbs);
+		if (!b)
+			return NULL;
+		b->nlimbs = a->nlimbs;
+ b->sign = a->sign;
+ b->flags = a->flags;
+ b->flags &= ~(16|32); /* Reset the immutable and constant flags. */
+ for (i = 0; i < b->nlimbs; i++)
+ b->d[i] = a->d[i];
+ } else
+ b = NULL;
+ return b;
+}
+
+/****************
+ * This function allocates an MPI which is optimized to hold
+ * a value as large as the one given in the argument and allocates it
+ * with the same flags as A.
+ */
+MPI mpi_alloc_like(MPI a)
+{
+ MPI b;
+
+ if (a) {
+		b = mpi_alloc(a->nlimbs);
+		if (!b)
+			return NULL;
+		b->nlimbs = 0;
+ b->sign = 0;
+ b->flags = a->flags;
+ } else
+ b = NULL;
+
+ return b;
+}
+
+
+/* Set U into W and release U. If W is NULL only U will be released. */
+void mpi_snatch(MPI w, MPI u)
+{
+ if (w) {
+ mpi_assign_limb_space(w, u->d, u->alloced);
+ w->nlimbs = u->nlimbs;
+ w->sign = u->sign;
+ w->flags = u->flags;
+ u->alloced = 0;
+ u->nlimbs = 0;
+ u->d = NULL;
+ }
+ mpi_free(u);
+}
+
+
+MPI mpi_set(MPI w, MPI u)
+{
+ mpi_ptr_t wp, up;
+ mpi_size_t usize = u->nlimbs;
+ int usign = u->sign;
+
+	if (!w)
+		w = mpi_alloc(mpi_get_nlimbs(u));
+	if (!w)
+		return NULL;
+ RESIZE_IF_NEEDED(w, usize);
+ wp = w->d;
+ up = u->d;
+ MPN_COPY(wp, up, usize);
+ w->nlimbs = usize;
+ w->flags = u->flags;
+ w->flags &= ~(16|32); /* Reset the immutable and constant flags. */
+ w->sign = usign;
+ return w;
+}
+EXPORT_SYMBOL_GPL(mpi_set);
+
+MPI mpi_set_ui(MPI w, unsigned long u)
+{
+	if (!w)
+		w = mpi_alloc(1);
+	if (!w)
+		return NULL;
+	/* FIXME: If U is 0 there is no need to resize and thus possibly
+	 * allocate the limbs.
+	 */
+ RESIZE_IF_NEEDED(w, 1);
+ w->d[0] = u;
+ w->nlimbs = u ? 1 : 0;
+ w->sign = 0;
+ w->flags = 0;
+ return w;
+}
+EXPORT_SYMBOL_GPL(mpi_set_ui);
+
+MPI mpi_alloc_set_ui(unsigned long u)
+{
+	MPI w = mpi_alloc(1);
+
+	if (!w)
+		return NULL;
+	w->d[0] = u;
+ w->nlimbs = u ? 1 : 0;
+ w->sign = 0;
+ return w;
+}
+
+/****************
+ * Swap the values of A and B when SWAP is 1.
+ * Leave the values unchanged when SWAP is 0.
+ * This implementation should be constant-time regardless of SWAP.
+ */
+void mpi_swap_cond(MPI a, MPI b, unsigned long swap)
+{
+ mpi_size_t i;
+ mpi_size_t nlimbs;
+ mpi_limb_t mask = ((mpi_limb_t)0) - swap;
+ mpi_limb_t x;
+
+ if (a->alloced > b->alloced)
+ nlimbs = b->alloced;
+ else
+ nlimbs = a->alloced;
+ if (a->nlimbs > nlimbs || b->nlimbs > nlimbs)
+ return;
+
+ for (i = 0; i < nlimbs; i++) {
+ x = mask & (a->d[i] ^ b->d[i]);
+ a->d[i] = a->d[i] ^ x;
+ b->d[i] = b->d[i] ^ x;
+ }
+
+ x = mask & (a->nlimbs ^ b->nlimbs);
+ a->nlimbs = a->nlimbs ^ x;
+ b->nlimbs = b->nlimbs ^ x;
+
+ x = mask & (a->sign ^ b->sign);
+ a->sign = a->sign ^ x;
+ b->sign = b->sign ^ x;
+}
+
+MODULE_DESCRIPTION("Multiprecision maths library");
+MODULE_LICENSE("GPL");
diff --git a/lib/crypto/poly1305-donna32.c b/lib/crypto/poly1305-donna32.c
new file mode 100644
index 000000000..7fb71845c
--- /dev/null
+++ b/lib/crypto/poly1305-donna32.c
@@ -0,0 +1,205 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ *
+ * This is based in part on Andrew Moon's poly1305-donna, which is in the
+ * public domain.
+ */
+
+#include <linux/kernel.h>
+#include <asm/unaligned.h>
+#include <crypto/internal/poly1305.h>
+
+void poly1305_core_setkey(struct poly1305_core_key *key,
+ const u8 raw_key[POLY1305_BLOCK_SIZE])
+{
+ /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */
+ key->key.r[0] = (get_unaligned_le32(&raw_key[0])) & 0x3ffffff;
+ key->key.r[1] = (get_unaligned_le32(&raw_key[3]) >> 2) & 0x3ffff03;
+ key->key.r[2] = (get_unaligned_le32(&raw_key[6]) >> 4) & 0x3ffc0ff;
+ key->key.r[3] = (get_unaligned_le32(&raw_key[9]) >> 6) & 0x3f03fff;
+ key->key.r[4] = (get_unaligned_le32(&raw_key[12]) >> 8) & 0x00fffff;
+
+ /* s = 5*r */
+ key->precomputed_s.r[0] = key->key.r[1] * 5;
+ key->precomputed_s.r[1] = key->key.r[2] * 5;
+ key->precomputed_s.r[2] = key->key.r[3] * 5;
+ key->precomputed_s.r[3] = key->key.r[4] * 5;
+}
+EXPORT_SYMBOL(poly1305_core_setkey);
+
+void poly1305_core_blocks(struct poly1305_state *state,
+ const struct poly1305_core_key *key, const void *src,
+ unsigned int nblocks, u32 hibit)
+{
+ const u8 *input = src;
+ u32 r0, r1, r2, r3, r4;
+ u32 s1, s2, s3, s4;
+ u32 h0, h1, h2, h3, h4;
+ u64 d0, d1, d2, d3, d4;
+ u32 c;
+
+ if (!nblocks)
+ return;
+
+ hibit <<= 24;
+
+ r0 = key->key.r[0];
+ r1 = key->key.r[1];
+ r2 = key->key.r[2];
+ r3 = key->key.r[3];
+ r4 = key->key.r[4];
+
+ s1 = key->precomputed_s.r[0];
+ s2 = key->precomputed_s.r[1];
+ s3 = key->precomputed_s.r[2];
+ s4 = key->precomputed_s.r[3];
+
+ h0 = state->h[0];
+ h1 = state->h[1];
+ h2 = state->h[2];
+ h3 = state->h[3];
+ h4 = state->h[4];
+
+ do {
+ /* h += m[i] */
+ h0 += (get_unaligned_le32(&input[0])) & 0x3ffffff;
+ h1 += (get_unaligned_le32(&input[3]) >> 2) & 0x3ffffff;
+ h2 += (get_unaligned_le32(&input[6]) >> 4) & 0x3ffffff;
+ h3 += (get_unaligned_le32(&input[9]) >> 6) & 0x3ffffff;
+ h4 += (get_unaligned_le32(&input[12]) >> 8) | hibit;
+
+ /* h *= r */
+ d0 = ((u64)h0 * r0) + ((u64)h1 * s4) +
+ ((u64)h2 * s3) + ((u64)h3 * s2) +
+ ((u64)h4 * s1);
+ d1 = ((u64)h0 * r1) + ((u64)h1 * r0) +
+ ((u64)h2 * s4) + ((u64)h3 * s3) +
+ ((u64)h4 * s2);
+ d2 = ((u64)h0 * r2) + ((u64)h1 * r1) +
+ ((u64)h2 * r0) + ((u64)h3 * s4) +
+ ((u64)h4 * s3);
+ d3 = ((u64)h0 * r3) + ((u64)h1 * r2) +
+ ((u64)h2 * r1) + ((u64)h3 * r0) +
+ ((u64)h4 * s4);
+ d4 = ((u64)h0 * r4) + ((u64)h1 * r3) +
+ ((u64)h2 * r2) + ((u64)h3 * r1) +
+ ((u64)h4 * r0);
+
+ /* (partial) h %= p */
+ c = (u32)(d0 >> 26);
+ h0 = (u32)d0 & 0x3ffffff;
+ d1 += c;
+ c = (u32)(d1 >> 26);
+ h1 = (u32)d1 & 0x3ffffff;
+ d2 += c;
+ c = (u32)(d2 >> 26);
+ h2 = (u32)d2 & 0x3ffffff;
+ d3 += c;
+ c = (u32)(d3 >> 26);
+ h3 = (u32)d3 & 0x3ffffff;
+ d4 += c;
+ c = (u32)(d4 >> 26);
+ h4 = (u32)d4 & 0x3ffffff;
+ h0 += c * 5;
+ c = (h0 >> 26);
+ h0 = h0 & 0x3ffffff;
+ h1 += c;
+
+ input += POLY1305_BLOCK_SIZE;
+ } while (--nblocks);
+
+ state->h[0] = h0;
+ state->h[1] = h1;
+ state->h[2] = h2;
+ state->h[3] = h3;
+ state->h[4] = h4;
+}
+EXPORT_SYMBOL(poly1305_core_blocks);
+
+void poly1305_core_emit(const struct poly1305_state *state, const u32 nonce[4],
+ void *dst)
+{
+ u8 *mac = dst;
+ u32 h0, h1, h2, h3, h4, c;
+ u32 g0, g1, g2, g3, g4;
+ u64 f;
+ u32 mask;
+
+ /* fully carry h */
+ h0 = state->h[0];
+ h1 = state->h[1];
+ h2 = state->h[2];
+ h3 = state->h[3];
+ h4 = state->h[4];
+
+ c = h1 >> 26;
+ h1 = h1 & 0x3ffffff;
+ h2 += c;
+ c = h2 >> 26;
+ h2 = h2 & 0x3ffffff;
+ h3 += c;
+ c = h3 >> 26;
+ h3 = h3 & 0x3ffffff;
+ h4 += c;
+ c = h4 >> 26;
+ h4 = h4 & 0x3ffffff;
+ h0 += c * 5;
+ c = h0 >> 26;
+ h0 = h0 & 0x3ffffff;
+ h1 += c;
+
+ /* compute h + -p */
+ g0 = h0 + 5;
+ c = g0 >> 26;
+ g0 &= 0x3ffffff;
+ g1 = h1 + c;
+ c = g1 >> 26;
+ g1 &= 0x3ffffff;
+ g2 = h2 + c;
+ c = g2 >> 26;
+ g2 &= 0x3ffffff;
+ g3 = h3 + c;
+ c = g3 >> 26;
+ g3 &= 0x3ffffff;
+ g4 = h4 + c - (1UL << 26);
+
+ /* select h if h < p, or h + -p if h >= p */
+ mask = (g4 >> ((sizeof(u32) * 8) - 1)) - 1;
+ g0 &= mask;
+ g1 &= mask;
+ g2 &= mask;
+ g3 &= mask;
+ g4 &= mask;
+ mask = ~mask;
+
+ h0 = (h0 & mask) | g0;
+ h1 = (h1 & mask) | g1;
+ h2 = (h2 & mask) | g2;
+ h3 = (h3 & mask) | g3;
+ h4 = (h4 & mask) | g4;
+
+ /* h = h % (2^128) */
+ h0 = ((h0) | (h1 << 26)) & 0xffffffff;
+ h1 = ((h1 >> 6) | (h2 << 20)) & 0xffffffff;
+ h2 = ((h2 >> 12) | (h3 << 14)) & 0xffffffff;
+ h3 = ((h3 >> 18) | (h4 << 8)) & 0xffffffff;
+
+ if (likely(nonce)) {
+ /* mac = (h + nonce) % (2^128) */
+ f = (u64)h0 + nonce[0];
+ h0 = (u32)f;
+ f = (u64)h1 + nonce[1] + (f >> 32);
+ h1 = (u32)f;
+ f = (u64)h2 + nonce[2] + (f >> 32);
+ h2 = (u32)f;
+ f = (u64)h3 + nonce[3] + (f >> 32);
+ h3 = (u32)f;
+ }
+
+ put_unaligned_le32(h0, &mac[0]);
+ put_unaligned_le32(h1, &mac[4]);
+ put_unaligned_le32(h2, &mac[8]);
+ put_unaligned_le32(h3, &mac[12]);
+}
+EXPORT_SYMBOL(poly1305_core_emit);
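
The five 26-bit limbs loaded by poly1305_core_setkey() come from
reading overlapping little-endian 32-bit words at byte offsets 0, 3,
6, 9 and 12, shifting out the bits already consumed, and folding the
r-clamp 0x0ffffffc0ffffffc0ffffffc0fffffff into the per-limb masks.
A standalone sketch of just that decomposition (plain C, illustrative
only):

#include <stdint.h>

static uint32_t le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

/* Split the first 16 key bytes into five base-2^26 limbs with the
 * clamp applied; mirrors the loads in poly1305_core_setkey(). */
static void r_to_limbs(uint32_t r[5], const uint8_t key[16])
{
	r[0] = (le32(&key[0])) & 0x3ffffff;
	r[1] = (le32(&key[3]) >> 2) & 0x3ffff03;
	r[2] = (le32(&key[6]) >> 4) & 0x3ffc0ff;
	r[3] = (le32(&key[9]) >> 6) & 0x3f03fff;
	r[4] = (le32(&key[12]) >> 8) & 0x00fffff;
}
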
diff --git a/lib/crypto/poly1305-donna64.c b/lib/crypto/poly1305-donna64.c
new file mode 100644
index 000000000..988702c9b
--- /dev/null
+++ b/lib/crypto/poly1305-donna64.c
@@ -0,0 +1,184 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ *
+ * This is based in part on Andrew Moon's poly1305-donna, which is in the
+ * public domain.
+ */
+
+#include <linux/kernel.h>
+#include <asm/unaligned.h>
+#include <crypto/internal/poly1305.h>
+
+void poly1305_core_setkey(struct poly1305_core_key *key,
+ const u8 raw_key[POLY1305_BLOCK_SIZE])
+{
+ u64 t0, t1;
+
+ /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */
+ t0 = get_unaligned_le64(&raw_key[0]);
+ t1 = get_unaligned_le64(&raw_key[8]);
+
+ key->key.r64[0] = t0 & 0xffc0fffffffULL;
+ key->key.r64[1] = ((t0 >> 44) | (t1 << 20)) & 0xfffffc0ffffULL;
+ key->key.r64[2] = ((t1 >> 24)) & 0x00ffffffc0fULL;
+
+ /* s = 20*r */
+ key->precomputed_s.r64[0] = key->key.r64[1] * 20;
+ key->precomputed_s.r64[1] = key->key.r64[2] * 20;
+}
+EXPORT_SYMBOL(poly1305_core_setkey);
+
+void poly1305_core_blocks(struct poly1305_state *state,
+ const struct poly1305_core_key *key, const void *src,
+ unsigned int nblocks, u32 hibit)
+{
+ const u8 *input = src;
+ u64 hibit64;
+ u64 r0, r1, r2;
+ u64 s1, s2;
+ u64 h0, h1, h2;
+ u64 c;
+ u128 d0, d1, d2, d;
+
+ if (!nblocks)
+ return;
+
+ hibit64 = ((u64)hibit) << 40;
+
+ r0 = key->key.r64[0];
+ r1 = key->key.r64[1];
+ r2 = key->key.r64[2];
+
+ h0 = state->h64[0];
+ h1 = state->h64[1];
+ h2 = state->h64[2];
+
+ s1 = key->precomputed_s.r64[0];
+ s2 = key->precomputed_s.r64[1];
+
+ do {
+ u64 t0, t1;
+
+ /* h += m[i] */
+ t0 = get_unaligned_le64(&input[0]);
+ t1 = get_unaligned_le64(&input[8]);
+
+ h0 += t0 & 0xfffffffffffULL;
+ h1 += ((t0 >> 44) | (t1 << 20)) & 0xfffffffffffULL;
+ h2 += (((t1 >> 24)) & 0x3ffffffffffULL) | hibit64;
+
+ /* h *= r */
+ d0 = (u128)h0 * r0;
+ d = (u128)h1 * s2;
+ d0 += d;
+ d = (u128)h2 * s1;
+ d0 += d;
+ d1 = (u128)h0 * r1;
+ d = (u128)h1 * r0;
+ d1 += d;
+ d = (u128)h2 * s2;
+ d1 += d;
+ d2 = (u128)h0 * r2;
+ d = (u128)h1 * r1;
+ d2 += d;
+ d = (u128)h2 * r0;
+ d2 += d;
+
+ /* (partial) h %= p */
+ c = (u64)(d0 >> 44);
+ h0 = (u64)d0 & 0xfffffffffffULL;
+ d1 += c;
+ c = (u64)(d1 >> 44);
+ h1 = (u64)d1 & 0xfffffffffffULL;
+ d2 += c;
+ c = (u64)(d2 >> 42);
+ h2 = (u64)d2 & 0x3ffffffffffULL;
+ h0 += c * 5;
+ c = h0 >> 44;
+ h0 = h0 & 0xfffffffffffULL;
+ h1 += c;
+
+ input += POLY1305_BLOCK_SIZE;
+ } while (--nblocks);
+
+ state->h64[0] = h0;
+ state->h64[1] = h1;
+ state->h64[2] = h2;
+}
+EXPORT_SYMBOL(poly1305_core_blocks);
+
+void poly1305_core_emit(const struct poly1305_state *state, const u32 nonce[4],
+ void *dst)
+{
+ u8 *mac = dst;
+ u64 h0, h1, h2, c;
+ u64 g0, g1, g2;
+ u64 t0, t1;
+
+ /* fully carry h */
+ h0 = state->h64[0];
+ h1 = state->h64[1];
+ h2 = state->h64[2];
+
+ c = h1 >> 44;
+ h1 &= 0xfffffffffffULL;
+ h2 += c;
+ c = h2 >> 42;
+ h2 &= 0x3ffffffffffULL;
+ h0 += c * 5;
+ c = h0 >> 44;
+ h0 &= 0xfffffffffffULL;
+ h1 += c;
+ c = h1 >> 44;
+ h1 &= 0xfffffffffffULL;
+ h2 += c;
+ c = h2 >> 42;
+ h2 &= 0x3ffffffffffULL;
+ h0 += c * 5;
+ c = h0 >> 44;
+ h0 &= 0xfffffffffffULL;
+ h1 += c;
+
+ /* compute h + -p */
+ g0 = h0 + 5;
+ c = g0 >> 44;
+ g0 &= 0xfffffffffffULL;
+ g1 = h1 + c;
+ c = g1 >> 44;
+ g1 &= 0xfffffffffffULL;
+ g2 = h2 + c - (1ULL << 42);
+
+ /* select h if h < p, or h + -p if h >= p */
+ c = (g2 >> ((sizeof(u64) * 8) - 1)) - 1;
+ g0 &= c;
+ g1 &= c;
+ g2 &= c;
+ c = ~c;
+ h0 = (h0 & c) | g0;
+ h1 = (h1 & c) | g1;
+ h2 = (h2 & c) | g2;
+
+ if (likely(nonce)) {
+ /* h = (h + nonce) */
+ t0 = ((u64)nonce[1] << 32) | nonce[0];
+ t1 = ((u64)nonce[3] << 32) | nonce[2];
+
+ h0 += t0 & 0xfffffffffffULL;
+ c = h0 >> 44;
+ h0 &= 0xfffffffffffULL;
+ h1 += (((t0 >> 44) | (t1 << 20)) & 0xfffffffffffULL) + c;
+ c = h1 >> 44;
+ h1 &= 0xfffffffffffULL;
+ h2 += (((t1 >> 24)) & 0x3ffffffffffULL) + c;
+ h2 &= 0x3ffffffffffULL;
+ }
+
+ /* mac = h % (2^128) */
+ h0 = h0 | (h1 << 44);
+ h1 = (h1 >> 20) | (h2 << 24);
+
+ put_unaligned_le64(h0, &mac[0]);
+ put_unaligned_le64(h1, &mac[8]);
+}
+EXPORT_SYMBOL(poly1305_core_emit);
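
Both donna variants lean on the congruence 2^130 == 5 (mod 2^130 - 5):
any carry out of the top limb folds back into the bottom limb
multiplied by 5, which is why the carry chains above end in
"h0 += c * 5". The same identity at toy scale, with p = 2^6 - 5 = 59
(plain C, illustrative only):

#include <stdint.h>

/* Reduce x mod 59 using 2^6 == 5 (mod 59): peel off the bits above
 * bit 5 and add them back times 5 until the value fits, then do one
 * final conditional subtraction. */
static uint32_t reduce59(uint32_t x)
{
	while (x >> 6)
		x = (x & 0x3f) + 5 * (x >> 6);
	if (x >= 59)
		x -= 59;
	return x;
}
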
diff --git a/lib/crypto/poly1305.c b/lib/crypto/poly1305.c
new file mode 100644
index 000000000..26d87fc38
--- /dev/null
+++ b/lib/crypto/poly1305.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Poly1305 authenticator algorithm, RFC7539
+ *
+ * Copyright (C) 2015 Martin Willi
+ *
+ * Based on public domain code by Andrew Moon and Daniel J. Bernstein.
+ */
+
+#include <crypto/internal/poly1305.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <asm/unaligned.h>
+
+void poly1305_init_generic(struct poly1305_desc_ctx *desc,
+ const u8 key[POLY1305_KEY_SIZE])
+{
+ poly1305_core_setkey(&desc->core_r, key);
+ desc->s[0] = get_unaligned_le32(key + 16);
+ desc->s[1] = get_unaligned_le32(key + 20);
+ desc->s[2] = get_unaligned_le32(key + 24);
+ desc->s[3] = get_unaligned_le32(key + 28);
+ poly1305_core_init(&desc->h);
+ desc->buflen = 0;
+ desc->sset = true;
+ desc->rset = 2;
+}
+EXPORT_SYMBOL_GPL(poly1305_init_generic);
+
+void poly1305_update_generic(struct poly1305_desc_ctx *desc, const u8 *src,
+ unsigned int nbytes)
+{
+ unsigned int bytes;
+
+ if (unlikely(desc->buflen)) {
+ bytes = min(nbytes, POLY1305_BLOCK_SIZE - desc->buflen);
+ memcpy(desc->buf + desc->buflen, src, bytes);
+ src += bytes;
+ nbytes -= bytes;
+ desc->buflen += bytes;
+
+ if (desc->buflen == POLY1305_BLOCK_SIZE) {
+ poly1305_core_blocks(&desc->h, &desc->core_r, desc->buf,
+ 1, 1);
+ desc->buflen = 0;
+ }
+ }
+
+ if (likely(nbytes >= POLY1305_BLOCK_SIZE)) {
+ poly1305_core_blocks(&desc->h, &desc->core_r, src,
+ nbytes / POLY1305_BLOCK_SIZE, 1);
+ src += nbytes - (nbytes % POLY1305_BLOCK_SIZE);
+ nbytes %= POLY1305_BLOCK_SIZE;
+ }
+
+ if (unlikely(nbytes)) {
+ desc->buflen = nbytes;
+ memcpy(desc->buf, src, nbytes);
+ }
+}
+EXPORT_SYMBOL_GPL(poly1305_update_generic);
+
+void poly1305_final_generic(struct poly1305_desc_ctx *desc, u8 *dst)
+{
+ if (unlikely(desc->buflen)) {
+ desc->buf[desc->buflen++] = 1;
+ memset(desc->buf + desc->buflen, 0,
+ POLY1305_BLOCK_SIZE - desc->buflen);
+ poly1305_core_blocks(&desc->h, &desc->core_r, desc->buf, 1, 0);
+ }
+
+ poly1305_core_emit(&desc->h, desc->s, dst);
+ *desc = (struct poly1305_desc_ctx){};
+}
+EXPORT_SYMBOL_GPL(poly1305_final_generic);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Martin Willi <martin@strongswan.org>");
diff --git a/lib/crypto/sha1.c b/lib/crypto/sha1.c
new file mode 100644
index 000000000..1aebe7be9
--- /dev/null
+++ b/lib/crypto/sha1.c
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SHA1 routine optimized to do word accesses rather than byte accesses,
+ * and to avoid unnecessary copies into the context array.
+ *
+ * This was based on the git SHA1 implementation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/bitops.h>
+#include <linux/string.h>
+#include <crypto/sha1.h>
+#include <asm/unaligned.h>
+
+/*
+ * If you have 32 registers or more, the compiler can (and should)
+ * try to change the array[] accesses into registers. However, on
+ * machines with less than ~25 registers, that won't really work,
+ * and at least gcc will make an unholy mess of it.
+ *
+ * So to avoid that mess which just slows things down, we force
+ * the stores to memory to actually happen (we might be better off
+ * with a 'W(t)=(val); asm("" : "+m" (W(t)))' there instead, as
+ * suggested by Artur Skawina - that will also make gcc unable to
+ * try to do the silly "optimize away loads" part because it won't
+ * see what the value will be).
+ *
+ * Ben Herrenschmidt reports that on PPC, the C version comes close
+ * to the optimized asm with this (ie on PPC you don't want that
+ * 'volatile', since there are lots of registers).
+ *
+ * On ARM we get the best code generation by forcing a full memory barrier
+ * between each SHA_ROUND, otherwise gcc happily gets wild with spilling and
+ * the stack frame size simply explodes and performance goes down the drain.
+ */
+
+#ifdef CONFIG_X86
+ #define setW(x, val) (*(volatile __u32 *)&W(x) = (val))
+#elif defined(CONFIG_ARM)
+ #define setW(x, val) do { W(x) = (val); __asm__("":::"memory"); } while (0)
+#else
+ #define setW(x, val) (W(x) = (val))
+#endif
+
+/* This "rolls" over the 512-bit array */
+#define W(x) (array[(x)&15])
+
+/*
+ * Where do we get the source from? The first 16 iterations get it from
+ * the input data, the next mix it from the 512-bit array.
+ */
+#define SHA_SRC(t) get_unaligned_be32((__u32 *)data + t)
+#define SHA_MIX(t) rol32(W(t+13) ^ W(t+8) ^ W(t+2) ^ W(t), 1)
+
+#define SHA_ROUND(t, input, fn, constant, A, B, C, D, E) do { \
+ __u32 TEMP = input(t); setW(t, TEMP); \
+ E += TEMP + rol32(A,5) + (fn) + (constant); \
+ B = ror32(B, 2); \
+ TEMP = E; E = D; D = C; C = B; B = A; A = TEMP; } while (0)
+
+#define T_0_15(t, A, B, C, D, E) SHA_ROUND(t, SHA_SRC, (((C^D)&B)^D) , 0x5a827999, A, B, C, D, E )
+#define T_16_19(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (((C^D)&B)^D) , 0x5a827999, A, B, C, D, E )
+#define T_20_39(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (B^C^D) , 0x6ed9eba1, A, B, C, D, E )
+#define T_40_59(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, ((B&C)+(D&(B^C))) , 0x8f1bbcdc, A, B, C, D, E )
+#define T_60_79(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (B^C^D) , 0xca62c1d6, A, B, C, D, E )
+
+/**
+ * sha1_transform - single block SHA1 transform (deprecated)
+ *
+ * @digest: 160 bit digest to update
+ * @data: 512 bits of data to hash
+ * @array: 16 words of workspace (see note)
+ *
+ * This function executes SHA-1's internal compression function. It updates the
+ * 160-bit internal state (@digest) with a single 512-bit data block (@data).
+ *
+ * Don't use this function. SHA-1 is no longer considered secure. And even if
+ * you do have to use SHA-1, this isn't the correct way to hash something with
+ * SHA-1 as this doesn't handle padding and finalization.
+ *
+ * Note: If the hash is security sensitive, the caller should be sure
+ * to clear the workspace. This is left to the caller to avoid
+ * unnecessary clears between chained hashing operations.
+ */
+void sha1_transform(__u32 *digest, const char *data, __u32 *array)
+{
+ __u32 A, B, C, D, E;
+ unsigned int i = 0;
+
+ A = digest[0];
+ B = digest[1];
+ C = digest[2];
+ D = digest[3];
+ E = digest[4];
+
+ /* Round 1 - iterations 0-15 take their input from 'data' */
+ for (; i < 16; ++i)
+ T_0_15(i, A, B, C, D, E);
+
+ /* Round 1 - tail. Input from 512-bit mixing array */
+ for (; i < 20; ++i)
+ T_16_19(i, A, B, C, D, E);
+
+ /* Round 2 */
+ for (; i < 40; ++i)
+ T_20_39(i, A, B, C, D, E);
+
+ /* Round 3 */
+ for (; i < 60; ++i)
+ T_40_59(i, A, B, C, D, E);
+
+ /* Round 4 */
+ for (; i < 80; ++i)
+ T_60_79(i, A, B, C, D, E);
+
+ digest[0] += A;
+ digest[1] += B;
+ digest[2] += C;
+ digest[3] += D;
+ digest[4] += E;
+}
+EXPORT_SYMBOL(sha1_transform);
+
+/**
+ * sha1_init - initialize the vectors for a SHA1 digest
+ * @buf: vector to initialize
+ */
+void sha1_init(__u32 *buf)
+{
+ buf[0] = 0x67452301;
+ buf[1] = 0xefcdab89;
+ buf[2] = 0x98badcfe;
+ buf[3] = 0x10325476;
+ buf[4] = 0xc3d2e1f0;
+}
+EXPORT_SYMBOL(sha1_init);
+
+MODULE_LICENSE("GPL");
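
Because sha1_transform() is only the compression function, a caller
that insists on using it must do the padding and finalization by
hand: append the 0x80 terminator, zero-fill, and place the message
bit length big-endian in the last eight bytes of the final block. A
sketch for a message short enough to fit one padded block
(illustrative only; assumes len <= 55):

#include <crypto/sha1.h>
#include <linux/string.h>
#include <asm/unaligned.h>

static void sha1_one_block_sketch(const u8 *msg, size_t len,
				  __u32 digest[5])
{
	__u32 ws[SHA1_WORKSPACE_WORDS];
	u8 block[SHA1_BLOCK_SIZE] = { 0 };

	memcpy(block, msg, len);
	block[len] = 0x80;                       /* pad terminator */
	put_unaligned_be64((u64)len * 8,         /* bit length */
			   &block[SHA1_BLOCK_SIZE - 8]);

	sha1_init(digest);
	sha1_transform(digest, (const char *)block, ws);
	memzero_explicit(ws, sizeof(ws));        /* clear workspace */
}
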
diff --git a/lib/crypto/sha256.c b/lib/crypto/sha256.c
new file mode 100644
index 000000000..3ac1ef867
--- /dev/null
+++ b/lib/crypto/sha256.c
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * SHA-256, as specified in
+ * http://csrc.nist.gov/groups/STM/cavp/documents/shs/sha256-384-512.pdf
+ *
+ * SHA-256 code by Jean-Luc Cooke <jlcooke@certainkey.com>.
+ *
+ * Copyright (c) Jean-Luc Cooke <jlcooke@certainkey.com>
+ * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
+ * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
+ * Copyright (c) 2014 Red Hat Inc.
+ */
+
+#include <asm/unaligned.h>
+#include <crypto/sha256_base.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+
+static const u32 SHA256_K[] = {
+ 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
+ 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
+ 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
+ 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
+ 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
+ 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
+ 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
+ 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
+ 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
+ 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
+ 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
+ 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
+ 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
+ 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
+ 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
+ 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2,
+};
+
+static inline u32 Ch(u32 x, u32 y, u32 z)
+{
+ return z ^ (x & (y ^ z));
+}
+
+static inline u32 Maj(u32 x, u32 y, u32 z)
+{
+ return (x & y) | (z & (x | y));
+}
+
+#define e0(x) (ror32(x, 2) ^ ror32(x, 13) ^ ror32(x, 22))
+#define e1(x) (ror32(x, 6) ^ ror32(x, 11) ^ ror32(x, 25))
+#define s0(x) (ror32(x, 7) ^ ror32(x, 18) ^ (x >> 3))
+#define s1(x) (ror32(x, 17) ^ ror32(x, 19) ^ (x >> 10))
+
+static inline void LOAD_OP(int I, u32 *W, const u8 *input)
+{
+ W[I] = get_unaligned_be32((__u32 *)input + I);
+}
+
+static inline void BLEND_OP(int I, u32 *W)
+{
+ W[I] = s1(W[I-2]) + W[I-7] + s0(W[I-15]) + W[I-16];
+}
+
+#define SHA256_ROUND(i, a, b, c, d, e, f, g, h) do { \
+ u32 t1, t2; \
+ t1 = h + e1(e) + Ch(e, f, g) + SHA256_K[i] + W[i]; \
+ t2 = e0(a) + Maj(a, b, c); \
+ d += t1; \
+ h = t1 + t2; \
+} while (0)
+
+static void sha256_transform(u32 *state, const u8 *input, u32 *W)
+{
+ u32 a, b, c, d, e, f, g, h;
+ int i;
+
+ /* load the input */
+ for (i = 0; i < 16; i += 8) {
+ LOAD_OP(i + 0, W, input);
+ LOAD_OP(i + 1, W, input);
+ LOAD_OP(i + 2, W, input);
+ LOAD_OP(i + 3, W, input);
+ LOAD_OP(i + 4, W, input);
+ LOAD_OP(i + 5, W, input);
+ LOAD_OP(i + 6, W, input);
+ LOAD_OP(i + 7, W, input);
+ }
+
+ /* now blend */
+ for (i = 16; i < 64; i += 8) {
+ BLEND_OP(i + 0, W);
+ BLEND_OP(i + 1, W);
+ BLEND_OP(i + 2, W);
+ BLEND_OP(i + 3, W);
+ BLEND_OP(i + 4, W);
+ BLEND_OP(i + 5, W);
+ BLEND_OP(i + 6, W);
+ BLEND_OP(i + 7, W);
+ }
+
+ /* load the state into our registers */
+ a = state[0]; b = state[1]; c = state[2]; d = state[3];
+ e = state[4]; f = state[5]; g = state[6]; h = state[7];
+
+ /* now iterate */
+ for (i = 0; i < 64; i += 8) {
+ SHA256_ROUND(i + 0, a, b, c, d, e, f, g, h);
+ SHA256_ROUND(i + 1, h, a, b, c, d, e, f, g);
+ SHA256_ROUND(i + 2, g, h, a, b, c, d, e, f);
+ SHA256_ROUND(i + 3, f, g, h, a, b, c, d, e);
+ SHA256_ROUND(i + 4, e, f, g, h, a, b, c, d);
+ SHA256_ROUND(i + 5, d, e, f, g, h, a, b, c);
+ SHA256_ROUND(i + 6, c, d, e, f, g, h, a, b);
+ SHA256_ROUND(i + 7, b, c, d, e, f, g, h, a);
+ }
+
+ state[0] += a; state[1] += b; state[2] += c; state[3] += d;
+ state[4] += e; state[5] += f; state[6] += g; state[7] += h;
+}
+
+static void sha256_transform_blocks(struct sha256_state *sctx,
+ const u8 *input, int blocks)
+{
+ u32 W[64];
+
+ do {
+ sha256_transform(sctx->state, input, W);
+ input += SHA256_BLOCK_SIZE;
+ } while (--blocks);
+
+ memzero_explicit(W, sizeof(W));
+}
+
+void sha256_update(struct sha256_state *sctx, const u8 *data, unsigned int len)
+{
+ lib_sha256_base_do_update(sctx, data, len, sha256_transform_blocks);
+}
+EXPORT_SYMBOL(sha256_update);
+
+static void __sha256_final(struct sha256_state *sctx, u8 *out, int digest_size)
+{
+ lib_sha256_base_do_finalize(sctx, sha256_transform_blocks);
+ lib_sha256_base_finish(sctx, out, digest_size);
+}
+
+void sha256_final(struct sha256_state *sctx, u8 *out)
+{
+ __sha256_final(sctx, out, 32);
+}
+EXPORT_SYMBOL(sha256_final);
+
+void sha224_final(struct sha256_state *sctx, u8 *out)
+{
+ __sha256_final(sctx, out, 28);
+}
+EXPORT_SYMBOL(sha224_final);
+
+void sha256(const u8 *data, unsigned int len, u8 *out)
+{
+ struct sha256_state sctx;
+
+ sha256_init(&sctx);
+ sha256_update(&sctx, data, len);
+ sha256_final(&sctx, out);
+}
+EXPORT_SYMBOL(sha256);
+
+MODULE_LICENSE("GPL");
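
Most in-kernel users only need the one-shot helper above. A minimal
sketch, checked against the standard "abc" test vector:

#include <crypto/sha2.h>

static void sha256_abc_sketch(void)
{
	static const u8 msg[] = "abc";
	u8 digest[SHA256_DIGEST_SIZE];

	sha256(msg, sizeof(msg) - 1, digest);
	/* digest now starts ba 78 16 bf ... and ends f2 00 15 ad */
}
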
diff --git a/lib/crypto/utils.c b/lib/crypto/utils.c
new file mode 100644
index 000000000..c852c7151
--- /dev/null
+++ b/lib/crypto/utils.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Crypto library utility functions
+ *
+ * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
+ */
+
+#include <asm/unaligned.h>
+#include <crypto/utils.h>
+#include <linux/module.h>
+
+/*
+ * XOR @len bytes from @src1 and @src2 together, writing the result to @dst
+ * (which may alias one of the sources). Don't call this directly; call
+ * crypto_xor() or crypto_xor_cpy() instead.
+ */
+void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int len)
+{
+ int relalign = 0;
+
+ if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
+ int size = sizeof(unsigned long);
+ int d = (((unsigned long)dst ^ (unsigned long)src1) |
+ ((unsigned long)dst ^ (unsigned long)src2)) &
+ (size - 1);
+
+ relalign = d ? 1 << __ffs(d) : size;
+
+ /*
+ * If we care about alignment, process as many bytes as
+ * needed to advance dst and src to values whose alignments
+ * equal their relative alignment. This will allow us to
+ * process the remainder of the input using optimal strides.
+ */
+ while (((unsigned long)dst & (relalign - 1)) && len > 0) {
+ *dst++ = *src1++ ^ *src2++;
+ len--;
+ }
+ }
+
+ while (IS_ENABLED(CONFIG_64BIT) && len >= 8 && !(relalign & 7)) {
+ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
+ u64 l = get_unaligned((u64 *)src1) ^
+ get_unaligned((u64 *)src2);
+ put_unaligned(l, (u64 *)dst);
+ } else {
+ *(u64 *)dst = *(u64 *)src1 ^ *(u64 *)src2;
+ }
+ dst += 8;
+ src1 += 8;
+ src2 += 8;
+ len -= 8;
+ }
+
+ while (len >= 4 && !(relalign & 3)) {
+ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
+ u32 l = get_unaligned((u32 *)src1) ^
+ get_unaligned((u32 *)src2);
+ put_unaligned(l, (u32 *)dst);
+ } else {
+ *(u32 *)dst = *(u32 *)src1 ^ *(u32 *)src2;
+ }
+ dst += 4;
+ src1 += 4;
+ src2 += 4;
+ len -= 4;
+ }
+
+ while (len >= 2 && !(relalign & 1)) {
+ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
+ u16 l = get_unaligned((u16 *)src1) ^
+ get_unaligned((u16 *)src2);
+ put_unaligned(l, (u16 *)dst);
+ } else {
+ *(u16 *)dst = *(u16 *)src1 ^ *(u16 *)src2;
+ }
+ dst += 2;
+ src1 += 2;
+ src2 += 2;
+ len -= 2;
+ }
+
+ while (len--)
+ *dst++ = *src1++ ^ *src2++;
+}
+EXPORT_SYMBOL_GPL(__crypto_xor);
+
+MODULE_LICENSE("GPL");
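
The relalign computation above boils down to: XOR the three pointers
pairwise, keep only the low bits, and the lowest set bit of the
result is the largest power-of-two stride at which all three stay
mutually aligned. A standalone illustration (plain userspace C;
ffs() here is the 1-based libc variant, hence the -1 relative to the
kernel's 0-based __ffs()):

#include <strings.h>	/* ffs() */

static int rel_align(const void *dst, const void *s1, const void *s2)
{
	unsigned long d = (((unsigned long)dst ^ (unsigned long)s1) |
			   ((unsigned long)dst ^ (unsigned long)s2)) &
			  (sizeof(unsigned long) - 1);

	/* d == 0 means all three are mutually word-aligned */
	return d ? 1 << (ffs(d) - 1) : (int)sizeof(unsigned long);
}
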
diff --git a/lib/ctype.c b/lib/ctype.c
new file mode 100644
index 000000000..c819fe269
--- /dev/null
+++ b/lib/ctype.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * linux/lib/ctype.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
+#include <linux/ctype.h>
+#include <linux/compiler.h>
+#include <linux/export.h>
+
+const unsigned char _ctype[] = {
+_C,_C,_C,_C,_C,_C,_C,_C, /* 0-7 */
+_C,_C|_S,_C|_S,_C|_S,_C|_S,_C|_S,_C,_C, /* 8-15 */
+_C,_C,_C,_C,_C,_C,_C,_C, /* 16-23 */
+_C,_C,_C,_C,_C,_C,_C,_C, /* 24-31 */
+_S|_SP,_P,_P,_P,_P,_P,_P,_P, /* 32-39 */
+_P,_P,_P,_P,_P,_P,_P,_P, /* 40-47 */
+_D,_D,_D,_D,_D,_D,_D,_D, /* 48-55 */
+_D,_D,_P,_P,_P,_P,_P,_P, /* 56-63 */
+_P,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U, /* 64-71 */
+_U,_U,_U,_U,_U,_U,_U,_U, /* 72-79 */
+_U,_U,_U,_U,_U,_U,_U,_U, /* 80-87 */
+_U,_U,_U,_P,_P,_P,_P,_P, /* 88-95 */
+_P,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L, /* 96-103 */
+_L,_L,_L,_L,_L,_L,_L,_L, /* 104-111 */
+_L,_L,_L,_L,_L,_L,_L,_L, /* 112-119 */
+_L,_L,_L,_P,_P,_P,_P,_C, /* 120-127 */
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 128-143 */
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 144-159 */
+_S|_SP,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 160-175 */
+_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 176-191 */
+_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U, /* 192-207 */
+_U,_U,_U,_U,_U,_U,_U,_P,_U,_U,_U,_U,_U,_U,_U,_L, /* 208-223 */
+_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L, /* 224-239 */
+_L,_L,_L,_L,_L,_L,_L,_P,_L,_L,_L,_L,_L,_L,_L,_L}; /* 240-255 */
+
+EXPORT_SYMBOL(_ctype);
diff --git a/lib/debug_info.c b/lib/debug_info.c
new file mode 100644
index 000000000..cc4723c74
--- /dev/null
+++ b/lib/debug_info.c
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This file exists solely to ensure debug information for some core
+ * data structures is included in the final image even for
+ * CONFIG_DEBUG_INFO_REDUCED. Please do not add actual code. However,
+ * adding appropriate #includes is fine.
+ */
+#include <linux/cred.h>
+#include <linux/crypto.h>
+#include <linux/dcache.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/fscache-cache.h>
+#include <linux/io.h>
+#include <linux/kallsyms.h>
+#include <linux/kernel.h>
+#include <linux/kobject.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/net.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/stdarg.h>
+#include <linux/types.h>
+#include <net/addrconf.h>
+#include <net/sock.h>
+#include <net/tcp.h>
diff --git a/lib/debug_locks.c b/lib/debug_locks.c
new file mode 100644
index 000000000..a75ee30b7
--- /dev/null
+++ b/lib/debug_locks.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * lib/debug_locks.c
+ *
+ * Generic place for common debugging facilities for various locks:
+ * spinlocks, rwlocks, mutexes and rwsems.
+ *
+ * Started by Ingo Molnar:
+ *
+ * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ */
+#include <linux/rwsem.h>
+#include <linux/mutex.h>
+#include <linux/export.h>
+#include <linux/spinlock.h>
+#include <linux/debug_locks.h>
+
+/*
+ * We want to turn all lock-debugging facilities on/off at once,
+ * via a global flag. The reason is that once a single bug has been
+ * detected and reported, there might be cascade of followup bugs
+ * that would just muddy the log. So we report the first one and
+ * shut up after that.
+ */
+int debug_locks __read_mostly = 1;
+EXPORT_SYMBOL_GPL(debug_locks);
+
+/*
+ * The locking-testsuite uses <debug_locks_silent> to get a
+ * 'silent failure': nothing is printed to the console when
+ * a locking bug is detected.
+ */
+int debug_locks_silent __read_mostly;
+EXPORT_SYMBOL_GPL(debug_locks_silent);
+
+/*
+ * Generic 'turn off all lock debugging' function:
+ */
+int debug_locks_off(void)
+{
+ if (debug_locks && __debug_locks_off()) {
+ if (!debug_locks_silent) {
+ console_verbose();
+ return 1;
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(debug_locks_off);
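
The intended call pattern is that the first detector to trip claims
the flag and reports, and every later one stays quiet. A sketch of a
caller (illustrative; the function name is hypothetical):

#include <linux/debug_locks.h>
#include <linux/printk.h>

static void report_lock_bug_sketch(void)
{
	/* Only the first caller wins the flag and gets to print. */
	if (!debug_locks_off())
		return;

	pr_err("lock debugging: inconsistent state detected\n");
	/* ...dump whatever state the detector gathered... */
}
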
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
new file mode 100644
index 000000000..a517256a2
--- /dev/null
+++ b/lib/debugobjects.c
@@ -0,0 +1,1450 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Generic infrastructure for lifetime debugging of objects.
+ *
+ * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
+ */
+
+#define pr_fmt(fmt) "ODEBUG: " fmt
+
+#include <linux/debugobjects.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+#include <linux/hash.h>
+#include <linux/kmemleak.h>
+#include <linux/cpu.h>
+
+#define ODEBUG_HASH_BITS 14
+#define ODEBUG_HASH_SIZE (1 << ODEBUG_HASH_BITS)
+
+#define ODEBUG_POOL_SIZE 1024
+#define ODEBUG_POOL_MIN_LEVEL 256
+#define ODEBUG_POOL_PERCPU_SIZE 64
+#define ODEBUG_BATCH_SIZE 16
+
+#define ODEBUG_CHUNK_SHIFT PAGE_SHIFT
+#define ODEBUG_CHUNK_SIZE (1 << ODEBUG_CHUNK_SHIFT)
+#define ODEBUG_CHUNK_MASK (~(ODEBUG_CHUNK_SIZE - 1))
+
+/*
+ * We limit the freeing of debug objects via workqueue at a maximum
+ * frequency of 10Hz and about 1024 objects for each freeing operation.
+ * So it is freeing at most 10k debug objects per second.
+ */
+#define ODEBUG_FREE_WORK_MAX 1024
+#define ODEBUG_FREE_WORK_DELAY DIV_ROUND_UP(HZ, 10)
+
+struct debug_bucket {
+ struct hlist_head list;
+ raw_spinlock_t lock;
+};
+
+/*
+ * Debug object percpu free list
+ * Access is protected by disabling irq
+ */
+struct debug_percpu_free {
+ struct hlist_head free_objs;
+ int obj_free;
+};
+
+static DEFINE_PER_CPU(struct debug_percpu_free, percpu_obj_pool);
+
+static struct debug_bucket obj_hash[ODEBUG_HASH_SIZE];
+
+static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE] __initdata;
+
+static DEFINE_RAW_SPINLOCK(pool_lock);
+
+static HLIST_HEAD(obj_pool);
+static HLIST_HEAD(obj_to_free);
+
+/*
+ * Because of the presence of percpu free pools, obj_pool_free will
+ * under-count those in the percpu free pools. Similarly, obj_pool_used
+ * will over-count those in the percpu free pools. Adjustments will be
+ * made at debug_stats_show(). Both obj_pool_min_free and obj_pool_max_used
+ * can be off.
+ */
+static int obj_pool_min_free = ODEBUG_POOL_SIZE;
+static int obj_pool_free = ODEBUG_POOL_SIZE;
+static int obj_pool_used;
+static int obj_pool_max_used;
+static bool obj_freeing;
+/* The number of objs on the global free list */
+static int obj_nr_tofree;
+
+static int debug_objects_maxchain __read_mostly;
+static int __maybe_unused debug_objects_maxchecked __read_mostly;
+static int debug_objects_fixups __read_mostly;
+static int debug_objects_warnings __read_mostly;
+static int debug_objects_enabled __read_mostly
+ = CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
+static int debug_objects_pool_size __read_mostly
+ = ODEBUG_POOL_SIZE;
+static int debug_objects_pool_min_level __read_mostly
+ = ODEBUG_POOL_MIN_LEVEL;
+static const struct debug_obj_descr *descr_test __read_mostly;
+static struct kmem_cache *obj_cache __read_mostly;
+
+/*
+ * Track numbers of kmem_cache_alloc()/free() calls done.
+ */
+static int debug_objects_allocated;
+static int debug_objects_freed;
+
+static void free_obj_work(struct work_struct *work);
+static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);
+
+static int __init enable_object_debug(char *str)
+{
+ debug_objects_enabled = 1;
+ return 0;
+}
+
+static int __init disable_object_debug(char *str)
+{
+ debug_objects_enabled = 0;
+ return 0;
+}
+
+early_param("debug_objects", enable_object_debug);
+early_param("no_debug_objects", disable_object_debug);
+
+static const char *obj_states[ODEBUG_STATE_MAX] = {
+ [ODEBUG_STATE_NONE] = "none",
+ [ODEBUG_STATE_INIT] = "initialized",
+ [ODEBUG_STATE_INACTIVE] = "inactive",
+ [ODEBUG_STATE_ACTIVE] = "active",
+ [ODEBUG_STATE_DESTROYED] = "destroyed",
+ [ODEBUG_STATE_NOTAVAILABLE] = "not available",
+};
+
+static void fill_pool(void)
+{
+ gfp_t gfp = __GFP_HIGH | __GFP_NOWARN;
+ struct debug_obj *obj;
+ unsigned long flags;
+
+ if (likely(READ_ONCE(obj_pool_free) >= debug_objects_pool_min_level))
+ return;
+
+ /*
+ * Reuse objs from the global free list; they will be reinitialized
+ * when allocating.
+ *
+ * Both obj_nr_tofree and obj_pool_free are checked locklessly; the
+ * READ_ONCE()s pair with the WRITE_ONCE()s in pool_lock critical
+ * sections.
+ */
+ while (READ_ONCE(obj_nr_tofree) && (READ_ONCE(obj_pool_free) < obj_pool_min_free)) {
+ raw_spin_lock_irqsave(&pool_lock, flags);
+ /*
+ * Recheck with the lock held as the worker thread might have
+ * won the race and freed the global free list already.
+ */
+ while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
+ obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
+ hlist_del(&obj->node);
+ WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
+ hlist_add_head(&obj->node, &obj_pool);
+ WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
+ }
+ raw_spin_unlock_irqrestore(&pool_lock, flags);
+ }
+
+ if (unlikely(!obj_cache))
+ return;
+
+ while (READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) {
+ struct debug_obj *new[ODEBUG_BATCH_SIZE];
+ int cnt;
+
+ for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
+ new[cnt] = kmem_cache_zalloc(obj_cache, gfp);
+ if (!new[cnt])
+ break;
+ }
+ if (!cnt)
+ return;
+
+ raw_spin_lock_irqsave(&pool_lock, flags);
+ while (cnt) {
+ hlist_add_head(&new[--cnt]->node, &obj_pool);
+ debug_objects_allocated++;
+ WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
+ }
+ raw_spin_unlock_irqrestore(&pool_lock, flags);
+ }
+}
+
+/*
+ * Lookup an object in the hash bucket.
+ */
+static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
+{
+ struct debug_obj *obj;
+ int cnt = 0;
+
+ hlist_for_each_entry(obj, &b->list, node) {
+ cnt++;
+ if (obj->object == addr)
+ return obj;
+ }
+ if (cnt > debug_objects_maxchain)
+ debug_objects_maxchain = cnt;
+
+ return NULL;
+}
+
+/*
+ * Allocate a new object from the hlist
+ */
+static struct debug_obj *__alloc_object(struct hlist_head *list)
+{
+ struct debug_obj *obj = NULL;
+
+ if (list->first) {
+ obj = hlist_entry(list->first, typeof(*obj), node);
+ hlist_del(&obj->node);
+ }
+
+ return obj;
+}
+
+static struct debug_obj *
+alloc_object(void *addr, struct debug_bucket *b, const struct debug_obj_descr *descr)
+{
+ struct debug_percpu_free *percpu_pool = this_cpu_ptr(&percpu_obj_pool);
+ struct debug_obj *obj;
+
+ if (likely(obj_cache)) {
+ obj = __alloc_object(&percpu_pool->free_objs);
+ if (obj) {
+ percpu_pool->obj_free--;
+ goto init_obj;
+ }
+ }
+
+ raw_spin_lock(&pool_lock);
+ obj = __alloc_object(&obj_pool);
+ if (obj) {
+ obj_pool_used++;
+ WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
+
+ /*
+ * Looking ahead, allocate one batch of debug objects and
+ * put them into the percpu free pool.
+ */
+ if (likely(obj_cache)) {
+ int i;
+
+ for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
+ struct debug_obj *obj2;
+
+ obj2 = __alloc_object(&obj_pool);
+ if (!obj2)
+ break;
+ hlist_add_head(&obj2->node,
+ &percpu_pool->free_objs);
+ percpu_pool->obj_free++;
+ obj_pool_used++;
+ WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
+ }
+ }
+
+ if (obj_pool_used > obj_pool_max_used)
+ obj_pool_max_used = obj_pool_used;
+
+ if (obj_pool_free < obj_pool_min_free)
+ obj_pool_min_free = obj_pool_free;
+ }
+ raw_spin_unlock(&pool_lock);
+
+init_obj:
+ if (obj) {
+ obj->object = addr;
+ obj->descr = descr;
+ obj->state = ODEBUG_STATE_NONE;
+ obj->astate = 0;
+ hlist_add_head(&obj->node, &b->list);
+ }
+ return obj;
+}
+
+/*
+ * workqueue function to free objects.
+ *
+ * To reduce contention on the global pool_lock, the actual freeing of
+ * debug objects will be delayed if the pool_lock is busy.
+ */
+static void free_obj_work(struct work_struct *work)
+{
+ struct hlist_node *tmp;
+ struct debug_obj *obj;
+ unsigned long flags;
+ HLIST_HEAD(tofree);
+
+ WRITE_ONCE(obj_freeing, false);
+ if (!raw_spin_trylock_irqsave(&pool_lock, flags))
+ return;
+
+ if (obj_pool_free >= debug_objects_pool_size)
+ goto free_objs;
+
+ /*
+ * The objs on the pool list might be allocated before the work is
+ * run, so recheck whether the pool list is full; if not, refill it
+ * from the global free list. As it is likely that a workload
+ * may be gearing up to use more and more objects, don't free any
+ * of them until the next round.
+ */
+ while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) {
+ obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
+ hlist_del(&obj->node);
+ hlist_add_head(&obj->node, &obj_pool);
+ WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
+ WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
+ }
+ raw_spin_unlock_irqrestore(&pool_lock, flags);
+ return;
+
+free_objs:
+ /*
+ * Pool list is already full and there are still objs on the free
+ * list. Move remaining free objs to a temporary list to free the
+ * memory outside the pool_lock held region.
+ */
+ if (obj_nr_tofree) {
+ hlist_move_list(&obj_to_free, &tofree);
+ debug_objects_freed += obj_nr_tofree;
+ WRITE_ONCE(obj_nr_tofree, 0);
+ }
+ raw_spin_unlock_irqrestore(&pool_lock, flags);
+
+ hlist_for_each_entry_safe(obj, tmp, &tofree, node) {
+ hlist_del(&obj->node);
+ kmem_cache_free(obj_cache, obj);
+ }
+}
+
+static void __free_object(struct debug_obj *obj)
+{
+ struct debug_obj *objs[ODEBUG_BATCH_SIZE];
+ struct debug_percpu_free *percpu_pool;
+ int lookahead_count = 0;
+ unsigned long flags;
+ bool work;
+
+ local_irq_save(flags);
+ if (!obj_cache)
+ goto free_to_obj_pool;
+
+ /*
+ * Try to free it into the percpu pool first.
+ */
+ percpu_pool = this_cpu_ptr(&percpu_obj_pool);
+ if (percpu_pool->obj_free < ODEBUG_POOL_PERCPU_SIZE) {
+ hlist_add_head(&obj->node, &percpu_pool->free_objs);
+ percpu_pool->obj_free++;
+ local_irq_restore(flags);
+ return;
+ }
+
+ /*
+ * As the percpu pool is full, look ahead and pull out a batch
+ * of objects from the percpu pool and free them as well.
+ */
+ for (; lookahead_count < ODEBUG_BATCH_SIZE; lookahead_count++) {
+ objs[lookahead_count] = __alloc_object(&percpu_pool->free_objs);
+ if (!objs[lookahead_count])
+ break;
+ percpu_pool->obj_free--;
+ }
+
+free_to_obj_pool:
+ raw_spin_lock(&pool_lock);
+ work = (obj_pool_free > debug_objects_pool_size) && obj_cache &&
+ (obj_nr_tofree < ODEBUG_FREE_WORK_MAX);
+ obj_pool_used--;
+
+ if (work) {
+ WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
+ hlist_add_head(&obj->node, &obj_to_free);
+ if (lookahead_count) {
+ WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + lookahead_count);
+ obj_pool_used -= lookahead_count;
+ while (lookahead_count) {
+ hlist_add_head(&objs[--lookahead_count]->node,
+ &obj_to_free);
+ }
+ }
+
+ if ((obj_pool_free > debug_objects_pool_size) &&
+ (obj_nr_tofree < ODEBUG_FREE_WORK_MAX)) {
+ int i;
+
+ /*
+ * Free one more batch of objects from obj_pool.
+ */
+ for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
+ obj = __alloc_object(&obj_pool);
+ hlist_add_head(&obj->node, &obj_to_free);
+ WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
+ WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
+ }
+ }
+ } else {
+ WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
+ hlist_add_head(&obj->node, &obj_pool);
+ if (lookahead_count) {
+ WRITE_ONCE(obj_pool_free, obj_pool_free + lookahead_count);
+ obj_pool_used -= lookahead_count;
+ while (lookahead_count) {
+ hlist_add_head(&objs[--lookahead_count]->node,
+ &obj_pool);
+ }
+ }
+ }
+ raw_spin_unlock(&pool_lock);
+ local_irq_restore(flags);
+}
+
+/*
+ * Put the object back into the pool and schedule work to free objects
+ * if necessary.
+ */
+static void free_object(struct debug_obj *obj)
+{
+ __free_object(obj);
+ if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
+ WRITE_ONCE(obj_freeing, true);
+ schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
+ }
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static int object_cpu_offline(unsigned int cpu)
+{
+ struct debug_percpu_free *percpu_pool;
+ struct hlist_node *tmp;
+ struct debug_obj *obj;
+ unsigned long flags;
+
+ /* Remote access is safe as the CPU is dead already */
+ percpu_pool = per_cpu_ptr(&percpu_obj_pool, cpu);
+ hlist_for_each_entry_safe(obj, tmp, &percpu_pool->free_objs, node) {
+ hlist_del(&obj->node);
+ kmem_cache_free(obj_cache, obj);
+ }
+
+ raw_spin_lock_irqsave(&pool_lock, flags);
+ obj_pool_used -= percpu_pool->obj_free;
+ debug_objects_freed += percpu_pool->obj_free;
+ raw_spin_unlock_irqrestore(&pool_lock, flags);
+
+ percpu_pool->obj_free = 0;
+
+ return 0;
+}
+#endif
+
+/*
+ * We ran out of memory. That means we probably have tons of objects
+ * allocated.
+ */
+static void debug_objects_oom(void)
+{
+ struct debug_bucket *db = obj_hash;
+ struct hlist_node *tmp;
+ HLIST_HEAD(freelist);
+ struct debug_obj *obj;
+ unsigned long flags;
+ int i;
+
+ pr_warn("Out of memory. ODEBUG disabled\n");
+
+ for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
+ raw_spin_lock_irqsave(&db->lock, flags);
+ hlist_move_list(&db->list, &freelist);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+
+ /* Now free them */
+ hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
+ hlist_del(&obj->node);
+ free_object(obj);
+ }
+ }
+}
+
+/*
+ * We use the pfn of the address for the hash. That way we can check
+ * for freed objects simply by checking the affected bucket.
+ */
+static struct debug_bucket *get_bucket(unsigned long addr)
+{
+ unsigned long hash;
+
+ hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
+ return &obj_hash[hash];
+}
+
+static void debug_print_object(struct debug_obj *obj, char *msg)
+{
+ const struct debug_obj_descr *descr = obj->descr;
+ static int limit;
+
+ /*
+ * Don't report if lookup_object_or_alloc() by the current thread
+ * failed because lookup_object_or_alloc()/debug_objects_oom() by a
+ * concurrent thread turned off debug_objects_enabled and cleared
+ * the hash buckets.
+ */
+ if (!debug_objects_enabled)
+ return;
+
+ if (limit < 5 && descr != descr_test) {
+ void *hint = descr->debug_hint ?
+ descr->debug_hint(obj->object) : NULL;
+ limit++;
+ WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
+ "object: %p object type: %s hint: %pS\n",
+ msg, obj_states[obj->state], obj->astate,
+ obj->object, descr->name, hint);
+ }
+ debug_objects_warnings++;
+}
+
+/*
+ * Try to repair the damage, so we have a better chance to get useful
+ * debug output.
+ */
+static bool
+debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
+ void * addr, enum debug_obj_state state)
+{
+ if (fixup && fixup(addr, state)) {
+ debug_objects_fixups++;
+ return true;
+ }
+ return false;
+}
+
+static void debug_object_is_on_stack(void *addr, int onstack)
+{
+ int is_on_stack;
+ static int limit;
+
+ if (limit > 4)
+ return;
+
+ is_on_stack = object_is_on_stack(addr);
+ if (is_on_stack == onstack)
+ return;
+
+ limit++;
+ if (is_on_stack)
+ pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
+ task_stack_page(current));
+ else
+ pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
+ task_stack_page(current));
+
+ WARN_ON(1);
+}
+
+static struct debug_obj *lookup_object_or_alloc(void *addr, struct debug_bucket *b,
+ const struct debug_obj_descr *descr,
+ bool onstack, bool alloc_ifstatic)
+{
+ struct debug_obj *obj = lookup_object(addr, b);
+ enum debug_obj_state state = ODEBUG_STATE_NONE;
+
+ if (likely(obj))
+ return obj;
+
+ /*
+ * debug_object_init() unconditionally allocates untracked
+ * objects. It does not matter whether it is a static object or
+ * not.
+ *
+ * debug_object_assert_init() and debug_object_activate() allow
+ * allocation only if the descriptor callback confirms that the
+ * object is static and considered initialized. For non-static
+ * objects the allocation needs to be done from the fixup callback.
+ */
+ if (unlikely(alloc_ifstatic)) {
+ if (!descr->is_static_object || !descr->is_static_object(addr))
+ return ERR_PTR(-ENOENT);
+ /* Statically allocated objects are considered initialized */
+ state = ODEBUG_STATE_INIT;
+ }
+
+ obj = alloc_object(addr, b, descr);
+ if (likely(obj)) {
+ obj->state = state;
+ debug_object_is_on_stack(addr, onstack);
+ return obj;
+ }
+
+ /* Out of memory. Do the cleanup outside of the locked region */
+ debug_objects_enabled = 0;
+ return NULL;
+}
+
+static void debug_objects_fill_pool(void)
+{
+ /*
+ * On RT enabled kernels the pool refill must happen in preemptible
+ * context -- for !RT kernels we rely on the fact that spinlock_t and
+ * raw_spinlock_t are basically the same type and this lock-type
+ * inversion works just fine.
+ */
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) {
+ /*
+ * Annotate away the spinlock_t inside raw_spinlock_t warning
+ * by temporarily raising the wait-type to WAIT_SLEEP, matching
+ * the preemptible() condition above.
+ */
+ static DEFINE_WAIT_OVERRIDE_MAP(fill_pool_map, LD_WAIT_SLEEP);
+ lock_map_acquire_try(&fill_pool_map);
+ fill_pool();
+ lock_map_release(&fill_pool_map);
+ }
+}
+
+static void
+__debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
+{
+ enum debug_obj_state state;
+ struct debug_bucket *db;
+ struct debug_obj *obj;
+ unsigned long flags;
+
+ debug_objects_fill_pool();
+
+ db = get_bucket((unsigned long) addr);
+
+ raw_spin_lock_irqsave(&db->lock, flags);
+
+ obj = lookup_object_or_alloc(addr, db, descr, onstack, false);
+ if (unlikely(!obj)) {
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+ debug_objects_oom();
+ return;
+ }
+
+ switch (obj->state) {
+ case ODEBUG_STATE_NONE:
+ case ODEBUG_STATE_INIT:
+ case ODEBUG_STATE_INACTIVE:
+ obj->state = ODEBUG_STATE_INIT;
+ break;
+
+ case ODEBUG_STATE_ACTIVE:
+ state = obj->state;
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+ debug_print_object(obj, "init");
+ debug_object_fixup(descr->fixup_init, addr, state);
+ return;
+
+ case ODEBUG_STATE_DESTROYED:
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+ debug_print_object(obj, "init");
+ return;
+ default:
+ break;
+ }
+
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+}
+
+/**
+ * debug_object_init - debug checks when an object is initialized
+ * @addr: address of the object
+ * @descr: pointer to an object specific debug description structure
+ */
+void debug_object_init(void *addr, const struct debug_obj_descr *descr)
+{
+ if (!debug_objects_enabled)
+ return;
+
+ __debug_object_init(addr, descr, 0);
+}
+EXPORT_SYMBOL_GPL(debug_object_init);
+
+/**
+ * debug_object_init_on_stack - debug checks when an object on stack is
+ * initialized
+ * @addr: address of the object
+ * @descr: pointer to an object specific debug description structure
+ */
+void debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr)
+{
+ if (!debug_objects_enabled)
+ return;
+
+ __debug_object_init(addr, descr, 1);
+}
+EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
+
+/**
+ * debug_object_activate - debug checks when an object is activated
+ * @addr: address of the object
+ * @descr: pointer to an object specific debug description structure
+ * Returns 0 for success, -EINVAL if the check failed.
+ */
+int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
+{
+ struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
+ enum debug_obj_state state;
+ struct debug_bucket *db;
+ struct debug_obj *obj;
+ unsigned long flags;
+ int ret;
+
+ if (!debug_objects_enabled)
+ return 0;
+
+ debug_objects_fill_pool();
+
+ db = get_bucket((unsigned long) addr);
+
+ raw_spin_lock_irqsave(&db->lock, flags);
+
+ obj = lookup_object_or_alloc(addr, db, descr, false, true);
+ if (likely(!IS_ERR_OR_NULL(obj))) {
+ bool print_object = false;
+
+ switch (obj->state) {
+ case ODEBUG_STATE_INIT:
+ case ODEBUG_STATE_INACTIVE:
+ obj->state = ODEBUG_STATE_ACTIVE;
+ ret = 0;
+ break;
+
+ case ODEBUG_STATE_ACTIVE:
+ state = obj->state;
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+ debug_print_object(obj, "activate");
+ ret = debug_object_fixup(descr->fixup_activate, addr, state);
+ return ret ? 0 : -EINVAL;
+
+ case ODEBUG_STATE_DESTROYED:
+ print_object = true;
+ ret = -EINVAL;
+ break;
+ default:
+ ret = 0;
+ break;
+ }
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+ if (print_object)
+ debug_print_object(obj, "activate");
+ return ret;
+ }
+
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+
+ /* If NULL the allocation has hit OOM */
+ if (!obj) {
+ debug_objects_oom();
+ return 0;
+ }
+
+ /* Object is neither static nor tracked. It's not initialized */
+ debug_print_object(&o, "activate");
+ ret = debug_object_fixup(descr->fixup_activate, addr, ODEBUG_STATE_NOTAVAILABLE);
+ return ret ? 0 : -EINVAL;
+}
+EXPORT_SYMBOL_GPL(debug_object_activate);
+
+/**
+ * debug_object_deactivate - debug checks when an object is deactivated
+ * @addr: address of the object
+ * @descr: pointer to an object specific debug description structure
+ */
+void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr)
+{
+ struct debug_bucket *db;
+ struct debug_obj *obj;
+ unsigned long flags;
+ bool print_object = false;
+
+ if (!debug_objects_enabled)
+ return;
+
+ db = get_bucket((unsigned long) addr);
+
+ raw_spin_lock_irqsave(&db->lock, flags);
+
+ obj = lookup_object(addr, db);
+ if (obj) {
+ switch (obj->state) {
+ case ODEBUG_STATE_INIT:
+ case ODEBUG_STATE_INACTIVE:
+ case ODEBUG_STATE_ACTIVE:
+ if (!obj->astate)
+ obj->state = ODEBUG_STATE_INACTIVE;
+ else
+ print_object = true;
+ break;
+
+ case ODEBUG_STATE_DESTROYED:
+ print_object = true;
+ break;
+ default:
+ break;
+ }
+ }
+
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+ if (!obj) {
+ struct debug_obj o = { .object = addr,
+ .state = ODEBUG_STATE_NOTAVAILABLE,
+ .descr = descr };
+
+ debug_print_object(&o, "deactivate");
+ } else if (print_object) {
+ debug_print_object(obj, "deactivate");
+ }
+}
+EXPORT_SYMBOL_GPL(debug_object_deactivate);
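
Taken together, init/activate/deactivate give a subsystem a lifetime
state machine for its objects. A minimal sketch of the wiring (all
names here are hypothetical; the descriptor's fixup and
is_static_object hooks are optional and omitted):

#include <linux/debugobjects.h>

struct my_thing {
	int armed;
};

static const struct debug_obj_descr my_thing_debug_descr = {
	.name = "my_thing",
};

static void my_thing_init(struct my_thing *t)
{
	debug_object_init(t, &my_thing_debug_descr);
	t->armed = 0;
}

static void my_thing_arm(struct my_thing *t)
{
	debug_object_activate(t, &my_thing_debug_descr);
	t->armed = 1;
}

static void my_thing_disarm(struct my_thing *t)
{
	t->armed = 0;
	debug_object_deactivate(t, &my_thing_debug_descr);
}
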
+
+/**
+ * debug_object_destroy - debug checks when an object is destroyed
+ * @addr: address of the object
+ * @descr: pointer to an object specific debug description structure
+ */
+void debug_object_destroy(void *addr, const struct debug_obj_descr *descr)
+{
+ enum debug_obj_state state;
+ struct debug_bucket *db;
+ struct debug_obj *obj;
+ unsigned long flags;
+ bool print_object = false;
+
+ if (!debug_objects_enabled)
+ return;
+
+ db = get_bucket((unsigned long) addr);
+
+ raw_spin_lock_irqsave(&db->lock, flags);
+
+ obj = lookup_object(addr, db);
+ if (!obj)
+ goto out_unlock;
+
+ switch (obj->state) {
+ case ODEBUG_STATE_NONE:
+ case ODEBUG_STATE_INIT:
+ case ODEBUG_STATE_INACTIVE:
+ obj->state = ODEBUG_STATE_DESTROYED;
+ break;
+ case ODEBUG_STATE_ACTIVE:
+ state = obj->state;
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+ debug_print_object(obj, "destroy");
+ debug_object_fixup(descr->fixup_destroy, addr, state);
+ return;
+
+ case ODEBUG_STATE_DESTROYED:
+ print_object = true;
+ break;
+ default:
+ break;
+ }
+out_unlock:
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+ if (print_object)
+ debug_print_object(obj, "destroy");
+}
+EXPORT_SYMBOL_GPL(debug_object_destroy);
+
+/**
+ * debug_object_free - debug checks when an object is freed
+ * @addr: address of the object
+ * @descr: pointer to an object specific debug description structure
+ */
+void debug_object_free(void *addr, const struct debug_obj_descr *descr)
+{
+ enum debug_obj_state state;
+ struct debug_bucket *db;
+ struct debug_obj *obj;
+ unsigned long flags;
+
+ if (!debug_objects_enabled)
+ return;
+
+ db = get_bucket((unsigned long) addr);
+
+ raw_spin_lock_irqsave(&db->lock, flags);
+
+ obj = lookup_object(addr, db);
+ if (!obj)
+ goto out_unlock;
+
+ switch (obj->state) {
+ case ODEBUG_STATE_ACTIVE:
+ state = obj->state;
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+ debug_print_object(obj, "free");
+ debug_object_fixup(descr->fixup_free, addr, state);
+ return;
+ default:
+ hlist_del(&obj->node);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+ free_object(obj);
+ return;
+ }
+out_unlock:
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+}
+EXPORT_SYMBOL_GPL(debug_object_free);
+
+/**
+ * debug_object_assert_init - debug checks when object should be init-ed
+ * @addr: address of the object
+ * @descr: pointer to an object specific debug description structure
+ */
+void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
+{
+ struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
+ struct debug_bucket *db;
+ struct debug_obj *obj;
+ unsigned long flags;
+
+ if (!debug_objects_enabled)
+ return;
+
+ debug_objects_fill_pool();
+
+ db = get_bucket((unsigned long) addr);
+
+ raw_spin_lock_irqsave(&db->lock, flags);
+ obj = lookup_object_or_alloc(addr, db, descr, false, true);
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+ if (likely(!IS_ERR_OR_NULL(obj)))
+ return;
+
+ /* If NULL the allocation has hit OOM */
+ if (!obj) {
+ debug_objects_oom();
+ return;
+ }
+
+ /* Object is neither tracked nor static. It's not initialized. */
+ debug_print_object(&o, "assert_init");
+ debug_object_fixup(descr->fixup_assert_init, addr, ODEBUG_STATE_NOTAVAILABLE);
+}
+EXPORT_SYMBOL_GPL(debug_object_assert_init);
+
+/**
+ * debug_object_active_state - debug checks object usage state machine
+ * @addr: address of the object
+ * @descr: pointer to an object specific debug description structure
+ * @expect: expected state
+ * @next: state to move to if expected state is found
+ */
+void
+debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
+ unsigned int expect, unsigned int next)
+{
+ struct debug_bucket *db;
+ struct debug_obj *obj;
+ unsigned long flags;
+ bool print_object = false;
+
+ if (!debug_objects_enabled)
+ return;
+
+ db = get_bucket((unsigned long) addr);
+
+ raw_spin_lock_irqsave(&db->lock, flags);
+
+ obj = lookup_object(addr, db);
+ if (obj) {
+ switch (obj->state) {
+ case ODEBUG_STATE_ACTIVE:
+ if (obj->astate == expect)
+ obj->astate = next;
+ else
+ print_object = true;
+ break;
+
+ default:
+ print_object = true;
+ break;
+ }
+ }
+
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+ if (!obj) {
+ struct debug_obj o = { .object = addr,
+ .state = ODEBUG_STATE_NOTAVAILABLE,
+ .descr = descr };
+
+ debug_print_object(&o, "active_state");
+ } else if (print_object) {
+ debug_print_object(obj, "active_state");
+ }
+}
+EXPORT_SYMBOL_GPL(debug_object_active_state);
+
+#ifdef CONFIG_DEBUG_OBJECTS_FREE
+static void __debug_check_no_obj_freed(const void *address, unsigned long size)
+{
+ unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
+ const struct debug_obj_descr *descr;
+ enum debug_obj_state state;
+ struct debug_bucket *db;
+ struct hlist_node *tmp;
+ struct debug_obj *obj;
+ int cnt, objs_checked = 0;
+
+ saddr = (unsigned long) address;
+ eaddr = saddr + size;
+ paddr = saddr & ODEBUG_CHUNK_MASK;
+ chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
+ chunks >>= ODEBUG_CHUNK_SHIFT;
+
+ for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
+ db = get_bucket(paddr);
+
+repeat:
+ cnt = 0;
+ raw_spin_lock_irqsave(&db->lock, flags);
+ hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
+ cnt++;
+ oaddr = (unsigned long) obj->object;
+ if (oaddr < saddr || oaddr >= eaddr)
+ continue;
+
+ switch (obj->state) {
+ case ODEBUG_STATE_ACTIVE:
+ descr = obj->descr;
+ state = obj->state;
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+ debug_print_object(obj, "free");
+ debug_object_fixup(descr->fixup_free,
+ (void *) oaddr, state);
+ goto repeat;
+ default:
+ hlist_del(&obj->node);
+ __free_object(obj);
+ break;
+ }
+ }
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+
+ if (cnt > debug_objects_maxchain)
+ debug_objects_maxchain = cnt;
+
+ objs_checked += cnt;
+ }
+
+ if (objs_checked > debug_objects_maxchecked)
+ debug_objects_maxchecked = objs_checked;
+
+ /* Schedule work to actually kmem_cache_free() objects */
+ if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
+ WRITE_ONCE(obj_freeing, true);
+ schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
+ }
+}
+
+void debug_check_no_obj_freed(const void *address, unsigned long size)
+{
+ if (debug_objects_enabled)
+ __debug_check_no_obj_freed(address, size);
+}
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+
+static int debug_stats_show(struct seq_file *m, void *v)
+{
+ int cpu, obj_percpu_free = 0;
+
+ for_each_possible_cpu(cpu)
+ obj_percpu_free += per_cpu(percpu_obj_pool.obj_free, cpu);
+
+ seq_printf(m, "max_chain :%d\n", debug_objects_maxchain);
+ seq_printf(m, "max_checked :%d\n", debug_objects_maxchecked);
+ seq_printf(m, "warnings :%d\n", debug_objects_warnings);
+ seq_printf(m, "fixups :%d\n", debug_objects_fixups);
+ seq_printf(m, "pool_free :%d\n", READ_ONCE(obj_pool_free) + obj_percpu_free);
+ seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free);
+ seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
+ seq_printf(m, "pool_used :%d\n", obj_pool_used - obj_percpu_free);
+ seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
+ seq_printf(m, "on_free_list :%d\n", READ_ONCE(obj_nr_tofree));
+ seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
+ seq_printf(m, "objs_freed :%d\n", debug_objects_freed);
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(debug_stats);
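+
+/*
+ * Example (illustrative numbers only): the file created below can be
+ * read from userspace as
+ *
+ *	# cat /sys/kernel/debug/debug_objects/stats
+ *	max_chain     :2
+ *	max_checked   :64
+ *	warnings      :0
+ *	fixups        :0
+ *	...
+ *
+ * with one line per seq_printf() in debug_stats_show() above.
+ */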
+
+static int __init debug_objects_init_debugfs(void)
+{
+ struct dentry *dbgdir;
+
+ if (!debug_objects_enabled)
+ return 0;
+
+ dbgdir = debugfs_create_dir("debug_objects", NULL);
+
+ debugfs_create_file("stats", 0444, dbgdir, NULL, &debug_stats_fops);
+
+ return 0;
+}
+__initcall(debug_objects_init_debugfs);
+
+#else
+static inline void debug_objects_init_debugfs(void) { }
+#endif
+
+#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST
+
+/* Random data structure for the self test */
+struct self_test {
+ unsigned long dummy1[6];
+ int static_init;
+ unsigned long dummy2[3];
+};
+
+static __initconst const struct debug_obj_descr descr_type_test;
+
+static bool __init is_static_object(void *addr)
+{
+ struct self_test *obj = addr;
+
+ return obj->static_init;
+}
+
+/*
+ * fixup_init is called when:
+ * - an active object is initialized
+ */
+static bool __init fixup_init(void *addr, enum debug_obj_state state)
+{
+ struct self_test *obj = addr;
+
+ switch (state) {
+ case ODEBUG_STATE_ACTIVE:
+ debug_object_deactivate(obj, &descr_type_test);
+ debug_object_init(obj, &descr_type_test);
+ return true;
+ default:
+ return false;
+ }
+}
+
+/*
+ * fixup_activate is called when:
+ * - an active object is activated
+ * - an unknown non-static object is activated
+ */
+static bool __init fixup_activate(void *addr, enum debug_obj_state state)
+{
+ struct self_test *obj = addr;
+
+ switch (state) {
+ case ODEBUG_STATE_NOTAVAILABLE:
+ return true;
+ case ODEBUG_STATE_ACTIVE:
+ debug_object_deactivate(obj, &descr_type_test);
+ debug_object_activate(obj, &descr_type_test);
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+/*
+ * fixup_destroy is called when:
+ * - an active object is destroyed
+ */
+static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
+{
+ struct self_test *obj = addr;
+
+ switch (state) {
+ case ODEBUG_STATE_ACTIVE:
+ debug_object_deactivate(obj, &descr_type_test);
+ debug_object_destroy(obj, &descr_type_test);
+ return true;
+ default:
+ return false;
+ }
+}
+
+/*
+ * fixup_free is called when:
+ * - an active object is freed
+ */
+static bool __init fixup_free(void *addr, enum debug_obj_state state)
+{
+ struct self_test *obj = addr;
+
+ switch (state) {
+ case ODEBUG_STATE_ACTIVE:
+ debug_object_deactivate(obj, &descr_type_test);
+ debug_object_free(obj, &descr_type_test);
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int __init
+check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
+{
+ struct debug_bucket *db;
+ struct debug_obj *obj;
+ unsigned long flags;
+ int res = -EINVAL;
+
+ db = get_bucket((unsigned long) addr);
+
+ raw_spin_lock_irqsave(&db->lock, flags);
+
+ obj = lookup_object(addr, db);
+ if (!obj && state != ODEBUG_STATE_NONE) {
+ WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
+ goto out;
+ }
+ if (obj && obj->state != state) {
+ WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
+ obj->state, state);
+ goto out;
+ }
+ if (fixups != debug_objects_fixups) {
+ WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
+ fixups, debug_objects_fixups);
+ goto out;
+ }
+ if (warnings != debug_objects_warnings) {
+ WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
+ warnings, debug_objects_warnings);
+ goto out;
+ }
+ res = 0;
+out:
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+ if (res)
+ debug_objects_enabled = 0;
+ return res;
+}
+
+static __initconst const struct debug_obj_descr descr_type_test = {
+ .name = "selftest",
+ .is_static_object = is_static_object,
+ .fixup_init = fixup_init,
+ .fixup_activate = fixup_activate,
+ .fixup_destroy = fixup_destroy,
+ .fixup_free = fixup_free,
+};
+
+static __initdata struct self_test obj = { .static_init = 0 };
+
+static void __init debug_objects_selftest(void)
+{
+ int fixups, oldfixups, warnings, oldwarnings;
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ fixups = oldfixups = debug_objects_fixups;
+ warnings = oldwarnings = debug_objects_warnings;
+ descr_test = &descr_type_test;
+
+ debug_object_init(&obj, &descr_type_test);
+ if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
+ goto out;
+ debug_object_activate(&obj, &descr_type_test);
+ if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
+ goto out;
+ debug_object_activate(&obj, &descr_type_test);
+ if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
+ goto out;
+ debug_object_deactivate(&obj, &descr_type_test);
+ if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
+ goto out;
+ debug_object_destroy(&obj, &descr_type_test);
+ if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
+ goto out;
+ debug_object_init(&obj, &descr_type_test);
+ if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
+ goto out;
+ debug_object_activate(&obj, &descr_type_test);
+ if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
+ goto out;
+ debug_object_deactivate(&obj, &descr_type_test);
+ if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
+ goto out;
+ debug_object_free(&obj, &descr_type_test);
+ if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
+ goto out;
+
+ obj.static_init = 1;
+ debug_object_activate(&obj, &descr_type_test);
+ if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
+ goto out;
+ debug_object_init(&obj, &descr_type_test);
+ if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
+ goto out;
+ debug_object_free(&obj, &descr_type_test);
+ if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
+ goto out;
+
+#ifdef CONFIG_DEBUG_OBJECTS_FREE
+ debug_object_init(&obj, &descr_type_test);
+ if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
+ goto out;
+ debug_object_activate(&obj, &descr_type_test);
+ if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
+ goto out;
+ __debug_check_no_obj_freed(&obj, sizeof(obj));
+ if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
+ goto out;
+#endif
+ pr_info("selftest passed\n");
+
+out:
+ debug_objects_fixups = oldfixups;
+ debug_objects_warnings = oldwarnings;
+ descr_test = NULL;
+
+ local_irq_restore(flags);
+}
+#else
+static inline void debug_objects_selftest(void) { }
+#endif
+
+/*
+ * Called during early boot to initialize the hash buckets and link
+ * the static object pool objects into the pool list. After this call
+ * the object tracker is fully operational.
+ */
+void __init debug_objects_early_init(void)
+{
+ int i;
+
+ for (i = 0; i < ODEBUG_HASH_SIZE; i++)
+ raw_spin_lock_init(&obj_hash[i].lock);
+
+ for (i = 0; i < ODEBUG_POOL_SIZE; i++)
+ hlist_add_head(&obj_static_pool[i].node, &obj_pool);
+}
+
+/*
+ * Convert the statically allocated objects to dynamic ones:
+ */
+static int __init debug_objects_replace_static_objects(void)
+{
+ struct debug_bucket *db = obj_hash;
+ struct hlist_node *tmp;
+ struct debug_obj *obj, *new;
+ HLIST_HEAD(objects);
+ int i, cnt = 0;
+
+ for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
+ obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
+ if (!obj)
+ goto free;
+ hlist_add_head(&obj->node, &objects);
+ }
+
+ debug_objects_allocated += i;
+
+ /*
+	 * debug_objects_mem_init() is now called early, while only one CPU is
+	 * up and interrupts are disabled, so it is safe to replace the
+ * active object references.
+ */
+
+ /* Remove the statically allocated objects from the pool */
+ hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
+ hlist_del(&obj->node);
+ /* Move the allocated objects to the pool */
+ hlist_move_list(&objects, &obj_pool);
+
+ /* Replace the active object references */
+ for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
+ hlist_move_list(&db->list, &objects);
+
+ hlist_for_each_entry(obj, &objects, node) {
+ new = hlist_entry(obj_pool.first, typeof(*obj), node);
+ hlist_del(&new->node);
+ /* copy object data */
+ *new = *obj;
+ hlist_add_head(&new->node, &db->list);
+ cnt++;
+ }
+ }
+
+ pr_debug("%d of %d active objects replaced\n",
+ cnt, obj_pool_used);
+ return 0;
+free:
+ hlist_for_each_entry_safe(obj, tmp, &objects, node) {
+ hlist_del(&obj->node);
+ kmem_cache_free(obj_cache, obj);
+ }
+ return -ENOMEM;
+}
+
+/*
+ * Called after the kmem_caches are functional to set up a dedicated
+ * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
+ * prevents the debug code from being called on kmem_cache_free() for
+ * the debug tracker objects, which would otherwise recurse.
+ */
+void __init debug_objects_mem_init(void)
+{
+ int cpu, extras;
+
+ if (!debug_objects_enabled)
+ return;
+
+ /*
+ * Initialize the percpu object pools
+ *
+	 * Initialization is not strictly necessary, but is done for
+	 * completeness.
+ */
+ for_each_possible_cpu(cpu)
+ INIT_HLIST_HEAD(&per_cpu(percpu_obj_pool.free_objs, cpu));
+
+ obj_cache = kmem_cache_create("debug_objects_cache",
+ sizeof (struct debug_obj), 0,
+ SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE,
+ NULL);
+
+ if (!obj_cache || debug_objects_replace_static_objects()) {
+ debug_objects_enabled = 0;
+ kmem_cache_destroy(obj_cache);
+ pr_warn("out of memory.\n");
+ return;
+ } else
+ debug_objects_selftest();
+
+#ifdef CONFIG_HOTPLUG_CPU
+ cpuhp_setup_state_nocalls(CPUHP_DEBUG_OBJ_DEAD, "object:offline", NULL,
+ object_cpu_offline);
+#endif
+
+ /*
+ * Increase the thresholds for allocating and freeing objects
+ * according to the number of possible CPUs available in the system.
+ */
+ extras = num_possible_cpus() * ODEBUG_BATCH_SIZE;
+ debug_objects_pool_size += extras;
+ debug_objects_pool_min_level += extras;
+}
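+
+/*
+ * Sizing sketch (assumes ODEBUG_BATCH_SIZE, defined earlier in this
+ * file, is 16): on a machine with 8 possible CPUs the code above
+ * computes
+ *
+ *	extras = 8 * 16 = 128;
+ *
+ * and raises both the pool size and the refill threshold by that amount.
+ */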
diff --git a/lib/dec_and_lock.c b/lib/dec_and_lock.c
new file mode 100644
index 000000000..1dcca8f2e
--- /dev/null
+++ b/lib/dec_and_lock.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/export.h>
+#include <linux/spinlock.h>
+#include <linux/atomic.h>
+
+/*
+ * This is an implementation of the notion of "decrement a
+ * reference count, and return locked if it decremented to zero".
+ *
+ * NOTE NOTE NOTE! This is _not_ equivalent to
+ *
+ * if (atomic_dec_and_test(&atomic)) {
+ * spin_lock(&lock);
+ * return 1;
+ * }
+ * return 0;
+ *
+ * because the spin-lock and the decrement must be
+ * "atomic".
+ */
+int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
+{
+ /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
+ if (atomic_add_unless(atomic, -1, 1))
+ return 0;
+
+ /* Otherwise do it the slow way */
+ spin_lock(lock);
+ if (atomic_dec_and_test(atomic))
+ return 1;
+ spin_unlock(lock);
+ return 0;
+}
+
+EXPORT_SYMBOL(_atomic_dec_and_lock);
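+
+/*
+ * Usage sketch (illustrative): the classic refcount-protected removal
+ * pattern, via the atomic_dec_and_lock() wrapper from <linux/spinlock.h>:
+ *
+ *	if (atomic_dec_and_lock(&obj->refcount, &list_lock)) {
+ *		list_del(&obj->node);
+ *		spin_unlock(&list_lock);
+ *		kfree(obj);
+ *	}
+ *
+ * The lock is only taken for the final reference, yet no other CPU can
+ * observe the count hitting zero without the lock held.
+ */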
+
+int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
+ unsigned long *flags)
+{
+ /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
+ if (atomic_add_unless(atomic, -1, 1))
+ return 0;
+
+ /* Otherwise do it the slow way */
+ spin_lock_irqsave(lock, *flags);
+ if (atomic_dec_and_test(atomic))
+ return 1;
+ spin_unlock_irqrestore(lock, *flags);
+ return 0;
+}
+EXPORT_SYMBOL(_atomic_dec_and_lock_irqsave);
+
+int _atomic_dec_and_raw_lock(atomic_t *atomic, raw_spinlock_t *lock)
+{
+ /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
+ if (atomic_add_unless(atomic, -1, 1))
+ return 0;
+
+ /* Otherwise do it the slow way */
+ raw_spin_lock(lock);
+ if (atomic_dec_and_test(atomic))
+ return 1;
+ raw_spin_unlock(lock);
+ return 0;
+}
+EXPORT_SYMBOL(_atomic_dec_and_raw_lock);
+
+int _atomic_dec_and_raw_lock_irqsave(atomic_t *atomic, raw_spinlock_t *lock,
+ unsigned long *flags)
+{
+ /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
+ if (atomic_add_unless(atomic, -1, 1))
+ return 0;
+
+ /* Otherwise do it the slow way */
+ raw_spin_lock_irqsave(lock, *flags);
+ if (atomic_dec_and_test(atomic))
+ return 1;
+ raw_spin_unlock_irqrestore(lock, *flags);
+ return 0;
+}
+EXPORT_SYMBOL(_atomic_dec_and_raw_lock_irqsave);
diff --git a/lib/decompress.c b/lib/decompress.c
new file mode 100644
index 000000000..ab3fc90ff
--- /dev/null
+++ b/lib/decompress.c
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * decompress.c
+ *
+ * Detect the decompression method based on magic number
+ */
+
+#include <linux/decompress/generic.h>
+
+#include <linux/decompress/bunzip2.h>
+#include <linux/decompress/unlzma.h>
+#include <linux/decompress/unxz.h>
+#include <linux/decompress/inflate.h>
+#include <linux/decompress/unlzo.h>
+#include <linux/decompress/unlz4.h>
+#include <linux/decompress/unzstd.h>
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/printk.h>
+
+#ifndef CONFIG_DECOMPRESS_GZIP
+# define gunzip NULL
+#endif
+#ifndef CONFIG_DECOMPRESS_BZIP2
+# define bunzip2 NULL
+#endif
+#ifndef CONFIG_DECOMPRESS_LZMA
+# define unlzma NULL
+#endif
+#ifndef CONFIG_DECOMPRESS_XZ
+# define unxz NULL
+#endif
+#ifndef CONFIG_DECOMPRESS_LZO
+# define unlzo NULL
+#endif
+#ifndef CONFIG_DECOMPRESS_LZ4
+# define unlz4 NULL
+#endif
+#ifndef CONFIG_DECOMPRESS_ZSTD
+# define unzstd NULL
+#endif
+
+struct compress_format {
+ unsigned char magic[2];
+ const char *name;
+ decompress_fn decompressor;
+};
+
+static const struct compress_format compressed_formats[] __initconst = {
+ { {0x1f, 0x8b}, "gzip", gunzip },
+ { {0x1f, 0x9e}, "gzip", gunzip },
+ { {0x42, 0x5a}, "bzip2", bunzip2 },
+ { {0x5d, 0x00}, "lzma", unlzma },
+ { {0xfd, 0x37}, "xz", unxz },
+ { {0x89, 0x4c}, "lzo", unlzo },
+ { {0x02, 0x21}, "lz4", unlz4 },
+ { {0x28, 0xb5}, "zstd", unzstd },
+ { {0, 0}, NULL, NULL }
+};
+
+decompress_fn __init decompress_method(const unsigned char *inbuf, long len,
+ const char **name)
+{
+ const struct compress_format *cf;
+
+ if (len < 2) {
+ if (name)
+ *name = NULL;
+ return NULL; /* Need at least this much... */
+ }
+
+ pr_debug("Compressed data magic: %#.2x %#.2x\n", inbuf[0], inbuf[1]);
+
+ for (cf = compressed_formats; cf->name; cf++) {
+ if (!memcmp(inbuf, cf->magic, 2))
+ break;
+	}
+ if (name)
+ *name = cf->name;
+ return cf->decompressor;
+}
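+
+/*
+ * Usage sketch (illustrative): callers such as the initramfs unpacking
+ * code probe the buffer first and then run the returned decompressor:
+ *
+ *	const char *name;
+ *	decompress_fn decomp = decompress_method(buf, len, &name);
+ *
+ *	if (decomp)
+ *		decomp(buf, len, NULL, flush, out, NULL, error);
+ */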
diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
new file mode 100644
index 000000000..3518e7394
--- /dev/null
+++ b/lib/decompress_bunzip2.c
@@ -0,0 +1,756 @@
+/* Small bzip2 deflate implementation, by Rob Landley (rob@landley.net).
+
+ Based on bzip2 decompression code by Julian R Seward (jseward@acm.org),
+ which also acknowledges contributions by Mike Burrows, David Wheeler,
+ Peter Fenwick, Alistair Moffat, Radford Neal, Ian H. Witten,
+ Robert Sedgewick, and Jon L. Bentley.
+
+ This code is licensed under the LGPLv2:
+		LGPL (http://www.gnu.org/copyleft/lgpl.html)
+*/
+
+/*
+ Size and speed optimizations by Manuel Novoa III (mjn3@codepoet.org).
+
+ More efficient reading of Huffman codes, a streamlined read_bunzip()
+ function, and various other tweaks. In (limited) tests, approximately
+ 20% faster than bzcat on x86 and about 10% faster on arm.
+
+   Note that about 2/3 of the time is spent in read_bunzip() reversing
+ the Burrows-Wheeler transformation. Much of that time is delay
+ resulting from cache misses.
+
+ I would ask that anyone benefiting from this work, especially those
+ using it in commercial products, consider making a donation to my local
+ non-profit hospice organization in the name of the woman I loved, who
+ passed away Feb. 12, 2003.
+
+ In memory of Toni W. Hagan
+
+ Hospice of Acadiana, Inc.
+ 2600 Johnston St., Suite 200
+ Lafayette, LA 70503-3240
+
+ Phone (337) 232-1234 or 1-800-738-2226
+ Fax (337) 232-1297
+
+ https://www.hospiceacadiana.com/
+
+ Manuel
+ */
+
+/*
+ Made it fit for running in Linux Kernel by Alain Knaff (alain@knaff.lu)
+*/
+
+
+#ifdef STATIC
+#define PREBOOT
+#else
+#include <linux/decompress/bunzip2.h>
+#endif /* STATIC */
+
+#include <linux/decompress/mm.h>
+#include <linux/crc32poly.h>
+
+#ifndef INT_MAX
+#define INT_MAX 0x7fffffff
+#endif
+
+/* Constants for Huffman coding */
+#define MAX_GROUPS 6
+#define GROUP_SIZE 50 /* 64 would have been more efficient */
+#define MAX_HUFCODE_BITS 20 /* Longest Huffman code allowed */
+#define MAX_SYMBOLS 258 /* 256 literals + RUNA + RUNB */
+#define SYMBOL_RUNA 0
+#define SYMBOL_RUNB 1
+
+/* Status return values */
+#define RETVAL_OK 0
+#define RETVAL_LAST_BLOCK (-1)
+#define RETVAL_NOT_BZIP_DATA (-2)
+#define RETVAL_UNEXPECTED_INPUT_EOF (-3)
+#define RETVAL_UNEXPECTED_OUTPUT_EOF (-4)
+#define RETVAL_DATA_ERROR (-5)
+#define RETVAL_OUT_OF_MEMORY (-6)
+#define RETVAL_OBSOLETE_INPUT (-7)
+
+/* Other housekeeping constants */
+#define BZIP2_IOBUF_SIZE 4096
+
+/* This is what we know about each Huffman coding group */
+struct group_data {
+ /* We have an extra slot at the end of limit[] for a sentinel value. */
+ int limit[MAX_HUFCODE_BITS+1];
+ int base[MAX_HUFCODE_BITS];
+ int permute[MAX_SYMBOLS];
+ int minLen, maxLen;
+};
+
+/* Structure holding all the housekeeping data, including IO buffers and
+ memory that persists between calls to bunzip */
+struct bunzip_data {
+ /* State for interrupting output loop */
+ int writeCopies, writePos, writeRunCountdown, writeCount, writeCurrent;
+ /* I/O tracking data (file handles, buffers, positions, etc.) */
+ long (*fill)(void*, unsigned long);
+ long inbufCount, inbufPos /*, outbufPos*/;
+ unsigned char *inbuf /*,*outbuf*/;
+ unsigned int inbufBitCount, inbufBits;
+ /* The CRC values stored in the block header and calculated from the
+ data */
+ unsigned int crc32Table[256], headerCRC, totalCRC, writeCRC;
+ /* Intermediate buffer and its size (in bytes) */
+ unsigned int *dbuf, dbufSize;
+ /* These things are a bit too big to go on the stack */
+ unsigned char selectors[32768]; /* nSelectors = 15 bits */
+ struct group_data groups[MAX_GROUPS]; /* Huffman coding tables */
+ int io_error; /* non-zero if we have IO error */
+ int byteCount[256];
+ unsigned char symToByte[256], mtfSymbol[256];
+};
+
+
+/* Return the next nnn bits of input. All reads from the compressed input
+ are done through this function. All reads are big endian */
+static unsigned int INIT get_bits(struct bunzip_data *bd, char bits_wanted)
+{
+ unsigned int bits = 0;
+
+ /* If we need to get more data from the byte buffer, do so.
+ (Loop getting one byte at a time to enforce endianness and avoid
+ unaligned access.) */
+ while (bd->inbufBitCount < bits_wanted) {
+ /* If we need to read more data from file into byte buffer, do
+ so */
+ if (bd->inbufPos == bd->inbufCount) {
+ if (bd->io_error)
+ return 0;
+ bd->inbufCount = bd->fill(bd->inbuf, BZIP2_IOBUF_SIZE);
+ if (bd->inbufCount <= 0) {
+ bd->io_error = RETVAL_UNEXPECTED_INPUT_EOF;
+ return 0;
+ }
+ bd->inbufPos = 0;
+ }
+ /* Avoid 32-bit overflow (dump bit buffer to top of output) */
+ if (bd->inbufBitCount >= 24) {
+ bits = bd->inbufBits&((1 << bd->inbufBitCount)-1);
+ bits_wanted -= bd->inbufBitCount;
+ bits <<= bits_wanted;
+ bd->inbufBitCount = 0;
+ }
+ /* Grab next 8 bits of input from buffer. */
+ bd->inbufBits = (bd->inbufBits << 8)|bd->inbuf[bd->inbufPos++];
+ bd->inbufBitCount += 8;
+ }
+ /* Calculate result */
+ bd->inbufBitCount -= bits_wanted;
+ bits |= (bd->inbufBits >> bd->inbufBitCount)&((1 << bits_wanted)-1);
+
+ return bits;
+}
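+
+/*
+ * Example (from the stream format): get_next_block() below reads the
+ * 48-bit block magic as two big-endian 24-bit reads, expecting
+ * get_bits(bd, 24) to return 0x314159 and then 0x265359 (digits of pi);
+ * the end-of-stream magic is 0x177245/0x385090 (digits of sqrt(pi)).
+ */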
+
+/* Unpacks the next block and sets up for the inverse burrows-wheeler step. */
+
+static int INIT get_next_block(struct bunzip_data *bd)
+{
+ struct group_data *hufGroup = NULL;
+ int *base = NULL;
+ int *limit = NULL;
+ int dbufCount, nextSym, dbufSize, groupCount, selector,
+ i, j, k, t, runPos, symCount, symTotal, nSelectors, *byteCount;
+ unsigned char uc, *symToByte, *mtfSymbol, *selectors;
+ unsigned int *dbuf, origPtr;
+
+ dbuf = bd->dbuf;
+ dbufSize = bd->dbufSize;
+ selectors = bd->selectors;
+ byteCount = bd->byteCount;
+ symToByte = bd->symToByte;
+ mtfSymbol = bd->mtfSymbol;
+
+ /* Read in header signature and CRC, then validate signature.
+ (last block signature means CRC is for whole file, return now) */
+ i = get_bits(bd, 24);
+ j = get_bits(bd, 24);
+ bd->headerCRC = get_bits(bd, 32);
+ if ((i == 0x177245) && (j == 0x385090))
+ return RETVAL_LAST_BLOCK;
+ if ((i != 0x314159) || (j != 0x265359))
+ return RETVAL_NOT_BZIP_DATA;
+ /* We can add support for blockRandomised if anybody complains.
+ There was some code for this in busybox 1.0.0-pre3, but nobody ever
+ noticed that it didn't actually work. */
+ if (get_bits(bd, 1))
+ return RETVAL_OBSOLETE_INPUT;
+ origPtr = get_bits(bd, 24);
+ if (origPtr >= dbufSize)
+ return RETVAL_DATA_ERROR;
+ /* mapping table: if some byte values are never used (encoding things
+ like ascii text), the compression code removes the gaps to have fewer
+ symbols to deal with, and writes a sparse bitfield indicating which
+ values were present. We make a translation table to convert the
+ symbols back to the corresponding bytes. */
+ t = get_bits(bd, 16);
+ symTotal = 0;
+ for (i = 0; i < 16; i++) {
+ if (t&(1 << (15-i))) {
+ k = get_bits(bd, 16);
+ for (j = 0; j < 16; j++)
+ if (k&(1 << (15-j)))
+ symToByte[symTotal++] = (16*i)+j;
+ }
+ }
+ /* How many different Huffman coding groups does this block use? */
+ groupCount = get_bits(bd, 3);
+ if (groupCount < 2 || groupCount > MAX_GROUPS)
+ return RETVAL_DATA_ERROR;
+ /* nSelectors: Every GROUP_SIZE many symbols we select a new
+ Huffman coding group. Read in the group selector list,
+ which is stored as MTF encoded bit runs. (MTF = Move To
+ Front, as each value is used it's moved to the start of the
+ list.) */
+ nSelectors = get_bits(bd, 15);
+ if (!nSelectors)
+ return RETVAL_DATA_ERROR;
+ for (i = 0; i < groupCount; i++)
+ mtfSymbol[i] = i;
+ for (i = 0; i < nSelectors; i++) {
+ /* Get next value */
+ for (j = 0; get_bits(bd, 1); j++)
+ if (j >= groupCount)
+ return RETVAL_DATA_ERROR;
+ /* Decode MTF to get the next selector */
+ uc = mtfSymbol[j];
+ for (; j; j--)
+ mtfSymbol[j] = mtfSymbol[j-1];
+ mtfSymbol[0] = selectors[i] = uc;
+ }
+ /* Read the Huffman coding tables for each group, which code
+ for symTotal literal symbols, plus two run symbols (RUNA,
+ RUNB) */
+ symCount = symTotal+2;
+ for (j = 0; j < groupCount; j++) {
+ unsigned char length[MAX_SYMBOLS], temp[MAX_HUFCODE_BITS+1];
+ int minLen, maxLen, pp;
+ /* Read Huffman code lengths for each symbol. They're
+ stored in a way similar to mtf; record a starting
+ value for the first symbol, and an offset from the
+		   previous value for every symbol after that.
+ (Subtracting 1 before the loop and then adding it
+ back at the end is an optimization that makes the
+ test inside the loop simpler: symbol length 0
+ becomes negative, so an unsigned inequality catches
+ it.) */
+ t = get_bits(bd, 5)-1;
+ for (i = 0; i < symCount; i++) {
+ for (;;) {
+ if (((unsigned)t) > (MAX_HUFCODE_BITS-1))
+ return RETVAL_DATA_ERROR;
+
+ /* If first bit is 0, stop. Else
+ second bit indicates whether to
+ increment or decrement the value.
+ Optimization: grab 2 bits and unget
+ the second if the first was 0. */
+
+ k = get_bits(bd, 2);
+ if (k < 2) {
+ bd->inbufBitCount++;
+ break;
+ }
+ /* Add one if second bit 1, else
+ * subtract 1. Avoids if/else */
+ t += (((k+1)&2)-1);
+ }
+ /* Correct for the initial -1, to get the
+ * final symbol length */
+ length[i] = t+1;
+ }
+ /* Find largest and smallest lengths in this group */
+ minLen = maxLen = length[0];
+
+ for (i = 1; i < symCount; i++) {
+ if (length[i] > maxLen)
+ maxLen = length[i];
+ else if (length[i] < minLen)
+ minLen = length[i];
+ }
+
+ /* Calculate permute[], base[], and limit[] tables from
+ * length[].
+ *
+ * permute[] is the lookup table for converting
+ * Huffman coded symbols into decoded symbols. base[]
+ * is the amount to subtract from the value of a
+ * Huffman symbol of a given length when using
+ * permute[].
+ *
+ * limit[] indicates the largest numerical value a
+ * symbol with a given number of bits can have. This
+ * is how the Huffman codes can vary in length: each
+ * code with a value > limit[length] needs another
+ * bit.
+ */
+ hufGroup = bd->groups+j;
+ hufGroup->minLen = minLen;
+ hufGroup->maxLen = maxLen;
+ /* Note that minLen can't be smaller than 1, so we
+ adjust the base and limit array pointers so we're
+ not always wasting the first entry. We do this
+ again when using them (during symbol decoding).*/
+ base = hufGroup->base-1;
+ limit = hufGroup->limit-1;
+ /* Calculate permute[]. Concurrently, initialize
+ * temp[] and limit[]. */
+ pp = 0;
+ for (i = minLen; i <= maxLen; i++) {
+ temp[i] = limit[i] = 0;
+ for (t = 0; t < symCount; t++)
+ if (length[t] == i)
+ hufGroup->permute[pp++] = t;
+ }
+ /* Count symbols coded for at each bit length */
+ for (i = 0; i < symCount; i++)
+ temp[length[i]]++;
+ /* Calculate limit[] (the largest symbol-coding value
+		 * at each bit length, which is (previous limit <<
+		 * 1) + symbols at this level), and base[] (number of
+		 * symbols to ignore at each bit length, which is limit
+		 * minus the cumulative count of symbols coded for
+		 * already). */
+ pp = t = 0;
+ for (i = minLen; i < maxLen; i++) {
+ pp += temp[i];
+ /* We read the largest possible symbol size
+ and then unget bits after determining how
+ many we need, and those extra bits could be
+ set to anything. (They're noise from
+ future symbols.) At each level we're
+ really only interested in the first few
+ bits, so here we set all the trailing
+ to-be-ignored bits to 1 so they don't
+ affect the value > limit[length]
+ comparison. */
+ limit[i] = (pp << (maxLen - i)) - 1;
+ pp <<= 1;
+ base[i+1] = pp-(t += temp[i]);
+ }
+ limit[maxLen+1] = INT_MAX; /* Sentinel value for
+ * reading next sym. */
+ limit[maxLen] = pp+temp[maxLen]-1;
+ base[minLen] = 0;
+ }
+ /* We've finished reading and digesting the block header. Now
+ read this block's Huffman coded symbols from the file and
+ undo the Huffman coding and run length encoding, saving the
+ result into dbuf[dbufCount++] = uc */
+
+ /* Initialize symbol occurrence counters and symbol Move To
+ * Front table */
+ for (i = 0; i < 256; i++) {
+ byteCount[i] = 0;
+ mtfSymbol[i] = (unsigned char)i;
+ }
+ /* Loop through compressed symbols. */
+ runPos = dbufCount = symCount = selector = 0;
+ for (;;) {
+ /* Determine which Huffman coding group to use. */
+ if (!(symCount--)) {
+ symCount = GROUP_SIZE-1;
+ if (selector >= nSelectors)
+ return RETVAL_DATA_ERROR;
+ hufGroup = bd->groups+selectors[selector++];
+ base = hufGroup->base-1;
+ limit = hufGroup->limit-1;
+ }
+ /* Read next Huffman-coded symbol. */
+ /* Note: It is far cheaper to read maxLen bits and
+ back up than it is to read minLen bits and then an
+ additional bit at a time, testing as we go.
+ Because there is a trailing last block (with file
+ CRC), there is no danger of the overread causing an
+ unexpected EOF for a valid compressed file. As a
+ further optimization, we do the read inline
+ (falling back to a call to get_bits if the buffer
+ runs dry). The following (up to got_huff_bits:) is
+ equivalent to j = get_bits(bd, hufGroup->maxLen);
+ */
+ while (bd->inbufBitCount < hufGroup->maxLen) {
+ if (bd->inbufPos == bd->inbufCount) {
+ j = get_bits(bd, hufGroup->maxLen);
+ goto got_huff_bits;
+ }
+ bd->inbufBits =
+ (bd->inbufBits << 8)|bd->inbuf[bd->inbufPos++];
+ bd->inbufBitCount += 8;
+ }
+ bd->inbufBitCount -= hufGroup->maxLen;
+ j = (bd->inbufBits >> bd->inbufBitCount)&
+ ((1 << hufGroup->maxLen)-1);
+got_huff_bits:
+ /* Figure how many bits are in next symbol and
+ * unget extras */
+ i = hufGroup->minLen;
+ while (j > limit[i])
+ ++i;
+ bd->inbufBitCount += (hufGroup->maxLen - i);
+ /* Huffman decode value to get nextSym (with bounds checking) */
+ if ((i > hufGroup->maxLen)
+ || (((unsigned)(j = (j>>(hufGroup->maxLen-i))-base[i]))
+ >= MAX_SYMBOLS))
+ return RETVAL_DATA_ERROR;
+ nextSym = hufGroup->permute[j];
+ /* We have now decoded the symbol, which indicates
+ either a new literal byte, or a repeated run of the
+ most recent literal byte. First, check if nextSym
+ indicates a repeated run, and if so loop collecting
+ how many times to repeat the last literal. */
+ if (((unsigned)nextSym) <= SYMBOL_RUNB) { /* RUNA or RUNB */
+ /* If this is the start of a new run, zero out
+ * counter */
+ if (!runPos) {
+ runPos = 1;
+ t = 0;
+ }
+ /* Neat trick that saves 1 symbol: instead of
+ or-ing 0 or 1 at each bit position, add 1
+ or 2 instead. For example, 1011 is 1 << 0
+ + 1 << 1 + 2 << 2. 1010 is 2 << 0 + 2 << 1
+ + 1 << 2. You can make any bit pattern
+ that way using 1 less symbol than the basic
+ or 0/1 method (except all bits 0, which
+ would use no symbols, but a run of length 0
+ doesn't mean anything in this context).
+ Thus space is saved. */
+ t += (runPos << nextSym);
+ /* +runPos if RUNA; +2*runPos if RUNB */
+
+ runPos <<= 1;
+ continue;
+ }
+ /* When we hit the first non-run symbol after a run,
+ we now know how many times to repeat the last
+ literal, so append that many copies to our buffer
+ of decoded symbols (dbuf) now. (The last literal
+ used is the one at the head of the mtfSymbol
+ array.) */
+ if (runPos) {
+ runPos = 0;
+ if (dbufCount+t >= dbufSize)
+ return RETVAL_DATA_ERROR;
+
+ uc = symToByte[mtfSymbol[0]];
+ byteCount[uc] += t;
+ while (t--)
+ dbuf[dbufCount++] = uc;
+ }
+ /* Is this the terminating symbol? */
+ if (nextSym > symTotal)
+ break;
+ /* At this point, nextSym indicates a new literal
+ character. Subtract one to get the position in the
+ MTF array at which this literal is currently to be
+ found. (Note that the result can't be -1 or 0,
+ because 0 and 1 are RUNA and RUNB. But another
+ instance of the first symbol in the mtf array,
+ position 0, would have been handled as part of a
+ run above. Therefore 1 unused mtf position minus 2
+ non-literal nextSym values equals -1.) */
+ if (dbufCount >= dbufSize)
+ return RETVAL_DATA_ERROR;
+ i = nextSym - 1;
+ uc = mtfSymbol[i];
+ /* Adjust the MTF array. Since we typically expect to
+		 * move only a small number of symbols, and are bound
+		 * by 256 in any case, using memmove here would
+		 * typically be bigger and slower due to function call
+		 * overhead and other assorted setup costs. */
+ do {
+ mtfSymbol[i] = mtfSymbol[i-1];
+ } while (--i);
+ mtfSymbol[0] = uc;
+ uc = symToByte[uc];
+ /* We have our literal byte. Save it into dbuf. */
+ byteCount[uc]++;
+ dbuf[dbufCount++] = (unsigned int)uc;
+ }
+ /* At this point, we've read all the Huffman-coded symbols
+ (and repeated runs) for this block from the input stream,
+ and decoded them into the intermediate buffer. There are
+ dbufCount many decoded bytes in dbuf[]. Now undo the
+ Burrows-Wheeler transform on dbuf. See
+ http://dogma.net/markn/articles/bwt/bwt.htm
+ */
+ /* Turn byteCount into cumulative occurrence counts of 0 to n-1. */
+ j = 0;
+ for (i = 0; i < 256; i++) {
+ k = j+byteCount[i];
+ byteCount[i] = j;
+ j = k;
+ }
+ /* Figure out what order dbuf would be in if we sorted it. */
+ for (i = 0; i < dbufCount; i++) {
+ uc = (unsigned char)(dbuf[i] & 0xff);
+ dbuf[byteCount[uc]] |= (i << 8);
+ byteCount[uc]++;
+ }
+ /* Decode first byte by hand to initialize "previous" byte.
+ Note that it doesn't get output, and if the first three
+ characters are identical it doesn't qualify as a run (hence
+ writeRunCountdown = 5). */
+ if (dbufCount) {
+ if (origPtr >= dbufCount)
+ return RETVAL_DATA_ERROR;
+ bd->writePos = dbuf[origPtr];
+ bd->writeCurrent = (unsigned char)(bd->writePos&0xff);
+ bd->writePos >>= 8;
+ bd->writeRunCountdown = 5;
+ }
+ bd->writeCount = dbufCount;
+
+ return RETVAL_OK;
+}
+
+/* Undo the Burrows-Wheeler transform on the intermediate buffer to produce
+   output. Up to len bytes of decoded data are written to outbuf. Return
+   value is the number of bytes written, or an error (all errors are
+   negative numbers).
+*/
+
+static int INIT read_bunzip(struct bunzip_data *bd, char *outbuf, int len)
+{
+ const unsigned int *dbuf;
+ int pos, xcurrent, previous, gotcount;
+
+ /* If last read was short due to end of file, return last block now */
+ if (bd->writeCount < 0)
+ return bd->writeCount;
+
+ gotcount = 0;
+ dbuf = bd->dbuf;
+ pos = bd->writePos;
+ xcurrent = bd->writeCurrent;
+
+ /* We will always have pending decoded data to write into the output
+ buffer unless this is the very first call (in which case we haven't
+ Huffman-decoded a block into the intermediate buffer yet). */
+
+ if (bd->writeCopies) {
+ /* Inside the loop, writeCopies means extra copies (beyond 1) */
+ --bd->writeCopies;
+ /* Loop outputting bytes */
+ for (;;) {
+ /* If the output buffer is full, snapshot
+ * state and return */
+ if (gotcount >= len) {
+ bd->writePos = pos;
+ bd->writeCurrent = xcurrent;
+ bd->writeCopies++;
+ return len;
+ }
+ /* Write next byte into output buffer, updating CRC */
+ outbuf[gotcount++] = xcurrent;
+ bd->writeCRC = (((bd->writeCRC) << 8)
+ ^bd->crc32Table[((bd->writeCRC) >> 24)
+ ^xcurrent]);
+ /* Loop now if we're outputting multiple
+ * copies of this byte */
+ if (bd->writeCopies) {
+ --bd->writeCopies;
+ continue;
+ }
+decode_next_byte:
+ if (!bd->writeCount--)
+ break;
+ /* Follow sequence vector to undo
+ * Burrows-Wheeler transform */
+ previous = xcurrent;
+ pos = dbuf[pos];
+ xcurrent = pos&0xff;
+ pos >>= 8;
+ /* After 3 consecutive copies of the same
+ byte, the 4th is a repeat count. We count
+			   down from 4 instead of counting up because
+ testing for non-zero is faster */
+ if (--bd->writeRunCountdown) {
+ if (xcurrent != previous)
+ bd->writeRunCountdown = 4;
+ } else {
+ /* We have a repeated run, this byte
+ * indicates the count */
+ bd->writeCopies = xcurrent;
+ xcurrent = previous;
+ bd->writeRunCountdown = 5;
+ /* Sometimes there are just 3 bytes
+ * (run length 0) */
+ if (!bd->writeCopies)
+ goto decode_next_byte;
+ /* Subtract the 1 copy we'd output
+ * anyway to get extras */
+ --bd->writeCopies;
+ }
+ }
+ /* Decompression of this block completed successfully */
+ bd->writeCRC = ~bd->writeCRC;
+ bd->totalCRC = ((bd->totalCRC << 1) |
+ (bd->totalCRC >> 31)) ^ bd->writeCRC;
+ /* If this block had a CRC error, force file level CRC error. */
+ if (bd->writeCRC != bd->headerCRC) {
+ bd->totalCRC = bd->headerCRC+1;
+ return RETVAL_LAST_BLOCK;
+ }
+ }
+
+ /* Refill the intermediate buffer by Huffman-decoding next
+ * block of input */
+ /* (previous is just a convenient unused temp variable here) */
+ previous = get_next_block(bd);
+ if (previous) {
+ bd->writeCount = previous;
+ return (previous != RETVAL_LAST_BLOCK) ? previous : gotcount;
+ }
+ bd->writeCRC = 0xffffffffUL;
+ pos = bd->writePos;
+ xcurrent = bd->writeCurrent;
+ goto decode_next_byte;
+}
+
+static long INIT nofill(void *buf, unsigned long len)
+{
+ return -1;
+}
+
+/* Allocate the structure, read the file header. If fill is NULL, inbuf must
+   contain a complete bzip2 stream (len bytes long). Otherwise inbuf is only
+   a temporary buffer, refilled through the fill() callback as needed. */
+static int INIT start_bunzip(struct bunzip_data **bdp, void *inbuf, long len,
+ long (*fill)(void*, unsigned long))
+{
+ struct bunzip_data *bd;
+ unsigned int i, j, c;
+ const unsigned int BZh0 =
+ (((unsigned int)'B') << 24)+(((unsigned int)'Z') << 16)
+ +(((unsigned int)'h') << 8)+(unsigned int)'0';
+
+ /* Figure out how much data to allocate */
+ i = sizeof(struct bunzip_data);
+
+ /* Allocate bunzip_data. Most fields initialize to zero. */
+ bd = *bdp = malloc(i);
+ if (!bd)
+ return RETVAL_OUT_OF_MEMORY;
+ memset(bd, 0, sizeof(struct bunzip_data));
+ /* Setup input buffer */
+ bd->inbuf = inbuf;
+ bd->inbufCount = len;
+ if (fill != NULL)
+ bd->fill = fill;
+ else
+ bd->fill = nofill;
+
+ /* Init the CRC32 table (big endian) */
+ for (i = 0; i < 256; i++) {
+ c = i << 24;
+ for (j = 8; j; j--)
+ c = c&0x80000000 ? (c << 1)^(CRC32_POLY_BE) : (c << 1);
+ bd->crc32Table[i] = c;
+ }
+
+ /* Ensure that file starts with "BZh['1'-'9']." */
+ i = get_bits(bd, 32);
+ if (((unsigned int)(i-BZh0-1)) >= 9)
+ return RETVAL_NOT_BZIP_DATA;
+
+	/* Fourth byte (ascii '1'-'9') indicates block size in units of 100k of
+ uncompressed data. Allocate intermediate buffer for block. */
+ bd->dbufSize = 100000*(i-BZh0);
+
+ bd->dbuf = large_malloc(bd->dbufSize * sizeof(int));
+ if (!bd->dbuf)
+ return RETVAL_OUT_OF_MEMORY;
+ return RETVAL_OK;
+}
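+
+/*
+ * Worked example (follows from the code above): a stream starting "BZh9"
+ * yields i - BZh0 == 9, so dbufSize = 900000 and the intermediate buffer
+ * is 900000 * sizeof(int) bytes, i.e. about 3.4 MiB with 32-bit ints.
+ */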
+
+/* Decompress a complete bzip2 stream, either from a buffer or through the
+   fill/flush callbacks. (Stops at end of bzip2 data, not end of file.) */
+STATIC int INIT bunzip2(unsigned char *buf, long len,
+ long (*fill)(void*, unsigned long),
+ long (*flush)(void*, unsigned long),
+ unsigned char *outbuf,
+ long *pos,
+ void(*error)(char *x))
+{
+ struct bunzip_data *bd;
+ int i = -1;
+ unsigned char *inbuf;
+
+ if (flush)
+ outbuf = malloc(BZIP2_IOBUF_SIZE);
+
+ if (!outbuf) {
+ error("Could not allocate output buffer");
+ return RETVAL_OUT_OF_MEMORY;
+ }
+ if (buf)
+ inbuf = buf;
+ else
+ inbuf = malloc(BZIP2_IOBUF_SIZE);
+ if (!inbuf) {
+ error("Could not allocate input buffer");
+ i = RETVAL_OUT_OF_MEMORY;
+ goto exit_0;
+ }
+ i = start_bunzip(&bd, inbuf, len, fill);
+ if (!i) {
+ for (;;) {
+ i = read_bunzip(bd, outbuf, BZIP2_IOBUF_SIZE);
+ if (i <= 0)
+ break;
+ if (!flush)
+ outbuf += i;
+ else
+ if (i != flush(outbuf, i)) {
+ i = RETVAL_UNEXPECTED_OUTPUT_EOF;
+ break;
+ }
+ }
+ }
+ /* Check CRC and release memory */
+ if (i == RETVAL_LAST_BLOCK) {
+ if (bd->headerCRC != bd->totalCRC)
+ error("Data integrity error when decompressing.");
+ else
+ i = RETVAL_OK;
+ } else if (i == RETVAL_UNEXPECTED_OUTPUT_EOF) {
+ error("Compressed file ends unexpectedly");
+ }
+ if (!bd)
+ goto exit_1;
+ if (bd->dbuf)
+ large_free(bd->dbuf);
+ if (pos)
+ *pos = bd->inbufPos;
+ free(bd);
+exit_1:
+ if (!buf)
+ free(inbuf);
+exit_0:
+ if (flush)
+ free(outbuf);
+ return i;
+}
+
+#ifdef PREBOOT
+STATIC int INIT __decompress(unsigned char *buf, long len,
+ long (*fill)(void*, unsigned long),
+ long (*flush)(void*, unsigned long),
+ unsigned char *outbuf, long olen,
+ long *pos,
+ void (*error)(char *x))
+{
+ return bunzip2(buf, len - 4, fill, flush, outbuf, pos, error);
+}
+#endif
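+
+/*
+ * Note (assumption based on the pre-boot convention): the build appends a
+ * 4-byte little-endian uncompressed-length word to the payload, which is
+ * why __decompress() hands bunzip2() len - 4; bzip2 itself never reads it.
+ */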
diff --git a/lib/decompress_inflate.c b/lib/decompress_inflate.c
new file mode 100644
index 000000000..e19199f4a
--- /dev/null
+++ b/lib/decompress_inflate.c
@@ -0,0 +1,219 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifdef STATIC
+#define PREBOOT
+/* Pre-boot environment: included */
+
+/* prevent inclusion of _LINUX_KERNEL_H in pre-boot environment: lots of
+ * errors about console_printk etc... on ARM */
+#define _LINUX_KERNEL_H
+
+#include "zlib_inflate/inftrees.c"
+#include "zlib_inflate/inffast.c"
+#include "zlib_inflate/inflate.c"
+#ifdef CONFIG_ZLIB_DFLTCC
+#include "zlib_dfltcc/dfltcc.c"
+#include "zlib_dfltcc/dfltcc_inflate.c"
+#endif
+
+#else /* STATIC */
+/* initramfs et al: linked */
+
+#include <linux/zutil.h>
+
+#include "zlib_inflate/inftrees.h"
+#include "zlib_inflate/inffast.h"
+#include "zlib_inflate/inflate.h"
+
+#include "zlib_inflate/infutil.h"
+#include <linux/decompress/inflate.h>
+
+#endif /* STATIC */
+
+#include <linux/decompress/mm.h>
+
+#define GZIP_IOBUF_SIZE (16*1024)
+
+static long INIT nofill(void *buffer, unsigned long len)
+{
+ return -1;
+}
+
+/* Included from initramfs et al code */
+static int INIT __gunzip(unsigned char *buf, long len,
+ long (*fill)(void*, unsigned long),
+ long (*flush)(void*, unsigned long),
+ unsigned char *out_buf, long out_len,
+ long *pos,
+ void(*error)(char *x)) {
+ u8 *zbuf;
+ struct z_stream_s *strm;
+ int rc;
+
+ rc = -1;
+ if (flush) {
+ out_len = 0x8000; /* 32 K */
+ out_buf = malloc(out_len);
+ } else {
+ if (!out_len)
+ out_len = ((size_t)~0) - (size_t)out_buf; /* no limit */
+ }
+ if (!out_buf) {
+ error("Out of memory while allocating output buffer");
+ goto gunzip_nomem1;
+ }
+
+ if (buf)
+ zbuf = buf;
+ else {
+ zbuf = malloc(GZIP_IOBUF_SIZE);
+ len = 0;
+ }
+ if (!zbuf) {
+ error("Out of memory while allocating input buffer");
+ goto gunzip_nomem2;
+ }
+
+ strm = malloc(sizeof(*strm));
+ if (strm == NULL) {
+ error("Out of memory while allocating z_stream");
+ goto gunzip_nomem3;
+ }
+
+ strm->workspace = malloc(flush ? zlib_inflate_workspacesize() :
+#ifdef CONFIG_ZLIB_DFLTCC
+ /* Always allocate the full workspace for DFLTCC */
+ zlib_inflate_workspacesize());
+#else
+ sizeof(struct inflate_state));
+#endif
+ if (strm->workspace == NULL) {
+ error("Out of memory while allocating workspace");
+ goto gunzip_nomem4;
+ }
+
+ if (!fill)
+ fill = nofill;
+
+ if (len == 0)
+ len = fill(zbuf, GZIP_IOBUF_SIZE);
+
+ /* verify the gzip header */
+ if (len < 10 ||
+ zbuf[0] != 0x1f || zbuf[1] != 0x8b || zbuf[2] != 0x08) {
+ if (pos)
+ *pos = 0;
+ error("Not a gzip file");
+ goto gunzip_5;
+ }
+
+ /* skip over gzip header (1f,8b,08... 10 bytes total +
+ * possible asciz filename)
+ */
+ strm->next_in = zbuf + 10;
+ strm->avail_in = len - 10;
+ /* skip over asciz filename */
+ if (zbuf[3] & 0x8) {
+ do {
+ /*
+ * If the filename doesn't fit into the buffer,
+ * the file is very probably corrupt. Don't try
+ * to read more data.
+ */
+ if (strm->avail_in == 0) {
+ error("header error");
+ goto gunzip_5;
+ }
+ --strm->avail_in;
+ } while (*strm->next_in++);
+ }
+
+ strm->next_out = out_buf;
+ strm->avail_out = out_len;
+
+ rc = zlib_inflateInit2(strm, -MAX_WBITS);
+
+#ifdef CONFIG_ZLIB_DFLTCC
+ /* Always keep the window for DFLTCC */
+#else
+ if (!flush) {
+ WS(strm)->inflate_state.wsize = 0;
+ WS(strm)->inflate_state.window = NULL;
+ }
+#endif
+
+ while (rc == Z_OK) {
+ if (strm->avail_in == 0) {
+ /* TODO: handle case where both pos and fill are set */
+ len = fill(zbuf, GZIP_IOBUF_SIZE);
+ if (len < 0) {
+ rc = -1;
+ error("read error");
+ break;
+ }
+ strm->next_in = zbuf;
+ strm->avail_in = len;
+ }
+ rc = zlib_inflate(strm, 0);
+
+ /* Write any data generated */
+ if (flush && strm->next_out > out_buf) {
+ long l = strm->next_out - out_buf;
+ if (l != flush(out_buf, l)) {
+ rc = -1;
+ error("write error");
+ break;
+ }
+ strm->next_out = out_buf;
+ strm->avail_out = out_len;
+ }
+
+ /* after Z_FINISH, only Z_STREAM_END is "we unpacked it all" */
+ if (rc == Z_STREAM_END) {
+ rc = 0;
+ break;
+ } else if (rc != Z_OK) {
+ error("uncompression error");
+ rc = -1;
+ }
+ }
+
+ zlib_inflateEnd(strm);
+ if (pos)
+		/* add 8 to skip over the 8-byte gzip trailer (CRC32 + ISIZE) */
+ *pos = strm->next_in - zbuf+8;
+
+gunzip_5:
+ free(strm->workspace);
+gunzip_nomem4:
+ free(strm);
+gunzip_nomem3:
+ if (!buf)
+ free(zbuf);
+gunzip_nomem2:
+ if (flush)
+ free(out_buf);
+gunzip_nomem1:
+ return rc; /* returns Z_OK (0) if successful */
+}
+
+#ifndef PREBOOT
+STATIC int INIT gunzip(unsigned char *buf, long len,
+ long (*fill)(void*, unsigned long),
+ long (*flush)(void*, unsigned long),
+ unsigned char *out_buf,
+ long *pos,
+ void (*error)(char *x))
+{
+ return __gunzip(buf, len, fill, flush, out_buf, 0, pos, error);
+}
+#else
+STATIC int INIT __decompress(unsigned char *buf, long len,
+ long (*fill)(void*, unsigned long),
+ long (*flush)(void*, unsigned long),
+ unsigned char *out_buf, long out_len,
+ long *pos,
+ void (*error)(char *x))
+{
+ return __gunzip(buf, len, fill, flush, out_buf, out_len, pos, error);
+}
+#endif
diff --git a/lib/decompress_unlz4.c b/lib/decompress_unlz4.c
new file mode 100644
index 000000000..e6327391b
--- /dev/null
+++ b/lib/decompress_unlz4.c
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Wrapper for decompressing LZ4-compressed kernel, initramfs, and initrd
+ *
+ * Copyright (C) 2013, LG Electronics, Kyungsik Lee <kyungsik.lee@lge.com>
+ */
+
+#ifdef STATIC
+#define PREBOOT
+#include "lz4/lz4_decompress.c"
+#else
+#include <linux/decompress/unlz4.h>
+#endif
+#include <linux/types.h>
+#include <linux/lz4.h>
+#include <linux/decompress/mm.h>
+#include <linux/compiler.h>
+
+#include <asm/unaligned.h>
+
+/*
+ * Note: The uncompressed chunk size is fixed on the compressor side
+ * (i.e. in the userspace compression tool).
+ * It is hardcoded because there is no proper way to extract it
+ * from the binary stream generated by the preliminary version
+ * of the LZ4 tool.
+ */
+#define LZ4_DEFAULT_UNCOMPRESSED_CHUNK_SIZE (8 << 20)
+#define ARCHIVE_MAGICNUMBER 0x184C2102
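+
+/*
+ * Layout of the legacy LZ4 stream decoded by the loop in unlz4() below:
+ *
+ *	le32 magic;			ARCHIVE_MAGICNUMBER, may repeat
+ *	struct {
+ *		le32 chunksize;		compressed size of the block
+ *		u8 data[chunksize];	one LZ4 block, decompressing to
+ *					at most 8 MiB
+ *	} chunks[];
+ */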
+
+STATIC inline int INIT unlz4(u8 *input, long in_len,
+ long (*fill)(void *, unsigned long),
+ long (*flush)(void *, unsigned long),
+ u8 *output, long *posp,
+ void (*error) (char *x))
+{
+ int ret = -1;
+ size_t chunksize = 0;
+ size_t uncomp_chunksize = LZ4_DEFAULT_UNCOMPRESSED_CHUNK_SIZE;
+ u8 *inp;
+ u8 *inp_start;
+ u8 *outp;
+ long size = in_len;
+#ifdef PREBOOT
+ size_t out_len = get_unaligned_le32(input + in_len);
+#endif
+ size_t dest_len;
+
+ if (output) {
+ outp = output;
+ } else if (!flush) {
+ error("NULL output pointer and no flush function provided");
+ goto exit_0;
+ } else {
+ outp = large_malloc(uncomp_chunksize);
+ if (!outp) {
+ error("Could not allocate output buffer");
+ goto exit_0;
+ }
+ }
+
+ if (input && fill) {
+ error("Both input pointer and fill function provided,");
+ goto exit_1;
+ } else if (input) {
+ inp = input;
+ } else if (!fill) {
+ error("NULL input pointer and missing fill function");
+ goto exit_1;
+ } else {
+ inp = large_malloc(LZ4_compressBound(uncomp_chunksize));
+ if (!inp) {
+ error("Could not allocate input buffer");
+ goto exit_1;
+ }
+ }
+ inp_start = inp;
+
+ if (posp)
+ *posp = 0;
+
+ if (fill) {
+ size = fill(inp, 4);
+ if (size < 4) {
+ error("data corrupted");
+ goto exit_2;
+ }
+ }
+
+ chunksize = get_unaligned_le32(inp);
+ if (chunksize == ARCHIVE_MAGICNUMBER) {
+ if (!fill) {
+ inp += 4;
+ size -= 4;
+ }
+ } else {
+ error("invalid header");
+ goto exit_2;
+ }
+
+ if (posp)
+ *posp += 4;
+
+ for (;;) {
+
+ if (fill) {
+ size = fill(inp, 4);
+ if (size == 0)
+ break;
+ if (size < 4) {
+ error("data corrupted");
+ goto exit_2;
+ }
+ } else if (size < 4) {
+ /* empty or end-of-file */
+ goto exit_3;
+ }
+
+ chunksize = get_unaligned_le32(inp);
+ if (chunksize == ARCHIVE_MAGICNUMBER) {
+ if (!fill) {
+ inp += 4;
+ size -= 4;
+ }
+ if (posp)
+ *posp += 4;
+ continue;
+ }
+
+ if (!fill && chunksize == 0) {
+ /* empty or end-of-file */
+ goto exit_3;
+ }
+
+ if (posp)
+ *posp += 4;
+
+ if (!fill) {
+ inp += 4;
+ size -= 4;
+ } else {
+ if (chunksize > LZ4_compressBound(uncomp_chunksize)) {
+ error("chunk length is longer than allocated");
+ goto exit_2;
+ }
+ size = fill(inp, chunksize);
+ if (size < chunksize) {
+ error("data corrupted");
+ goto exit_2;
+ }
+ }
+#ifdef PREBOOT
+ if (out_len >= uncomp_chunksize) {
+ dest_len = uncomp_chunksize;
+ out_len -= dest_len;
+ } else
+ dest_len = out_len;
+
+ ret = LZ4_decompress_fast(inp, outp, dest_len);
+ chunksize = ret;
+#else
+ dest_len = uncomp_chunksize;
+
+ ret = LZ4_decompress_safe(inp, outp, chunksize, dest_len);
+ dest_len = ret;
+#endif
+ if (ret < 0) {
+ error("Decoding failed");
+ goto exit_2;
+ }
+
+ ret = -1;
+ if (flush && flush(outp, dest_len) != dest_len)
+ goto exit_2;
+ if (output)
+ outp += dest_len;
+ if (posp)
+ *posp += chunksize;
+
+ if (!fill) {
+ size -= chunksize;
+
+ if (size == 0)
+ break;
+ else if (size < 0) {
+ error("data corrupted");
+ goto exit_2;
+ }
+ inp += chunksize;
+ }
+ }
+
+exit_3:
+ ret = 0;
+exit_2:
+ if (!input)
+ large_free(inp_start);
+exit_1:
+ if (!output)
+ large_free(outp);
+exit_0:
+ return ret;
+}
+
+#ifdef PREBOOT
+STATIC int INIT __decompress(unsigned char *buf, long in_len,
+ long (*fill)(void*, unsigned long),
+ long (*flush)(void*, unsigned long),
+ unsigned char *output, long out_len,
+ long *posp,
+ void (*error)(char *x)
+ )
+{
+ return unlz4(buf, in_len - 4, fill, flush, output, posp, error);
+}
+#endif
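+
+/*
+ * Note: in the pre-boot path the build appends the uncompressed length as
+ * a trailing le32. __decompress() trims those 4 bytes from in_len, and
+ * unlz4() reads them back via get_unaligned_le32(input + in_len) above to
+ * size the final chunk.
+ */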
diff --git a/lib/decompress_unlzma.c b/lib/decompress_unlzma.c
new file mode 100644
index 000000000..20a858031
--- /dev/null
+++ b/lib/decompress_unlzma.c
@@ -0,0 +1,679 @@
+/* Lzma decompressor for Linux kernel. Shamelessly snarfed
+ *from busybox 1.1.1
+ *
+ *Linux kernel adaptation
+ *Copyright (C) 2006 Alain < alain@knaff.lu >
+ *
+ *Based on small lzma deflate implementation/Small range coder
+ *implementation for lzma.
+ *Copyright (C) 2006 Aurelien Jacobs < aurel@gnuage.org >
+ *
+ *Based on LzmaDecode.c from the LZMA SDK 4.22 (https://www.7-zip.org/)
+ *Copyright (C) 1999-2005 Igor Pavlov
+ *
+ *Copyrights of the parts, see headers below.
+ *
+ *
+ *This program is free software; you can redistribute it and/or
+ *modify it under the terms of the GNU Lesser General Public
+ *License as published by the Free Software Foundation; either
+ *version 2.1 of the License, or (at your option) any later version.
+ *
+ *This program is distributed in the hope that it will be useful,
+ *but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ *Lesser General Public License for more details.
+ *
+ *You should have received a copy of the GNU Lesser General Public
+ *License along with this library; if not, write to the Free Software
+ *Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifdef STATIC
+#define PREBOOT
+#else
+#include <linux/decompress/unlzma.h>
+#endif /* STATIC */
+
+#include <linux/decompress/mm.h>
+
+#define MIN(a, b) (((a) < (b)) ? (a) : (b))
+
+static long long INIT read_int(unsigned char *ptr, int size)
+{
+ int i;
+ long long ret = 0;
+
+ for (i = 0; i < size; i++)
+ ret = (ret << 8) | ptr[size-i-1];
+ return ret;
+}
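+
+/*
+ * Example (illustrative): with ptr = {0x78, 0x56, 0x34, 0x12} and size = 4,
+ * read_int() returns 0x12345678, i.e. it decodes little-endian fields such
+ * as the lzma_header members converted via ENDIAN_CONVERT below.
+ */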
+
+#define ENDIAN_CONVERT(x) \
+ x = (typeof(x))read_int((unsigned char *)&x, sizeof(x))
+
+
+/* Small range coder implementation for lzma.
+ *Copyright (C) 2006 Aurelien Jacobs < aurel@gnuage.org >
+ *
+ *Based on LzmaDecode.c from the LZMA SDK 4.22 (https://www.7-zip.org/)
+ *Copyright (c) 1999-2005 Igor Pavlov
+ */
+
+#include <linux/compiler.h>
+
+#define LZMA_IOBUF_SIZE 0x10000
+
+struct rc {
+ long (*fill)(void*, unsigned long);
+ uint8_t *ptr;
+ uint8_t *buffer;
+ uint8_t *buffer_end;
+ long buffer_size;
+ uint32_t code;
+ uint32_t range;
+ uint32_t bound;
+ void (*error)(char *);
+};
+
+
+#define RC_TOP_BITS 24
+#define RC_MOVE_BITS 5
+#define RC_MODEL_TOTAL_BITS 11
+
+
+static long INIT nofill(void *buffer, unsigned long len)
+{
+ return -1;
+}
+
+/* Called twice: once at startup and once in rc_normalize() */
+static void INIT rc_read(struct rc *rc)
+{
+ rc->buffer_size = rc->fill((char *)rc->buffer, LZMA_IOBUF_SIZE);
+ if (rc->buffer_size <= 0)
+ rc->error("unexpected EOF");
+ rc->ptr = rc->buffer;
+ rc->buffer_end = rc->buffer + rc->buffer_size;
+}
+
+/* Called once */
+static inline void INIT rc_init(struct rc *rc,
+ long (*fill)(void*, unsigned long),
+ char *buffer, long buffer_size)
+{
+ if (fill)
+ rc->fill = fill;
+ else
+ rc->fill = nofill;
+ rc->buffer = (uint8_t *)buffer;
+ rc->buffer_size = buffer_size;
+ rc->buffer_end = rc->buffer + rc->buffer_size;
+ rc->ptr = rc->buffer;
+
+ rc->code = 0;
+ rc->range = 0xFFFFFFFF;
+}
+
+static inline void INIT rc_init_code(struct rc *rc)
+{
+ int i;
+
+ for (i = 0; i < 5; i++) {
+ if (rc->ptr >= rc->buffer_end)
+ rc_read(rc);
+ rc->code = (rc->code << 8) | *rc->ptr++;
+ }
+}
+
+
+/* Called twice, but one callsite is in inline'd rc_is_bit_0_helper() */
+static void INIT rc_do_normalize(struct rc *rc)
+{
+ if (rc->ptr >= rc->buffer_end)
+ rc_read(rc);
+ rc->range <<= 8;
+ rc->code = (rc->code << 8) | *rc->ptr++;
+}
+static inline void INIT rc_normalize(struct rc *rc)
+{
+ if (rc->range < (1 << RC_TOP_BITS))
+ rc_do_normalize(rc);
+}
+
+/* Called 9 times */
+/* Why does rc_is_bit_0_helper exist?
+ * Because we want to always expose (rc->code < rc->bound) to the optimizer.
+ */
+static inline uint32_t INIT rc_is_bit_0_helper(struct rc *rc, uint16_t *p)
+{
+ rc_normalize(rc);
+ rc->bound = *p * (rc->range >> RC_MODEL_TOTAL_BITS);
+ return rc->bound;
+}
+static inline int INIT rc_is_bit_0(struct rc *rc, uint16_t *p)
+{
+ uint32_t t = rc_is_bit_0_helper(rc, p);
+ return rc->code < t;
+}
+
+/* Called ~10 times, but very small, thus inlined */
+static inline void INIT rc_update_bit_0(struct rc *rc, uint16_t *p)
+{
+ rc->range = rc->bound;
+ *p += ((1 << RC_MODEL_TOTAL_BITS) - *p) >> RC_MOVE_BITS;
+}
+static inline void INIT rc_update_bit_1(struct rc *rc, uint16_t *p)
+{
+ rc->range -= rc->bound;
+ rc->code -= rc->bound;
+ *p -= *p >> RC_MOVE_BITS;
+}
+
+/* Called 4 times in unlzma loop */
+static int INIT rc_get_bit(struct rc *rc, uint16_t *p, int *symbol)
+{
+ if (rc_is_bit_0(rc, p)) {
+ rc_update_bit_0(rc, p);
+ *symbol *= 2;
+ return 0;
+ } else {
+ rc_update_bit_1(rc, p);
+ *symbol = *symbol * 2 + 1;
+ return 1;
+ }
+}
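+
+/*
+ * Decode invariant (sketch): each step splits the current range in
+ * proportion to the 11-bit fixed-point probability *p:
+ *
+ *	bound = *p * (range >> 11);
+ *	bit 0: code <  bound  ->  range = bound;  *p += (2048 - *p) >> 5;
+ *	bit 1: code >= bound  ->  range -= bound; code -= bound;
+ *				  *p -= *p >> 5;
+ *
+ * so a probability estimate drifts toward 2048 on zeros and toward 0 on
+ * ones, exactly as rc_update_bit_0()/rc_update_bit_1() implement above.
+ */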
+
+/* Called once */
+static inline int INIT rc_direct_bit(struct rc *rc)
+{
+ rc_normalize(rc);
+ rc->range >>= 1;
+ if (rc->code >= rc->range) {
+ rc->code -= rc->range;
+ return 1;
+ }
+ return 0;
+}
+
+/* Called twice */
+static inline void INIT
+rc_bit_tree_decode(struct rc *rc, uint16_t *p, int num_levels, int *symbol)
+{
+ int i = num_levels;
+
+ *symbol = 1;
+ while (i--)
+ rc_get_bit(rc, p + *symbol, symbol);
+ *symbol -= 1 << num_levels;
+}
+
+
+/*
+ * Small lzma deflate implementation.
+ * Copyright (C) 2006 Aurelien Jacobs < aurel@gnuage.org >
+ *
+ * Based on LzmaDecode.c from the LZMA SDK 4.22 (https://www.7-zip.org/)
+ * Copyright (C) 1999-2005 Igor Pavlov
+ */
+
+
+struct lzma_header {
+ uint8_t pos;
+ uint32_t dict_size;
+ uint64_t dst_size;
+} __attribute__ ((packed)) ;
+
+
+#define LZMA_BASE_SIZE 1846
+#define LZMA_LIT_SIZE 768
+
+#define LZMA_NUM_POS_BITS_MAX 4
+
+#define LZMA_LEN_NUM_LOW_BITS 3
+#define LZMA_LEN_NUM_MID_BITS 3
+#define LZMA_LEN_NUM_HIGH_BITS 8
+
+#define LZMA_LEN_CHOICE 0
+#define LZMA_LEN_CHOICE_2 (LZMA_LEN_CHOICE + 1)
+#define LZMA_LEN_LOW (LZMA_LEN_CHOICE_2 + 1)
+#define LZMA_LEN_MID (LZMA_LEN_LOW \
+ + (1 << (LZMA_NUM_POS_BITS_MAX + LZMA_LEN_NUM_LOW_BITS)))
+#define LZMA_LEN_HIGH (LZMA_LEN_MID \
+ +(1 << (LZMA_NUM_POS_BITS_MAX + LZMA_LEN_NUM_MID_BITS)))
+#define LZMA_NUM_LEN_PROBS (LZMA_LEN_HIGH + (1 << LZMA_LEN_NUM_HIGH_BITS))
+
+#define LZMA_NUM_STATES 12
+#define LZMA_NUM_LIT_STATES 7
+
+#define LZMA_START_POS_MODEL_INDEX 4
+#define LZMA_END_POS_MODEL_INDEX 14
+#define LZMA_NUM_FULL_DISTANCES (1 << (LZMA_END_POS_MODEL_INDEX >> 1))
+
+#define LZMA_NUM_POS_SLOT_BITS 6
+#define LZMA_NUM_LEN_TO_POS_STATES 4
+
+#define LZMA_NUM_ALIGN_BITS 4
+
+#define LZMA_MATCH_MIN_LEN 2
+
+#define LZMA_IS_MATCH 0
+#define LZMA_IS_REP (LZMA_IS_MATCH + (LZMA_NUM_STATES << LZMA_NUM_POS_BITS_MAX))
+#define LZMA_IS_REP_G0 (LZMA_IS_REP + LZMA_NUM_STATES)
+#define LZMA_IS_REP_G1 (LZMA_IS_REP_G0 + LZMA_NUM_STATES)
+#define LZMA_IS_REP_G2 (LZMA_IS_REP_G1 + LZMA_NUM_STATES)
+#define LZMA_IS_REP_0_LONG (LZMA_IS_REP_G2 + LZMA_NUM_STATES)
+#define LZMA_POS_SLOT (LZMA_IS_REP_0_LONG \
+ + (LZMA_NUM_STATES << LZMA_NUM_POS_BITS_MAX))
+#define LZMA_SPEC_POS (LZMA_POS_SLOT \
+ +(LZMA_NUM_LEN_TO_POS_STATES << LZMA_NUM_POS_SLOT_BITS))
+#define LZMA_ALIGN (LZMA_SPEC_POS \
+ + LZMA_NUM_FULL_DISTANCES - LZMA_END_POS_MODEL_INDEX)
+#define LZMA_LEN_CODER (LZMA_ALIGN + (1 << LZMA_NUM_ALIGN_BITS))
+#define LZMA_REP_LEN_CODER (LZMA_LEN_CODER + LZMA_NUM_LEN_PROBS)
+#define LZMA_LITERAL (LZMA_REP_LEN_CODER + LZMA_NUM_LEN_PROBS)
+
+
+struct writer {
+ uint8_t *buffer;
+ uint8_t previous_byte;
+ size_t buffer_pos;
+ int bufsize;
+ size_t global_pos;
+ long (*flush)(void*, unsigned long);
+ struct lzma_header *header;
+};
+
+struct cstate {
+ int state;
+ uint32_t rep0, rep1, rep2, rep3;
+};
+
+static inline size_t INIT get_pos(struct writer *wr)
+{
+	return wr->global_pos + wr->buffer_pos;
+}
+
+static inline uint8_t INIT peek_old_byte(struct writer *wr,
+ uint32_t offs)
+{
+ if (!wr->flush) {
+ int32_t pos;
+ while (offs > wr->header->dict_size)
+ offs -= wr->header->dict_size;
+ pos = wr->buffer_pos - offs;
+ return wr->buffer[pos];
+ } else {
+ uint32_t pos = wr->buffer_pos - offs;
+ while (pos >= wr->header->dict_size)
+ pos += wr->header->dict_size;
+ return wr->buffer[pos];
+ }
+}
+
+static inline int INIT write_byte(struct writer *wr, uint8_t byte)
+{
+ wr->buffer[wr->buffer_pos++] = wr->previous_byte = byte;
+ if (wr->flush && wr->buffer_pos == wr->header->dict_size) {
+ wr->buffer_pos = 0;
+ wr->global_pos += wr->header->dict_size;
+ if (wr->flush((char *)wr->buffer, wr->header->dict_size)
+ != wr->header->dict_size)
+ return -1;
+ }
+ return 0;
+}
+
+
+static inline int INIT copy_byte(struct writer *wr, uint32_t offs)
+{
+ return write_byte(wr, peek_old_byte(wr, offs));
+}
+
+static inline int INIT copy_bytes(struct writer *wr,
+ uint32_t rep0, int len)
+{
+ do {
+ if (copy_byte(wr, rep0))
+ return -1;
+ len--;
+ } while (len != 0 && wr->buffer_pos < wr->header->dst_size);
+
+ return len;
+}
+
+static inline int INIT process_bit0(struct writer *wr, struct rc *rc,
+ struct cstate *cst, uint16_t *p,
+ int pos_state, uint16_t *prob,
+ int lc, uint32_t literal_pos_mask) {
+ int mi = 1;
+ rc_update_bit_0(rc, prob);
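+	/*
+	 * Select the literal coder from the low bits of the position
+	 * (the lp parameter) and the top lc bits of the previous byte.
+	 */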
+ prob = (p + LZMA_LITERAL +
+ (LZMA_LIT_SIZE
+ * (((get_pos(wr) & literal_pos_mask) << lc)
+ + (wr->previous_byte >> (8 - lc))))
+ );
+
+ if (cst->state >= LZMA_NUM_LIT_STATES) {
+ int match_byte = peek_old_byte(wr, cst->rep0);
+ do {
+ int bit;
+ uint16_t *prob_lit;
+
+ match_byte <<= 1;
+ bit = match_byte & 0x100;
+ prob_lit = prob + 0x100 + bit + mi;
+ if (rc_get_bit(rc, prob_lit, &mi)) {
+ if (!bit)
+ break;
+ } else {
+ if (bit)
+ break;
+ }
+ } while (mi < 0x100);
+ }
+ while (mi < 0x100) {
+ uint16_t *prob_lit = prob + mi;
+ rc_get_bit(rc, prob_lit, &mi);
+ }
+ if (cst->state < 4)
+ cst->state = 0;
+ else if (cst->state < 10)
+ cst->state -= 3;
+ else
+ cst->state -= 6;
+
+ return write_byte(wr, mi);
+}
+
+static inline int INIT process_bit1(struct writer *wr, struct rc *rc,
+ struct cstate *cst, uint16_t *p,
+ int pos_state, uint16_t *prob) {
+ int offset;
+ uint16_t *prob_len;
+ int num_bits;
+ int len;
+
+ rc_update_bit_1(rc, prob);
+ prob = p + LZMA_IS_REP + cst->state;
+ if (rc_is_bit_0(rc, prob)) {
+ rc_update_bit_0(rc, prob);
+ cst->rep3 = cst->rep2;
+ cst->rep2 = cst->rep1;
+ cst->rep1 = cst->rep0;
+ cst->state = cst->state < LZMA_NUM_LIT_STATES ? 0 : 3;
+ prob = p + LZMA_LEN_CODER;
+ } else {
+ rc_update_bit_1(rc, prob);
+ prob = p + LZMA_IS_REP_G0 + cst->state;
+ if (rc_is_bit_0(rc, prob)) {
+ rc_update_bit_0(rc, prob);
+ prob = (p + LZMA_IS_REP_0_LONG
+ + (cst->state <<
+ LZMA_NUM_POS_BITS_MAX) +
+ pos_state);
+ if (rc_is_bit_0(rc, prob)) {
+ rc_update_bit_0(rc, prob);
+
+ cst->state = cst->state < LZMA_NUM_LIT_STATES ?
+ 9 : 11;
+ return copy_byte(wr, cst->rep0);
+ } else {
+ rc_update_bit_1(rc, prob);
+ }
+ } else {
+ uint32_t distance;
+
+ rc_update_bit_1(rc, prob);
+ prob = p + LZMA_IS_REP_G1 + cst->state;
+ if (rc_is_bit_0(rc, prob)) {
+ rc_update_bit_0(rc, prob);
+ distance = cst->rep1;
+ } else {
+ rc_update_bit_1(rc, prob);
+ prob = p + LZMA_IS_REP_G2 + cst->state;
+ if (rc_is_bit_0(rc, prob)) {
+ rc_update_bit_0(rc, prob);
+ distance = cst->rep2;
+ } else {
+ rc_update_bit_1(rc, prob);
+ distance = cst->rep3;
+ cst->rep3 = cst->rep2;
+ }
+ cst->rep2 = cst->rep1;
+ }
+ cst->rep1 = cst->rep0;
+ cst->rep0 = distance;
+ }
+ cst->state = cst->state < LZMA_NUM_LIT_STATES ? 8 : 11;
+ prob = p + LZMA_REP_LEN_CODER;
+ }
+
+ prob_len = prob + LZMA_LEN_CHOICE;
+ if (rc_is_bit_0(rc, prob_len)) {
+ rc_update_bit_0(rc, prob_len);
+ prob_len = (prob + LZMA_LEN_LOW
+ + (pos_state <<
+ LZMA_LEN_NUM_LOW_BITS));
+ offset = 0;
+ num_bits = LZMA_LEN_NUM_LOW_BITS;
+ } else {
+ rc_update_bit_1(rc, prob_len);
+ prob_len = prob + LZMA_LEN_CHOICE_2;
+ if (rc_is_bit_0(rc, prob_len)) {
+ rc_update_bit_0(rc, prob_len);
+ prob_len = (prob + LZMA_LEN_MID
+ + (pos_state <<
+ LZMA_LEN_NUM_MID_BITS));
+ offset = 1 << LZMA_LEN_NUM_LOW_BITS;
+ num_bits = LZMA_LEN_NUM_MID_BITS;
+ } else {
+ rc_update_bit_1(rc, prob_len);
+ prob_len = prob + LZMA_LEN_HIGH;
+ offset = ((1 << LZMA_LEN_NUM_LOW_BITS)
+ + (1 << LZMA_LEN_NUM_MID_BITS));
+ num_bits = LZMA_LEN_NUM_HIGH_BITS;
+ }
+ }
+
+ rc_bit_tree_decode(rc, prob_len, num_bits, &len);
+ len += offset;
+
+ if (cst->state < 4) {
+ int pos_slot;
+
+ cst->state += LZMA_NUM_LIT_STATES;
+ prob =
+ p + LZMA_POS_SLOT +
+ ((len <
+ LZMA_NUM_LEN_TO_POS_STATES ? len :
+ LZMA_NUM_LEN_TO_POS_STATES - 1)
+ << LZMA_NUM_POS_SLOT_BITS);
+ rc_bit_tree_decode(rc, prob,
+ LZMA_NUM_POS_SLOT_BITS,
+ &pos_slot);
+ if (pos_slot >= LZMA_START_POS_MODEL_INDEX) {
+ int i, mi;
+ num_bits = (pos_slot >> 1) - 1;
+ cst->rep0 = 2 | (pos_slot & 1);
+ if (pos_slot < LZMA_END_POS_MODEL_INDEX) {
+ cst->rep0 <<= num_bits;
+ prob = p + LZMA_SPEC_POS +
+ cst->rep0 - pos_slot - 1;
+ } else {
+ num_bits -= LZMA_NUM_ALIGN_BITS;
+ while (num_bits--)
+ cst->rep0 = (cst->rep0 << 1) |
+ rc_direct_bit(rc);
+ prob = p + LZMA_ALIGN;
+ cst->rep0 <<= LZMA_NUM_ALIGN_BITS;
+ num_bits = LZMA_NUM_ALIGN_BITS;
+ }
+ i = 1;
+ mi = 1;
+ while (num_bits--) {
+ if (rc_get_bit(rc, prob + mi, &mi))
+ cst->rep0 |= i;
+ i <<= 1;
+ }
+ } else
+ cst->rep0 = pos_slot;
+ if (++(cst->rep0) == 0)
+ return 0;
+ if (cst->rep0 > wr->header->dict_size
+ || cst->rep0 > get_pos(wr))
+ return -1;
+ }
+
+ len += LZMA_MATCH_MIN_LEN;
+
+ return copy_bytes(wr, cst->rep0, len);
+}
+
+
+
+STATIC inline int INIT unlzma(unsigned char *buf, long in_len,
+ long (*fill)(void*, unsigned long),
+ long (*flush)(void*, unsigned long),
+ unsigned char *output,
+ long *posp,
+ void(*error)(char *x)
+ )
+{
+ struct lzma_header header;
+ int lc, pb, lp;
+ uint32_t pos_state_mask;
+ uint32_t literal_pos_mask;
+ uint16_t *p;
+ int num_probs;
+ struct rc rc;
+ int i, mi;
+ struct writer wr;
+ struct cstate cst;
+ unsigned char *inbuf;
+ int ret = -1;
+
+ rc.error = error;
+
+ if (buf)
+ inbuf = buf;
+ else
+ inbuf = malloc(LZMA_IOBUF_SIZE);
+ if (!inbuf) {
+ error("Could not allocate input buffer");
+ goto exit_0;
+ }
+
+ cst.state = 0;
+ cst.rep0 = cst.rep1 = cst.rep2 = cst.rep3 = 1;
+
+ wr.header = &header;
+ wr.flush = flush;
+ wr.global_pos = 0;
+ wr.previous_byte = 0;
+ wr.buffer_pos = 0;
+
+ rc_init(&rc, fill, inbuf, in_len);
+
+ for (i = 0; i < sizeof(header); i++) {
+ if (rc.ptr >= rc.buffer_end)
+ rc_read(&rc);
+ ((unsigned char *)&header)[i] = *rc.ptr++;
+ }
+
+ if (header.pos >= (9 * 5 * 5)) {
+ error("bad header");
+ goto exit_1;
+ }
+
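+	/*
+	 * The properties byte encodes the three LZMA parameters as
+	 * pos = (pb * 5 + lp) * 9 + lc, so lc falls out of repeated
+	 * subtraction of 9, and lp/pb out of repeated subtraction of 5.
+	 */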
+ mi = 0;
+ lc = header.pos;
+ while (lc >= 9) {
+ mi++;
+ lc -= 9;
+ }
+ pb = 0;
+ lp = mi;
+ while (lp >= 5) {
+ pb++;
+ lp -= 5;
+ }
+ pos_state_mask = (1 << pb) - 1;
+ literal_pos_mask = (1 << lp) - 1;
+
+ ENDIAN_CONVERT(header.dict_size);
+ ENDIAN_CONVERT(header.dst_size);
+
+ if (header.dict_size == 0)
+ header.dict_size = 1;
+
+ if (output)
+ wr.buffer = output;
+ else {
+ wr.bufsize = MIN(header.dst_size, header.dict_size);
+ wr.buffer = large_malloc(wr.bufsize);
+ }
+ if (wr.buffer == NULL)
+ goto exit_1;
+
+ num_probs = LZMA_BASE_SIZE + (LZMA_LIT_SIZE << (lc + lp));
+ p = (uint16_t *) large_malloc(num_probs * sizeof(*p));
+ if (p == NULL)
+ goto exit_2;
+ num_probs = LZMA_LITERAL + (LZMA_LIT_SIZE << (lc + lp));
+ for (i = 0; i < num_probs; i++)
+ p[i] = (1 << RC_MODEL_TOTAL_BITS) >> 1;
+
+ rc_init_code(&rc);
+
+ while (get_pos(&wr) < header.dst_size) {
+ int pos_state = get_pos(&wr) & pos_state_mask;
+ uint16_t *prob = p + LZMA_IS_MATCH +
+ (cst.state << LZMA_NUM_POS_BITS_MAX) + pos_state;
+ if (rc_is_bit_0(&rc, prob)) {
+ if (process_bit0(&wr, &rc, &cst, p, pos_state, prob,
+ lc, literal_pos_mask)) {
+ error("LZMA data is corrupt");
+ goto exit_3;
+ }
+ } else {
+ if (process_bit1(&wr, &rc, &cst, p, pos_state, prob)) {
+ error("LZMA data is corrupt");
+ goto exit_3;
+ }
+ if (cst.rep0 == 0)
+ break;
+ }
+ if (rc.buffer_size <= 0)
+ goto exit_3;
+ }
+
+ if (posp)
+ *posp = rc.ptr-rc.buffer;
+ if (!wr.flush || wr.flush(wr.buffer, wr.buffer_pos) == wr.buffer_pos)
+ ret = 0;
+exit_3:
+ large_free(p);
+exit_2:
+ if (!output)
+ large_free(wr.buffer);
+exit_1:
+ if (!buf)
+ free(inbuf);
+exit_0:
+ return ret;
+}
+
+#ifdef PREBOOT
+STATIC int INIT __decompress(unsigned char *buf, long in_len,
+ long (*fill)(void*, unsigned long),
+ long (*flush)(void*, unsigned long),
+ unsigned char *output, long out_len,
+ long *posp,
+ void (*error)(char *x))
+{
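+	/*
+	 * The kernel build appends the 32-bit uncompressed size to the
+	 * image; it is not part of the LZMA stream, so drop those 4 bytes.
+	 */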
+ return unlzma(buf, in_len - 4, fill, flush, output, posp, error);
+}
+#endif
diff --git a/lib/decompress_unlzo.c b/lib/decompress_unlzo.c
new file mode 100644
index 000000000..64c135850
--- /dev/null
+++ b/lib/decompress_unlzo.c
@@ -0,0 +1,286 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * LZO decompressor for the Linux kernel. Code borrowed from the lzo
+ * implementation by Markus Franz Xaver Johannes Oberhumer.
+ *
+ * Linux kernel adaptation:
+ * Copyright (C) 2009
+ * Albin Tonnerre, Free Electrons <albin.tonnerre@free-electrons.com>
+ *
+ * Original code:
+ * Copyright (C) 1996-2005 Markus Franz Xaver Johannes Oberhumer
+ * All Rights Reserved.
+ *
+ * Markus F.X.J. Oberhumer
+ * <markus@oberhumer.com>
+ * http://www.oberhumer.com/opensource/lzop/
+ */
+
+#ifdef STATIC
+#define PREBOOT
+#include "lzo/lzo1x_decompress_safe.c"
+#else
+#include <linux/decompress/unlzo.h>
+#endif
+
+#include <linux/types.h>
+#include <linux/lzo.h>
+#include <linux/decompress/mm.h>
+
+#include <linux/compiler.h>
+#include <asm/unaligned.h>
+
+static const unsigned char lzop_magic[] = {
+ 0x89, 0x4c, 0x5a, 0x4f, 0x00, 0x0d, 0x0a, 0x1a, 0x0a };
+
+#define LZO_BLOCK_SIZE (256*1024l)
+#define HEADER_HAS_FILTER 0x00000800L
+#define HEADER_SIZE_MIN (9 + 7 + 4 + 8 + 1 + 4)
+#define HEADER_SIZE_MAX (9 + 7 + 1 + 8 + 8 + 4 + 1 + 255 + 4)
+
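+/*
+ * lzop header layout as parsed below (sizes in bytes; bracketed fields
+ * are conditional):
+ *
+ *	magic(9), version(2), lib_version(2), version_needed(2), method(1),
+ *	[level(1) if version >= 0x0940], flags(4), [filter(4) if flagged],
+ *	mode(4), mtime_low(4), [mtime_high(4) if version >= 0x0940],
+ *	name_len(1), name(name_len), checksum(4)
+ */
+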
+STATIC inline long INIT parse_header(u8 *input, long *skip, long in_len)
+{
+ int l;
+ u8 *parse = input;
+ u8 *end = input + in_len;
+ u16 version;
+
+ /*
+ * Check that there's enough input to possibly have a valid header.
+	 * Several fields can then be parsed before the guaranteed
+	 * minimum number of bytes has been consumed.
+ */
+ if (in_len < HEADER_SIZE_MIN)
+ return 0;
+
+	/* read the first 9 magic bytes */
+ for (l = 0; l < 9; l++) {
+ if (*parse++ != lzop_magic[l])
+ return 0;
+ }
+	/* get version (2 bytes), skip library version (2),
+ * 'need to be extracted' version (2) and
+ * method (1) */
+ version = get_unaligned_be16(parse);
+ parse += 7;
+ if (version >= 0x0940)
+ parse++;
+ if (get_unaligned_be32(parse) & HEADER_HAS_FILTER)
+ parse += 8; /* flags + filter info */
+ else
+ parse += 4; /* flags */
+
+ /*
+ * At least mode, mtime_low, filename length, and checksum must
+ * be left to be parsed. If also mtime_high is present, it's OK
+ * because the next input buffer check is after reading the
+ * filename length.
+ */
+ if (end - parse < 8 + 1 + 4)
+ return 0;
+
+ /* skip mode and mtime_low */
+ parse += 8;
+ if (version >= 0x0940)
+ parse += 4; /* skip mtime_high */
+
+ l = *parse++;
+ /* don't care about the file name, and skip checksum */
+ if (end - parse < l + 4)
+ return 0;
+ parse += l + 4;
+
+ *skip = parse - input;
+ return 1;
+}
+
+STATIC int INIT unlzo(u8 *input, long in_len,
+ long (*fill)(void *, unsigned long),
+ long (*flush)(void *, unsigned long),
+ u8 *output, long *posp,
+ void (*error) (char *x))
+{
+ u8 r = 0;
+ long skip = 0;
+ u32 src_len, dst_len;
+ size_t tmp;
+ u8 *in_buf, *in_buf_save, *out_buf;
+ int ret = -1;
+
+ if (output) {
+ out_buf = output;
+ } else if (!flush) {
+ error("NULL output pointer and no flush function provided");
+ goto exit;
+ } else {
+ out_buf = malloc(LZO_BLOCK_SIZE);
+ if (!out_buf) {
+ error("Could not allocate output buffer");
+ goto exit;
+ }
+ }
+
+ if (input && fill) {
+ error("Both input pointer and fill function provided, don't know what to do");
+ goto exit_1;
+ } else if (input) {
+ in_buf = input;
+ } else if (!fill) {
+ error("NULL input pointer and missing fill function");
+ goto exit_1;
+ } else {
+ in_buf = malloc(lzo1x_worst_compress(LZO_BLOCK_SIZE));
+ if (!in_buf) {
+ error("Could not allocate input buffer");
+ goto exit_1;
+ }
+ }
+ in_buf_save = in_buf;
+
+ if (posp)
+ *posp = 0;
+
+ if (fill) {
+ /*
+ * Start from in_buf + HEADER_SIZE_MAX to make it possible
+ * to use memcpy() to copy the unused data to the beginning
+ * of the buffer. This way memmove() isn't needed which
+ * is missing from pre-boot environments of most archs.
+ */
+ in_buf += HEADER_SIZE_MAX;
+ in_len = fill(in_buf, HEADER_SIZE_MAX);
+ }
+
+ if (!parse_header(in_buf, &skip, in_len)) {
+ error("invalid header");
+ goto exit_2;
+ }
+ in_buf += skip;
+ in_len -= skip;
+
+ if (fill) {
+ /* Move the unused data to the beginning of the buffer. */
+ memcpy(in_buf_save, in_buf, in_len);
+ in_buf = in_buf_save;
+ }
+
+ if (posp)
+ *posp = skip;
+
+ for (;;) {
+ /* read uncompressed block size */
+ if (fill && in_len < 4) {
+ skip = fill(in_buf + in_len, 4 - in_len);
+ if (skip > 0)
+ in_len += skip;
+ }
+ if (in_len < 4) {
+ error("file corrupted");
+ goto exit_2;
+ }
+ dst_len = get_unaligned_be32(in_buf);
+ in_buf += 4;
+ in_len -= 4;
+
+ /* exit if last block */
+ if (dst_len == 0) {
+ if (posp)
+ *posp += 4;
+ break;
+ }
+
+ if (dst_len > LZO_BLOCK_SIZE) {
+ error("dest len longer than block size");
+ goto exit_2;
+ }
+
+ /* read compressed block size, and skip block checksum info */
+ if (fill && in_len < 8) {
+ skip = fill(in_buf + in_len, 8 - in_len);
+ if (skip > 0)
+ in_len += skip;
+ }
+ if (in_len < 8) {
+ error("file corrupted");
+ goto exit_2;
+ }
+ src_len = get_unaligned_be32(in_buf);
+ in_buf += 8;
+ in_len -= 8;
+
+ if (src_len <= 0 || src_len > dst_len) {
+ error("file corrupted");
+ goto exit_2;
+ }
+
+ /* decompress */
+ if (fill && in_len < src_len) {
+ skip = fill(in_buf + in_len, src_len - in_len);
+ if (skip > 0)
+ in_len += skip;
+ }
+ if (in_len < src_len) {
+ error("file corrupted");
+ goto exit_2;
+ }
+ tmp = dst_len;
+
+ /* When the input data is not compressed at all,
+ * lzo1x_decompress_safe will fail, so call memcpy()
+ * instead */
+ if (unlikely(dst_len == src_len))
+ memcpy(out_buf, in_buf, src_len);
+ else {
+ r = lzo1x_decompress_safe((u8 *) in_buf, src_len,
+ out_buf, &tmp);
+
+ if (r != LZO_E_OK || dst_len != tmp) {
+ error("Compressed data violation");
+ goto exit_2;
+ }
+ }
+
+ if (flush && flush(out_buf, dst_len) != dst_len)
+ goto exit_2;
+ if (output)
+ out_buf += dst_len;
+ if (posp)
+ *posp += src_len + 12;
+
+ in_buf += src_len;
+ in_len -= src_len;
+ if (fill) {
+ /*
+ * If there happens to still be unused data left in
+ * in_buf, move it to the beginning of the buffer.
+ * Use a loop to avoid memmove() dependency.
+ */
+ if (in_len > 0)
+ for (skip = 0; skip < in_len; ++skip)
+ in_buf_save[skip] = in_buf[skip];
+ in_buf = in_buf_save;
+ }
+ }
+
+ ret = 0;
+exit_2:
+ if (!input)
+ free(in_buf_save);
+exit_1:
+ if (!output)
+ free(out_buf);
+exit:
+ return ret;
+}
+
+#ifdef PREBOOT
+STATIC int INIT __decompress(unsigned char *buf, long len,
+ long (*fill)(void*, unsigned long),
+ long (*flush)(void*, unsigned long),
+ unsigned char *out_buf, long olen,
+ long *pos,
+ void (*error)(char *x))
+{
+ return unlzo(buf, len, fill, flush, out_buf, pos, error);
+}
+#endif
diff --git a/lib/decompress_unxz.c b/lib/decompress_unxz.c
new file mode 100644
index 000000000..353268b9f
--- /dev/null
+++ b/lib/decompress_unxz.c
@@ -0,0 +1,409 @@
+/*
+ * Wrapper for decompressing XZ-compressed kernel, initramfs, and initrd
+ *
+ * Author: Lasse Collin <lasse.collin@tukaani.org>
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ */
+
+/*
+ * Important notes about in-place decompression
+ *
+ * At least on x86, the kernel is decompressed in place: the compressed data
+ * is placed to the end of the output buffer, and the decompressor overwrites
+ * most of the compressed data. There must be enough safety margin to
+ * guarantee that the write position is always behind the read position.
+ *
+ * The safety margin for XZ with LZMA2 or BCJ+LZMA2 is calculated below.
+ * Note that the margin with XZ is bigger than with Deflate (gzip)!
+ *
+ * The worst case for in-place decompression is that the beginning of
+ * the file is compressed extremely well, and the rest of the file is
+ * incompressible. Thus, we must look for worst-case expansion when the
+ * compressor is encoding incompressible data.
+ *
+ * The structure of the .xz file in case of a compressed kernel is as follows.
+ * Sizes (as bytes) of the fields are in parentheses.
+ *
+ * Stream Header (12)
+ * Block Header:
+ * Block Header (8-12)
+ * Compressed Data (N)
+ * Block Padding (0-3)
+ * CRC32 (4)
+ * Index (8-20)
+ * Stream Footer (12)
+ *
+ * Normally there is exactly one Block, but let's assume that there are
+ * 2-4 Blocks just in case. Because Stream Header and also Block Header
+ * of the first Block don't make the decompressor produce any uncompressed
+ * data, we can ignore them from our calculations. Block Headers of possible
+ * additional Blocks have to be taken into account still. With these
+ * assumptions, it is safe to assume that the total header overhead is
+ * less than 128 bytes.
+ *
+ * Compressed Data contains LZMA2 or BCJ+LZMA2 encoded data. Since BCJ
+ * doesn't change the size of the data, it is enough to calculate the
+ * safety margin for LZMA2.
+ *
+ * LZMA2 stores the data in chunks. Each chunk has a header whose size is
+ * a maximum of 6 bytes, but to get round 2^n numbers, let's assume that
+ * the maximum chunk header size is 8 bytes. After the chunk header, there
+ * may be up to 64 KiB of actual payload in the chunk. Often the payload is
+ * quite a bit smaller though; to be safe, let's assume that an average
+ * chunk has only 32 KiB of payload.
+ *
+ * The maximum uncompressed size of the payload is 2 MiB. The minimum
+ * uncompressed size of the payload is in practice never less than the
+ * payload size itself. The LZMA2 format would allow uncompressed size
+ * to be less than the payload size, but no sane compressor creates such
+ * files. LZMA2 supports storing incompressible data in uncompressed form,
+ * so there's never a need to create payloads whose uncompressed size is
+ * smaller than the compressed size.
+ *
+ * The assumption, that the uncompressed size of the payload is never
+ * smaller than the payload itself, is valid only when talking about
+ * the payload as a whole. It is possible that the payload has parts where
+ * the decompressor consumes more input than it produces output. Calculating
+ * the worst case for this would be tricky. Instead of trying to do that,
+ * let's simply make sure that the decompressor never overwrites any bytes
+ * of the payload which it is currently reading.
+ *
+ * Now we have enough information to calculate the safety margin. We need
+ * - 128 bytes for the .xz file format headers;
+ * - 8 bytes per every 32 KiB of uncompressed size (one LZMA2 chunk header
+ * per chunk, each chunk having average payload size of 32 KiB); and
+ * - 64 KiB (biggest possible LZMA2 chunk payload size) to make sure that
+ * the decompressor never overwrites anything from the LZMA2 chunk
+ * payload it is currently reading.
+ *
+ * We get the following formula:
+ *
+ * safety_margin = 128 + uncompressed_size * 8 / 32768 + 65536
+ * = 128 + (uncompressed_size >> 12) + 65536
+ *
+ * For comparison, according to arch/x86/boot/compressed/misc.c, the
+ * equivalent formula for Deflate is this:
+ *
+ * safety_margin = 18 + (uncompressed_size >> 12) + 32768
+ *
+ * Thus, when updating Deflate-only in-place kernel decompressor to
+ * support XZ, the fixed overhead has to be increased from 18+32768 bytes
+ * to 128+65536 bytes.
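+ *
+ * As an illustrative worked example (not part of the original notes):
+ * for a 32 MiB kernel image,
+ *
+ *	safety_margin = 128 + (33554432 >> 12) + 65536
+ *		      = 128 + 8192 + 65536 = 73856 bytes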
+ */
+
+/*
+ * STATIC is defined to "static" if we are being built for kernel
+ * decompression (pre-boot code). <linux/decompress/mm.h> will define
+ * STATIC to empty if it wasn't already defined. Since we will need to
+ * know later if we are being used for kernel decompression, we define
+ * XZ_PREBOOT here.
+ */
+#ifdef STATIC
+# define XZ_PREBOOT
+#else
+#include <linux/decompress/unxz.h>
+#endif
+#ifdef __KERNEL__
+# include <linux/decompress/mm.h>
+#endif
+#define XZ_EXTERN STATIC
+
+#ifndef XZ_PREBOOT
+# include <linux/slab.h>
+# include <linux/xz.h>
+#else
+/*
+ * Use the internal CRC32 code instead of kernel's CRC32 module, which
+ * is not available in early phase of booting.
+ */
+#define XZ_INTERNAL_CRC32 1
+
+/*
+ * For boot time use, we enable only the BCJ filter of the current
+ * architecture or none if no BCJ filter is available for the architecture.
+ */
+#ifdef CONFIG_X86
+# define XZ_DEC_X86
+#endif
+#ifdef CONFIG_PPC
+# define XZ_DEC_POWERPC
+#endif
+#ifdef CONFIG_ARM
+# define XZ_DEC_ARM
+#endif
+#ifdef CONFIG_IA64
+# define XZ_DEC_IA64
+#endif
+#ifdef CONFIG_SPARC
+# define XZ_DEC_SPARC
+#endif
+
+/*
+ * This will get the basic headers so that memeq() and others
+ * can be defined.
+ */
+#include "xz/xz_private.h"
+
+/*
+ * Replace the normal allocation functions with the versions from
+ * <linux/decompress/mm.h>. vfree() needs to support vfree(NULL)
+ * when XZ_DYNALLOC is used, but the pre-boot free() doesn't support it.
+ * Work around it here because the other decompressors don't need it.
+ */
+#undef kmalloc
+#undef kfree
+#undef vmalloc
+#undef vfree
+#define kmalloc(size, flags) malloc(size)
+#define kfree(ptr) free(ptr)
+#define vmalloc(size) malloc(size)
+#define vfree(ptr) do { if (ptr != NULL) free(ptr); } while (0)
+
+/*
+ * FIXME: Not all basic memory functions are provided in architecture-specific
+ * files (yet). We define our own versions here for now, but this should be
+ * only a temporary solution.
+ *
+ * memeq and memzero are not used much and any remotely sane implementation
+ * is fast enough. memcpy/memmove speed matters in multi-call mode, but
+ * the kernel image is decompressed in single-call mode, in which only
+ * memmove speed can matter and only if there is a lot of incompressible data
+ * (LZMA2 stores incompressible chunks in uncompressed form). Thus, the
+ * functions below should just be kept small; it's probably not worth
+ * optimizing for speed.
+ */
+
+#ifndef memeq
+static bool memeq(const void *a, const void *b, size_t size)
+{
+ const uint8_t *x = a;
+ const uint8_t *y = b;
+ size_t i;
+
+ for (i = 0; i < size; ++i)
+ if (x[i] != y[i])
+ return false;
+
+ return true;
+}
+#endif
+
+#ifndef memzero
+static void memzero(void *buf, size_t size)
+{
+ uint8_t *b = buf;
+ uint8_t *e = b + size;
+
+ while (b != e)
+ *b++ = '\0';
+}
+#endif
+
+#ifndef memmove
+/* Not static to avoid a conflict with the prototype in the Linux headers. */
+void *memmove(void *dest, const void *src, size_t size)
+{
+ uint8_t *d = dest;
+ const uint8_t *s = src;
+ size_t i;
+
+ if (d < s) {
+ for (i = 0; i < size; ++i)
+ d[i] = s[i];
+ } else if (d > s) {
+ i = size;
+ while (i-- > 0)
+ d[i] = s[i];
+ }
+
+ return dest;
+}
+#endif
+
+/*
+ * Since we need memmove anyway, it could serve as memcpy too; this is
+ * commented out for now to avoid breaking things.
+ */
+/*
+#ifndef memcpy
+# define memcpy memmove
+#endif
+*/
+
+#include "xz/xz_crc32.c"
+#include "xz/xz_dec_stream.c"
+#include "xz/xz_dec_lzma2.c"
+#include "xz/xz_dec_bcj.c"
+
+#endif /* XZ_PREBOOT */
+
+/* Size of the input and output buffers in multi-call mode */
+#define XZ_IOBUF_SIZE 4096
+
+/*
+ * This function implements the API defined in <linux/decompress/generic.h>.
+ *
+ * This wrapper will automatically choose single-call or multi-call mode
+ * of the native XZ decoder API. The single-call mode can be used only when
+ * both input and output buffers are available as a single chunk, i.e. when
+ * fill() and flush() won't be used.
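+ *
+ * A minimal single-call usage sketch (the caller-side names here are
+ * hypothetical):
+ *
+ *	long used;
+ *
+ *	if (unxz(in_buf, in_size, NULL, NULL, out_buf, &used, my_error) == 0)
+ *		... decompression succeeded ...
+ *
+ * On success, 'used' holds the number of input bytes consumed.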
+ */
+STATIC int INIT unxz(unsigned char *in, long in_size,
+ long (*fill)(void *dest, unsigned long size),
+ long (*flush)(void *src, unsigned long size),
+ unsigned char *out, long *in_used,
+ void (*error)(char *x))
+{
+ struct xz_buf b;
+ struct xz_dec *s;
+ enum xz_ret ret;
+ bool must_free_in = false;
+
+#if XZ_INTERNAL_CRC32
+ xz_crc32_init();
+#endif
+
+ if (in_used != NULL)
+ *in_used = 0;
+
+ if (fill == NULL && flush == NULL)
+ s = xz_dec_init(XZ_SINGLE, 0);
+ else
+ s = xz_dec_init(XZ_DYNALLOC, (uint32_t)-1);
+
+ if (s == NULL)
+ goto error_alloc_state;
+
+ if (flush == NULL) {
+ b.out = out;
+ b.out_size = (size_t)-1;
+ } else {
+ b.out_size = XZ_IOBUF_SIZE;
+ b.out = malloc(XZ_IOBUF_SIZE);
+ if (b.out == NULL)
+ goto error_alloc_out;
+ }
+
+ if (in == NULL) {
+ must_free_in = true;
+ in = malloc(XZ_IOBUF_SIZE);
+ if (in == NULL)
+ goto error_alloc_in;
+ }
+
+ b.in = in;
+ b.in_pos = 0;
+ b.in_size = in_size;
+ b.out_pos = 0;
+
+ if (fill == NULL && flush == NULL) {
+ ret = xz_dec_run(s, &b);
+ } else {
+ do {
+ if (b.in_pos == b.in_size && fill != NULL) {
+ if (in_used != NULL)
+ *in_used += b.in_pos;
+
+ b.in_pos = 0;
+
+ in_size = fill(in, XZ_IOBUF_SIZE);
+ if (in_size < 0) {
+ /*
+ * This isn't an optimal error code
+ * but it probably isn't worth making
+ * a new one either.
+ */
+ ret = XZ_BUF_ERROR;
+ break;
+ }
+
+ b.in_size = in_size;
+ }
+
+ ret = xz_dec_run(s, &b);
+
+ if (flush != NULL && (b.out_pos == b.out_size
+ || (ret != XZ_OK && b.out_pos > 0))) {
+ /*
+ * Setting ret here may hide an error
+ * returned by xz_dec_run(), but probably
+ * it's not too bad.
+ */
+ if (flush(b.out, b.out_pos) != (long)b.out_pos)
+ ret = XZ_BUF_ERROR;
+
+ b.out_pos = 0;
+ }
+ } while (ret == XZ_OK);
+
+ if (must_free_in)
+ free(in);
+
+ if (flush != NULL)
+ free(b.out);
+ }
+
+ if (in_used != NULL)
+ *in_used += b.in_pos;
+
+ xz_dec_end(s);
+
+ switch (ret) {
+ case XZ_STREAM_END:
+ return 0;
+
+ case XZ_MEM_ERROR:
+ /* This can occur only in multi-call mode. */
+ error("XZ decompressor ran out of memory");
+ break;
+
+ case XZ_FORMAT_ERROR:
+ error("Input is not in the XZ format (wrong magic bytes)");
+ break;
+
+ case XZ_OPTIONS_ERROR:
+ error("Input was encoded with settings that are not "
+ "supported by this XZ decoder");
+ break;
+
+ case XZ_DATA_ERROR:
+ case XZ_BUF_ERROR:
+ error("XZ-compressed data is corrupt");
+ break;
+
+ default:
+ error("Bug in the XZ decompressor");
+ break;
+ }
+
+ return -1;
+
+error_alloc_in:
+ if (flush != NULL)
+ free(b.out);
+
+error_alloc_out:
+ xz_dec_end(s);
+
+error_alloc_state:
+ error("XZ decompressor ran out of memory");
+ return -1;
+}
+
+/*
+ * This macro is used by architecture-specific files to decompress
+ * the kernel image.
+ */
+#ifdef XZ_PREBOOT
+STATIC int INIT __decompress(unsigned char *buf, long len,
+ long (*fill)(void*, unsigned long),
+ long (*flush)(void*, unsigned long),
+ unsigned char *out_buf, long olen,
+ long *pos,
+ void (*error)(char *x))
+{
+ return unxz(buf, len, fill, flush, out_buf, pos, error);
+}
+#endif
diff --git a/lib/decompress_unzstd.c b/lib/decompress_unzstd.c
new file mode 100644
index 000000000..bba2c0bb1
--- /dev/null
+++ b/lib/decompress_unzstd.c
@@ -0,0 +1,352 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Important notes about in-place decompression
+ *
+ * At least on x86, the kernel is decompressed in place: the compressed data
+ * is placed to the end of the output buffer, and the decompressor overwrites
+ * most of the compressed data. There must be enough safety margin to
+ * guarantee that the write position is always behind the read position.
+ *
+ * The safety margin for ZSTD with a 128 KB block size is calculated below.
+ * Note that the margin with ZSTD is bigger than with GZIP or XZ!
+ *
+ * The worst case for in-place decompression is that the beginning of
+ * the file is compressed extremely well, and the rest of the file is
+ * incompressible. Thus, we must look for worst-case expansion when the
+ * compressor is encoding incompressible data.
+ *
+ * The structure of the .zst file in case of a compressed kernel is as follows.
+ * Maximum sizes (as bytes) of the fields are in parentheses.
+ *
+ * Frame Header: (18)
+ * Blocks: (N)
+ * Checksum: (4)
+ *
+ * The frame header and checksum overhead is at most 22 bytes.
+ *
+ * ZSTD stores the data in blocks. Each block has a header whose size is
+ * 3 bytes. After the block header, there is up to 128 KB of payload.
+ * The maximum uncompressed size of the payload is 128 KB. The minimum
+ * uncompressed size of the payload is never less than the payload size
+ * (excluding the block header).
+ *
+ * The assumption, that the uncompressed size of the payload is never
+ * smaller than the payload itself, is valid only when talking about
+ * the payload as a whole. It is possible that the payload has parts where
+ * the decompressor consumes more input than it produces output. Calculating
+ * the worst case for this would be tricky. Instead of trying to do that,
+ * let's simply make sure that the decompressor never overwrites any bytes
+ * of the payload which it is currently reading.
+ *
+ * Now we have enough information to calculate the safety margin. We need
+ * - 22 bytes for the .zst file format headers;
+ * - 3 bytes per every 128 KiB of uncompressed size (one block header per
+ * block); and
+ * - 128 KiB (biggest possible zstd block size) to make sure that the
+ * decompressor never overwrites anything from the block it is currently
+ * reading.
+ *
+ * We get the following formula:
+ *
+ * safety_margin = 22 + uncompressed_size * 3 / 131072 + 131072
+ * <= 22 + (uncompressed_size >> 15) + 131072
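+ *
+ * As an illustrative worked example: for a 32 MiB kernel image,
+ *
+ *	safety_margin <= 22 + (33554432 >> 15) + 131072
+ *		       = 22 + 1024 + 131072 = 132118 bytes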
+ */
+
+/*
+ * Preboot environments #include "path/to/decompress_unzstd.c".
+ * All of the source files we depend on must be #included.
+ * zstd's only source dependency is xxhash, which has no source
+ * dependencies.
+ *
+ * When UNZSTD_PREBOOT is defined we declare __decompress(), which is
+ * used for kernel decompression, instead of unzstd().
+ *
+ * Define __DISABLE_EXPORTS in preboot environments to prevent symbols
+ * from xxhash and zstd from being exported by the EXPORT_SYMBOL macro.
+ */
+#ifdef STATIC
+# define UNZSTD_PREBOOT
+# include "xxhash.c"
+# include "zstd/decompress_sources.h"
+#else
+#include <linux/decompress/unzstd.h>
+#endif
+
+#include <linux/decompress/mm.h>
+#include <linux/kernel.h>
+#include <linux/zstd.h>
+
+/* 128MB is the maximum window size supported by zstd. */
+#define ZSTD_WINDOWSIZE_MAX (1 << ZSTD_WINDOWLOG_MAX)
+/*
+ * Size of the input and output buffers in multi-call mode.
+ * Pick a larger size because it isn't used during kernel decompression,
+ * since that is single pass, and we have to allocate a large buffer for
+ * zstd's window anyway. The larger size speeds up initramfs decompression.
+ */
+#define ZSTD_IOBUF_SIZE (1 << 17)
+
+static int INIT handle_zstd_error(size_t ret, void (*error)(char *x))
+{
+ const zstd_error_code err = zstd_get_error_code(ret);
+
+ if (!zstd_is_error(ret))
+ return 0;
+
+ /*
+ * zstd_get_error_name() cannot be used because error takes a char *
+ * not a const char *
+ */
+ switch (err) {
+ case ZSTD_error_memory_allocation:
+ error("ZSTD decompressor ran out of memory");
+ break;
+ case ZSTD_error_prefix_unknown:
+ error("Input is not in the ZSTD format (wrong magic bytes)");
+ break;
+ case ZSTD_error_dstSize_tooSmall:
+ case ZSTD_error_corruption_detected:
+ case ZSTD_error_checksum_wrong:
+ error("ZSTD-compressed data is corrupt");
+ break;
+ default:
+ error("ZSTD-compressed data is probably corrupt");
+ break;
+ }
+ return -1;
+}
+
+/*
+ * Handle the case where we have the entire input and output in one segment.
+ * We can allocate less memory (no circular buffer for the sliding window),
+ * and avoid some memcpy() calls.
+ */
+static int INIT decompress_single(const u8 *in_buf, long in_len, u8 *out_buf,
+ long out_len, long *in_pos,
+ void (*error)(char *x))
+{
+ const size_t wksp_size = zstd_dctx_workspace_bound();
+ void *wksp = large_malloc(wksp_size);
+ zstd_dctx *dctx = zstd_init_dctx(wksp, wksp_size);
+ int err;
+ size_t ret;
+
+ if (dctx == NULL) {
+ error("Out of memory while allocating zstd_dctx");
+ err = -1;
+ goto out;
+ }
+ /*
+ * Find out how large the frame actually is, there may be junk at
+ * the end of the frame that zstd_decompress_dctx() can't handle.
+ */
+ ret = zstd_find_frame_compressed_size(in_buf, in_len);
+ err = handle_zstd_error(ret, error);
+ if (err)
+ goto out;
+ in_len = (long)ret;
+
+ ret = zstd_decompress_dctx(dctx, out_buf, out_len, in_buf, in_len);
+ err = handle_zstd_error(ret, error);
+ if (err)
+ goto out;
+
+ if (in_pos != NULL)
+ *in_pos = in_len;
+
+ err = 0;
+out:
+ if (wksp != NULL)
+ large_free(wksp);
+ return err;
+}
+
+static int INIT __unzstd(unsigned char *in_buf, long in_len,
+ long (*fill)(void*, unsigned long),
+ long (*flush)(void*, unsigned long),
+ unsigned char *out_buf, long out_len,
+ long *in_pos,
+ void (*error)(char *x))
+{
+ zstd_in_buffer in;
+ zstd_out_buffer out;
+ zstd_frame_header header;
+ void *in_allocated = NULL;
+ void *out_allocated = NULL;
+ void *wksp = NULL;
+ size_t wksp_size;
+ zstd_dstream *dstream;
+ int err;
+ size_t ret;
+
+ /*
+ * ZSTD decompression code won't be happy if the buffer size is so big
+ * that its end address overflows. When the size is not provided, make
+ * it as big as possible without having the end address overflow.
+ */
+ if (out_len == 0)
+ out_len = UINTPTR_MAX - (uintptr_t)out_buf;
+
+ if (fill == NULL && flush == NULL)
+ /*
+ * We can decompress faster and with less memory when we have a
+ * single chunk.
+ */
+ return decompress_single(in_buf, in_len, out_buf, out_len,
+ in_pos, error);
+
+ /*
+ * If in_buf is not provided, we must be using fill(), so allocate
+ * a large enough buffer. If it is provided, it must be at least
+ * ZSTD_IOBUF_SIZE large.
+ */
+ if (in_buf == NULL) {
+ in_allocated = large_malloc(ZSTD_IOBUF_SIZE);
+ if (in_allocated == NULL) {
+ error("Out of memory while allocating input buffer");
+ err = -1;
+ goto out;
+ }
+ in_buf = in_allocated;
+ in_len = 0;
+ }
+ /* Read the first chunk, since we need to decode the frame header. */
+ if (fill != NULL)
+ in_len = fill(in_buf, ZSTD_IOBUF_SIZE);
+ if (in_len < 0) {
+ error("ZSTD-compressed data is truncated");
+ err = -1;
+ goto out;
+ }
+ /* Set the first non-empty input buffer. */
+ in.src = in_buf;
+ in.pos = 0;
+ in.size = in_len;
+ /* Allocate the output buffer if we are using flush(). */
+ if (flush != NULL) {
+ out_allocated = large_malloc(ZSTD_IOBUF_SIZE);
+ if (out_allocated == NULL) {
+ error("Out of memory while allocating output buffer");
+ err = -1;
+ goto out;
+ }
+ out_buf = out_allocated;
+ out_len = ZSTD_IOBUF_SIZE;
+ }
+ /* Set the output buffer. */
+ out.dst = out_buf;
+ out.pos = 0;
+ out.size = out_len;
+
+ /*
+ * We need to know the window size to allocate the zstd_dstream.
+ * Since we are streaming, we need to allocate a buffer for the sliding
+ * window. The window size varies from 1 KB to ZSTD_WINDOWSIZE_MAX
+ * (8 MB), so it is important to use the actual value so as not to
+ * waste memory when it is smaller.
+ */
+ ret = zstd_get_frame_header(&header, in.src, in.size);
+ err = handle_zstd_error(ret, error);
+ if (err)
+ goto out;
+ if (ret != 0) {
+ error("ZSTD-compressed data has an incomplete frame header");
+ err = -1;
+ goto out;
+ }
+ if (header.windowSize > ZSTD_WINDOWSIZE_MAX) {
+ error("ZSTD-compressed data has too large a window size");
+ err = -1;
+ goto out;
+ }
+
+ /*
+ * Allocate the zstd_dstream now that we know how much memory is
+ * required.
+ */
+ wksp_size = zstd_dstream_workspace_bound(header.windowSize);
+ wksp = large_malloc(wksp_size);
+ dstream = zstd_init_dstream(header.windowSize, wksp, wksp_size);
+ if (dstream == NULL) {
+ error("Out of memory while allocating ZSTD_DStream");
+ err = -1;
+ goto out;
+ }
+
+ /*
+ * Decompression loop:
+ * Read more data if necessary (error if no more data can be read).
+ * Call the decompression function, which returns 0 when finished.
+ * Flush any data produced if using flush().
+ */
+ if (in_pos != NULL)
+ *in_pos = 0;
+ do {
+ /*
+ * If we need to reload data, either we have fill() and can
+ * try to get more data, or we don't and the input is truncated.
+ */
+ if (in.pos == in.size) {
+ if (in_pos != NULL)
+ *in_pos += in.pos;
+ in_len = fill ? fill(in_buf, ZSTD_IOBUF_SIZE) : -1;
+ if (in_len < 0) {
+ error("ZSTD-compressed data is truncated");
+ err = -1;
+ goto out;
+ }
+ in.pos = 0;
+ in.size = in_len;
+ }
+ /* Returns zero when the frame is complete. */
+ ret = zstd_decompress_stream(dstream, &out, &in);
+ err = handle_zstd_error(ret, error);
+ if (err)
+ goto out;
+ /* Flush all of the data produced if using flush(). */
+ if (flush != NULL && out.pos > 0) {
+ if (out.pos != flush(out.dst, out.pos)) {
+ error("Failed to flush()");
+ err = -1;
+ goto out;
+ }
+ out.pos = 0;
+ }
+ } while (ret != 0);
+
+ if (in_pos != NULL)
+ *in_pos += in.pos;
+
+ err = 0;
+out:
+ if (in_allocated != NULL)
+ large_free(in_allocated);
+ if (out_allocated != NULL)
+ large_free(out_allocated);
+ if (wksp != NULL)
+ large_free(wksp);
+ return err;
+}
+
+#ifndef UNZSTD_PREBOOT
+STATIC int INIT unzstd(unsigned char *buf, long len,
+ long (*fill)(void*, unsigned long),
+ long (*flush)(void*, unsigned long),
+ unsigned char *out_buf,
+ long *pos,
+ void (*error)(char *x))
+{
+ return __unzstd(buf, len, fill, flush, out_buf, 0, pos, error);
+}
+#else
+STATIC int INIT __decompress(unsigned char *buf, long len,
+ long (*fill)(void*, unsigned long),
+ long (*flush)(void*, unsigned long),
+ unsigned char *out_buf, long out_len,
+ long *pos,
+ void (*error)(char *x))
+{
+ return __unzstd(buf, len, fill, flush, out_buf, out_len, pos, error);
+}
+#endif
diff --git a/lib/devmem_is_allowed.c b/lib/devmem_is_allowed.c
new file mode 100644
index 000000000..9c060c69f
--- /dev/null
+++ b/lib/devmem_is_allowed.c
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * A generic version of devmem_is_allowed.
+ *
+ * Based on arch/arm64/mm/mmap.c
+ *
+ * Copyright (C) 2020 Google, Inc.
+ * Copyright (C) 2012 ARM Ltd.
+ */
+
+#include <linux/mm.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+
+/*
+ * devmem_is_allowed() checks to see if /dev/mem access to a certain address
+ * is valid. The argument is a physical page number. We mimic x86 here by
+ * disallowing access to system RAM as well as device-exclusive MMIO regions.
+ * This effectively disables read()/write() on /dev/mem.
+ */
+int devmem_is_allowed(unsigned long pfn)
+{
+ if (iomem_is_exclusive(PFN_PHYS(pfn)))
+ return 0;
+ if (!page_is_ram(pfn))
+ return 1;
+ return 0;
+}
diff --git a/lib/devres.c b/lib/devres.c
new file mode 100644
index 000000000..c44f104b5
--- /dev/null
+++ b/lib/devres.c
@@ -0,0 +1,602 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/err.h>
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/gfp.h>
+#include <linux/export.h>
+#include <linux/of_address.h>
+
+enum devm_ioremap_type {
+ DEVM_IOREMAP = 0,
+ DEVM_IOREMAP_UC,
+ DEVM_IOREMAP_WC,
+ DEVM_IOREMAP_NP,
+};
+
+void devm_ioremap_release(struct device *dev, void *res)
+{
+ iounmap(*(void __iomem **)res);
+}
+
+static int devm_ioremap_match(struct device *dev, void *res, void *match_data)
+{
+ return *(void **)res == match_data;
+}
+
+static void __iomem *__devm_ioremap(struct device *dev, resource_size_t offset,
+ resource_size_t size,
+ enum devm_ioremap_type type)
+{
+ void __iomem **ptr, *addr = NULL;
+
+ ptr = devres_alloc_node(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL,
+ dev_to_node(dev));
+ if (!ptr)
+ return NULL;
+
+ switch (type) {
+ case DEVM_IOREMAP:
+ addr = ioremap(offset, size);
+ break;
+ case DEVM_IOREMAP_UC:
+ addr = ioremap_uc(offset, size);
+ break;
+ case DEVM_IOREMAP_WC:
+ addr = ioremap_wc(offset, size);
+ break;
+ case DEVM_IOREMAP_NP:
+ addr = ioremap_np(offset, size);
+ break;
+ }
+
+ if (addr) {
+ *ptr = addr;
+ devres_add(dev, ptr);
+ } else
+ devres_free(ptr);
+
+ return addr;
+}
+
+/**
+ * devm_ioremap - Managed ioremap()
+ * @dev: Generic device to remap IO address for
+ * @offset: Resource address to map
+ * @size: Size of map
+ *
+ * Managed ioremap(). Map is automatically unmapped on driver detach.
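+ *
+ * A minimal usage sketch (the surrounding driver code is hypothetical):
+ *
+ *	void __iomem *regs;
+ *
+ *	regs = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ *	if (!regs)
+ *		return -ENOMEM;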
+ */
+void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
+ resource_size_t size)
+{
+ return __devm_ioremap(dev, offset, size, DEVM_IOREMAP);
+}
+EXPORT_SYMBOL(devm_ioremap);
+
+/**
+ * devm_ioremap_uc - Managed ioremap_uc()
+ * @dev: Generic device to remap IO address for
+ * @offset: Resource address to map
+ * @size: Size of map
+ *
+ * Managed ioremap_uc(). Map is automatically unmapped on driver detach.
+ */
+void __iomem *devm_ioremap_uc(struct device *dev, resource_size_t offset,
+ resource_size_t size)
+{
+ return __devm_ioremap(dev, offset, size, DEVM_IOREMAP_UC);
+}
+EXPORT_SYMBOL_GPL(devm_ioremap_uc);
+
+/**
+ * devm_ioremap_wc - Managed ioremap_wc()
+ * @dev: Generic device to remap IO address for
+ * @offset: Resource address to map
+ * @size: Size of map
+ *
+ * Managed ioremap_wc(). Map is automatically unmapped on driver detach.
+ */
+void __iomem *devm_ioremap_wc(struct device *dev, resource_size_t offset,
+ resource_size_t size)
+{
+ return __devm_ioremap(dev, offset, size, DEVM_IOREMAP_WC);
+}
+EXPORT_SYMBOL(devm_ioremap_wc);
+
+/**
+ * devm_iounmap - Managed iounmap()
+ * @dev: Generic device to unmap for
+ * @addr: Address to unmap
+ *
+ * Managed iounmap(). @addr must have been mapped using devm_ioremap*().
+ */
+void devm_iounmap(struct device *dev, void __iomem *addr)
+{
+ WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
+ (__force void *)addr));
+ iounmap(addr);
+}
+EXPORT_SYMBOL(devm_iounmap);
+
+static void __iomem *
+__devm_ioremap_resource(struct device *dev, const struct resource *res,
+ enum devm_ioremap_type type)
+{
+ resource_size_t size;
+ void __iomem *dest_ptr;
+ char *pretty_name;
+
+ BUG_ON(!dev);
+
+ if (!res || resource_type(res) != IORESOURCE_MEM) {
+ dev_err(dev, "invalid resource %pR\n", res);
+ return IOMEM_ERR_PTR(-EINVAL);
+ }
+
+ if (type == DEVM_IOREMAP && res->flags & IORESOURCE_MEM_NONPOSTED)
+ type = DEVM_IOREMAP_NP;
+
+ size = resource_size(res);
+
+ if (res->name)
+ pretty_name = devm_kasprintf(dev, GFP_KERNEL, "%s %s",
+ dev_name(dev), res->name);
+ else
+ pretty_name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
+ if (!pretty_name) {
+ dev_err(dev, "can't generate pretty name for resource %pR\n", res);
+ return IOMEM_ERR_PTR(-ENOMEM);
+ }
+
+ if (!devm_request_mem_region(dev, res->start, size, pretty_name)) {
+ dev_err(dev, "can't request region for resource %pR\n", res);
+ return IOMEM_ERR_PTR(-EBUSY);
+ }
+
+ dest_ptr = __devm_ioremap(dev, res->start, size, type);
+ if (!dest_ptr) {
+ dev_err(dev, "ioremap failed for resource %pR\n", res);
+ devm_release_mem_region(dev, res->start, size);
+ dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
+ }
+
+ return dest_ptr;
+}
+
+/**
+ * devm_ioremap_resource() - check, request region, and ioremap resource
+ * @dev: generic device to handle the resource for
+ * @res: resource to be handled
+ *
+ * Checks that a resource is a valid memory region, requests the memory
+ * region and ioremaps it. All operations are managed and will be undone
+ * on driver detach.
+ *
+ * Usage example:
+ *
+ * res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ * base = devm_ioremap_resource(&pdev->dev, res);
+ * if (IS_ERR(base))
+ * return PTR_ERR(base);
+ *
+ * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
+ * on failure.
+ */
+void __iomem *devm_ioremap_resource(struct device *dev,
+ const struct resource *res)
+{
+ return __devm_ioremap_resource(dev, res, DEVM_IOREMAP);
+}
+EXPORT_SYMBOL(devm_ioremap_resource);
+
+/**
+ * devm_ioremap_resource_wc() - write-combined variant of
+ * devm_ioremap_resource()
+ * @dev: generic device to handle the resource for
+ * @res: resource to be handled
+ *
+ * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
+ * on failure.
+ */
+void __iomem *devm_ioremap_resource_wc(struct device *dev,
+ const struct resource *res)
+{
+ return __devm_ioremap_resource(dev, res, DEVM_IOREMAP_WC);
+}
+
+/*
+ * devm_of_iomap - Requests a resource and maps the memory mapped IO
+ * for a given device_node managed by a given device
+ *
+ * Checks that a resource is a valid memory region, requests the memory
+ * region and ioremaps it. All operations are managed and will be undone
+ * on driver detach of the device.
+ *
+ * This is to be used when a device requests/maps resources described
+ * by other device tree nodes (children or otherwise).
+ *
+ * @dev: The device "managing" the resource
+ * @node: The device-tree node where the resource resides
+ * @index: index of the MMIO range in the "reg" property
+ * @size: Returns the size of the resource (pass NULL if not needed)
+ *
+ * Usage example:
+ *
+ * base = devm_of_iomap(&pdev->dev, node, 0, NULL);
+ * if (IS_ERR(base))
+ * return PTR_ERR(base);
+ *
+ * Please note: this is not a one-to-one replacement for of_iomap() because the
+ * of_iomap() function does not track whether the region is already mapped. If
+ * two drivers try to map the same memory, the of_iomap() function will succeed
+ * but the devm_of_iomap() function will return -EBUSY.
+ *
+ * Return: a pointer to the requested and mapped memory or an ERR_PTR() encoded
+ * error code on failure.
+ */
+void __iomem *devm_of_iomap(struct device *dev, struct device_node *node, int index,
+ resource_size_t *size)
+{
+ struct resource res;
+
+ if (of_address_to_resource(node, index, &res))
+ return IOMEM_ERR_PTR(-EINVAL);
+ if (size)
+ *size = resource_size(&res);
+ return devm_ioremap_resource(dev, &res);
+}
+EXPORT_SYMBOL(devm_of_iomap);
+
+#ifdef CONFIG_HAS_IOPORT_MAP
+/*
+ * Generic iomap devres
+ */
+static void devm_ioport_map_release(struct device *dev, void *res)
+{
+ ioport_unmap(*(void __iomem **)res);
+}
+
+static int devm_ioport_map_match(struct device *dev, void *res,
+ void *match_data)
+{
+ return *(void **)res == match_data;
+}
+
+/**
+ * devm_ioport_map - Managed ioport_map()
+ * @dev: Generic device to map ioport for
+ * @port: Port to map
+ * @nr: Number of ports to map
+ *
+ * Managed ioport_map(). Map is automatically unmapped on driver
+ * detach.
+ *
+ * Return: a pointer to the remapped memory or NULL on failure.
+ */
+void __iomem *devm_ioport_map(struct device *dev, unsigned long port,
+ unsigned int nr)
+{
+ void __iomem **ptr, *addr;
+
+ ptr = devres_alloc_node(devm_ioport_map_release, sizeof(*ptr), GFP_KERNEL,
+ dev_to_node(dev));
+ if (!ptr)
+ return NULL;
+
+ addr = ioport_map(port, nr);
+ if (addr) {
+ *ptr = addr;
+ devres_add(dev, ptr);
+ } else
+ devres_free(ptr);
+
+ return addr;
+}
+EXPORT_SYMBOL(devm_ioport_map);
+
+/**
+ * devm_ioport_unmap - Managed ioport_unmap()
+ * @dev: Generic device to unmap for
+ * @addr: Address to unmap
+ *
+ * Managed ioport_unmap(). @addr must have been mapped using
+ * devm_ioport_map().
+ */
+void devm_ioport_unmap(struct device *dev, void __iomem *addr)
+{
+ ioport_unmap(addr);
+ WARN_ON(devres_destroy(dev, devm_ioport_map_release,
+ devm_ioport_map_match, (__force void *)addr));
+}
+EXPORT_SYMBOL(devm_ioport_unmap);
+#endif /* CONFIG_HAS_IOPORT_MAP */
+
+#ifdef CONFIG_PCI
+/*
+ * PCI iomap devres
+ */
+#define PCIM_IOMAP_MAX PCI_STD_NUM_BARS
+
+struct pcim_iomap_devres {
+ void __iomem *table[PCIM_IOMAP_MAX];
+};
+
+static void pcim_iomap_release(struct device *gendev, void *res)
+{
+ struct pci_dev *dev = to_pci_dev(gendev);
+ struct pcim_iomap_devres *this = res;
+ int i;
+
+ for (i = 0; i < PCIM_IOMAP_MAX; i++)
+ if (this->table[i])
+ pci_iounmap(dev, this->table[i]);
+}
+
+/**
+ * pcim_iomap_table - access iomap allocation table
+ * @pdev: PCI device to access iomap table for
+ *
+ * Access the iomap allocation table for @pdev. If the table doesn't
+ * exist and @pdev is managed, it will be allocated. All iomaps
+ * recorded in the iomap table are automatically unmapped on driver
+ * detach.
+ *
+ * This function might sleep when the table is first allocated, but once
+ * the table exists it can safely be called from any context and is
+ * guaranteed to succeed.
+ */
+void __iomem * const *pcim_iomap_table(struct pci_dev *pdev)
+{
+ struct pcim_iomap_devres *dr, *new_dr;
+
+ dr = devres_find(&pdev->dev, pcim_iomap_release, NULL, NULL);
+ if (dr)
+ return dr->table;
+
+ new_dr = devres_alloc_node(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL,
+ dev_to_node(&pdev->dev));
+ if (!new_dr)
+ return NULL;
+ dr = devres_get(&pdev->dev, new_dr, NULL, NULL);
+ return dr->table;
+}
+EXPORT_SYMBOL(pcim_iomap_table);
+
+/**
+ * pcim_iomap - Managed pcim_iomap()
+ * @pdev: PCI device to iomap for
+ * @bar: BAR to iomap
+ * @maxlen: Maximum length of iomap
+ *
+ * Managed pci_iomap(). Map is automatically unmapped on driver
+ * detach.
+ */
+void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
+{
+ void __iomem **tbl;
+
+ BUG_ON(bar >= PCIM_IOMAP_MAX);
+
+ tbl = (void __iomem **)pcim_iomap_table(pdev);
+ if (!tbl || tbl[bar]) /* duplicate mappings not allowed */
+ return NULL;
+
+ tbl[bar] = pci_iomap(pdev, bar, maxlen);
+ return tbl[bar];
+}
+EXPORT_SYMBOL(pcim_iomap);
+
+/**
+ * pcim_iounmap - Managed pci_iounmap()
+ * @pdev: PCI device to iounmap for
+ * @addr: Address to unmap
+ *
+ * Managed pci_iounmap(). @addr must have been mapped using pcim_iomap().
+ */
+void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr)
+{
+ void __iomem **tbl;
+ int i;
+
+ pci_iounmap(pdev, addr);
+
+ tbl = (void __iomem **)pcim_iomap_table(pdev);
+ BUG_ON(!tbl);
+
+ for (i = 0; i < PCIM_IOMAP_MAX; i++)
+ if (tbl[i] == addr) {
+ tbl[i] = NULL;
+ return;
+ }
+ WARN_ON(1);
+}
+EXPORT_SYMBOL(pcim_iounmap);
+
+/**
+ * pcim_iomap_regions - Request and iomap PCI BARs
+ * @pdev: PCI device to map IO resources for
+ * @mask: Mask of BARs to request and iomap
+ * @name: Name used when requesting regions
+ *
+ * Request and iomap regions specified by @mask.
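+ *
+ * A usage sketch (the BAR mask and name are hypothetical):
+ *
+ *	rc = pcim_iomap_regions(pdev, BIT(0) | BIT(2), "mydrv");
+ *	if (rc)
+ *		return rc;
+ *	regs = pcim_iomap_table(pdev)[0];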
+ */
+int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name)
+{
+ void __iomem * const *iomap;
+ int i, rc;
+
+ iomap = pcim_iomap_table(pdev);
+ if (!iomap)
+ return -ENOMEM;
+
+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+ unsigned long len;
+
+ if (!(mask & (1 << i)))
+ continue;
+
+ rc = -EINVAL;
+ len = pci_resource_len(pdev, i);
+ if (!len)
+ goto err_inval;
+
+ rc = pci_request_region(pdev, i, name);
+ if (rc)
+ goto err_inval;
+
+ rc = -ENOMEM;
+ if (!pcim_iomap(pdev, i, 0))
+ goto err_region;
+ }
+
+ return 0;
+
+ err_region:
+ pci_release_region(pdev, i);
+ err_inval:
+ while (--i >= 0) {
+ if (!(mask & (1 << i)))
+ continue;
+ pcim_iounmap(pdev, iomap[i]);
+ pci_release_region(pdev, i);
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL(pcim_iomap_regions);
+
+/**
+ * pcim_iomap_regions_request_all - Request all BARs and iomap specified ones
+ * @pdev: PCI device to map IO resources for
+ * @mask: Mask of BARs to iomap
+ * @name: Name used when requesting regions
+ *
+ * Request all PCI BARs and iomap regions specified by @mask.
+ */
+int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask,
+ const char *name)
+{
+ int request_mask = ((1 << 6) - 1) & ~mask;
+ int rc;
+
+ rc = pci_request_selected_regions(pdev, request_mask, name);
+ if (rc)
+ return rc;
+
+ rc = pcim_iomap_regions(pdev, mask, name);
+ if (rc)
+ pci_release_selected_regions(pdev, request_mask);
+ return rc;
+}
+EXPORT_SYMBOL(pcim_iomap_regions_request_all);
+
+/**
+ * pcim_iounmap_regions - Unmap and release PCI BARs
+ * @pdev: PCI device to map IO resources for
+ * @mask: Mask of BARs to unmap and release
+ *
+ * Unmap and release regions specified by @mask.
+ */
+void pcim_iounmap_regions(struct pci_dev *pdev, int mask)
+{
+ void __iomem * const *iomap;
+ int i;
+
+ iomap = pcim_iomap_table(pdev);
+ if (!iomap)
+ return;
+
+ for (i = 0; i < PCIM_IOMAP_MAX; i++) {
+ if (!(mask & (1 << i)))
+ continue;
+
+ pcim_iounmap(pdev, iomap[i]);
+ pci_release_region(pdev, i);
+ }
+}
+EXPORT_SYMBOL(pcim_iounmap_regions);
+#endif /* CONFIG_PCI */
+
+static void devm_arch_phys_wc_add_release(struct device *dev, void *res)
+{
+ arch_phys_wc_del(*((int *)res));
+}
+
+/**
+ * devm_arch_phys_wc_add - Managed arch_phys_wc_add()
+ * @dev: Managed device
+ * @base: Memory base address
+ * @size: Size of memory range
+ *
+ * Adds a WC MTRR using arch_phys_wc_add() and sets up a release callback.
+ * See arch_phys_wc_add() for more information.
+ */
+int devm_arch_phys_wc_add(struct device *dev, unsigned long base, unsigned long size)
+{
+ int *mtrr;
+ int ret;
+
+	mtrr = devres_alloc_node(devm_arch_phys_wc_add_release, sizeof(*mtrr), GFP_KERNEL,
+ dev_to_node(dev));
+ if (!mtrr)
+ return -ENOMEM;
+
+ ret = arch_phys_wc_add(base, size);
+ if (ret < 0) {
+ devres_free(mtrr);
+ return ret;
+ }
+
+ *mtrr = ret;
+ devres_add(dev, mtrr);
+
+ return ret;
+}
+EXPORT_SYMBOL(devm_arch_phys_wc_add);
+
+struct arch_io_reserve_memtype_wc_devres {
+ resource_size_t start;
+ resource_size_t size;
+};
+
+static void devm_arch_io_free_memtype_wc_release(struct device *dev, void *res)
+{
+ const struct arch_io_reserve_memtype_wc_devres *this = res;
+
+ arch_io_free_memtype_wc(this->start, this->size);
+}
+
+/**
+ * devm_arch_io_reserve_memtype_wc - Managed arch_io_reserve_memtype_wc()
+ * @dev: Managed device
+ * @start: Memory base address
+ * @size: Size of memory range
+ *
+ * Reserves a memory range with WC caching using arch_io_reserve_memtype_wc()
+ * and sets up a release callback. See arch_io_reserve_memtype_wc() for more
+ * information.
+ */
+int devm_arch_io_reserve_memtype_wc(struct device *dev, resource_size_t start,
+ resource_size_t size)
+{
+ struct arch_io_reserve_memtype_wc_devres *dr;
+ int ret;
+
+ dr = devres_alloc_node(devm_arch_io_free_memtype_wc_release, sizeof(*dr), GFP_KERNEL,
+ dev_to_node(dev));
+ if (!dr)
+ return -ENOMEM;
+
+ ret = arch_io_reserve_memtype_wc(start, size);
+ if (ret < 0) {
+ devres_free(dr);
+ return ret;
+ }
+
+ dr->start = start;
+ dr->size = size;
+ devres_add(dev, dr);
+
+ return ret;
+}
+EXPORT_SYMBOL(devm_arch_io_reserve_memtype_wc);
diff --git a/lib/dhry.h b/lib/dhry.h
new file mode 100644
index 000000000..e1a4db8e2
--- /dev/null
+++ b/lib/dhry.h
@@ -0,0 +1,358 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ ****************************************************************************
+ *
+ * "DHRYSTONE" Benchmark Program
+ * -----------------------------
+ *
+ * Version: C, Version 2.1
+ *
+ * File: dhry.h (part 1 of 3)
+ *
+ * Date: May 25, 1988
+ *
+ * Author: Reinhold P. Weicker
+ * Siemens AG, AUT E 51
+ * Postfach 3220
+ * 8520 Erlangen
+ * Germany (West)
+ * Phone: [+49]-9131-7-20330
+ * (8-17 Central European Time)
+ * Usenet: ..!mcsun!unido!estevax!weicker
+ *
+ * Original Version (in Ada) published in
+ * "Communications of the ACM" vol. 27., no. 10 (Oct. 1984),
+ * pp. 1013 - 1030, together with the statistics
+ * on which the distribution of statements etc. is based.
+ *
+ * In this C version, the following C library functions are used:
+ * - strcpy, strcmp (inside the measurement loop)
+ * - printf, scanf (outside the measurement loop)
+ * In addition, Berkeley UNIX system calls "times ()" or "time ()"
+ * are used for execution time measurement. For measurements
+ * on other systems, these calls have to be changed.
+ *
+ * Collection of Results:
+ * Reinhold Weicker (address see above) and
+ *
+ * Rick Richardson
+ * PC Research, Inc.
+ * 94 Apple Orchard Drive
+ * Tinton Falls, NJ 07724
+ * Phone: (201) 389-8963 (9-17 EST)
+ * Usenet: ...!uunet!pcrat!rick
+ *
+ * Please send results to Rick Richardson and/or Reinhold Weicker.
+ * Complete information should be given on hardware and software used.
+ * Hardware information includes: Machine type, CPU, type and size
+ * of caches; for microprocessors: clock frequency, memory speed
+ * (number of wait states).
+ * Software information includes: Compiler (and runtime library)
+ * manufacturer and version, compilation switches, OS version.
+ * The Operating System version may give an indication about the
+ * compiler; Dhrystone itself performs no OS calls in the measurement loop.
+ *
+ * The complete output generated by the program should be mailed
+ * such that at least some checks for correctness can be made.
+ *
+ ***************************************************************************
+ *
+ * History: This version C/2.1 has been made for two reasons:
+ *
+ * 1) There is an obvious need for a common C version of
+ * Dhrystone, since C is at present the most popular system
+ * programming language for the class of processors
+ * (microcomputers, minicomputers) where Dhrystone is used most.
+ * There should be, as far as possible, only one C version of
+ * Dhrystone such that results can be compared without
+ * restrictions. In the past, the C versions distributed
+ * by Rick Richardson (Version 1.1) and by Reinhold Weicker
+ * had small (though not significant) differences.
+ *
+ * 2) As far as it is possible without changes to the Dhrystone
+ * statistics, optimizing compilers should be prevented from
+ * removing significant statements.
+ *
+ * This C version has been developed in cooperation with
+ * Rick Richardson (Tinton Falls, NJ), it incorporates many
+ * ideas from the "Version 1.1" distributed previously by
+ * him over the UNIX network Usenet.
+ * I also thank Chaim Benedelac (National Semiconductor),
+ * David Ditzel (SUN), Earl Killian and John Mashey (MIPS),
+ * Alan Smith and Rafael Saavedra-Barrera (UC at Berkeley)
+ * for their help with comments on earlier versions of the
+ * benchmark.
+ *
+ * Changes: In the initialization part, this version follows mostly
+ * Rick Richardson's version distributed via Usenet, not the
+ * version distributed earlier via floppy disk by Reinhold Weicker.
+ * As a concession to older compilers, names have been made
+ * unique within the first 8 characters.
+ * Inside the measurement loop, this version follows the
+ * version previously distributed by Reinhold Weicker.
+ *
+ * At several places in the benchmark, code has been added,
+ * but within the measurement loop only in branches that
+ * are not executed. The intention is that optimizing compilers
+ * should be prevented from moving code out of the measurement
+ * loop, or from removing code altogether. Since the statements
+ * that are executed within the measurement loop have NOT been
+ * changed, the numbers defining the "Dhrystone distribution"
+ * (distribution of statements, operand types and locality)
+ * still hold. Except for sophisticated optimizing compilers,
+ * execution times for this version should be the same as
+ * for previous versions.
+ *
+ * Since it has proven difficult to subtract the time for the
+ * measurement loop overhead in a correct way, the loop check
+ * has been made a part of the benchmark. This does have
+ * an impact - though a very minor one - on the distribution
+ * statistics which have been updated for this version.
+ *
+ * All changes within the measurement loop are described
+ * and discussed in the companion paper "Rationale for
+ * Dhrystone version 2".
+ *
+ * Because of the self-imposed limitation that the order and
+ * distribution of the executed statements should not be
+ * changed, there are still cases where optimizing compilers
+ * may not generate code for some statements. To a certain
+ * degree, this is unavoidable for small synthetic benchmarks.
+ * Users of the benchmark are advised to check code listings
+ * to verify that code is generated for all statements of Dhrystone.
+ *
+ * Version 2.1 is identical to version 2.0 distributed via
+ * the UNIX network Usenet in March 1988 except that it corrects
+ * some minor deficiencies that were found by users of version 2.0.
+ * The only change within the measurement loop is that a
+ * non-executed "else" part was added to the "if" statement in
+ * Func_3, and a non-executed "else" part removed from Proc_3.
+ *
+ ***************************************************************************
+ *
+ * Compilation model and measurement (IMPORTANT):
+ *
+ * This C version of Dhrystone consists of three files:
+ * - dhry.h (this file, containing global definitions and comments)
+ * - dhry_1.c (containing the code corresponding to Ada package Pack_1)
+ * - dhry_2.c (containing the code corresponding to Ada package Pack_2)
+ *
+ * The following "ground rules" apply for measurements:
+ * - Separate compilation
+ * - No procedure merging
+ * - Otherwise, compiler optimizations are allowed but should be indicated
+ * - Default results are those without register declarations
+ * See the companion paper "Rationale for Dhrystone Version 2" for a more
+ * detailed discussion of these ground rules.
+ *
+ * For 16-Bit processors (e.g. 80186, 80286), times for all compilation
+ * models ("small", "medium", "large" etc.) should be given if possible,
+ * together with a definition of these models for the compiler system used.
+ *
+ **************************************************************************
+ *
+ * Dhrystone (C version) statistics:
+ *
+ * [Comment from the first distribution, updated for version 2.
+ * Note that because of language differences, the numbers are slightly
+ * different from the Ada version.]
+ *
+ * The following program contains statements of a high level programming
+ * language (here: C) in a distribution considered representative:
+ *
+ * assignments 52 (51.0 %)
+ * control statements 33 (32.4 %)
+ * procedure, function calls 17 (16.7 %)
+ *
+ * 103 statements are dynamically executed. The program is balanced with
+ * respect to the three aspects:
+ *
+ * - statement type
+ * - operand type
+ * - operand locality
+ * operand global, local, parameter, or constant.
+ *
+ * The combination of these three aspects is balanced only approximately.
+ *
+ * 1. Statement Type:
+ * ----------------- number
+ *
+ * V1 = V2 9
+ * (incl. V1 = F(..))
+ * V = Constant 12
+ * Assignment, 7
+ * with array element
+ * Assignment, 6
+ * with record component
+ * --
+ * 34 34
+ *
+ * X = Y +|-|"&&"|"|" Z 5
+ * X = Y +|-|"==" Constant 6
+ * X = X +|- 1 3
+ * X = Y *|/ Z 2
+ * X = Expression, 1
+ * two operators
+ * X = Expression, 1
+ * three operators
+ * --
+ * 18 18
+ *
+ * if .... 14
+ * with "else" 7
+ * without "else" 7
+ * executed 3
+ * not executed 4
+ * for ... 7 | counted every time
+ * while ... 4 | the loop condition
+ * do ... while 1 | is evaluated
+ * switch ... 1
+ * break 1
+ * declaration with 1
+ * initialization
+ * --
+ * 34 34
+ *
+ * P (...) procedure call 11
+ * user procedure 10
+ * library procedure 1
+ * X = F (...)
+ * function call 6
+ * user function 5
+ * library function 1
+ * --
+ * 17 17
+ * ---
+ * 103
+ *
+ * The average number of parameters in procedure or function calls
+ * is 1.82 (not counting the function values as implicit parameters).
+ *
+ *
+ * 2. Operators
+ * ------------
+ * number approximate
+ * percentage
+ *
+ * Arithmetic 32 50.8
+ *
+ * + 21 33.3
+ * - 7 11.1
+ * * 3 4.8
+ * / (int div) 1 1.6
+ *
+ * Comparison 27 42.8
+ *
+ * == 9 14.3
+ * /= 4 6.3
+ * > 1 1.6
+ * < 3 4.8
+ * >= 1 1.6
+ * <= 9 14.3
+ *
+ * Logic 4 6.3
+ *
+ * && (AND-THEN) 1 1.6
+ * | (OR) 1 1.6
+ * ! (NOT) 2 3.2
+ *
+ * -- -----
+ * 63 100.1
+ *
+ *
+ * 3. Operand Type (counted once per operand reference):
+ * ---------------
+ * number approximate
+ * percentage
+ *
+ * Integer 175 72.3 %
+ * Character 45 18.6 %
+ * Pointer 12 5.0 %
+ * String30 6 2.5 %
+ * Array 2 0.8 %
+ * Record 2 0.8 %
+ * --- -------
+ * 242 100.0 %
+ *
+ * When there is an access path leading to the final operand (e.g. a record
+ * component), only the final data type on the access path is counted.
+ *
+ *
+ * 4. Operand Locality:
+ * -------------------
+ * number approximate
+ * percentage
+ *
+ * local variable 114 47.1 %
+ * global variable 22 9.1 %
+ * parameter 45 18.6 %
+ * value 23 9.5 %
+ * reference 22 9.1 %
+ * function result 6 2.5 %
+ * constant 55 22.7 %
+ * --- -------
+ * 242 100.0 %
+ *
+ *
+ * The program does not compute anything meaningful, but it is syntactically
+ * and semantically correct. All variables have a value assigned to them
+ * before they are used as a source operand.
+ *
+ * There has been no explicit effort to account for the effects of a
+ * cache, or to balance the use of long or short displacements for code or
+ * data.
+ *
+ ***************************************************************************
+ */
+
+typedef enum {
+ Ident_1,
+ Ident_2,
+ Ident_3,
+ Ident_4,
+ Ident_5
+} Enumeration; /* for boolean and enumeration types in Ada, Pascal */
+
+/* General definitions: */
+
+typedef int One_Thirty;
+typedef int One_Fifty;
+typedef char Capital_Letter;
+typedef int Boolean;
+typedef char Str_30[31];
+typedef int Arr_1_Dim[50];
+typedef int Arr_2_Dim[50][50];
+
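+/*
+ * Rec_Type corresponds to an Ada variant record: Discr plays the role of
+ * the discriminant that selects the active member of the union. The
+ * benchmark only ever sets Discr to Ident_1 and uses variant.var_1.
+ */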
+typedef struct record {
+ struct record *Ptr_Comp;
+ Enumeration Discr;
+ union {
+ struct {
+ Enumeration Enum_Comp;
+ int Int_Comp;
+ char Str_Comp[31];
+ } var_1;
+ struct {
+ Enumeration E_Comp_2;
+ char Str_2_Comp[31];
+ } var_2;
+ struct {
+ char Ch_1_Comp;
+ char Ch_2_Comp;
+ } var_3;
+ } variant;
+} Rec_Type, *Rec_Pointer;
+
+
+extern int Int_Glob;
+extern char Ch_1_Glob;
+
+void Proc_6(Enumeration Enum_Val_Par, Enumeration *Enum_Ref_Par);
+void Proc_7(One_Fifty Int_1_Par_Val, One_Fifty Int_2_Par_Val,
+ One_Fifty *Int_Par_Ref);
+void Proc_8(Arr_1_Dim Arr_1_Par_Ref, Arr_2_Dim Arr_2_Par_Ref,
+ int Int_1_Par_Val, int Int_2_Par_Val);
+Enumeration Func_1(Capital_Letter Ch_1_Par_Val, Capital_Letter Ch_2_Par_Val);
+Boolean Func_2(Str_30 Str_1_Par_Ref, Str_30 Str_2_Par_Ref);
+
+int dhry(int n);
diff --git a/lib/dhry_1.c b/lib/dhry_1.c
new file mode 100644
index 000000000..08edbbb19
--- /dev/null
+++ b/lib/dhry_1.c
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/*
+ ****************************************************************************
+ *
+ * "DHRYSTONE" Benchmark Program
+ * -----------------------------
+ *
+ * Version: C, Version 2.1
+ *
+ * File: dhry_1.c (part 2 of 3)
+ *
+ * Date: May 25, 1988
+ *
+ * Author: Reinhold P. Weicker
+ *
+ ****************************************************************************
+ */
+
+#include "dhry.h"
+
+#include <linux/ktime.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+/* Global Variables: */
+
+int Int_Glob;
+char Ch_1_Glob;
+
+static Rec_Pointer Ptr_Glob, Next_Ptr_Glob;
+static Boolean Bool_Glob;
+static char Ch_2_Glob;
+static int Arr_1_Glob[50];
+static int Arr_2_Glob[50][50];
+
+static void Proc_3(Rec_Pointer *Ptr_Ref_Par)
+/******************/
+/* executed once */
+/* Ptr_Ref_Par becomes Ptr_Glob */
+{
+ if (Ptr_Glob) {
+ /* then, executed */
+ *Ptr_Ref_Par = Ptr_Glob->Ptr_Comp;
+ }
+ Proc_7(10, Int_Glob, &Ptr_Glob->variant.var_1.Int_Comp);
+} /* Proc_3 */
+
+
+static void Proc_1(Rec_Pointer Ptr_Val_Par)
+/******************/
+/* executed once */
+{
+ Rec_Pointer Next_Record = Ptr_Val_Par->Ptr_Comp;
+ /* == Next_Ptr_Glob */
+ /* Local variable, initialized with Ptr_Val_Par->Ptr_Comp, */
+ /* corresponds to "rename" in Ada, "with" in Pascal */
+
+ *Ptr_Val_Par->Ptr_Comp = *Ptr_Glob;
+ Ptr_Val_Par->variant.var_1.Int_Comp = 5;
+ Next_Record->variant.var_1.Int_Comp =
+ Ptr_Val_Par->variant.var_1.Int_Comp;
+ Next_Record->Ptr_Comp = Ptr_Val_Par->Ptr_Comp;
+ Proc_3(&Next_Record->Ptr_Comp);
+ /* Ptr_Val_Par->Ptr_Comp->Ptr_Comp == Ptr_Glob->Ptr_Comp */
+ if (Next_Record->Discr == Ident_1) {
+ /* then, executed */
+ Next_Record->variant.var_1.Int_Comp = 6;
+ Proc_6(Ptr_Val_Par->variant.var_1.Enum_Comp,
+ &Next_Record->variant.var_1.Enum_Comp);
+ Next_Record->Ptr_Comp = Ptr_Glob->Ptr_Comp;
+ Proc_7(Next_Record->variant.var_1.Int_Comp, 10,
+ &Next_Record->variant.var_1.Int_Comp);
+ } else {
+ /* not executed */
+ *Ptr_Val_Par = *Ptr_Val_Par->Ptr_Comp;
+ }
+} /* Proc_1 */
+
+
+static void Proc_2(One_Fifty *Int_Par_Ref)
+/******************/
+/* executed once */
+/* *Int_Par_Ref == 1, becomes 4 */
+{
+ One_Fifty Int_Loc;
+ Enumeration Enum_Loc;
+
+ Int_Loc = *Int_Par_Ref + 10;
+ do {
+ /* executed once */
+ if (Ch_1_Glob == 'A') {
+ /* then, executed */
+ Int_Loc -= 1;
+ *Int_Par_Ref = Int_Loc - Int_Glob;
+ Enum_Loc = Ident_1;
+ } /* if */
+ } while (Enum_Loc != Ident_1); /* true */
+} /* Proc_2 */
+
+
+static void Proc_4(void)
+/*******/
+/* executed once */
+{
+ Boolean Bool_Loc;
+
+ Bool_Loc = Ch_1_Glob == 'A';
+ Bool_Glob = Bool_Loc | Bool_Glob;
+ Ch_2_Glob = 'B';
+} /* Proc_4 */
+
+
+static void Proc_5(void)
+/*******/
+/* executed once */
+{
+ Ch_1_Glob = 'A';
+ Bool_Glob = false;
+} /* Proc_5 */
+
+
+int dhry(int n)
+/*****/
+
+ /* main program, corresponds to procedures */
+ /* Main and Proc_0 in the Ada version */
+{
+ One_Fifty Int_1_Loc;
+ One_Fifty Int_2_Loc;
+ One_Fifty Int_3_Loc;
+ char Ch_Index;
+ Enumeration Enum_Loc;
+ Str_30 Str_1_Loc;
+ Str_30 Str_2_Loc;
+ int Run_Index;
+ int Number_Of_Runs;
+ ktime_t Begin_Time, End_Time;
+ u32 User_Time;
+
+ /* Initializations */
+
+ Next_Ptr_Glob = (Rec_Pointer)kzalloc(sizeof(Rec_Type), GFP_ATOMIC);
+ if (!Next_Ptr_Glob)
+ return -ENOMEM;
+
+ Ptr_Glob = (Rec_Pointer)kzalloc(sizeof(Rec_Type), GFP_ATOMIC);
+ if (!Ptr_Glob) {
+ kfree(Next_Ptr_Glob);
+ return -ENOMEM;
+ }
+
+ Ptr_Glob->Ptr_Comp = Next_Ptr_Glob;
+ Ptr_Glob->Discr = Ident_1;
+ Ptr_Glob->variant.var_1.Enum_Comp = Ident_3;
+ Ptr_Glob->variant.var_1.Int_Comp = 40;
+ strcpy(Ptr_Glob->variant.var_1.Str_Comp,
+ "DHRYSTONE PROGRAM, SOME STRING");
+ strcpy(Str_1_Loc, "DHRYSTONE PROGRAM, 1'ST STRING");
+
+ Arr_2_Glob[8][7] = 10;
+ /* Was missing in published program. Without this statement, */
+ /* Arr_2_Glob[8][7] would have an undefined value. */
+ /* Warning: With 16-Bit processors and Number_Of_Runs > 32000, */
+ /* overflow may occur for this array element. */
+
+ pr_debug("Dhrystone Benchmark, Version 2.1 (Language: C)\n");
+
+ Number_Of_Runs = n;
+
+ pr_debug("Execution starts, %d runs through Dhrystone\n",
+ Number_Of_Runs);
+
+ /***************/
+ /* Start timer */
+ /***************/
+
+ Begin_Time = ktime_get();
+
+ for (Run_Index = 1; Run_Index <= Number_Of_Runs; ++Run_Index) {
+ Proc_5();
+ Proc_4();
+ /* Ch_1_Glob == 'A', Ch_2_Glob == 'B', Bool_Glob == true */
+ Int_1_Loc = 2;
+ Int_2_Loc = 3;
+ strcpy(Str_2_Loc, "DHRYSTONE PROGRAM, 2'ND STRING");
+ Enum_Loc = Ident_2;
+ Bool_Glob = !Func_2(Str_1_Loc, Str_2_Loc);
+ /* Bool_Glob == 1 */
+ while (Int_1_Loc < Int_2_Loc) {
+ /* loop body executed once */
+ Int_3_Loc = 5 * Int_1_Loc - Int_2_Loc;
+ /* Int_3_Loc == 7 */
+ Proc_7(Int_1_Loc, Int_2_Loc, &Int_3_Loc);
+ /* Int_3_Loc == 7 */
+ Int_1_Loc += 1;
+ } /* while */
+ /* Int_1_Loc == 3, Int_2_Loc == 3, Int_3_Loc == 7 */
+ Proc_8(Arr_1_Glob, Arr_2_Glob, Int_1_Loc, Int_3_Loc);
+ /* Int_Glob == 5 */
+ Proc_1(Ptr_Glob);
+ for (Ch_Index = 'A'; Ch_Index <= Ch_2_Glob; ++Ch_Index) {
+ /* loop body executed twice */
+ if (Enum_Loc == Func_1(Ch_Index, 'C')) {
+ /* then, not executed */
+ Proc_6(Ident_1, &Enum_Loc);
+ strcpy(Str_2_Loc, "DHRYSTONE PROGRAM, 3'RD STRING");
+ Int_2_Loc = Run_Index;
+ Int_Glob = Run_Index;
+ }
+ }
+ /* Int_1_Loc == 3, Int_2_Loc == 3, Int_3_Loc == 7 */
+ Int_2_Loc = Int_2_Loc * Int_1_Loc;
+ Int_1_Loc = Int_2_Loc / Int_3_Loc;
+ Int_2_Loc = 7 * (Int_2_Loc - Int_3_Loc) - Int_1_Loc;
+ /* Int_1_Loc == 1, Int_2_Loc == 13, Int_3_Loc == 7 */
+ Proc_2(&Int_1_Loc);
+ /* Int_1_Loc == 5 */
+
+ } /* loop "for Run_Index" */
+
+ /**************/
+ /* Stop timer */
+ /**************/
+
+ End_Time = ktime_get();
+
+#define dhry_assert_int_eq(val, expected) \
+ do { \
+ if (val != expected) \
+ pr_err("%s: %d (FAIL, expected %d)\n", #val, val, \
+ expected); \
+ else \
+ pr_debug("%s: %d (OK)\n", #val, val); \
+ } while (0)
+
+#define dhry_assert_char_eq(val, expected) \
+ do { \
+ if (val != expected) \
+ pr_err("%s: %c (FAIL, expected %c)\n", #val, val, \
+ expected); \
+ else \
+ pr_debug("%s: %c (OK)\n", #val, val); \
+ } while (0)
+
+#define dhry_assert_string_eq(val, expected) \
+ do { \
+ if (strcmp(val, expected)) \
+ pr_err("%s: %s (FAIL, expected %s)\n", #val, val, \
+ expected); \
+ else \
+ pr_debug("%s: %s (OK)\n", #val, val); \
+ } while (0)
+
+ pr_debug("Execution ends\n");
+ pr_debug("Final values of the variables used in the benchmark:\n");
+ dhry_assert_int_eq(Int_Glob, 5);
+ dhry_assert_int_eq(Bool_Glob, 1);
+ dhry_assert_char_eq(Ch_1_Glob, 'A');
+ dhry_assert_char_eq(Ch_2_Glob, 'B');
+ dhry_assert_int_eq(Arr_1_Glob[8], 7);
+ dhry_assert_int_eq(Arr_2_Glob[8][7], Number_Of_Runs + 10);
+ pr_debug("Ptr_Comp: %px\n", Ptr_Glob->Ptr_Comp);
+ dhry_assert_int_eq(Ptr_Glob->Discr, 0);
+ dhry_assert_int_eq(Ptr_Glob->variant.var_1.Enum_Comp, 2);
+ dhry_assert_int_eq(Ptr_Glob->variant.var_1.Int_Comp, 17);
+ dhry_assert_string_eq(Ptr_Glob->variant.var_1.Str_Comp,
+ "DHRYSTONE PROGRAM, SOME STRING");
+ if (Next_Ptr_Glob->Ptr_Comp != Ptr_Glob->Ptr_Comp)
+ pr_err("Next_Ptr_Glob->Ptr_Comp: %px (expected %px)\n",
+ Next_Ptr_Glob->Ptr_Comp, Ptr_Glob->Ptr_Comp);
+ else
+ pr_debug("Next_Ptr_Glob->Ptr_Comp: %px\n",
+ Next_Ptr_Glob->Ptr_Comp);
+ dhry_assert_int_eq(Next_Ptr_Glob->Discr, 0);
+ dhry_assert_int_eq(Next_Ptr_Glob->variant.var_1.Enum_Comp, 1);
+ dhry_assert_int_eq(Next_Ptr_Glob->variant.var_1.Int_Comp, 18);
+ dhry_assert_string_eq(Next_Ptr_Glob->variant.var_1.Str_Comp,
+ "DHRYSTONE PROGRAM, SOME STRING");
+ dhry_assert_int_eq(Int_1_Loc, 5);
+ dhry_assert_int_eq(Int_2_Loc, 13);
+ dhry_assert_int_eq(Int_3_Loc, 7);
+ dhry_assert_int_eq(Enum_Loc, 1);
+ dhry_assert_string_eq(Str_1_Loc, "DHRYSTONE PROGRAM, 1'ST STRING");
+ dhry_assert_string_eq(Str_2_Loc, "DHRYSTONE PROGRAM, 2'ND STRING");
+
+ User_Time = ktime_to_ms(ktime_sub(End_Time, Begin_Time));
+
+ kfree(Ptr_Glob);
+ kfree(Next_Ptr_Glob);
+
+ /* Measurements should last at least 2 seconds */
+ if (User_Time < 2 * MSEC_PER_SEC)
+ return -EAGAIN;
+
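+ /*
+ * Score = Dhrystones per second = runs / elapsed seconds, computed
+ * here as MSEC_PER_SEC * runs / elapsed_ms. Worked example with
+ * illustrative numbers: 1800000 runs in 2000 ms give
+ * 1000 * 1800000 / 2000 = 900000 Dhrystones per second.
+ */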
+ return div_u64(mul_u32_u32(MSEC_PER_SEC, Number_Of_Runs), User_Time);
+}
diff --git a/lib/dhry_2.c b/lib/dhry_2.c
new file mode 100644
index 000000000..c19e661f3
--- /dev/null
+++ b/lib/dhry_2.c
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/*
+ ****************************************************************************
+ *
+ * "DHRYSTONE" Benchmark Program
+ * -----------------------------
+ *
+ * Version: C, Version 2.1
+ *
+ * File: dhry_2.c (part 3 of 3)
+ *
+ * Date: May 25, 1988
+ *
+ * Author: Reinhold P. Weicker
+ *
+ ****************************************************************************
+ */
+
+#include "dhry.h"
+
+#include <linux/string.h>
+
+
+static Boolean Func_3(Enumeration Enum_Par_Val)
+/***************************/
+/* executed once */
+/* Enum_Par_Val == Ident_3 */
+{
+ Enumeration Enum_Loc;
+
+ Enum_Loc = Enum_Par_Val;
+ if (Enum_Loc == Ident_3) {
+ /* then, executed */
+ return true;
+ } else {
+ /* not executed */
+ return false;
+ }
+} /* Func_3 */
+
+
+void Proc_6(Enumeration Enum_Val_Par, Enumeration *Enum_Ref_Par)
+/*********************************/
+/* executed once */
+/* Enum_Val_Par == Ident_3, Enum_Ref_Par becomes Ident_2 */
+{
+ *Enum_Ref_Par = Enum_Val_Par;
+ if (!Func_3(Enum_Val_Par)) {
+ /* then, not executed */
+ *Enum_Ref_Par = Ident_4;
+ }
+ switch (Enum_Val_Par) {
+ case Ident_1:
+ *Enum_Ref_Par = Ident_1;
+ break;
+ case Ident_2:
+ if (Int_Glob > 100) {
+ /* then */
+ *Enum_Ref_Par = Ident_1;
+ } else {
+ *Enum_Ref_Par = Ident_4;
+ }
+ break;
+ case Ident_3: /* executed */
+ *Enum_Ref_Par = Ident_2;
+ break;
+ case Ident_4:
+ break;
+ case Ident_5:
+ *Enum_Ref_Par = Ident_3;
+ break;
+ } /* switch */
+} /* Proc_6 */
+
+
+void Proc_7(One_Fifty Int_1_Par_Val, One_Fifty Int_2_Par_Val, One_Fifty *Int_Par_Ref)
+/**********************************************/
+/* executed three times */
+/* first call: Int_1_Par_Val == 2, Int_2_Par_Val == 3, */
+/* Int_Par_Ref becomes 7 */
+/* second call: Int_1_Par_Val == 10, Int_2_Par_Val == 5, */
+/* Int_Par_Ref becomes 17 */
+/* third call: Int_1_Par_Val == 6, Int_2_Par_Val == 10, */
+/* Int_Par_Ref becomes 18 */
+{
+ One_Fifty Int_Loc;
+
+ Int_Loc = Int_1_Par_Val + 2;
+ *Int_Par_Ref = Int_2_Par_Val + Int_Loc;
+} /* Proc_7 */
+
+
+void Proc_8(Arr_1_Dim Arr_1_Par_Ref, Arr_2_Dim Arr_2_Par_Ref, int Int_1_Par_Val, int Int_2_Par_Val)
+/*********************************************************************/
+/* executed once */
+/* Int_1_Par_Val == 3 */
+/* Int_2_Par_Val == 7 */
+{
+ One_Fifty Int_Index;
+ One_Fifty Int_Loc;
+
+ Int_Loc = Int_1_Par_Val + 5;
+ Arr_1_Par_Ref[Int_Loc] = Int_2_Par_Val;
+ Arr_1_Par_Ref[Int_Loc+1] = Arr_1_Par_Ref[Int_Loc];
+ Arr_1_Par_Ref[Int_Loc+30] = Int_Loc;
+ for (Int_Index = Int_Loc; Int_Index <= Int_Loc+1; ++Int_Index)
+ Arr_2_Par_Ref[Int_Loc][Int_Index] = Int_Loc;
+ Arr_2_Par_Ref[Int_Loc][Int_Loc-1] += 1;
+ Arr_2_Par_Ref[Int_Loc+20][Int_Loc] = Arr_1_Par_Ref[Int_Loc];
+ Int_Glob = 5;
+} /* Proc_8 */
+
+
+Enumeration Func_1(Capital_Letter Ch_1_Par_Val, Capital_Letter Ch_2_Par_Val)
+/*************************************************/
+/* executed three times */
+/* first call: Ch_1_Par_Val == 'H', Ch_2_Par_Val == 'R' */
+/* second call: Ch_1_Par_Val == 'A', Ch_2_Par_Val == 'C' */
+/* third call: Ch_1_Par_Val == 'B', Ch_2_Par_Val == 'C' */
+{
+ Capital_Letter Ch_1_Loc;
+ Capital_Letter Ch_2_Loc;
+
+ Ch_1_Loc = Ch_1_Par_Val;
+ Ch_2_Loc = Ch_1_Loc;
+ if (Ch_2_Loc != Ch_2_Par_Val) {
+ /* then, executed */
+ return Ident_1;
+ } else {
+ /* not executed */
+ Ch_1_Glob = Ch_1_Loc;
+ return Ident_2;
+ }
+} /* Func_1 */
+
+
+Boolean Func_2(Str_30 Str_1_Par_Ref, Str_30 Str_2_Par_Ref)
+/*************************************************/
+/* executed once */
+/* Str_1_Par_Ref == "DHRYSTONE PROGRAM, 1'ST STRING" */
+/* Str_2_Par_Ref == "DHRYSTONE PROGRAM, 2'ND STRING" */
+{
+ One_Thirty Int_Loc;
+ Capital_Letter Ch_Loc;
+
+ Int_Loc = 2;
+ while (Int_Loc <= 2) {
+ /* loop body executed once */
+ if (Func_1(Str_1_Par_Ref[Int_Loc],
+ Str_2_Par_Ref[Int_Loc+1]) == Ident_1) {
+ /* then, executed */
+ Ch_Loc = 'A';
+ Int_Loc += 1;
+ }
+ } /* if, while */
+ if (Ch_Loc >= 'W' && Ch_Loc < 'Z') {
+ /* then, not executed */
+ Int_Loc = 7;
+ }
+ if (Ch_Loc == 'R') {
+ /* then, not executed */
+ return true;
+ } else {
+ /* executed */
+ if (strcmp(Str_1_Par_Ref, Str_2_Par_Ref) > 0) {
+ /* then, not executed */
+ Int_Loc += 7;
+ Int_Glob = Int_Loc;
+ return true;
+ } else {
+ /* executed */
+ return false;
+ }
+ } /* if Ch_Loc */
+} /* Func_2 */
diff --git a/lib/dhry_run.c b/lib/dhry_run.c
new file mode 100644
index 000000000..f15ac666e
--- /dev/null
+++ b/lib/dhry_run.c
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Dhrystone benchmark test module
+ *
+ * Copyright (C) 2022 Glider bv
+ */
+
+#include "dhry.h"
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/mutex.h>
+#include <linux/smp.h>
+
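+/*
+ * Dhrystones-per-second score of the VAX 11/780, the traditional
+ * "1 MIPS" reference machine; dividing a score by DHRY_VAX gives DMIPS.
+ */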
+#define DHRY_VAX 1757
+
+static int dhry_run_set(const char *val, const struct kernel_param *kp);
+static const struct kernel_param_ops run_ops = {
+ .flags = KERNEL_PARAM_OPS_FL_NOARG,
+ .set = dhry_run_set,
+};
+static bool dhry_run;
+module_param_cb(run, &run_ops, &dhry_run, 0200);
+MODULE_PARM_DESC(run, "Run the test (default: false)");
+
+static int iterations = -1;
+module_param(iterations, int, 0644);
+MODULE_PARM_DESC(iterations,
+ "Number of iterations through the benchmark (default: auto)");
+
+static void dhry_benchmark(void)
+{
+ unsigned int cpu = get_cpu();
+ int i, n;
+
+ if (iterations > 0) {
+ n = dhry(iterations);
+ goto report;
+ }
+
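+ /*
+ * Auto-calibrate: start at the VAX baseline and keep doubling the
+ * iteration count until dhry() stops returning -EAGAIN, i.e. until
+ * a run lasts at least two seconds.
+ */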
+ for (i = DHRY_VAX; i > 0; i <<= 1) {
+ n = dhry(i);
+ if (n != -EAGAIN)
+ break;
+ }
+
+report:
+ put_cpu();
+ if (n >= 0)
+ pr_info("CPU%u: Dhrystones per Second: %d (%d DMIPS)\n", cpu,
+ n, n / DHRY_VAX);
+ else if (n == -EAGAIN)
+ pr_err("Please increase the number of iterations\n");
+ else
+ pr_err("Dhrystone benchmark failed: %pe\n", ERR_PTR(n));
+}
+
+static int dhry_run_set(const char *val, const struct kernel_param *kp)
+{
+ int ret;
+
+ if (val) {
+ ret = param_set_bool(val, kp);
+ if (ret)
+ return ret;
+ } else {
+ dhry_run = true;
+ }
+
+ if (dhry_run && system_state == SYSTEM_RUNNING)
+ dhry_benchmark();
+
+ return 0;
+}
+
+static int __init dhry_init(void)
+{
+ if (dhry_run)
+ dhry_benchmark();
+
+ return 0;
+}
+module_init(dhry_init);
+
+MODULE_AUTHOR("Geert Uytterhoeven <geert+renesas@glider.be>");
+MODULE_LICENSE("GPL");
diff --git a/lib/digsig.c b/lib/digsig.c
new file mode 100644
index 000000000..04b5e55ed
--- /dev/null
+++ b/lib/digsig.c
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2011 Nokia Corporation
+ * Copyright (C) 2011 Intel Corporation
+ *
+ * Author:
+ * Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
+ * <dmitry.kasatkin@intel.com>
+ *
+ * File: sign.c
+ * implements signature (RSA) verification
+ * pkcs decoding is based on LibTomCrypt code
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/key.h>
+#include <linux/crypto.h>
+#include <crypto/hash.h>
+#include <crypto/sha1.h>
+#include <keys/user-type.h>
+#include <linux/mpi.h>
+#include <linux/digsig.h>
+
+static struct crypto_shash *shash;
+
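+/*
+ * Locate the payload of an EMSA-PKCS1-v1_5 encoded message
+ * (RFC 8017, sec. 9.2):
+ *
+ * EM = 0x00 || 0x01 || PS || 0x00 || T
+ *
+ * where PS is a padding string of 0xFF octets and T is the DigestInfo
+ * blob, treated here as an opaque byte string. On success a pointer to
+ * T within @msg is returned and its length is stored in *outlen.
+ */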
+static const char *pkcs_1_v1_5_decode_emsa(const unsigned char *msg,
+ unsigned long msglen,
+ unsigned long modulus_bitlen,
+ unsigned long *outlen)
+{
+ unsigned long modulus_len, ps_len, i;
+
+ modulus_len = (modulus_bitlen >> 3) + (modulus_bitlen & 7 ? 1 : 0);
+
+ /* test message size */
+ if ((msglen > modulus_len) || (modulus_len < 11))
+ return NULL;
+
+ /* separate encoded message */
+ if (msg[0] != 0x00 || msg[1] != 0x01)
+ return NULL;
+
+ for (i = 2; i < modulus_len - 1; i++)
+ if (msg[i] != 0xFF)
+ break;
+
+ /* separator check */
+ if (msg[i] != 0)
+ /*
+ * There was no octet with hexadecimal value 0x00
+ * to separate ps from m.
+ */
+ return NULL;
+
+ ps_len = i - 2;
+
+ *outlen = (msglen - (2 + ps_len + 1));
+
+ return msg + 2 + ps_len + 1;
+}
+
+/*
+ * RSA Signature verification with public key
+ */
+static int digsig_verify_rsa(struct key *key,
+ const char *sig, int siglen,
+ const char *h, int hlen)
+{
+ int err = -EINVAL;
+ unsigned long len;
+ unsigned long mlen, mblen;
+ unsigned nret, l;
+ int head, i;
+ unsigned char *out1 = NULL;
+ const char *m;
+ MPI in = NULL, res = NULL, pkey[2];
+ uint8_t *p, *datap;
+ const uint8_t *endp;
+ const struct user_key_payload *ukp;
+ struct pubkey_hdr *pkh;
+
+ down_read(&key->sem);
+ ukp = user_key_payload_locked(key);
+
+ if (!ukp) {
+ /* key was revoked before we acquired its semaphore */
+ err = -EKEYREVOKED;
+ goto err1;
+ }
+
+ if (ukp->datalen < sizeof(*pkh))
+ goto err1;
+
+ pkh = (struct pubkey_hdr *)ukp->data;
+
+ if (pkh->version != 1)
+ goto err1;
+
+ if (pkh->algo != PUBKEY_ALGO_RSA)
+ goto err1;
+
+ if (pkh->nmpi != 2)
+ goto err1;
+
+ datap = pkh->mpi;
+ endp = ukp->data + ukp->datalen;
+
+ for (i = 0; i < pkh->nmpi; i++) {
+ unsigned int remaining = endp - datap;
+ pkey[i] = mpi_read_from_buffer(datap, &remaining);
+ if (IS_ERR(pkey[i])) {
+ err = PTR_ERR(pkey[i]);
+ goto err;
+ }
+ datap += remaining;
+ }
+
+ mblen = mpi_get_nbits(pkey[0]);
+ mlen = DIV_ROUND_UP(mblen, 8);
+
+ if (mlen == 0) {
+ err = -EINVAL;
+ goto err;
+ }
+
+ err = -ENOMEM;
+
+ out1 = kzalloc(mlen, GFP_KERNEL);
+ if (!out1)
+ goto err;
+
+ nret = siglen;
+ in = mpi_read_from_buffer(sig, &nret);
+ if (IS_ERR(in)) {
+ err = PTR_ERR(in);
+ goto err;
+ }
+
+ res = mpi_alloc(mpi_get_nlimbs(in) * 2);
+ if (!res)
+ goto err;
+
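+ /* RSA verify primitive: res = sig^e mod n, with pkey[0] = n, pkey[1] = e */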
+ err = mpi_powm(res, in, pkey[1], pkey[0]);
+ if (err)
+ goto err;
+
+ if (mpi_get_nlimbs(res) * BYTES_PER_MPI_LIMB > mlen) {
+ err = -EINVAL;
+ goto err;
+ }
+
+ p = mpi_get_buffer(res, &l, NULL);
+ if (!p) {
+ err = -EINVAL;
+ goto err;
+ }
+
+ len = mlen;
+ head = len - l;
+ memset(out1, 0, head);
+ memcpy(out1 + head, p, l);
+
+ kfree(p);
+
+ m = pkcs_1_v1_5_decode_emsa(out1, len, mblen, &len);
+
+ if (!m || len != hlen || memcmp(m, h, hlen))
+ err = -EINVAL;
+
+err:
+ mpi_free(in);
+ mpi_free(res);
+ kfree(out1);
+ while (--i >= 0)
+ mpi_free(pkey[i]);
+err1:
+ up_read(&key->sem);
+
+ return err;
+}
+
+/**
+ * digsig_verify() - digital signature verification with public key
+ * @keyring: keyring to search key in
+ * @sig: digital signature
+ * @siglen: length of the signature
+ * @data: data
+ * @datalen: length of the data
+ *
+ * Returns 0 on success, a negative errno otherwise
+ *
+ * Verifies data integrity against a digital signature.
+ * Currently only RSA is supported.
+ * Normally a hash of the content is used as the data for this function.
+ *
+ */
+int digsig_verify(struct key *keyring, const char *sig, int siglen,
+ const char *data, int datalen)
+{
+ int err = -ENOMEM;
+ struct signature_hdr *sh = (struct signature_hdr *)sig;
+ struct shash_desc *desc = NULL;
+ unsigned char hash[SHA1_DIGEST_SIZE];
+ struct key *key;
+ char name[20];
+
+ if (siglen < sizeof(*sh) + 2)
+ return -EINVAL;
+
+ if (sh->algo != PUBKEY_ALGO_RSA)
+ return -ENOTSUPP;
+
+ sprintf(name, "%llX", __be64_to_cpup((uint64_t *)sh->keyid));
+
+ if (keyring) {
+ /* search in specific keyring */
+ key_ref_t kref;
+ kref = keyring_search(make_key_ref(keyring, 1UL),
+ &key_type_user, name, true);
+ if (IS_ERR(kref))
+ key = ERR_CAST(kref);
+ else
+ key = key_ref_to_ptr(kref);
+ } else {
+ key = request_key(&key_type_user, name, NULL);
+ }
+ if (IS_ERR(key)) {
+ pr_err("key not found, id: %s\n", name);
+ return PTR_ERR(key);
+ }
+
+ desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(shash),
+ GFP_KERNEL);
+ if (!desc)
+ goto err;
+
+ desc->tfm = shash;
+
+ crypto_shash_init(desc);
+ crypto_shash_update(desc, data, datalen);
+ crypto_shash_update(desc, sig, sizeof(*sh));
+ crypto_shash_final(desc, hash);
+
+ kfree(desc);
+
+ /* pass signature mpis address */
+ err = digsig_verify_rsa(key, sig + sizeof(*sh), siglen - sizeof(*sh),
+ hash, sizeof(hash));
+
+err:
+ key_put(key);
+
+ return err ? -EINVAL : 0;
+}
+EXPORT_SYMBOL_GPL(digsig_verify);
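+
+/*
+ * Hedged usage sketch (hypothetical caller, not part of this file):
+ * with a NULL @keyring the key is looked up via request_key(), i.e. in
+ * the calling process's keyrings:
+ *
+ *     err = digsig_verify(NULL, sig, siglen, hash, sizeof(hash));
+ */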
+
+static int __init digsig_init(void)
+{
+ shash = crypto_alloc_shash("sha1", 0, 0);
+ if (IS_ERR(shash)) {
+ pr_err("shash allocation failed\n");
+ return PTR_ERR(shash);
+ }
+
+ return 0;
+}
+
+static void __exit digsig_cleanup(void)
+{
+ crypto_free_shash(shash);
+}
+
+module_init(digsig_init);
+module_exit(digsig_cleanup);
+
+MODULE_LICENSE("GPL");
diff --git a/lib/dim/Makefile b/lib/dim/Makefile
new file mode 100644
index 000000000..1d6858a10
--- /dev/null
+++ b/lib/dim/Makefile
@@ -0,0 +1,7 @@
+#
+# DIM Dynamic Interrupt Moderation library
+#
+
+obj-$(CONFIG_DIMLIB) += dim.o
+
+dim-y := dim.o net_dim.o rdma_dim.o
diff --git a/lib/dim/dim.c b/lib/dim/dim.c
new file mode 100644
index 000000000..e89aaf07b
--- /dev/null
+++ b/lib/dim/dim.c
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2019, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include <linux/dim.h>
+
+bool dim_on_top(struct dim *dim)
+{
+ switch (dim->tune_state) {
+ case DIM_PARKING_ON_TOP:
+ case DIM_PARKING_TIRED:
+ return true;
+ case DIM_GOING_RIGHT:
+ return (dim->steps_left > 1) && (dim->steps_right == 1);
+ default: /* DIM_GOING_LEFT */
+ return (dim->steps_right > 1) && (dim->steps_left == 1);
+ }
+}
+EXPORT_SYMBOL(dim_on_top);
+
+void dim_turn(struct dim *dim)
+{
+ switch (dim->tune_state) {
+ case DIM_PARKING_ON_TOP:
+ case DIM_PARKING_TIRED:
+ break;
+ case DIM_GOING_RIGHT:
+ dim->tune_state = DIM_GOING_LEFT;
+ dim->steps_left = 0;
+ break;
+ case DIM_GOING_LEFT:
+ dim->tune_state = DIM_GOING_RIGHT;
+ dim->steps_right = 0;
+ break;
+ }
+}
+EXPORT_SYMBOL(dim_turn);
+
+void dim_park_on_top(struct dim *dim)
+{
+ dim->steps_right = 0;
+ dim->steps_left = 0;
+ dim->tired = 0;
+ dim->tune_state = DIM_PARKING_ON_TOP;
+}
+EXPORT_SYMBOL(dim_park_on_top);
+
+void dim_park_tired(struct dim *dim)
+{
+ dim->steps_right = 0;
+ dim->steps_left = 0;
+ dim->tune_state = DIM_PARKING_TIRED;
+}
+EXPORT_SYMBOL(dim_park_tired);
+
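+/*
+ * Derive per-millisecond rates from two counter samples: packets (ppms),
+ * bytes (bpms), events (epms) and completions (cpms), plus the
+ * completions-per-event ratio in percent (cpe_ratio). Returns false if
+ * no time has elapsed between the two samples.
+ */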
+bool dim_calc_stats(struct dim_sample *start, struct dim_sample *end,
+ struct dim_stats *curr_stats)
+{
+ /* u32 holds up to 71 minutes, should be enough */
+ u32 delta_us = ktime_us_delta(end->time, start->time);
+ u32 npkts = BIT_GAP(BITS_PER_TYPE(u32), end->pkt_ctr, start->pkt_ctr);
+ u32 nbytes = BIT_GAP(BITS_PER_TYPE(u32), end->byte_ctr,
+ start->byte_ctr);
+ u32 ncomps = BIT_GAP(BITS_PER_TYPE(u32), end->comp_ctr,
+ start->comp_ctr);
+
+ if (!delta_us)
+ return false;
+
+ curr_stats->ppms = DIV_ROUND_UP(npkts * USEC_PER_MSEC, delta_us);
+ curr_stats->bpms = DIV_ROUND_UP(nbytes * USEC_PER_MSEC, delta_us);
+ curr_stats->epms = DIV_ROUND_UP(DIM_NEVENTS * USEC_PER_MSEC,
+ delta_us);
+ curr_stats->cpms = DIV_ROUND_UP(ncomps * USEC_PER_MSEC, delta_us);
+ if (curr_stats->epms != 0)
+ curr_stats->cpe_ratio = DIV_ROUND_DOWN_ULL(
+ curr_stats->cpms * 100, curr_stats->epms);
+ else
+ curr_stats->cpe_ratio = 0;
+
+ return true;
+}
+EXPORT_SYMBOL(dim_calc_stats);
diff --git a/lib/dim/net_dim.c b/lib/dim/net_dim.c
new file mode 100644
index 000000000..4e32f7aaa
--- /dev/null
+++ b/lib/dim/net_dim.c
@@ -0,0 +1,247 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include <linux/dim.h>
+
+/*
+ * Net DIM profiles:
+ * There is a different set of profiles for each CQ period mode.
+ * There are different sets of profiles for RX and TX CQs.
+ * Each profile set must contain NET_DIM_PARAMS_NUM_PROFILES entries.
+ */
+#define NET_DIM_PARAMS_NUM_PROFILES 5
+#define NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE 256
+#define NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE 128
+#define NET_DIM_DEF_PROFILE_CQE 1
+#define NET_DIM_DEF_PROFILE_EQE 1
+
+#define NET_DIM_RX_EQE_PROFILES { \
+ {.usec = 1, .pkts = NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE,}, \
+ {.usec = 8, .pkts = NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE,}, \
+ {.usec = 64, .pkts = NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE,}, \
+ {.usec = 128, .pkts = NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE,}, \
+ {.usec = 256, .pkts = NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE,} \
+}
+
+#define NET_DIM_RX_CQE_PROFILES { \
+ {.usec = 2, .pkts = 256,}, \
+ {.usec = 8, .pkts = 128,}, \
+ {.usec = 16, .pkts = 64,}, \
+ {.usec = 32, .pkts = 64,}, \
+ {.usec = 64, .pkts = 64,} \
+}
+
+#define NET_DIM_TX_EQE_PROFILES { \
+ {.usec = 1, .pkts = NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE,}, \
+ {.usec = 8, .pkts = NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE,}, \
+ {.usec = 32, .pkts = NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE,}, \
+ {.usec = 64, .pkts = NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE,}, \
+ {.usec = 128, .pkts = NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE,} \
+}
+
+#define NET_DIM_TX_CQE_PROFILES { \
+ {.usec = 5, .pkts = 128,}, \
+ {.usec = 8, .pkts = 64,}, \
+ {.usec = 16, .pkts = 32,}, \
+ {.usec = 32, .pkts = 32,}, \
+ {.usec = 64, .pkts = 32,} \
+}
+
+static const struct dim_cq_moder
+rx_profile[DIM_CQ_PERIOD_NUM_MODES][NET_DIM_PARAMS_NUM_PROFILES] = {
+ NET_DIM_RX_EQE_PROFILES,
+ NET_DIM_RX_CQE_PROFILES,
+};
+
+static const struct dim_cq_moder
+tx_profile[DIM_CQ_PERIOD_NUM_MODES][NET_DIM_PARAMS_NUM_PROFILES] = {
+ NET_DIM_TX_EQE_PROFILES,
+ NET_DIM_TX_CQE_PROFILES,
+};
+
+struct dim_cq_moder
+net_dim_get_rx_moderation(u8 cq_period_mode, int ix)
+{
+ struct dim_cq_moder cq_moder = rx_profile[cq_period_mode][ix];
+
+ cq_moder.cq_period_mode = cq_period_mode;
+ return cq_moder;
+}
+EXPORT_SYMBOL(net_dim_get_rx_moderation);
+
+struct dim_cq_moder
+net_dim_get_def_rx_moderation(u8 cq_period_mode)
+{
+ u8 profile_ix = cq_period_mode == DIM_CQ_PERIOD_MODE_START_FROM_CQE ?
+ NET_DIM_DEF_PROFILE_CQE : NET_DIM_DEF_PROFILE_EQE;
+
+ return net_dim_get_rx_moderation(cq_period_mode, profile_ix);
+}
+EXPORT_SYMBOL(net_dim_get_def_rx_moderation);
+
+struct dim_cq_moder
+net_dim_get_tx_moderation(u8 cq_period_mode, int ix)
+{
+ struct dim_cq_moder cq_moder = tx_profile[cq_period_mode][ix];
+
+ cq_moder.cq_period_mode = cq_period_mode;
+ return cq_moder;
+}
+EXPORT_SYMBOL(net_dim_get_tx_moderation);
+
+struct dim_cq_moder
+net_dim_get_def_tx_moderation(u8 cq_period_mode)
+{
+ u8 profile_ix = cq_period_mode == DIM_CQ_PERIOD_MODE_START_FROM_CQE ?
+ NET_DIM_DEF_PROFILE_CQE : NET_DIM_DEF_PROFILE_EQE;
+
+ return net_dim_get_tx_moderation(cq_period_mode, profile_ix);
+}
+EXPORT_SYMBOL(net_dim_get_def_tx_moderation);
+
+static int net_dim_step(struct dim *dim)
+{
+ if (dim->tired == (NET_DIM_PARAMS_NUM_PROFILES * 2))
+ return DIM_TOO_TIRED;
+
+ switch (dim->tune_state) {
+ case DIM_PARKING_ON_TOP:
+ case DIM_PARKING_TIRED:
+ break;
+ case DIM_GOING_RIGHT:
+ if (dim->profile_ix == (NET_DIM_PARAMS_NUM_PROFILES - 1))
+ return DIM_ON_EDGE;
+ dim->profile_ix++;
+ dim->steps_right++;
+ break;
+ case DIM_GOING_LEFT:
+ if (dim->profile_ix == 0)
+ return DIM_ON_EDGE;
+ dim->profile_ix--;
+ dim->steps_left++;
+ break;
+ }
+
+ dim->tired++;
+ return DIM_STEPPED;
+}
+
+static void net_dim_exit_parking(struct dim *dim)
+{
+ dim->tune_state = dim->profile_ix ? DIM_GOING_LEFT : DIM_GOING_RIGHT;
+ net_dim_step(dim);
+}
+
+static int net_dim_stats_compare(struct dim_stats *curr,
+ struct dim_stats *prev)
+{
+ if (!prev->bpms)
+ return curr->bpms ? DIM_STATS_BETTER : DIM_STATS_SAME;
+
+ if (IS_SIGNIFICANT_DIFF(curr->bpms, prev->bpms))
+ return (curr->bpms > prev->bpms) ? DIM_STATS_BETTER :
+ DIM_STATS_WORSE;
+
+ if (!prev->ppms)
+ return curr->ppms ? DIM_STATS_BETTER :
+ DIM_STATS_SAME;
+
+ if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms))
+ return (curr->ppms > prev->ppms) ? DIM_STATS_BETTER :
+ DIM_STATS_WORSE;
+
+ if (!prev->epms)
+ return DIM_STATS_SAME;
+
+ if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms))
+ return (curr->epms < prev->epms) ? DIM_STATS_BETTER :
+ DIM_STATS_WORSE;
+
+ return DIM_STATS_SAME;
+}
+
+static bool net_dim_decision(struct dim_stats *curr_stats, struct dim *dim)
+{
+ int prev_state = dim->tune_state;
+ int prev_ix = dim->profile_ix;
+ int stats_res;
+ int step_res;
+
+ switch (dim->tune_state) {
+ case DIM_PARKING_ON_TOP:
+ stats_res = net_dim_stats_compare(curr_stats,
+ &dim->prev_stats);
+ if (stats_res != DIM_STATS_SAME)
+ net_dim_exit_parking(dim);
+ break;
+
+ case DIM_PARKING_TIRED:
+ dim->tired--;
+ if (!dim->tired)
+ net_dim_exit_parking(dim);
+ break;
+
+ case DIM_GOING_RIGHT:
+ case DIM_GOING_LEFT:
+ stats_res = net_dim_stats_compare(curr_stats,
+ &dim->prev_stats);
+ if (stats_res != DIM_STATS_BETTER)
+ dim_turn(dim);
+
+ if (dim_on_top(dim)) {
+ dim_park_on_top(dim);
+ break;
+ }
+
+ step_res = net_dim_step(dim);
+ switch (step_res) {
+ case DIM_ON_EDGE:
+ dim_park_on_top(dim);
+ break;
+ case DIM_TOO_TIRED:
+ dim_park_tired(dim);
+ break;
+ }
+
+ break;
+ }
+
+ if (prev_state != DIM_PARKING_ON_TOP ||
+ dim->tune_state != DIM_PARKING_ON_TOP)
+ dim->prev_stats = *curr_stats;
+
+ return dim->profile_ix != prev_ix;
+}
+
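+/*
+ * Hedged sketch of the intended driver flow (cf. the net_dim
+ * documentation): the napi poll routine fills a struct dim_sample via
+ * dim_update_sample() and passes it here. When the decision changes the
+ * profile, dim->work is scheduled; the driver's work handler reads
+ * dim->profile_ix, programs the new moderation into the device and sets
+ * dim->state back to DIM_START_MEASURE.
+ */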
+void net_dim(struct dim *dim, struct dim_sample end_sample)
+{
+ struct dim_stats curr_stats;
+ u16 nevents;
+
+ switch (dim->state) {
+ case DIM_MEASURE_IN_PROGRESS:
+ nevents = BIT_GAP(BITS_PER_TYPE(u16),
+ end_sample.event_ctr,
+ dim->start_sample.event_ctr);
+ if (nevents < DIM_NEVENTS)
+ break;
+ if (!dim_calc_stats(&dim->start_sample, &end_sample, &curr_stats))
+ break;
+ if (net_dim_decision(&curr_stats, dim)) {
+ dim->state = DIM_APPLY_NEW_PROFILE;
+ schedule_work(&dim->work);
+ break;
+ }
+ fallthrough;
+ case DIM_START_MEASURE:
+ dim_update_sample(end_sample.event_ctr, end_sample.pkt_ctr,
+ end_sample.byte_ctr, &dim->start_sample);
+ dim->state = DIM_MEASURE_IN_PROGRESS;
+ break;
+ case DIM_APPLY_NEW_PROFILE:
+ break;
+ }
+}
+EXPORT_SYMBOL(net_dim);
diff --git a/lib/dim/rdma_dim.c b/lib/dim/rdma_dim.c
new file mode 100644
index 000000000..88f779486
--- /dev/null
+++ b/lib/dim/rdma_dim.c
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2019, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include <linux/dim.h>
+
+static int rdma_dim_step(struct dim *dim)
+{
+ if (dim->tune_state == DIM_GOING_RIGHT) {
+ if (dim->profile_ix == (RDMA_DIM_PARAMS_NUM_PROFILES - 1))
+ return DIM_ON_EDGE;
+ dim->profile_ix++;
+ dim->steps_right++;
+ }
+ if (dim->tune_state == DIM_GOING_LEFT) {
+ if (dim->profile_ix == 0)
+ return DIM_ON_EDGE;
+ dim->profile_ix--;
+ dim->steps_left++;
+ }
+
+ return DIM_STEPPED;
+}
+
+static int rdma_dim_stats_compare(struct dim_stats *curr,
+ struct dim_stats *prev)
+{
+ /* first stat */
+ if (!prev->cpms)
+ return DIM_STATS_SAME;
+
+ if (IS_SIGNIFICANT_DIFF(curr->cpms, prev->cpms))
+ return (curr->cpms > prev->cpms) ? DIM_STATS_BETTER :
+ DIM_STATS_WORSE;
+
+ if (IS_SIGNIFICANT_DIFF(curr->cpe_ratio, prev->cpe_ratio))
+ return (curr->cpe_ratio > prev->cpe_ratio) ? DIM_STATS_BETTER :
+ DIM_STATS_WORSE;
+
+ return DIM_STATS_SAME;
+}
+
+static bool rdma_dim_decision(struct dim_stats *curr_stats, struct dim *dim)
+{
+ int prev_ix = dim->profile_ix;
+ u8 state = dim->tune_state;
+ int stats_res;
+ int step_res;
+
+ if (state != DIM_PARKING_ON_TOP && state != DIM_PARKING_TIRED) {
+ stats_res = rdma_dim_stats_compare(curr_stats,
+ &dim->prev_stats);
+
+ switch (stats_res) {
+ case DIM_STATS_SAME:
+ if (curr_stats->cpe_ratio <= 50 * prev_ix)
+ dim->profile_ix = 0;
+ break;
+ case DIM_STATS_WORSE:
+ dim_turn(dim);
+ fallthrough;
+ case DIM_STATS_BETTER:
+ step_res = rdma_dim_step(dim);
+ if (step_res == DIM_ON_EDGE)
+ dim_turn(dim);
+ break;
+ }
+ }
+
+ dim->prev_stats = *curr_stats;
+
+ return dim->profile_ix != prev_ix;
+}
+
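+/*
+ * Hedged sketch of the intended caller flow: the CQ polling path calls
+ * rdma_dim() with the number of completions just processed. Events and
+ * completions accumulate in dim->measuring_sample; once DIM_NEVENTS
+ * events have been seen, a decision is made as above and dim->work
+ * applies any new profile.
+ */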
+void rdma_dim(struct dim *dim, u64 completions)
+{
+ struct dim_sample *curr_sample = &dim->measuring_sample;
+ struct dim_stats curr_stats;
+ u32 nevents;
+
+ dim_update_sample_with_comps(curr_sample->event_ctr + 1, 0, 0,
+ curr_sample->comp_ctr + completions,
+ &dim->measuring_sample);
+
+ switch (dim->state) {
+ case DIM_MEASURE_IN_PROGRESS:
+ nevents = curr_sample->event_ctr - dim->start_sample.event_ctr;
+ if (nevents < DIM_NEVENTS)
+ break;
+ if (!dim_calc_stats(&dim->start_sample, curr_sample, &curr_stats))
+ break;
+ if (rdma_dim_decision(&curr_stats, dim)) {
+ dim->state = DIM_APPLY_NEW_PROFILE;
+ schedule_work(&dim->work);
+ break;
+ }
+ fallthrough;
+ case DIM_START_MEASURE:
+ dim->state = DIM_MEASURE_IN_PROGRESS;
+ dim_update_sample_with_comps(curr_sample->event_ctr, 0, 0,
+ curr_sample->comp_ctr,
+ &dim->start_sample);
+ break;
+ case DIM_APPLY_NEW_PROFILE:
+ break;
+ }
+}
+EXPORT_SYMBOL(rdma_dim);
diff --git a/lib/dump_stack.c b/lib/dump_stack.c
new file mode 100644
index 000000000..83471e815
--- /dev/null
+++ b/lib/dump_stack.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Provide a default dump_stack() function for architectures
+ * which don't implement their own.
+ */
+
+#include <linux/kernel.h>
+#include <linux/buildid.h>
+#include <linux/export.h>
+#include <linux/sched.h>
+#include <linux/sched/debug.h>
+#include <linux/smp.h>
+#include <linux/atomic.h>
+#include <linux/kexec.h>
+#include <linux/utsname.h>
+#include <linux/stop_machine.h>
+
+static char dump_stack_arch_desc_str[128];
+
+/**
+ * dump_stack_set_arch_desc - set arch-specific str to show with task dumps
+ * @fmt: printf-style format string
+ * @...: arguments for the format string
+ *
+ * The configured string will be printed right after utsname during task
+ * dumps. Usually used to add arch-specific system identifiers. If an
+ * arch wants to make use of such an ID string, it should initialize this
+ * as soon as possible during boot.
+ */
+void __init dump_stack_set_arch_desc(const char *fmt, ...)
+{
+ va_list args;
+
+ va_start(args, fmt);
+ vsnprintf(dump_stack_arch_desc_str, sizeof(dump_stack_arch_desc_str),
+ fmt, args);
+ va_end(args);
+}
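+
+/*
+ * Example (hedged; the exact format string varies by platform): on DMI
+ * systems the firmware identification is fed in here, roughly
+ *
+ *     dump_stack_set_arch_desc("%s, BIOS %s %s", product, version, date);
+ *
+ * which later shows up as the "Hardware name:" line in task dumps.
+ */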
+
+#if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID)
+#define BUILD_ID_FMT " %20phN"
+#define BUILD_ID_VAL vmlinux_build_id
+#else
+#define BUILD_ID_FMT "%s"
+#define BUILD_ID_VAL ""
+#endif
+
+/**
+ * dump_stack_print_info - print generic debug info for dump_stack()
+ * @log_lvl: log level
+ *
+ * Arch-specific dump_stack() implementations can use this function to
+ * print out the same debug information as the generic dump_stack().
+ */
+void dump_stack_print_info(const char *log_lvl)
+{
+ printk("%sCPU: %d PID: %d Comm: %.20s %s%s %s %.*s" BUILD_ID_FMT "\n",
+ log_lvl, raw_smp_processor_id(), current->pid, current->comm,
+ kexec_crash_loaded() ? "Kdump: loaded " : "",
+ print_tainted(),
+ init_utsname()->release,
+ (int)strcspn(init_utsname()->version, " "),
+ init_utsname()->version, BUILD_ID_VAL);
+
+ if (dump_stack_arch_desc_str[0] != '\0')
+ printk("%sHardware name: %s\n",
+ log_lvl, dump_stack_arch_desc_str);
+
+ print_worker_info(log_lvl, current);
+ print_stop_info(log_lvl, current);
+}
+
+/**
+ * show_regs_print_info - print generic debug info for show_regs()
+ * @log_lvl: log level
+ *
+ * show_regs() implementations can use this function to print out generic
+ * debug information.
+ */
+void show_regs_print_info(const char *log_lvl)
+{
+ dump_stack_print_info(log_lvl);
+}
+
+static void __dump_stack(const char *log_lvl)
+{
+ dump_stack_print_info(log_lvl);
+ show_stack(NULL, NULL, log_lvl);
+}
+
+/**
+ * dump_stack_lvl - dump the current task information and its stack trace
+ * @log_lvl: log level
+ *
+ * Architectures can override this implementation by providing their own.
+ */
+asmlinkage __visible void dump_stack_lvl(const char *log_lvl)
+{
+ unsigned long flags;
+
+ /*
+ * Permit this cpu to perform nested stack dumps while serialising
+ * against other CPUs
+ */
+ printk_cpu_sync_get_irqsave(flags);
+ __dump_stack(log_lvl);
+ printk_cpu_sync_put_irqrestore(flags);
+}
+EXPORT_SYMBOL(dump_stack_lvl);
+
+asmlinkage __visible void dump_stack(void)
+{
+ dump_stack_lvl(KERN_DEFAULT);
+}
+EXPORT_SYMBOL(dump_stack);
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
new file mode 100644
index 000000000..6fba6423c
--- /dev/null
+++ b/lib/dynamic_debug.c
@@ -0,0 +1,1491 @@
+/*
+ * lib/dynamic_debug.c
+ *
+ * make pr_debug()/dev_dbg() calls runtime configurable based upon their
+ * source module.
+ *
+ * Copyright (C) 2008 Jason Baron <jbaron@redhat.com>
+ * By Greg Banks <gnb@melbourne.sgi.com>
+ * Copyright (c) 2008 Silicon Graphics Inc. All Rights Reserved.
+ * Copyright (C) 2011 Bart Van Assche. All Rights Reserved.
+ * Copyright (C) 2013 Du, Changbin <changbin.du@gmail.com>
+ */
+
+#define pr_fmt(fmt) "dyndbg: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kallsyms.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/list.h>
+#include <linux/sysctl.h>
+#include <linux/ctype.h>
+#include <linux/string.h>
+#include <linux/parser.h>
+#include <linux/string_helpers.h>
+#include <linux/uaccess.h>
+#include <linux/dynamic_debug.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+#include <linux/jump_label.h>
+#include <linux/hardirq.h>
+#include <linux/sched.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
+
+#include <rdma/ib_verbs.h>
+
+extern struct _ddebug __start___dyndbg[];
+extern struct _ddebug __stop___dyndbg[];
+extern struct ddebug_class_map __start___dyndbg_classes[];
+extern struct ddebug_class_map __stop___dyndbg_classes[];
+
+struct ddebug_table {
+ struct list_head link, maps;
+ const char *mod_name;
+ unsigned int num_ddebugs;
+ struct _ddebug *ddebugs;
+};
+
+struct ddebug_query {
+ const char *filename;
+ const char *module;
+ const char *function;
+ const char *format;
+ const char *class_string;
+ unsigned int first_lineno, last_lineno;
+};
+
+struct ddebug_iter {
+ struct ddebug_table *table;
+ int idx;
+};
+
+struct flag_settings {
+ unsigned int flags;
+ unsigned int mask;
+};
+
+static DEFINE_MUTEX(ddebug_lock);
+static LIST_HEAD(ddebug_tables);
+static int verbose;
+module_param(verbose, int, 0644);
+MODULE_PARM_DESC(verbose, " dynamic_debug/control processing "
+ "( 0 = off (default), 1 = module add/rm, 2 = >control summary, 3 = parsing, 4 = per-site changes)");
+
+/* Return the path relative to source root */
+static inline const char *trim_prefix(const char *path)
+{
+ int skip = strlen(__FILE__) - strlen("lib/dynamic_debug.c");
+
+ if (strncmp(path, __FILE__, skip))
+ skip = 0; /* prefix mismatch, don't skip */
+
+ return path + skip;
+}
+
+static const struct { unsigned flag:8; char opt_char; } opt_array[] = {
+ { _DPRINTK_FLAGS_PRINT, 'p' },
+ { _DPRINTK_FLAGS_INCL_MODNAME, 'm' },
+ { _DPRINTK_FLAGS_INCL_FUNCNAME, 'f' },
+ { _DPRINTK_FLAGS_INCL_SOURCENAME, 's' },
+ { _DPRINTK_FLAGS_INCL_LINENO, 'l' },
+ { _DPRINTK_FLAGS_INCL_TID, 't' },
+ { _DPRINTK_FLAGS_NONE, '_' },
+};
+
+struct flagsbuf { char buf[ARRAY_SIZE(opt_array)+1]; };
+
+/* format a string into buf[] which describes the _ddebug's flags */
+static char *ddebug_describe_flags(unsigned int flags, struct flagsbuf *fb)
+{
+ char *p = fb->buf;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(opt_array); ++i)
+ if (flags & opt_array[i].flag)
+ *p++ = opt_array[i].opt_char;
+ if (p == fb->buf)
+ *p++ = '_';
+ *p = '\0';
+
+ return fb->buf;
+}
+
+#define vnpr_info(lvl, fmt, ...) \
+do { \
+ if (verbose >= lvl) \
+ pr_info(fmt, ##__VA_ARGS__); \
+} while (0)
+
+#define vpr_info(fmt, ...) vnpr_info(1, fmt, ##__VA_ARGS__)
+#define v2pr_info(fmt, ...) vnpr_info(2, fmt, ##__VA_ARGS__)
+#define v3pr_info(fmt, ...) vnpr_info(3, fmt, ##__VA_ARGS__)
+#define v4pr_info(fmt, ...) vnpr_info(4, fmt, ##__VA_ARGS__)
+
+static void vpr_info_dq(const struct ddebug_query *query, const char *msg)
+{
+ /* trim any trailing newlines */
+ int fmtlen = 0;
+
+ if (query->format) {
+ fmtlen = strlen(query->format);
+ while (fmtlen && query->format[fmtlen - 1] == '\n')
+ fmtlen--;
+ }
+
+ v3pr_info("%s: func=\"%s\" file=\"%s\" module=\"%s\" format=\"%.*s\" lineno=%u-%u class=%s\n",
+ msg,
+ query->function ?: "",
+ query->filename ?: "",
+ query->module ?: "",
+ fmtlen, query->format ?: "",
+ query->first_lineno, query->last_lineno, query->class_string);
+}
+
+static struct ddebug_class_map *ddebug_find_valid_class(struct ddebug_table const *dt,
+ const char *class_string, int *class_id)
+{
+ struct ddebug_class_map *map;
+ int idx;
+
+ list_for_each_entry(map, &dt->maps, link) {
+ idx = match_string(map->class_names, map->length, class_string);
+ if (idx >= 0) {
+ *class_id = idx + map->base;
+ return map;
+ }
+ }
+ *class_id = -ENOENT;
+ return NULL;
+}
+
+#define __outvar /* filled by callee */
+/*
+ * Search the tables for _ddebug's which match the given `query' and
+ * apply the `flags' and `mask' to them. Returns number of matching
+ * callsites, normally the same as number of changes. If verbose,
+ * logs the changes. Takes ddebug_lock.
+ */
+static int ddebug_change(const struct ddebug_query *query,
+ struct flag_settings *modifiers)
+{
+ int i;
+ struct ddebug_table *dt;
+ unsigned int newflags;
+ unsigned int nfound = 0;
+ struct flagsbuf fbuf, nbuf;
+ struct ddebug_class_map *map = NULL;
+ int __outvar valid_class;
+
+ /* search for matching ddebugs */
+ mutex_lock(&ddebug_lock);
+ list_for_each_entry(dt, &ddebug_tables, link) {
+
+ /* match against the module name */
+ if (query->module &&
+ !match_wildcard(query->module, dt->mod_name))
+ continue;
+
+ if (query->class_string) {
+ map = ddebug_find_valid_class(dt, query->class_string, &valid_class);
+ if (!map)
+ continue;
+ } else {
+ /* constrain query, do not touch class'd callsites */
+ valid_class = _DPRINTK_CLASS_DFLT;
+ }
+
+ for (i = 0; i < dt->num_ddebugs; i++) {
+ struct _ddebug *dp = &dt->ddebugs[i];
+
+ /* match site against query-class */
+ if (dp->class_id != valid_class)
+ continue;
+
+ /* match against the source filename */
+ if (query->filename &&
+ !match_wildcard(query->filename, dp->filename) &&
+ !match_wildcard(query->filename,
+ kbasename(dp->filename)) &&
+ !match_wildcard(query->filename,
+ trim_prefix(dp->filename)))
+ continue;
+
+ /* match against the function */
+ if (query->function &&
+ !match_wildcard(query->function, dp->function))
+ continue;
+
+ /* match against the format */
+ if (query->format) {
+ if (*query->format == '^') {
+ char *p;
+ /* anchored search. match must be at beginning */
+ p = strstr(dp->format, query->format+1);
+ if (p != dp->format)
+ continue;
+ } else if (!strstr(dp->format, query->format))
+ continue;
+ }
+
+ /* match against the line number range */
+ if (query->first_lineno &&
+ dp->lineno < query->first_lineno)
+ continue;
+ if (query->last_lineno &&
+ dp->lineno > query->last_lineno)
+ continue;
+
+ nfound++;
+
+ newflags = (dp->flags & modifiers->mask) | modifiers->flags;
+ if (newflags == dp->flags)
+ continue;
+#ifdef CONFIG_JUMP_LABEL
+ if (dp->flags & _DPRINTK_FLAGS_PRINT) {
+ if (!(newflags & _DPRINTK_FLAGS_PRINT))
+ static_branch_disable(&dp->key.dd_key_true);
+ } else if (newflags & _DPRINTK_FLAGS_PRINT) {
+ static_branch_enable(&dp->key.dd_key_true);
+ }
+#endif
+ v4pr_info("changed %s:%d [%s]%s %s => %s\n",
+ trim_prefix(dp->filename), dp->lineno,
+ dt->mod_name, dp->function,
+ ddebug_describe_flags(dp->flags, &fbuf),
+ ddebug_describe_flags(newflags, &nbuf));
+ dp->flags = newflags;
+ }
+ }
+ mutex_unlock(&ddebug_lock);
+
+ if (!nfound && verbose)
+ pr_info("no matches for query\n");
+
+ return nfound;
+}
+
+/*
+ * Split the buffer `buf' into space-separated words.
+ * Handles simple " and ' quoting, i.e. without nested,
+ * embedded or escaped \". Return the number of words
+ * or <0 on error.
+ */
+static int ddebug_tokenize(char *buf, char *words[], int maxwords)
+{
+ int nwords = 0;
+
+ while (*buf) {
+ char *end;
+
+ /* Skip leading whitespace */
+ buf = skip_spaces(buf);
+ if (!*buf)
+ break; /* oh, it was trailing whitespace */
+ if (*buf == '#')
+ break; /* token starts comment, skip rest of line */
+
+ /* find `end' of word, whitespace separated or quoted */
+ if (*buf == '"' || *buf == '\'') {
+ int quote = *buf++;
+ for (end = buf; *end && *end != quote; end++)
+ ;
+ if (!*end) {
+ pr_err("unclosed quote: %s\n", buf);
+ return -EINVAL; /* unclosed quote */
+ }
+ } else {
+ for (end = buf; *end && !isspace(*end); end++)
+ ;
+ BUG_ON(end == buf);
+ }
+
+ /* `buf' is start of word, `end' is one past its end */
+ if (nwords == maxwords) {
+ pr_err("too many words, legal max <=%d\n", maxwords);
+ return -EINVAL; /* ran out of words[] before bytes */
+ }
+ if (*end)
+ *end++ = '\0'; /* terminate the word */
+ words[nwords++] = buf;
+ buf = end;
+ }
+
+ if (verbose >= 3) {
+ int i;
+ pr_info("split into words:");
+ for (i = 0; i < nwords; i++)
+ pr_cont(" \"%s\"", words[i]);
+ pr_cont("\n");
+ }
+
+ return nwords;
+}
+
+/*
+ * Parse a single line number. Note that the empty string ""
+ * is treated as a special case and converted to zero, which
+ * is later treated as a "don't care" value.
+ */
+static inline int parse_lineno(const char *str, unsigned int *val)
+{
+ BUG_ON(str == NULL);
+ if (*str == '\0') {
+ *val = 0;
+ return 0;
+ }
+ if (kstrtouint(str, 10, val) < 0) {
+ pr_err("bad line-number: %s\n", str);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int parse_linerange(struct ddebug_query *query, const char *first)
+{
+ char *last = strchr(first, '-');
+
+ if (query->first_lineno || query->last_lineno) {
+ pr_err("match-spec: line used 2x\n");
+ return -EINVAL;
+ }
+ if (last)
+ *last++ = '\0';
+ if (parse_lineno(first, &query->first_lineno) < 0)
+ return -EINVAL;
+ if (last) {
+ /* range <first>-<last> */
+ if (parse_lineno(last, &query->last_lineno) < 0)
+ return -EINVAL;
+
+ /* special case for last lineno not specified */
+ if (query->last_lineno == 0)
+ query->last_lineno = UINT_MAX;
+
+ if (query->last_lineno < query->first_lineno) {
+ pr_err("last-line:%d < 1st-line:%d\n",
+ query->last_lineno,
+ query->first_lineno);
+ return -EINVAL;
+ }
+ } else {
+ query->last_lineno = query->first_lineno;
+ }
+ v3pr_info("parsed line %d-%d\n", query->first_lineno,
+ query->last_lineno);
+ return 0;
+}
+
+static int check_set(const char **dest, char *src, char *name)
+{
+ int rc = 0;
+
+ if (*dest) {
+ rc = -EINVAL;
+ pr_err("match-spec:%s val:%s overridden by %s\n",
+ name, *dest, src);
+ }
+ *dest = src;
+ return rc;
+}
+
+/*
+ * Parse words[] as a ddebug query specification, which is a series
+ * of (keyword, value) pairs chosen from these possibilities:
+ *
+ * func <function-name>
+ * file <full-pathname>
+ * file <base-filename>
+ * module <module-name>
+ * format <escaped-string-to-find-in-format>
+ * line <lineno>
+ * line <first-lineno>-<last-lineno> // where either may be empty
+ * class <class-name>
+ *
+ * Only 1 of each type is allowed.
+ * Returns 0 on success, <0 on error.
+ */
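+/*
+ * Examples, as they would be written to dynamic_debug/control (taken
+ * from the dynamic-debug-howto documentation):
+ *
+ *   func svc_process +p              # one function
+ *   file svcsock.c line 1603 +p      # one callsite by file and line
+ *   module sunrpc -p                 # disable a whole module
+ */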
+static int ddebug_parse_query(char *words[], int nwords,
+ struct ddebug_query *query, const char *modname)
+{
+ unsigned int i;
+ int rc = 0;
+ char *fline;
+
+ /* check we have an even number of words */
+ if (nwords % 2 != 0) {
+ pr_err("expecting pairs of match-spec <value>\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < nwords; i += 2) {
+ char *keyword = words[i];
+ char *arg = words[i+1];
+
+ if (!strcmp(keyword, "func")) {
+ rc = check_set(&query->function, arg, "func");
+ } else if (!strcmp(keyword, "file")) {
+ if (check_set(&query->filename, arg, "file"))
+ return -EINVAL;
+
+ /* tail :$info is function or line-range */
+ fline = strchr(query->filename, ':');
+ if (!fline)
+ continue;
+ *fline++ = '\0';
+ if (isalpha(*fline) || *fline == '*' || *fline == '?') {
+ /* take as function name */
+ if (check_set(&query->function, fline, "func"))
+ return -EINVAL;
+ } else {
+ if (parse_linerange(query, fline))
+ return -EINVAL;
+ }
+ } else if (!strcmp(keyword, "module")) {
+ rc = check_set(&query->module, arg, "module");
+ } else if (!strcmp(keyword, "format")) {
+ string_unescape_inplace(arg, UNESCAPE_SPACE |
+ UNESCAPE_OCTAL |
+ UNESCAPE_SPECIAL);
+ rc = check_set(&query->format, arg, "format");
+ } else if (!strcmp(keyword, "line")) {
+ if (parse_linerange(query, arg))
+ return -EINVAL;
+ } else if (!strcmp(keyword, "class")) {
+ rc = check_set(&query->class_string, arg, "class");
+ } else {
+ pr_err("unknown keyword \"%s\"\n", keyword);
+ return -EINVAL;
+ }
+ if (rc)
+ return rc;
+ }
+ if (!query->module && modname)
+ /*
+ * support $modname.dyndbg=<multiple queries>, when
+ * not given in the query itself
+ */
+ query->module = modname;
+
+ vpr_info_dq(query, "parsed");
+ return 0;
+}
+
+/*
+ * Parse `str' as a flags specification, format [-+=][pmfslt_]+.
+ * Sets up *maskp and *flagsp to be used when changing the
+ * flags fields of matched _ddebug's. Returns 0 on success
+ * or <0 on error.
+ */
+static int ddebug_parse_flags(const char *str, struct flag_settings *modifiers)
+{
+ int op, i;
+
+ switch (*str) {
+ case '+':
+ case '-':
+ case '=':
+ op = *str++;
+ break;
+ default:
+ pr_err("bad flag-op %c, at start of %s\n", *str, str);
+ return -EINVAL;
+ }
+ v3pr_info("op='%c'\n", op);
+
+ for (; *str ; ++str) {
+ for (i = ARRAY_SIZE(opt_array) - 1; i >= 0; i--) {
+ if (*str == opt_array[i].opt_char) {
+ modifiers->flags |= opt_array[i].flag;
+ break;
+ }
+ }
+ if (i < 0) {
+ pr_err("unknown flag '%c'\n", *str);
+ return -EINVAL;
+ }
+ }
+ v3pr_info("flags=0x%x\n", modifiers->flags);
+
+ /* calculate final flags, mask based upon op */
+ switch (op) {
+ case '=':
+ /* modifiers->flags already set */
+ modifiers->mask = 0;
+ break;
+ case '+':
+ modifiers->mask = ~0U;
+ break;
+ case '-':
+ modifiers->mask = ~modifiers->flags;
+ modifiers->flags = 0;
+ break;
+ }
+ v3pr_info("*flagsp=0x%x *maskp=0x%x\n", modifiers->flags, modifiers->mask);
+
+ return 0;
+}
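+
+/*
+ * Flag-spec sketch (the flag letters come from opt_array; 'p', which
+ * enables the printk, is the common one in mainline):
+ *   "+p"  set the p flag, leave the others alone  (mask ~0)
+ *   "-p"  clear the p flag                        (flags 0, mask ~p)
+ *   "=p"  set the flags to exactly p              (mask 0)
+ */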
+
+static int ddebug_exec_query(char *query_string, const char *modname)
+{
+ struct flag_settings modifiers = {};
+ struct ddebug_query query = {};
+#define MAXWORDS 9
+ int nwords, nfound;
+ char *words[MAXWORDS];
+
+ nwords = ddebug_tokenize(query_string, words, MAXWORDS);
+ if (nwords <= 0) {
+ pr_err("tokenize failed\n");
+ return -EINVAL;
+ }
+ /* check flags first (the last arg), so the query is pairs of spec,val */
+ if (ddebug_parse_flags(words[nwords-1], &modifiers)) {
+ pr_err("flags parse failed\n");
+ return -EINVAL;
+ }
+ if (ddebug_parse_query(words, nwords-1, &query, modname)) {
+ pr_err("query parse failed\n");
+ return -EINVAL;
+ }
+ /* actually go and implement the change */
+ nfound = ddebug_change(&query, &modifiers);
+ vpr_info_dq(&query, nfound ? "applied" : "no-match");
+
+ return nfound;
+}
+
+/*
+ * Handle multiple queries in the query string; continue on error,
+ * returning the last error or the number of matching callsites.
+ * The module name is either in the modname param (for a boot arg)
+ * or perhaps in the query string itself.
+ */
+static int ddebug_exec_queries(char *query, const char *modname)
+{
+ char *split;
+ int i, errs = 0, exitcode = 0, rc, nfound = 0;
+
+ for (i = 0; query; query = split) {
+ split = strpbrk(query, ";\n");
+ if (split)
+ *split++ = '\0';
+
+ query = skip_spaces(query);
+ if (!query || !*query || *query == '#')
+ continue;
+
+ vpr_info("query %d: \"%s\" mod:%s\n", i, query, modname ?: "*");
+
+ rc = ddebug_exec_query(query, modname);
+ if (rc < 0) {
+ errs++;
+ exitcode = rc;
+ } else {
+ nfound += rc;
+ }
+ i++;
+ }
+ if (i)
+ v2pr_info("processed %d queries, with %d matches, %d errs\n",
+ i, nfound, errs);
+
+ if (exitcode)
+ return exitcode;
+ return nfound;
+}
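+
+/*
+ * Query-string sketch: each query is match-spec pairs plus a trailing
+ * flag-spec; ';' or '\n' separates queries and '#' comments one out.
+ * Assuming mainline's wildcard matching (hinted at by the '*'/'?'
+ * check in ddebug_parse_query):
+ *
+ *   "module usbcore +p; func *_probe +p"
+ *
+ * enables printing in usbcore and at every function matching *_probe.
+ */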
+
+/* apply a new bitmap to the sys-knob's current bit-state */
+static int ddebug_apply_class_bitmap(const struct ddebug_class_param *dcp,
+ unsigned long *new_bits, unsigned long *old_bits)
+{
+#define QUERY_SIZE 128
+ char query[QUERY_SIZE];
+ const struct ddebug_class_map *map = dcp->map;
+ int matches = 0;
+ int bi, ct;
+
+ v2pr_info("apply: 0x%lx to: 0x%lx\n", *new_bits, *old_bits);
+
+ for (bi = 0; bi < map->length; bi++) {
+ if (test_bit(bi, new_bits) == test_bit(bi, old_bits))
+ continue;
+
+ snprintf(query, QUERY_SIZE, "class %s %c%s", map->class_names[bi],
+ test_bit(bi, new_bits) ? '+' : '-', dcp->flags);
+
+ ct = ddebug_exec_queries(query, NULL);
+ matches += ct;
+
+ v2pr_info("bit_%d: %d matches on class: %s -> 0x%lx\n", bi,
+ ct, map->class_names[bi], *new_bits);
+ }
+ return matches;
+}
+
+/* stub; may later conditionally add a "$module." prefix where not already present */
+#define KP_NAME(kp) kp->name
+
+#define CLASSMAP_BITMASK(width) ((1UL << (width)) - 1)
+
+/* accept comma-separated-list of [+-] classnames */
+static int param_set_dyndbg_classnames(const char *instr, const struct kernel_param *kp)
+{
+ const struct ddebug_class_param *dcp = kp->arg;
+ const struct ddebug_class_map *map = dcp->map;
+ unsigned long curr_bits, old_bits;
+ char *cl_str, *p, *tmp;
+ int cls_id, totct = 0;
+ bool wanted;
+
+ cl_str = tmp = kstrdup(instr, GFP_KERNEL);
+ p = strchr(cl_str, '\n');
+ if (p)
+ *p = '\0';
+
+ /* start with previously set state-bits, then modify */
+ curr_bits = old_bits = *dcp->bits;
+ vpr_info("\"%s\" > %s:0x%lx\n", cl_str, KP_NAME(kp), curr_bits);
+
+ for (; cl_str; cl_str = p) {
+ p = strchr(cl_str, ',');
+ if (p)
+ *p++ = '\0';
+
+ if (*cl_str == '-') {
+ wanted = false;
+ cl_str++;
+ } else {
+ wanted = true;
+ if (*cl_str == '+')
+ cl_str++;
+ }
+ cls_id = match_string(map->class_names, map->length, cl_str);
+ if (cls_id < 0) {
+ pr_err("%s unknown to %s\n", cl_str, KP_NAME(kp));
+ continue;
+ }
+
+ /* have one or more valid class_ids of one *_NAMES type */
+ switch (map->map_type) {
+ case DD_CLASS_TYPE_DISJOINT_NAMES:
+ /* the +/- pertains to a single bit */
+ if (test_bit(cls_id, &curr_bits) == wanted) {
+ v3pr_info("no change on %s\n", cl_str);
+ continue;
+ }
+ curr_bits ^= BIT(cls_id);
+ totct += ddebug_apply_class_bitmap(dcp, &curr_bits, dcp->bits);
+ *dcp->bits = curr_bits;
+ v2pr_info("%s: changed bit %d:%s\n", KP_NAME(kp), cls_id,
+ map->class_names[cls_id]);
+ break;
+ case DD_CLASS_TYPE_LEVEL_NAMES:
+ /* cls_id = N in 0..max. wanted +/- determines N or N-1 */
+ old_bits = CLASSMAP_BITMASK(*dcp->lvl);
+ curr_bits = CLASSMAP_BITMASK(cls_id + (wanted ? 1 : 0 ));
+
+ totct += ddebug_apply_class_bitmap(dcp, &curr_bits, &old_bits);
+ *dcp->lvl = (cls_id + (wanted ? 1 : 0));
+ v2pr_info("%s: changed bit-%d: \"%s\" %lx->%lx\n", KP_NAME(kp), cls_id,
+ map->class_names[cls_id], old_bits, curr_bits);
+ break;
+ default:
+ pr_err("illegal map-type value %d\n", map->map_type);
+ }
+ }
+ kfree(tmp);
+ vpr_info("total matches: %d\n", totct);
+ return 0;
+}
+
+/**
+ * param_set_dyndbg_classes - class FOO >control
+ * @instr: string echoed to sysfs; input depends on map_type
+ * @kp: kp->arg has state: bits/lvl, map, map_type
+ *
+ * Enable/disable prdbgs by their class, as given in the arguments to
+ * DECLARE_DYNDBG_CLASSMAP. For LEVEL map-types, enforce relative
+ * levels by bitpos.
+ *
+ * Returns: 0 on success, or <0 on error.
+ */
+int param_set_dyndbg_classes(const char *instr, const struct kernel_param *kp)
+{
+ const struct ddebug_class_param *dcp = kp->arg;
+ const struct ddebug_class_map *map = dcp->map;
+ unsigned long inrep, new_bits, old_bits;
+ int rc, totct = 0;
+
+ switch (map->map_type) {
+
+ case DD_CLASS_TYPE_DISJOINT_NAMES:
+ case DD_CLASS_TYPE_LEVEL_NAMES:
+ /* handle [+-]classnames list separately; we are done here */
+ return param_set_dyndbg_classnames(instr, kp);
+
+ case DD_CLASS_TYPE_DISJOINT_BITS:
+ case DD_CLASS_TYPE_LEVEL_NUM:
+ /* numeric input, accept and fall-thru */
+ rc = kstrtoul(instr, 0, &inrep);
+ if (rc) {
+ pr_err("expecting numeric input: %s > %s\n", instr, KP_NAME(kp));
+ return -EINVAL;
+ }
+ break;
+ default:
+ pr_err("%s: bad map type: %d\n", KP_NAME(kp), map->map_type);
+ return -EINVAL;
+ }
+
+ /* only _BITS,_NUM (numeric) map-types get here */
+ switch (map->map_type) {
+ case DD_CLASS_TYPE_DISJOINT_BITS:
+ /* expect bits. mask and warn if too many */
+ if (inrep & ~CLASSMAP_BITMASK(map->length)) {
+ pr_warn("%s: input: 0x%lx exceeds mask: 0x%lx, masking\n",
+ KP_NAME(kp), inrep, CLASSMAP_BITMASK(map->length));
+ inrep &= CLASSMAP_BITMASK(map->length);
+ }
+ v2pr_info("bits:%lx > %s\n", inrep, KP_NAME(kp));
+ totct += ddebug_apply_class_bitmap(dcp, &inrep, dcp->bits);
+ *dcp->bits = inrep;
+ break;
+ case DD_CLASS_TYPE_LEVEL_NUM:
+ /* input is bitpos, of highest verbosity to be enabled */
+ if (inrep > map->length) {
+ pr_warn("%s: level:%ld exceeds max:%d, clamping\n",
+ KP_NAME(kp), inrep, map->length);
+ inrep = map->length;
+ }
+ old_bits = CLASSMAP_BITMASK(*dcp->lvl);
+ new_bits = CLASSMAP_BITMASK(inrep);
+ v2pr_info("lvl:%ld bits:0x%lx > %s\n", inrep, new_bits, KP_NAME(kp));
+ totct += ddebug_apply_class_bitmap(dcp, &new_bits, &old_bits);
+ *dcp->lvl = inrep;
+ break;
+ default:
+ pr_warn("%s: bad map type: %d\n", KP_NAME(kp), map->map_type);
+ }
+ vpr_info("%s: total matches: %d\n", KP_NAME(kp), totct);
+ return 0;
+}
+EXPORT_SYMBOL(param_set_dyndbg_classes);
+
+/**
+ * param_get_dyndbg_classes - classes reader
+ * @buffer: string description of controlled bits -> classes
+ * @kp: kp->arg has state: bits, map
+ *
+ * Reads the last written state; the underlying prdbg state may have
+ * been altered since, by direct writes to >control. Displays 0x<bits>
+ * for DISJOINT, 0-N for LEVEL map-types.
+ *
+ * Returns: #chars written, or <0 on error
+ */
+int param_get_dyndbg_classes(char *buffer, const struct kernel_param *kp)
+{
+ const struct ddebug_class_param *dcp = kp->arg;
+ const struct ddebug_class_map *map = dcp->map;
+
+ switch (map->map_type) {
+
+ case DD_CLASS_TYPE_DISJOINT_NAMES:
+ case DD_CLASS_TYPE_DISJOINT_BITS:
+ return scnprintf(buffer, PAGE_SIZE, "0x%lx\n", *dcp->bits);
+
+ case DD_CLASS_TYPE_LEVEL_NAMES:
+ case DD_CLASS_TYPE_LEVEL_NUM:
+ return scnprintf(buffer, PAGE_SIZE, "%d\n", *dcp->lvl);
+ default:
+ return -1;
+ }
+}
+EXPORT_SYMBOL(param_get_dyndbg_classes);
+
+const struct kernel_param_ops param_ops_dyndbg_classes = {
+ .set = param_set_dyndbg_classes,
+ .get = param_get_dyndbg_classes,
+};
+EXPORT_SYMBOL(param_ops_dyndbg_classes);
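+
+/*
+ * Usage sketch, modeled on how drm wires its debug categories up
+ * (names below are illustrative; DECLARE_DYNDBG_CLASSMAP lives in
+ * <linux/dynamic_debug.h>):
+ *
+ *   DECLARE_DYNDBG_CLASSMAP(my_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0,
+ *                           "MY_CORE", "MY_KMS");
+ *   static unsigned long my_debug_bits;
+ *   static struct ddebug_class_param my_dcp = {
+ *           .bits = &my_debug_bits, .flags = "p", .map = &my_classes,
+ *   };
+ *   module_param_cb(debug, &param_ops_dyndbg_classes, &my_dcp, 0600);
+ */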
+
+#define PREFIX_SIZE 128
+
+static int remaining(int wrote)
+{
+ if (PREFIX_SIZE - wrote > 0)
+ return PREFIX_SIZE - wrote;
+ return 0;
+}
+
+static char *__dynamic_emit_prefix(const struct _ddebug *desc, char *buf)
+{
+ int pos_after_tid;
+ int pos = 0;
+
+ if (desc->flags & _DPRINTK_FLAGS_INCL_TID) {
+ if (in_interrupt())
+ pos += snprintf(buf + pos, remaining(pos), "<intr> ");
+ else
+ pos += snprintf(buf + pos, remaining(pos), "[%d] ",
+ task_pid_vnr(current));
+ }
+ pos_after_tid = pos;
+ if (desc->flags & _DPRINTK_FLAGS_INCL_MODNAME)
+ pos += snprintf(buf + pos, remaining(pos), "%s:",
+ desc->modname);
+ if (desc->flags & _DPRINTK_FLAGS_INCL_FUNCNAME)
+ pos += snprintf(buf + pos, remaining(pos), "%s:",
+ desc->function);
+ if (desc->flags & _DPRINTK_FLAGS_INCL_SOURCENAME)
+ pos += snprintf(buf + pos, remaining(pos), "%s:",
+ trim_prefix(desc->filename));
+ if (desc->flags & _DPRINTK_FLAGS_INCL_LINENO)
+ pos += snprintf(buf + pos, remaining(pos), "%d:",
+ desc->lineno);
+ if (pos - pos_after_tid)
+ pos += snprintf(buf + pos, remaining(pos), " ");
+ if (pos >= PREFIX_SIZE)
+ buf[PREFIX_SIZE - 1] = '\0';
+
+ return buf;
+}
+
+static inline char *dynamic_emit_prefix(struct _ddebug *desc, char *buf)
+{
+ if (unlikely(desc->flags & _DPRINTK_FLAGS_INCL_ANY))
+ return __dynamic_emit_prefix(desc, buf);
+ return buf;
+}
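+
+/*
+ * Prefix sketch: with the INCL_TID/MODNAME/FUNCNAME/LINENO flags all set,
+ * a task-context message is emitted roughly as
+ *   [<pid>] <module>:<function>:<lineno>: <message>
+ * with "<intr> " replacing "[<pid>] " when in_interrupt().
+ */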
+
+void __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...)
+{
+ va_list args;
+ struct va_format vaf;
+ char buf[PREFIX_SIZE] = "";
+
+ BUG_ON(!descriptor);
+ BUG_ON(!fmt);
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ printk(KERN_DEBUG "%s%pV", dynamic_emit_prefix(descriptor, buf), &vaf);
+
+ va_end(args);
+}
+EXPORT_SYMBOL(__dynamic_pr_debug);
+
+void __dynamic_dev_dbg(struct _ddebug *descriptor,
+ const struct device *dev, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ BUG_ON(!descriptor);
+ BUG_ON(!fmt);
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ if (!dev) {
+ printk(KERN_DEBUG "(NULL device *): %pV", &vaf);
+ } else {
+ char buf[PREFIX_SIZE] = "";
+
+ dev_printk_emit(LOGLEVEL_DEBUG, dev, "%s%s %s: %pV",
+ dynamic_emit_prefix(descriptor, buf),
+ dev_driver_string(dev), dev_name(dev),
+ &vaf);
+ }
+
+ va_end(args);
+}
+EXPORT_SYMBOL(__dynamic_dev_dbg);
+
+#ifdef CONFIG_NET
+
+void __dynamic_netdev_dbg(struct _ddebug *descriptor,
+ const struct net_device *dev, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ BUG_ON(!descriptor);
+ BUG_ON(!fmt);
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ if (dev && dev->dev.parent) {
+ char buf[PREFIX_SIZE] = "";
+
+ dev_printk_emit(LOGLEVEL_DEBUG, dev->dev.parent,
+ "%s%s %s %s%s: %pV",
+ dynamic_emit_prefix(descriptor, buf),
+ dev_driver_string(dev->dev.parent),
+ dev_name(dev->dev.parent),
+ netdev_name(dev), netdev_reg_state(dev),
+ &vaf);
+ } else if (dev) {
+ printk(KERN_DEBUG "%s%s: %pV", netdev_name(dev),
+ netdev_reg_state(dev), &vaf);
+ } else {
+ printk(KERN_DEBUG "(NULL net_device): %pV", &vaf);
+ }
+
+ va_end(args);
+}
+EXPORT_SYMBOL(__dynamic_netdev_dbg);
+
+#endif
+
+#if IS_ENABLED(CONFIG_INFINIBAND)
+
+void __dynamic_ibdev_dbg(struct _ddebug *descriptor,
+ const struct ib_device *ibdev, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ if (ibdev && ibdev->dev.parent) {
+ char buf[PREFIX_SIZE] = "";
+
+ dev_printk_emit(LOGLEVEL_DEBUG, ibdev->dev.parent,
+ "%s%s %s %s: %pV",
+ dynamic_emit_prefix(descriptor, buf),
+ dev_driver_string(ibdev->dev.parent),
+ dev_name(ibdev->dev.parent),
+ dev_name(&ibdev->dev),
+ &vaf);
+ } else if (ibdev) {
+ printk(KERN_DEBUG "%s: %pV", dev_name(&ibdev->dev), &vaf);
+ } else {
+ printk(KERN_DEBUG "(NULL ib_device): %pV", &vaf);
+ }
+
+ va_end(args);
+}
+EXPORT_SYMBOL(__dynamic_ibdev_dbg);
+
+#endif
+
+/*
+ * Install a noop handler to make dyndbg look like a normal kernel cli param.
+ * This avoids warnings about dyndbg being an unknown cli param when supplied
+ * by a user.
+ */
+static __init int dyndbg_setup(char *str)
+{
+ return 1;
+}
+
+__setup("dyndbg=", dyndbg_setup);
+
+/*
+ * The file_ops->write method for <debugfs>/dynamic_debug/control.
+ * Gathers the command text from userspace, then parses and executes it.
+ */
+#define USER_BUF_PAGE 4096
+static ssize_t ddebug_proc_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ char *tmpbuf;
+ int ret;
+
+ if (len == 0)
+ return 0;
+ if (len > USER_BUF_PAGE - 1) {
+ pr_warn("expected <%d bytes into control\n", USER_BUF_PAGE);
+ return -E2BIG;
+ }
+ tmpbuf = memdup_user_nul(ubuf, len);
+ if (IS_ERR(tmpbuf))
+ return PTR_ERR(tmpbuf);
+ v2pr_info("read %zu bytes from userspace\n", len);
+
+ ret = ddebug_exec_queries(tmpbuf, NULL);
+ kfree(tmpbuf);
+ if (ret < 0)
+ return ret;
+
+ *offp += len;
+ return len;
+}
+
+/*
+ * Set the iterator to point to the first _ddebug object
+ * and return a pointer to that first object. Returns
+ * NULL if there are no _ddebugs at all.
+ */
+static struct _ddebug *ddebug_iter_first(struct ddebug_iter *iter)
+{
+ if (list_empty(&ddebug_tables)) {
+ iter->table = NULL;
+ return NULL;
+ }
+ iter->table = list_entry(ddebug_tables.next,
+ struct ddebug_table, link);
+ iter->idx = iter->table->num_ddebugs;
+ return &iter->table->ddebugs[--iter->idx];
+}
+
+/*
+ * Advance the iterator to point to the next _ddebug
+ * object after the one the iterator currently points at,
+ * and return a pointer to the new _ddebug. Returns
+ * NULL if the iterator has seen all the _ddebugs.
+ */
+static struct _ddebug *ddebug_iter_next(struct ddebug_iter *iter)
+{
+ if (iter->table == NULL)
+ return NULL;
+ if (--iter->idx < 0) {
+ /* iterate to next table */
+ if (list_is_last(&iter->table->link, &ddebug_tables)) {
+ iter->table = NULL;
+ return NULL;
+ }
+ iter->table = list_entry(iter->table->link.next,
+ struct ddebug_table, link);
+ iter->idx = iter->table->num_ddebugs;
+ --iter->idx;
+ }
+ return &iter->table->ddebugs[iter->idx];
+}
+
+/*
+ * Seq_ops start method. Called at the start of every
+ * read() call from userspace. Takes the ddebug_lock and
+ * seeks the seq_file's iterator to the given position.
+ */
+static void *ddebug_proc_start(struct seq_file *m, loff_t *pos)
+{
+ struct ddebug_iter *iter = m->private;
+ struct _ddebug *dp;
+ int n = *pos;
+
+ mutex_lock(&ddebug_lock);
+
+ if (!n)
+ return SEQ_START_TOKEN;
+ if (n < 0)
+ return NULL;
+ dp = ddebug_iter_first(iter);
+ while (dp != NULL && --n > 0)
+ dp = ddebug_iter_next(iter);
+ return dp;
+}
+
+/*
+ * Seq_ops next method. Called several times within a read()
+ * call from userspace, with ddebug_lock held. Walks to the
+ * next _ddebug object with a special case for the header line.
+ */
+static void *ddebug_proc_next(struct seq_file *m, void *p, loff_t *pos)
+{
+ struct ddebug_iter *iter = m->private;
+ struct _ddebug *dp;
+
+ if (p == SEQ_START_TOKEN)
+ dp = ddebug_iter_first(iter);
+ else
+ dp = ddebug_iter_next(iter);
+ ++*pos;
+ return dp;
+}
+
+#define class_in_range(class_id, map) \
+ (class_id >= map->base && class_id < map->base + map->length)
+
+static const char *ddebug_class_name(struct ddebug_iter *iter, struct _ddebug *dp)
+{
+ struct ddebug_class_map *map;
+
+ list_for_each_entry(map, &iter->table->maps, link)
+ if (class_in_range(dp->class_id, map))
+ return map->class_names[dp->class_id - map->base];
+
+ return NULL;
+}
+
+/*
+ * Seq_ops show method. Called several times within a read()
+ * call from userspace, with ddebug_lock held. Formats the
+ * current _ddebug as a single human-readable line, with a
+ * special case for the header line.
+ */
+static int ddebug_proc_show(struct seq_file *m, void *p)
+{
+ struct ddebug_iter *iter = m->private;
+ struct _ddebug *dp = p;
+ struct flagsbuf flags;
+ char const *class;
+
+ if (p == SEQ_START_TOKEN) {
+ seq_puts(m,
+ "# filename:lineno [module]function flags format\n");
+ return 0;
+ }
+
+ seq_printf(m, "%s:%u [%s]%s =%s \"",
+ trim_prefix(dp->filename), dp->lineno,
+ iter->table->mod_name, dp->function,
+ ddebug_describe_flags(dp->flags, &flags));
+ seq_escape_str(m, dp->format, ESCAPE_SPACE, "\t\r\n\"");
+ seq_puts(m, "\"");
+
+ if (dp->class_id != _DPRINTK_CLASS_DFLT) {
+ class = ddebug_class_name(iter, dp);
+ if (class)
+ seq_printf(m, " class:%s", class);
+ else
+ seq_printf(m, " class unknown, _id:%d", dp->class_id);
+ }
+ seq_puts(m, "\n");
+
+ return 0;
+}
+
+/*
+ * Seq_ops stop method. Called at the end of each read()
+ * call from userspace. Drops ddebug_lock.
+ */
+static void ddebug_proc_stop(struct seq_file *m, void *p)
+{
+ mutex_unlock(&ddebug_lock);
+}
+
+static const struct seq_operations ddebug_proc_seqops = {
+ .start = ddebug_proc_start,
+ .next = ddebug_proc_next,
+ .show = ddebug_proc_show,
+ .stop = ddebug_proc_stop
+};
+
+static int ddebug_proc_open(struct inode *inode, struct file *file)
+{
+ return seq_open_private(file, &ddebug_proc_seqops,
+ sizeof(struct ddebug_iter));
+}
+
+static const struct file_operations ddebug_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = ddebug_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_private,
+ .write = ddebug_proc_write
+};
+
+static const struct proc_ops proc_fops = {
+ .proc_open = ddebug_proc_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = seq_release_private,
+ .proc_write = ddebug_proc_write
+};
+
+static void ddebug_attach_module_classes(struct ddebug_table *dt,
+ struct ddebug_class_map *classes,
+ int num_classes)
+{
+ struct ddebug_class_map *cm;
+ int i, j, ct = 0;
+
+ for (cm = classes, i = 0; i < num_classes; i++, cm++) {
+
+ if (!strcmp(cm->mod_name, dt->mod_name)) {
+
+ v2pr_info("class[%d]: module:%s base:%d len:%d ty:%d\n", i,
+ cm->mod_name, cm->base, cm->length, cm->map_type);
+
+ for (j = 0; j < cm->length; j++)
+ v3pr_info(" %d: %d %s\n", j + cm->base, j,
+ cm->class_names[j]);
+
+ list_add(&cm->link, &dt->maps);
+ ct++;
+ }
+ }
+ if (ct)
+ vpr_info("module:%s attached %d classes\n", dt->mod_name, ct);
+}
+
+/*
+ * Allocate a new ddebug_table for the given module
+ * and add it to the global list.
+ */
+static int ddebug_add_module(struct _ddebug_info *di, const char *modname)
+{
+ struct ddebug_table *dt;
+
+ v3pr_info("add-module: %s.%d sites\n", modname, di->num_descs);
+ if (!di->num_descs) {
+ v3pr_info(" skip %s\n", modname);
+ return 0;
+ }
+
+ dt = kzalloc(sizeof(*dt), GFP_KERNEL);
+ if (dt == NULL) {
+ pr_err("error adding module: %s\n", modname);
+ return -ENOMEM;
+ }
+ /*
+ * For built-in modules, name lives in .rodata and is
+ * immortal. For loaded modules, name points at the name[]
+ * member of struct module, which lives at least as long as
+ * this struct ddebug_table.
+ */
+ dt->mod_name = modname;
+ dt->ddebugs = di->descs;
+ dt->num_ddebugs = di->num_descs;
+
+ INIT_LIST_HEAD(&dt->link);
+ INIT_LIST_HEAD(&dt->maps);
+
+ if (di->classes && di->num_classes)
+ ddebug_attach_module_classes(dt, di->classes, di->num_classes);
+
+ mutex_lock(&ddebug_lock);
+ list_add_tail(&dt->link, &ddebug_tables);
+ mutex_unlock(&ddebug_lock);
+
+ vpr_info("%3u debug prints in module %s\n", di->num_descs, modname);
+ return 0;
+}
+
+/* helper for ddebug_dyndbg_(boot|module)_param_cb */
+static int ddebug_dyndbg_param_cb(char *param, char *val,
+ const char *modname, int on_err)
+{
+ char *sep;
+
+ sep = strchr(param, '.');
+ if (sep) {
+ /* needed only for ddebug_dyndbg_boot_param_cb */
+ *sep = '\0';
+ modname = param;
+ param = sep + 1;
+ }
+ if (strcmp(param, "dyndbg"))
+ return on_err; /* determined by caller */
+
+ ddebug_exec_queries((val ? val : "+p"), modname);
+
+ return 0; /* query failure shouldn't stop module load */
+}
+
+/* handle both dyndbg and $module.dyndbg params at boot */
+static int ddebug_dyndbg_boot_param_cb(char *param, char *val,
+ const char *unused, void *arg)
+{
+ vpr_info("%s=\"%s\"\n", param, val);
+ return ddebug_dyndbg_param_cb(param, val, NULL, 0);
+}
+
+/*
+ * modprobe foo finds foo.params in boot-args, strips "foo.", and
+ * passes them to load_module(). This callback gets unknown params,
+ * processes dyndbg params, rejects others.
+ */
+int ddebug_dyndbg_module_param_cb(char *param, char *val, const char *module)
+{
+ vpr_info("module: %s %s=\"%s\"\n", module, param, val);
+ return ddebug_dyndbg_param_cb(param, val, module, -ENOENT);
+}
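+
+/*
+ * Boot-arg sketch: both forms below reach ddebug_dyndbg_param_cb(),
+ * and a bare "foo.dyndbg" (no value) defaults to "+p" per the
+ * (val ? val : "+p") above:
+ *
+ *   dyndbg="module foo +p"          // modname taken from the query
+ *   foo.dyndbg="func foo_init +p"   // "foo." stripped, used as modname
+ */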
+
+static void ddebug_table_free(struct ddebug_table *dt)
+{
+ list_del_init(&dt->link);
+ kfree(dt);
+}
+
+#ifdef CONFIG_MODULES
+
+/*
+ * Called in response to a module being unloaded. Removes
+ * any ddebug_table's which point at the module.
+ */
+static int ddebug_remove_module(const char *mod_name)
+{
+ struct ddebug_table *dt, *nextdt;
+ int ret = -ENOENT;
+
+ mutex_lock(&ddebug_lock);
+ list_for_each_entry_safe(dt, nextdt, &ddebug_tables, link) {
+ if (dt->mod_name == mod_name) {
+ ddebug_table_free(dt);
+ ret = 0;
+ break;
+ }
+ }
+ mutex_unlock(&ddebug_lock);
+ if (!ret)
+ v2pr_info("removed module \"%s\"\n", mod_name);
+ return ret;
+}
+
+static int ddebug_module_notify(struct notifier_block *self, unsigned long val,
+ void *data)
+{
+ struct module *mod = data;
+ int ret = 0;
+
+ switch (val) {
+ case MODULE_STATE_COMING:
+ ret = ddebug_add_module(&mod->dyndbg_info, mod->name);
+ if (ret)
+ WARN(1, "Failed to allocate memory: dyndbg may not work properly.\n");
+ break;
+ case MODULE_STATE_GOING:
+ ddebug_remove_module(mod->name);
+ break;
+ }
+
+ return notifier_from_errno(ret);
+}
+
+static struct notifier_block ddebug_module_nb = {
+ .notifier_call = ddebug_module_notify,
+ .priority = 0, /* dynamic debug depends on jump label */
+};
+
+#endif /* CONFIG_MODULES */
+
+static void ddebug_remove_all_tables(void)
+{
+ mutex_lock(&ddebug_lock);
+ while (!list_empty(&ddebug_tables)) {
+ struct ddebug_table *dt = list_entry(ddebug_tables.next,
+ struct ddebug_table,
+ link);
+ ddebug_table_free(dt);
+ }
+ mutex_unlock(&ddebug_lock);
+}
+
+static __initdata int ddebug_init_success;
+
+static int __init dynamic_debug_init_control(void)
+{
+ struct proc_dir_entry *procfs_dir;
+ struct dentry *debugfs_dir;
+
+ if (!ddebug_init_success)
+ return -ENODEV;
+
+ /* Create the control file in debugfs if it is enabled */
+ if (debugfs_initialized()) {
+ debugfs_dir = debugfs_create_dir("dynamic_debug", NULL);
+ debugfs_create_file("control", 0644, debugfs_dir, NULL,
+ &ddebug_proc_fops);
+ }
+
+ /* Also create the control file in procfs */
+ procfs_dir = proc_mkdir("dynamic_debug", NULL);
+ if (procfs_dir)
+ proc_create("control", 0644, procfs_dir, &proc_fops);
+
+ return 0;
+}
+
+static int __init dynamic_debug_init(void)
+{
+ struct _ddebug *iter, *iter_mod_start;
+ int ret, i, mod_sites, mod_ct;
+ const char *modname;
+ char *cmdline;
+
+ struct _ddebug_info di = {
+ .descs = __start___dyndbg,
+ .classes = __start___dyndbg_classes,
+ .num_descs = __stop___dyndbg - __start___dyndbg,
+ .num_classes = __stop___dyndbg_classes - __start___dyndbg_classes,
+ };
+
+#ifdef CONFIG_MODULES
+ ret = register_module_notifier(&ddebug_module_nb);
+ if (ret) {
+ pr_warn("Failed to register dynamic debug module notifier\n");
+ return ret;
+ }
+#endif /* CONFIG_MODULES */
+
+ if (&__start___dyndbg == &__stop___dyndbg) {
+ if (IS_ENABLED(CONFIG_DYNAMIC_DEBUG)) {
+ pr_warn("_ddebug table is empty in a CONFIG_DYNAMIC_DEBUG build\n");
+ return 1;
+ }
+ pr_info("Ignore empty _ddebug table in a CONFIG_DYNAMIC_DEBUG_CORE build\n");
+ ddebug_init_success = 1;
+ return 0;
+ }
+
+ iter = iter_mod_start = __start___dyndbg;
+ modname = iter->modname;
+ i = mod_sites = mod_ct = 0;
+
+ for (; iter < __stop___dyndbg; iter++, i++, mod_sites++) {
+
+ if (strcmp(modname, iter->modname)) {
+ mod_ct++;
+ di.num_descs = mod_sites;
+ di.descs = iter_mod_start;
+ ret = ddebug_add_module(&di, modname);
+ if (ret)
+ goto out_err;
+
+ mod_sites = 0;
+ modname = iter->modname;
+ iter_mod_start = iter;
+ }
+ }
+ di.num_descs = mod_sites;
+ di.descs = iter_mod_start;
+ ret = ddebug_add_module(&di, modname);
+ if (ret)
+ goto out_err;
+
+ ddebug_init_success = 1;
+ vpr_info("%d prdebugs in %d modules, %d KiB in ddebug tables, %d kiB in __dyndbg section\n",
+ i, mod_ct, (int)((mod_ct * sizeof(struct ddebug_table)) >> 10),
+ (int)((i * sizeof(struct _ddebug)) >> 10));
+
+ if (di.num_classes)
+ v2pr_info(" %d builtin ddebug class-maps\n", di.num_classes);
+
+ /* now that ddebug tables are loaded, process all boot args
+ * again to find and activate queries given in dyndbg params.
+ * While this has already been done for known boot params, it
+ * ignored the unknown ones (dyndbg in particular). Reusing
+ * parse_args avoids ad-hoc parsing. This will also attempt
+ * to activate queries for not-yet-loaded modules, which is
+ * slightly noisy if verbose, but harmless.
+ */
+ cmdline = kstrdup(saved_command_line, GFP_KERNEL);
+ parse_args("dyndbg params", cmdline, NULL,
+ 0, 0, 0, NULL, &ddebug_dyndbg_boot_param_cb);
+ kfree(cmdline);
+ return 0;
+
+out_err:
+ ddebug_remove_all_tables();
+ return 0;
+}
+/* Allow early initialization for boot messages via boot param */
+early_initcall(dynamic_debug_init);
+
+/* Debugfs setup must be done later */
+fs_initcall(dynamic_debug_init_control);
diff --git a/lib/dynamic_queue_limits.c b/lib/dynamic_queue_limits.c
new file mode 100644
index 000000000..fde0aa244
--- /dev/null
+++ b/lib/dynamic_queue_limits.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Dynamic byte queue limits. See include/linux/dynamic_queue_limits.h
+ *
+ * Copyright (c) 2011, Tom Herbert <therbert@google.com>
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/jiffies.h>
+#include <linux/dynamic_queue_limits.h>
+#include <linux/compiler.h>
+#include <linux/export.h>
+
+#define POSDIFF(A, B) ((int)((A) - (B)) > 0 ? (A) - (B) : 0)
+#define AFTER_EQ(A, B) ((int)((A) - (B)) >= 0)
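+
+/*
+ * Both macros are wraparound-safe on unsigned counters: e.g. with
+ * A = 2 after the counter wrapped and B = UINT_MAX - 1, (int)(A - B)
+ * evaluates to 4, so POSDIFF(A, B) == 4 rather than a huge unsigned
+ * difference.
+ */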
+
+/* Records completed count and recalculates the queue limit */
+void dql_completed(struct dql *dql, unsigned int count)
+{
+ unsigned int inprogress, prev_inprogress, limit;
+ unsigned int ovlimit, completed, num_queued;
+ bool all_prev_completed;
+
+ num_queued = READ_ONCE(dql->num_queued);
+
+ /* Can't complete more than what's in queue */
+ BUG_ON(count > num_queued - dql->num_completed);
+
+ completed = dql->num_completed + count;
+ limit = dql->limit;
+ ovlimit = POSDIFF(num_queued - dql->num_completed, limit);
+ inprogress = num_queued - completed;
+ prev_inprogress = dql->prev_num_queued - dql->num_completed;
+ all_prev_completed = AFTER_EQ(completed, dql->prev_num_queued);
+
+ if ((ovlimit && !inprogress) ||
+ (dql->prev_ovlimit && all_prev_completed)) {
+ /*
+ * The queue is considered starved if:
+ * - The queue was over-limit in the last interval,
+ * and there is no more data in the queue.
+ * OR
+ * - The queue was over-limit in the previous interval and
+ * when enqueuing it was possible that all queued data
+ * had been consumed. This covers the case where the queue
+ * may have become starved between completion processing
+ * running and the next time enqueue was scheduled.
+ *
+ * When the queue is starved, increase the limit by the amount
+ * of bytes both sent and completed in the last interval,
+ * plus any previous over-limit.
+ */
+ limit += POSDIFF(completed, dql->prev_num_queued) +
+ dql->prev_ovlimit;
+ dql->slack_start_time = jiffies;
+ dql->lowest_slack = UINT_MAX;
+ } else if (inprogress && prev_inprogress && !all_prev_completed) {
+ /*
+ * The queue was not starved; check if the limit can be decreased.
+ * A decrease is only considered if the queue has been busy for
+ * the whole interval (the check above).
+ *
+ * If there is slack, i.e. excess data queued above the amount
+ * needed to prevent starvation, the queue limit can be
+ * decreased. To avoid hysteresis we consider the minimum
+ * amount of slack found over several iterations of the
+ * completion routine.
+ */
+ unsigned int slack, slack_last_objs;
+
+ /*
+ * Slack is the maximum of:
+ * - The queue limit plus the previous over-limit, minus twice
+ * the number of objects completed. Note that twice the
+ * number of completed bytes is a basis for an upper bound
+ * on the limit.
+ * - The portion of objects in the last queuing operation that
+ * was not part of a non-zero previous over-limit; that is,
+ * "round down" by the non-overlimit portion of the last
+ * queueing operation.
+ */
+ slack = POSDIFF(limit + dql->prev_ovlimit,
+ 2 * (completed - dql->num_completed));
+ slack_last_objs = dql->prev_ovlimit ?
+ POSDIFF(dql->prev_last_obj_cnt, dql->prev_ovlimit) : 0;
+
+ slack = max(slack, slack_last_objs);
+
+ if (slack < dql->lowest_slack)
+ dql->lowest_slack = slack;
+
+ if (time_after(jiffies,
+ dql->slack_start_time + dql->slack_hold_time)) {
+ limit = POSDIFF(limit, dql->lowest_slack);
+ dql->slack_start_time = jiffies;
+ dql->lowest_slack = UINT_MAX;
+ }
+ }
+
+ /* Enforce bounds on limit */
+ limit = clamp(limit, dql->min_limit, dql->max_limit);
+
+ if (limit != dql->limit) {
+ dql->limit = limit;
+ ovlimit = 0;
+ }
+
+ dql->adj_limit = limit + completed;
+ dql->prev_ovlimit = ovlimit;
+ dql->prev_last_obj_cnt = dql->last_obj_cnt;
+ dql->num_completed = completed;
+ dql->prev_num_queued = num_queued;
+}
+EXPORT_SYMBOL(dql_completed);
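+
+/*
+ * Usage sketch (the enqueue-side helpers, e.g. dql_queued()/dql_avail(),
+ * live in include/linux/dynamic_queue_limits.h; the hook placement below
+ * is illustrative):
+ *
+ *   dql_init(&dql, HZ);                        // ~1s slack hold time
+ *   // transmit path:   dql_queued(&dql, bytes_queued);
+ *   // completion path: dql_completed(&dql, bytes_completed);
+ */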
+
+void dql_reset(struct dql *dql)
+{
+ /* Reset all dynamic values */
+ dql->limit = 0;
+ dql->num_queued = 0;
+ dql->num_completed = 0;
+ dql->last_obj_cnt = 0;
+ dql->prev_num_queued = 0;
+ dql->prev_last_obj_cnt = 0;
+ dql->prev_ovlimit = 0;
+ dql->lowest_slack = UINT_MAX;
+ dql->slack_start_time = jiffies;
+}
+EXPORT_SYMBOL(dql_reset);
+
+void dql_init(struct dql *dql, unsigned int hold_time)
+{
+ dql->max_limit = DQL_MAX_LIMIT;
+ dql->min_limit = 0;
+ dql->slack_hold_time = hold_time;
+ dql_reset(dql);
+}
+EXPORT_SYMBOL(dql_init);
diff --git a/lib/earlycpio.c b/lib/earlycpio.c
new file mode 100644
index 000000000..d2c37d64f
--- /dev/null
+++ b/lib/earlycpio.c
@@ -0,0 +1,141 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* ----------------------------------------------------------------------- *
+ *
+ * Copyright 2012 Intel Corporation; author H. Peter Anvin
+ *
+ * ----------------------------------------------------------------------- */
+
+/*
+ * earlycpio.c
+ *
+ * Find a specific cpio member; it must precede any compressed content.
+ * This is used to locate data items in the initramfs used by the
+ * kernel itself during early boot (before the main initramfs is
+ * decompressed). It is the responsibility of the initramfs creator
+ * to ensure that these items are uncompressed at the head of the
+ * blob. Depending on the boot loader or packaging tool, that may be a
+ * separate file or part of the same file.
+ */
+
+#include <linux/earlycpio.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+
+enum cpio_fields {
+ C_MAGIC,
+ C_INO,
+ C_MODE,
+ C_UID,
+ C_GID,
+ C_NLINK,
+ C_MTIME,
+ C_FILESIZE,
+ C_MAJ,
+ C_MIN,
+ C_RMAJ,
+ C_RMIN,
+ C_NAMESIZE,
+ C_CHKSUM,
+ C_NFIELDS
+};
+
+/**
+ * find_cpio_data - Search for files in an uncompressed cpio
+ * @path: The directory to search for, including a slash at the end
+ * @data: Pointer to the cpio archive or a header inside
+ * @len: Remaining length of the cpio, measured from the data pointer
+ * @nextoff: When a matching file is found, this is the offset from the
+ * beginning of the cpio to the beginning of the next file, not the
+ * matching file itself. It can be used to iterate through the cpio
+ * to find all files inside of a directory path.
+ *
+ * Return: &struct cpio_data containing the address, length and
+ * filename (with the directory path cut off) of the found file.
+ * If you search for a filename and not for files in a directory,
+ * pass the absolute path of the filename in the cpio and make sure
+ * the match returned an empty filename string.
+ */
+
+struct cpio_data find_cpio_data(const char *path, void *data,
+ size_t len, long *nextoff)
+{
+ const size_t cpio_header_len = 8*C_NFIELDS - 2;
+ struct cpio_data cd = { NULL, 0, "" };
+ const char *p, *dptr, *nptr;
+ unsigned int ch[C_NFIELDS], *chp, v;
+ unsigned char c, x;
+ size_t mypathsize = strlen(path);
+ int i, j;
+
+ p = data;
+
+ while (len > cpio_header_len) {
+ if (!*p) {
+ /* All cpio headers need to be 4-byte aligned */
+ p += 4;
+ len -= 4;
+ continue;
+ }
+
+ j = 6; /* The magic field is only 6 characters */
+ chp = ch;
+ for (i = C_NFIELDS; i; i--) {
+ v = 0;
+ while (j--) {
+ v <<= 4;
+ c = *p++;
+
+ x = c - '0';
+ if (x < 10) {
+ v += x;
+ continue;
+ }
+
+ x = (c | 0x20) - 'a';
+ if (x < 6) {
+ v += x + 10;
+ continue;
+ }
+
+ goto quit; /* Invalid hexadecimal */
+ }
+ *chp++ = v;
+ j = 8; /* All other fields are 8 characters */
+ }
+
+ if ((ch[C_MAGIC] - 0x070701) > 1)
+ goto quit; /* Invalid magic */
+
+ len -= cpio_header_len;
+
+ dptr = PTR_ALIGN(p + ch[C_NAMESIZE], 4);
+ nptr = PTR_ALIGN(dptr + ch[C_FILESIZE], 4);
+
+ if (nptr > p + len || dptr < p || nptr < dptr)
+ goto quit; /* Buffer overrun */
+
+ if ((ch[C_MODE] & 0170000) == 0100000 &&
+ ch[C_NAMESIZE] >= mypathsize &&
+ !memcmp(p, path, mypathsize)) {
+
+ if (nextoff)
+ *nextoff = (long)nptr - (long)data;
+
+ if (ch[C_NAMESIZE] - mypathsize >= MAX_CPIO_FILE_NAME) {
+ pr_warn(
+ "File %s exceeding MAX_CPIO_FILE_NAME [%d]\n",
+ p, MAX_CPIO_FILE_NAME);
+ }
+ strscpy(cd.name, p + mypathsize, MAX_CPIO_FILE_NAME);
+
+ cd.data = (void *)dptr;
+ cd.size = ch[C_FILESIZE];
+ return cd; /* Found it! */
+ }
+ len -= (nptr - p);
+ p = nptr;
+ }
+
+quit:
+ return cd;
+}
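+
+/*
+ * Usage sketch (hypothetical caller; "kernel/fw/" is illustrative, and
+ * the trailing slash makes it a directory search per the kernel-doc):
+ *
+ *   struct cpio_data cd = find_cpio_data("kernel/fw/", initrd, size, NULL);
+ *   if (cd.data)
+ *           pr_info("found %s, %zu bytes\n", cd.name, cd.size);
+ */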
diff --git a/lib/errname.c b/lib/errname.c
new file mode 100644
index 000000000..0c336b0f1
--- /dev/null
+++ b/lib/errname.c
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/build_bug.h>
+#include <linux/errno.h>
+#include <linux/errname.h>
+#include <linux/kernel.h>
+#include <linux/math.h>
+
+/*
+ * Ensure these tables do not accidentally become gigantic if some
+ * huge errno makes it in. On most architectures, the first table will
+ * only have about 140 entries, but mips and parisc have more sparsely
+ * allocated errnos (with EHWPOISON = 257 on parisc, and EDQUOT = 1133
+ * on mips), so this wastes a bit of space on those - though we
+ * special-case EDQUOT.
+ */
+#define E(err) [err + BUILD_BUG_ON_ZERO(err <= 0 || err > 300)] = "-" #err
+static const char *names_0[] = {
+ E(E2BIG),
+ E(EACCES),
+ E(EADDRINUSE),
+ E(EADDRNOTAVAIL),
+ E(EADV),
+ E(EAFNOSUPPORT),
+ E(EAGAIN), /* EWOULDBLOCK */
+ E(EALREADY),
+ E(EBADE),
+ E(EBADF),
+ E(EBADFD),
+ E(EBADMSG),
+ E(EBADR),
+ E(EBADRQC),
+ E(EBADSLT),
+ E(EBFONT),
+ E(EBUSY),
+ E(ECANCELED), /* ECANCELLED */
+ E(ECHILD),
+ E(ECHRNG),
+ E(ECOMM),
+ E(ECONNABORTED),
+ E(ECONNREFUSED), /* EREFUSED */
+ E(ECONNRESET),
+ E(EDEADLK), /* EDEADLOCK */
+#if EDEADLK != EDEADLOCK /* mips, sparc, powerpc */
+ E(EDEADLOCK),
+#endif
+ E(EDESTADDRREQ),
+ E(EDOM),
+ E(EDOTDOT),
+#ifndef CONFIG_MIPS
+ E(EDQUOT),
+#endif
+ E(EEXIST),
+ E(EFAULT),
+ E(EFBIG),
+ E(EHOSTDOWN),
+ E(EHOSTUNREACH),
+ E(EHWPOISON),
+ E(EIDRM),
+ E(EILSEQ),
+#ifdef EINIT
+ E(EINIT),
+#endif
+ E(EINPROGRESS),
+ E(EINTR),
+ E(EINVAL),
+ E(EIO),
+ E(EISCONN),
+ E(EISDIR),
+ E(EISNAM),
+ E(EKEYEXPIRED),
+ E(EKEYREJECTED),
+ E(EKEYREVOKED),
+ E(EL2HLT),
+ E(EL2NSYNC),
+ E(EL3HLT),
+ E(EL3RST),
+ E(ELIBACC),
+ E(ELIBBAD),
+ E(ELIBEXEC),
+ E(ELIBMAX),
+ E(ELIBSCN),
+ E(ELNRNG),
+ E(ELOOP),
+ E(EMEDIUMTYPE),
+ E(EMFILE),
+ E(EMLINK),
+ E(EMSGSIZE),
+ E(EMULTIHOP),
+ E(ENAMETOOLONG),
+ E(ENAVAIL),
+ E(ENETDOWN),
+ E(ENETRESET),
+ E(ENETUNREACH),
+ E(ENFILE),
+ E(ENOANO),
+ E(ENOBUFS),
+ E(ENOCSI),
+ E(ENODATA),
+ E(ENODEV),
+ E(ENOENT),
+ E(ENOEXEC),
+ E(ENOKEY),
+ E(ENOLCK),
+ E(ENOLINK),
+ E(ENOMEDIUM),
+ E(ENOMEM),
+ E(ENOMSG),
+ E(ENONET),
+ E(ENOPKG),
+ E(ENOPROTOOPT),
+ E(ENOSPC),
+ E(ENOSR),
+ E(ENOSTR),
+ E(ENOSYS),
+ E(ENOTBLK),
+ E(ENOTCONN),
+ E(ENOTDIR),
+ E(ENOTEMPTY),
+ E(ENOTNAM),
+ E(ENOTRECOVERABLE),
+ E(ENOTSOCK),
+ E(ENOTTY),
+ E(ENOTUNIQ),
+ E(ENXIO),
+ E(EOPNOTSUPP),
+ E(EOVERFLOW),
+ E(EOWNERDEAD),
+ E(EPERM),
+ E(EPFNOSUPPORT),
+ E(EPIPE),
+#ifdef EPROCLIM
+ E(EPROCLIM),
+#endif
+ E(EPROTO),
+ E(EPROTONOSUPPORT),
+ E(EPROTOTYPE),
+ E(ERANGE),
+ E(EREMCHG),
+#ifdef EREMDEV
+ E(EREMDEV),
+#endif
+ E(EREMOTE),
+ E(EREMOTEIO),
+ E(ERESTART),
+ E(ERFKILL),
+ E(EROFS),
+#ifdef ERREMOTE
+ E(ERREMOTE),
+#endif
+ E(ESHUTDOWN),
+ E(ESOCKTNOSUPPORT),
+ E(ESPIPE),
+ E(ESRCH),
+ E(ESRMNT),
+ E(ESTALE),
+ E(ESTRPIPE),
+ E(ETIME),
+ E(ETIMEDOUT),
+ E(ETOOMANYREFS),
+ E(ETXTBSY),
+ E(EUCLEAN),
+ E(EUNATCH),
+ E(EUSERS),
+ E(EXDEV),
+ E(EXFULL),
+};
+#undef E
+
+#ifdef EREFUSED /* parisc */
+static_assert(EREFUSED == ECONNREFUSED);
+#endif
+#ifdef ECANCELLED /* parisc */
+static_assert(ECANCELLED == ECANCELED);
+#endif
+static_assert(EAGAIN == EWOULDBLOCK); /* everywhere */
+
+#define E(err) [err - 512 + BUILD_BUG_ON_ZERO(err < 512 || err > 550)] = "-" #err
+static const char *names_512[] = {
+ E(ERESTARTSYS),
+ E(ERESTARTNOINTR),
+ E(ERESTARTNOHAND),
+ E(ENOIOCTLCMD),
+ E(ERESTART_RESTARTBLOCK),
+ E(EPROBE_DEFER),
+ E(EOPENSTALE),
+ E(ENOPARAM),
+
+ E(EBADHANDLE),
+ E(ENOTSYNC),
+ E(EBADCOOKIE),
+ E(ENOTSUPP),
+ E(ETOOSMALL),
+ E(ESERVERFAULT),
+ E(EBADTYPE),
+ E(EJUKEBOX),
+ E(EIOCBQUEUED),
+ E(ERECALLCONFLICT),
+};
+#undef E
+
+static const char *__errname(unsigned err)
+{
+ if (err < ARRAY_SIZE(names_0))
+ return names_0[err];
+ if (err >= 512 && err - 512 < ARRAY_SIZE(names_512))
+ return names_512[err - 512];
+ /* But why? */
+ if (IS_ENABLED(CONFIG_MIPS) && err == EDQUOT) /* 1133 */
+ return "-EDQUOT";
+ return NULL;
+}
+
+/*
+ * errname(EIO) -> "EIO"
+ * errname(-EIO) -> "-EIO"
+ */
+const char *errname(int err)
+{
+ const char *name = __errname(abs(err));
+ if (!name)
+ return NULL;
+
+ return err > 0 ? name + 1 : name;
+}
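+
+/*
+ * Usage sketch, per the mapping documented above:
+ *
+ *   pr_warn("request failed: %s\n", errname(ret) ?: "(unknown)");
+ *
+ * prints "request failed: -EIO" when ret == -EIO.
+ */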
diff --git a/lib/error-inject.c b/lib/error-inject.c
new file mode 100644
index 000000000..887acd9a6
--- /dev/null
+++ b/lib/error-inject.c
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: GPL-2.0
+// error-inject.c: Function-level error injection table
+#include <linux/error-injection.h>
+#include <linux/debugfs.h>
+#include <linux/kallsyms.h>
+#include <linux/kprobes.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <asm/sections.h>
+
+/* Whitelist of symbols that can be overridden for error injection. */
+static LIST_HEAD(error_injection_list);
+static DEFINE_MUTEX(ei_mutex);
+struct ei_entry {
+ struct list_head list;
+ unsigned long start_addr;
+ unsigned long end_addr;
+ int etype;
+ void *priv;
+};
+
+bool within_error_injection_list(unsigned long addr)
+{
+ struct ei_entry *ent;
+ bool ret = false;
+
+ mutex_lock(&ei_mutex);
+ list_for_each_entry(ent, &error_injection_list, list) {
+ if (addr >= ent->start_addr && addr < ent->end_addr) {
+ ret = true;
+ break;
+ }
+ }
+ mutex_unlock(&ei_mutex);
+ return ret;
+}
+
+int get_injectable_error_type(unsigned long addr)
+{
+ struct ei_entry *ent;
+ int ei_type = -EINVAL;
+
+ mutex_lock(&ei_mutex);
+ list_for_each_entry(ent, &error_injection_list, list) {
+ if (addr >= ent->start_addr && addr < ent->end_addr) {
+ ei_type = ent->etype;
+ break;
+ }
+ }
+ mutex_unlock(&ei_mutex);
+
+ return ei_type;
+}
+
+/*
+ * Lookup and populate the error_injection_list.
+ *
+ * For safety reasons we only allow certain functions to be overridden with
+ * bpf_error_injection, so we need to populate the list of the symbols that have
+ * been marked as safe for overriding.
+ */
+static void populate_error_injection_list(struct error_injection_entry *start,
+ struct error_injection_entry *end,
+ void *priv)
+{
+ struct error_injection_entry *iter;
+ struct ei_entry *ent;
+ unsigned long entry, offset = 0, size = 0;
+
+ mutex_lock(&ei_mutex);
+ for (iter = start; iter < end; iter++) {
+ entry = (unsigned long)dereference_symbol_descriptor((void *)iter->addr);
+
+ if (!kernel_text_address(entry) ||
+ !kallsyms_lookup_size_offset(entry, &size, &offset)) {
+ pr_err("Failed to find error inject entry at %p\n",
+ (void *)entry);
+ continue;
+ }
+
+ ent = kmalloc(sizeof(*ent), GFP_KERNEL);
+ if (!ent)
+ break;
+ ent->start_addr = entry;
+ ent->end_addr = entry + size;
+ ent->etype = iter->etype;
+ ent->priv = priv;
+ INIT_LIST_HEAD(&ent->list);
+ list_add_tail(&ent->list, &error_injection_list);
+ }
+ mutex_unlock(&ei_mutex);
+}
+
+/* Markers of the _error_inject_whitelist section */
+extern struct error_injection_entry __start_error_injection_whitelist[];
+extern struct error_injection_entry __stop_error_injection_whitelist[];
+
+static void __init populate_kernel_ei_list(void)
+{
+ populate_error_injection_list(__start_error_injection_whitelist,
+ __stop_error_injection_whitelist,
+ NULL);
+}
+
+#ifdef CONFIG_MODULES
+static void module_load_ei_list(struct module *mod)
+{
+ if (!mod->num_ei_funcs)
+ return;
+
+ populate_error_injection_list(mod->ei_funcs,
+ mod->ei_funcs + mod->num_ei_funcs, mod);
+}
+
+static void module_unload_ei_list(struct module *mod)
+{
+ struct ei_entry *ent, *n;
+
+ if (!mod->num_ei_funcs)
+ return;
+
+ mutex_lock(&ei_mutex);
+ list_for_each_entry_safe(ent, n, &error_injection_list, list) {
+ if (ent->priv == mod) {
+ list_del_init(&ent->list);
+ kfree(ent);
+ }
+ }
+ mutex_unlock(&ei_mutex);
+}
+
+/* Module notifier call back, checking error injection table on the module */
+static int ei_module_callback(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ struct module *mod = data;
+
+ if (val == MODULE_STATE_COMING)
+ module_load_ei_list(mod);
+ else if (val == MODULE_STATE_GOING)
+ module_unload_ei_list(mod);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block ei_module_nb = {
+ .notifier_call = ei_module_callback,
+ .priority = 0
+};
+
+static __init int module_ei_init(void)
+{
+ return register_module_notifier(&ei_module_nb);
+}
+#else /* !CONFIG_MODULES */
+#define module_ei_init() (0)
+#endif
+
+/*
+ * error_injection/whitelist -- shows which functions can be overridden for
+ * error injection.
+ */
+static void *ei_seq_start(struct seq_file *m, loff_t *pos)
+{
+ mutex_lock(&ei_mutex);
+ return seq_list_start(&error_injection_list, *pos);
+}
+
+static void ei_seq_stop(struct seq_file *m, void *v)
+{
+ mutex_unlock(&ei_mutex);
+}
+
+static void *ei_seq_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ return seq_list_next(v, &error_injection_list, pos);
+}
+
+static const char *error_type_string(int etype)
+{
+ switch (etype) {
+ case EI_ETYPE_NULL:
+ return "NULL";
+ case EI_ETYPE_ERRNO:
+ return "ERRNO";
+ case EI_ETYPE_ERRNO_NULL:
+ return "ERRNO_NULL";
+ case EI_ETYPE_TRUE:
+ return "TRUE";
+ default:
+ return "(unknown)";
+ }
+}
+
+static int ei_seq_show(struct seq_file *m, void *v)
+{
+ struct ei_entry *ent = list_entry(v, struct ei_entry, list);
+
+ seq_printf(m, "%ps\t%s\n", (void *)ent->start_addr,
+ error_type_string(ent->etype));
+ return 0;
+}
+
+static const struct seq_operations ei_sops = {
+ .start = ei_seq_start,
+ .next = ei_seq_next,
+ .stop = ei_seq_stop,
+ .show = ei_seq_show,
+};
+
+DEFINE_SEQ_ATTRIBUTE(ei);
+
+static int __init ei_debugfs_init(void)
+{
+ struct dentry *dir, *file;
+
+ dir = debugfs_create_dir("error_injection", NULL);
+
+ file = debugfs_create_file("list", 0444, dir, NULL, &ei_fops);
+ if (!file) {
+ debugfs_remove(dir);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int __init init_error_injection(void)
+{
+ populate_kernel_ei_list();
+
+ if (!module_ei_init())
+ ei_debugfs_init();
+
+ return 0;
+}
+late_initcall(init_error_injection);
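+
+/*
+ * Usage sketch: a function opts in with ALLOW_ERROR_INJECTION() from
+ * <asm-generic/error-injection.h>, which emits an entry into the
+ * whitelist section walked above ("my_probe" is illustrative):
+ *
+ *   ALLOW_ERROR_INJECTION(my_probe, ERRNO);
+ */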
diff --git a/lib/errseq.c b/lib/errseq.c
new file mode 100644
index 000000000..93e9b9435
--- /dev/null
+++ b/lib/errseq.c
@@ -0,0 +1,207 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/err.h>
+#include <linux/bug.h>
+#include <linux/atomic.h>
+#include <linux/errseq.h>
+#include <linux/log2.h>
+
+/*
+ * An errseq_t is a way of recording errors in one place, and allowing any
+ * number of "subscribers" to tell whether it has changed since a previous
+ * point where it was sampled.
+ *
+ * It's implemented as an unsigned 32-bit value. The low order bits are
+ * designated to hold an error code (between 0 and -MAX_ERRNO). The upper bits
+ * are used as a counter. This is done with atomics instead of locking so that
+ * these functions can be called from any context.
+ *
+ * The general idea is for consumers to sample an errseq_t value. That value
+ * can later be used to tell whether any new errors have occurred since that
+ * sampling was done.
+ *
+ * Note that there is a risk of collisions if new errors are being recorded
+ * frequently, since we have so few bits to use as a counter.
+ *
+ * To mitigate this, one bit is used as a flag to tell whether the value has
+ * been sampled since a new value was recorded. That allows us to avoid bumping
+ * the counter if no one has sampled it since the last time an error was
+ * recorded.
+ *
+ * A new errseq_t should always be zeroed out. An errseq_t value of all
+ * zeroes is the special (but common) case where there has never been an
+ * error. An all-zero value thus serves as the "epoch" if one wishes to
+ * know whether there has ever been an error set since it was first
+ * initialized.
+ */
+
+/* The low bits are designated for error code (max of MAX_ERRNO) */
+#define ERRSEQ_SHIFT ilog2(MAX_ERRNO + 1)
+
+/* This bit is used as a flag to indicate whether the value has been seen */
+#define ERRSEQ_SEEN (1 << ERRSEQ_SHIFT)
+
+/* The lowest bit of the counter */
+#define ERRSEQ_CTR_INC (1 << (ERRSEQ_SHIFT + 1))
+
+/**
+ * errseq_set - set a errseq_t for later reporting
+ * @eseq: errseq_t field that should be set
+ * @err: error to set (must be between -1 and -MAX_ERRNO)
+ *
+ * This function sets the error in @eseq, and increments the sequence counter
+ * if the last sequence was sampled at some point in the past.
+ *
+ * Any error set will always overwrite an existing error.
+ *
+ * Return: The previous value, primarily for debugging purposes. The
+ * return value should not be used as a previously sampled value in later
+ * calls as it will not have the SEEN flag set.
+ */
+errseq_t errseq_set(errseq_t *eseq, int err)
+{
+ errseq_t cur, old;
+
+ /* MAX_ERRNO must be able to serve as a mask */
+ BUILD_BUG_ON_NOT_POWER_OF_2(MAX_ERRNO + 1);
+
+ /*
+ * Ensure the error code actually fits where we want it to go. If it
+ * doesn't then just throw a warning and don't record anything. We
+ * also don't accept zero here as that would effectively clear a
+ * previous error.
+ */
+ old = READ_ONCE(*eseq);
+
+ if (WARN(unlikely(err == 0 || (unsigned int)-err > MAX_ERRNO),
+ "err = %d\n", err))
+ return old;
+
+ for (;;) {
+ errseq_t new;
+
+ /* Clear out error bits and set new error */
+ new = (old & ~(MAX_ERRNO|ERRSEQ_SEEN)) | -err;
+
+ /* Only increment if someone has looked at it */
+ if (old & ERRSEQ_SEEN)
+ new += ERRSEQ_CTR_INC;
+
+ /* If there would be no change, then call it done */
+ if (new == old) {
+ cur = new;
+ break;
+ }
+
+ /* Try to swap the new value into place */
+ cur = cmpxchg(eseq, old, new);
+
+ /*
+ * Call it success if we did the swap or someone else beat us
+ * to it for the same value.
+ */
+ if (likely(cur == old || cur == new))
+ break;
+
+ /* Raced with an update, try again */
+ old = cur;
+ }
+ return cur;
+}
+EXPORT_SYMBOL(errseq_set);
+
+/**
+ * errseq_sample() - Grab current errseq_t value.
+ * @eseq: Pointer to errseq_t to be sampled.
+ *
+ * This function allows callers to initialize their errseq_t variable.
+ * If the error has been "seen", new callers will not see an old error.
+ * If there is an unseen error in @eseq, the caller of this function will
+ * see it the next time it checks for an error.
+ *
+ * Context: Any context.
+ * Return: The current errseq value.
+ */
+errseq_t errseq_sample(errseq_t *eseq)
+{
+ errseq_t old = READ_ONCE(*eseq);
+
+ /* If nobody has seen this error yet, then we can be the first. */
+ if (!(old & ERRSEQ_SEEN))
+ old = 0;
+ return old;
+}
+EXPORT_SYMBOL(errseq_sample);
+
+/**
+ * errseq_check() - Has an error occurred since a particular sample point?
+ * @eseq: Pointer to errseq_t value to be checked.
+ * @since: Previously-sampled errseq_t from which to check.
+ *
+ * Grab the value that eseq points to, and see if it has changed @since
+ * the given value was sampled. The @since value is not advanced, so there
+ * is no need to mark the value as seen.
+ *
+ * Return: The latest error set in the errseq_t or 0 if it hasn't changed.
+ */
+int errseq_check(errseq_t *eseq, errseq_t since)
+{
+ errseq_t cur = READ_ONCE(*eseq);
+
+ if (likely(cur == since))
+ return 0;
+ return -(cur & MAX_ERRNO);
+}
+EXPORT_SYMBOL(errseq_check);
+
+/**
+ * errseq_check_and_advance() - Check an errseq_t and advance to current value.
+ * @eseq: Pointer to value being checked and reported.
+ * @since: Pointer to previously-sampled errseq_t to check against and advance.
+ *
+ * Grab the eseq value, and see whether it matches the value that @since
+ * points to. If it does, then just return 0.
+ *
+ * If it doesn't, then the value has changed. Set the "seen" flag, and try to
+ * swap it into place as the new eseq value. Then, set that value as the new
+ * "since" value, and return whatever the error portion is set to.
+ *
+ * Note that no locking is provided here for concurrent updates to the "since"
+ * value. The caller must provide that if necessary. Because of this, callers
+ * may want to do a lockless errseq_check before taking the lock and calling
+ * this.
+ *
+ * Return: Negative errno if one has been stored, or 0 if no new error has
+ * occurred.
+ */
+int errseq_check_and_advance(errseq_t *eseq, errseq_t *since)
+{
+ int err = 0;
+ errseq_t old, new;
+
+ /*
+ * Most callers will want to use the inline wrapper to check this,
+ * so that the common case of no error is handled without needing
+ * to take the lock that protects the "since" value.
+ */
+ old = READ_ONCE(*eseq);
+ if (old != *since) {
+ /*
+ * Set the flag and try to swap it into place if it has
+ * changed.
+ *
+ * We don't care about the outcome of the swap here. If the
+ * swap doesn't occur, then it has either been updated by a
+ * writer who is altering the value in some way (updating
+ * counter or resetting the error), or another reader who is
+ * just setting the "seen" flag. Either outcome is OK, and we
+ * can advance "since" and return an error based on what we
+ * have.
+ */
+ new = old | ERRSEQ_SEEN;
+ if (new != old)
+ cmpxchg(eseq, old, new);
+ *since = new;
+ err = -(new & MAX_ERRNO);
+ }
+ return err;
+}
+EXPORT_SYMBOL(errseq_check_and_advance);
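+
+/*
+ * Usage sketch (one error domain, many subscribers; "wb_err" is
+ * illustrative):
+ *
+ *   errseq_t since = errseq_sample(&wb_err);            // subscribe
+ *   errseq_set(&wb_err, -EIO);                          // record an error
+ *   err = errseq_check_and_advance(&wb_err, &since);    // returns -EIO once
+ */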
diff --git a/lib/extable.c b/lib/extable.c
new file mode 100644
index 000000000..9c9f40bd2
--- /dev/null
+++ b/lib/extable.c
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Derived from arch/ppc/mm/extable.c and arch/i386/mm/extable.c.
+ *
+ * Copyright (C) 2004 Paul Mackerras, IBM Corp.
+ */
+
+#include <linux/bsearch.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sort.h>
+#include <linux/uaccess.h>
+#include <linux/extable.h>
+
+#ifndef ARCH_HAS_RELATIVE_EXTABLE
+#define ex_to_insn(x) ((x)->insn)
+#else
+static inline unsigned long ex_to_insn(const struct exception_table_entry *x)
+{
+ return (unsigned long)&x->insn + x->insn;
+}
+#endif
+
+#ifndef ARCH_HAS_RELATIVE_EXTABLE
+#define swap_ex NULL
+#else
+static void swap_ex(void *a, void *b, int size)
+{
+ struct exception_table_entry *x = a, *y = b, tmp;
+ int delta = b - a;
+
+ tmp = *x;
+ x->insn = y->insn + delta;
+ y->insn = tmp.insn - delta;
+
+#ifdef swap_ex_entry_fixup
+ swap_ex_entry_fixup(x, y, tmp, delta);
+#else
+ x->fixup = y->fixup + delta;
+ y->fixup = tmp.fixup - delta;
+#endif
+}
+#endif /* ARCH_HAS_RELATIVE_EXTABLE */
+
+/*
+ * The exception table needs to be sorted so that the binary
+ * search that we use to find entries in it works properly.
+ * This is used both for the kernel exception table and for
+ * the exception tables of modules that get loaded.
+ */
+static int cmp_ex_sort(const void *a, const void *b)
+{
+ const struct exception_table_entry *x = a, *y = b;
+
+ /* avoid overflow */
+ if (ex_to_insn(x) > ex_to_insn(y))
+ return 1;
+ if (ex_to_insn(x) < ex_to_insn(y))
+ return -1;
+ return 0;
+}
+
+void sort_extable(struct exception_table_entry *start,
+ struct exception_table_entry *finish)
+{
+ sort(start, finish - start, sizeof(struct exception_table_entry),
+ cmp_ex_sort, swap_ex);
+}
+
+#ifdef CONFIG_MODULES
+/*
+ * If the exception table is sorted, any entries referring to the
+ * module's init section will be at the beginning or the end.
+ */
+void trim_init_extable(struct module *m)
+{
+ /* trim the beginning */
+ while (m->num_exentries &&
+ within_module_init(ex_to_insn(&m->extable[0]), m)) {
+ m->extable++;
+ m->num_exentries--;
+ }
+ /* trim the end */
+ while (m->num_exentries &&
+ within_module_init(ex_to_insn(&m->extable[m->num_exentries - 1]),
+ m))
+ m->num_exentries--;
+}
+#endif /* CONFIG_MODULES */
+
+static int cmp_ex_search(const void *key, const void *elt)
+{
+ const struct exception_table_entry *_elt = elt;
+ unsigned long _key = *(unsigned long *)key;
+
+ /* avoid overflow */
+ if (_key > ex_to_insn(_elt))
+ return 1;
+ if (_key < ex_to_insn(_elt))
+ return -1;
+ return 0;
+}
+
+/*
+ * Search one exception table for an entry corresponding to the
+ * given instruction address, and return the address of the entry,
+ * or NULL if none is found.
+ * We use a binary search, and thus we assume that the table is
+ * already sorted.
+ */
+const struct exception_table_entry *
+search_extable(const struct exception_table_entry *base,
+ const size_t num,
+ unsigned long value)
+{
+ return bsearch(&value, base, num,
+ sizeof(struct exception_table_entry), cmp_ex_search);
+}
diff --git a/lib/fault-inject-usercopy.c b/lib/fault-inject-usercopy.c
new file mode 100644
index 000000000..77558b6c2
--- /dev/null
+++ b/lib/fault-inject-usercopy.c
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/fault-inject.h>
+#include <linux/fault-inject-usercopy.h>
+
+static struct {
+ struct fault_attr attr;
+} fail_usercopy = {
+ .attr = FAULT_ATTR_INITIALIZER,
+};
+
+static int __init setup_fail_usercopy(char *str)
+{
+ return setup_fault_attr(&fail_usercopy.attr, str);
+}
+__setup("fail_usercopy=", setup_fail_usercopy);
+
+#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
+
+static int __init fail_usercopy_debugfs(void)
+{
+ struct dentry *dir;
+
+ dir = fault_create_debugfs_attr("fail_usercopy", NULL,
+ &fail_usercopy.attr);
+ if (IS_ERR(dir))
+ return PTR_ERR(dir);
+
+ return 0;
+}
+
+late_initcall(fail_usercopy_debugfs);
+
+#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
+
+bool should_fail_usercopy(void)
+{
+ return should_fail(&fail_usercopy.attr, 1);
+}
+EXPORT_SYMBOL_GPL(should_fail_usercopy);
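+
+/*
+ * Boot-arg sketch, per the "<interval>,<probability>,<space>,<times>"
+ * format parsed by setup_fault_attr():
+ *
+ *   fail_usercopy=1,100,0,-1
+ *
+ * i.e. check every call, fail with 100% probability, no space budget,
+ * unlimited times (-1).
+ */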
diff --git a/lib/fault-inject.c b/lib/fault-inject.c
new file mode 100644
index 000000000..d608f9b48
--- /dev/null
+++ b/lib/fault-inject.c
@@ -0,0 +1,437 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/random.h>
+#include <linux/sched.h>
+#include <linux/stat.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/export.h>
+#include <linux/interrupt.h>
+#include <linux/stacktrace.h>
+#include <linux/fault-inject.h>
+
+/*
+ * setup_fault_attr() is a helper function for various __setup handlers, so it
+ * returns 0 on error, because that is what __setup handlers do.
+ */
+int setup_fault_attr(struct fault_attr *attr, char *str)
+{
+ unsigned long probability;
+ unsigned long interval;
+ int times;
+ int space;
+
+ /* "<interval>,<probability>,<space>,<times>" */
+ if (sscanf(str, "%lu,%lu,%d,%d",
+ &interval, &probability, &space, &times) < 4) {
+ printk(KERN_WARNING
+ "FAULT_INJECTION: failed to parse arguments\n");
+ return 0;
+ }
+
+ attr->probability = probability;
+ attr->interval = interval;
+ atomic_set(&attr->times, times);
+ atomic_set(&attr->space, space);
+
+ return 1;
+}
+EXPORT_SYMBOL_GPL(setup_fault_attr);
+
+static void fail_dump(struct fault_attr *attr)
+{
+ if (attr->verbose > 0 && __ratelimit(&attr->ratelimit_state)) {
+ printk(KERN_NOTICE "FAULT_INJECTION: forcing a failure.\n"
+ "name %pd, interval %lu, probability %lu, "
+ "space %d, times %d\n", attr->dname,
+ attr->interval, attr->probability,
+ atomic_read(&attr->space),
+ atomic_read(&attr->times));
+ if (attr->verbose > 1)
+ dump_stack();
+ }
+}
+
+#define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0)
+
+static bool fail_task(struct fault_attr *attr, struct task_struct *task)
+{
+ return in_task() && task->make_it_fail;
+}
+
+#define MAX_STACK_TRACE_DEPTH 32
+
+#ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER
+
+static bool fail_stacktrace(struct fault_attr *attr)
+{
+ int depth = attr->stacktrace_depth;
+ unsigned long entries[MAX_STACK_TRACE_DEPTH];
+ int n, nr_entries;
+ bool found = (attr->require_start == 0 && attr->require_end == ULONG_MAX);
+
+ if (depth == 0 || (found && !attr->reject_start && !attr->reject_end))
+ return found;
+
+ nr_entries = stack_trace_save(entries, depth, 1);
+ for (n = 0; n < nr_entries; n++) {
+ if (attr->reject_start <= entries[n] &&
+ entries[n] < attr->reject_end)
+ return false;
+ if (attr->require_start <= entries[n] &&
+ entries[n] < attr->require_end)
+ found = true;
+ }
+ return found;
+}
+
+#else
+
+static inline bool fail_stacktrace(struct fault_attr *attr)
+{
+ return true;
+}
+
+#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */
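In other words, a failure is allowed only when some frame of the current stack lies in [require_start, require_end) and no frame lies in [reject_start, reject_end). These bounds are normally written through the debugfs files created further down; a hedged in-kernel sketch of confining injection to one text range, where the helper and its bounds are hypothetical:

/* Hypothetical helper: only inject faults on call stacks that pass
 * through the text range [start, end). */
static void limit_faults_to_range(struct fault_attr *attr,
				  unsigned long start, unsigned long end)
{
	attr->require_start = start;
	attr->require_end = end;
	attr->stacktrace_depth = 16;	/* must not exceed MAX_STACK_TRACE_DEPTH */
}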
+
+/*
+ * This code is stolen from failmalloc-1.0
+ * http://www.nongnu.org/failmalloc/
+ */
+
+bool should_fail_ex(struct fault_attr *attr, ssize_t size, int flags)
+{
+ bool stack_checked = false;
+
+ if (in_task()) {
+ unsigned int fail_nth = READ_ONCE(current->fail_nth);
+
+ if (fail_nth) {
+ if (!fail_stacktrace(attr))
+ return false;
+
+ stack_checked = true;
+ fail_nth--;
+ WRITE_ONCE(current->fail_nth, fail_nth);
+ if (!fail_nth)
+ goto fail;
+
+ return false;
+ }
+ }
+
+ /* No need to check any other properties if the probability is 0 */
+ if (attr->probability == 0)
+ return false;
+
+ if (attr->task_filter && !fail_task(attr, current))
+ return false;
+
+ if (atomic_read(&attr->times) == 0)
+ return false;
+
+ if (!stack_checked && !fail_stacktrace(attr))
+ return false;
+
+ if (atomic_read(&attr->space) > size) {
+ atomic_sub(size, &attr->space);
+ return false;
+ }
+
+ if (attr->interval > 1) {
+ attr->count++;
+ if (attr->count % attr->interval)
+ return false;
+ }
+
+ if (attr->probability <= get_random_u32_below(100))
+ return false;
+
+fail:
+ if (!(flags & FAULT_NOWARN))
+ fail_dump(attr);
+
+ if (atomic_read(&attr->times) != -1)
+ atomic_dec_not_zero(&attr->times);
+
+ return true;
+}
+
+bool should_fail(struct fault_attr *attr, ssize_t size)
+{
+ return should_fail_ex(attr, size, 0);
+}
+EXPORT_SYMBOL_GPL(should_fail);
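A subsystem opts in by embedding a struct fault_attr and asking should_fail() before each operation it wants to make unreliable; passing the request size lets the space budget absorb small requests before failures begin. A minimal sketch, assuming linux/fault-inject.h and linux/slab.h, with a hypothetical allocator wrapper:

static struct fault_attr fail_my_alloc = FAULT_ATTR_INITIALIZER;

/* Hypothetical wrapper: pretend the allocation failed when the
 * attribute fires. */
static void *my_alloc(size_t size, gfp_t gfp)
{
	if (should_fail(&fail_my_alloc, size))
		return NULL;
	return kmalloc(size, gfp);
}

The attribute's knobs become reachable once it is handed to fault_create_debugfs_attr() below.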
+
+#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
+
+static int debugfs_ul_set(void *data, u64 val)
+{
+ *(unsigned long *)data = val;
+ return 0;
+}
+
+static int debugfs_ul_get(void *data, u64 *val)
+{
+ *val = *(unsigned long *)data;
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_ul, debugfs_ul_get, debugfs_ul_set, "%llu\n");
+
+static void debugfs_create_ul(const char *name, umode_t mode,
+ struct dentry *parent, unsigned long *value)
+{
+ debugfs_create_file(name, mode, parent, value, &fops_ul);
+}
+
+#ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER
+
+static int debugfs_stacktrace_depth_set(void *data, u64 val)
+{
+ *(unsigned long *)data =
+ min_t(unsigned long, val, MAX_STACK_TRACE_DEPTH);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_stacktrace_depth, debugfs_ul_get,
+ debugfs_stacktrace_depth_set, "%llu\n");
+
+static void debugfs_create_stacktrace_depth(const char *name, umode_t mode,
+ struct dentry *parent,
+ unsigned long *value)
+{
+ debugfs_create_file(name, mode, parent, value, &fops_stacktrace_depth);
+}
+
+#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */
+
+struct dentry *fault_create_debugfs_attr(const char *name,
+ struct dentry *parent, struct fault_attr *attr)
+{
+ umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
+ struct dentry *dir;
+
+ dir = debugfs_create_dir(name, parent);
+ if (IS_ERR(dir))
+ return dir;
+
+ debugfs_create_ul("probability", mode, dir, &attr->probability);
+ debugfs_create_ul("interval", mode, dir, &attr->interval);
+ debugfs_create_atomic_t("times", mode, dir, &attr->times);
+ debugfs_create_atomic_t("space", mode, dir, &attr->space);
+ debugfs_create_ul("verbose", mode, dir, &attr->verbose);
+ debugfs_create_u32("verbose_ratelimit_interval_ms", mode, dir,
+ &attr->ratelimit_state.interval);
+ debugfs_create_u32("verbose_ratelimit_burst", mode, dir,
+ &attr->ratelimit_state.burst);
+ debugfs_create_bool("task-filter", mode, dir, &attr->task_filter);
+
+#ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER
+ debugfs_create_stacktrace_depth("stacktrace-depth", mode, dir,
+ &attr->stacktrace_depth);
+ debugfs_create_xul("require-start", mode, dir, &attr->require_start);
+ debugfs_create_xul("require-end", mode, dir, &attr->require_end);
+ debugfs_create_xul("reject-start", mode, dir, &attr->reject_start);
+ debugfs_create_xul("reject-end", mode, dir, &attr->reject_end);
+#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */
+
+ attr->dname = dget(dir);
+ return dir;
+}
+EXPORT_SYMBOL_GPL(fault_create_debugfs_attr);
+
+#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
+
+#ifdef CONFIG_FAULT_INJECTION_CONFIGFS
+
+/* These configfs attribute utilities are copied from drivers/block/null_blk/main.c */
+
+static ssize_t fault_uint_attr_show(unsigned int val, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%u\n", val);
+}
+
+static ssize_t fault_ulong_attr_show(unsigned long val, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%lu\n", val);
+}
+
+static ssize_t fault_bool_attr_show(bool val, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%u\n", val);
+}
+
+static ssize_t fault_atomic_t_attr_show(atomic_t val, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%d\n", atomic_read(&val));
+}
+
+static ssize_t fault_uint_attr_store(unsigned int *val, const char *page, size_t count)
+{
+ unsigned int tmp;
+ int result;
+
+ result = kstrtouint(page, 0, &tmp);
+ if (result < 0)
+ return result;
+
+ *val = tmp;
+ return count;
+}
+
+static ssize_t fault_ulong_attr_store(unsigned long *val, const char *page, size_t count)
+{
+ int result;
+ unsigned long tmp;
+
+ result = kstrtoul(page, 0, &tmp);
+ if (result < 0)
+ return result;
+
+ *val = tmp;
+ return count;
+}
+
+static ssize_t fault_bool_attr_store(bool *val, const char *page, size_t count)
+{
+ bool tmp;
+ int result;
+
+ result = kstrtobool(page, &tmp);
+ if (result < 0)
+ return result;
+
+ *val = tmp;
+ return count;
+}
+
+static ssize_t fault_atomic_t_attr_store(atomic_t *val, const char *page, size_t count)
+{
+ int tmp;
+ int result;
+
+ result = kstrtoint(page, 0, &tmp);
+ if (result < 0)
+ return result;
+
+ atomic_set(val, tmp);
+ return count;
+}
+
+#define CONFIGFS_ATTR_NAMED(_pfx, _name, _attr_name) \
+static struct configfs_attribute _pfx##attr_##_name = { \
+ .ca_name = _attr_name, \
+ .ca_mode = 0644, \
+ .ca_owner = THIS_MODULE, \
+ .show = _pfx##_name##_show, \
+ .store = _pfx##_name##_store, \
+}
+
+static struct fault_config *to_fault_config(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct fault_config, group);
+}
+
+#define FAULT_CONFIGFS_ATTR_NAMED(NAME, ATTR_NAME, MEMBER, TYPE) \
+static ssize_t fault_##NAME##_show(struct config_item *item, char *page) \
+{ \
+ return fault_##TYPE##_attr_show(to_fault_config(item)->attr.MEMBER, page); \
+} \
+static ssize_t fault_##NAME##_store(struct config_item *item, const char *page, size_t count) \
+{ \
+ struct fault_config *config = to_fault_config(item); \
+ return fault_##TYPE##_attr_store(&config->attr.MEMBER, page, count); \
+} \
+CONFIGFS_ATTR_NAMED(fault_, NAME, ATTR_NAME)
+
+#define FAULT_CONFIGFS_ATTR(NAME, TYPE) \
+ FAULT_CONFIGFS_ATTR_NAMED(NAME, __stringify(NAME), NAME, TYPE)
+
+FAULT_CONFIGFS_ATTR(probability, ulong);
+FAULT_CONFIGFS_ATTR(interval, ulong);
+FAULT_CONFIGFS_ATTR(times, atomic_t);
+FAULT_CONFIGFS_ATTR(space, atomic_t);
+FAULT_CONFIGFS_ATTR(verbose, ulong);
+FAULT_CONFIGFS_ATTR_NAMED(ratelimit_interval, "verbose_ratelimit_interval_ms",
+ ratelimit_state.interval, uint);
+FAULT_CONFIGFS_ATTR_NAMED(ratelimit_burst, "verbose_ratelimit_burst",
+ ratelimit_state.burst, uint);
+FAULT_CONFIGFS_ATTR_NAMED(task_filter, "task-filter", task_filter, bool);
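Hand-expanding one invocation makes the macro chain concrete; FAULT_CONFIGFS_ATTR(probability, ulong) above generates, modulo whitespace:

static ssize_t fault_probability_show(struct config_item *item, char *page)
{
	return fault_ulong_attr_show(to_fault_config(item)->attr.probability, page);
}
static ssize_t fault_probability_store(struct config_item *item,
				       const char *page, size_t count)
{
	struct fault_config *config = to_fault_config(item);
	return fault_ulong_attr_store(&config->attr.probability, page, count);
}
static struct configfs_attribute fault_attr_probability = {
	.ca_name = "probability",
	.ca_mode = 0644,
	.ca_owner = THIS_MODULE,
	.show = fault_probability_show,
	.store = fault_probability_store,
};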
+
+#ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER
+
+static ssize_t fault_stacktrace_depth_show(struct config_item *item, char *page)
+{
+ return fault_ulong_attr_show(to_fault_config(item)->attr.stacktrace_depth, page);
+}
+
+static ssize_t fault_stacktrace_depth_store(struct config_item *item, const char *page,
+ size_t count)
+{
+ int result;
+ unsigned long tmp;
+
+ result = kstrtoul(page, 0, &tmp);
+ if (result < 0)
+ return result;
+
+ to_fault_config(item)->attr.stacktrace_depth =
+ min_t(unsigned long, tmp, MAX_STACK_TRACE_DEPTH);
+
+ return count;
+}
+
+CONFIGFS_ATTR_NAMED(fault_, stacktrace_depth, "stacktrace-depth");
+
+static ssize_t fault_xul_attr_show(unsigned long val, char *page)
+{
+ return snprintf(page, PAGE_SIZE,
+ sizeof(val) == sizeof(u32) ? "0x%08lx\n" : "0x%016lx\n", val);
+}
+
+static ssize_t fault_xul_attr_store(unsigned long *val, const char *page, size_t count)
+{
+ return fault_ulong_attr_store(val, page, count);
+}
+
+FAULT_CONFIGFS_ATTR_NAMED(require_start, "require-start", require_start, xul);
+FAULT_CONFIGFS_ATTR_NAMED(require_end, "require-end", require_end, xul);
+FAULT_CONFIGFS_ATTR_NAMED(reject_start, "reject-start", reject_start, xul);
+FAULT_CONFIGFS_ATTR_NAMED(reject_end, "reject-end", reject_end, xul);
+
+#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */
+
+static struct configfs_attribute *fault_config_attrs[] = {
+ &fault_attr_probability,
+ &fault_attr_interval,
+ &fault_attr_times,
+ &fault_attr_space,
+ &fault_attr_verbose,
+ &fault_attr_ratelimit_interval,
+ &fault_attr_ratelimit_burst,
+ &fault_attr_task_filter,
+#ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER
+ &fault_attr_stacktrace_depth,
+ &fault_attr_require_start,
+ &fault_attr_require_end,
+ &fault_attr_reject_start,
+ &fault_attr_reject_end,
+#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */
+ NULL,
+};
+
+static const struct config_item_type fault_config_type = {
+ .ct_attrs = fault_config_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+void fault_config_init(struct fault_config *config, const char *name)
+{
+ config_group_init_type_name(&config->group, name, &fault_config_type);
+}
+EXPORT_SYMBOL_GPL(fault_config_init);
+
+#endif /* CONFIG_FAULT_INJECTION_CONFIGFS */
diff --git a/lib/fdt.c b/lib/fdt.c
new file mode 100644
index 000000000..97f20069f
--- /dev/null
+++ b/lib/fdt.c
@@ -0,0 +1,2 @@
+#include <linux/libfdt_env.h>
+#include "../scripts/dtc/libfdt/fdt.c"
diff --git a/lib/fdt_addresses.c b/lib/fdt_addresses.c
new file mode 100644
index 000000000..23610bcf3
--- /dev/null
+++ b/lib/fdt_addresses.c
@@ -0,0 +1,2 @@
+#include <linux/libfdt_env.h>
+#include "../scripts/dtc/libfdt/fdt_addresses.c"
diff --git a/lib/fdt_empty_tree.c b/lib/fdt_empty_tree.c
new file mode 100644
index 000000000..5d30c5815
--- /dev/null
+++ b/lib/fdt_empty_tree.c
@@ -0,0 +1,2 @@
+#include <linux/libfdt_env.h>
+#include "../scripts/dtc/libfdt/fdt_empty_tree.c"
diff --git a/lib/fdt_ro.c b/lib/fdt_ro.c
new file mode 100644
index 000000000..f73c04ea7
--- /dev/null
+++ b/lib/fdt_ro.c
@@ -0,0 +1,2 @@
+#include <linux/libfdt_env.h>
+#include "../scripts/dtc/libfdt/fdt_ro.c"
diff --git a/lib/fdt_rw.c b/lib/fdt_rw.c
new file mode 100644
index 000000000..0c1f0f4a4
--- /dev/null
+++ b/lib/fdt_rw.c
@@ -0,0 +1,2 @@
+#include <linux/libfdt_env.h>
+#include "../scripts/dtc/libfdt/fdt_rw.c"
diff --git a/lib/fdt_strerror.c b/lib/fdt_strerror.c
new file mode 100644
index 000000000..8713e3ff4
--- /dev/null
+++ b/lib/fdt_strerror.c
@@ -0,0 +1,2 @@
+#include <linux/libfdt_env.h>
+#include "../scripts/dtc/libfdt/fdt_strerror.c"
diff --git a/lib/fdt_sw.c b/lib/fdt_sw.c
new file mode 100644
index 000000000..9ac7e50c7
--- /dev/null
+++ b/lib/fdt_sw.c
@@ -0,0 +1,2 @@
+#include <linux/libfdt_env.h>
+#include "../scripts/dtc/libfdt/fdt_sw.c"
diff --git a/lib/fdt_wip.c b/lib/fdt_wip.c
new file mode 100644
index 000000000..45b3fc3d3
--- /dev/null
+++ b/lib/fdt_wip.c
@@ -0,0 +1,2 @@
+#include <linux/libfdt_env.h>
+#include "../scripts/dtc/libfdt/fdt_wip.c"
diff --git a/lib/find_bit.c b/lib/find_bit.c
new file mode 100644
index 000000000..32f99e9a6
--- /dev/null
+++ b/lib/find_bit.c
@@ -0,0 +1,270 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* bit search implementation
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * Copyright (C) 2008 IBM Corporation
+ * 'find_last_bit' is written by Rusty Russell <rusty@rustcorp.com.au>
+ * (Inspired by David Howell's find_next_bit implementation)
+ *
+ * Rewritten by Yury Norov <yury.norov@gmail.com> to decrease
+ * size and improve performance, 2015.
+ */
+
+#include <linux/bitops.h>
+#include <linux/bitmap.h>
+#include <linux/export.h>
+#include <linux/math.h>
+#include <linux/minmax.h>
+#include <linux/swab.h>
+
+/*
+ * Common helper for the find_bit() function family
+ * @FETCH: The expression that fetches and pre-processes each word of bitmap(s)
+ * @MUNGE: The expression that post-processes a word containing the found bit (may be empty)
+ * @size: The bitmap size in bits
+ */
+#define FIND_FIRST_BIT(FETCH, MUNGE, size) \
+({ \
+ unsigned long idx, val, sz = (size); \
+ \
+ for (idx = 0; idx * BITS_PER_LONG < sz; idx++) { \
+ val = (FETCH); \
+ if (val) { \
+ sz = min(idx * BITS_PER_LONG + __ffs(MUNGE(val)), sz); \
+ break; \
+ } \
+ } \
+ \
+ sz; \
+})
+
+/*
+ * Common helper for the find_next_bit() function family
+ * @FETCH: The expression that fetches and pre-processes each word of bitmap(s)
+ * @MUNGE: The expression that post-processes a word containing the found bit (may be empty)
+ * @size: The bitmap size in bits
+ * @start: The bitnumber to start searching at
+ */
+#define FIND_NEXT_BIT(FETCH, MUNGE, size, start) \
+({ \
+ unsigned long mask, idx, tmp, sz = (size), __start = (start); \
+ \
+ if (unlikely(__start >= sz)) \
+ goto out; \
+ \
+ mask = MUNGE(BITMAP_FIRST_WORD_MASK(__start)); \
+ idx = __start / BITS_PER_LONG; \
+ \
+ for (tmp = (FETCH) & mask; !tmp; tmp = (FETCH)) { \
+ if ((idx + 1) * BITS_PER_LONG >= sz) \
+ goto out; \
+ idx++; \
+ } \
+ \
+ sz = min(idx * BITS_PER_LONG + __ffs(MUNGE(tmp)), sz); \
+out: \
+ sz; \
+})
+
+#define FIND_NTH_BIT(FETCH, size, num) \
+({ \
+ unsigned long sz = (size), nr = (num), idx, w, tmp; \
+ \
+ for (idx = 0; (idx + 1) * BITS_PER_LONG <= sz; idx++) { \
+ if (idx * BITS_PER_LONG + nr >= sz) \
+ goto out; \
+ \
+ tmp = (FETCH); \
+ w = hweight_long(tmp); \
+ if (w > nr) \
+ goto found; \
+ \
+ nr -= w; \
+ } \
+ \
+ if (sz % BITS_PER_LONG) \
+ tmp = (FETCH) & BITMAP_LAST_WORD_MASK(sz); \
+found: \
+ sz = min(idx * BITS_PER_LONG + fns(tmp, nr), sz); \
+out: \
+ sz; \
+})
+
+#ifndef find_first_bit
+/*
+ * Find the first set bit in a memory region.
+ */
+unsigned long _find_first_bit(const unsigned long *addr, unsigned long size)
+{
+ return FIND_FIRST_BIT(addr[idx], /* nop */, size);
+}
+EXPORT_SYMBOL(_find_first_bit);
+#endif
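Hand-expanding _find_first_bit() above shows where the FETCH and MUNGE slots land in the generated code (MUNGE is empty for the native-endian variants); roughly:

unsigned long find_first_bit_expanded(const unsigned long *addr,
				      unsigned long size)
{
	unsigned long idx, val, sz = size;

	for (idx = 0; idx * BITS_PER_LONG < sz; idx++) {
		val = addr[idx];			/* FETCH */
		if (val) {
			/* MUNGE is empty; __ffs() locates the bit. */
			sz = min(idx * BITS_PER_LONG + __ffs(val), sz);
			break;
		}
	}
	return sz;	/* == size when no bit is set */
}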
+
+#ifndef find_first_and_bit
+/*
+ * Find the first set bit in two memory regions.
+ */
+unsigned long _find_first_and_bit(const unsigned long *addr1,
+ const unsigned long *addr2,
+ unsigned long size)
+{
+ return FIND_FIRST_BIT(addr1[idx] & addr2[idx], /* nop */, size);
+}
+EXPORT_SYMBOL(_find_first_and_bit);
+#endif
+
+#ifndef find_first_zero_bit
+/*
+ * Find the first cleared bit in a memory region.
+ */
+unsigned long _find_first_zero_bit(const unsigned long *addr, unsigned long size)
+{
+ return FIND_FIRST_BIT(~addr[idx], /* nop */, size);
+}
+EXPORT_SYMBOL(_find_first_zero_bit);
+#endif
+
+#ifndef find_next_bit
+unsigned long _find_next_bit(const unsigned long *addr, unsigned long nbits, unsigned long start)
+{
+ return FIND_NEXT_BIT(addr[idx], /* nop */, nbits, start);
+}
+EXPORT_SYMBOL(_find_next_bit);
+#endif
+
+unsigned long __find_nth_bit(const unsigned long *addr, unsigned long size, unsigned long n)
+{
+ return FIND_NTH_BIT(addr[idx], size, n);
+}
+EXPORT_SYMBOL(__find_nth_bit);
+
+unsigned long __find_nth_and_bit(const unsigned long *addr1, const unsigned long *addr2,
+ unsigned long size, unsigned long n)
+{
+ return FIND_NTH_BIT(addr1[idx] & addr2[idx], size, n);
+}
+EXPORT_SYMBOL(__find_nth_and_bit);
+
+unsigned long __find_nth_andnot_bit(const unsigned long *addr1, const unsigned long *addr2,
+ unsigned long size, unsigned long n)
+{
+ return FIND_NTH_BIT(addr1[idx] & ~addr2[idx], size, n);
+}
+EXPORT_SYMBOL(__find_nth_andnot_bit);
+
+unsigned long __find_nth_and_andnot_bit(const unsigned long *addr1,
+ const unsigned long *addr2,
+ const unsigned long *addr3,
+ unsigned long size, unsigned long n)
+{
+ return FIND_NTH_BIT(addr1[idx] & addr2[idx] & ~addr3[idx], size, n);
+}
+EXPORT_SYMBOL(__find_nth_and_andnot_bit);
+
+#ifndef find_next_and_bit
+unsigned long _find_next_and_bit(const unsigned long *addr1, const unsigned long *addr2,
+ unsigned long nbits, unsigned long start)
+{
+ return FIND_NEXT_BIT(addr1[idx] & addr2[idx], /* nop */, nbits, start);
+}
+EXPORT_SYMBOL(_find_next_and_bit);
+#endif
+
+#ifndef find_next_andnot_bit
+unsigned long _find_next_andnot_bit(const unsigned long *addr1, const unsigned long *addr2,
+ unsigned long nbits, unsigned long start)
+{
+ return FIND_NEXT_BIT(addr1[idx] & ~addr2[idx], /* nop */, nbits, start);
+}
+EXPORT_SYMBOL(_find_next_andnot_bit);
+#endif
+
+#ifndef find_next_or_bit
+unsigned long _find_next_or_bit(const unsigned long *addr1, const unsigned long *addr2,
+ unsigned long nbits, unsigned long start)
+{
+ return FIND_NEXT_BIT(addr1[idx] | addr2[idx], /* nop */, nbits, start);
+}
+EXPORT_SYMBOL(_find_next_or_bit);
+#endif
+
+#ifndef find_next_zero_bit
+unsigned long _find_next_zero_bit(const unsigned long *addr, unsigned long nbits,
+ unsigned long start)
+{
+ return FIND_NEXT_BIT(~addr[idx], /* nop */, nbits, start);
+}
+EXPORT_SYMBOL(_find_next_zero_bit);
+#endif
+
+#ifndef find_last_bit
+unsigned long _find_last_bit(const unsigned long *addr, unsigned long size)
+{
+ if (size) {
+ unsigned long val = BITMAP_LAST_WORD_MASK(size);
+ unsigned long idx = (size-1) / BITS_PER_LONG;
+
+ do {
+ val &= addr[idx];
+ if (val)
+ return idx * BITS_PER_LONG + __fls(val);
+
+ val = ~0ul;
+ } while (idx--);
+ }
+ return size;
+}
+EXPORT_SYMBOL(_find_last_bit);
+#endif
+
+unsigned long find_next_clump8(unsigned long *clump, const unsigned long *addr,
+ unsigned long size, unsigned long offset)
+{
+ offset = find_next_bit(addr, size, offset);
+ if (offset == size)
+ return size;
+
+ offset = round_down(offset, 8);
+ *clump = bitmap_get_value8(addr, offset);
+
+ return offset;
+}
+EXPORT_SYMBOL(find_next_clump8);
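A clump is the byte-aligned 8-bit window that contains the next set bit, so callers typically walk the bitmap in steps of eight. A minimal sketch; the printout is illustrative:

/* Walk every 8-bit clump of @bitmap that has at least one set bit. */
static void dump_clumps(const unsigned long *bitmap, unsigned long nbits)
{
	unsigned long clump, offset;

	for (offset = find_next_clump8(&clump, bitmap, nbits, 0);
	     offset < nbits;
	     offset = find_next_clump8(&clump, bitmap, nbits, offset + 8))
		pr_info("clump at bit %lu: 0x%02lx\n", offset, clump);
}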
+
+#ifdef __BIG_ENDIAN
+
+#ifndef find_first_zero_bit_le
+/*
+ * Find the first cleared bit in an LE memory region.
+ */
+unsigned long _find_first_zero_bit_le(const unsigned long *addr, unsigned long size)
+{
+ return FIND_FIRST_BIT(~addr[idx], swab, size);
+}
+EXPORT_SYMBOL(_find_first_zero_bit_le);
+
+#endif
+
+#ifndef find_next_zero_bit_le
+unsigned long _find_next_zero_bit_le(const unsigned long *addr,
+ unsigned long size, unsigned long offset)
+{
+ return FIND_NEXT_BIT(~addr[idx], swab, size, offset);
+}
+EXPORT_SYMBOL(_find_next_zero_bit_le);
+#endif
+
+#ifndef find_next_bit_le
+unsigned long _find_next_bit_le(const unsigned long *addr,
+ unsigned long size, unsigned long offset)
+{
+ return FIND_NEXT_BIT(addr[idx], swab, size, offset);
+}
+EXPORT_SYMBOL(_find_next_bit_le);
+
+#endif
+
+#endif /* __BIG_ENDIAN */
diff --git a/lib/find_bit_benchmark.c b/lib/find_bit_benchmark.c
new file mode 100644
index 000000000..d3fb09e6e
--- /dev/null
+++ b/lib/find_bit_benchmark.c
@@ -0,0 +1,197 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Test for find_*_bit functions.
+ *
+ * Copyright (c) 2017 Cavium.
+ */
+
+/*
+ * find_bit functions are widely used in the kernel, so a successful boot
+ * is a good enough test for correctness.
+ *
+ * This test is focused on performance of traversing bitmaps. Two typical
+ * scenarios are reproduced:
+ * - randomly filled bitmap with approximately equal number of set and
+ * cleared bits;
+ * - sparse bitmap with few set bits at random positions.
+ */
+
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/random.h>
+
+#define BITMAP_LEN (4096UL * 8 * 10)
+#define SPARSE 500
+
+static DECLARE_BITMAP(bitmap, BITMAP_LEN) __initdata;
+static DECLARE_BITMAP(bitmap2, BITMAP_LEN) __initdata;
+
+/*
+ * This is Schlemiel the Painter's algorithm. It should be called after
+ * all other tests for the same bitmap because it clears all bits of the bitmap.
+ */
+static int __init test_find_first_bit(void *bitmap, unsigned long len)
+{
+ unsigned long i, cnt;
+ ktime_t time;
+
+ time = ktime_get();
+ for (cnt = i = 0; i < len; cnt++) {
+ i = find_first_bit(bitmap, len);
+ __clear_bit(i, bitmap);
+ }
+ time = ktime_get() - time;
+ pr_err("find_first_bit: %18llu ns, %6ld iterations\n", time, cnt);
+
+ return 0;
+}
+
+static int __init test_find_first_and_bit(void *bitmap, const void *bitmap2, unsigned long len)
+{
+ static DECLARE_BITMAP(cp, BITMAP_LEN) __initdata;
+ unsigned long i, cnt;
+ ktime_t time;
+
+ bitmap_copy(cp, bitmap, BITMAP_LEN);
+
+ time = ktime_get();
+ for (cnt = i = 0; i < len; cnt++) {
+ i = find_first_and_bit(cp, bitmap2, len);
+ __clear_bit(i, cp);
+ }
+ time = ktime_get() - time;
+ pr_err("find_first_and_bit: %18llu ns, %6ld iterations\n", time, cnt);
+
+ return 0;
+}
+
+static int __init test_find_next_bit(const void *bitmap, unsigned long len)
+{
+ unsigned long i, cnt;
+ ktime_t time;
+
+ time = ktime_get();
+ for (cnt = i = 0; i < BITMAP_LEN; cnt++)
+ i = find_next_bit(bitmap, BITMAP_LEN, i) + 1;
+ time = ktime_get() - time;
+ pr_err("find_next_bit: %18llu ns, %6ld iterations\n", time, cnt);
+
+ return 0;
+}
+
+static int __init test_find_next_zero_bit(const void *bitmap, unsigned long len)
+{
+ unsigned long i, cnt;
+ ktime_t time;
+
+ time = ktime_get();
+ for (cnt = i = 0; i < BITMAP_LEN; cnt++)
+ i = find_next_zero_bit(bitmap, len, i) + 1;
+ time = ktime_get() - time;
+ pr_err("find_next_zero_bit: %18llu ns, %6ld iterations\n", time, cnt);
+
+ return 0;
+}
+
+static int __init test_find_last_bit(const void *bitmap, unsigned long len)
+{
+ unsigned long l, cnt = 0;
+ ktime_t time;
+
+ time = ktime_get();
+ do {
+ cnt++;
+ l = find_last_bit(bitmap, len);
+ if (l >= len)
+ break;
+ len = l;
+ } while (len);
+ time = ktime_get() - time;
+ pr_err("find_last_bit: %18llu ns, %6ld iterations\n", time, cnt);
+
+ return 0;
+}
+
+static int __init test_find_nth_bit(const unsigned long *bitmap, unsigned long len)
+{
+ unsigned long l, n, w = bitmap_weight(bitmap, len);
+ ktime_t time;
+
+ time = ktime_get();
+ for (n = 0; n < w; n++) {
+ l = find_nth_bit(bitmap, len, n);
+ WARN_ON(l >= len);
+ }
+ time = ktime_get() - time;
+ pr_err("find_nth_bit: %18llu ns, %6ld iterations\n", time, w);
+
+ return 0;
+}
+
+static int __init test_find_next_and_bit(const void *bitmap,
+ const void *bitmap2, unsigned long len)
+{
+ unsigned long i, cnt;
+ ktime_t time;
+
+ time = ktime_get();
+ for (cnt = i = 0; i < BITMAP_LEN; cnt++)
+ i = find_next_and_bit(bitmap, bitmap2, BITMAP_LEN, i + 1);
+ time = ktime_get() - time;
+ pr_err("find_next_and_bit: %18llu ns, %6ld iterations\n", time, cnt);
+
+ return 0;
+}
+
+static int __init find_bit_test(void)
+{
+ unsigned long nbits = BITMAP_LEN / SPARSE;
+
+ pr_err("\nStart testing find_bit() with random-filled bitmap\n");
+
+ get_random_bytes(bitmap, sizeof(bitmap));
+ get_random_bytes(bitmap2, sizeof(bitmap2));
+
+ test_find_next_bit(bitmap, BITMAP_LEN);
+ test_find_next_zero_bit(bitmap, BITMAP_LEN);
+ test_find_last_bit(bitmap, BITMAP_LEN);
+ test_find_nth_bit(bitmap, BITMAP_LEN / 10);
+
+ /*
+ * test_find_first_bit() may take some time, so
+ * traverse only part of the bitmap to avoid a soft lockup.
+ */
+ test_find_first_bit(bitmap, BITMAP_LEN / 10);
+ test_find_first_and_bit(bitmap, bitmap2, BITMAP_LEN / 2);
+ test_find_next_and_bit(bitmap, bitmap2, BITMAP_LEN);
+
+ pr_err("\nStart testing find_bit() with sparse bitmap\n");
+
+ bitmap_zero(bitmap, BITMAP_LEN);
+ bitmap_zero(bitmap2, BITMAP_LEN);
+
+ while (nbits--) {
+ __set_bit(get_random_u32_below(BITMAP_LEN), bitmap);
+ __set_bit(get_random_u32_below(BITMAP_LEN), bitmap2);
+ }
+
+ test_find_next_bit(bitmap, BITMAP_LEN);
+ test_find_next_zero_bit(bitmap, BITMAP_LEN);
+ test_find_last_bit(bitmap, BITMAP_LEN);
+ test_find_nth_bit(bitmap, BITMAP_LEN);
+ test_find_first_bit(bitmap, BITMAP_LEN);
+ test_find_first_and_bit(bitmap, bitmap2, BITMAP_LEN);
+ test_find_next_and_bit(bitmap, bitmap2, BITMAP_LEN);
+
+ /*
+ * Everything is OK. Return an error just to let the user run the
+ * benchmark again without an annoying rmmod.
+ */
+ return -EINVAL;
+}
+module_init(find_bit_test);
+
+MODULE_LICENSE("GPL");
diff --git a/lib/flex_proportions.c b/lib/flex_proportions.c
new file mode 100644
index 000000000..83332fefa
--- /dev/null
+++ b/lib/flex_proportions.c
@@ -0,0 +1,278 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Floating proportions with flexible aging period
+ *
+ * Copyright (C) 2011, SUSE, Jan Kara <jack@suse.cz>
+ *
+ * The goal of this code is: given different types of events, measure the
+ * proportion of each type of event over time. The proportions are measured with
+ * exponentially decaying history to give smooth transitions. A formula
+ * expressing proportion of event of type 'j' is:
+ *
+ * p_{j} = (\Sum_{i>=0} x_{i,j}/2^{i+1})/(\Sum_{i>=0} x_i/2^{i+1})
+ *
+ * Where x_{i,j} is j's number of events in i-th last time period and x_i is
+ * total number of events in i-th last time period.
+ *
+ * Note that p_{j}'s are normalised, i.e.
+ *
+ * \Sum_{j} p_{j} = 1,
+ *
+ * This formula can be straightforwardly computed by maintaining denominator
+ * (let's call it 'd') and for each event type its numerator (let's call it
+ * 'n_j'). When an event of type 'j' happens, we simply need to do:
+ * n_j++; d++;
+ *
+ * When a new period is declared, we could do:
+ * d /= 2
+ * for each j
+ * n_j /= 2
+ *
+ * To avoid iterating over all event types, we instead shift the numerator of
+ * event j lazily when someone asks for the proportion of event j or when
+ * event j occurs. This can be implemented trivially by remembering the last
+ * period in which something happened with the proportion of type j.
+ */
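A worked instance of the formula: if type j produced x_{0,j} = 8 events in the most recent period and x_{1,j} = 4 in the one before, against period totals x_0 = 10 and x_1 = 6, then

$$p_j = \frac{8/2^{1} + 4/2^{2}}{10/2^{1} + 6/2^{2}} = \frac{5}{6.5} \approx 0.77,$$

so recent periods dominate and older ones decay geometrically.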
+#include <linux/flex_proportions.h>
+
+int fprop_global_init(struct fprop_global *p, gfp_t gfp)
+{
+ int err;
+
+ p->period = 0;
+ /* Use 1 to avoid dealing with periods with 0 events... */
+ err = percpu_counter_init(&p->events, 1, gfp);
+ if (err)
+ return err;
+ seqcount_init(&p->sequence);
+ return 0;
+}
+
+void fprop_global_destroy(struct fprop_global *p)
+{
+ percpu_counter_destroy(&p->events);
+}
+
+/*
+ * Declare @periods new periods. It is up to the caller to make sure period
+ * transitions cannot happen in parallel.
+ *
+ * The function returns true if the proportions are still defined and false
+ * if aging zeroed out all events. This can be used to detect whether declaring
+ * further periods has any effect.
+ */
+bool fprop_new_period(struct fprop_global *p, int periods)
+{
+ s64 events = percpu_counter_sum(&p->events);
+
+ /*
+ * Don't do anything if there are no events.
+ */
+ if (events <= 1)
+ return false;
+ preempt_disable_nested();
+ write_seqcount_begin(&p->sequence);
+ if (periods < 64)
+ events -= events >> periods;
+ /* Use addition to avoid losing events happening between sum and set */
+ percpu_counter_add(&p->events, -events);
+ p->period += periods;
+ write_seqcount_end(&p->sequence);
+ preempt_enable_nested();
+
+ return true;
+}
+
+/*
+ * ---- SINGLE ----
+ */
+
+int fprop_local_init_single(struct fprop_local_single *pl)
+{
+ pl->events = 0;
+ pl->period = 0;
+ raw_spin_lock_init(&pl->lock);
+ return 0;
+}
+
+void fprop_local_destroy_single(struct fprop_local_single *pl)
+{
+}
+
+static void fprop_reflect_period_single(struct fprop_global *p,
+ struct fprop_local_single *pl)
+{
+ unsigned int period = p->period;
+ unsigned long flags;
+
+ /* Fast path - period didn't change */
+ if (pl->period == period)
+ return;
+ raw_spin_lock_irqsave(&pl->lock, flags);
+ /* Someone updated pl->period while we were spinning? */
+ if (pl->period >= period) {
+ raw_spin_unlock_irqrestore(&pl->lock, flags);
+ return;
+ }
+ /* Aging zeroed our fraction? */
+ if (period - pl->period < BITS_PER_LONG)
+ pl->events >>= period - pl->period;
+ else
+ pl->events = 0;
+ pl->period = period;
+ raw_spin_unlock_irqrestore(&pl->lock, flags);
+}
+
+/* Event of type pl happened */
+void __fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl)
+{
+ fprop_reflect_period_single(p, pl);
+ pl->events++;
+ percpu_counter_add(&p->events, 1);
+}
+
+/* Return fraction of events of type pl */
+void fprop_fraction_single(struct fprop_global *p,
+ struct fprop_local_single *pl,
+ unsigned long *numerator, unsigned long *denominator)
+{
+ unsigned int seq;
+ s64 num, den;
+
+ do {
+ seq = read_seqcount_begin(&p->sequence);
+ fprop_reflect_period_single(p, pl);
+ num = pl->events;
+ den = percpu_counter_read_positive(&p->events);
+ } while (read_seqcount_retry(&p->sequence, seq));
+
+ /*
+ * Make fraction <= 1 and denominator > 0 even in presence of percpu
+ * counter errors
+ */
+ if (den <= num) {
+ if (num)
+ den = num;
+ else
+ den = 1;
+ }
+ *denominator = den;
+ *numerator = num;
+}
+
+/*
+ * ---- PERCPU ----
+ */
+#define PROP_BATCH (8*(1+ilog2(nr_cpu_ids)))
+
+int fprop_local_init_percpu(struct fprop_local_percpu *pl, gfp_t gfp)
+{
+ int err;
+
+ err = percpu_counter_init(&pl->events, 0, gfp);
+ if (err)
+ return err;
+ pl->period = 0;
+ raw_spin_lock_init(&pl->lock);
+ return 0;
+}
+
+void fprop_local_destroy_percpu(struct fprop_local_percpu *pl)
+{
+ percpu_counter_destroy(&pl->events);
+}
+
+static void fprop_reflect_period_percpu(struct fprop_global *p,
+ struct fprop_local_percpu *pl)
+{
+ unsigned int period = p->period;
+ unsigned long flags;
+
+ /* Fast path - period didn't change */
+ if (pl->period == period)
+ return;
+ raw_spin_lock_irqsave(&pl->lock, flags);
+ /* Someone updated pl->period while we were spinning? */
+ if (pl->period >= period) {
+ raw_spin_unlock_irqrestore(&pl->lock, flags);
+ return;
+ }
+ /* Aging zeroed our fraction? */
+ if (period - pl->period < BITS_PER_LONG) {
+ s64 val = percpu_counter_read(&pl->events);
+
+ if (val < (nr_cpu_ids * PROP_BATCH))
+ val = percpu_counter_sum(&pl->events);
+
+ percpu_counter_add_batch(&pl->events,
+ -val + (val >> (period-pl->period)), PROP_BATCH);
+ } else
+ percpu_counter_set(&pl->events, 0);
+ pl->period = period;
+ raw_spin_unlock_irqrestore(&pl->lock, flags);
+}
+
+/* Event of type pl happened */
+void __fprop_add_percpu(struct fprop_global *p, struct fprop_local_percpu *pl,
+ long nr)
+{
+ fprop_reflect_period_percpu(p, pl);
+ percpu_counter_add_batch(&pl->events, nr, PROP_BATCH);
+ percpu_counter_add(&p->events, nr);
+}
+
+void fprop_fraction_percpu(struct fprop_global *p,
+ struct fprop_local_percpu *pl,
+ unsigned long *numerator, unsigned long *denominator)
+{
+ unsigned int seq;
+ s64 num, den;
+
+ do {
+ seq = read_seqcount_begin(&p->sequence);
+ fprop_reflect_period_percpu(p, pl);
+ num = percpu_counter_read_positive(&pl->events);
+ den = percpu_counter_read_positive(&p->events);
+ } while (read_seqcount_retry(&p->sequence, seq));
+
+ /*
+ * Make fraction <= 1 and denominator > 0 even in presence of percpu
+ * counter errors
+ */
+ if (den <= num) {
+ if (num)
+ den = num;
+ else
+ den = 1;
+ }
+ *denominator = den;
+ *numerator = num;
+}
+
+/*
+ * Like __fprop_add_percpu() except that event is counted only if the given
+ * type has fraction smaller than @max_frac/FPROP_FRAC_BASE
+ */
+void __fprop_add_percpu_max(struct fprop_global *p,
+ struct fprop_local_percpu *pl, int max_frac, long nr)
+{
+ if (unlikely(max_frac < FPROP_FRAC_BASE)) {
+ unsigned long numerator, denominator;
+ s64 tmp;
+
+ fprop_fraction_percpu(p, pl, &numerator, &denominator);
+ /* Adding 'nr' to fraction exceeds max_frac/FPROP_FRAC_BASE? */
+ tmp = (u64)denominator * max_frac -
+ ((u64)numerator << FPROP_FRAC_SHIFT);
+ if (tmp < 0) {
+ /* Maximum fraction already exceeded? */
+ return;
+ } else if (tmp < nr * (FPROP_FRAC_BASE - max_frac)) {
+ /* Add just enough for the fraction to saturate */
+ nr = div_u64(tmp + FPROP_FRAC_BASE - max_frac - 1,
+ FPROP_FRAC_BASE - max_frac);
+ }
+ }
+
+ __fprop_add_percpu(p, pl, nr);
+}
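The arithmetic above is just the cap condition solved for nr. Writing n and d for the current numerator and denominator, m for max_frac and B for FPROP_FRAC_BASE, requiring the post-event fraction to stay at or below m/B gives

$$\frac{n + nr}{d + nr} \le \frac{m}{B} \iff nr\,(B - m) \le d\,m - n\,B = \mathrm{tmp},$$

so tmp < 0 means the cap is already exceeded, and otherwise nr is clamped to \lceil tmp / (B - m) \rceil, which is exactly the rounded-up division done by div_u64() above.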
diff --git a/lib/fonts/Kconfig b/lib/fonts/Kconfig
new file mode 100644
index 000000000..c035fde66
--- /dev/null
+++ b/lib/fonts/Kconfig
@@ -0,0 +1,144 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Font configuration
+#
+
+config FONT_SUPPORT
+ tristate
+
+if FONT_SUPPORT
+
+config FONTS
+ bool "Select compiled-in fonts"
+ depends on FRAMEBUFFER_CONSOLE || STI_CONSOLE
+ help
+ Say Y here if you would like to use fonts other than the default
+ one that your frame buffer console usually uses.
+
+ Note that the answer to this question won't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about foreign fonts.
+
+ If unsure, say N (the default choices are safe).
+
+config FONT_8x8
+ bool "VGA 8x8 font" if FONTS
+ depends on FRAMEBUFFER_CONSOLE || STI_CONSOLE
+ default y if !SPARC && !FONTS
+ help
+ This is the "high resolution" font for the VGA frame buffer (the one
+ provided by the text console 80x50 (and higher) modes).
+
+ Note that this is a poor quality font. The VGA 8x16 font is quite a
+ lot more readable.
+
+ Given the resolution provided by the frame buffer device, answering N
+ here is safe.
+
+config FONT_8x16
+ bool "VGA 8x16 font" if FONTS
+ default y if !SPARC && !FONTS
+ help
+ This is the "high resolution" font for the VGA frame buffer (the one
+ provided by the VGA text console 80x25 mode.
+
+ If unsure, say Y.
+
+config FONT_6x11
+ bool "Mac console 6x11 font (not supported by all drivers)" if FONTS
+ depends on FRAMEBUFFER_CONSOLE || STI_CONSOLE
+ default y if !SPARC && !FONTS && MAC
+ help
+ Small console font with Macintosh-style high-half glyphs. Some Mac
+ framebuffer drivers don't support this one at all.
+
+config FONT_7x14
+ bool "console 7x14 font (not supported by all drivers)" if FONTS
+ depends on FRAMEBUFFER_CONSOLE
+ help
+ Console font with characters just a bit smaller than the default.
+ If the standard 8x16 font is a little too big for you, say Y.
+ Otherwise, say N.
+
+config FONT_PEARL_8x8
+ bool "Pearl (old m68k) console 8x8 font" if FONTS
+ depends on FRAMEBUFFER_CONSOLE
+ default y if !SPARC && !FONTS && AMIGA
+ help
+ Small console font with PC-style control-character and high-half
+ glyphs.
+
+config FONT_ACORN_8x8
+ bool "Acorn console 8x8 font" if FONTS
+ depends on FRAMEBUFFER_CONSOLE
+ default y if !SPARC && !FONTS && ARM && ARCH_ACORN
+ help
+ Small console font with PC-style control characters and high-half
+ glyphs.
+
+config FONT_MINI_4x6
+ bool "Mini 4x6 font"
+ depends on !SPARC && FONTS
+
+config FONT_6x10
+ bool "Medium-size 6x10 font"
+ depends on !SPARC && FONTS
+ help
+ Medium-size console font. Suitable for framebuffer consoles on
+ embedded devices with a 320x240 screen, to get a reasonable number
+ of characters (53x24) that are still at a readable size.
+
+config FONT_10x18
+ bool "console 10x18 font (not supported by all drivers)" if FONTS
+ depends on FRAMEBUFFER_CONSOLE
+ help
+ This is a high resolution console font for machines with very
+ big letters. It fits between the sun 12x22 and the normal 8x16 font.
+ If other fonts are too big or too small for you, say Y, otherwise say N.
+
+config FONT_SUN8x16
+ bool "Sparc console 8x16 font"
+ depends on FRAMEBUFFER_CONSOLE && (!SPARC && FONTS || SPARC)
+ help
+ This is the high resolution console font for Sun machines. Say Y.
+
+config FONT_SUN12x22
+ bool "Sparc console 12x22 font (not supported by all drivers)"
+ depends on FRAMEBUFFER_CONSOLE && (!SPARC && FONTS || SPARC)
+ help
+ This is the high resolution console font for Sun machines with very
+ big letters (like the letters used in the SPARC PROM). If the
+ standard font is unreadable for you, say Y, otherwise say N.
+
+config FONT_TER16x32
+ bool "Terminus 16x32 font (not supported by all drivers)"
+ depends on FRAMEBUFFER_CONSOLE && (!SPARC && FONTS || SPARC)
+ help
+ Terminus Font is a clean, fixed width bitmap font, designed
+ for long (8 and more hours per day) work with computers.
+ This is the high resolution, large version for use with HiDPI screens.
+ If the standard font is unreadable for you, say Y, otherwise say N.
+
+config FONT_6x8
+ bool "OLED 6x8 font" if FONTS
+ depends on FRAMEBUFFER_CONSOLE
+ help
+ This font is useful for small displays (OLED).
+
+config FONT_AUTOSELECT
+ def_bool y
+ depends on !FONT_8x8
+ depends on !FONT_6x11
+ depends on !FONT_7x14
+ depends on !FONT_PEARL_8x8
+ depends on !FONT_ACORN_8x8
+ depends on !FONT_MINI_4x6
+ depends on !FONT_6x10
+ depends on !FONT_SUN8x16
+ depends on !FONT_SUN12x22
+ depends on !FONT_10x18
+ depends on !FONT_TER16x32
+ depends on !FONT_6x8
+ select FONT_8x16
+
+endif # FONT_SUPPORT
diff --git a/lib/fonts/Makefile b/lib/fonts/Makefile
new file mode 100644
index 000000000..e16f68492
--- /dev/null
+++ b/lib/fonts/Makefile
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: GPL-2.0
+# Font handling
+
+font-objs := fonts.o
+
+font-objs-$(CONFIG_FONT_SUN8x16) += font_sun8x16.o
+font-objs-$(CONFIG_FONT_SUN12x22) += font_sun12x22.o
+font-objs-$(CONFIG_FONT_8x8) += font_8x8.o
+font-objs-$(CONFIG_FONT_8x16) += font_8x16.o
+font-objs-$(CONFIG_FONT_6x11) += font_6x11.o
+font-objs-$(CONFIG_FONT_7x14) += font_7x14.o
+font-objs-$(CONFIG_FONT_10x18) += font_10x18.o
+font-objs-$(CONFIG_FONT_PEARL_8x8) += font_pearl_8x8.o
+font-objs-$(CONFIG_FONT_ACORN_8x8) += font_acorn_8x8.o
+font-objs-$(CONFIG_FONT_MINI_4x6) += font_mini_4x6.o
+font-objs-$(CONFIG_FONT_6x10) += font_6x10.o
+font-objs-$(CONFIG_FONT_TER16x32) += font_ter16x32.o
+font-objs-$(CONFIG_FONT_6x8) += font_6x8.o
+
+font-objs += $(font-objs-y)
+
+obj-$(CONFIG_FONT_SUPPORT) += font.o
diff --git a/lib/fonts/font_10x18.c b/lib/fonts/font_10x18.c
new file mode 100644
index 000000000..5d940db62
--- /dev/null
+++ b/lib/fonts/font_10x18.c
@@ -0,0 +1,5147 @@
+// SPDX-License-Identifier: GPL-2.0
+/********************************
+ * adapted from font_sun12x22.c *
+ * by Jurriaan Kalkman 06-2005 *
+ ********************************/
+
+#include <linux/font.h>
+
+#define FONTDATAMAX 9216
+
+static const struct font_data fontdata_10x18 = {
+ { 0, 0, FONTDATAMAX, 0 }, {
+ /* 0 0x00 '^@' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 1 0x01 '^A' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x3f, 0x80, /* 0011111110 */
+ 0x40, 0x40, /* 0100000001 */
+ 0x5b, 0x40, /* 0101101101 */
+ 0x40, 0x40, /* 0100000001 */
+ 0x44, 0x40, /* 0100010001 */
+ 0x44, 0x40, /* 0100010001 */
+ 0x51, 0x40, /* 0101000101 */
+ 0x4e, 0x40, /* 0100111001 */
+ 0x40, 0x40, /* 0100000001 */
+ 0x3f, 0x80, /* 0011111110 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 2 0x02 '^B' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x3f, 0x80, /* 0011111110 */
+ 0x7f, 0xc0, /* 0111111111 */
+ 0x64, 0xc0, /* 0110010011 */
+ 0x7f, 0xc0, /* 0111111111 */
+ 0x7b, 0xc0, /* 0111101111 */
+ 0x7b, 0xc0, /* 0111101111 */
+ 0x6e, 0xc0, /* 0110111011 */
+ 0x71, 0xc0, /* 0111000111 */
+ 0x7f, 0xc0, /* 0111111111 */
+ 0x3f, 0x80, /* 0011111110 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 3 0x03 '^C' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x11, 0x00, /* 0001000100 */
+ 0x3b, 0x80, /* 0011101110 */
+ 0x7f, 0xc0, /* 0111111111 */
+ 0x3f, 0x80, /* 0011111110 */
+ 0x3f, 0x80, /* 0011111110 */
+ 0x1f, 0x00, /* 0001111100 */
+ 0x1f, 0x00, /* 0001111100 */
+ 0x0e, 0x00, /* 0000111000 */
+ 0x0e, 0x00, /* 0000111000 */
+ 0x04, 0x00, /* 0000010000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 4 0x04 '^D' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x04, 0x00, /* 0000010000 */
+ 0x0e, 0x00, /* 0000111000 */
+ 0x0e, 0x00, /* 0000111000 */
+ 0x1f, 0x00, /* 0001111100 */
+ 0x1f, 0x00, /* 0001111100 */
+ 0x3f, 0x80, /* 0011111110 */
+ 0x1f, 0x00, /* 0001111100 */
+ 0x1f, 0x00, /* 0001111100 */
+ 0x0e, 0x00, /* 0000111000 */
+ 0x0e, 0x00, /* 0000111000 */
+ 0x04, 0x00, /* 0000010000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 5 0x05 '^E' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x1e, 0x00, /* 0001111000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x31, 0x80, /* 0011000110 */
+ 0x7b, 0xc0, /* 0111101111 */
+ 0x35, 0x80, /* 0011010110 */
+ 0x04, 0x00, /* 0000010000 */
+ 0x0e, 0x00, /* 0000111000 */
+ 0x1f, 0x00, /* 0001111100 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 6 0x06 '^F' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x04, 0x00, /* 0000010000 */
+ 0x0e, 0x00, /* 0000111000 */
+ 0x1f, 0x00, /* 0001111100 */
+ 0x1f, 0x00, /* 0001111100 */
+ 0x3f, 0x80, /* 0011111110 */
+ 0x7f, 0xc0, /* 0111111111 */
+ 0x7f, 0xc0, /* 0111111111 */
+ 0x35, 0x80, /* 0011010110 */
+ 0x04, 0x00, /* 0000010000 */
+ 0x0e, 0x00, /* 0000111000 */
+ 0x1f, 0x00, /* 0001111100 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 7 0x07 '^G' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x1e, 0x00, /* 0001111000 */
+ 0x1e, 0x00, /* 0001111000 */
+ 0x3f, 0x00, /* 0011111100 */
+ 0x3f, 0x00, /* 0011111100 */
+ 0x1e, 0x00, /* 0001111000 */
+ 0x1e, 0x00, /* 0001111000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 8 0x08 '^H' */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xf3, 0xc0, /* 1111001111 */
+ 0xe1, 0xc0, /* 1110000111 */
+ 0xe1, 0xc0, /* 1110000111 */
+ 0xc0, 0xc0, /* 1100000011 */
+ 0xc0, 0xc0, /* 1100000011 */
+ 0xe1, 0xc0, /* 1110000111 */
+ 0xe1, 0xc0, /* 1110000111 */
+ 0xf3, 0xc0, /* 1111001111 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+
+ /* 9 0x09 '^I' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x12, 0x00, /* 0001001000 */
+ 0x12, 0x00, /* 0001001000 */
+ 0x21, 0x00, /* 0010000100 */
+ 0x21, 0x00, /* 0010000100 */
+ 0x12, 0x00, /* 0001001000 */
+ 0x12, 0x00, /* 0001001000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 10 0x0a '^J' */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xf3, 0xc0, /* 1111001111 */
+ 0xed, 0xc0, /* 1110110111 */
+ 0xed, 0xc0, /* 1110110111 */
+ 0xde, 0xc0, /* 1101111011 */
+ 0xde, 0xc0, /* 1101111011 */
+ 0xed, 0xc0, /* 1110110111 */
+ 0xed, 0xc0, /* 1110110111 */
+ 0xf3, 0xc0, /* 1111001111 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+
+ /* 11 0x0b '^K' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x0f, 0xc0, /* 0000111111 */
+ 0x0f, 0xc0, /* 0000111111 */
+ 0x03, 0xc0, /* 0000001111 */
+ 0x06, 0xc0, /* 0000011011 */
+ 0x0c, 0xc0, /* 0000110011 */
+ 0x3c, 0x00, /* 0011110000 */
+ 0x66, 0x00, /* 0110011000 */
+ 0xc3, 0x00, /* 1100001100 */
+ 0xc3, 0x00, /* 1100001100 */
+ 0xc3, 0x00, /* 1100001100 */
+ 0x66, 0x00, /* 0110011000 */
+ 0x3c, 0x00, /* 0011110000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 12 0x0c '^L' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x1e, 0x00, /* 0001111000 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x1e, 0x00, /* 0001111000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x3f, 0x00, /* 0011111100 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 13 0x0d '^M' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x0f, 0x80, /* 0000111110 */
+ 0x08, 0x80, /* 0000100010 */
+ 0x0f, 0x80, /* 0000111110 */
+ 0x08, 0x00, /* 0000100000 */
+ 0x08, 0x00, /* 0000100000 */
+ 0x08, 0x00, /* 0000100000 */
+ 0x08, 0x00, /* 0000100000 */
+ 0x08, 0x00, /* 0000100000 */
+ 0x08, 0x00, /* 0000100000 */
+ 0x38, 0x00, /* 0011100000 */
+ 0x78, 0x00, /* 0111100000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 14 0x0e '^N' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x1f, 0x80, /* 0001111110 */
+ 0x10, 0x80, /* 0001000010 */
+ 0x1f, 0x80, /* 0001111110 */
+ 0x10, 0x80, /* 0001000010 */
+ 0x10, 0x80, /* 0001000010 */
+ 0x10, 0x80, /* 0001000010 */
+ 0x10, 0x80, /* 0001000010 */
+ 0x13, 0x80, /* 0001001110 */
+ 0x17, 0x80, /* 0001011110 */
+ 0x73, 0x00, /* 0111001100 */
+ 0xf0, 0x00, /* 1111000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 15 0x0f '^O' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x04, 0x00, /* 0000010000 */
+ 0x24, 0x80, /* 0010010010 */
+ 0x15, 0x00, /* 0001010100 */
+ 0x55, 0x40, /* 0101010101 */
+ 0x3f, 0x80, /* 0011111110 */
+ 0x0e, 0x00, /* 0000111000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x0e, 0x00, /* 0000111000 */
+ 0x3f, 0x80, /* 0011111110 */
+ 0x55, 0x40, /* 0101010101 */
+ 0x15, 0x00, /* 0001010100 */
+ 0x24, 0x80, /* 0010010010 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 16 0x10 '^P' */
+ 0x00, 0x80, /* 0000000010 */
+ 0x01, 0x80, /* 0000000110 */
+ 0x03, 0x80, /* 0000001110 */
+ 0x07, 0x80, /* 0000011110 */
+ 0x0f, 0x80, /* 0000111110 */
+ 0x1f, 0x80, /* 0001111110 */
+ 0x3f, 0x80, /* 0011111110 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0xff, 0x80, /* 1111111110 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x3f, 0x80, /* 0011111110 */
+ 0x1f, 0x80, /* 0001111110 */
+ 0x0f, 0x80, /* 0000111110 */
+ 0x07, 0x80, /* 0000011110 */
+ 0x03, 0x80, /* 0000001110 */
+ 0x01, 0x80, /* 0000000110 */
+ 0x00, 0x80, /* 0000000010 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 17 0x11 '^Q' */
+ 0x40, 0x00, /* 0100000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x70, 0x00, /* 0111000000 */
+ 0x78, 0x00, /* 0111100000 */
+ 0x7c, 0x00, /* 0111110000 */
+ 0x7e, 0x00, /* 0111111000 */
+ 0x7f, 0x00, /* 0111111100 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x7f, 0xc0, /* 0111111111 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x7f, 0x00, /* 0111111100 */
+ 0x7e, 0x00, /* 0111111000 */
+ 0x7c, 0x00, /* 0111110000 */
+ 0x78, 0x00, /* 0111100000 */
+ 0x70, 0x00, /* 0111000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x40, 0x00, /* 0100000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 18 0x12 '^R' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x1e, 0x00, /* 0001111000 */
+ 0x3f, 0x00, /* 0011111100 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x3f, 0x00, /* 0011111100 */
+ 0x1e, 0x00, /* 0001111000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 19 0x13 '^S' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 20 0x14 '^T' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x1f, 0xc0, /* 0001111111 */
+ 0x39, 0x80, /* 0011100110 */
+ 0x79, 0x80, /* 0111100110 */
+ 0x79, 0x80, /* 0111100110 */
+ 0x79, 0x80, /* 0111100110 */
+ 0x39, 0x80, /* 0011100110 */
+ 0x19, 0x80, /* 0001100110 */
+ 0x19, 0x80, /* 0001100110 */
+ 0x19, 0x80, /* 0001100110 */
+ 0x19, 0x80, /* 0001100110 */
+ 0x19, 0x80, /* 0001100110 */
+ 0x19, 0x80, /* 0001100110 */
+ 0x39, 0xc0, /* 0011100111 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 21 0x15 '^U' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x3e, 0x00, /* 0011111000 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x3e, 0x00, /* 0011111000 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x3e, 0x00, /* 0011111000 */
+ 0x03, 0x00, /* 0000001100 */
+ 0x03, 0x00, /* 0000001100 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x3e, 0x00, /* 0011111000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 22 0x16 '^V' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 23 0x17 '^W' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x1e, 0x00, /* 0001111000 */
+ 0x3f, 0x00, /* 0011111100 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x3f, 0x00, /* 0011111100 */
+ 0x1e, 0x00, /* 0001111000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 24 0x18 '^X' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x1e, 0x00, /* 0001111000 */
+ 0x3f, 0x00, /* 0011111100 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 25 0x19 '^Y' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x3f, 0x00, /* 0011111100 */
+ 0x1e, 0x00, /* 0001111000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 26 0x1a '^Z' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x04, 0x00, /* 0000010000 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x07, 0x00, /* 0000011100 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x07, 0x00, /* 0000011100 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x04, 0x00, /* 0000010000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 27 0x1b '^[' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x08, 0x00, /* 0000100000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x38, 0x00, /* 0011100000 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x38, 0x00, /* 0011100000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x08, 0x00, /* 0000100000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 28 0x1c '^\' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 29 0x1d '^]' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x12, 0x00, /* 0001001000 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x73, 0x80, /* 0111001110 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0x73, 0x80, /* 0111001110 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x12, 0x00, /* 0001001000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 30 0x1e '^^' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x04, 0x00, /* 0000010000 */
+ 0x04, 0x00, /* 0000010000 */
+ 0x0e, 0x00, /* 0000111000 */
+ 0x0e, 0x00, /* 0000111000 */
+ 0x1f, 0x00, /* 0001111100 */
+ 0x1f, 0x00, /* 0001111100 */
+ 0x3f, 0x80, /* 0011111110 */
+ 0x3f, 0x80, /* 0011111110 */
+ 0x7f, 0xc0, /* 0111111111 */
+ 0x7f, 0xc0, /* 0111111111 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 31 0x1f '^_' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x7f, 0xc0, /* 0111111111 */
+ 0x7f, 0xc0, /* 0111111111 */
+ 0x3f, 0x80, /* 0011111110 */
+ 0x3f, 0x80, /* 0011111110 */
+ 0x1f, 0x00, /* 0001111100 */
+ 0x1f, 0x00, /* 0001111100 */
+ 0x0e, 0x00, /* 0000111000 */
+ 0x0e, 0x00, /* 0000111000 */
+ 0x04, 0x00, /* 0000010000 */
+ 0x04, 0x00, /* 0000010000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 32 0x20 ' ' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 33 0x21 '!' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 34 0x22 '"' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x63, 0x00, /* 0110001100 */
+ 0xf7, 0x80, /* 1111011110 */
+ 0xf7, 0x80, /* 1111011110 */
+ 0x31, 0x80, /* 0011000110 */
+ 0x31, 0x80, /* 0011000110 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x42, 0x00, /* 0100001000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 35 0x23 '#' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 36 0x24 '$' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x3e, 0x00, /* 0011111000 */
+ 0x3f, 0x00, /* 0011111100 */
+ 0x6f, 0x80, /* 0110111110 */
+ 0x6d, 0x80, /* 0110110110 */
+ 0x6c, 0x80, /* 0110110010 */
+ 0x3c, 0x00, /* 0011110000 */
+ 0x0f, 0x00, /* 0000111100 */
+ 0x0d, 0x80, /* 0000110110 */
+ 0x4d, 0x80, /* 0100110110 */
+ 0x6d, 0x80, /* 0110110110 */
+ 0x7f, 0x00, /* 0111111100 */
+ 0x3e, 0x00, /* 0011111000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 37 0x25 '%' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x31, 0x80, /* 0011000110 */
+ 0x7b, 0x00, /* 0111101100 */
+ 0x7b, 0x00, /* 0111101100 */
+ 0x36, 0x00, /* 0011011000 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x37, 0x80, /* 0011011110 */
+ 0x37, 0x80, /* 0011011110 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 38 0x26 '&' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x07, 0x00, /* 0000011100 */
+ 0x0f, 0x80, /* 0000111110 */
+ 0x19, 0x80, /* 0001100110 */
+ 0x19, 0x80, /* 0001100110 */
+ 0x0f, 0x80, /* 0000111110 */
+ 0x1e, 0x00, /* 0001111000 */
+ 0x3e, 0x00, /* 0011111000 */
+ 0x76, 0x00, /* 0111011000 */
+ 0x66, 0x40, /* 0110011001 */
+ 0x63, 0xc0, /* 0110001111 */
+ 0x63, 0x80, /* 0110001110 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x3f, 0x80, /* 0011111110 */
+ 0x1c, 0xc0, /* 0001110011 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 39 0x27 ''' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x78, 0x00, /* 0111100000 */
+ 0x78, 0x00, /* 0111100000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x20, 0x00, /* 0010000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 40 0x28 '(' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x03, 0x00, /* 0000001100 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x03, 0x00, /* 0000001100 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 41 0x29 ')' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 42 0x2a '*' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x4c, 0x80, /* 0100110010 */
+ 0x6d, 0x80, /* 0110110110 */
+ 0x3f, 0x00, /* 0011111100 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x3f, 0x00, /* 0011111100 */
+ 0x6d, 0x80, /* 0110110110 */
+ 0x4c, 0x80, /* 0100110010 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 43 0x2b '+' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 44 0x2c ',' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x78, 0x00, /* 0111100000 */
+ 0x78, 0x00, /* 0111100000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x40, 0x00, /* 0100000000 */
+
+ /* 45 0x2d '-' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 46 0x2e '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x3c, 0x00, /* 0011110000 */
+ 0x3c, 0x00, /* 0011110000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 47 0x2f '/' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x01, 0x80, /* 0000000110 */
+ 0x01, 0x80, /* 0000000110 */
+ 0x03, 0x00, /* 0000001100 */
+ 0x03, 0x00, /* 0000001100 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 48 0x30 '0' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x0e, 0x00, /* 0000111000 */
+ 0x1f, 0x00, /* 0001111100 */
+ 0x23, 0x00, /* 0010001100 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x63, 0x80, /* 0110001110 */
+ 0x65, 0x80, /* 0110010110 */
+ 0x65, 0x80, /* 0110010110 */
+ 0x69, 0x80, /* 0110100110 */
+ 0x69, 0x80, /* 0110100110 */
+ 0x71, 0x80, /* 0111000110 */
+ 0x61, 0x00, /* 0110000100 */
+ 0x31, 0x00, /* 0011000100 */
+ 0x3e, 0x00, /* 0011111000 */
+ 0x1c, 0x00, /* 0001110000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 49 0x31 '1' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x04, 0x00, /* 0000010000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x1c, 0x00, /* 0001110000 */
+ 0x3c, 0x00, /* 0011110000 */
+ 0x6c, 0x00, /* 0110110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 50 0x32 '2' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x1e, 0x00, /* 0001111000 */
+ 0x3f, 0x00, /* 0011111100 */
+ 0x63, 0x80, /* 0110001110 */
+ 0x41, 0x80, /* 0100000110 */
+ 0x01, 0x80, /* 0000000110 */
+ 0x01, 0x80, /* 0000000110 */
+ 0x03, 0x00, /* 0000001100 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x30, 0x80, /* 0011000010 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 51 0x33 '3' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x1c, 0x00, /* 0001110000 */
+ 0x3e, 0x00, /* 0011111000 */
+ 0x47, 0x00, /* 0100011100 */
+ 0x03, 0x00, /* 0000001100 */
+ 0x07, 0x00, /* 0000011100 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x0e, 0x00, /* 0000111000 */
+ 0x07, 0x00, /* 0000011100 */
+ 0x03, 0x00, /* 0000001100 */
+ 0x01, 0x80, /* 0000000110 */
+ 0x41, 0x80, /* 0100000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x3f, 0x00, /* 0011111100 */
+ 0x1e, 0x00, /* 0001111000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 52 0x34 '4' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x0e, 0x00, /* 0000111000 */
+ 0x1e, 0x00, /* 0001111000 */
+ 0x36, 0x00, /* 0011011000 */
+ 0x36, 0x00, /* 0011011000 */
+ 0x66, 0x00, /* 0110011000 */
+ 0x66, 0x00, /* 0110011000 */
+ 0xc6, 0x00, /* 1100011000 */
+ 0xc6, 0x00, /* 1100011000 */
+ 0xff, 0x80, /* 1111111110 */
+ 0xff, 0x80, /* 1111111110 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 53 0x35 '5' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x1f, 0x00, /* 0001111100 */
+ 0x1f, 0x00, /* 0001111100 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x7e, 0x00, /* 0111111000 */
+ 0x67, 0x00, /* 0110011100 */
+ 0x03, 0x80, /* 0000001110 */
+ 0x01, 0x80, /* 0000000110 */
+ 0x01, 0x80, /* 0000000110 */
+ 0x01, 0x80, /* 0000000110 */
+ 0x41, 0x80, /* 0100000110 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x3e, 0x00, /* 0011111000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 54 0x36 '6' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x6e, 0x00, /* 0110111000 */
+ 0x7f, 0x00, /* 0111111100 */
+ 0x73, 0x80, /* 0111001110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x71, 0x00, /* 0111000100 */
+ 0x3e, 0x00, /* 0011111000 */
+ 0x1c, 0x00, /* 0001110000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 55 0x37 '7' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x1f, 0x80, /* 0001111110 */
+ 0x3f, 0x80, /* 0011111110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x01, 0x80, /* 0000000110 */
+ 0x03, 0x00, /* 0000001100 */
+ 0x03, 0x00, /* 0000001100 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 56 0x38 '8' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x1e, 0x00, /* 0001111000 */
+ 0x23, 0x00, /* 0010001100 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x31, 0x00, /* 0011000100 */
+ 0x1a, 0x00, /* 0001101000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x16, 0x00, /* 0001011000 */
+ 0x23, 0x00, /* 0010001100 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x31, 0x00, /* 0011000100 */
+ 0x1e, 0x00, /* 0001111000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 57 0x39 '9' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x0e, 0x00, /* 0000111000 */
+ 0x17, 0x00, /* 0001011100 */
+ 0x23, 0x80, /* 0010001110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x73, 0x80, /* 0111001110 */
+ 0x3d, 0x80, /* 0011110110 */
+ 0x19, 0x80, /* 0001100110 */
+ 0x01, 0x80, /* 0000000110 */
+ 0x03, 0x00, /* 0000001100 */
+ 0x03, 0x00, /* 0000001100 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 58 0x3a ':' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x3c, 0x00, /* 0011110000 */
+ 0x3c, 0x00, /* 0011110000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x3c, 0x00, /* 0011110000 */
+ 0x3c, 0x00, /* 0011110000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 59 0x3b ';' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x3c, 0x00, /* 0011110000 */
+ 0x3c, 0x00, /* 0011110000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x3c, 0x00, /* 0011110000 */
+ 0x3c, 0x00, /* 0011110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x20, 0x00, /* 0010000000 */
+
+ /* 60 0x3c '<' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x03, 0x00, /* 0000001100 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x03, 0x00, /* 0000001100 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 61 0x3d '=' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 62 0x3e '>' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x03, 0x00, /* 0000001100 */
+ 0x01, 0x80, /* 0000000110 */
+ 0x01, 0x80, /* 0000000110 */
+ 0x03, 0x00, /* 0000001100 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 63 0x3f '?' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x0e, 0x00, /* 0000111000 */
+ 0x1f, 0x00, /* 0001111100 */
+ 0x3b, 0x80, /* 0011101110 */
+ 0x21, 0x80, /* 0010000110 */
+ 0x01, 0x80, /* 0000000110 */
+ 0x03, 0x00, /* 0000001100 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 64 0x40 '@' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x0e, 0x00, /* 0000111000 */
+ 0x3f, 0x00, /* 0011111100 */
+ 0x31, 0x80, /* 0011000110 */
+ 0x65, 0x80, /* 0110010110 */
+ 0x6d, 0x80, /* 0110110110 */
+ 0x6d, 0x80, /* 0110110110 */
+ 0x6d, 0x80, /* 0110110110 */
+ 0x6d, 0x80, /* 0110110110 */
+ 0x6f, 0x80, /* 0110111110 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x31, 0x80, /* 0011000110 */
+ 0x3f, 0x80, /* 0011111110 */
+ 0x0f, 0x00, /* 0000111100 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 65 0x41 'A' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x04, 0x00, /* 0000010000 */
+ 0x04, 0x00, /* 0000010000 */
+ 0x0e, 0x00, /* 0000111000 */
+ 0x0e, 0x00, /* 0000111000 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x19, 0x80, /* 0001100110 */
+ 0x31, 0x80, /* 0011000110 */
+ 0x3f, 0x80, /* 0011111110 */
+ 0x31, 0x80, /* 0011000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x60, 0xc0, /* 0110000011 */
+ 0x60, 0xc0, /* 0110000011 */
+ 0xf1, 0xc0, /* 1111000111 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 66 0x42 'B' */
+ 0x00, 0x00, /* 0000000000 */
+ 0xfc, 0x00, /* 1111110000 */
+ 0x62, 0x00, /* 0110001000 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x66, 0x00, /* 0110011000 */
+ 0x7e, 0x00, /* 0111111000 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x63, 0x00, /* 0110001100 */
+ 0xfe, 0x00, /* 1111111000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 67 0x43 'C' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x0f, 0x00, /* 0000111100 */
+ 0x11, 0x80, /* 0001000110 */
+ 0x20, 0x80, /* 0010000010 */
+ 0x20, 0x00, /* 0010000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x20, 0x00, /* 0010000000 */
+ 0x30, 0x80, /* 0011000010 */
+ 0x19, 0x00, /* 0001100100 */
+ 0x0e, 0x00, /* 0000111000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 68 0x44 'D' */
+ 0x00, 0x00, /* 0000000000 */
+ 0xfc, 0x00, /* 1111110000 */
+ 0x67, 0x00, /* 0110011100 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x00, /* 0110000100 */
+ 0x66, 0x00, /* 0110011000 */
+ 0xf8, 0x00, /* 1111100000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 69 0x45 'E' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x30, 0x80, /* 0011000010 */
+ 0x30, 0x80, /* 0011000010 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x31, 0x00, /* 0011000100 */
+ 0x3f, 0x00, /* 0011111100 */
+ 0x31, 0x00, /* 0011000100 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x30, 0x80, /* 0011000010 */
+ 0x30, 0x80, /* 0011000010 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 70 0x46 'F' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x30, 0x80, /* 0011000010 */
+ 0x30, 0x80, /* 0011000010 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x31, 0x00, /* 0011000100 */
+ 0x3f, 0x00, /* 0011111100 */
+ 0x31, 0x00, /* 0011000100 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x78, 0x00, /* 0111100000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 71 0x47 'G' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x0f, 0x00, /* 0000111100 */
+ 0x11, 0x80, /* 0001000110 */
+ 0x20, 0x80, /* 0010000010 */
+ 0x20, 0x00, /* 0010000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x67, 0xc0, /* 0110011111 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x21, 0x80, /* 0010000110 */
+ 0x31, 0x80, /* 0011000110 */
+ 0x19, 0x80, /* 0001100110 */
+ 0x0e, 0x00, /* 0000111000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 72 0x48 'H' */
+ 0x00, 0x00, /* 0000000000 */
+ 0xf3, 0xc0, /* 1111001111 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0xf3, 0xc0, /* 1111001111 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 73 0x49 'I' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x3f, 0x00, /* 0011111100 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x3f, 0x00, /* 0011111100 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 74 0x4a 'J' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x3f, 0x00, /* 0011111100 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x08, 0x00, /* 0000100000 */
+ 0x70, 0x00, /* 0111000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 75 0x4b 'K' */
+ 0x00, 0x00, /* 0000000000 */
+ 0xf1, 0x80, /* 1111000110 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x66, 0x00, /* 0110011000 */
+ 0x6c, 0x00, /* 0110110000 */
+ 0x78, 0x00, /* 0111100000 */
+ 0x70, 0x00, /* 0111000000 */
+ 0x70, 0x00, /* 0111000000 */
+ 0x78, 0x00, /* 0111100000 */
+ 0x78, 0x00, /* 0111100000 */
+ 0x6c, 0x00, /* 0110110000 */
+ 0x66, 0x00, /* 0110011000 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x61, 0x80, /* 0110000110 */
+ 0xf0, 0xc0, /* 1111000011 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 76 0x4c 'L' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x78, 0x00, /* 0111100000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x30, 0x80, /* 0011000010 */
+ 0x30, 0x80, /* 0011000010 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 77 0x4d 'M' */
+ 0x00, 0x00, /* 0000000000 */
+ 0xe0, 0xc0, /* 1110000011 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x73, 0x80, /* 0111001110 */
+ 0x73, 0x80, /* 0111001110 */
+ 0x6d, 0x80, /* 0110110110 */
+ 0x6d, 0x80, /* 0110110110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0xf3, 0xc0, /* 1111001111 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 78 0x4e 'N' */
+ 0x00, 0x00, /* 0000000000 */
+ 0xf3, 0xc0, /* 1111001111 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x71, 0x80, /* 0111000110 */
+ 0x79, 0x80, /* 0111100110 */
+ 0x79, 0x80, /* 0111100110 */
+ 0x6d, 0x80, /* 0110110110 */
+ 0x6d, 0x80, /* 0110110110 */
+ 0x67, 0x80, /* 0110011110 */
+ 0x67, 0x80, /* 0110011110 */
+ 0x63, 0x80, /* 0110001110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0xf3, 0xc0, /* 1111001111 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 79 0x4f 'O' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x17, 0x00, /* 0001011100 */
+ 0x23, 0x00, /* 0010001100 */
+ 0x21, 0x80, /* 0010000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x21, 0x00, /* 0010000100 */
+ 0x31, 0x00, /* 0011000100 */
+ 0x1a, 0x00, /* 0001101000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 80 0x50 'P' */
+ 0x00, 0x00, /* 0000000000 */
+ 0xfe, 0x00, /* 1111111000 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x7e, 0x00, /* 0111111000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0xf0, 0x00, /* 1111000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 81 0x51 'Q' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x0e, 0x00, /* 0000111000 */
+ 0x13, 0x00, /* 0001001100 */
+ 0x23, 0x00, /* 0010001100 */
+ 0x21, 0x80, /* 0010000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x31, 0x80, /* 0011000110 */
+ 0x3b, 0x00, /* 0011101100 */
+ 0x1e, 0x00, /* 0001111000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x1e, 0x00, /* 0001111000 */
+ 0x26, 0x00, /* 0010011000 */
+ 0x03, 0x80, /* 0000001110 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 82 0x52 'R' */
+ 0x00, 0x00, /* 0000000000 */
+ 0xfe, 0x00, /* 1111111000 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x00, /* 0110000100 */
+ 0x7e, 0x00, /* 0111111000 */
+ 0x78, 0x00, /* 0111100000 */
+ 0x6c, 0x00, /* 0110110000 */
+ 0x6e, 0x00, /* 0110111000 */
+ 0x67, 0x00, /* 0110011100 */
+ 0x63, 0x80, /* 0110001110 */
+ 0x61, 0xc0, /* 0110000111 */
+ 0xf0, 0xc0, /* 1111000011 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 83 0x53 'S' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x1f, 0x00, /* 0001111100 */
+ 0x31, 0x80, /* 0011000110 */
+ 0x60, 0x80, /* 0110000010 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x03, 0x00, /* 0000001100 */
+ 0x01, 0x80, /* 0000000110 */
+ 0x41, 0x80, /* 0100000110 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x3e, 0x00, /* 0011111000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 84 0x54 'T' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x4c, 0x80, /* 0100110010 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x3f, 0x00, /* 0011111100 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 85 0x55 'U' */
+ 0x00, 0x00, /* 0000000000 */
+ 0xf3, 0xc0, /* 1111001111 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x73, 0x00, /* 0111001100 */
+ 0x3f, 0x00, /* 0011111100 */
+ 0x1e, 0x00, /* 0001111000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 86 0x56 'V' */
+ 0x00, 0x00, /* 0000000000 */
+ 0xe1, 0xc0, /* 1110000111 */
+ 0xc0, 0xc0, /* 1100000011 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x12, 0x00, /* 0001001000 */
+ 0x1e, 0x00, /* 0001111000 */
+ 0x1e, 0x00, /* 0001111000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 87 0x57 'W' */
+ 0x00, 0x00, /* 0000000000 */
+ 0xe1, 0xc0, /* 1110000111 */
+ 0xc0, 0xc0, /* 1100000011 */
+ 0xc0, 0xc0, /* 1100000011 */
+ 0xc0, 0xc0, /* 1100000011 */
+ 0xe0, 0xc0, /* 1110000011 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x6d, 0x80, /* 0110110110 */
+ 0x6d, 0x80, /* 0110110110 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x77, 0x00, /* 0111011100 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 88 0x58 'X' */
+ 0x00, 0x00, /* 0000000000 */
+ 0xf7, 0x80, /* 1111011110 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x36, 0x00, /* 0011011000 */
+ 0x36, 0x00, /* 0011011000 */
+ 0x36, 0x00, /* 0011011000 */
+ 0x1c, 0x00, /* 0001110000 */
+ 0x1c, 0x00, /* 0001110000 */
+ 0x36, 0x00, /* 0011011000 */
+ 0x36, 0x00, /* 0011011000 */
+ 0x36, 0x00, /* 0011011000 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x63, 0x00, /* 0110001100 */
+ 0xf7, 0x80, /* 1111011110 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 89 0x59 'Y' */
+ 0x00, 0x00, /* 0000000000 */
+ 0xf3, 0xc0, /* 1111001111 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x1e, 0x00, /* 0001111000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x1e, 0x00, /* 0001111000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 90 0x5a 'Z' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x3f, 0x80, /* 0011111110 */
+ 0x21, 0x80, /* 0010000110 */
+ 0x01, 0x80, /* 0000000110 */
+ 0x03, 0x00, /* 0000001100 */
+ 0x03, 0x00, /* 0000001100 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x30, 0x80, /* 0011000010 */
+ 0x3f, 0x80, /* 0011111110 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 91 0x5b '[' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x1f, 0x00, /* 0001111100 */
+ 0x1f, 0x00, /* 0001111100 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x1f, 0x00, /* 0001111100 */
+ 0x1f, 0x00, /* 0001111100 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 92 0x5c '\' */
+ 0x00, 0x00, /* 0000000000 */
+ 0xc0, 0x00, /* 1100000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x03, 0x00, /* 0000001100 */
+ 0x03, 0x00, /* 0000001100 */
+ 0x01, 0x80, /* 0000000110 */
+ 0x01, 0x80, /* 0000000110 */
+ 0x00, 0xc0, /* 0000000011 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 93 0x5d ']' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x3e, 0x00, /* 0011111000 */
+ 0x3e, 0x00, /* 0011111000 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x3e, 0x00, /* 0011111000 */
+ 0x3e, 0x00, /* 0011111000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 94 0x5e '^' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x1e, 0x00, /* 0001111000 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 95 0x5f '_' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 96 0x60 '`' */
+ 0x04, 0x00, /* 0000010000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x1e, 0x00, /* 0001111000 */
+ 0x1e, 0x00, /* 0001111000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 97 0x61 'a' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x1f, 0x00, /* 0001111100 */
+ 0x31, 0x80, /* 0011000110 */
+ 0x21, 0x80, /* 0010000110 */
+ 0x07, 0x80, /* 0000011110 */
+ 0x39, 0x80, /* 0011100110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x73, 0x80, /* 0111001110 */
+ 0x3d, 0xc0, /* 0011110111 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 98 0x62 'b' */
+ 0x20, 0x00, /* 0010000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0xe0, 0x00, /* 1110000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x66, 0x00, /* 0110011000 */
+ 0x6f, 0x00, /* 0110111100 */
+ 0x73, 0x80, /* 0111001110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x71, 0x80, /* 0111000110 */
+ 0x7b, 0x00, /* 0111101100 */
+ 0x4e, 0x00, /* 0100111000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 99 0x63 'c' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x1e, 0x00, /* 0001111000 */
+ 0x37, 0x00, /* 0011011100 */
+ 0x23, 0x00, /* 0010001100 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x71, 0x00, /* 0111000100 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x1e, 0x00, /* 0001111000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 100 0x64 'd' */
+ 0x01, 0x80, /* 0000000110 */
+ 0x03, 0x80, /* 0000001110 */
+ 0x01, 0x80, /* 0000000110 */
+ 0x01, 0x80, /* 0000000110 */
+ 0x01, 0x80, /* 0000000110 */
+ 0x0d, 0x80, /* 0000110110 */
+ 0x37, 0x80, /* 0011011110 */
+ 0x23, 0x80, /* 0010001110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x73, 0x80, /* 0111001110 */
+ 0x35, 0x80, /* 0011010110 */
+ 0x19, 0xc0, /* 0001100111 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 101 0x65 'e' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x19, 0x80, /* 0001100110 */
+ 0x0e, 0x00, /* 0000111000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 102 0x66 'f' */
+ 0x07, 0x00, /* 0000011100 */
+ 0x09, 0x80, /* 0000100110 */
+ 0x09, 0x80, /* 0000100110 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x7f, 0x00, /* 0111111100 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x3c, 0x00, /* 0011110000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 103 0x67 'g' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x1c, 0x80, /* 0001110010 */
+ 0x37, 0x80, /* 0011011110 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x36, 0x00, /* 0011011000 */
+ 0x3c, 0x00, /* 0011110000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x7f, 0x00, /* 0111111100 */
+ 0x3f, 0x80, /* 0011111110 */
+ 0x21, 0x80, /* 0010000110 */
+ 0x40, 0x80, /* 0100000010 */
+ 0x7f, 0x00, /* 0111111100 */
+ 0x3e, 0x00, /* 0011111000 */
+
+ /* 104 0x68 'h' */
+ 0x10, 0x00, /* 0001000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x70, 0x00, /* 0111000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x37, 0x00, /* 0011011100 */
+ 0x3b, 0x80, /* 0011101110 */
+ 0x31, 0x80, /* 0011000110 */
+ 0x31, 0x80, /* 0011000110 */
+ 0x31, 0x80, /* 0011000110 */
+ 0x31, 0x80, /* 0011000110 */
+ 0x31, 0x80, /* 0011000110 */
+ 0x31, 0x80, /* 0011000110 */
+ 0x31, 0x80, /* 0011000110 */
+ 0x7b, 0xc0, /* 0111101111 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 105 0x69 'i' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x3c, 0x00, /* 0011110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x3f, 0x00, /* 0011111100 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 106 0x6a 'j' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x01, 0x80, /* 0000000110 */
+ 0x01, 0x80, /* 0000000110 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x07, 0x80, /* 0000011110 */
+ 0x01, 0x80, /* 0000000110 */
+ 0x01, 0x80, /* 0000000110 */
+ 0x01, 0x80, /* 0000000110 */
+ 0x01, 0x80, /* 0000000110 */
+ 0x01, 0x80, /* 0000000110 */
+ 0x01, 0x80, /* 0000000110 */
+ 0x01, 0x80, /* 0000000110 */
+ 0x41, 0x80, /* 0100000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x71, 0x80, /* 0111000110 */
+ 0x3f, 0x00, /* 0011111100 */
+ 0x1c, 0x00, /* 0001110000 */
+
+ /* 107 0x6b 'k' */
+ 0x60, 0x00, /* 0110000000 */
+ 0xe0, 0x00, /* 1110000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x63, 0x80, /* 0110001110 */
+ 0x66, 0x00, /* 0110011000 */
+ 0x6c, 0x00, /* 0110110000 */
+ 0x78, 0x00, /* 0111100000 */
+ 0x70, 0x00, /* 0111000000 */
+ 0x78, 0x00, /* 0111100000 */
+ 0x6c, 0x00, /* 0110110000 */
+ 0x6e, 0x00, /* 0110111000 */
+ 0x67, 0x00, /* 0110011100 */
+ 0xf3, 0x80, /* 1111001110 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 108 0x6c 'l' */
+ 0x3c, 0x00, /* 0011110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x3f, 0x00, /* 0011111100 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 109 0x6d 'm' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0xdb, 0x80, /* 1101101110 */
+ 0x6d, 0x80, /* 0110110110 */
+ 0x6d, 0x80, /* 0110110110 */
+ 0x6d, 0x80, /* 0110110110 */
+ 0x6d, 0x80, /* 0110110110 */
+ 0x6d, 0x80, /* 0110110110 */
+ 0x6d, 0x80, /* 0110110110 */
+ 0x6d, 0x80, /* 0110110110 */
+ 0x6d, 0x80, /* 0110110110 */
+ 0xed, 0xc0, /* 1110110111 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 110 0x6e 'n' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x6f, 0x00, /* 0110111100 */
+ 0x7b, 0x80, /* 0111101110 */
+ 0x31, 0x80, /* 0011000110 */
+ 0x31, 0x80, /* 0011000110 */
+ 0x31, 0x80, /* 0011000110 */
+ 0x31, 0x80, /* 0011000110 */
+ 0x31, 0x80, /* 0011000110 */
+ 0x31, 0x80, /* 0011000110 */
+ 0x31, 0x80, /* 0011000110 */
+ 0x7b, 0xc0, /* 0111101111 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 111 0x6f 'o' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x3c, 0x00, /* 0011110000 */
+ 0x66, 0x00, /* 0110011000 */
+ 0xc3, 0x00, /* 1100001100 */
+ 0xc1, 0x80, /* 1100000110 */
+ 0xc1, 0x80, /* 1100000110 */
+ 0xc1, 0x80, /* 1100000110 */
+ 0xc1, 0x80, /* 1100000110 */
+ 0xe1, 0x80, /* 1110000110 */
+ 0x73, 0x00, /* 0111001100 */
+ 0x3c, 0x00, /* 0011110000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 112 0x70 'p' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0xde, 0x00, /* 1101111000 */
+ 0x76, 0x00, /* 0111011000 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x71, 0x80, /* 0111000110 */
+ 0x7b, 0x00, /* 0111101100 */
+ 0x7e, 0x00, /* 0111111000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0xf0, 0x00, /* 1111000000 */
+
+ /* 113 0x71 'q' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x0e, 0xc0, /* 0000111011 */
+ 0x1b, 0x80, /* 0001101110 */
+ 0x33, 0x80, /* 0011001110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x71, 0x80, /* 0111000110 */
+ 0x3b, 0x80, /* 0011101110 */
+ 0x1f, 0x80, /* 0001111110 */
+ 0x01, 0x80, /* 0000000110 */
+ 0x01, 0x80, /* 0000000110 */
+ 0x03, 0xc0, /* 0000001111 */
+
+ /* 114 0x72 'r' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x73, 0x00, /* 0111001100 */
+ 0x35, 0x80, /* 0011010110 */
+ 0x39, 0x80, /* 0011100110 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x78, 0x00, /* 0111100000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 115 0x73 's' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x3f, 0x00, /* 0011111100 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x61, 0x00, /* 0110000100 */
+ 0x70, 0x00, /* 0111000000 */
+ 0x38, 0x00, /* 0011100000 */
+ 0x0e, 0x00, /* 0000111000 */
+ 0x07, 0x00, /* 0000011100 */
+ 0x43, 0x00, /* 0100001100 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x7e, 0x00, /* 0111111000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 116 0x74 't' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x08, 0x00, /* 0000100000 */
+ 0x08, 0x00, /* 0000100000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x1c, 0x80, /* 0001110010 */
+ 0x0f, 0x00, /* 0000111100 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 117 0x75 'u' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0xf7, 0x80, /* 1111011110 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x77, 0x00, /* 0111011100 */
+ 0x3d, 0x80, /* 0011110110 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 118 0x76 'v' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0xf1, 0xc0, /* 1111000111 */
+ 0x60, 0xc0, /* 0110000011 */
+ 0x31, 0x80, /* 0011000110 */
+ 0x31, 0x80, /* 0011000110 */
+ 0x19, 0x80, /* 0001100110 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x0f, 0x00, /* 0000111100 */
+ 0x0f, 0x00, /* 0000111100 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 119 0x77 'w' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0xe3, 0xc0, /* 1110001111 */
+ 0xc1, 0x80, /* 1100000110 */
+ 0xc1, 0x80, /* 1100000110 */
+ 0xc1, 0x80, /* 1100000110 */
+ 0xc1, 0x80, /* 1100000110 */
+ 0x6b, 0x00, /* 0110101100 */
+ 0x6b, 0x00, /* 0110101100 */
+ 0x7e, 0x00, /* 0111111000 */
+ 0x36, 0x00, /* 0011011000 */
+ 0x36, 0x00, /* 0011011000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 120 0x78 'x' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0xf7, 0x80, /* 1111011110 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x36, 0x00, /* 0011011000 */
+ 0x36, 0x00, /* 0011011000 */
+ 0x1c, 0x00, /* 0001110000 */
+ 0x1c, 0x00, /* 0001110000 */
+ 0x36, 0x00, /* 0011011000 */
+ 0x66, 0x00, /* 0110011000 */
+ 0x63, 0x00, /* 0110001100 */
+ 0xf7, 0x80, /* 1111011110 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 121 0x79 'y' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0xf3, 0xc0, /* 1111001111 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1e, 0x00, /* 0001111000 */
+ 0x0e, 0x00, /* 0000111000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x78, 0x00, /* 0111100000 */
+ 0x70, 0x00, /* 0111000000 */
+
+ /* 122 0x7a 'z' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x43, 0x00, /* 0100001100 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x60, 0x80, /* 0110000010 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 123 0x7b '{' */
+ 0x07, 0x00, /* 0000011100 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x70, 0x00, /* 0111000000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x07, 0x00, /* 0000011100 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 124 0x7c '|' */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 125 0x7d '}' */
+ 0x38, 0x00, /* 0011100000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x03, 0x80, /* 0000001110 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x38, 0x00, /* 0011100000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 126 0x7e '~' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x18, 0x80, /* 0001100010 */
+ 0x3d, 0x80, /* 0011110110 */
+ 0x6f, 0x00, /* 0110111100 */
+ 0x46, 0x00, /* 0100011000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 127 0x7f '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x12, 0x00, /* 0001001000 */
+ 0x21, 0x00, /* 0010000100 */
+ 0x40, 0x80, /* 0100000010 */
+ 0x40, 0x80, /* 0100000010 */
+ 0x40, 0x80, /* 0100000010 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 128 0x80 '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x1f, 0x00, /* 0001111100 */
+ 0x21, 0x80, /* 0010000110 */
+ 0x40, 0x80, /* 0100000010 */
+ 0x40, 0x00, /* 0100000000 */
+ 0x40, 0x00, /* 0100000000 */
+ 0x40, 0x00, /* 0100000000 */
+ 0x40, 0x00, /* 0100000000 */
+ 0x40, 0x00, /* 0100000000 */
+ 0x40, 0x00, /* 0100000000 */
+ 0x60, 0x80, /* 0110000010 */
+ 0x31, 0x00, /* 0011000100 */
+ 0x1e, 0x00, /* 0001111000 */
+ 0x08, 0x00, /* 0000100000 */
+ 0x04, 0x00, /* 0000010000 */
+ 0x02, 0x00, /* 0000001000 */
+ 0x02, 0x00, /* 0000001000 */
+ 0x1c, 0x00, /* 0001110000 */
+
+ /* 129 0x81 '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x7b, 0x80, /* 0111101110 */
+ 0x31, 0x00, /* 0011000100 */
+ 0x31, 0x00, /* 0011000100 */
+ 0x31, 0x00, /* 0011000100 */
+ 0x31, 0x00, /* 0011000100 */
+ 0x31, 0x00, /* 0011000100 */
+ 0x31, 0x00, /* 0011000100 */
+ 0x31, 0x00, /* 0011000100 */
+ 0x3b, 0x00, /* 0011101100 */
+ 0x1c, 0x80, /* 0001110010 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 130 0x82 '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x01, 0x00, /* 0000000100 */
+ 0x02, 0x00, /* 0000001000 */
+ 0x04, 0x00, /* 0000010000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x19, 0x80, /* 0001100110 */
+ 0x0e, 0x00, /* 0000111000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 131 0x83 '.' */
+ 0x04, 0x00, /* 0000010000 */
+ 0x0e, 0x00, /* 0000111000 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x31, 0x80, /* 0011000110 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x1f, 0x00, /* 0001111100 */
+ 0x31, 0x80, /* 0011000110 */
+ 0x21, 0x80, /* 0010000110 */
+ 0x07, 0x80, /* 0000011110 */
+ 0x39, 0x80, /* 0011100110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x73, 0x80, /* 0111001110 */
+ 0x3d, 0xc0, /* 0011110111 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 132 0x84 '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x1f, 0x00, /* 0001111100 */
+ 0x31, 0x80, /* 0011000110 */
+ 0x21, 0x80, /* 0010000110 */
+ 0x07, 0x80, /* 0000011110 */
+ 0x39, 0x80, /* 0011100110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x73, 0x80, /* 0111001110 */
+ 0x3d, 0xc0, /* 0011110111 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 133 0x85 '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x1f, 0x00, /* 0001111100 */
+ 0x31, 0x80, /* 0011000110 */
+ 0x21, 0x80, /* 0010000110 */
+ 0x07, 0x80, /* 0000011110 */
+ 0x39, 0x80, /* 0011100110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x73, 0x80, /* 0111001110 */
+ 0x3d, 0xc0, /* 0011110111 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 134 0x86 '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x0e, 0x00, /* 0000111000 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x0e, 0x00, /* 0000111000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x1f, 0x00, /* 0001111100 */
+ 0x31, 0x80, /* 0011000110 */
+ 0x21, 0x80, /* 0010000110 */
+ 0x07, 0x80, /* 0000011110 */
+ 0x39, 0x80, /* 0011100110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x73, 0x80, /* 0111001110 */
+ 0x3d, 0xc0, /* 0011110111 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 135 0x87 '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x1f, 0x00, /* 0001111100 */
+ 0x31, 0x80, /* 0011000110 */
+ 0x20, 0x80, /* 0010000010 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x70, 0x80, /* 0111000010 */
+ 0x30, 0x80, /* 0011000010 */
+ 0x1f, 0x00, /* 0001111100 */
+ 0x04, 0x00, /* 0000010000 */
+ 0x02, 0x00, /* 0000001000 */
+ 0x01, 0x00, /* 0000000100 */
+ 0x0e, 0x00, /* 0000111000 */
+
+ /* 136 0x88 '.' */
+ 0x04, 0x00, /* 0000010000 */
+ 0x0e, 0x00, /* 0000111000 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x31, 0x80, /* 0011000110 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x19, 0x80, /* 0001100110 */
+ 0x0e, 0x00, /* 0000111000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 137 0x89 '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x36, 0x00, /* 0011011000 */
+ 0x36, 0x00, /* 0011011000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x19, 0x80, /* 0001100110 */
+ 0x0e, 0x00, /* 0000111000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 138 0x8a '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x19, 0x80, /* 0001100110 */
+ 0x0e, 0x00, /* 0000111000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 139 0x8b '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x36, 0x00, /* 0011011000 */
+ 0x36, 0x00, /* 0011011000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x3c, 0x00, /* 0011110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x3f, 0x00, /* 0011111100 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 140 0x8c '.' */
+ 0x08, 0x00, /* 0000100000 */
+ 0x1c, 0x00, /* 0001110000 */
+ 0x36, 0x00, /* 0011011000 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x3c, 0x00, /* 0011110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x3f, 0x00, /* 0011111100 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 141 0x8d '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x3c, 0x00, /* 0011110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x3f, 0x00, /* 0011111100 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 142 0x8e '.' */
+ 0x31, 0x80, /* 0011000110 */
+ 0x31, 0x80, /* 0011000110 */
+ 0x04, 0x00, /* 0000010000 */
+ 0x04, 0x00, /* 0000010000 */
+ 0x0e, 0x00, /* 0000111000 */
+ 0x0e, 0x00, /* 0000111000 */
+ 0x0e, 0x00, /* 0000111000 */
+ 0x19, 0x00, /* 0001100100 */
+ 0x19, 0x00, /* 0001100100 */
+ 0x3f, 0x00, /* 0011111100 */
+ 0x31, 0x00, /* 0011000100 */
+ 0x31, 0x00, /* 0011000100 */
+ 0x60, 0x80, /* 0110000010 */
+ 0x60, 0x80, /* 0110000010 */
+ 0xf3, 0xc0, /* 1111001111 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 143 0x8f '.' */
+ 0x04, 0x00, /* 0000010000 */
+ 0x0a, 0x00, /* 0000101000 */
+ 0x0a, 0x00, /* 0000101000 */
+ 0x04, 0x00, /* 0000010000 */
+ 0x0e, 0x00, /* 0000111000 */
+ 0x0e, 0x00, /* 0000111000 */
+ 0x0e, 0x00, /* 0000111000 */
+ 0x19, 0x00, /* 0001100100 */
+ 0x19, 0x00, /* 0001100100 */
+ 0x3f, 0x00, /* 0011111100 */
+ 0x31, 0x00, /* 0011000100 */
+ 0x31, 0x00, /* 0011000100 */
+ 0x60, 0x80, /* 0110000010 */
+ 0x60, 0x80, /* 0110000010 */
+ 0xf3, 0xc0, /* 1111001111 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 144 0x90 '.' */
+ 0x03, 0x00, /* 0000001100 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x30, 0x80, /* 0011000010 */
+ 0x30, 0x80, /* 0011000010 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x31, 0x00, /* 0011000100 */
+ 0x3f, 0x00, /* 0011111100 */
+ 0x31, 0x00, /* 0011000100 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x30, 0x80, /* 0011000010 */
+ 0x30, 0x80, /* 0011000010 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 145 0x91 '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x3b, 0x80, /* 0011101110 */
+ 0x6c, 0xc0, /* 0110110011 */
+ 0x4c, 0xc0, /* 0100110011 */
+ 0x0c, 0xc0, /* 0000110011 */
+ 0x3f, 0xc0, /* 0011111111 */
+ 0x6c, 0x00, /* 0110110000 */
+ 0xcc, 0x00, /* 1100110000 */
+ 0xcc, 0x00, /* 1100110000 */
+ 0xee, 0xc0, /* 1110111011 */
+ 0x7b, 0x80, /* 0111101110 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 146 0x92 '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x07, 0xc0, /* 0000011111 */
+ 0x0e, 0x40, /* 0000111001 */
+ 0x0e, 0x40, /* 0000111001 */
+ 0x0e, 0x00, /* 0000111000 */
+ 0x16, 0x00, /* 0001011000 */
+ 0x16, 0x80, /* 0001011010 */
+ 0x17, 0x80, /* 0001011110 */
+ 0x16, 0x80, /* 0001011010 */
+ 0x3e, 0x00, /* 0011111000 */
+ 0x26, 0x00, /* 0010011000 */
+ 0x26, 0x00, /* 0010011000 */
+ 0x46, 0x40, /* 0100011001 */
+ 0x46, 0x40, /* 0100011001 */
+ 0xef, 0xc0, /* 1110111111 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 147 0x93 '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x08, 0x00, /* 0000100000 */
+ 0x1c, 0x00, /* 0001110000 */
+ 0x36, 0x00, /* 0011011000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x3c, 0x00, /* 0011110000 */
+ 0x66, 0x00, /* 0110011000 */
+ 0xc3, 0x00, /* 1100001100 */
+ 0xc1, 0x80, /* 1100000110 */
+ 0xc1, 0x80, /* 1100000110 */
+ 0xc1, 0x80, /* 1100000110 */
+ 0xc1, 0x80, /* 1100000110 */
+ 0xe1, 0x80, /* 1110000110 */
+ 0x73, 0x00, /* 0111001100 */
+ 0x3c, 0x00, /* 0011110000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 148 0x94 '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x3c, 0x00, /* 0011110000 */
+ 0x66, 0x00, /* 0110011000 */
+ 0xc3, 0x00, /* 1100001100 */
+ 0xc1, 0x80, /* 1100000110 */
+ 0xc1, 0x80, /* 1100000110 */
+ 0xc1, 0x80, /* 1100000110 */
+ 0xc1, 0x80, /* 1100000110 */
+ 0xe1, 0x80, /* 1110000110 */
+ 0x73, 0x00, /* 0111001100 */
+ 0x3c, 0x00, /* 0011110000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 149 0x95 '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x3c, 0x00, /* 0011110000 */
+ 0x66, 0x00, /* 0110011000 */
+ 0xc3, 0x00, /* 1100001100 */
+ 0xc1, 0x80, /* 1100000110 */
+ 0xc1, 0x80, /* 1100000110 */
+ 0xc1, 0x80, /* 1100000110 */
+ 0xc1, 0x80, /* 1100000110 */
+ 0xe1, 0x80, /* 1110000110 */
+ 0x73, 0x00, /* 0111001100 */
+ 0x3c, 0x00, /* 0011110000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 150 0x96 '.' */
+ 0x08, 0x00, /* 0000100000 */
+ 0x1c, 0x00, /* 0001110000 */
+ 0x36, 0x00, /* 0011011000 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x00, 0x00, /* 0000000000 */
+ 0xf7, 0x80, /* 1111011110 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x77, 0x00, /* 0111011100 */
+ 0x3d, 0x80, /* 0011110110 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 151 0x97 '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0xf7, 0x80, /* 1111011110 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x77, 0x00, /* 0111011100 */
+ 0x3d, 0x80, /* 0011110110 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 152 0x98 '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0xf3, 0xc0, /* 1111001111 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1e, 0x00, /* 0001111000 */
+ 0x0e, 0x00, /* 0000111000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x78, 0x00, /* 0111100000 */
+ 0x70, 0x00, /* 0111000000 */
+
+ /* 153 0x99 '.' */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x17, 0x00, /* 0001011100 */
+ 0x23, 0x00, /* 0010001100 */
+ 0x21, 0x80, /* 0010000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x21, 0x00, /* 0010000100 */
+ 0x31, 0x00, /* 0011000100 */
+ 0x1a, 0x00, /* 0001101000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 154 0x9a '.' */
+ 0x33, 0x00, /* 0011001100 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x00, 0x00, /* 0000000000 */
+ 0xf1, 0xc0, /* 1111000111 */
+ 0x60, 0x80, /* 0110000010 */
+ 0x60, 0x80, /* 0110000010 */
+ 0x60, 0x80, /* 0110000010 */
+ 0x60, 0x80, /* 0110000010 */
+ 0x60, 0x80, /* 0110000010 */
+ 0x60, 0x80, /* 0110000010 */
+ 0x60, 0x80, /* 0110000010 */
+ 0x60, 0x80, /* 0110000010 */
+ 0x71, 0x00, /* 0111000100 */
+ 0x3f, 0x00, /* 0011111100 */
+ 0x1e, 0x00, /* 0001111000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 155 0x9b '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x1f, 0x80, /* 0001111110 */
+ 0x36, 0x80, /* 0011011010 */
+ 0x26, 0x00, /* 0010011000 */
+ 0x66, 0x00, /* 0110011000 */
+ 0x66, 0x00, /* 0110011000 */
+ 0x66, 0x00, /* 0110011000 */
+ 0x66, 0x00, /* 0110011000 */
+ 0x76, 0x00, /* 0111011000 */
+ 0x36, 0x80, /* 0011011010 */
+ 0x1f, 0x80, /* 0001111110 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 156 0x9c '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x1e, 0x00, /* 0001111000 */
+ 0x3b, 0x00, /* 0011101100 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x7e, 0x00, /* 0111111000 */
+ 0x7e, 0x00, /* 0111111000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x7c, 0x80, /* 0111110010 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x43, 0x00, /* 0100001100 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 157 0x9d '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x40, 0x80, /* 0100000010 */
+ 0x40, 0x80, /* 0100000010 */
+ 0x21, 0x00, /* 0010000100 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x1e, 0x00, /* 0001111000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x3f, 0x00, /* 0011111100 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x3f, 0x00, /* 0011111100 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 158 0x9e '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0xbf, 0x00, /* 1011111100 */
+ 0x40, 0x80, /* 0100000010 */
+ 0x40, 0x80, /* 0100000010 */
+ 0x7f, 0x00, /* 0111111100 */
+ 0x40, 0x00, /* 0100000000 */
+ 0x48, 0x00, /* 0100100000 */
+ 0x48, 0x00, /* 0100100000 */
+ 0x5e, 0x00, /* 0101111000 */
+ 0x48, 0x00, /* 0100100000 */
+ 0x48, 0x00, /* 0100100000 */
+ 0x48, 0x00, /* 0100100000 */
+ 0x48, 0x80, /* 0100100010 */
+ 0x47, 0x00, /* 0100011100 */
+ 0xe0, 0x00, /* 1110000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 159 0x9f '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x03, 0x00, /* 0000001100 */
+ 0x04, 0x80, /* 0000010010 */
+ 0x08, 0x00, /* 0000100000 */
+ 0x08, 0x00, /* 0000100000 */
+ 0x08, 0x00, /* 0000100000 */
+ 0x08, 0x00, /* 0000100000 */
+ 0x09, 0x00, /* 0000100100 */
+ 0x3e, 0x00, /* 0011111000 */
+ 0x48, 0x00, /* 0100100000 */
+ 0x08, 0x00, /* 0000100000 */
+ 0x08, 0x00, /* 0000100000 */
+ 0x08, 0x00, /* 0000100000 */
+ 0x08, 0x00, /* 0000100000 */
+ 0x08, 0x00, /* 0000100000 */
+ 0x90, 0x00, /* 1001000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 160 0xa0 '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x03, 0x00, /* 0000001100 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x1f, 0x00, /* 0001111100 */
+ 0x31, 0x80, /* 0011000110 */
+ 0x21, 0x80, /* 0010000110 */
+ 0x07, 0x80, /* 0000011110 */
+ 0x39, 0x80, /* 0011100110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x73, 0x80, /* 0111001110 */
+ 0x3d, 0xc0, /* 0011110111 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 161 0xa1 '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x03, 0x00, /* 0000001100 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x3c, 0x00, /* 0011110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x3f, 0x00, /* 0011111100 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 162 0xa2 '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x3c, 0x00, /* 0011110000 */
+ 0x66, 0x00, /* 0110011000 */
+ 0xc3, 0x00, /* 1100001100 */
+ 0xc1, 0x80, /* 1100000110 */
+ 0xc1, 0x80, /* 1100000110 */
+ 0xc1, 0x80, /* 1100000110 */
+ 0xc1, 0x80, /* 1100000110 */
+ 0xe1, 0x80, /* 1110000110 */
+ 0x73, 0x00, /* 0111001100 */
+ 0x3c, 0x00, /* 0011110000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 163 0xa3 '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0xf7, 0x80, /* 1111011110 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x77, 0x00, /* 0111011100 */
+ 0x3d, 0x80, /* 0011110110 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 164 0xa4 '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x38, 0x80, /* 0011100010 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x47, 0x00, /* 0100011100 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x6f, 0x00, /* 0110111100 */
+ 0x7b, 0x80, /* 0111101110 */
+ 0x31, 0x80, /* 0011000110 */
+ 0x31, 0x80, /* 0011000110 */
+ 0x31, 0x80, /* 0011000110 */
+ 0x31, 0x80, /* 0011000110 */
+ 0x31, 0x80, /* 0011000110 */
+ 0x31, 0x80, /* 0011000110 */
+ 0x31, 0x80, /* 0011000110 */
+ 0x7b, 0xc0, /* 0111101111 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 165 0xa5 '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x38, 0x80, /* 0011100010 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x47, 0x00, /* 0100011100 */
+ 0x00, 0x00, /* 0000000000 */
+ 0xe3, 0xc0, /* 1110001111 */
+ 0x71, 0x80, /* 0111000110 */
+ 0x79, 0x80, /* 0111100110 */
+ 0x79, 0x80, /* 0111100110 */
+ 0x6d, 0x80, /* 0110110110 */
+ 0x6d, 0x80, /* 0110110110 */
+ 0x67, 0x80, /* 0110011110 */
+ 0x63, 0x80, /* 0110001110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0xf0, 0xc0, /* 1111000011 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 166 0xa6 '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x3e, 0x00, /* 0011111000 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x03, 0x00, /* 0000001100 */
+ 0x0f, 0x00, /* 0000111100 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x67, 0x00, /* 0110011100 */
+ 0x3b, 0x80, /* 0011101110 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 167 0xa7 '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x0e, 0x00, /* 0000111000 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x21, 0x80, /* 0010000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x00, /* 0110000100 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x1c, 0x00, /* 0001110000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 168 0xa8 '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x60, 0x80, /* 0110000010 */
+ 0x73, 0x80, /* 0111001110 */
+ 0x3f, 0x00, /* 0011111100 */
+ 0x1e, 0x00, /* 0001111000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 169 0xa9 '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 170 0xaa '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x01, 0x80, /* 0000000110 */
+ 0x01, 0x80, /* 0000000110 */
+ 0x01, 0x80, /* 0000000110 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 171 0xab '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x20, 0x00, /* 0010000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x20, 0x00, /* 0010000000 */
+ 0x20, 0x80, /* 0010000010 */
+ 0x21, 0x00, /* 0010000100 */
+ 0x22, 0x00, /* 0010001000 */
+ 0x74, 0x00, /* 0111010000 */
+ 0x08, 0x00, /* 0000100000 */
+ 0x17, 0x00, /* 0001011100 */
+ 0x28, 0x80, /* 0010100010 */
+ 0x43, 0x00, /* 0100001100 */
+ 0x04, 0x00, /* 0000010000 */
+ 0x08, 0x00, /* 0000100000 */
+ 0x0f, 0x80, /* 0000111110 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 172 0xac '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x20, 0x00, /* 0010000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x20, 0x00, /* 0010000000 */
+ 0x20, 0x80, /* 0010000010 */
+ 0x21, 0x00, /* 0010000100 */
+ 0x22, 0x00, /* 0010001000 */
+ 0x74, 0x00, /* 0111010000 */
+ 0x09, 0x00, /* 0000100100 */
+ 0x13, 0x00, /* 0001001100 */
+ 0x25, 0x00, /* 0010010100 */
+ 0x49, 0x00, /* 0100100100 */
+ 0x1f, 0x80, /* 0001111110 */
+ 0x01, 0x00, /* 0000000100 */
+ 0x01, 0x00, /* 0000000100 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 173 0xad '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 174 0xae '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x0d, 0x80, /* 0000110110 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x36, 0x00, /* 0011011000 */
+ 0x6c, 0x00, /* 0110110000 */
+ 0xd8, 0x00, /* 1101100000 */
+ 0x6c, 0x00, /* 0110110000 */
+ 0x36, 0x00, /* 0011011000 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x0d, 0x80, /* 0000110110 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 175 0xaf '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x6c, 0x00, /* 0110110000 */
+ 0x36, 0x00, /* 0011011000 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x0d, 0x80, /* 0000110110 */
+ 0x06, 0xc0, /* 0000011011 */
+ 0x0d, 0x80, /* 0000110110 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x36, 0x00, /* 0011011000 */
+ 0x6c, 0x00, /* 0110110000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 176 0xb0 '.' */
+ 0xc3, 0x00, /* 1100001100 */
+ 0x41, 0x00, /* 0100000100 */
+ 0x18, 0x40, /* 0001100001 */
+ 0x10, 0x40, /* 0001000001 */
+ 0xc3, 0x00, /* 1100001100 */
+ 0x41, 0x00, /* 0100000100 */
+ 0x18, 0x40, /* 0001100001 */
+ 0x10, 0x40, /* 0001000001 */
+ 0xc3, 0x00, /* 1100001100 */
+ 0x41, 0x00, /* 0100000100 */
+ 0x18, 0x40, /* 0001100001 */
+ 0x10, 0x40, /* 0001000001 */
+ 0xc3, 0x00, /* 1100001100 */
+ 0x41, 0x00, /* 0100000100 */
+ 0x18, 0x40, /* 0001100001 */
+ 0x10, 0x40, /* 0001000001 */
+ 0xc3, 0x00, /* 1100001100 */
+ 0x41, 0x00, /* 0100000100 */
+
+ /* 177 0xb1 '.' */
+ 0x11, 0x00, /* 0001000100 */
+ 0xbb, 0x80, /* 1011101110 */
+ 0x11, 0x00, /* 0001000100 */
+ 0x44, 0x40, /* 0100010001 */
+ 0xee, 0xc0, /* 1110111011 */
+ 0x44, 0x40, /* 0100010001 */
+ 0x11, 0x00, /* 0001000100 */
+ 0xbb, 0x80, /* 1011101110 */
+ 0x11, 0x00, /* 0001000100 */
+ 0x44, 0x40, /* 0100010001 */
+ 0xee, 0xc0, /* 1110111011 */
+ 0x44, 0x40, /* 0100010001 */
+ 0x11, 0x00, /* 0001000100 */
+ 0xbb, 0x80, /* 1011101110 */
+ 0x11, 0x00, /* 0001000100 */
+ 0x44, 0x40, /* 0100010001 */
+ 0xee, 0xc0, /* 1110111011 */
+ 0x44, 0x40, /* 0100010001 */
+
+ /* 178 0xb2 '.' */
+ 0x3c, 0xc0, /* 0011110011 */
+ 0xbe, 0xc0, /* 1011111011 */
+ 0xe7, 0x80, /* 1110011110 */
+ 0xef, 0x80, /* 1110111110 */
+ 0x3c, 0xc0, /* 0011110011 */
+ 0xbe, 0xc0, /* 1011111011 */
+ 0xe7, 0x80, /* 1110011110 */
+ 0xef, 0x80, /* 1110111110 */
+ 0x3c, 0xc0, /* 0011110011 */
+ 0xbe, 0xc0, /* 1011111011 */
+ 0xe7, 0x80, /* 1110011110 */
+ 0xef, 0x80, /* 1110111110 */
+ 0x3c, 0xc0, /* 0011110011 */
+ 0xbe, 0xc0, /* 1011111011 */
+ 0xe7, 0x80, /* 1110011110 */
+ 0xef, 0x80, /* 1110111110 */
+ 0x3c, 0xc0, /* 0011110011 */
+ 0xbe, 0xc0, /* 1011111011 */
+
+ /* 179 0xb3 '.' */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+
+ /* 180 0xb4 '.' */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0xfc, 0x00, /* 1111110000 */
+ 0xfc, 0x00, /* 1111110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+
+ /* 181 0xb5 '.' */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0xfc, 0x00, /* 1111110000 */
+ 0xfc, 0x00, /* 1111110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0xfc, 0x00, /* 1111110000 */
+ 0xfc, 0x00, /* 1111110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+
+ /* 182 0xb6 '.' */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0xfb, 0x00, /* 1111101100 */
+ 0xfb, 0x00, /* 1111101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+
+ /* 183 0xb7 '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0xff, 0x00, /* 1111111100 */
+ 0xff, 0x00, /* 1111111100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+
+ /* 184 0xb8 '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0xfc, 0x00, /* 1111110000 */
+ 0xfc, 0x00, /* 1111110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0xfc, 0x00, /* 1111110000 */
+ 0xfc, 0x00, /* 1111110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+
+ /* 185 0xb9 '.' */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0xfb, 0x00, /* 1111101100 */
+ 0xfb, 0x00, /* 1111101100 */
+ 0x03, 0x00, /* 0000001100 */
+ 0xfb, 0x00, /* 1111101100 */
+ 0xfb, 0x00, /* 1111101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+
+ /* 186 0xba '.' */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+
+ /* 187 0xbb '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0xff, 0x00, /* 1111111100 */
+ 0xff, 0x00, /* 1111111100 */
+ 0x03, 0x00, /* 0000001100 */
+ 0xfb, 0x00, /* 1111101100 */
+ 0xfb, 0x00, /* 1111101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+
+ /* 188 0xbc '.' */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0xfb, 0x00, /* 1111101100 */
+ 0xfb, 0x00, /* 1111101100 */
+ 0x03, 0x00, /* 0000001100 */
+ 0xff, 0x00, /* 1111111100 */
+ 0xff, 0x00, /* 1111111100 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 189 0xbd '.' */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0xff, 0x00, /* 1111111100 */
+ 0xff, 0x00, /* 1111111100 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 190 0xbe '.' */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0xfc, 0x00, /* 1111110000 */
+ 0xfc, 0x00, /* 1111110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0xfc, 0x00, /* 1111110000 */
+ 0xfc, 0x00, /* 1111110000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 191 0xbf '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0xfc, 0x00, /* 1111110000 */
+ 0xfc, 0x00, /* 1111110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+
+ /* 192 0xc0 '.' */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0f, 0xc0, /* 0000111111 */
+ 0x0f, 0xc0, /* 0000111111 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 193 0xc1 '.' */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 194 0xc2 '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+
+ /* 195 0xc3 '.' */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0f, 0xc0, /* 0000111111 */
+ 0x0f, 0xc0, /* 0000111111 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+
+ /* 196 0xc4 '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 197 0xc5 '.' */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+
+ /* 198 0xc6 '.' */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0f, 0xc0, /* 0000111111 */
+ 0x0f, 0xc0, /* 0000111111 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0f, 0xc0, /* 0000111111 */
+ 0x0f, 0xc0, /* 0000111111 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+
+ /* 199 0xc7 '.' */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0xc0, /* 0001101111 */
+ 0x1b, 0xc0, /* 0001101111 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+
+ /* 200 0xc8 '.' */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0xc0, /* 0001101111 */
+ 0x1b, 0xc0, /* 0001101111 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x1f, 0xc0, /* 0001111111 */
+ 0x1f, 0xc0, /* 0001111111 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 201 0xc9 '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x1f, 0xc0, /* 0001111111 */
+ 0x1f, 0xc0, /* 0001111111 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x1b, 0xc0, /* 0001101111 */
+ 0x1b, 0xc0, /* 0001101111 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+
+ /* 202 0xca '.' */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0xfb, 0xc0, /* 1111101111 */
+ 0xfb, 0xc0, /* 1111101111 */
+ 0x00, 0x00, /* 0000000000 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 203 0xcb '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0x00, 0x00, /* 0000000000 */
+ 0xfb, 0xc0, /* 1111101111 */
+ 0xfb, 0xc0, /* 1111101111 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+
+ /* 204 0xcc '.' */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0xc0, /* 0001101111 */
+ 0x1b, 0xc0, /* 0001101111 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x1b, 0xc0, /* 0001101111 */
+ 0x1b, 0xc0, /* 0001101111 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+
+ /* 205 0xcd '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0x00, 0x00, /* 0000000000 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 206 0xce '.' */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0xfb, 0xc0, /* 1111101111 */
+ 0xfb, 0xc0, /* 1111101111 */
+ 0x00, 0x00, /* 0000000000 */
+ 0xfb, 0xc0, /* 1111101111 */
+ 0xfb, 0xc0, /* 1111101111 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+
+ /* 207 0xcf '.' */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0x00, 0x00, /* 0000000000 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 208 0xd0 '.' */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 209 0xd1 '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0x00, 0x00, /* 0000000000 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+
+ /* 210 0xd2 '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+
+ /* 211 0xd3 '.' */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1f, 0xc0, /* 0001111111 */
+ 0x1f, 0xc0, /* 0001111111 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 212 0xd4 '.' */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0f, 0xc0, /* 0000111111 */
+ 0x0f, 0xc0, /* 0000111111 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0f, 0xc0, /* 0000111111 */
+ 0x0f, 0xc0, /* 0000111111 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 213 0xd5 '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x0f, 0xc0, /* 0000111111 */
+ 0x0f, 0xc0, /* 0000111111 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0f, 0xc0, /* 0000111111 */
+ 0x0f, 0xc0, /* 0000111111 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+
+ /* 214 0xd6 '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x1f, 0xc0, /* 0001111111 */
+ 0x1f, 0xc0, /* 0001111111 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+
+ /* 215 0xd7 '.' */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+ 0x1b, 0x00, /* 0001101100 */
+
+ /* 216 0xd8 '.' */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+
+ /* 217 0xd9 '.' */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0xfc, 0x00, /* 1111110000 */
+ 0xfc, 0x00, /* 1111110000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 218 0xda '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x0f, 0xc0, /* 0000111111 */
+ 0x0f, 0xc0, /* 0000111111 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+
+ /* 219 0xdb '.' */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+
+ /* 220 0xdc '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+
+ /* 221 0xdd '.' */
+ 0xf8, 0x00, /* 1111100000 */
+ 0xf8, 0x00, /* 1111100000 */
+ 0xf8, 0x00, /* 1111100000 */
+ 0xf8, 0x00, /* 1111100000 */
+ 0xf8, 0x00, /* 1111100000 */
+ 0xf8, 0x00, /* 1111100000 */
+ 0xf8, 0x00, /* 1111100000 */
+ 0xf8, 0x00, /* 1111100000 */
+ 0xf8, 0x00, /* 1111100000 */
+ 0xf8, 0x00, /* 1111100000 */
+ 0xf8, 0x00, /* 1111100000 */
+ 0xf8, 0x00, /* 1111100000 */
+ 0xf8, 0x00, /* 1111100000 */
+ 0xf8, 0x00, /* 1111100000 */
+ 0xf8, 0x00, /* 1111100000 */
+ 0xf8, 0x00, /* 1111100000 */
+ 0xf8, 0x00, /* 1111100000 */
+ 0xf8, 0x00, /* 1111100000 */
+
+ /* 222 0xde '.' */
+ 0x07, 0xc0, /* 0000011111 */
+ 0x07, 0xc0, /* 0000011111 */
+ 0x07, 0xc0, /* 0000011111 */
+ 0x07, 0xc0, /* 0000011111 */
+ 0x07, 0xc0, /* 0000011111 */
+ 0x07, 0xc0, /* 0000011111 */
+ 0x07, 0xc0, /* 0000011111 */
+ 0x07, 0xc0, /* 0000011111 */
+ 0x07, 0xc0, /* 0000011111 */
+ 0x07, 0xc0, /* 0000011111 */
+ 0x07, 0xc0, /* 0000011111 */
+ 0x07, 0xc0, /* 0000011111 */
+ 0x07, 0xc0, /* 0000011111 */
+ 0x07, 0xc0, /* 0000011111 */
+ 0x07, 0xc0, /* 0000011111 */
+ 0x07, 0xc0, /* 0000011111 */
+ 0x07, 0xc0, /* 0000011111 */
+ 0x07, 0xc0, /* 0000011111 */
+
+ /* 223 0xdf '.' */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0xff, 0xc0, /* 1111111111 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 224 0xe0 '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x1c, 0x80, /* 0001110010 */
+ 0x35, 0x80, /* 0011010110 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x37, 0x80, /* 0011011110 */
+ 0x1c, 0x80, /* 0001110010 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 225 0xe1 '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x1e, 0x00, /* 0001111000 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x6f, 0x00, /* 0110111100 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x6e, 0x00, /* 0110111000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 226 0xe2 '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 227 0xe3 '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 228 0xe4 '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0xff, 0x80, /* 1111111110 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x30, 0x80, /* 0011000010 */
+ 0x61, 0x80, /* 0110000110 */
+ 0xff, 0x80, /* 1111111110 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 229 0xe5 '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x1f, 0xc0, /* 0001111111 */
+ 0x36, 0x00, /* 0011011000 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x3e, 0x00, /* 0011111000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 230 0xe6 '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x73, 0x80, /* 0111001110 */
+ 0x6d, 0x80, /* 0110110110 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0xc0, 0x00, /* 1100000000 */
+
+ /* 231 0xe7 '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x01, 0x80, /* 0000000110 */
+ 0x36, 0x40, /* 0011011001 */
+ 0x5e, 0x00, /* 0101111000 */
+ 0x8c, 0x00, /* 1000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 232 0xe8 '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x1e, 0x00, /* 0001111000 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x1e, 0x00, /* 0001111000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 233 0xe9 '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x0e, 0x00, /* 0000111000 */
+ 0x1f, 0x00, /* 0001111100 */
+ 0x31, 0x80, /* 0011000110 */
+ 0x31, 0x80, /* 0011000110 */
+ 0x60, 0xc0, /* 0110000011 */
+ 0x7f, 0xc0, /* 0111111111 */
+ 0x7f, 0xc0, /* 0111111111 */
+ 0x60, 0xc0, /* 0110000011 */
+ 0x31, 0x80, /* 0011000110 */
+ 0x31, 0x80, /* 0011000110 */
+ 0x1f, 0x00, /* 0001111100 */
+ 0x0e, 0x00, /* 0000111000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 234 0xea '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x1e, 0x00, /* 0001111000 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x61, 0x80, /* 0110000110 */
+ 0xc0, 0xc0, /* 1100000011 */
+ 0xc0, 0xc0, /* 1100000011 */
+ 0xc0, 0xc0, /* 1100000011 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x33, 0x00, /* 0011001100 */
+ 0xf3, 0xc0, /* 1111001111 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 235 0xeb '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x07, 0x00, /* 0000011100 */
+ 0x1f, 0x80, /* 0001111110 */
+ 0x30, 0xc0, /* 0011000011 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x3e, 0x00, /* 0011111000 */
+ 0x66, 0x00, /* 0110011000 */
+ 0xc3, 0x00, /* 1100001100 */
+ 0xc3, 0x00, /* 1100001100 */
+ 0xc3, 0x00, /* 1100001100 */
+ 0x66, 0x00, /* 0110011000 */
+ 0x3c, 0x00, /* 0011110000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 236 0xec '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x6d, 0x80, /* 0110110110 */
+ 0xcc, 0xc0, /* 1100110011 */
+ 0xcc, 0xc0, /* 1100110011 */
+ 0xcc, 0xc0, /* 1100110011 */
+ 0xcc, 0xc0, /* 1100110011 */
+ 0x6d, 0x80, /* 0110110110 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 237 0xed '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x01, 0x80, /* 0000000110 */
+ 0x01, 0x80, /* 0000000110 */
+ 0x03, 0x00, /* 0000001100 */
+ 0x03, 0x00, /* 0000001100 */
+ 0x37, 0x00, /* 0011011100 */
+ 0x6d, 0x80, /* 0110110110 */
+ 0xcc, 0xc0, /* 1100110011 */
+ 0xcc, 0xc0, /* 1100110011 */
+ 0xcc, 0xc0, /* 1100110011 */
+ 0xcc, 0xc0, /* 1100110011 */
+ 0x6d, 0x80, /* 0110110110 */
+ 0x3b, 0x00, /* 0011101100 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x60, 0x00, /* 0110000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 238 0xee '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x03, 0x80, /* 0000001110 */
+ 0x0e, 0x00, /* 0000111000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x3f, 0x80, /* 0011111110 */
+ 0x3f, 0x80, /* 0011111110 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x0e, 0x00, /* 0000111000 */
+ 0x03, 0x80, /* 0000001110 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 239 0xef '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x1e, 0x00, /* 0001111000 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x61, 0x80, /* 0110000110 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 240 0xf0 '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 241 0xf1 '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 242 0xf2 '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0xe0, 0x00, /* 1110000000 */
+ 0x38, 0x00, /* 0011100000 */
+ 0x0e, 0x00, /* 0000111000 */
+ 0x03, 0x80, /* 0000001110 */
+ 0x0e, 0x00, /* 0000111000 */
+ 0x38, 0x00, /* 0011100000 */
+ 0xe0, 0x00, /* 1110000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0xff, 0x00, /* 1111111100 */
+ 0xff, 0x00, /* 1111111100 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 243 0xf3 '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x03, 0x80, /* 0000001110 */
+ 0x0e, 0x00, /* 0000111000 */
+ 0x38, 0x00, /* 0011100000 */
+ 0xe0, 0x00, /* 1110000000 */
+ 0x38, 0x00, /* 0011100000 */
+ 0x0e, 0x00, /* 0000111000 */
+ 0x03, 0x80, /* 0000001110 */
+ 0x00, 0x00, /* 0000000000 */
+ 0xff, 0x80, /* 1111111110 */
+ 0xff, 0x80, /* 1111111110 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 244 0xf4 '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x1e, 0x00, /* 0001111000 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x30, 0x00, /* 0011000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 245 0xf5 '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x1e, 0x00, /* 0001111000 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x03, 0x00, /* 0000001100 */
+ 0x03, 0x00, /* 0000001100 */
+ 0x03, 0x00, /* 0000001100 */
+ 0x03, 0x00, /* 0000001100 */
+ 0x03, 0x00, /* 0000001100 */
+ 0x03, 0x00, /* 0000001100 */
+ 0x03, 0x00, /* 0000001100 */
+ 0x03, 0x00, /* 0000001100 */
+ 0x03, 0x00, /* 0000001100 */
+ 0x03, 0x00, /* 0000001100 */
+ 0x03, 0x00, /* 0000001100 */
+ 0x03, 0x00, /* 0000001100 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 246 0xf6 '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 247 0xf7 '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x38, 0x00, /* 0011100000 */
+ 0x6c, 0x00, /* 0110110000 */
+ 0x06, 0xc0, /* 0000011011 */
+ 0x03, 0x80, /* 0000001110 */
+ 0x38, 0x00, /* 0011100000 */
+ 0x6c, 0x00, /* 0110110000 */
+ 0x06, 0xc0, /* 0000011011 */
+ 0x03, 0x80, /* 0000001110 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 248 0xf8 '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x1e, 0x00, /* 0001111000 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x33, 0x00, /* 0011001100 */
+ 0x1e, 0x00, /* 0001111000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 249 0xf9 '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x1e, 0x00, /* 0001111000 */
+ 0x1e, 0x00, /* 0001111000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 250 0xfa '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 251 0xfb '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x0f, 0xc0, /* 0000111111 */
+ 0x0f, 0xc0, /* 0000111111 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0xcc, 0x00, /* 1100110000 */
+ 0x6c, 0x00, /* 0110110000 */
+ 0x3c, 0x00, /* 0011110000 */
+ 0x1c, 0x00, /* 0001110000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 252 0xfc '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x27, 0x00, /* 0010011100 */
+ 0x7b, 0x00, /* 0111101100 */
+ 0x31, 0x00, /* 0011000100 */
+ 0x31, 0x00, /* 0011000100 */
+ 0x31, 0x00, /* 0011000100 */
+ 0x7b, 0x80, /* 0111101110 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 253 0xfd '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x1e, 0x00, /* 0001111000 */
+ 0x3f, 0x00, /* 0011111100 */
+ 0x63, 0x00, /* 0110001100 */
+ 0x43, 0x00, /* 0100001100 */
+ 0x06, 0x00, /* 0000011000 */
+ 0x0c, 0x00, /* 0000110000 */
+ 0x18, 0x00, /* 0001100000 */
+ 0x30, 0x80, /* 0011000010 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x7f, 0x80, /* 0111111110 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 254 0xfe '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x3f, 0x00, /* 0011111100 */
+ 0x3f, 0x00, /* 0011111100 */
+ 0x3f, 0x00, /* 0011111100 */
+ 0x3f, 0x00, /* 0011111100 */
+ 0x3f, 0x00, /* 0011111100 */
+ 0x3f, 0x00, /* 0011111100 */
+ 0x3f, 0x00, /* 0011111100 */
+ 0x3f, 0x00, /* 0011111100 */
+ 0x3f, 0x00, /* 0011111100 */
+ 0x3f, 0x00, /* 0011111100 */
+ 0x3f, 0x00, /* 0011111100 */
+ 0x3f, 0x00, /* 0011111100 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+
+ /* 255 0xff '.' */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+ 0x00, 0x00, /* 0000000000 */
+} };
+
+
+const struct font_desc font_10x18 = {
+ .idx = FONT10x18_IDX,
+ .name = "10x18",
+ .width = 10,
+ .height = 18,
+ .charcount = 256,
+ .data = fontdata_10x18.data,
+#ifdef __sparc__
+ .pref = 5,
+#else
+ .pref = -1,
+#endif
+};
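
Editor's note: a minimal sketch (not part of the patch) of how a consumer
might test one pixel of the 10x18 bitmap above. It assumes the packing shown
in the row comments (18 rows per glyph, 2 bytes per row, leftmost pixel in
the most significant bit of the first byte). The helper name is hypothetical,
not a kernel API; in-kernel users normally reach this data through the
font_desc table, e.g. via find_font("10x18").

#include <stdbool.h>
#include <stdint.h>

#define GLYPH_HEIGHT        18 /* rows per glyph, per font_10x18.height */
#define GLYPH_BYTES_PER_ROW  2 /* 10 pixels packed MSB-first into 2 bytes */

/* Hypothetical helper: is pixel (x, y) of glyph c set? (0 <= x < 10) */
static bool glyph_pixel_set(const uint8_t *data, unsigned int c,
			    unsigned int x, unsigned int y)
{
	/* Each glyph occupies 18 rows * 2 bytes = 36 bytes. */
	const uint8_t *row = data + (c * GLYPH_HEIGHT + y) * GLYPH_BYTES_PER_ROW;

	/* Column x maps to bit 7 of byte 0 down to bit 6 of byte 1. */
	return row[x / 8] & (0x80 >> (x % 8));
}

For example, glyph 223 (0xdf) above starts with rows of 0xff, 0xc0, so
glyph_pixel_set(fontdata_10x18.data, 223, 9, 0) is true, while its all-zero
bottom half yields false for any x at y = 17.
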
diff --git a/lib/fonts/font_6x10.c b/lib/fonts/font_6x10.c
new file mode 100644
index 000000000..e65df019e
--- /dev/null
+++ b/lib/fonts/font_6x10.c
@@ -0,0 +1,3089 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/font.h>
+
+#define FONTDATAMAX 2560
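+/* 256 glyphs x 10 rows x 1 byte per row = 2560 bytes */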
+
+static const struct font_data fontdata_6x10 = {
+ { 0, 0, FONTDATAMAX, 0 }, {
+ /* 0 0x00 '^@' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 1 0x01 '^A' */
+ 0x00, /* 00000000 */
+ 0x78, /* 01111000 */
+ 0x84, /* 10000100 */
+ 0xCC, /* 11001100 */
+ 0x84, /* 10000100 */
+ 0xCC, /* 11001100 */
+ 0xB4, /* 10110100 */
+ 0x78, /* 01111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 2 0x02 '^B' */
+ 0x00, /* 00000000 */
+ 0x78, /* 01111000 */
+ 0xFC, /* 11111100 */
+ 0xB4, /* 10110100 */
+ 0xFC, /* 11111100 */
+ 0xB4, /* 10110100 */
+ 0xCC, /* 11001100 */
+ 0x78, /* 01111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 3 0x03 '^C' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x28, /* 00101000 */
+ 0x7C, /* 01111100 */
+ 0x7C, /* 01111100 */
+ 0x38, /* 00111000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 4 0x04 '^D' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x10, /* 00010000 */
+ 0x38, /* 00111000 */
+ 0x7C, /* 01111100 */
+ 0x38, /* 00111000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 5 0x05 '^E' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x38, /* 00111000 */
+ 0x6C, /* 01101100 */
+ 0x6C, /* 01101100 */
+ 0x10, /* 00010000 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 6 0x06 '^F' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x10, /* 00010000 */
+ 0x38, /* 00111000 */
+ 0x7C, /* 01111100 */
+ 0x7C, /* 01111100 */
+ 0x10, /* 00010000 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 7 0x07 '^G' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x30, /* 00110000 */
+ 0x78, /* 01111000 */
+ 0x30, /* 00110000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 8 0x08 '^H' */
+ 0xFC, /* 11111100 */
+ 0xFC, /* 11111100 */
+ 0xFC, /* 11111100 */
+ 0xCC, /* 11001100 */
+ 0x84, /* 10000100 */
+ 0xCC, /* 11001100 */
+ 0xFC, /* 11111100 */
+ 0xFC, /* 11111100 */
+ 0xFC, /* 11111100 */
+ 0xFC, /* 11111100 */
+
+ /* 9 0x09 '^I' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x30, /* 00110000 */
+ 0x48, /* 01001000 */
+ 0x84, /* 10000100 */
+ 0x48, /* 01001000 */
+ 0x30, /* 00110000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 10 0x0A '^J' */
+ 0xFC, /* 11111100 */
+ 0xFC, /* 11111100 */
+ 0xCC, /* 11001100 */
+ 0xB4, /* 10110100 */
+ 0x78, /* 01111000 */
+ 0xB4, /* 10110100 */
+ 0xCC, /* 11001100 */
+ 0xFC, /* 11111100 */
+ 0xFC, /* 11111100 */
+ 0xFC, /* 11111100 */
+
+ /* 11 0x0B '^K' */
+ 0x00, /* 00000000 */
+ 0x3C, /* 00111100 */
+ 0x14, /* 00010100 */
+ 0x20, /* 00100000 */
+ 0x78, /* 01111000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 12 0x0C '^L' */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x38, /* 00111000 */
+ 0x10, /* 00010000 */
+ 0x38, /* 00111000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 13 0x0D '^M' */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x14, /* 00010100 */
+ 0x14, /* 00010100 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x70, /* 01110000 */
+ 0x60, /* 01100000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 14 0x0E '^N' */
+ 0x00, /* 00000000 */
+ 0x3C, /* 00111100 */
+ 0x24, /* 00100100 */
+ 0x3C, /* 00111100 */
+ 0x24, /* 00100100 */
+ 0x24, /* 00100100 */
+ 0x6C, /* 01101100 */
+ 0x6C, /* 01101100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 15 0x0F '^O' */
+ 0x00, /* 00000000 */
+ 0x10, /* 00010000 */
+ 0x54, /* 01010100 */
+ 0x38, /* 00111000 */
+ 0x6C, /* 01101100 */
+ 0x38, /* 00111000 */
+ 0x54, /* 01010100 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 16 0x10 '^P' */
+ 0x00, /* 00000000 */
+ 0x40, /* 01000000 */
+ 0x60, /* 01100000 */
+ 0x70, /* 01110000 */
+ 0x78, /* 01111000 */
+ 0x70, /* 01110000 */
+ 0x60, /* 01100000 */
+ 0x40, /* 01000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 17 0x11 '^Q' */
+ 0x00, /* 00000000 */
+ 0x04, /* 00000100 */
+ 0x0C, /* 00001100 */
+ 0x1C, /* 00011100 */
+ 0x3C, /* 00111100 */
+ 0x1C, /* 00011100 */
+ 0x0C, /* 00001100 */
+ 0x04, /* 00000100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 18 0x12 '^R' */
+ 0x00, /* 00000000 */
+ 0x10, /* 00010000 */
+ 0x38, /* 00111000 */
+ 0x54, /* 01010100 */
+ 0x10, /* 00010000 */
+ 0x54, /* 01010100 */
+ 0x38, /* 00111000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 19 0x13 '^S' */
+ 0x00, /* 00000000 */
+ 0x48, /* 01001000 */
+ 0x48, /* 01001000 */
+ 0x48, /* 01001000 */
+ 0x48, /* 01001000 */
+ 0x48, /* 01001000 */
+ 0x00, /* 00000000 */
+ 0x48, /* 01001000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 20 0x14 '^T' */
+ 0x3C, /* 00111100 */
+ 0x54, /* 01010100 */
+ 0x54, /* 01010100 */
+ 0x54, /* 01010100 */
+ 0x3C, /* 00111100 */
+ 0x14, /* 00010100 */
+ 0x14, /* 00010100 */
+ 0x14, /* 00010100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 21 0x15 '^U' */
+ 0x38, /* 00111000 */
+ 0x44, /* 01000100 */
+ 0x20, /* 00100000 */
+ 0x50, /* 01010000 */
+ 0x48, /* 01001000 */
+ 0x24, /* 00100100 */
+ 0x14, /* 00010100 */
+ 0x08, /* 00001000 */
+ 0x44, /* 01000100 */
+ 0x38, /* 00111000 */
+
+ /* 22 0x16 '^V' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xF8, /* 11111000 */
+ 0xF8, /* 11111000 */
+ 0xF8, /* 11111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 23 0x17 '^W' */
+ 0x00, /* 00000000 */
+ 0x10, /* 00010000 */
+ 0x38, /* 00111000 */
+ 0x54, /* 01010100 */
+ 0x10, /* 00010000 */
+ 0x54, /* 01010100 */
+ 0x38, /* 00111000 */
+ 0x10, /* 00010000 */
+ 0x7C, /* 01111100 */
+ 0x00, /* 00000000 */
+
+ /* 24 0x18 '^X' */
+ 0x00, /* 00000000 */
+ 0x10, /* 00010000 */
+ 0x38, /* 00111000 */
+ 0x54, /* 01010100 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 25 0x19 '^Y' */
+ 0x00, /* 00000000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x54, /* 01010100 */
+ 0x38, /* 00111000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 26 0x1A '^Z' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x10, /* 00010000 */
+ 0x08, /* 00001000 */
+ 0x7C, /* 01111100 */
+ 0x08, /* 00001000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 27 0x1B '^[' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x10, /* 00010000 */
+ 0x20, /* 00100000 */
+ 0x7C, /* 01111100 */
+ 0x20, /* 00100000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 28 0x1C '^\' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x40, /* 01000000 */
+ 0x40, /* 01000000 */
+ 0x40, /* 01000000 */
+ 0x78, /* 01111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 29 0x1D '^]' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x48, /* 01001000 */
+ 0x84, /* 10000100 */
+ 0xFC, /* 11111100 */
+ 0x84, /* 10000100 */
+ 0x48, /* 01001000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 30 0x1E '^^' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x38, /* 00111000 */
+ 0x38, /* 00111000 */
+ 0x7C, /* 01111100 */
+ 0x7C, /* 01111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 31 0x1F '^_' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7C, /* 01111100 */
+ 0x7C, /* 01111100 */
+ 0x38, /* 00111000 */
+ 0x38, /* 00111000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 32 0x20 ' ' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 33 0x21 '!' */
+ 0x00, /* 00000000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 34 0x22 '"' */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 35 0x23 '#' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x28, /* 00101000 */
+ 0x7C, /* 01111100 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x7C, /* 01111100 */
+ 0x28, /* 00101000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 36 0x24 '$' */
+ 0x10, /* 00010000 */
+ 0x38, /* 00111000 */
+ 0x54, /* 01010100 */
+ 0x50, /* 01010000 */
+ 0x38, /* 00111000 */
+ 0x14, /* 00010100 */
+ 0x54, /* 01010100 */
+ 0x38, /* 00111000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+
+ /* 37 0x25 '%' */
+ 0x00, /* 00000000 */
+ 0x64, /* 01100100 */
+ 0x64, /* 01100100 */
+ 0x08, /* 00001000 */
+ 0x10, /* 00010000 */
+ 0x20, /* 00100000 */
+ 0x4C, /* 01001100 */
+ 0x4C, /* 01001100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 38 0x26 '&' */
+ 0x00, /* 00000000 */
+ 0x30, /* 00110000 */
+ 0x48, /* 01001000 */
+ 0x50, /* 01010000 */
+ 0x20, /* 00100000 */
+ 0x54, /* 01010100 */
+ 0x48, /* 01001000 */
+ 0x34, /* 00110100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 39 0x27 ''' */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 40 0x28 '(' */
+ 0x08, /* 00001000 */
+ 0x10, /* 00010000 */
+ 0x20, /* 00100000 */
+ 0x20, /* 00100000 */
+ 0x20, /* 00100000 */
+ 0x20, /* 00100000 */
+ 0x20, /* 00100000 */
+ 0x10, /* 00010000 */
+ 0x08, /* 00001000 */
+ 0x00, /* 00000000 */
+
+ /* 41 0x29 ')' */
+ 0x20, /* 00100000 */
+ 0x10, /* 00010000 */
+ 0x08, /* 00001000 */
+ 0x08, /* 00001000 */
+ 0x08, /* 00001000 */
+ 0x08, /* 00001000 */
+ 0x08, /* 00001000 */
+ 0x10, /* 00010000 */
+ 0x20, /* 00100000 */
+ 0x00, /* 00000000 */
+
+ /* 42 0x2A '*' */
+ 0x00, /* 00000000 */
+ 0x10, /* 00010000 */
+ 0x54, /* 01010100 */
+ 0x38, /* 00111000 */
+ 0x54, /* 01010100 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 43 0x2B '+' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x7C, /* 01111100 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 44 0x2C ',' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x30, /* 00110000 */
+ 0x30, /* 00110000 */
+ 0x10, /* 00010000 */
+ 0x20, /* 00100000 */
+
+ /* 45 0x2D '-' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7C, /* 01111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 46 0x2E '.' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 47 0x2F '/' */
+ 0x04, /* 00000100 */
+ 0x04, /* 00000100 */
+ 0x08, /* 00001000 */
+ 0x08, /* 00001000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x20, /* 00100000 */
+ 0x20, /* 00100000 */
+ 0x40, /* 01000000 */
+ 0x40, /* 01000000 */
+
+ /* 48 0x30 '0' */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x44, /* 01000100 */
+ 0x4C, /* 01001100 */
+ 0x54, /* 01010100 */
+ 0x64, /* 01100100 */
+ 0x44, /* 01000100 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 49 0x31 '1' */
+ 0x00, /* 00000000 */
+ 0x10, /* 00010000 */
+ 0x30, /* 00110000 */
+ 0x50, /* 01010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x7C, /* 01111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 50 0x32 '2' */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x44, /* 01000100 */
+ 0x04, /* 00000100 */
+ 0x08, /* 00001000 */
+ 0x10, /* 00010000 */
+ 0x20, /* 00100000 */
+ 0x7C, /* 01111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 51 0x33 '3' */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x44, /* 01000100 */
+ 0x04, /* 00000100 */
+ 0x18, /* 00011000 */
+ 0x04, /* 00000100 */
+ 0x44, /* 01000100 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 52 0x34 '4' */
+ 0x00, /* 00000000 */
+ 0x08, /* 00001000 */
+ 0x18, /* 00011000 */
+ 0x28, /* 00101000 */
+ 0x48, /* 01001000 */
+ 0x7C, /* 01111100 */
+ 0x08, /* 00001000 */
+ 0x08, /* 00001000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 53 0x35 '5' */
+ 0x00, /* 00000000 */
+ 0x7C, /* 01111100 */
+ 0x40, /* 01000000 */
+ 0x78, /* 01111000 */
+ 0x04, /* 00000100 */
+ 0x04, /* 00000100 */
+ 0x44, /* 01000100 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 54 0x36 '6' */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x20, /* 00100000 */
+ 0x40, /* 01000000 */
+ 0x78, /* 01111000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 55 0x37 '7' */
+ 0x00, /* 00000000 */
+ 0x7C, /* 01111100 */
+ 0x04, /* 00000100 */
+ 0x04, /* 00000100 */
+ 0x08, /* 00001000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 56 0x38 '8' */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x38, /* 00111000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 57 0x39 '9' */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x3C, /* 00111100 */
+ 0x04, /* 00000100 */
+ 0x08, /* 00001000 */
+ 0x30, /* 00110000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 58 0x3A ':' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 59 0x3B ';' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x30, /* 00110000 */
+ 0x30, /* 00110000 */
+ 0x00, /* 00000000 */
+ 0x30, /* 00110000 */
+ 0x30, /* 00110000 */
+ 0x10, /* 00010000 */
+ 0x20, /* 00100000 */
+
+ /* 60 0x3C '<' */
+ 0x00, /* 00000000 */
+ 0x04, /* 00000100 */
+ 0x08, /* 00001000 */
+ 0x10, /* 00010000 */
+ 0x20, /* 00100000 */
+ 0x10, /* 00010000 */
+ 0x08, /* 00001000 */
+ 0x04, /* 00000100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 61 0x3D '=' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7C, /* 01111100 */
+ 0x00, /* 00000000 */
+ 0x7C, /* 01111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 62 0x3E '>' */
+ 0x00, /* 00000000 */
+ 0x20, /* 00100000 */
+ 0x10, /* 00010000 */
+ 0x08, /* 00001000 */
+ 0x04, /* 00000100 */
+ 0x08, /* 00001000 */
+ 0x10, /* 00010000 */
+ 0x20, /* 00100000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 63 0x3F '?' */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x44, /* 01000100 */
+ 0x04, /* 00000100 */
+ 0x08, /* 00001000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 64 0x40 '@' */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x44, /* 01000100 */
+ 0x5C, /* 01011100 */
+ 0x54, /* 01010100 */
+ 0x5C, /* 01011100 */
+ 0x40, /* 01000000 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 65 0x41 'A' */
+ 0x00, /* 00000000 */
+ 0x10, /* 00010000 */
+ 0x28, /* 00101000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x7C, /* 01111100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 66 0x42 'B' */
+ 0x00, /* 00000000 */
+ 0x78, /* 01111000 */
+ 0x24, /* 00100100 */
+ 0x24, /* 00100100 */
+ 0x38, /* 00111000 */
+ 0x24, /* 00100100 */
+ 0x24, /* 00100100 */
+ 0x78, /* 01111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 67 0x43 'C' */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x44, /* 01000100 */
+ 0x40, /* 01000000 */
+ 0x40, /* 01000000 */
+ 0x40, /* 01000000 */
+ 0x44, /* 01000100 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 68 0x44 'D' */
+ 0x00, /* 00000000 */
+ 0x78, /* 01111000 */
+ 0x24, /* 00100100 */
+ 0x24, /* 00100100 */
+ 0x24, /* 00100100 */
+ 0x24, /* 00100100 */
+ 0x24, /* 00100100 */
+ 0x78, /* 01111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 69 0x45 'E' */
+ 0x00, /* 00000000 */
+ 0x7C, /* 01111100 */
+ 0x40, /* 01000000 */
+ 0x40, /* 01000000 */
+ 0x78, /* 01111000 */
+ 0x40, /* 01000000 */
+ 0x40, /* 01000000 */
+ 0x7C, /* 01111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 70 0x46 'F' */
+ 0x00, /* 00000000 */
+ 0x7C, /* 01111100 */
+ 0x40, /* 01000000 */
+ 0x40, /* 01000000 */
+ 0x78, /* 01111000 */
+ 0x40, /* 01000000 */
+ 0x40, /* 01000000 */
+ 0x40, /* 01000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 71 0x47 'G' */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x44, /* 01000100 */
+ 0x40, /* 01000000 */
+ 0x5C, /* 01011100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 72 0x48 'H' */
+ 0x00, /* 00000000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x7C, /* 01111100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 73 0x49 'I' */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 74 0x4A 'J' */
+ 0x00, /* 00000000 */
+ 0x1C, /* 00011100 */
+ 0x08, /* 00001000 */
+ 0x08, /* 00001000 */
+ 0x08, /* 00001000 */
+ 0x48, /* 01001000 */
+ 0x48, /* 01001000 */
+ 0x30, /* 00110000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 75 0x4B 'K' */
+ 0x00, /* 00000000 */
+ 0x44, /* 01000100 */
+ 0x48, /* 01001000 */
+ 0x50, /* 01010000 */
+ 0x60, /* 01100000 */
+ 0x50, /* 01010000 */
+ 0x48, /* 01001000 */
+ 0x44, /* 01000100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 76 0x4C 'L' */
+ 0x00, /* 00000000 */
+ 0x40, /* 01000000 */
+ 0x40, /* 01000000 */
+ 0x40, /* 01000000 */
+ 0x40, /* 01000000 */
+ 0x40, /* 01000000 */
+ 0x40, /* 01000000 */
+ 0x7C, /* 01111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 77 0x4D 'M' */
+ 0x00, /* 00000000 */
+ 0x44, /* 01000100 */
+ 0x6C, /* 01101100 */
+ 0x54, /* 01010100 */
+ 0x54, /* 01010100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 78 0x4E 'N' */
+ 0x00, /* 00000000 */
+ 0x44, /* 01000100 */
+ 0x64, /* 01100100 */
+ 0x54, /* 01010100 */
+ 0x4C, /* 01001100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 79 0x4F 'O' */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 80 0x50 'P' */
+ 0x00, /* 00000000 */
+ 0x78, /* 01111000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x78, /* 01111000 */
+ 0x40, /* 01000000 */
+ 0x40, /* 01000000 */
+ 0x40, /* 01000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 81 0x51 'Q' */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x54, /* 01010100 */
+ 0x48, /* 01001000 */
+ 0x34, /* 00110100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 82 0x52 'R' */
+ 0x00, /* 00000000 */
+ 0x78, /* 01111000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x78, /* 01111000 */
+ 0x50, /* 01010000 */
+ 0x48, /* 01001000 */
+ 0x44, /* 01000100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 83 0x53 'S' */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x44, /* 01000100 */
+ 0x40, /* 01000000 */
+ 0x38, /* 00111000 */
+ 0x04, /* 00000100 */
+ 0x44, /* 01000100 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 84 0x54 'T' */
+ 0x00, /* 00000000 */
+ 0x7C, /* 01111100 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 85 0x55 'U' */
+ 0x00, /* 00000000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 86 0x56 'V' */
+ 0x00, /* 00000000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x28, /* 00101000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 87 0x57 'W' */
+ 0x00, /* 00000000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x54, /* 01010100 */
+ 0x54, /* 01010100 */
+ 0x6C, /* 01101100 */
+ 0x44, /* 01000100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 88 0x58 'X' */
+ 0x00, /* 00000000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x28, /* 00101000 */
+ 0x10, /* 00010000 */
+ 0x28, /* 00101000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 89 0x59 'Y' */
+ 0x00, /* 00000000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x28, /* 00101000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 90 0x5A 'Z' */
+ 0x00, /* 00000000 */
+ 0x7C, /* 01111100 */
+ 0x04, /* 00000100 */
+ 0x08, /* 00001000 */
+ 0x10, /* 00010000 */
+ 0x20, /* 00100000 */
+ 0x40, /* 01000000 */
+ 0x7C, /* 01111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 91 0x5B '[' */
+ 0x18, /* 00011000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+
+ /* 92 0x5C '\' */
+ 0x40, /* 01000000 */
+ 0x40, /* 01000000 */
+ 0x20, /* 00100000 */
+ 0x20, /* 00100000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x08, /* 00001000 */
+ 0x08, /* 00001000 */
+ 0x04, /* 00000100 */
+ 0x04, /* 00000100 */
+
+ /* 93 0x5D ']' */
+ 0x30, /* 00110000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x30, /* 00110000 */
+ 0x00, /* 00000000 */
+
+ /* 94 0x5E '^' */
+ 0x00, /* 00000000 */
+ 0x10, /* 00010000 */
+ 0x28, /* 00101000 */
+ 0x44, /* 01000100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 95 0x5F '_' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7C, /* 01111100 */
+ 0x00, /* 00000000 */
+
+ /* 96 0x60 '`' */
+ 0x20, /* 00100000 */
+ 0x10, /* 00010000 */
+ 0x08, /* 00001000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 97 0x61 'a' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x04, /* 00000100 */
+ 0x3C, /* 00111100 */
+ 0x44, /* 01000100 */
+ 0x3C, /* 00111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 98 0x62 'b' */
+ 0x00, /* 00000000 */
+ 0x40, /* 01000000 */
+ 0x40, /* 01000000 */
+ 0x58, /* 01011000 */
+ 0x64, /* 01100100 */
+ 0x44, /* 01000100 */
+ 0x64, /* 01100100 */
+ 0x58, /* 01011000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 99 0x63 'c' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x44, /* 01000100 */
+ 0x40, /* 01000000 */
+ 0x44, /* 01000100 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 100 0x64 'd' */
+ 0x00, /* 00000000 */
+ 0x04, /* 00000100 */
+ 0x04, /* 00000100 */
+ 0x34, /* 00110100 */
+ 0x4C, /* 01001100 */
+ 0x44, /* 01000100 */
+ 0x4C, /* 01001100 */
+ 0x34, /* 00110100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 101 0x65 'e' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x44, /* 01000100 */
+ 0x7C, /* 01111100 */
+ 0x40, /* 01000000 */
+ 0x3C, /* 00111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 102 0x66 'f' */
+ 0x00, /* 00000000 */
+ 0x0C, /* 00001100 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x38, /* 00111000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 103 0x67 'g' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x34, /* 00110100 */
+ 0x4C, /* 01001100 */
+ 0x44, /* 01000100 */
+ 0x4C, /* 01001100 */
+ 0x34, /* 00110100 */
+ 0x04, /* 00000100 */
+ 0x38, /* 00111000 */
+
+ /* 104 0x68 'h' */
+ 0x00, /* 00000000 */
+ 0x40, /* 01000000 */
+ 0x40, /* 01000000 */
+ 0x78, /* 01111000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 105 0x69 'i' */
+ 0x00, /* 00000000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x30, /* 00110000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 106 0x6A 'j' */
+ 0x00, /* 00000000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x30, /* 00110000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x60, /* 01100000 */
+
+ /* 107 0x6B 'k' */
+ 0x00, /* 00000000 */
+ 0x40, /* 01000000 */
+ 0x40, /* 01000000 */
+ 0x48, /* 01001000 */
+ 0x50, /* 01010000 */
+ 0x70, /* 01110000 */
+ 0x48, /* 01001000 */
+ 0x44, /* 01000100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 108 0x6C 'l' */
+ 0x00, /* 00000000 */
+ 0x30, /* 00110000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 109 0x6D 'm' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x68, /* 01101000 */
+ 0x54, /* 01010100 */
+ 0x54, /* 01010100 */
+ 0x54, /* 01010100 */
+ 0x54, /* 01010100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 110 0x6E 'n' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x58, /* 01011000 */
+ 0x64, /* 01100100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 111 0x6F 'o' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 112 0x70 'p' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x58, /* 01011000 */
+ 0x64, /* 01100100 */
+ 0x44, /* 01000100 */
+ 0x64, /* 01100100 */
+ 0x58, /* 01011000 */
+ 0x40, /* 01000000 */
+ 0x40, /* 01000000 */
+
+ /* 113 0x71 'q' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x34, /* 00110100 */
+ 0x4C, /* 01001100 */
+ 0x44, /* 01000100 */
+ 0x4C, /* 01001100 */
+ 0x34, /* 00110100 */
+ 0x04, /* 00000100 */
+ 0x04, /* 00000100 */
+
+ /* 114 0x72 'r' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x58, /* 01011000 */
+ 0x64, /* 01100100 */
+ 0x40, /* 01000000 */
+ 0x40, /* 01000000 */
+ 0x40, /* 01000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 115 0x73 's' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x3C, /* 00111100 */
+ 0x40, /* 01000000 */
+ 0x38, /* 00111000 */
+ 0x04, /* 00000100 */
+ 0x78, /* 01111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 116 0x74 't' */
+ 0x00, /* 00000000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x38, /* 00111000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x0C, /* 00001100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 117 0x75 'u' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x4C, /* 01001100 */
+ 0x34, /* 00110100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 118 0x76 'v' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x28, /* 00101000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 119 0x77 'w' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x54, /* 01010100 */
+ 0x54, /* 01010100 */
+ 0x54, /* 01010100 */
+ 0x54, /* 01010100 */
+ 0x28, /* 00101000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 120 0x78 'x' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x44, /* 01000100 */
+ 0x28, /* 00101000 */
+ 0x10, /* 00010000 */
+ 0x28, /* 00101000 */
+ 0x44, /* 01000100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 121 0x79 'y' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x3C, /* 00111100 */
+ 0x04, /* 00000100 */
+ 0x38, /* 00111000 */
+
+ /* 122 0x7A 'z' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7C, /* 01111100 */
+ 0x08, /* 00001000 */
+ 0x10, /* 00010000 */
+ 0x20, /* 00100000 */
+ 0x7C, /* 01111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 123 0x7B '{' */
+ 0x08, /* 00001000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x20, /* 00100000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x08, /* 00001000 */
+ 0x00, /* 00000000 */
+
+ /* 124 0x7C '|' */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+
+ /* 125 0x7D '}' */
+ 0x20, /* 00100000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x08, /* 00001000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x20, /* 00100000 */
+ 0x00, /* 00000000 */
+
+ /* 126 0x7E '~' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x20, /* 00100000 */
+ 0x54, /* 01010100 */
+ 0x08, /* 00001000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 127 0x7F '' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x10, /* 00010000 */
+ 0x28, /* 00101000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x7C, /* 01111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 128 0x80 '\200' */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x44, /* 01000100 */
+ 0x40, /* 01000000 */
+ 0x40, /* 01000000 */
+ 0x40, /* 01000000 */
+ 0x44, /* 01000100 */
+ 0x38, /* 00111000 */
+ 0x10, /* 00010000 */
+ 0x20, /* 00100000 */
+
+ /* 129 0x81 '\201' */
+ 0x00, /* 00000000 */
+ 0x28, /* 00101000 */
+ 0x00, /* 00000000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x4C, /* 01001100 */
+ 0x34, /* 00110100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 130 0x82 '\202' */
+ 0x08, /* 00001000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x44, /* 01000100 */
+ 0x7C, /* 01111100 */
+ 0x40, /* 01000000 */
+ 0x3C, /* 00111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 131 0x83 '\203' */
+ 0x10, /* 00010000 */
+ 0x28, /* 00101000 */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x04, /* 00000100 */
+ 0x3C, /* 00111100 */
+ 0x44, /* 01000100 */
+ 0x3C, /* 00111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 132 0x84 '\204' */
+ 0x00, /* 00000000 */
+ 0x28, /* 00101000 */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x04, /* 00000100 */
+ 0x3C, /* 00111100 */
+ 0x44, /* 01000100 */
+ 0x3C, /* 00111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 133 0x85 '\205' */
+ 0x10, /* 00010000 */
+ 0x08, /* 00001000 */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x04, /* 00000100 */
+ 0x3C, /* 00111100 */
+ 0x44, /* 01000100 */
+ 0x3C, /* 00111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 134 0x86 '\206' */
+ 0x18, /* 00011000 */
+ 0x24, /* 00100100 */
+ 0x18, /* 00011000 */
+ 0x38, /* 00111000 */
+ 0x04, /* 00000100 */
+ 0x3C, /* 00111100 */
+ 0x44, /* 01000100 */
+ 0x3C, /* 00111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 135 0x87 '\207' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x44, /* 01000100 */
+ 0x40, /* 01000000 */
+ 0x44, /* 01000100 */
+ 0x38, /* 00111000 */
+ 0x10, /* 00010000 */
+ 0x20, /* 00100000 */
+
+ /* 136 0x88 '\210' */
+ 0x10, /* 00010000 */
+ 0x28, /* 00101000 */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x44, /* 01000100 */
+ 0x7C, /* 01111100 */
+ 0x40, /* 01000000 */
+ 0x3C, /* 00111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 137 0x89 '\211' */
+ 0x00, /* 00000000 */
+ 0x28, /* 00101000 */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x44, /* 01000100 */
+ 0x7C, /* 01111100 */
+ 0x40, /* 01000000 */
+ 0x3C, /* 00111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 138 0x8A '\212' */
+ 0x20, /* 00100000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x44, /* 01000100 */
+ 0x7C, /* 01111100 */
+ 0x40, /* 01000000 */
+ 0x3C, /* 00111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 139 0x8B '\213' */
+ 0x00, /* 00000000 */
+ 0x28, /* 00101000 */
+ 0x00, /* 00000000 */
+ 0x30, /* 00110000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 140 0x8C '\214' */
+ 0x10, /* 00010000 */
+ 0x28, /* 00101000 */
+ 0x00, /* 00000000 */
+ 0x30, /* 00110000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 141 0x8D '\215' */
+ 0x20, /* 00100000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x30, /* 00110000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 142 0x8E '\216' */
+ 0x44, /* 01000100 */
+ 0x10, /* 00010000 */
+ 0x28, /* 00101000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x7C, /* 01111100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 143 0x8F '\217' */
+ 0x30, /* 00110000 */
+ 0x48, /* 01001000 */
+ 0x38, /* 00111000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x7C, /* 01111100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 144 0x90 '\220' */
+ 0x10, /* 00010000 */
+ 0x7C, /* 01111100 */
+ 0x40, /* 01000000 */
+ 0x40, /* 01000000 */
+ 0x78, /* 01111000 */
+ 0x40, /* 01000000 */
+ 0x40, /* 01000000 */
+ 0x7C, /* 01111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 145 0x91 '\221' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x78, /* 01111000 */
+ 0x14, /* 00010100 */
+ 0x7C, /* 01111100 */
+ 0x50, /* 01010000 */
+ 0x3C, /* 00111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 146 0x92 '\222' */
+ 0x00, /* 00000000 */
+ 0x3C, /* 00111100 */
+ 0x50, /* 01010000 */
+ 0x50, /* 01010000 */
+ 0x78, /* 01111000 */
+ 0x50, /* 01010000 */
+ 0x50, /* 01010000 */
+ 0x5C, /* 01011100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 147 0x93 '\223' */
+ 0x10, /* 00010000 */
+ 0x28, /* 00101000 */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 148 0x94 '\224' */
+ 0x00, /* 00000000 */
+ 0x28, /* 00101000 */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 149 0x95 '\225' */
+ 0x20, /* 00100000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 150 0x96 '\226' */
+ 0x10, /* 00010000 */
+ 0x28, /* 00101000 */
+ 0x00, /* 00000000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x4C, /* 01001100 */
+ 0x34, /* 00110100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 151 0x97 '\227' */
+ 0x20, /* 00100000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x4C, /* 01001100 */
+ 0x34, /* 00110100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 152 0x98 '\230' */
+ 0x00, /* 00000000 */
+ 0x28, /* 00101000 */
+ 0x00, /* 00000000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x3C, /* 00111100 */
+ 0x04, /* 00000100 */
+ 0x38, /* 00111000 */
+
+ /* 153 0x99 '\231' */
+ 0x84, /* 10000100 */
+ 0x38, /* 00111000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 154 0x9A '\232' */
+ 0x88, /* 10001000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 155 0x9B '\233' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x10, /* 00010000 */
+ 0x38, /* 00111000 */
+ 0x54, /* 01010100 */
+ 0x50, /* 01010000 */
+ 0x54, /* 01010100 */
+ 0x38, /* 00111000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+
+ /* 156 0x9C '\234' */
+ 0x30, /* 00110000 */
+ 0x48, /* 01001000 */
+ 0x40, /* 01000000 */
+ 0x70, /* 01110000 */
+ 0x40, /* 01000000 */
+ 0x40, /* 01000000 */
+ 0x44, /* 01000100 */
+ 0x78, /* 01111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 157 0x9D '\235' */
+ 0x00, /* 00000000 */
+ 0x44, /* 01000100 */
+ 0x28, /* 00101000 */
+ 0x7C, /* 01111100 */
+ 0x10, /* 00010000 */
+ 0x7C, /* 01111100 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 158 0x9E '\236' */
+ 0x00, /* 00000000 */
+ 0x70, /* 01110000 */
+ 0x48, /* 01001000 */
+ 0x70, /* 01110000 */
+ 0x48, /* 01001000 */
+ 0x5C, /* 01011100 */
+ 0x48, /* 01001000 */
+ 0x44, /* 01000100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 159 0x9F '\237' */
+ 0x00, /* 00000000 */
+ 0x0C, /* 00001100 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x38, /* 00111000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x60, /* 01100000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 160 0xA0 '\240' */
+ 0x08, /* 00001000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x04, /* 00000100 */
+ 0x3C, /* 00111100 */
+ 0x44, /* 01000100 */
+ 0x3C, /* 00111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 161 0xA1 '\241' */
+ 0x08, /* 00001000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x30, /* 00110000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 162 0xA2 '\242' */
+ 0x08, /* 00001000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 163 0xA3 '\243' */
+ 0x08, /* 00001000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x4C, /* 01001100 */
+ 0x34, /* 00110100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 164 0xA4 '\244' */
+ 0x34, /* 00110100 */
+ 0x58, /* 01011000 */
+ 0x00, /* 00000000 */
+ 0x58, /* 01011000 */
+ 0x64, /* 01100100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 165 0xA5 '\245' */
+ 0x58, /* 01011000 */
+ 0x44, /* 01000100 */
+ 0x64, /* 01100100 */
+ 0x54, /* 01010100 */
+ 0x4C, /* 01001100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 166 0xA6 '\246' */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x04, /* 00000100 */
+ 0x3C, /* 00111100 */
+ 0x44, /* 01000100 */
+ 0x3C, /* 00111100 */
+ 0x00, /* 00000000 */
+ 0x7C, /* 01111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 167 0xA7 '\247' */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x7C, /* 01111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 168 0xA8 '\250' */
+ 0x00, /* 00000000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x10, /* 00010000 */
+ 0x20, /* 00100000 */
+ 0x40, /* 01000000 */
+ 0x44, /* 01000100 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 169 0xA9 '\251' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7C, /* 01111100 */
+ 0x40, /* 01000000 */
+ 0x40, /* 01000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 170 0xAA '\252' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7C, /* 01111100 */
+ 0x04, /* 00000100 */
+ 0x04, /* 00000100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 171 0xAB '\253' */
+ 0x20, /* 00100000 */
+ 0x60, /* 01100000 */
+ 0x24, /* 00100100 */
+ 0x28, /* 00101000 */
+ 0x10, /* 00010000 */
+ 0x28, /* 00101000 */
+ 0x44, /* 01000100 */
+ 0x08, /* 00001000 */
+ 0x1C, /* 00011100 */
+ 0x00, /* 00000000 */
+
+ /* 172 0xAC '\254' */
+ 0x20, /* 00100000 */
+ 0x60, /* 01100000 */
+ 0x24, /* 00100100 */
+ 0x28, /* 00101000 */
+ 0x10, /* 00010000 */
+ 0x28, /* 00101000 */
+ 0x58, /* 01011000 */
+ 0x3C, /* 00111100 */
+ 0x08, /* 00001000 */
+ 0x00, /* 00000000 */
+
+ /* 173 0xAD '\255' */
+ 0x00, /* 00000000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 174 0xAE '\256' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x24, /* 00100100 */
+ 0x48, /* 01001000 */
+ 0x90, /* 10010000 */
+ 0x48, /* 01001000 */
+ 0x24, /* 00100100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 175 0xAF '\257' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x90, /* 10010000 */
+ 0x48, /* 01001000 */
+ 0x24, /* 00100100 */
+ 0x48, /* 01001000 */
+ 0x90, /* 10010000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 176 0xB0 '\260' */
+ 0x10, /* 00010000 */
+ 0x44, /* 01000100 */
+ 0x10, /* 00010000 */
+ 0x44, /* 01000100 */
+ 0x10, /* 00010000 */
+ 0x44, /* 01000100 */
+ 0x10, /* 00010000 */
+ 0x44, /* 01000100 */
+ 0x10, /* 00010000 */
+ 0x44, /* 01000100 */
+
+ /* 177 0xB1 '\261' */
+ 0xA8, /* 10101000 */
+ 0x54, /* 01010100 */
+ 0xA8, /* 10101000 */
+ 0x54, /* 01010100 */
+ 0xA8, /* 10101000 */
+ 0x54, /* 01010100 */
+ 0xA8, /* 10101000 */
+ 0x54, /* 01010100 */
+ 0xA8, /* 10101000 */
+ 0x54, /* 01010100 */
+
+ /* 178 0xB2 '\262' */
+ 0xDC, /* 11011100 */
+ 0x74, /* 01110100 */
+ 0xDC, /* 11011100 */
+ 0x74, /* 01110100 */
+ 0xDC, /* 11011100 */
+ 0x74, /* 01110100 */
+ 0xDC, /* 11011100 */
+ 0x74, /* 01110100 */
+ 0xDC, /* 11011100 */
+ 0x74, /* 01110100 */
+
+ /* 179 0xB3 '\263' */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+
+ /* 180 0xB4 '\264' */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0xF0, /* 11110000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+
+ /* 181 0xB5 '\265' */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0xF0, /* 11110000 */
+ 0x10, /* 00010000 */
+ 0xF0, /* 11110000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+
+ /* 182 0xB6 '\266' */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0xE8, /* 11101000 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+
+ /* 183 0xB7 '\267' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xF8, /* 11111000 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+
+ /* 184 0xB8 '\270' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xF0, /* 11110000 */
+ 0x10, /* 00010000 */
+ 0xF0, /* 11110000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+
+ /* 185 0xB9 '\271' */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0xE8, /* 11101000 */
+ 0x08, /* 00001000 */
+ 0xE8, /* 11101000 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+
+ /* 186 0xBA '\272' */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+
+ /* 187 0xBB '\273' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xF8, /* 11111000 */
+ 0x08, /* 00001000 */
+ 0xE8, /* 11101000 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+
+ /* 188 0xBC '\274' */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0xE8, /* 11101000 */
+ 0x08, /* 00001000 */
+ 0xF8, /* 11111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 189 0xBD '\275' */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0xF8, /* 11111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 190 0xBE '\276' */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0xF0, /* 11110000 */
+ 0x10, /* 00010000 */
+ 0xF0, /* 11110000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 191 0xBF '\277' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xF0, /* 11110000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+
+ /* 192 0xC0 '\300' */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x1C, /* 00011100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 193 0xC1 '\301' */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0xFC, /* 11111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 194 0xC2 '\302' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xFC, /* 11111100 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+
+ /* 195 0xC3 '\303' */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x1C, /* 00011100 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+
+ /* 196 0xC4 '\304' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xFC, /* 11111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 197 0xC5 '\305' */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0xFC, /* 11111100 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+
+ /* 198 0xC6 '\306' */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x1C, /* 00011100 */
+ 0x10, /* 00010000 */
+ 0x1C, /* 00011100 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+
+ /* 199 0xC7 '\307' */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x2C, /* 00101100 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+
+ /* 200 0xC8 '\310' */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x2C, /* 00101100 */
+ 0x20, /* 00100000 */
+ 0x3C, /* 00111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 201 0xC9 '\311' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x3C, /* 00111100 */
+ 0x20, /* 00100000 */
+ 0x2C, /* 00101100 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+
+ /* 202 0xCA '\312' */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0xEC, /* 11101100 */
+ 0x00, /* 00000000 */
+ 0xFC, /* 11111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 203 0xCB '\313' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xFC, /* 11111100 */
+ 0x00, /* 00000000 */
+ 0xEC, /* 11101100 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+
+ /* 204 0xCC '\314' */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x2C, /* 00101100 */
+ 0x20, /* 00100000 */
+ 0x2C, /* 00101100 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+
+ /* 205 0xCD '\315' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xFC, /* 11111100 */
+ 0x00, /* 00000000 */
+ 0xFC, /* 11111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 206 0xCE '\316' */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0xEC, /* 11101100 */
+ 0x00, /* 00000000 */
+ 0xEC, /* 11101100 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+
+ /* 207 0xCF '\317' */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0xFC, /* 11111100 */
+ 0x00, /* 00000000 */
+ 0xFC, /* 11111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 208 0xD0 '\320' */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0xFC, /* 11111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 209 0xD1 '\321' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xFC, /* 11111100 */
+ 0x00, /* 00000000 */
+ 0xFC, /* 11111100 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+
+ /* 210 0xD2 '\322' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xFC, /* 11111100 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+
+ /* 211 0xD3 '\323' */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x3C, /* 00111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 212 0xD4 '\324' */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x1C, /* 00011100 */
+ 0x10, /* 00010000 */
+ 0x1C, /* 00011100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 213 0xD5 '\325' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x1C, /* 00011100 */
+ 0x10, /* 00010000 */
+ 0x1C, /* 00011100 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+
+ /* 214 0xD6 '\326' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x3C, /* 00111100 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+
+ /* 215 0xD7 '\327' */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0xFC, /* 11111100 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+
+ /* 216 0xD8 '\330' */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0xFC, /* 11111100 */
+ 0x10, /* 00010000 */
+ 0xFC, /* 11111100 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+
+ /* 217 0xD9 '\331' */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0xF0, /* 11110000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 218 0xDA '\332' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x1C, /* 00011100 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+
+ /* 219 0xDB '\333' */
+ 0xFC, /* 11111100 */
+ 0xFC, /* 11111100 */
+ 0xFC, /* 11111100 */
+ 0xFC, /* 11111100 */
+ 0xFC, /* 11111100 */
+ 0xFC, /* 11111100 */
+ 0xFC, /* 11111100 */
+ 0xFC, /* 11111100 */
+ 0xFC, /* 11111100 */
+ 0xFC, /* 11111100 */
+
+ /* 220 0xDC '\334' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xFC, /* 11111100 */
+ 0xFC, /* 11111100 */
+ 0xFC, /* 11111100 */
+ 0xFC, /* 11111100 */
+ 0xFC, /* 11111100 */
+
+ /* 221 0xDD '\335' */
+ 0xE0, /* 11100000 */
+ 0xE0, /* 11100000 */
+ 0xE0, /* 11100000 */
+ 0xE0, /* 11100000 */
+ 0xE0, /* 11100000 */
+ 0xE0, /* 11100000 */
+ 0xE0, /* 11100000 */
+ 0xE0, /* 11100000 */
+ 0xE0, /* 11100000 */
+ 0xE0, /* 11100000 */
+
+ /* 222 0xDE '\336' */
+ 0x1C, /* 00011100 */
+ 0x1C, /* 00011100 */
+ 0x1C, /* 00011100 */
+ 0x1C, /* 00011100 */
+ 0x1C, /* 00011100 */
+ 0x1C, /* 00011100 */
+ 0x1C, /* 00011100 */
+ 0x1C, /* 00011100 */
+ 0x1C, /* 00011100 */
+ 0x1C, /* 00011100 */
+
+ /* 223 0xDF '\337' */
+ 0xFC, /* 11111100 */
+ 0xFC, /* 11111100 */
+ 0xFC, /* 11111100 */
+ 0xFC, /* 11111100 */
+ 0xFC, /* 11111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 224 0xE0 '\340' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x34, /* 00110100 */
+ 0x48, /* 01001000 */
+ 0x48, /* 01001000 */
+ 0x48, /* 01001000 */
+ 0x34, /* 00110100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 225 0xE1 '\341' */
+ 0x18, /* 00011000 */
+ 0x24, /* 00100100 */
+ 0x44, /* 01000100 */
+ 0x48, /* 01001000 */
+ 0x48, /* 01001000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x58, /* 01011000 */
+ 0x40, /* 01000000 */
+ 0x00, /* 00000000 */
+
+ /* 226 0xE2 '\342' */
+ 0x00, /* 00000000 */
+ 0x7C, /* 01111100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x40, /* 01000000 */
+ 0x40, /* 01000000 */
+ 0x40, /* 01000000 */
+ 0x40, /* 01000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 227 0xE3 '\343' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7C, /* 01111100 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 228 0xE4 '\344' */
+ 0x00, /* 00000000 */
+ 0x7C, /* 01111100 */
+ 0x24, /* 00100100 */
+ 0x10, /* 00010000 */
+ 0x08, /* 00001000 */
+ 0x10, /* 00010000 */
+ 0x24, /* 00100100 */
+ 0x7C, /* 01111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 229 0xE5 '\345' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x3C, /* 00111100 */
+ 0x48, /* 01001000 */
+ 0x48, /* 01001000 */
+ 0x48, /* 01001000 */
+ 0x30, /* 00110000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 230 0xE6 '\346' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x48, /* 01001000 */
+ 0x48, /* 01001000 */
+ 0x48, /* 01001000 */
+ 0x48, /* 01001000 */
+ 0x74, /* 01110100 */
+ 0x40, /* 01000000 */
+ 0x40, /* 01000000 */
+
+ /* 231 0xE7 '\347' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7C, /* 01111100 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x0C, /* 00001100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 232 0xE8 '\350' */
+ 0x38, /* 00111000 */
+ 0x10, /* 00010000 */
+ 0x38, /* 00111000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x38, /* 00111000 */
+ 0x10, /* 00010000 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 233 0xE9 '\351' */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x7C, /* 01111100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 234 0xEA '\352' */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x28, /* 00101000 */
+ 0x6C, /* 01101100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 235 0xEB '\353' */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x20, /* 00100000 */
+ 0x18, /* 00011000 */
+ 0x24, /* 00100100 */
+ 0x24, /* 00100100 */
+ 0x24, /* 00100100 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 236 0xEC '\354' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x54, /* 01010100 */
+ 0x54, /* 01010100 */
+ 0x54, /* 01010100 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 237 0xED '\355' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x04, /* 00000100 */
+ 0x38, /* 00111000 */
+ 0x54, /* 01010100 */
+ 0x54, /* 01010100 */
+ 0x38, /* 00111000 */
+ 0x40, /* 01000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 238 0xEE '\356' */
+ 0x00, /* 00000000 */
+ 0x3C, /* 00111100 */
+ 0x40, /* 01000000 */
+ 0x40, /* 01000000 */
+ 0x38, /* 00111000 */
+ 0x40, /* 01000000 */
+ 0x40, /* 01000000 */
+ 0x3C, /* 00111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 239 0xEF '\357' */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 240 0xF0 '\360' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xFC, /* 11111100 */
+ 0x00, /* 00000000 */
+ 0xFC, /* 11111100 */
+ 0x00, /* 00000000 */
+ 0xFC, /* 11111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 241 0xF1 '\361' */
+ 0x00, /* 00000000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x7C, /* 01111100 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x7C, /* 01111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 242 0xF2 '\362' */
+ 0x00, /* 00000000 */
+ 0x20, /* 00100000 */
+ 0x10, /* 00010000 */
+ 0x08, /* 00001000 */
+ 0x10, /* 00010000 */
+ 0x20, /* 00100000 */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 243 0xF3 '\363' */
+ 0x00, /* 00000000 */
+ 0x08, /* 00001000 */
+ 0x10, /* 00010000 */
+ 0x20, /* 00100000 */
+ 0x10, /* 00010000 */
+ 0x08, /* 00001000 */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 244 0xF4 '\364' */
+ 0x00, /* 00000000 */
+ 0x0C, /* 00001100 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+
+ /* 245 0xF5 '\365' */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x60, /* 01100000 */
+ 0x00, /* 00000000 */
+
+ /* 246 0xF6 '\366' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x7C, /* 01111100 */
+ 0x00, /* 00000000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 247 0xF7 '\367' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x20, /* 00100000 */
+ 0x54, /* 01010100 */
+ 0x08, /* 00001000 */
+ 0x20, /* 00100000 */
+ 0x54, /* 01010100 */
+ 0x08, /* 00001000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 248 0xF8 '\370' */
+ 0x30, /* 00110000 */
+ 0x48, /* 01001000 */
+ 0x48, /* 01001000 */
+ 0x30, /* 00110000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 249 0xF9 '\371' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x10, /* 00010000 */
+ 0x38, /* 00111000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 250 0xFA '\372' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 251 0xFB '\373' */
+ 0x00, /* 00000000 */
+ 0x04, /* 00000100 */
+ 0x08, /* 00001000 */
+ 0x08, /* 00001000 */
+ 0x50, /* 01010000 */
+ 0x50, /* 01010000 */
+ 0x20, /* 00100000 */
+ 0x20, /* 00100000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 252 0xFC '\374' */
+ 0x60, /* 01100000 */
+ 0x50, /* 01010000 */
+ 0x50, /* 01010000 */
+ 0x50, /* 01010000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 253 0xFD '\375' */
+ 0x60, /* 01100000 */
+ 0x10, /* 00010000 */
+ 0x20, /* 00100000 */
+ 0x70, /* 01110000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 254 0xFE '\376' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x38, /* 00111000 */
+ 0x38, /* 00111000 */
+ 0x38, /* 00111000 */
+ 0x38, /* 00111000 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 255 0xFF '\377' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+} };
+
+const struct font_desc font_6x10 = {
+ .idx = FONT6x10_IDX,
+ .name = "6x10",
+ .width = 6,
+ .height = 10,
+ .charcount = 256,
+ .data = fontdata_6x10.data,
+ .pref = 0,
+};
diff --git a/lib/fonts/font_6x11.c b/lib/fonts/font_6x11.c
new file mode 100644
index 000000000..bd76b3f6b
--- /dev/null
+++ b/lib/fonts/font_6x11.c
@@ -0,0 +1,3353 @@
+// SPDX-License-Identifier: GPL-2.0
+/**********************************************/
+/* */
+/* Font file generated by rthelen */
+/* */
+/**********************************************/
+
+#include <linux/font.h>
+
+#define FONTDATAMAX (11*256)
+
+static const struct font_data fontdata_6x11 = {
+ { 0, 0, FONTDATAMAX, 0 }, {
+ /* 0 0x00 '^@' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 1 0x01 '^A' */
+ 0x00, /* 00000000 */
+ 0x78, /* 01111000 */
+ 0x84, /* 10000100 */
+ 0xcc, /* 11001100 */
+ 0x84, /* 10000100 */
+ 0xb4, /* 10110100 */
+ 0x84, /* 10000100 */
+ 0x78, /* 01111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 2 0x02 '^B' */
+ 0x00, /* 00000000 */
+ 0x78, /* 01111000 */
+ 0xfc, /* 11111100 */
+ 0xb4, /* 10110100 */
+ 0xfc, /* 11111100 */
+ 0xcc, /* 11001100 */
+ 0xfc, /* 11111100 */
+ 0x78, /* 01111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 3 0x03 '^C' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x28, /* 00101000 */
+ 0x7c, /* 01111100 */
+ 0x7c, /* 01111100 */
+ 0x38, /* 00111000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 4 0x04 '^D' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x10, /* 00010000 */
+ 0x38, /* 00111000 */
+ 0x7c, /* 01111100 */
+ 0x38, /* 00111000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 5 0x05 '^E' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x38, /* 00111000 */
+ 0x6c, /* 01101100 */
+ 0x6c, /* 01101100 */
+ 0x10, /* 00010000 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 6 0x06 '^F' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x10, /* 00010000 */
+ 0x38, /* 00111000 */
+ 0x7c, /* 01111100 */
+ 0x7c, /* 01111100 */
+ 0x10, /* 00010000 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 7 0x07 '^G' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x30, /* 00110000 */
+ 0x78, /* 01111000 */
+ 0x30, /* 00110000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 8 0x08 '^H' */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xcf, /* 11001111 */
+ 0x87, /* 10000111 */
+ 0xcf, /* 11001111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+
+ /* 9 0x09 '^I' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x30, /* 00110000 */
+ 0x48, /* 01001000 */
+ 0x84, /* 10000100 */
+ 0x48, /* 01001000 */
+ 0x30, /* 00110000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 10 0x0a '^J' */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xcf, /* 11001111 */
+ 0xb7, /* 10110111 */
+ 0x7b, /* 01111011 */
+ 0xb7, /* 10110111 */
+ 0xcf, /* 11001111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+
+ /* 11 0x0b '^K' */
+ 0x00, /* 00000000 */
+ 0x3c, /* 00111100 */
+ 0x14, /* 00010100 */
+ 0x20, /* 00100000 */
+ 0x78, /* 01111000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 12 0x0c '^L' */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x38, /* 00111000 */
+ 0x10, /* 00010000 */
+ 0x7c, /* 01111100 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 13 0x0d '^M' */
+ 0x00, /* 00000000 */
+ 0x3c, /* 00111100 */
+ 0x24, /* 00100100 */
+ 0x3c, /* 00111100 */
+ 0x20, /* 00100000 */
+ 0x20, /* 00100000 */
+ 0xe0, /* 11100000 */
+ 0xc0, /* 11000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 14 0x0e '^N' */
+ 0x00, /* 00000000 */
+ 0x7c, /* 01111100 */
+ 0x44, /* 01000100 */
+ 0x7c, /* 01111100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 15 0x0f '^O' */
+ 0x00, /* 00000000 */
+ 0x10, /* 00010000 */
+ 0x54, /* 01010100 */
+ 0x38, /* 00111000 */
+ 0x6c, /* 01101100 */
+ 0x38, /* 00111000 */
+ 0x54, /* 01010100 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 16 0x10 '^P' */
+ 0x00, /* 00000000 */
+ 0x40, /* 01000000 */
+ 0x60, /* 01100000 */
+ 0x70, /* 01110000 */
+ 0x7c, /* 01111100 */
+ 0x70, /* 01110000 */
+ 0x60, /* 01100000 */
+ 0x40, /* 01000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 17 0x11 '^Q' */
+ 0x00, /* 00000000 */
+ 0x04, /* 00000100 */
+ 0x0c, /* 00001100 */
+ 0x1c, /* 00011100 */
+ 0x7c, /* 01111100 */
+ 0x1c, /* 00011100 */
+ 0x0c, /* 00001100 */
+ 0x04, /* 00000100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 18 0x12 '^R' */
+ 0x00, /* 00000000 */
+ 0x10, /* 00010000 */
+ 0x38, /* 00111000 */
+ 0x54, /* 01010100 */
+ 0x10, /* 00010000 */
+ 0x54, /* 01010100 */
+ 0x38, /* 00111000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 19 0x13 '^S' */
+ 0x00, /* 00000000 */
+ 0x48, /* 01001000 */
+ 0x48, /* 01001000 */
+ 0x48, /* 01001000 */
+ 0x48, /* 01001000 */
+ 0x48, /* 01001000 */
+ 0x00, /* 00000000 */
+ 0x48, /* 01001000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 20 0x14 '^T' */
+ 0x3c, /* 00111100 */
+ 0x54, /* 01010100 */
+ 0x54, /* 01010100 */
+ 0x54, /* 01010100 */
+ 0x3c, /* 00111100 */
+ 0x14, /* 00010100 */
+ 0x14, /* 00010100 */
+ 0x14, /* 00010100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 21 0x15 '^U' */
+ 0x38, /* 00111000 */
+ 0x44, /* 01000100 */
+ 0x24, /* 00100100 */
+ 0x50, /* 01010000 */
+ 0x48, /* 01001000 */
+ 0x24, /* 00100100 */
+ 0x14, /* 00010100 */
+ 0x48, /* 01001000 */
+ 0x44, /* 01000100 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+
+ /* 22 0x16 '^V' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xf8, /* 11111000 */
+ 0xf8, /* 11111000 */
+ 0xf8, /* 11111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 23 0x17 '^W' */
+ 0x00, /* 00000000 */
+ 0x10, /* 00010000 */
+ 0x38, /* 00111000 */
+ 0x54, /* 01010100 */
+ 0x10, /* 00010000 */
+ 0x54, /* 01010100 */
+ 0x38, /* 00111000 */
+ 0x10, /* 00010000 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 24 0x18 '^X' */
+ 0x00, /* 00000000 */
+ 0x10, /* 00010000 */
+ 0x38, /* 00111000 */
+ 0x54, /* 01010100 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 25 0x19 '^Y' */
+ 0x00, /* 00000000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x54, /* 01010100 */
+ 0x38, /* 00111000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 26 0x1a '^Z' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x10, /* 00010000 */
+ 0x08, /* 00001000 */
+ 0x7c, /* 01111100 */
+ 0x08, /* 00001000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 27 0x1b '^[' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x10, /* 00010000 */
+ 0x20, /* 00100000 */
+ 0x7c, /* 01111100 */
+ 0x20, /* 00100000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 28 0x1c '^\' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x40, /* 01000000 */
+ 0x40, /* 01000000 */
+ 0x40, /* 01000000 */
+ 0x78, /* 01111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 29 0x1d '^]' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x48, /* 01001000 */
+ 0x84, /* 10000100 */
+ 0xfc, /* 11111100 */
+ 0x84, /* 10000100 */
+ 0x48, /* 01001000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 30 0x1e '^^' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x38, /* 00111000 */
+ 0x38, /* 00111000 */
+ 0x7c, /* 01111100 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 31 0x1f '^`' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7c, /* 01111100 */
+ 0x7c, /* 01111100 */
+ 0x38, /* 00111000 */
+ 0x38, /* 00111000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 32 0x20 ' ' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 33 0x21 '!' */
+ 0x00, /* 00000000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 34 0x22 '"' */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 35 0x23 '#' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x28, /* 00101000 */
+ 0x7c, /* 01111100 */
+ 0x28, /* 00101000 */
+ 0x28, /* 00101000 */
+ 0x7c, /* 01111100 */
+ 0x28, /* 00101000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 36 0x24 '$' */
+ 0x10, /* 00010000 */
+ 0x38, /* 00111000 */
+ 0x54, /* 01010100 */
+ 0x50, /* 01010000 */
+ 0x38, /* 00111000 */
+ 0x14, /* 00010100 */
+ 0x54, /* 01010100 */
+ 0x38, /* 00111000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 37 0x25 '%' */
+ 0x00, /* 00000000 */
+ 0x64, /* 01100100 */
+ 0x64, /* 01100100 */
+ 0x08, /* 00001000 */
+ 0x10, /* 00010000 */
+ 0x20, /* 00100000 */
+ 0x4c, /* 01001100 */
+ 0x4c, /* 01001100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 38 0x26 '&' */
+ 0x00, /* 00000000 */
+ 0x30, /* 00110000 */
+ 0x48, /* 01001000 */
+ 0x50, /* 01010000 */
+ 0x20, /* 00100000 */
+ 0x54, /* 01010100 */
+ 0x48, /* 01001000 */
+ 0x34, /* 00110100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 39 0x27 ''' */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 40 0x28 '(' */
+ 0x04, /* 00000100 */
+ 0x08, /* 00001000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x08, /* 00001000 */
+ 0x04, /* 00000100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 41 0x29 ')' */
+ 0x20, /* 00100000 */
+ 0x10, /* 00010000 */
+ 0x08, /* 00001000 */
+ 0x08, /* 00001000 */
+ 0x08, /* 00001000 */
+ 0x08, /* 00001000 */
+ 0x08, /* 00001000 */
+ 0x10, /* 00010000 */
+ 0x20, /* 00100000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 42 0x2a '*' */
+ 0x00, /* 00000000 */
+ 0x10, /* 00010000 */
+ 0x54, /* 01010100 */
+ 0x38, /* 00111000 */
+ 0x54, /* 01010100 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 43 0x2b '+' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x7c, /* 01111100 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 44 0x2c ',' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x30, /* 00110000 */
+ 0x30, /* 00110000 */
+ 0x10, /* 00010000 */
+ 0x20, /* 00100000 */
+ 0x00, /* 00000000 */
+
+ /* 45 0x2d '-' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 46 0x2e '.' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 47 0x2f '/' */
+ 0x04, /* 00000100 */
+ 0x04, /* 00000100 */
+ 0x08, /* 00001000 */
+ 0x08, /* 00001000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x20, /* 00100000 */
+ 0x20, /* 00100000 */
+ 0x40, /* 01000000 */
+ 0x40, /* 01000000 */
+ 0x00, /* 00000000 */
+
+ /* 48 0x30 '0' */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x44, /* 01000100 */
+ 0x4c, /* 01001100 */
+ 0x54, /* 01010100 */
+ 0x64, /* 01100100 */
+ 0x44, /* 01000100 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 49 0x31 '1' */
+ 0x00, /* 00000000 */
+ 0x08, /* 00001000 */
+ 0x18, /* 00011000 */
+ 0x08, /* 00001000 */
+ 0x08, /* 00001000 */
+ 0x08, /* 00001000 */
+ 0x08, /* 00001000 */
+ 0x1c, /* 00011100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 50 0x32 '2' */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x44, /* 01000100 */
+ 0x04, /* 00000100 */
+ 0x08, /* 00001000 */
+ 0x10, /* 00010000 */
+ 0x20, /* 00100000 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 51 0x33 '3' */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x44, /* 01000100 */
+ 0x04, /* 00000100 */
+ 0x18, /* 00011000 */
+ 0x04, /* 00000100 */
+ 0x44, /* 01000100 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 52 0x34 '4' */
+ 0x00, /* 00000000 */
+ 0x08, /* 00001000 */
+ 0x18, /* 00011000 */
+ 0x28, /* 00101000 */
+ 0x48, /* 01001000 */
+ 0x7c, /* 01111100 */
+ 0x08, /* 00001000 */
+ 0x1c, /* 00011100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 53 0x35 '5' */
+ 0x00, /* 00000000 */
+ 0x7c, /* 01111100 */
+ 0x40, /* 01000000 */
+ 0x78, /* 01111000 */
+ 0x04, /* 00000100 */
+ 0x04, /* 00000100 */
+ 0x44, /* 01000100 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 54 0x36 '6' */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x40, /* 01000000 */
+ 0x78, /* 01111000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 55 0x37 '7' */
+ 0x00, /* 00000000 */
+ 0x7c, /* 01111100 */
+ 0x04, /* 00000100 */
+ 0x04, /* 00000100 */
+ 0x08, /* 00001000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 56 0x38 '8' */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x38, /* 00111000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 57 0x39 '9' */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x3c, /* 00111100 */
+ 0x04, /* 00000100 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 58 0x3a ':' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 59 0x3b ';' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x30, /* 00110000 */
+ 0x30, /* 00110000 */
+ 0x00, /* 00000000 */
+ 0x30, /* 00110000 */
+ 0x30, /* 00110000 */
+ 0x10, /* 00010000 */
+ 0x20, /* 00100000 */
+ 0x00, /* 00000000 */
+
+ /* 60 0x3c '<' */
+ 0x00, /* 00000000 */
+ 0x04, /* 00000100 */
+ 0x08, /* 00001000 */
+ 0x10, /* 00010000 */
+ 0x20, /* 00100000 */
+ 0x10, /* 00010000 */
+ 0x08, /* 00001000 */
+ 0x04, /* 00000100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 61 0x3d '=' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 62 0x3e '>' */
+ 0x00, /* 00000000 */
+ 0x20, /* 00100000 */
+ 0x10, /* 00010000 */
+ 0x08, /* 00001000 */
+ 0x04, /* 00000100 */
+ 0x08, /* 00001000 */
+ 0x10, /* 00010000 */
+ 0x20, /* 00100000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 63 0x3f '?' */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x44, /* 01000100 */
+ 0x04, /* 00000100 */
+ 0x08, /* 00001000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 64 0x40 '@' */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x44, /* 01000100 */
+ 0x74, /* 01110100 */
+ 0x54, /* 01010100 */
+ 0x78, /* 01111000 */
+ 0x40, /* 01000000 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 65 0x41 'A' */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x7c, /* 01111100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 66 0x42 'B' */
+ 0x00, /* 00000000 */
+ 0x78, /* 01111000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x78, /* 01111000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x78, /* 01111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 67 0x43 'C' */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x44, /* 01000100 */
+ 0x40, /* 01000000 */
+ 0x40, /* 01000000 */
+ 0x40, /* 01000000 */
+ 0x44, /* 01000100 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 68 0x44 'D' */
+ 0x00, /* 00000000 */
+ 0x78, /* 01111000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x78, /* 01111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 69 0x45 'E' */
+ 0x00, /* 00000000 */
+ 0x7c, /* 01111100 */
+ 0x40, /* 01000000 */
+ 0x40, /* 01000000 */
+ 0x78, /* 01111000 */
+ 0x40, /* 01000000 */
+ 0x40, /* 01000000 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 70 0x46 'F' */
+ 0x00, /* 00000000 */
+ 0x7c, /* 01111100 */
+ 0x40, /* 01000000 */
+ 0x40, /* 01000000 */
+ 0x78, /* 01111000 */
+ 0x40, /* 01000000 */
+ 0x40, /* 01000000 */
+ 0x40, /* 01000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 71 0x47 'G' */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x44, /* 01000100 */
+ 0x40, /* 01000000 */
+ 0x4c, /* 01001100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 72 0x48 'H' */
+ 0x00, /* 00000000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x7c, /* 01111100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 73 0x49 'I' */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 74 0x4a 'J' */
+ 0x00, /* 00000000 */
+ 0x04, /* 00000100 */
+ 0x04, /* 00000100 */
+ 0x04, /* 00000100 */
+ 0x04, /* 00000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 75 0x4b 'K' */
+ 0x00, /* 00000000 */
+ 0x44, /* 01000100 */
+ 0x48, /* 01001000 */
+ 0x50, /* 01010000 */
+ 0x60, /* 01100000 */
+ 0x50, /* 01010000 */
+ 0x48, /* 01001000 */
+ 0x44, /* 01000100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 76 0x4c 'L' */
+ 0x00, /* 00000000 */
+ 0x40, /* 01000000 */
+ 0x40, /* 01000000 */
+ 0x40, /* 01000000 */
+ 0x40, /* 01000000 */
+ 0x40, /* 01000000 */
+ 0x40, /* 01000000 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 77 0x4d 'M' */
+ 0x00, /* 00000000 */
+ 0x44, /* 01000100 */
+ 0x6c, /* 01101100 */
+ 0x54, /* 01010100 */
+ 0x54, /* 01010100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 78 0x4e 'N' */
+ 0x00, /* 00000000 */
+ 0x44, /* 01000100 */
+ 0x64, /* 01100100 */
+ 0x54, /* 01010100 */
+ 0x4c, /* 01001100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 79 0x4f 'O' */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 80 0x50 'P' */
+ 0x00, /* 00000000 */
+ 0x78, /* 01111000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x78, /* 01111000 */
+ 0x40, /* 01000000 */
+ 0x40, /* 01000000 */
+ 0x40, /* 01000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 81 0x51 'Q' */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x54, /* 01010100 */
+ 0x38, /* 00111000 */
+ 0x04, /* 00000100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 82 0x52 'R' */
+ 0x00, /* 00000000 */
+ 0x78, /* 01111000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x78, /* 01111000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 83 0x53 'S' */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x44, /* 01000100 */
+ 0x40, /* 01000000 */
+ 0x38, /* 00111000 */
+ 0x04, /* 00000100 */
+ 0x44, /* 01000100 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 84 0x54 'T' */
+ 0x00, /* 00000000 */
+ 0x7c, /* 01111100 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 85 0x55 'U' */
+ 0x00, /* 00000000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 86 0x56 'V' */
+ 0x00, /* 00000000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x28, /* 00101000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 87 0x57 'W' */
+ 0x00, /* 00000000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x54, /* 01010100 */
+ 0x54, /* 01010100 */
+ 0x6c, /* 01101100 */
+ 0x44, /* 01000100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 88 0x58 'X' */
+ 0x00, /* 00000000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x28, /* 00101000 */
+ 0x10, /* 00010000 */
+ 0x28, /* 00101000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 89 0x59 'Y' */
+ 0x00, /* 00000000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x28, /* 00101000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 90 0x5a 'Z' */
+ 0x00, /* 00000000 */
+ 0x7c, /* 01111100 */
+ 0x04, /* 00000100 */
+ 0x08, /* 00001000 */
+ 0x10, /* 00010000 */
+ 0x20, /* 00100000 */
+ 0x40, /* 01000000 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 91 0x5b '[' */
+ 0x0c, /* 00001100 */
+ 0x08, /* 00001000 */
+ 0x08, /* 00001000 */
+ 0x08, /* 00001000 */
+ 0x08, /* 00001000 */
+ 0x08, /* 00001000 */
+ 0x08, /* 00001000 */
+ 0x08, /* 00001000 */
+ 0x0c, /* 00001100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 92 0x5c '\' */
+ 0x40, /* 01000000 */
+ 0x40, /* 01000000 */
+ 0x20, /* 00100000 */
+ 0x20, /* 00100000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x08, /* 00001000 */
+ 0x08, /* 00001000 */
+ 0x04, /* 00000100 */
+ 0x04, /* 00000100 */
+ 0x00, /* 00000000 */
+
+ /* 93 0x5d ']' */
+ 0x30, /* 00110000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x30, /* 00110000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 94 0x5e '^' */
+ 0x00, /* 00000000 */
+ 0x10, /* 00010000 */
+ 0x28, /* 00101000 */
+ 0x44, /* 01000100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 95 0x5f '_' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xfc, /* 11111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 96 0x60 '`' */
+ 0x20, /* 00100000 */
+ 0x10, /* 00010000 */
+ 0x08, /* 00001000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 97 0x61 'a' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x3c, /* 00111100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x4c, /* 01001100 */
+ 0x34, /* 00110100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 98 0x62 'b' */
+ 0x00, /* 00000000 */
+ 0x40, /* 01000000 */
+ 0x40, /* 01000000 */
+ 0x78, /* 01111000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x78, /* 01111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 99 0x63 'c' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x44, /* 01000100 */
+ 0x40, /* 01000000 */
+ 0x44, /* 01000100 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 100 0x64 'd' */
+ 0x00, /* 00000000 */
+ 0x04, /* 00000100 */
+ 0x04, /* 00000100 */
+ 0x3c, /* 00111100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x3c, /* 00111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 101 0x65 'e' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x44, /* 01000100 */
+ 0x7c, /* 01111100 */
+ 0x40, /* 01000000 */
+ 0x3c, /* 00111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 102 0x66 'f' */
+ 0x00, /* 00000000 */
+ 0x0c, /* 00001100 */
+ 0x10, /* 00010000 */
+ 0x38, /* 00111000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 103 0x67 'g' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x3c, /* 00111100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x3c, /* 00111100 */
+ 0x04, /* 00000100 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+
+ /* 104 0x68 'h' */
+ 0x00, /* 00000000 */
+ 0x40, /* 01000000 */
+ 0x40, /* 01000000 */
+ 0x78, /* 01111000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 105 0x69 'i' */
+ 0x00, /* 00000000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x30, /* 00110000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 106 0x6a 'j' */
+ 0x00, /* 00000000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x30, /* 00110000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x60, /* 01100000 */
+ 0x00, /* 00000000 */
+
+ /* 107 0x6b 'k' */
+ 0x00, /* 00000000 */
+ 0x40, /* 01000000 */
+ 0x40, /* 01000000 */
+ 0x48, /* 01001000 */
+ 0x50, /* 01010000 */
+ 0x70, /* 01110000 */
+ 0x48, /* 01001000 */
+ 0x44, /* 01000100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 108 0x6c 'l' */
+ 0x00, /* 00000000 */
+ 0x30, /* 00110000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 109 0x6d 'm' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x78, /* 01111000 */
+ 0x54, /* 01010100 */
+ 0x54, /* 01010100 */
+ 0x54, /* 01010100 */
+ 0x54, /* 01010100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 110 0x6e 'n' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x58, /* 01011000 */
+ 0x64, /* 01100100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 111 0x6f 'o' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 112 0x70 'p' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x78, /* 01111000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x78, /* 01111000 */
+ 0x40, /* 01000000 */
+ 0x40, /* 01000000 */
+ 0x00, /* 00000000 */
+
+ /* 113 0x71 'q' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x3c, /* 00111100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x3c, /* 00111100 */
+ 0x04, /* 00000100 */
+ 0x04, /* 00000100 */
+ 0x00, /* 00000000 */
+
+ /* 114 0x72 'r' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x58, /* 01011000 */
+ 0x64, /* 01100100 */
+ 0x40, /* 01000000 */
+ 0x40, /* 01000000 */
+ 0x40, /* 01000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 115 0x73 's' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x3c, /* 00111100 */
+ 0x40, /* 01000000 */
+ 0x38, /* 00111000 */
+ 0x04, /* 00000100 */
+ 0x78, /* 01111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 116 0x74 't' */
+ 0x00, /* 00000000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x38, /* 00111000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x10, /* 00010000 */
+ 0x0c, /* 00001100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 117 0x75 'u' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x4c, /* 01001100 */
+ 0x34, /* 00110100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 118 0x76 'v' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x44, /* 01000100 */
+ 0x28, /* 00101000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 119 0x77 'w' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x54, /* 01010100 */
+ 0x54, /* 01010100 */
+ 0x54, /* 01010100 */
+ 0x54, /* 01010100 */
+ 0x28, /* 00101000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 120 0x78 'x' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x44, /* 01000100 */
+ 0x28, /* 00101000 */
+ 0x10, /* 00010000 */
+ 0x28, /* 00101000 */
+ 0x44, /* 01000100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 121 0x79 'y' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x44, /* 0 000 00 */
+ 0x44, /* 0 000 00 */
+ 0x44, /* 0 000 00 */
+ 0x44, /* 0 000 00 */
+ 0x3c, /* 00 00 */
+ 0x04, /* 00000 00 */
+ 0x38, /* 00 000 */
+ 0x00, /* 00000000 */
+
+ /* 122 0x7a 'z' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7c, /* 0 00 */
+ 0x08, /* 0000 000 */
+ 0x10, /* 000 0000 */
+ 0x20, /* 00 00000 */
+ 0x7c, /* 0 00 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 123 0x7b '{' */
+ 0x04, /* 00000 00 */
+ 0x08, /* 0000 000 */
+ 0x08, /* 0000 000 */
+ 0x08, /* 0000 000 */
+ 0x08, /* 0000 000 */
+ 0x10, /* 000 0000 */
+ 0x08, /* 0000 000 */
+ 0x08, /* 0000 000 */
+ 0x08, /* 0000 000 */
+ 0x08, /* 0000 000 */
+ 0x04, /* 00000 00 */
+
+ /* 124 0x7c '|' */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 125 0x7d '}' */
+ 0x20, /* 00 00000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x08, /* 0000 000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x20, /* 00 00000 */
+
+ /* 126 0x7e '~' */
+ 0x00, /* 00000000 */
+ 0x34, /* 00 0 00 */
+ 0x58, /* 0 0 000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 127 0x7f '^?' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x10, /* 000 0000 */
+ 0x28, /* 00 0 000 */
+ 0x44, /* 0 000 00 */
+ 0x44, /* 0 000 00 */
+ 0x7c, /* 0 00 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 128 0x80 '\200' */
+ 0x00, /* 00000000 */
+ 0x38, /* 00 000 */
+ 0x44, /* 0 000 00 */
+ 0x40, /* 0 000000 */
+ 0x40, /* 0 000000 */
+ 0x40, /* 0 000000 */
+ 0x44, /* 0 000 00 */
+ 0x38, /* 00 000 */
+ 0x10, /* 000 0000 */
+ 0x20, /* 00 00000 */
+ 0x00, /* 00000000 */
+
+ /* 129 0x81 '\201' */
+ 0x00, /* 00000000 */
+ 0x28, /* 00 0 000 */
+ 0x00, /* 00000000 */
+ 0x44, /* 0 000 00 */
+ 0x44, /* 0 000 00 */
+ 0x44, /* 0 000 00 */
+ 0x4c, /* 0 00 00 */
+ 0x34, /* 00 0 00 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 130 0x82 '\202' */
+ 0x08, /* 0000 000 */
+ 0x10, /* 000 0000 */
+ 0x00, /* 00000000 */
+ 0x38, /* 00 000 */
+ 0x44, /* 0 000 00 */
+ 0x7c, /* 0 00 */
+ 0x40, /* 0 000000 */
+ 0x3c, /* 00 00 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 131 0x83 '\203' */
+ 0x10, /* 000 0000 */
+ 0x28, /* 00 0 000 */
+ 0x00, /* 00000000 */
+ 0x3c, /* 00 00 */
+ 0x44, /* 0 000 00 */
+ 0x44, /* 0 000 00 */
+ 0x4c, /* 0 00 00 */
+ 0x34, /* 00 0 00 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 132 0x84 '\204' */
+ 0x00, /* 00000000 */
+ 0x28, /* 00 0 000 */
+ 0x00, /* 00000000 */
+ 0x3c, /* 00 00 */
+ 0x44, /* 0 000 00 */
+ 0x44, /* 0 000 00 */
+ 0x4c, /* 0 00 00 */
+ 0x34, /* 00 0 00 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 133 0x85 '\205' */
+ 0x10, /* 000 0000 */
+ 0x08, /* 0000 000 */
+ 0x00, /* 00000000 */
+ 0x3c, /* 00 00 */
+ 0x44, /* 0 000 00 */
+ 0x44, /* 0 000 00 */
+ 0x4c, /* 0 00 00 */
+ 0x34, /* 00 0 00 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 134 0x86 '\206' */
+ 0x18, /* 000 000 */
+ 0x24, /* 00 00 00 */
+ 0x18, /* 000 000 */
+ 0x3c, /* 00 00 */
+ 0x44, /* 0 000 00 */
+ 0x44, /* 0 000 00 */
+ 0x4c, /* 0 00 00 */
+ 0x34, /* 00 0 00 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 135 0x87 '\207' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x38, /* 00 000 */
+ 0x44, /* 0 000 00 */
+ 0x40, /* 0 000000 */
+ 0x40, /* 0 000000 */
+ 0x3c, /* 00 00 */
+ 0x10, /* 000 0000 */
+ 0x20, /* 00 00000 */
+ 0x00, /* 00000000 */
+
+ /* 136 0x88 '\210' */
+ 0x10, /* 000 0000 */
+ 0x28, /* 00 0 000 */
+ 0x00, /* 00000000 */
+ 0x38, /* 00 000 */
+ 0x44, /* 0 000 00 */
+ 0x7c, /* 0 00 */
+ 0x40, /* 0 000000 */
+ 0x3c, /* 00 00 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 137 0x89 '\211' */
+ 0x00, /* 00000000 */
+ 0x28, /* 00 0 000 */
+ 0x00, /* 00000000 */
+ 0x38, /* 00 000 */
+ 0x44, /* 0 000 00 */
+ 0x7c, /* 0 00 */
+ 0x40, /* 0 000000 */
+ 0x3c, /* 00 00 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 138 0x8a '\212' */
+ 0x20, /* 00 00000 */
+ 0x10, /* 000 0000 */
+ 0x00, /* 00000000 */
+ 0x38, /* 00 000 */
+ 0x44, /* 0 000 00 */
+ 0x7c, /* 0 00 */
+ 0x40, /* 0 000000 */
+ 0x3c, /* 00 00 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 139 0x8b '\213' */
+ 0x00, /* 00000000 */
+ 0x28, /* 00 0 000 */
+ 0x00, /* 00000000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 140 0x8c '\214' */
+ 0x10, /* 000 0000 */
+ 0x28, /* 00 0 000 */
+ 0x00, /* 00000000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 141 0x8d '\215' */
+ 0x20, /* 00 00000 */
+ 0x10, /* 000 0000 */
+ 0x00, /* 00000000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 142 0x8e '\216' */
+ 0x84, /* 0000 00 */
+ 0x38, /* 00 000 */
+ 0x44, /* 0 000 00 */
+ 0x44, /* 0 000 00 */
+ 0x7c, /* 0 00 */
+ 0x44, /* 0 000 00 */
+ 0x44, /* 0 000 00 */
+ 0x44, /* 0 000 00 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 143 0x8f '\217' */
+ 0x58, /* 0 0 000 */
+ 0x38, /* 00 000 */
+ 0x44, /* 0 000 00 */
+ 0x44, /* 0 000 00 */
+ 0x7c, /* 0 00 */
+ 0x44, /* 0 000 00 */
+ 0x44, /* 0 000 00 */
+ 0x44, /* 0 000 00 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 144 0x90 '\220' */
+ 0x10, /* 000 0000 */
+ 0x7c, /* 0 00 */
+ 0x40, /* 0 000000 */
+ 0x40, /* 0 000000 */
+ 0x78, /* 0 000 */
+ 0x40, /* 0 000000 */
+ 0x40, /* 0 000000 */
+ 0x7c, /* 0 00 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 145 0x91 '\221' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x38, /* 00 000 */
+ 0x54, /* 0 0 0 00 */
+ 0x5c, /* 0 0 00 */
+ 0x50, /* 0 0 0000 */
+ 0x3c, /* 00 00 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 146 0x92 '\222' */
+ 0x00, /* 00000000 */
+ 0x3c, /* 00 00 */
+ 0x50, /* 0 0 0000 */
+ 0x50, /* 0 0 0000 */
+ 0x78, /* 0 000 */
+ 0x50, /* 0 0 0000 */
+ 0x50, /* 0 0 0000 */
+ 0x5c, /* 0 0 00 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 147 0x93 '\223' */
+ 0x10, /* 000 0000 */
+ 0x28, /* 00 0 000 */
+ 0x00, /* 00000000 */
+ 0x38, /* 00 000 */
+ 0x44, /* 0 000 00 */
+ 0x44, /* 0 000 00 */
+ 0x44, /* 0 000 00 */
+ 0x38, /* 00 000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 148 0x94 '\224' */
+ 0x00, /* 00000000 */
+ 0x28, /* 00 0 000 */
+ 0x00, /* 00000000 */
+ 0x38, /* 00 000 */
+ 0x44, /* 0 000 00 */
+ 0x44, /* 0 000 00 */
+ 0x44, /* 0 000 00 */
+ 0x38, /* 00 000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 149 0x95 '\225' */
+ 0x20, /* 00 00000 */
+ 0x10, /* 000 0000 */
+ 0x00, /* 00000000 */
+ 0x38, /* 00 000 */
+ 0x44, /* 0 000 00 */
+ 0x44, /* 0 000 00 */
+ 0x44, /* 0 000 00 */
+ 0x38, /* 00 000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 150 0x96 '\226' */
+ 0x10, /* 000 0000 */
+ 0x28, /* 00 0 000 */
+ 0x00, /* 00000000 */
+ 0x44, /* 0 000 00 */
+ 0x44, /* 0 000 00 */
+ 0x44, /* 0 000 00 */
+ 0x4c, /* 0 00 00 */
+ 0x34, /* 00 0 00 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 151 0x97 '\227' */
+ 0x20, /* 00 00000 */
+ 0x10, /* 000 0000 */
+ 0x00, /* 00000000 */
+ 0x44, /* 0 000 00 */
+ 0x44, /* 0 000 00 */
+ 0x44, /* 0 000 00 */
+ 0x4c, /* 0 00 00 */
+ 0x34, /* 00 0 00 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 152 0x98 '\230' */
+ 0x00, /* 00000000 */
+ 0x28, /* 00 0 000 */
+ 0x00, /* 00000000 */
+ 0x44, /* 0 000 00 */
+ 0x44, /* 0 000 00 */
+ 0x44, /* 0 000 00 */
+ 0x44, /* 0 000 00 */
+ 0x3c, /* 00 00 */
+ 0x04, /* 00000 00 */
+ 0x38, /* 00 000 */
+ 0x00, /* 00000000 */
+
+ /* 153 0x99 '\231' */
+ 0x84, /* 0000 00 */
+ 0x38, /* 00 000 */
+ 0x44, /* 0 000 00 */
+ 0x44, /* 0 000 00 */
+ 0x44, /* 0 000 00 */
+ 0x44, /* 0 000 00 */
+ 0x44, /* 0 000 00 */
+ 0x38, /* 00 000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 154 0x9a '\232' */
+ 0x88, /* 000 000 */
+ 0x44, /* 0 000 00 */
+ 0x44, /* 0 000 00 */
+ 0x44, /* 0 000 00 */
+ 0x44, /* 0 000 00 */
+ 0x44, /* 0 000 00 */
+ 0x44, /* 0 000 00 */
+ 0x38, /* 00 000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 155 0x9b '\233' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x10, /* 000 0000 */
+ 0x38, /* 00 000 */
+ 0x54, /* 0 0 0 00 */
+ 0x50, /* 0 0 0000 */
+ 0x54, /* 0 0 0 00 */
+ 0x38, /* 00 000 */
+ 0x10, /* 000 0000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 156 0x9c '\234' */
+ 0x30, /* 00 0000 */
+ 0x48, /* 0 00 000 */
+ 0x40, /* 0 000000 */
+ 0x70, /* 0 0000 */
+ 0x40, /* 0 000000 */
+ 0x40, /* 0 000000 */
+ 0x44, /* 0 000 00 */
+ 0x78, /* 0 000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 157 0x9d '\235' */
+ 0x00, /* 00000000 */
+ 0x44, /* 0 000 00 */
+ 0x28, /* 00 0 000 */
+ 0x7c, /* 0 00 */
+ 0x10, /* 000 0000 */
+ 0x7c, /* 0 00 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 158 0x9e '\236' */
+ 0x00, /* 00000000 */
+ 0x70, /* 0 0000 */
+ 0x48, /* 0 00 000 */
+ 0x70, /* 0 0000 */
+ 0x48, /* 0 00 000 */
+ 0x5c, /* 0 0 00 */
+ 0x48, /* 0 00 000 */
+ 0x44, /* 0 000 00 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 159 0x9f '\237' */
+ 0x00, /* 00000000 */
+ 0x0c, /* 0000 00 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x38, /* 00 000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x60, /* 0 00000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 160 0xa0 '\240' */
+ 0x08, /* 0000 000 */
+ 0x10, /* 000 0000 */
+ 0x00, /* 00000000 */
+ 0x3c, /* 00 00 */
+ 0x44, /* 0 000 00 */
+ 0x44, /* 0 000 00 */
+ 0x4c, /* 0 00 00 */
+ 0x34, /* 00 0 00 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 161 0xa1 '\241' */
+ 0x08, /* 0000 000 */
+ 0x10, /* 000 0000 */
+ 0x00, /* 00000000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 162 0xa2 '\242' */
+ 0x08, /* 0000 000 */
+ 0x10, /* 000 0000 */
+ 0x00, /* 00000000 */
+ 0x38, /* 00 000 */
+ 0x44, /* 0 000 00 */
+ 0x44, /* 0 000 00 */
+ 0x44, /* 0 000 00 */
+ 0x38, /* 00 000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 163 0xa3 '\243' */
+ 0x08, /* 0000 000 */
+ 0x10, /* 000 0000 */
+ 0x00, /* 00000000 */
+ 0x44, /* 0 000 00 */
+ 0x44, /* 0 000 00 */
+ 0x44, /* 0 000 00 */
+ 0x4c, /* 0 00 00 */
+ 0x34, /* 00 0 00 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 164 0xa4 '\244' */
+ 0x34, /* 00 0 00 */
+ 0x58, /* 0 0 000 */
+ 0x00, /* 00000000 */
+ 0x58, /* 0 0 000 */
+ 0x64, /* 0 00 00 */
+ 0x44, /* 0 000 00 */
+ 0x44, /* 0 000 00 */
+ 0x44, /* 0 000 00 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 165 0xa5 '\245' */
+ 0x58, /* 0 0 000 */
+ 0x44, /* 0 000 00 */
+ 0x64, /* 0 00 00 */
+ 0x54, /* 0 0 0 00 */
+ 0x4c, /* 0 00 00 */
+ 0x44, /* 0 000 00 */
+ 0x44, /* 0 000 00 */
+ 0x44, /* 0 000 00 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 166 0xa6 '\246' */
+ 0x00, /* 00000000 */
+ 0x1c, /* 000 00 */
+ 0x24, /* 00 00 00 */
+ 0x24, /* 00 00 00 */
+ 0x1c, /* 000 00 */
+ 0x00, /* 00000000 */
+ 0x3c, /* 00 00 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 167 0xa7 '\247' */
+ 0x00, /* 00000000 */
+ 0x18, /* 000 000 */
+ 0x24, /* 00 00 00 */
+ 0x24, /* 00 00 00 */
+ 0x18, /* 000 000 */
+ 0x00, /* 00000000 */
+ 0x3c, /* 00 00 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 168 0xa8 '\250' */
+ 0x00, /* 00000000 */
+ 0x10, /* 000 0000 */
+ 0x00, /* 00000000 */
+ 0x10, /* 000 0000 */
+ 0x20, /* 00 00000 */
+ 0x40, /* 0 000000 */
+ 0x44, /* 0 000 00 */
+ 0x38, /* 00 000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 169 0xa9 '\251' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7c, /* 0 00 */
+ 0x40, /* 0 000000 */
+ 0x40, /* 0 000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 170 0xaa '\252' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7c, /* 0 00 */
+ 0x04, /* 00000 00 */
+ 0x04, /* 00000 00 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 171 0xab '\253' */
+ 0x20, /* 00 00000 */
+ 0x60, /* 0 00000 */
+ 0x24, /* 00 00 00 */
+ 0x28, /* 00 0 000 */
+ 0x10, /* 000 0000 */
+ 0x28, /* 00 0 000 */
+ 0x44, /* 0 000 00 */
+ 0x08, /* 0000 000 */
+ 0x1c, /* 000 00 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 172 0xac '\254' */
+ 0x20, /* 00 00000 */
+ 0x60, /* 0 00000 */
+ 0x24, /* 00 00 00 */
+ 0x28, /* 00 0 000 */
+ 0x10, /* 000 0000 */
+ 0x28, /* 00 0 000 */
+ 0x58, /* 0 0 000 */
+ 0x3c, /* 00 00 */
+ 0x08, /* 0000 000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 173 0xad '\255' */
+ 0x00, /* 00000000 */
+ 0x08, /* 0000 000 */
+ 0x00, /* 00000000 */
+ 0x08, /* 0000 000 */
+ 0x08, /* 0000 000 */
+ 0x08, /* 0000 000 */
+ 0x08, /* 0000 000 */
+ 0x08, /* 0000 000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 174 0xae '\256' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x24, /* 00 00 00 */
+ 0x48, /* 0 00 000 */
+ 0x48, /* 0 00 000 */
+ 0x24, /* 00 00 00 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 175 0xaf '\257' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x48, /* 0 00 000 */
+ 0x24, /* 00 00 00 */
+ 0x24, /* 00 00 00 */
+ 0x48, /* 0 00 000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 176 0xb0 '\260' */
+ 0x11, /* 000 000 */
+ 0x44, /* 0 000 00 */
+ 0x11, /* 000 000 */
+ 0x44, /* 0 000 00 */
+ 0x11, /* 000 000 */
+ 0x44, /* 0 000 00 */
+ 0x11, /* 000 000 */
+ 0x44, /* 0 000 00 */
+ 0x11, /* 000 000 */
+ 0x44, /* 0 000 00 */
+ 0x11, /* 000 000 */
+
+ /* 177 0xb1 '\261' */
+ 0x55, /* 0 0 0 0 */
+ 0xaa, /* 0 0 0 0 */
+ 0x55, /* 0 0 0 0 */
+ 0xaa, /* 0 0 0 0 */
+ 0x55, /* 0 0 0 0 */
+ 0xaa, /* 0 0 0 0 */
+ 0x55, /* 0 0 0 0 */
+ 0xaa, /* 0 0 0 0 */
+ 0x55, /* 0 0 0 0 */
+ 0xaa, /* 0 0 0 0 */
+ 0x55, /* 0 0 0 0 */
+
+ /* 178 0xb2 '\262' */
+ 0xdd, /* 0 0 */
+ 0x77, /* 0 0 */
+ 0xdd, /* 0 0 */
+ 0x77, /* 0 0 */
+ 0xdd, /* 0 0 */
+ 0x77, /* 0 0 */
+ 0xdd, /* 0 0 */
+ 0x77, /* 0 0 */
+ 0xdd, /* 0 0 */
+ 0x77, /* 0 0 */
+ 0xdd, /* 0 0 */
+
+ /* 179 0xb3 '\263' */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+
+ /* 180 0xb4 '\264' */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0xf0, /* 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+
+ /* 181 0xb5 '\265' */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0xf0, /* 0000 */
+ 0x10, /* 000 0000 */
+ 0xf0, /* 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+
+ /* 182 0xb6 '\266' */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0xe8, /* 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+
+ /* 183 0xb7 '\267' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xf8, /* 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+
+ /* 184 0xb8 '\270' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xf0, /* 0000 */
+ 0x10, /* 000 0000 */
+ 0xf0, /* 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+
+ /* 185 0xb9 '\271' */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0xe8, /* 0 000 */
+ 0x08, /* 0000 000 */
+ 0xe8, /* 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+
+ /* 186 0xba '\272' */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+
+ /* 187 0xbb '\273' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xf8, /* 000 */
+ 0x08, /* 0000 000 */
+ 0xe8, /* 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+
+ /* 188 0xbc '\274' */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0xe8, /* 0 000 */
+ 0x08, /* 0000 000 */
+ 0xf8, /* 000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 189 0xbd '\275' */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0xf8, /* 000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 190 0xbe '\276' */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0xf0, /* 0000 */
+ 0x10, /* 000 0000 */
+ 0xf0, /* 0000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 191 0xbf '\277' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xf0, /* 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+
+ /* 192 0xc0 '\300' */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x1c, /* 000 00 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 193 0xc1 '\301' */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0xfc, /* 00 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 194 0xc2 '\302' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xfc, /* 00 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+
+ /* 195 0xc3 '\303' */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x1c, /* 000 00 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+
+ /* 196 0xc4 '\304' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xfc, /* 00 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 197 0xc5 '\305' */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0xfc, /* 00 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+
+ /* 198 0xc6 '\306' */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x1c, /* 000 00 */
+ 0x10, /* 000 0000 */
+ 0x1c, /* 000 00 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+
+ /* 199 0xc7 '\307' */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x2c, /* 00 0 00 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+
+ /* 200 0xc8 '\310' */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x2c, /* 00 0 00 */
+ 0x20, /* 00 00000 */
+ 0x3c, /* 00 00 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 201 0xc9 '\311' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x3c, /* 00 00 */
+ 0x20, /* 00 00000 */
+ 0x2c, /* 00 0 00 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+
+ /* 202 0xca '\312' */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0xec, /* 0 00 */
+ 0x00, /* 00000000 */
+ 0xfc, /* 00 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 203 0xcb '\313' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xfc, /* 00 */
+ 0x00, /* 00000000 */
+ 0xec, /* 0 00 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+
+ /* 204 0xcc '\314' */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x2c, /* 00 0 00 */
+ 0x20, /* 00 00000 */
+ 0x2c, /* 00 0 00 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+
+ /* 205 0xcd '\315' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xfc, /* 00 */
+ 0x00, /* 00000000 */
+ 0xfc, /* 00 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 206 0xce '\316' */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0xec, /* 0 00 */
+ 0x00, /* 00000000 */
+ 0xec, /* 0 00 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+
+ /* 207 0xcf '\317' */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0xfc, /* 00 */
+ 0x00, /* 00000000 */
+ 0xfc, /* 00 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 208 0xd0 '\320' */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0xfc, /* 00 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 209 0xd1 '\321' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xfc, /* 00 */
+ 0x00, /* 00000000 */
+ 0xfc, /* 00 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+
+ /* 210 0xd2 '\322' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xfc, /* 00 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+
+ /* 211 0xd3 '\323' */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x3c, /* 00 00 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 212 0xd4 '\324' */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x1c, /* 000 00 */
+ 0x10, /* 000 0000 */
+ 0x1c, /* 000 00 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 213 0xd5 '\325' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x1c, /* 000 00 */
+ 0x10, /* 000 0000 */
+ 0x1c, /* 000 00 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+
+ /* 214 0xd6 '\326' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x3c, /* 00 00 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+
+ /* 215 0xd7 '\327' */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0xfc, /* 00 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+
+ /* 216 0xd8 '\330' */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0xfc, /* 00 */
+ 0x10, /* 000 0000 */
+ 0xfc, /* 00 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+
+ /* 217 0xd9 '\331' */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0xf0, /* 0000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 218 0xda '\332' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x1f, /* 000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+
+ /* 219 0xdb '\333' */
+ 0xfc, /* 00 */
+ 0xfc, /* 00 */
+ 0xfc, /* 00 */
+ 0xfc, /* 00 */
+ 0xfc, /* 00 */
+ 0xfc, /* 00 */
+ 0xfc, /* 00 */
+ 0xfc, /* 00 */
+ 0xfc, /* 00 */
+ 0xfc, /* 00 */
+ 0xfc, /* 00 */
+
+ /* 220 0xdc '\334' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xfc, /* 00 */
+ 0xfc, /* 00 */
+ 0xfc, /* 00 */
+ 0xfc, /* 00 */
+ 0xfc, /* 00 */
+ 0xfc, /* 00 */
+
+ /* 221 0xdd '\335' */
+ 0xe0, /* 00000 */
+ 0xe0, /* 00000 */
+ 0xe0, /* 00000 */
+ 0xe0, /* 00000 */
+ 0xe0, /* 00000 */
+ 0xe0, /* 00000 */
+ 0xe0, /* 00000 */
+ 0xe0, /* 00000 */
+ 0xe0, /* 00000 */
+ 0xe0, /* 00000 */
+ 0xe0, /* 00000 */
+
+ /* 222 0xde '\336' */
+ 0x1c, /* 000 00 */
+ 0x1c, /* 000 00 */
+ 0x1c, /* 000 00 */
+ 0x1c, /* 000 00 */
+ 0x1c, /* 000 00 */
+ 0x1c, /* 000 00 */
+ 0x1c, /* 000 00 */
+ 0x1c, /* 000 00 */
+ 0x1c, /* 000 00 */
+ 0x1c, /* 000 00 */
+ 0x1c, /* 000 00 */
+
+ /* 223 0xdf '\337' */
+ 0xfc, /* 00 */
+ 0xfc, /* 00 */
+ 0xfc, /* 00 */
+ 0xfc, /* 00 */
+ 0xfc, /* 00 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 224 0xe0 '\340' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x24, /* 00 00 00 */
+ 0x58, /* 0 0 000 */
+ 0x50, /* 0 0 0000 */
+ 0x54, /* 0 0 0 00 */
+ 0x2c, /* 00 0 00 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 225 0xe1 '\341' */
+ 0x18, /* 000 000 */
+ 0x24, /* 00 00 00 */
+ 0x44, /* 0 000 00 */
+ 0x48, /* 0 00 000 */
+ 0x48, /* 0 00 000 */
+ 0x44, /* 0 000 00 */
+ 0x44, /* 0 000 00 */
+ 0x58, /* 0 0 000 */
+ 0x40, /* 0 000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 226 0xe2 '\342' */
+ 0x00, /* 00000000 */
+ 0x7c, /* 0 00 */
+ 0x44, /* 0 000 00 */
+ 0x44, /* 0 000 00 */
+ 0x40, /* 0 000000 */
+ 0x40, /* 0 000000 */
+ 0x40, /* 0 000000 */
+ 0x40, /* 0 000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 227 0xe3 '\343' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7c, /* 0 00 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 228 0xe4 '\344' */
+ 0x00, /* 00000000 */
+ 0x7c, /* 0 00 */
+ 0x24, /* 00 00 00 */
+ 0x10, /* 000 0000 */
+ 0x08, /* 0000 000 */
+ 0x10, /* 000 0000 */
+ 0x24, /* 00 00 00 */
+ 0x7c, /* 0 00 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 229 0xe5 '\345' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x3c, /* 00 00 */
+ 0x48, /* 0 00 000 */
+ 0x48, /* 0 00 000 */
+ 0x48, /* 0 00 000 */
+ 0x30, /* 00 0000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 230 0xe6 '\346' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x48, /* 0 00 000 */
+ 0x48, /* 0 00 000 */
+ 0x48, /* 0 00 000 */
+ 0x48, /* 0 00 000 */
+ 0x74, /* 0 0 00 */
+ 0x40, /* 0 000000 */
+ 0x40, /* 0 000000 */
+ 0x00, /* 00000000 */
+
+ /* 231 0xe7 '\347' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x6c, /* 0 0 00 */
+ 0x98, /* 00 000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 232 0xe8 '\350' */
+ 0x00, /* 00000000 */
+ 0x38, /* 00 000 */
+ 0x10, /* 000 0000 */
+ 0x38, /* 00 000 */
+ 0x44, /* 0 000 00 */
+ 0x38, /* 00 000 */
+ 0x10, /* 000 0000 */
+ 0x38, /* 00 000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 233 0xe9 '\351' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x38, /* 00 000 */
+ 0x4c, /* 0 00 00 */
+ 0x54, /* 0 0 0 00 */
+ 0x64, /* 0 00 00 */
+ 0x38, /* 00 000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 234 0xea '\352' */
+ 0x00, /* 00000000 */
+ 0x38, /* 00 000 */
+ 0x44, /* 0 000 00 */
+ 0x44, /* 0 000 00 */
+ 0x44, /* 0 000 00 */
+ 0x44, /* 0 000 00 */
+ 0x28, /* 00 0 000 */
+ 0x6c, /* 0 0 00 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 235 0xeb '\353' */
+ 0x00, /* 00000000 */
+ 0x10, /* 000 0000 */
+ 0x08, /* 0000 000 */
+ 0x0c, /* 0000 00 */
+ 0x14, /* 000 0 00 */
+ 0x24, /* 00 00 00 */
+ 0x24, /* 00 00 00 */
+ 0x18, /* 000 000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 236 0xec '\354' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x38, /* 00 000 */
+ 0x54, /* 0 0 0 00 */
+ 0x54, /* 0 0 0 00 */
+ 0x54, /* 0 0 0 00 */
+ 0x38, /* 00 000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 237 0xed '\355' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x04, /* 00000 00 */
+ 0x38, /* 00 000 */
+ 0x44, /* 0 000 00 */
+ 0x38, /* 00 000 */
+ 0x40, /* 0 000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 238 0xee '\356' */
+ 0x00, /* 00000000 */
+ 0x3c, /* 00 00 */
+ 0x40, /* 0 000000 */
+ 0x40, /* 0 000000 */
+ 0x78, /* 0 000 */
+ 0x40, /* 0 000000 */
+ 0x40, /* 0 000000 */
+ 0x3c, /* 00 00 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 239 0xef '\357' */
+ 0x00, /* 00000000 */
+ 0x38, /* 00 000 */
+ 0x44, /* 0 000 00 */
+ 0x44, /* 0 000 00 */
+ 0x44, /* 0 000 00 */
+ 0x44, /* 0 000 00 */
+ 0x44, /* 0 000 00 */
+ 0x44, /* 0 000 00 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 240 0xf0 '\360' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xfc, /* 00 */
+ 0x00, /* 00000000 */
+ 0xfc, /* 00 */
+ 0x00, /* 00000000 */
+ 0xfc, /* 00 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 241 0xf1 '\361' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x7c, /* 0 00 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x7c, /* 0 00 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 242 0xf2 '\362' */
+ 0x00, /* 00000000 */
+ 0x10, /* 000 0000 */
+ 0x08, /* 0000 000 */
+ 0x04, /* 00000 00 */
+ 0x08, /* 0000 000 */
+ 0x10, /* 000 0000 */
+ 0x00, /* 00000000 */
+ 0x1c, /* 000 00 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 243 0xf3 '\363' */
+ 0x00, /* 00000000 */
+ 0x08, /* 0000 000 */
+ 0x10, /* 000 0000 */
+ 0x20, /* 00 00000 */
+ 0x10, /* 000 0000 */
+ 0x08, /* 0000 000 */
+ 0x00, /* 00000000 */
+ 0x38, /* 00 000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 244 0xf4 '\364' */
+ 0x00, /* 00000000 */
+ 0x0c, /* 0000 00 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+
+ /* 245 0xf5 '\365' */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x10, /* 000 0000 */
+ 0x60, /* 0 00000 */
+ 0x00, /* 00000000 */
+
+ /* 246 0xf6 '\366' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x10, /* 000 0000 */
+ 0x00, /* 00000000 */
+ 0x7c, /* 0 00 */
+ 0x00, /* 00000000 */
+ 0x10, /* 000 0000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 247 0xf7 '\367' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x34, /* 00 0 00 */
+ 0x48, /* 0 00 000 */
+ 0x00, /* 00000000 */
+ 0x34, /* 00 0 00 */
+ 0x48, /* 0 00 000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 248 0xf8 '\370' */
+ 0x18, /* 000 000 */
+ 0x24, /* 00 00 00 */
+ 0x24, /* 00 00 00 */
+ 0x18, /* 000 000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 249 0xf9 '\371' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x10, /* 000 0000 */
+ 0x38, /* 00 000 */
+ 0x10, /* 000 0000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 250 0xfa '\372' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x10, /* 000 0000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 251 0xfb '\373' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x0c, /* 0000 00 */
+ 0x08, /* 0000 000 */
+ 0x10, /* 000 0000 */
+ 0x50, /* 0 0 0000 */
+ 0x20, /* 00 00000 */
+ 0x20, /* 00 00000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 252 0xfc '\374' */
+ 0x00, /* 00000000 */
+ 0x50, /* 0 0 0000 */
+ 0x28, /* 00 0 000 */
+ 0x28, /* 00 0 000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 253 0xfd '\375' */
+ 0x00, /* 00000000 */
+ 0x70, /* 0 0000 */
+ 0x08, /* 0000 000 */
+ 0x20, /* 00 00000 */
+ 0x78, /* 0 000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 254 0xfe '\376' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x38, /* 00 000 */
+ 0x38, /* 00 000 */
+ 0x38, /* 00 000 */
+ 0x38, /* 00 000 */
+ 0x38, /* 00 000 */
+ 0x38, /* 00 000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 255 0xff '\377' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+} };
+
+
+const struct font_desc font_vga_6x11 = {
+ .idx = VGA6x11_IDX,
+ .name = "ProFont6x11",
+ .width = 6,
+ .height = 11,
+ .charcount = 256,
+ .data = fontdata_6x11.data,
+ /* Try to avoid this font if possible unless on a Mac */
+ .pref = -2000,
+};
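For orientation: each glyph in these tables is stored top to bottom, one byte per scanline, with the most significant bit as the leftmost pixel; a 6-pixel-wide font draws only the leftmost six bits of each byte (the shading glyphs 0xb0-0xb2 still populate all eight). A minimal user-space sketch of decoding one glyph under that convention follows; the render_row helper and the '#'/'.' output characters are illustrative, not kernel API.

	#include <stdio.h>

	/* Print one glyph scanline, MSB first; '#' marks a set pixel. */
	static void render_row(unsigned char row, int width)
	{
		for (int bit = 0; bit < width; bit++)
			putchar(row & (0x80 >> bit) ? '#' : '.');
		putchar('\n');
	}

	int main(void)
	{
		/* The eleven scanlines of glyph 104 0x68 'h' from fontdata_6x11. */
		static const unsigned char h[11] = {
			0x00, 0x40, 0x40, 0x78, 0x44, 0x44,
			0x44, 0x44, 0x00, 0x00, 0x00
		};

		for (int i = 0; i < 11; i++)
			render_row(h[i], 6);
		return 0;
	}

Compiled with any C compiler, this prints an 11-row 'h' matching the 01000100-style bit comments above.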
diff --git a/lib/fonts/font_6x8.c b/lib/fonts/font_6x8.c
new file mode 100644
index 000000000..06ace7792
--- /dev/null
+++ b/lib/fonts/font_6x8.c
@@ -0,0 +1,2577 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/font.h>
+
+#define FONTDATAMAX 2048
+
+static const struct font_data fontdata_6x8 = {
+ { 0, 0, FONTDATAMAX, 0 }, {
+ /* 0 0x00 '^@' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 1 0x01 '^A' */
+ 0x78, /* 011110 */
+ 0x84, /* 100001 */
+ 0xCC, /* 110011 */
+ 0x84, /* 100001 */
+ 0xCC, /* 110011 */
+ 0xB4, /* 101101 */
+ 0x78, /* 011110 */
+ 0x00, /* 000000 */
+
+ /* 2 0x02 '^B' */
+ 0x78, /* 011110 */
+ 0xFC, /* 111111 */
+ 0xB4, /* 101101 */
+ 0xFC, /* 111111 */
+ 0xB4, /* 101101 */
+ 0xCC, /* 110011 */
+ 0x78, /* 011110 */
+ 0x00, /* 000000 */
+
+ /* 3 0x03 '^C' */
+ 0x00, /* 000000 */
+ 0x28, /* 001010 */
+ 0x7C, /* 011111 */
+ 0x7C, /* 011111 */
+ 0x38, /* 001110 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 4 0x04 '^D' */
+ 0x00, /* 000000 */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x7C, /* 011111 */
+ 0x38, /* 001110 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 5 0x05 '^E' */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x38, /* 001110 */
+ 0x6C, /* 011011 */
+ 0x6C, /* 011011 */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 6 0x06 '^F' */
+ 0x00, /* 000000 */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x7C, /* 011111 */
+ 0x7C, /* 011111 */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 7 0x07 '^G' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x30, /* 001100 */
+ 0x78, /* 011110 */
+ 0x30, /* 001100 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 8 0x08 '^H' */
+ 0xFC, /* 111111 */
+ 0xFC, /* 111111 */
+ 0xCC, /* 110011 */
+ 0x84, /* 100001 */
+ 0xCC, /* 110011 */
+ 0xFC, /* 111111 */
+ 0xFC, /* 111111 */
+ 0xFC, /* 111111 */
+
+ /* 9 0x09 '^I' */
+ 0x00, /* 000000 */
+ 0x30, /* 001100 */
+ 0x48, /* 010010 */
+ 0x84, /* 100001 */
+ 0x48, /* 010010 */
+ 0x30, /* 001100 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 10 0x0A '^J' */
+ 0xFC, /* 111111 */
+ 0xCC, /* 110011 */
+ 0xB4, /* 101101 */
+ 0x78, /* 011110 */
+ 0xB4, /* 101101 */
+ 0xCC, /* 110011 */
+ 0xFC, /* 111111 */
+ 0xFC, /* 111111 */
+
+ /* 11 0x0B '^K' */
+ 0x3C, /* 001111 */
+ 0x14, /* 000101 */
+ 0x20, /* 001000 */
+ 0x78, /* 011110 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 12 0x0C '^L' */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+
+ /* 13 0x0D '^M' */
+ 0x18, /* 000110 */
+ 0x14, /* 000101 */
+ 0x14, /* 000101 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x70, /* 011100 */
+ 0x60, /* 011000 */
+ 0x00, /* 000000 */
+
+ /* 14 0x0E '^N' */
+ 0x3C, /* 001111 */
+ 0x24, /* 001001 */
+ 0x3C, /* 001111 */
+ 0x24, /* 001001 */
+ 0x24, /* 001001 */
+ 0x6C, /* 011011 */
+ 0x6C, /* 011011 */
+ 0x00, /* 000000 */
+
+ /* 15 0x0F '^O' */
+ 0x10, /* 000100 */
+ 0x54, /* 010101 */
+ 0x38, /* 001110 */
+ 0x6C, /* 011011 */
+ 0x38, /* 001110 */
+ 0x54, /* 010101 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+
+ /* 16 0x10 '^P' */
+ 0x40, /* 010000 */
+ 0x60, /* 011000 */
+ 0x70, /* 011100 */
+ 0x78, /* 011110 */
+ 0x70, /* 011100 */
+ 0x60, /* 011000 */
+ 0x40, /* 010000 */
+ 0x00, /* 000000 */
+
+ /* 17 0x11 '^Q' */
+ 0x04, /* 000001 */
+ 0x0C, /* 000011 */
+ 0x1C, /* 000111 */
+ 0x3C, /* 001111 */
+ 0x1C, /* 000111 */
+ 0x0C, /* 000011 */
+ 0x04, /* 000001 */
+ 0x00, /* 000000 */
+
+ /* 18 0x12 '^R' */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x54, /* 010101 */
+ 0x10, /* 000100 */
+ 0x54, /* 010101 */
+ 0x38, /* 001110 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+
+ /* 19 0x13 '^S' */
+ 0x48, /* 010010 */
+ 0x48, /* 010010 */
+ 0x48, /* 010010 */
+ 0x48, /* 010010 */
+ 0x48, /* 010010 */
+ 0x00, /* 000000 */
+ 0x48, /* 010010 */
+ 0x00, /* 000000 */
+
+ /* 20 0x14 '^T' */
+ 0x3C, /* 001111 */
+ 0x54, /* 010101 */
+ 0x54, /* 010101 */
+ 0x3C, /* 001111 */
+ 0x14, /* 000101 */
+ 0x14, /* 000101 */
+ 0x14, /* 000101 */
+ 0x00, /* 000000 */
+
+ /* 21 0x15 '^U' */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x30, /* 001100 */
+ 0x28, /* 001010 */
+ 0x14, /* 000101 */
+ 0x0C, /* 000011 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+
+ /* 22 0x16 '^V' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0xF8, /* 111110 */
+ 0xF8, /* 111110 */
+ 0xF8, /* 111110 */
+ 0x00, /* 000000 */
+
+ /* 23 0x17 '^W' */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x54, /* 010101 */
+ 0x10, /* 000100 */
+ 0x54, /* 010101 */
+ 0x38, /* 001110 */
+ 0x10, /* 000100 */
+ 0x7C, /* 011111 */
+
+ /* 24 0x18 '^X' */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x54, /* 010101 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+
+ /* 25 0x19 '^Y' */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x54, /* 010101 */
+ 0x38, /* 001110 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+
+ /* 26 0x1A '^Z' */
+ 0x00, /* 000000 */
+ 0x10, /* 000100 */
+ 0x08, /* 000010 */
+ 0x7C, /* 011111 */
+ 0x08, /* 000010 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 27 0x1B '^[' */
+ 0x00, /* 000000 */
+ 0x10, /* 000100 */
+ 0x20, /* 001000 */
+ 0x7C, /* 011111 */
+ 0x20, /* 001000 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 28 0x1C '^\' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x78, /* 011110 */
+ 0x00, /* 000000 */
+
+ /* 29 0x1D '^]' */
+ 0x00, /* 000000 */
+ 0x48, /* 010010 */
+ 0x84, /* 100001 */
+ 0xFC, /* 111111 */
+ 0x84, /* 100001 */
+ 0x48, /* 010010 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 30 0x1E '^^' */
+ 0x00, /* 000000 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x38, /* 001110 */
+ 0x7C, /* 011111 */
+ 0x7C, /* 011111 */
+ 0x00, /* 000000 */
+
+ /* 31 0x1F '^_' */
+ 0x00, /* 000000 */
+ 0x7C, /* 011111 */
+ 0x7C, /* 011111 */
+ 0x38, /* 001110 */
+ 0x38, /* 001110 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+
+ /* 32 0x20 ' ' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 33 0x21 '!' */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+
+ /* 34 0x22 '"' */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 35 0x23 '#' */
+ 0x00, /* 000000 */
+ 0x28, /* 001010 */
+ 0x7C, /* 011111 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x7C, /* 011111 */
+ 0x28, /* 001010 */
+ 0x00, /* 000000 */
+
+ /* 36 0x24 '$' */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x40, /* 010000 */
+ 0x30, /* 001100 */
+ 0x08, /* 000010 */
+ 0x70, /* 011100 */
+ 0x20, /* 001000 */
+ 0x00, /* 000000 */
+
+ /* 37 0x25 '%' */
+ 0x64, /* 011001 */
+ 0x64, /* 011001 */
+ 0x08, /* 000010 */
+ 0x10, /* 000100 */
+ 0x20, /* 001000 */
+ 0x4C, /* 010011 */
+ 0x4C, /* 010011 */
+ 0x00, /* 000000 */
+
+ /* 38 0x26 '&' */
+ 0x30, /* 001100 */
+ 0x48, /* 010010 */
+ 0x50, /* 010100 */
+ 0x20, /* 001000 */
+ 0x54, /* 010101 */
+ 0x48, /* 010010 */
+ 0x34, /* 001101 */
+ 0x00, /* 000000 */
+
+ /* 39 0x27 ''' */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 40 0x28 '(' */
+ 0x08, /* 000010 */
+ 0x10, /* 000100 */
+ 0x20, /* 001000 */
+ 0x20, /* 001000 */
+ 0x20, /* 001000 */
+ 0x10, /* 000100 */
+ 0x08, /* 000010 */
+ 0x00, /* 000000 */
+
+ /* 41 0x29 ')' */
+ 0x20, /* 001000 */
+ 0x10, /* 000100 */
+ 0x08, /* 000010 */
+ 0x08, /* 000010 */
+ 0x08, /* 000010 */
+ 0x10, /* 000100 */
+ 0x20, /* 001000 */
+ 0x00, /* 000000 */
+
+ /* 42 0x2A '*' */
+ 0x10, /* 000100 */
+ 0x54, /* 010101 */
+ 0x38, /* 001110 */
+ 0x54, /* 010101 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 43 0x2B '+' */
+ 0x00, /* 000000 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x7C, /* 011111 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 44 0x2C ',' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x30, /* 001100 */
+ 0x30, /* 001100 */
+ 0x20, /* 001000 */
+
+ /* 45 0x2D '-' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x7C, /* 011111 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 46 0x2E '.' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x18, /* 000110 */
+ 0x18, /* 000110 */
+ 0x00, /* 000000 */
+
+ /* 47 0x2F '/' */
+ 0x04, /* 000001 */
+ 0x08, /* 000010 */
+ 0x08, /* 000010 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x20, /* 001000 */
+ 0x20, /* 001000 */
+ 0x40, /* 010000 */
+
+ /* 48 0x30 '0' */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x4C, /* 010011 */
+ 0x54, /* 010101 */
+ 0x64, /* 011001 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 49 0x31 '1' */
+ 0x10, /* 000100 */
+ 0x30, /* 001100 */
+ 0x50, /* 010100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x7C, /* 011111 */
+ 0x00, /* 000000 */
+
+ /* 50 0x32 '2' */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x04, /* 000001 */
+ 0x08, /* 000010 */
+ 0x10, /* 000100 */
+ 0x20, /* 001000 */
+ 0x7C, /* 011111 */
+ 0x00, /* 000000 */
+
+ /* 51 0x33 '3' */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x04, /* 000001 */
+ 0x18, /* 000110 */
+ 0x04, /* 000001 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 52 0x34 '4' */
+ 0x08, /* 000010 */
+ 0x18, /* 000110 */
+ 0x28, /* 001010 */
+ 0x48, /* 010010 */
+ 0x7C, /* 011111 */
+ 0x08, /* 000010 */
+ 0x08, /* 000010 */
+ 0x00, /* 000000 */
+
+ /* 53 0x35 '5' */
+ 0x7C, /* 011111 */
+ 0x40, /* 010000 */
+ 0x78, /* 011110 */
+ 0x04, /* 000001 */
+ 0x04, /* 000001 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 54 0x36 '6' */
+ 0x18, /* 000110 */
+ 0x20, /* 001000 */
+ 0x40, /* 010000 */
+ 0x78, /* 011110 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 55 0x37 '7' */
+ 0x7C, /* 011111 */
+ 0x04, /* 000001 */
+ 0x04, /* 000001 */
+ 0x08, /* 000010 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+
+ /* 56 0x38 '8' */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 57 0x39 '9' */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x3C, /* 001111 */
+ 0x04, /* 000001 */
+ 0x08, /* 000010 */
+ 0x30, /* 001100 */
+ 0x00, /* 000000 */
+
+ /* 58 0x3A ':' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x18, /* 000110 */
+ 0x18, /* 000110 */
+ 0x00, /* 000000 */
+ 0x18, /* 000110 */
+ 0x18, /* 000110 */
+ 0x00, /* 000000 */
+
+ /* 59 0x3B ';' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x30, /* 001100 */
+ 0x30, /* 001100 */
+ 0x00, /* 000000 */
+ 0x30, /* 001100 */
+ 0x30, /* 001100 */
+ 0x20, /* 001000 */
+
+ /* 60 0x3C '<' */
+ 0x04, /* 000001 */
+ 0x08, /* 000010 */
+ 0x10, /* 000100 */
+ 0x20, /* 001000 */
+ 0x10, /* 000100 */
+ 0x08, /* 000010 */
+ 0x04, /* 000001 */
+ 0x00, /* 000000 */
+
+ /* 61 0x3D '=' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x7C, /* 011111 */
+ 0x00, /* 000000 */
+ 0x7C, /* 011111 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 62 0x3E '>' */
+ 0x20, /* 001000 */
+ 0x10, /* 000100 */
+ 0x08, /* 000010 */
+ 0x04, /* 000001 */
+ 0x08, /* 000010 */
+ 0x10, /* 000100 */
+ 0x20, /* 001000 */
+ 0x00, /* 000000 */
+
+ /* 63 0x3F '?' */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x04, /* 000001 */
+ 0x08, /* 000010 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+
+ /* 64 0x40 '@' */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x5C, /* 010111 */
+ 0x54, /* 010101 */
+ 0x5C, /* 010111 */
+ 0x40, /* 010000 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 65 0x41 'A' */
+ 0x10, /* 000100 */
+ 0x28, /* 001010 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x7C, /* 011111 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x00, /* 000000 */
+
+ /* 66 0x42 'B' */
+ 0x78, /* 011110 */
+ 0x24, /* 001001 */
+ 0x24, /* 001001 */
+ 0x38, /* 001110 */
+ 0x24, /* 001001 */
+ 0x24, /* 001001 */
+ 0x78, /* 011110 */
+ 0x00, /* 000000 */
+
+ /* 67 0x43 'C' */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 68 0x44 'D' */
+ 0x78, /* 011110 */
+ 0x24, /* 001001 */
+ 0x24, /* 001001 */
+ 0x24, /* 001001 */
+ 0x24, /* 001001 */
+ 0x24, /* 001001 */
+ 0x78, /* 011110 */
+ 0x00, /* 000000 */
+
+ /* 69 0x45 'E' */
+ 0x7C, /* 011111 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x78, /* 011110 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x7C, /* 011111 */
+ 0x00, /* 000000 */
+
+ /* 70 0x46 'F' */
+ 0x7C, /* 011111 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x78, /* 011110 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x00, /* 000000 */
+
+ /* 71 0x47 'G' */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x40, /* 010000 */
+ 0x5C, /* 010111 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 72 0x48 'H' */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x7C, /* 011111 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x00, /* 000000 */
+
+ /* 73 0x49 'I' */
+ 0x38, /* 001110 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 74 0x4A 'J' */
+ 0x1C, /* 000111 */
+ 0x08, /* 000010 */
+ 0x08, /* 000010 */
+ 0x08, /* 000010 */
+ 0x48, /* 010010 */
+ 0x48, /* 010010 */
+ 0x30, /* 001100 */
+ 0x00, /* 000000 */
+
+ /* 75 0x4B 'K' */
+ 0x44, /* 010001 */
+ 0x48, /* 010010 */
+ 0x50, /* 010100 */
+ 0x60, /* 011000 */
+ 0x50, /* 010100 */
+ 0x48, /* 010010 */
+ 0x44, /* 010001 */
+ 0x00, /* 000000 */
+
+ /* 76 0x4C 'L' */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x7C, /* 011111 */
+ 0x00, /* 000000 */
+
+ /* 77 0x4D 'M' */
+ 0x44, /* 010001 */
+ 0x6C, /* 011011 */
+ 0x54, /* 010101 */
+ 0x54, /* 010101 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x00, /* 000000 */
+
+ /* 78 0x4E 'N' */
+ 0x44, /* 010001 */
+ 0x64, /* 011001 */
+ 0x54, /* 010101 */
+ 0x4C, /* 010011 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x00, /* 000000 */
+
+ /* 79 0x4F 'O' */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 80 0x50 'P' */
+ 0x78, /* 011110 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x78, /* 011110 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x00, /* 000000 */
+
+ /* 81 0x51 'Q' */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x54, /* 010101 */
+ 0x48, /* 010010 */
+ 0x34, /* 001101 */
+ 0x00, /* 000000 */
+
+ /* 82 0x52 'R' */
+ 0x78, /* 011110 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x78, /* 011110 */
+ 0x50, /* 010100 */
+ 0x48, /* 010010 */
+ 0x44, /* 010001 */
+ 0x00, /* 000000 */
+
+ /* 83 0x53 'S' */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x40, /* 010000 */
+ 0x38, /* 001110 */
+ 0x04, /* 000001 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 84 0x54 'T' */
+ 0x7C, /* 011111 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+
+ /* 85 0x55 'U' */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 86 0x56 'V' */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x28, /* 001010 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+
+ /* 87 0x57 'W' */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x54, /* 010101 */
+ 0x54, /* 010101 */
+ 0x6C, /* 011011 */
+ 0x44, /* 010001 */
+ 0x00, /* 000000 */
+
+ /* 88 0x58 'X' */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x28, /* 001010 */
+ 0x10, /* 000100 */
+ 0x28, /* 001010 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x00, /* 000000 */
+
+ /* 89 0x59 'Y' */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x28, /* 001010 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+
+ /* 90 0x5A 'Z' */
+ 0x7C, /* 011111 */
+ 0x04, /* 000001 */
+ 0x08, /* 000010 */
+ 0x10, /* 000100 */
+ 0x20, /* 001000 */
+ 0x40, /* 010000 */
+ 0x7C, /* 011111 */
+ 0x00, /* 000000 */
+
+ /* 91 0x5B '[' */
+ 0x18, /* 000110 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x18, /* 000110 */
+ 0x00, /* 000000 */
+
+ /* 92 0x5C '\' */
+ 0x40, /* 010000 */
+ 0x20, /* 001000 */
+ 0x20, /* 001000 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x08, /* 000010 */
+ 0x08, /* 000010 */
+ 0x04, /* 000001 */
+
+ /* 93 0x5D ']' */
+ 0x30, /* 001100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x30, /* 001100 */
+ 0x00, /* 000000 */
+
+ /* 94 0x5E '^' */
+ 0x10, /* 000100 */
+ 0x28, /* 001010 */
+ 0x44, /* 010001 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 95 0x5F '_' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x7C, /* 011111 */
+
+ /* 96 0x60 '`' */
+ 0x20, /* 001000 */
+ 0x10, /* 000100 */
+ 0x08, /* 000010 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 97 0x61 'a' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x04, /* 000001 */
+ 0x3C, /* 001111 */
+ 0x44, /* 010001 */
+ 0x3C, /* 001111 */
+ 0x00, /* 000000 */
+
+ /* 98 0x62 'b' */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x58, /* 010110 */
+ 0x64, /* 011001 */
+ 0x44, /* 010001 */
+ 0x64, /* 011001 */
+ 0x58, /* 010110 */
+ 0x00, /* 000000 */
+
+ /* 99 0x63 'c' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x40, /* 010000 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 100 0x64 'd' */
+ 0x04, /* 000001 */
+ 0x04, /* 000001 */
+ 0x34, /* 001101 */
+ 0x4C, /* 010011 */
+ 0x44, /* 010001 */
+ 0x4C, /* 010011 */
+ 0x34, /* 001101 */
+ 0x00, /* 000000 */
+
+ /* 101 0x65 'e' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x7C, /* 011111 */
+ 0x40, /* 010000 */
+ 0x3C, /* 001111 */
+ 0x00, /* 000000 */
+
+ /* 102 0x66 'f' */
+ 0x0C, /* 000011 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+
+ /* 103 0x67 'g' */
+ 0x00, /* 000000 */
+ 0x34, /* 001101 */
+ 0x4C, /* 010011 */
+ 0x44, /* 010001 */
+ 0x4C, /* 010011 */
+ 0x34, /* 001101 */
+ 0x04, /* 000001 */
+ 0x38, /* 001110 */
+
+ /* 104 0x68 'h' */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x78, /* 011110 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x00, /* 000000 */
+
+ /* 105 0x69 'i' */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x30, /* 001100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 106 0x6A 'j' */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x30, /* 001100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x60, /* 011000 */
+
+ /* 107 0x6B 'k' */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x48, /* 010010 */
+ 0x50, /* 010100 */
+ 0x70, /* 011100 */
+ 0x48, /* 010010 */
+ 0x44, /* 010001 */
+ 0x00, /* 000000 */
+
+ /* 108 0x6C 'l' */
+ 0x30, /* 001100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 109 0x6D 'm' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x68, /* 011010 */
+ 0x54, /* 010101 */
+ 0x54, /* 010101 */
+ 0x54, /* 010101 */
+ 0x54, /* 010101 */
+ 0x00, /* 000000 */
+
+ /* 110 0x6E 'n' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x58, /* 010110 */
+ 0x64, /* 011001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x00, /* 000000 */
+
+ /* 111 0x6F 'o' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 112 0x70 'p' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x78, /* 011110 */
+ 0x44, /* 010001 */
+ 0x64, /* 011001 */
+ 0x58, /* 010110 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+
+ /* 113 0x71 'q' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x3C, /* 001111 */
+ 0x44, /* 010001 */
+ 0x4C, /* 010011 */
+ 0x34, /* 001101 */
+ 0x04, /* 000001 */
+ 0x04, /* 000001 */
+
+ /* 114 0x72 'r' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x58, /* 010110 */
+ 0x64, /* 011001 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x00, /* 000000 */
+
+ /* 115 0x73 's' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x3C, /* 001111 */
+ 0x40, /* 010000 */
+ 0x38, /* 001110 */
+ 0x04, /* 000001 */
+ 0x78, /* 011110 */
+ 0x00, /* 000000 */
+
+ /* 116 0x74 't' */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x0C, /* 000011 */
+ 0x00, /* 000000 */
+
+ /* 117 0x75 'u' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x4C, /* 010011 */
+ 0x34, /* 001101 */
+ 0x00, /* 000000 */
+
+ /* 118 0x76 'v' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x28, /* 001010 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+
+ /* 119 0x77 'w' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x54, /* 010101 */
+ 0x54, /* 010101 */
+ 0x54, /* 010101 */
+ 0x54, /* 010101 */
+ 0x28, /* 001010 */
+ 0x00, /* 000000 */
+
+ /* 120 0x78 'x' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x44, /* 010001 */
+ 0x28, /* 001010 */
+ 0x10, /* 000100 */
+ 0x28, /* 001010 */
+ 0x44, /* 010001 */
+ 0x00, /* 000000 */
+
+ /* 121 0x79 'y' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x3C, /* 001111 */
+ 0x04, /* 000001 */
+ 0x38, /* 001110 */
+
+ /* 122 0x7A 'z' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x7C, /* 011111 */
+ 0x08, /* 000010 */
+ 0x10, /* 000100 */
+ 0x20, /* 001000 */
+ 0x7C, /* 011111 */
+ 0x00, /* 000000 */
+
+ /* 123 0x7B '{' */
+ 0x08, /* 000010 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x20, /* 001000 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x08, /* 000010 */
+ 0x00, /* 000000 */
+
+ /* 124 0x7C '|' */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+
+ /* 125 0x7D '}' */
+ 0x20, /* 001000 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x08, /* 000010 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x20, /* 001000 */
+ 0x00, /* 000000 */
+
+ /* 126 0x7E '~' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x20, /* 001000 */
+ 0x54, /* 010101 */
+ 0x08, /* 000010 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 127 0x7F '' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x10, /* 000100 */
+ 0x28, /* 001010 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x7C, /* 011111 */
+ 0x00, /* 000000 */
+
+ /* 128 0x80 '\200' */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x40, /* 010000 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x10, /* 000100 */
+ 0x20, /* 001000 */
+
+ /* 129 0x81 '\201' */
+ 0x00, /* 000000 */
+ 0x28, /* 001010 */
+ 0x00, /* 000000 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x4C, /* 010011 */
+ 0x34, /* 001101 */
+ 0x00, /* 000000 */
+
+ /* 130 0x82 '\202' */
+ 0x18, /* 000110 */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x7C, /* 011111 */
+ 0x40, /* 010000 */
+ 0x3C, /* 001111 */
+ 0x00, /* 000000 */
+
+ /* 131 0x83 '\203' */
+ 0x18, /* 000110 */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x04, /* 000001 */
+ 0x3C, /* 001111 */
+ 0x44, /* 010001 */
+ 0x3C, /* 001111 */
+ 0x00, /* 000000 */
+
+ /* 132 0x84 '\204' */
+ 0x28, /* 001010 */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x04, /* 000001 */
+ 0x3C, /* 001111 */
+ 0x44, /* 010001 */
+ 0x3C, /* 001111 */
+ 0x00, /* 000000 */
+
+ /* 133 0x85 '\205' */
+ 0x18, /* 000110 */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x04, /* 000001 */
+ 0x3C, /* 001111 */
+ 0x44, /* 010001 */
+ 0x3C, /* 001111 */
+ 0x00, /* 000000 */
+
+ /* 134 0x86 '\206' */
+ 0x3C, /* 001111 */
+ 0x18, /* 000110 */
+ 0x38, /* 001110 */
+ 0x04, /* 000001 */
+ 0x3C, /* 001111 */
+ 0x44, /* 010001 */
+ 0x3C, /* 001111 */
+ 0x00, /* 000000 */
+
+ /* 135 0x87 '\207' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x40, /* 010000 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x10, /* 000100 */
+
+ /* 136 0x88 '\210' */
+ 0x18, /* 000110 */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x7C, /* 011111 */
+ 0x40, /* 010000 */
+ 0x3C, /* 001111 */
+ 0x00, /* 000000 */
+
+ /* 137 0x89 '\211' */
+ 0x28, /* 001010 */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x7C, /* 011111 */
+ 0x40, /* 010000 */
+ 0x3C, /* 001111 */
+ 0x00, /* 000000 */
+
+ /* 138 0x8A '\212' */
+ 0x18, /* 000110 */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x7C, /* 011111 */
+ 0x40, /* 010000 */
+ 0x3C, /* 001111 */
+ 0x00, /* 000000 */
+
+ /* 139 0x8B '\213' */
+ 0x28, /* 001010 */
+ 0x00, /* 000000 */
+ 0x30, /* 001100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 140 0x8C '\214' */
+ 0x18, /* 000110 */
+ 0x00, /* 000000 */
+ 0x30, /* 001100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 141 0x8D '\215' */
+ 0x18, /* 000110 */
+ 0x00, /* 000000 */
+ 0x30, /* 001100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 142 0x8E '\216' */
+ 0x44, /* 010001 */
+ 0x10, /* 000100 */
+ 0x28, /* 001010 */
+ 0x44, /* 010001 */
+ 0x7C, /* 011111 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x00, /* 000000 */
+
+ /* 143 0x8F '\217' */
+ 0x30, /* 001100 */
+ 0x48, /* 010010 */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x7C, /* 011111 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x00, /* 000000 */
+
+ /* 144 0x90 '\220' */
+ 0x10, /* 000100 */
+ 0x7C, /* 011111 */
+ 0x40, /* 010000 */
+ 0x78, /* 011110 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x7C, /* 011111 */
+ 0x00, /* 000000 */
+
+ /* 145 0x91 '\221' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x78, /* 011110 */
+ 0x14, /* 000101 */
+ 0x7C, /* 011111 */
+ 0x50, /* 010100 */
+ 0x3C, /* 001111 */
+ 0x00, /* 000000 */
+
+ /* 146 0x92 '\222' */
+ 0x3C, /* 001111 */
+ 0x50, /* 010100 */
+ 0x50, /* 010100 */
+ 0x78, /* 011110 */
+ 0x50, /* 010100 */
+ 0x50, /* 010100 */
+ 0x5C, /* 010111 */
+ 0x00, /* 000000 */
+
+ /* 147 0x93 '\223' */
+ 0x18, /* 000110 */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 148 0x94 '\224' */
+ 0x28, /* 001010 */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 149 0x95 '\225' */
+ 0x18, /* 000110 */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 150 0x96 '\226' */
+ 0x10, /* 000100 */
+ 0x28, /* 001010 */
+ 0x00, /* 000000 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x4C, /* 010011 */
+ 0x34, /* 001101 */
+ 0x00, /* 000000 */
+
+ /* 151 0x97 '\227' */
+ 0x20, /* 001000 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x4C, /* 010011 */
+ 0x34, /* 001101 */
+ 0x00, /* 000000 */
+
+ /* 152 0x98 '\230' */
+ 0x28, /* 001010 */
+ 0x00, /* 000000 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x3C, /* 001111 */
+ 0x04, /* 000001 */
+ 0x38, /* 001110 */
+
+ /* 153 0x99 '\231' */
+ 0x84, /* 100001 */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 154 0x9A '\232' */
+ 0x88, /* 100010 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 155 0x9B '\233' */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x54, /* 010101 */
+ 0x50, /* 010100 */
+ 0x54, /* 010101 */
+ 0x38, /* 001110 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+
+ /* 156 0x9C '\234' */
+ 0x30, /* 001100 */
+ 0x48, /* 010010 */
+ 0x40, /* 010000 */
+ 0x70, /* 011100 */
+ 0x40, /* 010000 */
+ 0x44, /* 010001 */
+ 0x78, /* 011110 */
+ 0x00, /* 000000 */
+
+ /* 157 0x9D '\235' */
+ 0x44, /* 010001 */
+ 0x28, /* 001010 */
+ 0x7C, /* 011111 */
+ 0x10, /* 000100 */
+ 0x7C, /* 011111 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+
+ /* 158 0x9E '\236' */
+ 0x70, /* 011100 */
+ 0x48, /* 010010 */
+ 0x70, /* 011100 */
+ 0x48, /* 010010 */
+ 0x5C, /* 010111 */
+ 0x48, /* 010010 */
+ 0x44, /* 010001 */
+ 0x00, /* 000000 */
+
+ /* 159 0x9F '\237' */
+ 0x0C, /* 000011 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x60, /* 011000 */
+ 0x00, /* 000000 */
+
+ /* 160 0xA0 '\240' */
+ 0x18, /* 000110 */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x04, /* 000001 */
+ 0x3C, /* 001111 */
+ 0x44, /* 010001 */
+ 0x3C, /* 001111 */
+ 0x00, /* 000000 */
+
+ /* 161 0xA1 '\241' */
+ 0x08, /* 000010 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x30, /* 001100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 162 0xA2 '\242' */
+ 0x08, /* 000010 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 163 0xA3 '\243' */
+ 0x08, /* 000010 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x4C, /* 010011 */
+ 0x34, /* 001101 */
+ 0x00, /* 000000 */
+
+ /* 164 0xA4 '\244' */
+ 0x34, /* 001101 */
+ 0x58, /* 010110 */
+ 0x00, /* 000000 */
+ 0x58, /* 010110 */
+ 0x64, /* 011001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x00, /* 000000 */
+
+ /* 165 0xA5 '\245' */
+ 0x58, /* 010110 */
+ 0x44, /* 010001 */
+ 0x64, /* 011001 */
+ 0x54, /* 010101 */
+ 0x4C, /* 010011 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x00, /* 000000 */
+
+ /* 166 0xA6 '\246' */
+ 0x38, /* 001110 */
+ 0x04, /* 000001 */
+ 0x3C, /* 001111 */
+ 0x44, /* 010001 */
+ 0x3C, /* 001111 */
+ 0x00, /* 000000 */
+ 0x7C, /* 011111 */
+ 0x00, /* 000000 */
+
+ /* 167 0xA7 '\247' */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+ 0x7C, /* 011111 */
+ 0x00, /* 000000 */
+
+ /* 168 0xA8 '\250' */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x10, /* 000100 */
+ 0x20, /* 001000 */
+ 0x40, /* 010000 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 169 0xA9 '\251' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x7C, /* 011111 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 170 0xAA '\252' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x7C, /* 011111 */
+ 0x04, /* 000001 */
+ 0x04, /* 000001 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 171 0xAB '\253' */
+ 0x20, /* 001000 */
+ 0x24, /* 001001 */
+ 0x28, /* 001010 */
+ 0x10, /* 000100 */
+ 0x28, /* 001010 */
+ 0x44, /* 010001 */
+ 0x08, /* 000010 */
+ 0x1C, /* 000111 */
+
+ /* 172 0xAC '\254' */
+ 0x20, /* 001000 */
+ 0x24, /* 001001 */
+ 0x28, /* 001010 */
+ 0x10, /* 000100 */
+ 0x28, /* 001010 */
+ 0x58, /* 010110 */
+ 0x3C, /* 001111 */
+ 0x08, /* 000010 */
+
+ /* 173 0xAD '\255' */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+
+ /* 174 0xAE '\256' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x24, /* 001001 */
+ 0x48, /* 010010 */
+ 0x90, /* 100100 */
+ 0x48, /* 010010 */
+ 0x24, /* 001001 */
+ 0x00, /* 000000 */
+
+ /* 175 0xAF '\257' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x90, /* 100100 */
+ 0x48, /* 010010 */
+ 0x24, /* 001001 */
+ 0x48, /* 010010 */
+ 0x90, /* 100100 */
+ 0x00, /* 000000 */
+
+ /* 176 0xB0 '\260' */
+ 0x10, /* 000100 */
+ 0x44, /* 010001 */
+ 0x10, /* 000100 */
+ 0x44, /* 010001 */
+ 0x10, /* 000100 */
+ 0x44, /* 010001 */
+ 0x10, /* 000100 */
+ 0x44, /* 010001 */
+
+ /* 177 0xB1 '\261' */
+ 0xA8, /* 101010 */
+ 0x54, /* 010101 */
+ 0xA8, /* 101010 */
+ 0x54, /* 010101 */
+ 0xA8, /* 101010 */
+ 0x54, /* 010101 */
+ 0xA8, /* 101010 */
+ 0x54, /* 010101 */
+
+ /* 178 0xB2 '\262' */
+ 0xDC, /* 110111 */
+ 0x74, /* 011101 */
+ 0xDC, /* 110111 */
+ 0x74, /* 011101 */
+ 0xDC, /* 110111 */
+ 0x74, /* 011101 */
+ 0xDC, /* 110111 */
+ 0x74, /* 011101 */
+
+ /* 179 0xB3 '\263' */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+
+ /* 180 0xB4 '\264' */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0xF0, /* 111100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+
+ /* 181 0xB5 '\265' */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0xF0, /* 111100 */
+ 0x10, /* 000100 */
+ 0xF0, /* 111100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+
+ /* 182 0xB6 '\266' */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0xE8, /* 111010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+
+ /* 183 0xB7 '\267' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0xF8, /* 111110 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+
+ /* 184 0xB8 '\270' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0xF0, /* 111100 */
+ 0x10, /* 000100 */
+ 0xF0, /* 111100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+
+ /* 185 0xB9 '\271' */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0xE8, /* 111010 */
+ 0x08, /* 000010 */
+ 0xE8, /* 111010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+
+ /* 186 0xBA '\272' */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+
+ /* 187 0xBB '\273' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0xF8, /* 111110 */
+ 0x08, /* 000010 */
+ 0xE8, /* 111010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+
+ /* 188 0xBC '\274' */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0xE8, /* 111010 */
+ 0x08, /* 000010 */
+ 0xF8, /* 111110 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 189 0xBD '\275' */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0xF8, /* 111110 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 190 0xBE '\276' */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0xF0, /* 111100 */
+ 0x10, /* 000100 */
+ 0xF0, /* 111100 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 191 0xBF '\277' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0xF0, /* 111100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+
+ /* 192 0xC0 '\300' */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x1C, /* 000111 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 193 0xC1 '\301' */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0xFC, /* 111111 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 194 0xC2 '\302' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0xFC, /* 111111 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+
+ /* 195 0xC3 '\303' */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x1C, /* 000111 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+
+ /* 196 0xC4 '\304' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0xFC, /* 111111 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 197 0xC5 '\305' */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0xFC, /* 111111 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+
+ /* 198 0xC6 '\306' */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x1C, /* 000111 */
+ 0x10, /* 000100 */
+ 0x1C, /* 000111 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+
+ /* 199 0xC7 '\307' */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x2C, /* 001011 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+
+ /* 200 0xC8 '\310' */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x2C, /* 001011 */
+ 0x20, /* 001000 */
+ 0x3C, /* 001111 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 201 0xC9 '\311' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x3C, /* 001111 */
+ 0x20, /* 001000 */
+ 0x2C, /* 001011 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+
+ /* 202 0xCA '\312' */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0xEC, /* 111011 */
+ 0x00, /* 000000 */
+ 0xFC, /* 111111 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 203 0xCB '\313' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0xFC, /* 111111 */
+ 0x00, /* 000000 */
+ 0xEC, /* 111011 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+
+ /* 204 0xCC '\314' */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x2C, /* 001011 */
+ 0x20, /* 001000 */
+ 0x2C, /* 001011 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+
+ /* 205 0xCD '\315' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0xFC, /* 111111 */
+ 0x00, /* 000000 */
+ 0xFC, /* 111111 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 206 0xCE '\316' */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0xEC, /* 111011 */
+ 0x00, /* 000000 */
+ 0xEC, /* 111011 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+
+ /* 207 0xCF '\317' */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0xFC, /* 111111 */
+ 0x00, /* 000000 */
+ 0xFC, /* 111111 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 208 0xD0 '\320' */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0xFC, /* 111111 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 209 0xD1 '\321' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0xFC, /* 111111 */
+ 0x00, /* 000000 */
+ 0xFC, /* 111111 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+
+ /* 210 0xD2 '\322' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0xFC, /* 111111 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+
+ /* 211 0xD3 '\323' */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x3C, /* 001111 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 212 0xD4 '\324' */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x1C, /* 000111 */
+ 0x10, /* 000100 */
+ 0x1C, /* 000111 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 213 0xD5 '\325' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x1C, /* 000111 */
+ 0x10, /* 000100 */
+ 0x1C, /* 000111 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+
+ /* 214 0xD6 '\326' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x3C, /* 001111 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+
+ /* 215 0xD7 '\327' */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0xFC, /* 111111 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+
+ /* 216 0xD8 '\330' */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0xFC, /* 111111 */
+ 0x10, /* 000100 */
+ 0xFC, /* 111111 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+
+ /* 217 0xD9 '\331' */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0xF0, /* 111100 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 218 0xDA '\332' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x1C, /* 000111 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+
+ /* 219 0xDB '\333' */
+ 0xFC, /* 111111 */
+ 0xFC, /* 111111 */
+ 0xFC, /* 111111 */
+ 0xFC, /* 111111 */
+ 0xFC, /* 111111 */
+ 0xFC, /* 111111 */
+ 0xFC, /* 111111 */
+ 0xFC, /* 111111 */
+
+ /* 220 0xDC '\334' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0xFC, /* 111111 */
+ 0xFC, /* 111111 */
+ 0xFC, /* 111111 */
+ 0xFC, /* 111111 */
+
+ /* 221 0xDD '\335' */
+ 0xE0, /* 111000 */
+ 0xE0, /* 111000 */
+ 0xE0, /* 111000 */
+ 0xE0, /* 111000 */
+ 0xE0, /* 111000 */
+ 0xE0, /* 111000 */
+ 0xE0, /* 111000 */
+ 0xE0, /* 111000 */
+
+ /* 222 0xDE '\336' */
+ 0x1C, /* 000111 */
+ 0x1C, /* 000111 */
+ 0x1C, /* 000111 */
+ 0x1C, /* 000111 */
+ 0x1C, /* 000111 */
+ 0x1C, /* 000111 */
+ 0x1C, /* 000111 */
+ 0x1C, /* 000111 */
+
+ /* 223 0xDF '\337' */
+ 0xFC, /* 111111 */
+ 0xFC, /* 111111 */
+ 0xFC, /* 111111 */
+ 0xFC, /* 111111 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 224 0xE0 '\340' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x34, /* 001101 */
+ 0x48, /* 010010 */
+ 0x48, /* 010010 */
+ 0x48, /* 010010 */
+ 0x34, /* 001101 */
+ 0x00, /* 000000 */
+
+ /* 225 0xE1 '\341' */
+ 0x24, /* 001001 */
+ 0x44, /* 010001 */
+ 0x48, /* 010010 */
+ 0x48, /* 010010 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x58, /* 010110 */
+ 0x40, /* 010000 */
+
+ /* 226 0xE2 '\342' */
+ 0x7C, /* 011111 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x00, /* 000000 */
+
+ /* 227 0xE3 '\343' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x7C, /* 011111 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x28, /* 001010 */
+ 0x00, /* 000000 */
+
+ /* 228 0xE4 '\344' */
+ 0x7C, /* 011111 */
+ 0x24, /* 001001 */
+ 0x10, /* 000100 */
+ 0x08, /* 000010 */
+ 0x10, /* 000100 */
+ 0x24, /* 001001 */
+ 0x7C, /* 011111 */
+ 0x00, /* 000000 */
+
+ /* 229 0xE5 '\345' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x3C, /* 001111 */
+ 0x48, /* 010010 */
+ 0x48, /* 010010 */
+ 0x48, /* 010010 */
+ 0x30, /* 001100 */
+ 0x00, /* 000000 */
+
+ /* 230 0xE6 '\346' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x48, /* 010010 */
+ 0x48, /* 010010 */
+ 0x48, /* 010010 */
+ 0x48, /* 010010 */
+ 0x74, /* 011101 */
+ 0x40, /* 010000 */
+
+ /* 231 0xE7 '\347' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x7C, /* 011111 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x0C, /* 000011 */
+ 0x00, /* 000000 */
+
+ /* 232 0xE8 '\350' */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 233 0xE9 '\351' */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x7C, /* 011111 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 234 0xEA '\352' */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x28, /* 001010 */
+ 0x6C, /* 011011 */
+ 0x00, /* 000000 */
+
+ /* 235 0xEB '\353' */
+ 0x18, /* 000110 */
+ 0x20, /* 001000 */
+ 0x18, /* 000110 */
+ 0x24, /* 001001 */
+ 0x24, /* 001001 */
+ 0x24, /* 001001 */
+ 0x18, /* 000110 */
+ 0x00, /* 000000 */
+
+ /* 236 0xEC '\354' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x54, /* 010101 */
+ 0x54, /* 010101 */
+ 0x54, /* 010101 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 237 0xED '\355' */
+ 0x00, /* 000000 */
+ 0x04, /* 000001 */
+ 0x38, /* 001110 */
+ 0x54, /* 010101 */
+ 0x54, /* 010101 */
+ 0x38, /* 001110 */
+ 0x40, /* 010000 */
+ 0x00, /* 000000 */
+
+ /* 238 0xEE '\356' */
+ 0x3C, /* 001111 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x38, /* 001110 */
+ 0x40, /* 010000 */
+ 0x40, /* 010000 */
+ 0x3C, /* 001111 */
+ 0x00, /* 000000 */
+
+ /* 239 0xEF '\357' */
+ 0x38, /* 001110 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x44, /* 010001 */
+ 0x00, /* 000000 */
+
+ /* 240 0xF0 '\360' */
+ 0x00, /* 000000 */
+ 0xFC, /* 111111 */
+ 0x00, /* 000000 */
+ 0xFC, /* 111111 */
+ 0x00, /* 000000 */
+ 0xFC, /* 111111 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 241 0xF1 '\361' */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x7C, /* 011111 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x7C, /* 011111 */
+ 0x00, /* 000000 */
+
+ /* 242 0xF2 '\362' */
+ 0x20, /* 001000 */
+ 0x10, /* 000100 */
+ 0x08, /* 000010 */
+ 0x10, /* 000100 */
+ 0x20, /* 001000 */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 243 0xF3 '\363' */
+ 0x08, /* 000010 */
+ 0x10, /* 000100 */
+ 0x20, /* 001000 */
+ 0x10, /* 000100 */
+ 0x08, /* 000010 */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 244 0xF4 '\364' */
+ 0x0C, /* 000011 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+
+ /* 245 0xF5 '\365' */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x10, /* 000100 */
+ 0x60, /* 011000 */
+
+ /* 246 0xF6 '\366' */
+ 0x00, /* 000000 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x7C, /* 011111 */
+ 0x00, /* 000000 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 247 0xF7 '\367' */
+ 0x00, /* 000000 */
+ 0x20, /* 001000 */
+ 0x54, /* 010101 */
+ 0x08, /* 000010 */
+ 0x20, /* 001000 */
+ 0x54, /* 010101 */
+ 0x08, /* 000010 */
+ 0x00, /* 000000 */
+
+ /* 248 0xF8 '\370' */
+ 0x30, /* 001100 */
+ 0x48, /* 010010 */
+ 0x48, /* 010010 */
+ 0x30, /* 001100 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 249 0xF9 '\371' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x10, /* 000100 */
+ 0x38, /* 001110 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 250 0xFA '\372' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x10, /* 000100 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 251 0xFB '\373' */
+ 0x04, /* 000001 */
+ 0x08, /* 000010 */
+ 0x08, /* 000010 */
+ 0x50, /* 010100 */
+ 0x50, /* 010100 */
+ 0x20, /* 001000 */
+ 0x20, /* 001000 */
+ 0x00, /* 000000 */
+
+ /* 252 0xFC '\374' */
+ 0x60, /* 011000 */
+ 0x50, /* 010100 */
+ 0x50, /* 010100 */
+ 0x50, /* 010100 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 253 0xFD '\375' */
+ 0x60, /* 011000 */
+ 0x10, /* 000100 */
+ 0x20, /* 001000 */
+ 0x70, /* 011100 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+
+ /* 254 0xFE '\376' */
+ 0x00, /* 000000 */
+ 0x38, /* 001110 */
+ 0x38, /* 001110 */
+ 0x38, /* 001110 */
+ 0x38, /* 001110 */
+ 0x38, /* 001110 */
+ 0x38, /* 001110 */
+ 0x00, /* 000000 */
+
+ /* 255 0xFF '\377' */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+ 0x00, /* 000000 */
+} };
+
+const struct font_desc font_6x8 = {
+ .idx = FONT6x8_IDX,
+ .name = "6x8",
+ .width = 6,
+ .height = 8,
+ .charcount = 256,
+ .data = fontdata_6x8.data,
+ .pref = 0,
+};
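
Taken together, the two objects above follow the pattern every lib/fonts table uses: a raw bitmap (fontdata_6x8) storing each glyph as height consecutive bytes, one byte per pixel row with the leftmost pixel in the most significant bit, wrapped by a struct font_desc that names and sizes it. A minimal sketch of how a consumer could look this font up and read one glyph row — find_font() is the lookup helper declared in <linux/font.h>, while glyph_row() is a hypothetical illustration, not kernel code:

	#include <linux/font.h>

	/* Hypothetical helper: return one row of character c's bitmap.
	 * Glyphs occupy font->height consecutive bytes, so row r of
	 * character c sits at offset c * height + r. Only the top
	 * font->width bits of each byte are meaningful pixels.
	 */
	static u8 glyph_row(const struct font_desc *font, unsigned char c,
			    unsigned int row)
	{
		return ((const u8 *)font->data)[c * font->height + row];
	}

	/* e.g. if find_font("6x8") returns this descriptor,
	 * glyph_row(font, 'T', 0) yields 0x7C — the 011111 top bar
	 * of 'T' in the table above.
	 */
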
diff --git a/lib/fonts/font_7x14.c b/lib/fonts/font_7x14.c
new file mode 100644
index 000000000..a2f561c9f
--- /dev/null
+++ b/lib/fonts/font_7x14.c
@@ -0,0 +1,4119 @@
+// SPDX-License-Identifier: GPL-2.0
+/**************************************/
+/* this file adapted from font_8x16.c */
+/* by Jurriaan Kalkman 05-2005        */
+/**************************************/
+
+#include <linux/font.h>
+
+#define FONTDATAMAX 3584
+
+static const struct font_data fontdata_7x14 = {
+ { 0, 0, FONTDATAMAX, 0 }, {
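+	/*
+	 * FONTDATAMAX = 256 glyphs x 14 rows x 1 byte per row = 3584.
+	 * Each row byte stores its 7 pixels MSB-first; the low bit is
+	 * unused. The { 0, 0, FONTDATAMAX, 0 } header fills font_data's
+	 * extra[4] words, presumably so fbcon-style FNT*() accessors can
+	 * index backwards from the bitmap (the third word holding the
+	 * data size).
+	 */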
+ /* 0 0x00 '^@' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 1 0x01 '^A' */
+ 0x00, /* 0000000 */
+ 0x7c, /* 0111110 */
+ 0x82, /* 1000001 */
+ 0xaa, /* 1010101 */
+ 0x82, /* 1000001 */
+ 0x82, /* 1000001 */
+ 0xba, /* 1011101 */
+ 0x92, /* 1001001 */
+ 0x82, /* 1000001 */
+ 0x7c, /* 0111110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 2 0x02 '^B' */
+ 0x00, /* 0000000 */
+ 0x7c, /* 0111110 */
+ 0xfe, /* 1111111 */
+ 0xd6, /* 1101011 */
+ 0xfe, /* 1111111 */
+ 0xfe, /* 1111111 */
+ 0xc6, /* 1100011 */
+ 0xee, /* 1110111 */
+ 0xfe, /* 1111111 */
+ 0xfe, /* 1111111 */
+ 0x7c, /* 0111110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 3 0x03 '^C' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x6c, /* 0110110 */
+ 0x7c, /* 0111110 */
+ 0xfe, /* 1111111 */
+ 0x7c, /* 0111110 */
+ 0x38, /* 0011100 */
+ 0x18, /* 0001100 */
+ 0x10, /* 0001000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 4 0x04 '^D' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x10, /* 0001000 */
+ 0x38, /* 0011100 */
+ 0x7c, /* 0111110 */
+ 0xfe, /* 1111111 */
+ 0x7c, /* 0111110 */
+ 0x38, /* 0011100 */
+ 0x10, /* 0001000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 5 0x05 '^E' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x38, /* 0011100 */
+ 0x38, /* 0011100 */
+ 0x38, /* 0011100 */
+ 0xee, /* 1110111 */
+ 0xee, /* 1110111 */
+ 0xee, /* 1110111 */
+ 0x10, /* 0001000 */
+ 0x10, /* 0001000 */
+ 0x38, /* 0011100 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 6 0x06 '^F' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x10, /* 0001000 */
+ 0x38, /* 0011100 */
+ 0x7c, /* 0111110 */
+ 0xfe, /* 1111111 */
+ 0xfe, /* 1111111 */
+ 0x7c, /* 0111110 */
+ 0x10, /* 0001000 */
+ 0x10, /* 0001000 */
+ 0x38, /* 0011100 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 7 0x07 '^G' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x18, /* 0001100 */
+ 0x3c, /* 0011110 */
+ 0x3c, /* 0011110 */
+ 0x18, /* 0001100 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 8 0x08 '^H' */
+ 0xfe, /* 1111111 */
+ 0xfe, /* 1111111 */
+ 0xfe, /* 1111111 */
+ 0xfe, /* 1111111 */
+ 0xfe, /* 1111111 */
+ 0xe6, /* 1110011 */
+ 0xc2, /* 1100001 */
+ 0xc2, /* 1100001 */
+ 0xe6, /* 1110011 */
+ 0xfe, /* 1111111 */
+ 0xfe, /* 1111111 */
+ 0xfe, /* 1111111 */
+ 0xfe, /* 1111111 */
+ 0xfe, /* 1111111 */
+
+ /* 9 0x09 '^I' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x38, /* 0011100 */
+ 0x6c, /* 0110110 */
+ 0x44, /* 0100010 */
+ 0x6c, /* 0110110 */
+ 0x38, /* 0011100 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 10 0x0a '^J' */
+ 0xfe, /* 1111111 */
+ 0xfe, /* 1111111 */
+ 0xfe, /* 1111111 */
+ 0xfe, /* 1111111 */
+ 0xfe, /* 1111111 */
+ 0xc6, /* 1100011 */
+ 0x92, /* 1001001 */
+ 0xba, /* 1011101 */
+ 0x92, /* 1001001 */
+ 0xc6, /* 1100011 */
+ 0xfe, /* 1111111 */
+ 0xfe, /* 1111111 */
+ 0xfe, /* 1111111 */
+ 0xfe, /* 1111111 */
+
+ /* 11 0x0b '^K' */
+ 0x00, /* 0000000 */
+ 0x1e, /* 0001111 */
+ 0x0e, /* 0000111 */
+ 0x1a, /* 0001101 */
+ 0x1a, /* 0001101 */
+ 0x78, /* 0111100 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0x78, /* 0111100 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 12 0x0c '^L' */
+ 0x00, /* 0000000 */
+ 0x3c, /* 0011110 */
+ 0x66, /* 0110011 */
+ 0x66, /* 0110011 */
+ 0x66, /* 0110011 */
+ 0x66, /* 0110011 */
+ 0x3c, /* 0011110 */
+ 0x18, /* 0001100 */
+ 0x7e, /* 0111111 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 13 0x0d '^M' */
+ 0x00, /* 0000000 */
+ 0x3e, /* 0011111 */
+ 0x36, /* 0011011 */
+ 0x3e, /* 0011111 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x70, /* 0111000 */
+ 0xf0, /* 1111000 */
+ 0xe0, /* 1110000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 14 0x0e '^N' */
+ 0x00, /* 0000000 */
+ 0x7e, /* 0111111 */
+ 0x66, /* 0110011 */
+ 0x7e, /* 0111111 */
+ 0x66, /* 0110011 */
+ 0x66, /* 0110011 */
+ 0x66, /* 0110011 */
+ 0x66, /* 0110011 */
+ 0x6e, /* 0110111 */
+ 0xee, /* 1110111 */
+ 0xec, /* 1110110 */
+ 0xc0, /* 1100000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 15 0x0f '^O' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x10, /* 0001000 */
+ 0x10, /* 0001000 */
+ 0xd6, /* 1101011 */
+ 0x38, /* 0011100 */
+ 0xee, /* 1110111 */
+ 0x38, /* 0011100 */
+ 0xd6, /* 1101011 */
+ 0x10, /* 0001000 */
+ 0x10, /* 0001000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 16 0x10 '^P' */
+ 0x00, /* 0000000 */
+ 0x80, /* 1000000 */
+ 0xc0, /* 1100000 */
+ 0xe0, /* 1110000 */
+ 0xf0, /* 1111000 */
+ 0xfc, /* 1111110 */
+ 0xf0, /* 1111000 */
+ 0xe0, /* 1110000 */
+ 0xc0, /* 1100000 */
+ 0x80, /* 1000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 17 0x11 '^Q' */
+ 0x00, /* 0000000 */
+ 0x04, /* 0000010 */
+ 0x0c, /* 0000110 */
+ 0x1c, /* 0001110 */
+ 0x3c, /* 0011110 */
+ 0xfc, /* 1111110 */
+ 0x3c, /* 0011110 */
+ 0x1c, /* 0001110 */
+ 0x0c, /* 0000110 */
+ 0x04, /* 0000010 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 18 0x12 '^R' */
+ 0x00, /* 0000000 */
+ 0x18, /* 0001100 */
+ 0x3c, /* 0011110 */
+ 0x7e, /* 0111111 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x7e, /* 0111111 */
+ 0x3c, /* 0011110 */
+ 0x18, /* 0001100 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 19 0x13 '^S' */
+ 0x00, /* 0000000 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x00, /* 0000000 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 20 0x14 '^T' */
+ 0x00, /* 0000000 */
+ 0x7e, /* 0111111 */
+ 0xd4, /* 1101010 */
+ 0xd4, /* 1101010 */
+ 0xd4, /* 1101010 */
+ 0x74, /* 0111010 */
+ 0x14, /* 0001010 */
+ 0x14, /* 0001010 */
+ 0x14, /* 0001010 */
+ 0x14, /* 0001010 */
+ 0x16, /* 0001011 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 21 0x15 '^U' */
+ 0x78, /* 0111100 */
+ 0xcc, /* 1100110 */
+ 0x60, /* 0110000 */
+ 0x38, /* 0011100 */
+ 0x6c, /* 0110110 */
+ 0xc6, /* 1100011 */
+ 0xc6, /* 1100011 */
+ 0x6c, /* 0110110 */
+ 0x38, /* 0011100 */
+ 0x18, /* 0001100 */
+ 0xcc, /* 1100110 */
+ 0x78, /* 0111100 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 22 0x16 '^V' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0xfc, /* 1111110 */
+ 0xfc, /* 1111110 */
+ 0xfc, /* 1111110 */
+ 0xfc, /* 1111110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 23 0x17 '^W' */
+ 0x00, /* 0000000 */
+ 0x18, /* 0001100 */
+ 0x3c, /* 0011110 */
+ 0x7e, /* 0111111 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x7e, /* 0111111 */
+ 0x3c, /* 0011110 */
+ 0x18, /* 0001100 */
+ 0x7e, /* 0111111 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 24 0x18 '^X' */
+ 0x00, /* 0000000 */
+ 0x18, /* 0001100 */
+ 0x3c, /* 0011110 */
+ 0x7e, /* 0111111 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 25 0x19 '^Y' */
+ 0x00, /* 0000000 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x7e, /* 0111111 */
+ 0x3c, /* 0011110 */
+ 0x18, /* 0001100 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 26 0x1a '^Z' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x30, /* 0011000 */
+ 0x18, /* 0001100 */
+ 0xfc, /* 1111110 */
+ 0x18, /* 0001100 */
+ 0x30, /* 0011000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 27 0x1b '^[' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x30, /* 0011000 */
+ 0x60, /* 0110000 */
+ 0xfc, /* 1111110 */
+ 0x60, /* 0110000 */
+ 0x30, /* 0011000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 28 0x1c '^\' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0xc0, /* 1100000 */
+ 0xc0, /* 1100000 */
+ 0xc0, /* 1100000 */
+ 0xfc, /* 1111110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 29 0x1d '^]' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x28, /* 0010100 */
+ 0x6c, /* 0110110 */
+ 0xfe, /* 1111111 */
+ 0x6c, /* 0110110 */
+ 0x28, /* 0010100 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 30 0x1e '^^' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x78, /* 0111100 */
+ 0x78, /* 0111100 */
+ 0xfc, /* 1111110 */
+ 0xfc, /* 1111110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 31 0x1f '^_' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0xfc, /* 1111110 */
+ 0xfc, /* 1111110 */
+ 0x78, /* 0111100 */
+ 0x78, /* 0111100 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 32 0x20 ' ' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 33 0x21 '!' */
+ 0x00, /* 0000000 */
+ 0x18, /* 0001100 */
+ 0x3c, /* 0011110 */
+ 0x3c, /* 0011110 */
+ 0x3c, /* 0011110 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x00, /* 0000000 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 34 0x22 '"' */
+ 0x00, /* 0000000 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x28, /* 0010100 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 35 0x23 '#' */
+ 0x00, /* 0000000 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0xfe, /* 1111111 */
+ 0xfe, /* 1111111 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0xfe, /* 1111111 */
+ 0xfe, /* 1111111 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 36 0x24 '$' */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x78, /* 0111100 */
+ 0xcc, /* 1100110 */
+ 0xc4, /* 1100010 */
+ 0xc0, /* 1100000 */
+ 0x78, /* 0111100 */
+ 0x0c, /* 0000110 */
+ 0x8c, /* 1000110 */
+ 0xcc, /* 1100110 */
+ 0x78, /* 0111100 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x00, /* 0000000 */
+
+ /* 37 0x25 '%' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0xc0, /* 1100000 */
+ 0xc4, /* 1100010 */
+ 0x0c, /* 0000110 */
+ 0x18, /* 0001100 */
+ 0x30, /* 0011000 */
+ 0x60, /* 0110000 */
+ 0xcc, /* 1100110 */
+ 0x8c, /* 1000110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 38 0x26 '&' */
+ 0x00, /* 0000000 */
+ 0x38, /* 0011100 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x38, /* 0011100 */
+ 0x78, /* 0111100 */
+ 0xde, /* 1101111 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xdc, /* 1101110 */
+ 0x76, /* 0111011 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 39 0x27 ''' */
+ 0x00, /* 0000000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x60, /* 0110000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 40 0x28 '(' */
+ 0x00, /* 0000000 */
+ 0x0c, /* 0000110 */
+ 0x18, /* 0001100 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x18, /* 0001100 */
+ 0x0c, /* 0000110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 41 0x29 ')' */
+ 0x00, /* 0000000 */
+ 0x30, /* 0011000 */
+ 0x18, /* 0001100 */
+ 0x0c, /* 0000110 */
+ 0x0c, /* 0000110 */
+ 0x0c, /* 0000110 */
+ 0x0c, /* 0000110 */
+ 0x0c, /* 0000110 */
+ 0x0c, /* 0000110 */
+ 0x18, /* 0001100 */
+ 0x30, /* 0011000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 42 0x2a '*' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x6c, /* 0110110 */
+ 0x38, /* 0011100 */
+ 0xfe, /* 1111111 */
+ 0x38, /* 0011100 */
+ 0x6c, /* 0110110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 43 0x2b '+' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x10, /* 0001000 */
+ 0x10, /* 0001000 */
+ 0x7c, /* 0111110 */
+ 0x10, /* 0001000 */
+ 0x10, /* 0001000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 44 0x2c ',' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x30, /* 0011000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 45 0x2d '-' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0xfc, /* 1111110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 46 0x2e '.' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 47 0x2f '/' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x04, /* 0000010 */
+ 0x0c, /* 0000110 */
+ 0x18, /* 0001100 */
+ 0x30, /* 0011000 */
+ 0x60, /* 0110000 */
+ 0xc0, /* 1100000 */
+ 0x80, /* 1000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 48 0x30 '0' */
+ 0x00, /* 0000000 */
+ 0x30, /* 0011000 */
+ 0x78, /* 0111100 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xdc, /* 1101110 */
+ 0xec, /* 1110110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0x78, /* 0111100 */
+ 0x30, /* 0011000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 49 0x31 '1' */
+ 0x00, /* 0000000 */
+ 0x18, /* 0001100 */
+ 0x38, /* 0011100 */
+ 0x78, /* 0111100 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x7c, /* 0111110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 50 0x32 '2' */
+ 0x00, /* 0000000 */
+ 0x78, /* 0111100 */
+ 0xcc, /* 1100110 */
+ 0x0c, /* 0000110 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x30, /* 0011000 */
+ 0x60, /* 0110000 */
+ 0xc0, /* 1100000 */
+ 0xcc, /* 1100110 */
+ 0xfc, /* 1111110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 51 0x33 '3' */
+ 0x00, /* 0000000 */
+ 0x78, /* 0111100 */
+ 0xcc, /* 1100110 */
+ 0x0c, /* 0000110 */
+ 0x0c, /* 0000110 */
+ 0x38, /* 0011100 */
+ 0x0c, /* 0000110 */
+ 0x0c, /* 0000110 */
+ 0x0c, /* 0000110 */
+ 0xcc, /* 1100110 */
+ 0x78, /* 0111100 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 52 0x34 '4' */
+ 0x00, /* 0000000 */
+ 0x0c, /* 0000110 */
+ 0x1c, /* 0001110 */
+ 0x3c, /* 0011110 */
+ 0x6c, /* 0110110 */
+ 0xcc, /* 1100110 */
+ 0xfe, /* 1111111 */
+ 0x0c, /* 0000110 */
+ 0x0c, /* 0000110 */
+ 0x0c, /* 0000110 */
+ 0x0c, /* 0000110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 53 0x35 '5' */
+ 0x00, /* 0000000 */
+ 0xfc, /* 1111110 */
+ 0xc0, /* 1100000 */
+ 0xc0, /* 1100000 */
+ 0xc0, /* 1100000 */
+ 0xf8, /* 1111100 */
+ 0x0c, /* 0000110 */
+ 0x0c, /* 0000110 */
+ 0x0c, /* 0000110 */
+ 0xcc, /* 1100110 */
+ 0x78, /* 0111100 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 54 0x36 '6' */
+ 0x00, /* 0000000 */
+ 0x30, /* 0011000 */
+ 0x60, /* 0110000 */
+ 0xc0, /* 1100000 */
+ 0xc0, /* 1100000 */
+ 0xf8, /* 1111100 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0x78, /* 0111100 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 55 0x37 '7' */
+ 0x00, /* 0000000 */
+ 0xfc, /* 1111110 */
+ 0xcc, /* 1100110 */
+ 0x0c, /* 0000110 */
+ 0x0c, /* 0000110 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 56 0x38 '8' */
+ 0x00, /* 0000000 */
+ 0x78, /* 0111100 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0x78, /* 0111100 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0x78, /* 0111100 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 57 0x39 '9' */
+ 0x00, /* 0000000 */
+ 0x78, /* 0111100 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0x7c, /* 0111110 */
+ 0x0c, /* 0000110 */
+ 0x0c, /* 0000110 */
+ 0x0c, /* 0000110 */
+ 0x18, /* 0001100 */
+ 0x70, /* 0111000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 58 0x3a ':' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 59 0x3b ';' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x30, /* 0011000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 60 0x3c '<' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x04, /* 0000010 */
+ 0x0c, /* 0000110 */
+ 0x18, /* 0001100 */
+ 0x30, /* 0011000 */
+ 0x60, /* 0110000 */
+ 0x30, /* 0011000 */
+ 0x18, /* 0001100 */
+ 0x0c, /* 0000110 */
+ 0x04, /* 0000010 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 61 0x3d '=' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x7c, /* 0111110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x7c, /* 0111110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 62 0x3e '>' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x40, /* 0100000 */
+ 0x60, /* 0110000 */
+ 0x30, /* 0011000 */
+ 0x18, /* 0001100 */
+ 0x0c, /* 0000110 */
+ 0x18, /* 0001100 */
+ 0x30, /* 0011000 */
+ 0x60, /* 0110000 */
+ 0x40, /* 0100000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 63 0x3f '?' */
+ 0x00, /* 0000000 */
+ 0x78, /* 0111100 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0x18, /* 0001100 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x00, /* 0000000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 64 0x40 '@' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x78, /* 0111100 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xdc, /* 1101110 */
+ 0xdc, /* 1101110 */
+ 0xd8, /* 1101100 */
+ 0xc0, /* 1100000 */
+ 0x78, /* 0111100 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 65 0x41 'A' */
+ 0x00, /* 0000000 */
+ 0x30, /* 0011000 */
+ 0x78, /* 0111100 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xfc, /* 1111110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 66 0x42 'B' */
+ 0x00, /* 0000000 */
+ 0xf8, /* 1111100 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x78, /* 0111100 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0xf8, /* 1111100 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 67 0x43 'C' */
+ 0x00, /* 0000000 */
+ 0x38, /* 0011100 */
+ 0x6c, /* 0110110 */
+ 0xc4, /* 1100010 */
+ 0xc0, /* 1100000 */
+ 0xc0, /* 1100000 */
+ 0xc0, /* 1100000 */
+ 0xc0, /* 1100000 */
+ 0xc4, /* 1100010 */
+ 0x6c, /* 0110110 */
+ 0x38, /* 0011100 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 68 0x44 'D' */
+ 0x00, /* 0000000 */
+ 0xf0, /* 1111000 */
+ 0xd8, /* 1101100 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xd8, /* 1101100 */
+ 0xf0, /* 1111000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 69 0x45 'E' */
+ 0x00, /* 0000000 */
+ 0x7c, /* 0111110 */
+ 0x6c, /* 0110110 */
+ 0x64, /* 0110010 */
+ 0x68, /* 0110100 */
+ 0x78, /* 0111100 */
+ 0x68, /* 0110100 */
+ 0x60, /* 0110000 */
+ 0x64, /* 0110010 */
+ 0x6c, /* 0110110 */
+ 0x7c, /* 0111110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 70 0x46 'F' */
+ 0x00, /* 0000000 */
+ 0x7c, /* 0111110 */
+ 0x64, /* 0110010 */
+ 0x60, /* 0110000 */
+ 0x68, /* 0110100 */
+ 0x78, /* 0111100 */
+ 0x68, /* 0110100 */
+ 0x60, /* 0110000 */
+ 0x60, /* 0110000 */
+ 0x60, /* 0110000 */
+ 0x70, /* 0111000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 71 0x47 'G' */
+ 0x00, /* 0000000 */
+ 0x38, /* 0011100 */
+ 0x6c, /* 0110110 */
+ 0xc4, /* 1100010 */
+ 0xc0, /* 1100000 */
+ 0xc0, /* 1100000 */
+ 0xdc, /* 1101110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0x6c, /* 0110110 */
+ 0x34, /* 0011010 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 72 0x48 'H' */
+ 0x00, /* 0000000 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xfc, /* 1111110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 73 0x49 'I' */
+ 0x00, /* 0000000 */
+ 0x3c, /* 0011110 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x3c, /* 0011110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 74 0x4a 'J' */
+ 0x00, /* 0000000 */
+ 0x1c, /* 0001110 */
+ 0x0c, /* 0000110 */
+ 0x0c, /* 0000110 */
+ 0x0c, /* 0000110 */
+ 0x0c, /* 0000110 */
+ 0x0c, /* 0000110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0x78, /* 0111100 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 75 0x4b 'K' */
+ 0x00, /* 0000000 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xd8, /* 1101100 */
+ 0xf0, /* 1111000 */
+ 0xf0, /* 1111000 */
+ 0xd8, /* 1101100 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 76 0x4c 'L' */
+ 0x00, /* 0000000 */
+ 0xc0, /* 1100000 */
+ 0xc0, /* 1100000 */
+ 0xc0, /* 1100000 */
+ 0xc0, /* 1100000 */
+ 0xc0, /* 1100000 */
+ 0xc0, /* 1100000 */
+ 0xc0, /* 1100000 */
+ 0xc4, /* 1100010 */
+ 0xcc, /* 1100110 */
+ 0xfc, /* 1111110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 77 0x4d 'M' */
+ 0x00, /* 0000000 */
+ 0xc6, /* 1100011 */
+ 0xee, /* 1110111 */
+ 0xfe, /* 1111111 */
+ 0xfe, /* 1111111 */
+ 0xd6, /* 1101011 */
+ 0xc6, /* 1100011 */
+ 0xc6, /* 1100011 */
+ 0xc6, /* 1100011 */
+ 0xc6, /* 1100011 */
+ 0xc6, /* 1100011 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 78 0x4e 'N' */
+ 0x00, /* 0000000 */
+ 0xcc, /* 1100110 */
+ 0xec, /* 1110110 */
+ 0xec, /* 1110110 */
+ 0xfc, /* 1111110 */
+ 0xdc, /* 1101110 */
+ 0xdc, /* 1101110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 79 0x4f 'O' */
+ 0x00, /* 0000000 */
+ 0x78, /* 0111100 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0x78, /* 0111100 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 80 0x50 'P' */
+ 0x00, /* 0000000 */
+ 0xf8, /* 1111100 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xf8, /* 1111100 */
+ 0xc0, /* 1100000 */
+ 0xc0, /* 1100000 */
+ 0xc0, /* 1100000 */
+ 0xc0, /* 1100000 */
+ 0xc0, /* 1100000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 81 0x51 'Q' */
+ 0x00, /* 0000000 */
+ 0x78, /* 0111100 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xdc, /* 1101110 */
+ 0x78, /* 0111100 */
+ 0x18, /* 0001100 */
+ 0x1c, /* 0001110 */
+ 0x00, /* 0000000 */
+
+ /* 82 0x52 'R' */
+ 0x00, /* 0000000 */
+ 0xf8, /* 1111100 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xf8, /* 1111100 */
+ 0xd8, /* 1101100 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 83 0x53 'S' */
+ 0x00, /* 0000000 */
+ 0x7c, /* 0111110 */
+ 0xc4, /* 1100010 */
+ 0xc0, /* 1100000 */
+ 0xc0, /* 1100000 */
+ 0x60, /* 0110000 */
+ 0x38, /* 0011100 */
+ 0x0c, /* 0000110 */
+ 0x0c, /* 0000110 */
+ 0x8c, /* 1000110 */
+ 0xf8, /* 1111100 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 84 0x54 'T' */
+ 0x00, /* 0000000 */
+ 0xfc, /* 1111110 */
+ 0xfc, /* 1111110 */
+ 0xb4, /* 1011010 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x78, /* 0111100 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 85 0x55 'U' */
+ 0x00, /* 0000000 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0x78, /* 0111100 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 86 0x56 'V' */
+ 0x00, /* 0000000 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0x78, /* 0111100 */
+ 0x78, /* 0111100 */
+ 0x30, /* 0011000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 87 0x57 'W' */
+ 0x00, /* 0000000 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xfc, /* 1111110 */
+ 0xfc, /* 1111110 */
+ 0x48, /* 0100100 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 88 0x58 'X' */
+ 0x00, /* 0000000 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0x78, /* 0111100 */
+ 0x78, /* 0111100 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x78, /* 0111100 */
+ 0x78, /* 0111100 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 89 0x59 'Y' */
+ 0x00, /* 0000000 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0x78, /* 0111100 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 90 0x5a 'Z' */
+ 0x00, /* 0000000 */
+ 0xfc, /* 1111110 */
+ 0xcc, /* 1100110 */
+ 0x8c, /* 1000110 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x30, /* 0011000 */
+ 0x60, /* 0110000 */
+ 0xc4, /* 1100010 */
+ 0xcc, /* 1100110 */
+ 0xfc, /* 1111110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 91 0x5b '[' */
+ 0x00, /* 0000000 */
+ 0x78, /* 0111100 */
+ 0x60, /* 0110000 */
+ 0x60, /* 0110000 */
+ 0x60, /* 0110000 */
+ 0x60, /* 0110000 */
+ 0x60, /* 0110000 */
+ 0x60, /* 0110000 */
+ 0x60, /* 0110000 */
+ 0x60, /* 0110000 */
+ 0x78, /* 0111100 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 92 0x5c '\' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x80, /* 1000000 */
+ 0xc0, /* 1100000 */
+ 0xe0, /* 1110000 */
+ 0x70, /* 0111000 */
+ 0x38, /* 0011100 */
+ 0x1c, /* 0001110 */
+ 0x0c, /* 0000110 */
+ 0x04, /* 0000010 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 93 0x5d ']' */
+ 0x00, /* 0000000 */
+ 0x78, /* 0111100 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x78, /* 0111100 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 94 0x5e '^' */
+ 0x10, /* 0001000 */
+ 0x38, /* 0011100 */
+ 0x6c, /* 0110110 */
+ 0xc6, /* 1100011 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 95 0x5f '_' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0xfe, /* 1111111 */
+ 0x00, /* 0000000 */
+
+ /* 96 0x60 '`' */
+ 0x00, /* 0000000 */
+ 0x60, /* 0110000 */
+ 0x30, /* 0011000 */
+ 0x18, /* 0001100 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 97 0x61 'a' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x78, /* 0111100 */
+ 0x0c, /* 0000110 */
+ 0x7c, /* 0111110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0x76, /* 0111011 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 98 0x62 'b' */
+ 0x00, /* 0000000 */
+ 0xc0, /* 1100000 */
+ 0xc0, /* 1100000 */
+ 0xc0, /* 1100000 */
+ 0xf0, /* 1111000 */
+ 0xd8, /* 1101100 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xf8, /* 1111100 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 99 0x63 'c' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x78, /* 0111100 */
+ 0xcc, /* 1100110 */
+ 0xc0, /* 1100000 */
+ 0xc0, /* 1100000 */
+ 0xc0, /* 1100000 */
+ 0xcc, /* 1100110 */
+ 0x78, /* 0111100 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 100 0x64 'd' */
+ 0x00, /* 0000000 */
+ 0x1c, /* 0001110 */
+ 0x0c, /* 0000110 */
+ 0x0c, /* 0000110 */
+ 0x3c, /* 0011110 */
+ 0x6c, /* 0110110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0x76, /* 0111011 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 101 0x65 'e' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x78, /* 0111100 */
+ 0xcc, /* 1100110 */
+ 0xfc, /* 1111110 */
+ 0xc0, /* 1100000 */
+ 0xc0, /* 1100000 */
+ 0xcc, /* 1100110 */
+ 0x78, /* 0111100 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 102 0x66 'f' */
+ 0x00, /* 0000000 */
+ 0x38, /* 0011100 */
+ 0x6c, /* 0110110 */
+ 0x64, /* 0110010 */
+ 0x60, /* 0110000 */
+ 0xf0, /* 1111000 */
+ 0x60, /* 0110000 */
+ 0x60, /* 0110000 */
+ 0x60, /* 0110000 */
+ 0x60, /* 0110000 */
+ 0xf0, /* 1111000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 103 0x67 'g' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x76, /* 0111011 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0x7c, /* 0111110 */
+ 0x0c, /* 0000110 */
+ 0xcc, /* 1100110 */
+ 0x78, /* 0111100 */
+
+ /* 104 0x68 'h' */
+ 0x00, /* 0000000 */
+ 0xc0, /* 1100000 */
+ 0xc0, /* 1100000 */
+ 0xc0, /* 1100000 */
+ 0xd8, /* 1101100 */
+ 0xec, /* 1110110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 105 0x69 'i' */
+ 0x00, /* 0000000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x00, /* 0000000 */
+ 0x70, /* 0111000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x78, /* 0111100 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 106 0x6a 'j' */
+ 0x00, /* 0000000 */
+ 0x0c, /* 0000110 */
+ 0x0c, /* 0000110 */
+ 0x00, /* 0000000 */
+ 0x1c, /* 0001110 */
+ 0x0c, /* 0000110 */
+ 0x0c, /* 0000110 */
+ 0x0c, /* 0000110 */
+ 0x0c, /* 0000110 */
+ 0x0c, /* 0000110 */
+ 0x0c, /* 0000110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0x78, /* 0111100 */
+
+ /* 107 0x6b 'k' */
+ 0x00, /* 0000000 */
+ 0xc0, /* 1100000 */
+ 0xc0, /* 1100000 */
+ 0xc0, /* 1100000 */
+ 0xcc, /* 1100110 */
+ 0xd8, /* 1101100 */
+ 0xf0, /* 1111000 */
+ 0xf0, /* 1111000 */
+ 0xd8, /* 1101100 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 108 0x6c 'l' */
+ 0x00, /* 0000000 */
+ 0x70, /* 0111000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x78, /* 0111100 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 109 0x6d 'm' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0xec, /* 1110110 */
+ 0xfe, /* 1111111 */
+ 0xd6, /* 1101011 */
+ 0xd6, /* 1101011 */
+ 0xd6, /* 1101011 */
+ 0xd6, /* 1101011 */
+ 0xd6, /* 1101011 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 110 0x6e 'n' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0xb8, /* 1011100 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 111 0x6f 'o' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x78, /* 0111100 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0x78, /* 0111100 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 112 0x70 'p' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0xb8, /* 1011100 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xf8, /* 1111100 */
+ 0xc0, /* 1100000 */
+ 0xc0, /* 1100000 */
+ 0xc0, /* 1100000 */
+
+ /* 113 0x71 'q' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x74, /* 0111010 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0x7c, /* 0111110 */
+ 0x0c, /* 0000110 */
+ 0x0c, /* 0000110 */
+ 0x0c, /* 0000110 */
+
+ /* 114 0x72 'r' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0xb8, /* 1011100 */
+ 0xec, /* 1110110 */
+ 0xcc, /* 1100110 */
+ 0xc0, /* 1100000 */
+ 0xc0, /* 1100000 */
+ 0xc0, /* 1100000 */
+ 0xc0, /* 1100000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 115 0x73 's' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x78, /* 0111100 */
+ 0xcc, /* 1100110 */
+ 0x60, /* 0110000 */
+ 0x30, /* 0011000 */
+ 0x18, /* 0001100 */
+ 0xcc, /* 1100110 */
+ 0x78, /* 0111100 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 116 0x74 't' */
+ 0x00, /* 0000000 */
+ 0x10, /* 0001000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0xfc, /* 1111110 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x36, /* 0011011 */
+ 0x1c, /* 0001110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 117 0x75 'u' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0x76, /* 0111011 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 118 0x76 'v' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0x78, /* 0111100 */
+ 0x30, /* 0011000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 119 0x77 'w' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0xc6, /* 1100011 */
+ 0xc6, /* 1100011 */
+ 0xd6, /* 1101011 */
+ 0xd6, /* 1101011 */
+ 0xd6, /* 1101011 */
+ 0xfe, /* 1111111 */
+ 0x6c, /* 0110110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 120 0x78 'x' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0x78, /* 0111100 */
+ 0x30, /* 0011000 */
+ 0x78, /* 0111100 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 121 0x79 'y' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0x7c, /* 0111110 */
+ 0x0c, /* 0000110 */
+ 0x18, /* 0001100 */
+ 0xf0, /* 1111000 */
+
+ /* 122 0x7a 'z' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0xfc, /* 1111110 */
+ 0xcc, /* 1100110 */
+ 0x18, /* 0001100 */
+ 0x30, /* 0011000 */
+ 0x60, /* 0110000 */
+ 0xcc, /* 1100110 */
+ 0xfc, /* 1111110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 123 0x7b '{' */
+ 0x00, /* 0000000 */
+ 0x1c, /* 0001110 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0xe0, /* 1110000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x1c, /* 0001110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 124 0x7c '|' */
+ 0x00, /* 0000000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 125 0x7d '}' */
+ 0x00, /* 0000000 */
+ 0x70, /* 0111000 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x0e, /* 0000111 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x70, /* 0111000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 126 0x7e '~' */
+ 0x00, /* 0000000 */
+ 0xec, /* 1110110 */
+ 0xb8, /* 1011100 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 127 0x7f '' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x10, /* 0001000 */
+ 0x38, /* 0011100 */
+ 0x6c, /* 0110110 */
+ 0xc6, /* 1100011 */
+ 0xc6, /* 1100011 */
+ 0xc6, /* 1100011 */
+ 0xfe, /* 1111111 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 128 0x80 'Ç' */
+ 0x00, /* 0000000 */
+ 0x38, /* 0011100 */
+ 0x6c, /* 0110110 */
+ 0xc4, /* 1100010 */
+ 0xc0, /* 1100000 */
+ 0xc0, /* 1100000 */
+ 0xc0, /* 1100000 */
+ 0xc0, /* 1100000 */
+ 0xc4, /* 1100010 */
+ 0x6c, /* 0110110 */
+ 0x38, /* 0011100 */
+ 0x18, /* 0001100 */
+ 0x70, /* 0111000 */
+ 0x00, /* 0000000 */
+
+ /* 129 0x81 'ü' */
+ 0x00, /* 0000000 */
+ 0xcc, /* 1100110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0x76, /* 0111011 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 130 0x82 'é' */
+ 0x0c, /* 0000110 */
+ 0x18, /* 0001100 */
+ 0x30, /* 0011000 */
+ 0x00, /* 0000000 */
+ 0x78, /* 0111100 */
+ 0xcc, /* 1100110 */
+ 0xfc, /* 1111110 */
+ 0xc0, /* 1100000 */
+ 0xc0, /* 1100000 */
+ 0xcc, /* 1100110 */
+ 0x78, /* 0111100 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 131 0x83 'â' */
+ 0x10, /* 0001000 */
+ 0x38, /* 0011100 */
+ 0x6c, /* 0110110 */
+ 0x00, /* 0000000 */
+ 0x78, /* 0111100 */
+ 0x0c, /* 0000110 */
+ 0x7c, /* 0111110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0x76, /* 0111011 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 132 0x84 'ä' */
+ 0x00, /* 0000000 */
+ 0xcc, /* 1100110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x78, /* 0111100 */
+ 0x0c, /* 0000110 */
+ 0x7c, /* 0111110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0x76, /* 0111011 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 133 0x85 'à' */
+ 0x60, /* 0110000 */
+ 0x30, /* 0011000 */
+ 0x18, /* 0001100 */
+ 0x00, /* 0000000 */
+ 0x78, /* 0111100 */
+ 0x0c, /* 0000110 */
+ 0x7c, /* 0111110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0x76, /* 0111011 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 134 0x86 'å' */
+ 0x38, /* 0011100 */
+ 0x6c, /* 0110110 */
+ 0x38, /* 0011100 */
+ 0x00, /* 0000000 */
+ 0x78, /* 0111100 */
+ 0x0c, /* 0000110 */
+ 0x7c, /* 0111110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0x76, /* 0111011 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 135 0x87 'ç' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x78, /* 0111100 */
+ 0xcc, /* 1100110 */
+ 0xc0, /* 1100000 */
+ 0xc0, /* 1100000 */
+ 0xc0, /* 1100000 */
+ 0xcc, /* 1100110 */
+ 0x78, /* 0111100 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0xe0, /* 1110000 */
+
+ /* 136 0x88 'ê' */
+ 0x10, /* 0001000 */
+ 0x38, /* 0011100 */
+ 0x6c, /* 0110110 */
+ 0x00, /* 0000000 */
+ 0x78, /* 0111100 */
+ 0xcc, /* 1100110 */
+ 0xfc, /* 1111110 */
+ 0xc0, /* 1100000 */
+ 0xc0, /* 1100000 */
+ 0xcc, /* 1100110 */
+ 0x78, /* 0111100 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 137 0x89 'ë' */
+ 0x00, /* 0000000 */
+ 0xcc, /* 1100110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x78, /* 0111100 */
+ 0xcc, /* 1100110 */
+ 0xfc, /* 1111110 */
+ 0xc0, /* 1100000 */
+ 0xc0, /* 1100000 */
+ 0xcc, /* 1100110 */
+ 0x78, /* 0111100 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 138 0x8a 'è' */
+ 0xc0, /* 1100000 */
+ 0x60, /* 0110000 */
+ 0x30, /* 0011000 */
+ 0x00, /* 0000000 */
+ 0x78, /* 0111100 */
+ 0xcc, /* 1100110 */
+ 0xfc, /* 1111110 */
+ 0xc0, /* 1100000 */
+ 0xc0, /* 1100000 */
+ 0xcc, /* 1100110 */
+ 0x78, /* 0111100 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 139 0x8b 'ï' */
+ 0x00, /* 0000000 */
+ 0x6c, /* 0110110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x38, /* 0011100 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x3c, /* 0011110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 140 0x8c 'î' */
+ 0x30, /* 0011000 */
+ 0x78, /* 0111100 */
+ 0xcc, /* 1100110 */
+ 0x00, /* 0000000 */
+ 0x70, /* 0111000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x78, /* 0111100 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 141 0x8d 'ì' */
+ 0xc0, /* 1100000 */
+ 0x60, /* 0110000 */
+ 0x30, /* 0011000 */
+ 0x00, /* 0000000 */
+ 0x70, /* 0111000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x78, /* 0111100 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 142 0x8e 'Ä' */
+ 0x00, /* 0000000 */
+ 0xcc, /* 1100110 */
+ 0x00, /* 0000000 */
+ 0x30, /* 0011000 */
+ 0x78, /* 0111100 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xfc, /* 1111110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 143 0x8f 'Å' */
+ 0x30, /* 0011000 */
+ 0x48, /* 0100100 */
+ 0x48, /* 0100100 */
+ 0x30, /* 0011000 */
+ 0x78, /* 0111100 */
+ 0xcc, /* 1100110 */
+ 0xfc, /* 1111110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 144 0x90 'É' */
+ 0x18, /* 0001100 */
+ 0x30, /* 0011000 */
+ 0xfc, /* 1111110 */
+ 0xcc, /* 1100110 */
+ 0xc4, /* 1100010 */
+ 0xd0, /* 1101000 */
+ 0xf0, /* 1111000 */
+ 0xd0, /* 1101000 */
+ 0xc4, /* 1100010 */
+ 0xcc, /* 1100110 */
+ 0xfc, /* 1111110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 145 0x91 'æ' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0xec, /* 1110110 */
+ 0x36, /* 0011011 */
+ 0x36, /* 0011011 */
+ 0x7e, /* 0111111 */
+ 0xd8, /* 1101100 */
+ 0xd8, /* 1101100 */
+ 0x6e, /* 0110111 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 146 0x92 'Æ' */
+ 0x00, /* 0000000 */
+ 0x3e, /* 0011111 */
+ 0x6c, /* 0110110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xfe, /* 1111111 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xce, /* 1100111 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 147 0x93 'ô' */
+ 0x10, /* 0001000 */
+ 0x38, /* 0011100 */
+ 0x6c, /* 0110110 */
+ 0x00, /* 0000000 */
+ 0x78, /* 0111100 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0x78, /* 0111100 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 148 0x94 'ö' */
+ 0x00, /* 0000000 */
+ 0xcc, /* 1100110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x78, /* 0111100 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0x78, /* 0111100 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 149 0x95 'ò' */
+ 0xc0, /* 1100000 */
+ 0x60, /* 0110000 */
+ 0x30, /* 0011000 */
+ 0x00, /* 0000000 */
+ 0x78, /* 0111100 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0x78, /* 0111100 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 150 0x96 'û' */
+ 0x30, /* 0011000 */
+ 0x78, /* 0111100 */
+ 0xcc, /* 1100110 */
+ 0x00, /* 0000000 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0x76, /* 0111011 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 151 0x97 'ù' */
+ 0x60, /* 0110000 */
+ 0x30, /* 0011000 */
+ 0x18, /* 0001100 */
+ 0x00, /* 0000000 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0x76, /* 0111011 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 152 0x98 'ÿ' */
+ 0x00, /* 0000000 */
+ 0xcc, /* 1100110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0x7c, /* 0111110 */
+ 0x0c, /* 0000110 */
+ 0x18, /* 0001100 */
+ 0x70, /* 0111000 */
+
+ /* 153 0x99 'Ö' */
+ 0xcc, /* 1100110 */
+ 0x00, /* 0000000 */
+ 0x78, /* 0111100 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0x78, /* 0111100 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 154 0x9a 'Ü' */
+ 0xcc, /* 1100110 */
+ 0x00, /* 0000000 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0x78, /* 0111100 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 155 0x9b '¢' */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x7c, /* 0111110 */
+ 0xcc, /* 1100110 */
+ 0xc0, /* 1100000 */
+ 0xc0, /* 1100000 */
+ 0xc0, /* 1100000 */
+ 0xcc, /* 1100110 */
+ 0x7c, /* 0111110 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 156 0x9c '£' */
+ 0x38, /* 0011100 */
+ 0x6c, /* 0110110 */
+ 0x64, /* 0110010 */
+ 0x60, /* 0110000 */
+ 0xf0, /* 1111000 */
+ 0x60, /* 0110000 */
+ 0x60, /* 0110000 */
+ 0x60, /* 0110000 */
+ 0x60, /* 0110000 */
+ 0xe6, /* 1110011 */
+ 0xfc, /* 1111110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 157 0x9d '¥' */
+ 0x00, /* 0000000 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0x78, /* 0111100 */
+ 0x30, /* 0011000 */
+ 0xfc, /* 1111110 */
+ 0x30, /* 0011000 */
+ 0xfc, /* 1111110 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 158 0x9e '₧' */
+ 0xf8, /* 1111100 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xf8, /* 1111100 */
+ 0xc4, /* 1100010 */
+ 0xcc, /* 1100110 */
+ 0xde, /* 1101111 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xc6, /* 1100011 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 159 0x9f 'ƒ' */
+ 0x1c, /* 0001110 */
+ 0x36, /* 0011011 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0xfc, /* 1111110 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0xb0, /* 1011000 */
+ 0xe0, /* 1110000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 160 0xa0 'á' */
+ 0x18, /* 0001100 */
+ 0x30, /* 0011000 */
+ 0x60, /* 0110000 */
+ 0x00, /* 0000000 */
+ 0x78, /* 0111100 */
+ 0x0c, /* 0000110 */
+ 0x7c, /* 0111110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0x76, /* 0111011 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 161 0xa1 'í' */
+ 0x18, /* 0001100 */
+ 0x30, /* 0011000 */
+ 0x60, /* 0110000 */
+ 0x00, /* 0000000 */
+ 0x70, /* 0111000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x78, /* 0111100 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 162 0xa2 'ó' */
+ 0x18, /* 0001100 */
+ 0x30, /* 0011000 */
+ 0x60, /* 0110000 */
+ 0x00, /* 0000000 */
+ 0x78, /* 0111100 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0x78, /* 0111100 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 163 0xa3 'ú' */
+ 0x18, /* 0001100 */
+ 0x30, /* 0011000 */
+ 0x60, /* 0110000 */
+ 0x00, /* 0000000 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0x76, /* 0111011 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 164 0xa4 'ñ' */
+ 0x00, /* 0000000 */
+ 0x76, /* 0111011 */
+ 0xdc, /* 1101110 */
+ 0x00, /* 0000000 */
+ 0xb8, /* 1011100 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 165 0xa5 'Ñ' */
+ 0x76, /* 0111011 */
+ 0xdc, /* 1101110 */
+ 0x00, /* 0000000 */
+ 0xcc, /* 1100110 */
+ 0xec, /* 1110110 */
+ 0xec, /* 1110110 */
+ 0xfc, /* 1111110 */
+ 0xdc, /* 1101110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 166 0xa6 'ª' */
+ 0x00, /* 0000000 */
+ 0x78, /* 0111100 */
+ 0xd8, /* 1101100 */
+ 0xd8, /* 1101100 */
+ 0x7c, /* 0111110 */
+ 0x00, /* 0000000 */
+ 0xfc, /* 1111110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 167 0xa7 'º' */
+ 0x00, /* 0000000 */
+ 0x70, /* 0111000 */
+ 0xd8, /* 1101100 */
+ 0xd8, /* 1101100 */
+ 0x70, /* 0111000 */
+ 0x00, /* 0000000 */
+ 0xf8, /* 1111100 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 168 0xa8 '¿' */
+ 0x00, /* 0000000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x00, /* 0000000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x60, /* 0110000 */
+ 0xc0, /* 1100000 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0x78, /* 0111100 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 169 0xa9 '⌐' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0xfc, /* 1111110 */
+ 0xc0, /* 1100000 */
+ 0xc0, /* 1100000 */
+ 0xc0, /* 1100000 */
+ 0xc0, /* 1100000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 170 0xaa '¬' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0xfc, /* 1111110 */
+ 0x0c, /* 0000110 */
+ 0x0c, /* 0000110 */
+ 0x0c, /* 0000110 */
+ 0x0c, /* 0000110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 171 0xab '½' */
+ 0x60, /* 0110000 */
+ 0xe0, /* 1110000 */
+ 0x62, /* 0110001 */
+ 0x66, /* 0110011 */
+ 0x6c, /* 0110110 */
+ 0x18, /* 0001100 */
+ 0x30, /* 0011000 */
+ 0x60, /* 0110000 */
+ 0xc0, /* 1100000 */
+ 0xb8, /* 1011100 */
+ 0x4c, /* 0100110 */
+ 0x18, /* 0001100 */
+ 0x30, /* 0011000 */
+ 0x7c, /* 0111110 */
+
+ /* 172 0xac '¼' */
+ 0x60, /* 0110000 */
+ 0xe0, /* 1110000 */
+ 0x62, /* 0110001 */
+ 0x66, /* 0110011 */
+ 0x6c, /* 0110110 */
+ 0x18, /* 0001100 */
+ 0x30, /* 0011000 */
+ 0x6c, /* 0110110 */
+ 0xdc, /* 1101110 */
+ 0xb4, /* 1011010 */
+ 0x7e, /* 0111111 */
+ 0x0c, /* 0000110 */
+ 0x0c, /* 0000110 */
+ 0x00, /* 0000000 */
+
+ /* 173 0xad '¡' */
+ 0x00, /* 0000000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x00, /* 0000000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x78, /* 0111100 */
+ 0x78, /* 0111100 */
+ 0x78, /* 0111100 */
+ 0x30, /* 0011000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 174 0xae '«' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x36, /* 0011011 */
+ 0x6c, /* 0110110 */
+ 0xd8, /* 1101100 */
+ 0x6c, /* 0110110 */
+ 0x36, /* 0011011 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 175 0xaf '»' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0xd8, /* 1101100 */
+ 0x6c, /* 0110110 */
+ 0x36, /* 0011011 */
+ 0x6c, /* 0110110 */
+ 0xd8, /* 1101100 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 176 0xb0 '░' */
+ 0x88, /* 1000100 */
+ 0x22, /* 0010001 */
+ 0x88, /* 1000100 */
+ 0x22, /* 0010001 */
+ 0x88, /* 1000100 */
+ 0x22, /* 0010001 */
+ 0x88, /* 1000100 */
+ 0x22, /* 0010001 */
+ 0x88, /* 1000100 */
+ 0x22, /* 0010001 */
+ 0x88, /* 1000100 */
+ 0x22, /* 0010001 */
+ 0x88, /* 1000100 */
+ 0x22, /* 0010001 */
+
+ /* 177 0xb1 '▒' */
+ 0x54, /* 0101010 */
+ 0xaa, /* 1010101 */
+ 0x54, /* 0101010 */
+ 0xaa, /* 1010101 */
+ 0x54, /* 0101010 */
+ 0xaa, /* 1010101 */
+ 0x54, /* 0101010 */
+ 0xaa, /* 1010101 */
+ 0x54, /* 0101010 */
+ 0xaa, /* 1010101 */
+ 0x54, /* 0101010 */
+ 0xaa, /* 1010101 */
+ 0x54, /* 0101010 */
+ 0xaa, /* 1010101 */
+
+ /* 178 0xb2 '▓' */
+ 0xee, /* 1110111 */
+ 0xba, /* 1011101 */
+ 0xee, /* 1110111 */
+ 0xba, /* 1011101 */
+ 0xee, /* 1110111 */
+ 0xba, /* 1011101 */
+ 0xee, /* 1110111 */
+ 0xba, /* 1011101 */
+ 0xee, /* 1110111 */
+ 0xba, /* 1011101 */
+ 0xee, /* 1110111 */
+ 0xba, /* 1011101 */
+ 0xee, /* 1110111 */
+ 0xba, /* 1011101 */
+
+ /* 179 0xb3 '│' */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+
+ /* 180 0xb4 '┤' */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0xf0, /* 1111000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+
+ /* 181 0xb5 '╡' */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0xf0, /* 1111000 */
+ 0x30, /* 0011000 */
+ 0xf0, /* 1111000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+
+ /* 182 0xb6 '╢' */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0xec, /* 1110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+
+ /* 183 0xb7 '╖' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0xfc, /* 1111110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+
+ /* 184 0xb8 '╕' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0xf0, /* 1111000 */
+ 0x30, /* 0011000 */
+ 0xf0, /* 1111000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+
+ /* 185 0xb9 '╣' */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0xec, /* 1110110 */
+ 0x0c, /* 0000110 */
+ 0xec, /* 1110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+
+ /* 186 0xba '║' */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+
+ /* 187 0xbb '╗' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0xfc, /* 1111110 */
+ 0x0c, /* 0000110 */
+ 0xec, /* 1110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+
+ /* 188 0xbc '╝' */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0xec, /* 1110110 */
+ 0x0c, /* 0000110 */
+ 0xfc, /* 1111110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 189 0xbd '╜' */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0xfc, /* 1111110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 190 0xbe '╛' */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0xf0, /* 1111000 */
+ 0x30, /* 0011000 */
+ 0xf0, /* 1111000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 191 0xbf '┐' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0xf0, /* 1111000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+
+ /* 192 0xc0 '└' */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x3e, /* 0011111 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 193 0xc1 '┴' */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0xfe, /* 1111111 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 194 0xc2 '┬' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0xfe, /* 1111111 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+
+ /* 195 0xc3 '├' */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x3e, /* 0011111 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+
+ /* 196 0xc4 '─' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0xfe, /* 1111111 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 197 0xc5 '┼' */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0xfe, /* 1111111 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+
+ /* 198 0xc6 '╞' */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x3e, /* 0011111 */
+ 0x30, /* 0011000 */
+ 0x3e, /* 0011111 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+
+ /* 199 0xc7 '╟' */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6e, /* 0110111 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+
+ /* 200 0xc8 '╚' */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6e, /* 0110111 */
+ 0x60, /* 0110000 */
+ 0x7e, /* 0111111 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 201 0xc9 '╔' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x7e, /* 0111111 */
+ 0x60, /* 0110000 */
+ 0x6e, /* 0110111 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+
+ /* 202 0xca '╩' */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0xee, /* 1110111 */
+ 0x00, /* 0000000 */
+ 0xfe, /* 1111111 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 203 0xcb '╦' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0xfe, /* 1111111 */
+ 0x00, /* 0000000 */
+ 0xee, /* 1110111 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+
+ /* 204 0xcc '╠' */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6e, /* 0110111 */
+ 0x60, /* 0110000 */
+ 0x6e, /* 0110111 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+
+ /* 205 0xcd '═' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0xfe, /* 1111111 */
+ 0x00, /* 0000000 */
+ 0xfe, /* 1111111 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 206 0xce '╬' */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0xee, /* 1110111 */
+ 0x00, /* 0000000 */
+ 0xee, /* 1110111 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+
+ /* 207 0xcf '╧' */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0xfe, /* 1111111 */
+ 0x00, /* 0000000 */
+ 0xfe, /* 1111111 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 208 0xd0 '╨' */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0xfe, /* 1111111 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 209 0xd1 '╤' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0xfe, /* 1111111 */
+ 0x00, /* 0000000 */
+ 0xfe, /* 1111111 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+
+ /* 210 0xd2 '╥' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0xfe, /* 1111111 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+
+ /* 211 0xd3 '╙' */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x7e, /* 0111111 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 212 0xd4 '╘' */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x3e, /* 0011111 */
+ 0x30, /* 0011000 */
+ 0x3e, /* 0011111 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 213 0xd5 '╒' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x3e, /* 0011111 */
+ 0x30, /* 0011000 */
+ 0x3e, /* 0011111 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+
+ /* 214 0xd6 '╓' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x7e, /* 0111111 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+
+ /* 215 0xd7 '╫' */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0xfe, /* 1111111 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+
+ /* 216 0xd8 '╪' */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0xfe, /* 1111111 */
+ 0x30, /* 0011000 */
+ 0xfe, /* 1111111 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+
+ /* 217 0xd9 '┘' */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0xf0, /* 1111000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 218 0xda '┌' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x3e, /* 0011111 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+
+ /* 219 0xdb '█' */
+ 0xfe, /* 1111111 */
+ 0xfe, /* 1111111 */
+ 0xfe, /* 1111111 */
+ 0xfe, /* 1111111 */
+ 0xfe, /* 1111111 */
+ 0xfe, /* 1111111 */
+ 0xfe, /* 1111111 */
+ 0xfe, /* 1111111 */
+ 0xfe, /* 1111111 */
+ 0xfe, /* 1111111 */
+ 0xfe, /* 1111111 */
+ 0xfe, /* 1111111 */
+ 0xfe, /* 1111111 */
+ 0xfe, /* 1111111 */
+
+ /* 220 0xdc '▄' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0xfe, /* 1111111 */
+ 0xfe, /* 1111111 */
+ 0xfe, /* 1111111 */
+ 0xfe, /* 1111111 */
+ 0xfe, /* 1111111 */
+ 0xfe, /* 1111111 */
+ 0xfe, /* 1111111 */
+ 0xfe, /* 1111111 */
+
+ /* 221 0xdd '▌' */
+ 0xe0, /* 1110000 */
+ 0xe0, /* 1110000 */
+ 0xe0, /* 1110000 */
+ 0xe0, /* 1110000 */
+ 0xe0, /* 1110000 */
+ 0xe0, /* 1110000 */
+ 0xe0, /* 1110000 */
+ 0xe0, /* 1110000 */
+ 0xe0, /* 1110000 */
+ 0xe0, /* 1110000 */
+ 0xe0, /* 1110000 */
+ 0xe0, /* 1110000 */
+ 0xe0, /* 1110000 */
+ 0xe0, /* 1110000 */
+
+ /* 222 0xde '▐' */
+ 0x1e, /* 0001111 */
+ 0x1e, /* 0001111 */
+ 0x1e, /* 0001111 */
+ 0x1e, /* 0001111 */
+ 0x1e, /* 0001111 */
+ 0x1e, /* 0001111 */
+ 0x1e, /* 0001111 */
+ 0x1e, /* 0001111 */
+ 0x1e, /* 0001111 */
+ 0x1e, /* 0001111 */
+ 0x1e, /* 0001111 */
+ 0x1e, /* 0001111 */
+ 0x1e, /* 0001111 */
+ 0x1e, /* 0001111 */
+
+ /* 223 0xdf '▀' */
+ 0xfe, /* 1111111 */
+ 0xfe, /* 1111111 */
+ 0xfe, /* 1111111 */
+ 0xfe, /* 1111111 */
+ 0xfe, /* 1111111 */
+ 0xfe, /* 1111111 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 224 0xe0 'α' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x76, /* 0111011 */
+ 0xdc, /* 1101110 */
+ 0xd8, /* 1101100 */
+ 0xd8, /* 1101100 */
+ 0xd8, /* 1101100 */
+ 0xdc, /* 1101110 */
+ 0x76, /* 0111011 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 225 0xe1 'ß' */
+ 0x00, /* 0000000 */
+ 0x78, /* 0111100 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xd8, /* 1101100 */
+ 0xcc, /* 1100110 */
+ 0xc6, /* 1100011 */
+ 0xc6, /* 1100011 */
+ 0xc6, /* 1100011 */
+ 0xcc, /* 1100110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 226 0xe2 'Γ' */
+ 0x00, /* 0000000 */
+ 0xfc, /* 1111110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xc0, /* 1100000 */
+ 0xc0, /* 1100000 */
+ 0xc0, /* 1100000 */
+ 0xc0, /* 1100000 */
+ 0xc0, /* 1100000 */
+ 0xc0, /* 1100000 */
+ 0xc0, /* 1100000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 227 0xe3 'π' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0xfe, /* 1111111 */
+ 0xfe, /* 1111111 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 228 0xe4 'Σ' */
+ 0x00, /* 0000000 */
+ 0xfc, /* 1111110 */
+ 0xcc, /* 1100110 */
+ 0x60, /* 0110000 */
+ 0x30, /* 0011000 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x30, /* 0011000 */
+ 0x60, /* 0110000 */
+ 0xcc, /* 1100110 */
+ 0xfc, /* 1111110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 229 0xe5 'σ' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x7e, /* 0111111 */
+ 0xd8, /* 1101100 */
+ 0xd8, /* 1101100 */
+ 0xd8, /* 1101100 */
+ 0xd8, /* 1101100 */
+ 0xd8, /* 1101100 */
+ 0x70, /* 0111000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 230 0xe6 'µ' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xf8, /* 1111100 */
+ 0xc0, /* 1100000 */
+ 0xc0, /* 1100000 */
+ 0x80, /* 1000000 */
+
+ /* 231 0xe7 'τ' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x76, /* 0111011 */
+ 0xdc, /* 1101110 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 232 0xe8 'Φ' */
+ 0x00, /* 0000000 */
+ 0xfc, /* 1111110 */
+ 0x30, /* 0011000 */
+ 0x78, /* 0111100 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0x78, /* 0111100 */
+ 0x30, /* 0011000 */
+ 0xfc, /* 1111110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 233 0xe9 'Θ' */
+ 0x00, /* 0000000 */
+ 0x38, /* 0011100 */
+ 0x6c, /* 0110110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xfc, /* 1111110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0x6c, /* 0110110 */
+ 0x38, /* 0011100 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 234 0xea 'Ω' */
+ 0x00, /* 0000000 */
+ 0x38, /* 0011100 */
+ 0x6c, /* 0110110 */
+ 0xc6, /* 1100011 */
+ 0xc6, /* 1100011 */
+ 0xc6, /* 1100011 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0xee, /* 1110111 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 235 0xeb 'δ' */
+ 0x00, /* 0000000 */
+ 0x3c, /* 0011110 */
+ 0x60, /* 0110000 */
+ 0x30, /* 0011000 */
+ 0x18, /* 0001100 */
+ 0x7c, /* 0111110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0x78, /* 0111100 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 236 0xec '∞' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x7c, /* 0111110 */
+ 0xd6, /* 1101011 */
+ 0xd6, /* 1101011 */
+ 0xd6, /* 1101011 */
+ 0x7c, /* 0111110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 237 0xed 'φ' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x06, /* 0000011 */
+ 0x0c, /* 0000110 */
+ 0x7c, /* 0111110 */
+ 0xd6, /* 1101011 */
+ 0xd6, /* 1101011 */
+ 0xe6, /* 1110011 */
+ 0x7c, /* 0111110 */
+ 0x60, /* 0110000 */
+ 0xc0, /* 1100000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 238 0xee 'ε' */
+ 0x00, /* 0000000 */
+ 0x1c, /* 0001110 */
+ 0x30, /* 0011000 */
+ 0x60, /* 0110000 */
+ 0x60, /* 0110000 */
+ 0x7c, /* 0111110 */
+ 0x60, /* 0110000 */
+ 0x60, /* 0110000 */
+ 0x60, /* 0110000 */
+ 0x30, /* 0011000 */
+ 0x1c, /* 0001110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 239 0xef '∩' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x78, /* 0111100 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0xcc, /* 1100110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 240 0xf0 '≡' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0xfc, /* 1111110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0xfc, /* 1111110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0xfc, /* 1111110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 241 0xf1 '±' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0xfc, /* 1111110 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0xfc, /* 1111110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 242 0xf2 '≥' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x60, /* 0110000 */
+ 0x30, /* 0011000 */
+ 0x18, /* 0001100 */
+ 0x0c, /* 0000110 */
+ 0x18, /* 0001100 */
+ 0x30, /* 0011000 */
+ 0x60, /* 0110000 */
+ 0x00, /* 0000000 */
+ 0xfc, /* 1111110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 243 0xf3 '≤' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x18, /* 0001100 */
+ 0x30, /* 0011000 */
+ 0x60, /* 0110000 */
+ 0xc0, /* 1100000 */
+ 0x60, /* 0110000 */
+ 0x30, /* 0011000 */
+ 0x18, /* 0001100 */
+ 0x00, /* 0000000 */
+ 0xfc, /* 1111110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 244 0xf4 '⌠' */
+ 0x00, /* 0000000 */
+ 0x1c, /* 0001110 */
+ 0x36, /* 0011011 */
+ 0x36, /* 0011011 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+
+ /* 245 0xf5 '⌡' */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0xd8, /* 1101100 */
+ 0xd8, /* 1101100 */
+ 0xd8, /* 1101100 */
+ 0x70, /* 0111000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 246 0xf6 '÷' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x30, /* 0011000 */
+ 0x00, /* 0000000 */
+ 0xfc, /* 1111110 */
+ 0x00, /* 0000000 */
+ 0x30, /* 0011000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 247 0xf7 '≈' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x76, /* 0111011 */
+ 0xdc, /* 1101110 */
+ 0x00, /* 0000000 */
+ 0x76, /* 0111011 */
+ 0xdc, /* 1101110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 248 0xf8 '°' */
+ 0x38, /* 0011100 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x38, /* 0011100 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 249 0xf9 '·' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x30, /* 0011000 */
+ 0x30, /* 0011000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 250 0xfa '•' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x30, /* 0011000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 251 0xfb '√' */
+ 0x1e, /* 0001111 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0x18, /* 0001100 */
+ 0xd8, /* 1101100 */
+ 0xd8, /* 1101100 */
+ 0xd8, /* 1101100 */
+ 0x78, /* 0111100 */
+ 0x38, /* 0011100 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 252 0xfc 'ⁿ' */
+ 0xd8, /* 1101100 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x6c, /* 0110110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 253 0xfd '²' */
+ 0x78, /* 0111100 */
+ 0xcc, /* 1100110 */
+ 0x18, /* 0001100 */
+ 0x30, /* 0011000 */
+ 0x64, /* 0110010 */
+ 0xfc, /* 1111110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 254 0xfe '■' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x7c, /* 0111110 */
+ 0x7c, /* 0111110 */
+ 0x7c, /* 0111110 */
+ 0x7c, /* 0111110 */
+ 0x7c, /* 0111110 */
+ 0x7c, /* 0111110 */
+ 0x7c, /* 0111110 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+
+ /* 255 0xff ' ' */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+ 0x00, /* 0000000 */
+} };
+
+
+const struct font_desc font_7x14 = {
+ .idx = FONT7x14_IDX,
+ .name = "7x14",
+ .width = 7,
+ .height = 14,
+ .charcount = 256,
+ .data = fontdata_7x14.data,
+ .pref = 0,
+};
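+
+/*
+ * Editorial layout note: each glyph above is stored as 14 consecutive
+ * bytes, one byte per scanline, most-significant bit leftmost; only
+ * the top 7 bits of each byte carry pixels (the font is 7 wide).  A
+ * minimal pixel-test sketch, assuming a CP437 code point c, row r and
+ * column x that are all in range:
+ *
+ *	bool set = fontdata_7x14.data[c * 14 + r] & (0x80 >> x);
+ */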
diff --git a/lib/fonts/font_8x16.c b/lib/fonts/font_8x16.c
new file mode 100644
index 000000000..06ae14088
--- /dev/null
+++ b/lib/fonts/font_8x16.c
@@ -0,0 +1,4634 @@
+// SPDX-License-Identifier: GPL-2.0
+/**********************************************/
+/* */
+/* Font file generated by cpi2fnt */
+/* */
+/**********************************************/
+
+#include <linux/font.h>
+#include <linux/module.h>
+
+#define FONTDATAMAX 4096
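+/* editorial: 256 glyphs x 16 bytes per glyph = 4096 */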
+
+static const struct font_data fontdata_8x16 = {
+ { 0, 0, FONTDATAMAX, 0 }, {
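+ /*
+  * Editorial note: the third header word is the data size
+  * (FONTDATAMAX); the remaining zeroed words are assumed here to be
+  * fbcon's legacy font metadata (checksum, glyph count, refcount).
+  * Each glyph below is 16 bytes, one byte per 8-pixel scanline with
+  * the most-significant bit leftmost, in CP437 order.
+  */
+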
+ /* 0 0x00 '^@' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 1 0x01 '^A' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7e, /* 01111110 */
+ 0x81, /* 10000001 */
+ 0xa5, /* 10100101 */
+ 0x81, /* 10000001 */
+ 0x81, /* 10000001 */
+ 0xbd, /* 10111101 */
+ 0x99, /* 10011001 */
+ 0x81, /* 10000001 */
+ 0x81, /* 10000001 */
+ 0x7e, /* 01111110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 2 0x02 '^B' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7e, /* 01111110 */
+ 0xff, /* 11111111 */
+ 0xdb, /* 11011011 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xc3, /* 11000011 */
+ 0xe7, /* 11100111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0x7e, /* 01111110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 3 0x03 '^C' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x6c, /* 01101100 */
+ 0xfe, /* 11111110 */
+ 0xfe, /* 11111110 */
+ 0xfe, /* 11111110 */
+ 0xfe, /* 11111110 */
+ 0x7c, /* 01111100 */
+ 0x38, /* 00111000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 4 0x04 '^D' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x10, /* 00010000 */
+ 0x38, /* 00111000 */
+ 0x7c, /* 01111100 */
+ 0xfe, /* 11111110 */
+ 0x7c, /* 01111100 */
+ 0x38, /* 00111000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 5 0x05 '^E' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x3c, /* 00111100 */
+ 0x3c, /* 00111100 */
+ 0xe7, /* 11100111 */
+ 0xe7, /* 11100111 */
+ 0xe7, /* 11100111 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x3c, /* 00111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 6 0x06 '^F' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x3c, /* 00111100 */
+ 0x7e, /* 01111110 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0x7e, /* 01111110 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x3c, /* 00111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 7 0x07 '^G' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x3c, /* 00111100 */
+ 0x3c, /* 00111100 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 8 0x08 '^H' */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xe7, /* 11100111 */
+ 0xc3, /* 11000011 */
+ 0xc3, /* 11000011 */
+ 0xe7, /* 11100111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+
+ /* 9 0x09 '^I' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x3c, /* 00111100 */
+ 0x66, /* 01100110 */
+ 0x42, /* 01000010 */
+ 0x42, /* 01000010 */
+ 0x66, /* 01100110 */
+ 0x3c, /* 00111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 10 0x0a '^J' */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xc3, /* 11000011 */
+ 0x99, /* 10011001 */
+ 0xbd, /* 10111101 */
+ 0xbd, /* 10111101 */
+ 0x99, /* 10011001 */
+ 0xc3, /* 11000011 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+
+ /* 11 0x0b '^K' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x1e, /* 00011110 */
+ 0x0e, /* 00001110 */
+ 0x1a, /* 00011010 */
+ 0x32, /* 00110010 */
+ 0x78, /* 01111000 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0x78, /* 01111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 12 0x0c '^L' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x3c, /* 00111100 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x3c, /* 00111100 */
+ 0x18, /* 00011000 */
+ 0x7e, /* 01111110 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 13 0x0d '^M' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x3f, /* 00111111 */
+ 0x33, /* 00110011 */
+ 0x3f, /* 00111111 */
+ 0x30, /* 00110000 */
+ 0x30, /* 00110000 */
+ 0x30, /* 00110000 */
+ 0x30, /* 00110000 */
+ 0x70, /* 01110000 */
+ 0xf0, /* 11110000 */
+ 0xe0, /* 11100000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 14 0x0e '^N' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7f, /* 01111111 */
+ 0x63, /* 01100011 */
+ 0x7f, /* 01111111 */
+ 0x63, /* 01100011 */
+ 0x63, /* 01100011 */
+ 0x63, /* 01100011 */
+ 0x63, /* 01100011 */
+ 0x67, /* 01100111 */
+ 0xe7, /* 11100111 */
+ 0xe6, /* 11100110 */
+ 0xc0, /* 11000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 15 0x0f '^O' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0xdb, /* 11011011 */
+ 0x3c, /* 00111100 */
+ 0xe7, /* 11100111 */
+ 0x3c, /* 00111100 */
+ 0xdb, /* 11011011 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 16 0x10 '^P' */
+ 0x00, /* 00000000 */
+ 0x80, /* 10000000 */
+ 0xc0, /* 11000000 */
+ 0xe0, /* 11100000 */
+ 0xf0, /* 11110000 */
+ 0xf8, /* 11111000 */
+ 0xfe, /* 11111110 */
+ 0xf8, /* 11111000 */
+ 0xf0, /* 11110000 */
+ 0xe0, /* 11100000 */
+ 0xc0, /* 11000000 */
+ 0x80, /* 10000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 17 0x11 '^Q' */
+ 0x00, /* 00000000 */
+ 0x02, /* 00000010 */
+ 0x06, /* 00000110 */
+ 0x0e, /* 00001110 */
+ 0x1e, /* 00011110 */
+ 0x3e, /* 00111110 */
+ 0xfe, /* 11111110 */
+ 0x3e, /* 00111110 */
+ 0x1e, /* 00011110 */
+ 0x0e, /* 00001110 */
+ 0x06, /* 00000110 */
+ 0x02, /* 00000010 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 18 0x12 '^R' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x3c, /* 00111100 */
+ 0x7e, /* 01111110 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x7e, /* 01111110 */
+ 0x3c, /* 00111100 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 19 0x13 '^S' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x00, /* 00000000 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 20 0x14 '^T' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7f, /* 01111111 */
+ 0xdb, /* 11011011 */
+ 0xdb, /* 11011011 */
+ 0xdb, /* 11011011 */
+ 0x7b, /* 01111011 */
+ 0x1b, /* 00011011 */
+ 0x1b, /* 00011011 */
+ 0x1b, /* 00011011 */
+ 0x1b, /* 00011011 */
+ 0x1b, /* 00011011 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 21 0x15 '^U' */
+ 0x00, /* 00000000 */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0x60, /* 01100000 */
+ 0x38, /* 00111000 */
+ 0x6c, /* 01101100 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x6c, /* 01101100 */
+ 0x38, /* 00111000 */
+ 0x0c, /* 00001100 */
+ 0xc6, /* 11000110 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 22 0x16 '^V' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xfe, /* 11111110 */
+ 0xfe, /* 11111110 */
+ 0xfe, /* 11111110 */
+ 0xfe, /* 11111110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 23 0x17 '^W' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x3c, /* 00111100 */
+ 0x7e, /* 01111110 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x7e, /* 01111110 */
+ 0x3c, /* 00111100 */
+ 0x18, /* 00011000 */
+ 0x7e, /* 01111110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 24 0x18 '^X' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x3c, /* 00111100 */
+ 0x7e, /* 01111110 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 25 0x19 '^Y' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x7e, /* 01111110 */
+ 0x3c, /* 00111100 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 26 0x1a '^Z' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x0c, /* 00001100 */
+ 0xfe, /* 11111110 */
+ 0x0c, /* 00001100 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 27 0x1b '^[' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x30, /* 00110000 */
+ 0x60, /* 01100000 */
+ 0xfe, /* 11111110 */
+ 0x60, /* 01100000 */
+ 0x30, /* 00110000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 28 0x1c '^\' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0xfe, /* 11111110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 29 0x1d '^]' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x28, /* 00101000 */
+ 0x6c, /* 01101100 */
+ 0xfe, /* 11111110 */
+ 0x6c, /* 01101100 */
+ 0x28, /* 00101000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 30 0x1e '^^' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x10, /* 00010000 */
+ 0x38, /* 00111000 */
+ 0x38, /* 00111000 */
+ 0x7c, /* 01111100 */
+ 0x7c, /* 01111100 */
+ 0xfe, /* 11111110 */
+ 0xfe, /* 11111110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 31 0x1f '^_' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xfe, /* 11111110 */
+ 0xfe, /* 11111110 */
+ 0x7c, /* 01111100 */
+ 0x7c, /* 01111100 */
+ 0x38, /* 00111000 */
+ 0x38, /* 00111000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 32 0x20 ' ' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 33 0x21 '!' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x3c, /* 00111100 */
+ 0x3c, /* 00111100 */
+ 0x3c, /* 00111100 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 34 0x22 '"' */
+ 0x00, /* 00000000 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x24, /* 00100100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 35 0x23 '#' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x6c, /* 01101100 */
+ 0x6c, /* 01101100 */
+ 0xfe, /* 11111110 */
+ 0x6c, /* 01101100 */
+ 0x6c, /* 01101100 */
+ 0x6c, /* 01101100 */
+ 0xfe, /* 11111110 */
+ 0x6c, /* 01101100 */
+ 0x6c, /* 01101100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 36 0x24 '$' */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xc2, /* 11000010 */
+ 0xc0, /* 11000000 */
+ 0x7c, /* 01111100 */
+ 0x06, /* 00000110 */
+ 0x06, /* 00000110 */
+ 0x86, /* 10000110 */
+ 0xc6, /* 11000110 */
+ 0x7c, /* 01111100 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 37 0x25 '%' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xc2, /* 11000010 */
+ 0xc6, /* 11000110 */
+ 0x0c, /* 00001100 */
+ 0x18, /* 00011000 */
+ 0x30, /* 00110000 */
+ 0x60, /* 01100000 */
+ 0xc6, /* 11000110 */
+ 0x86, /* 10000110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 38 0x26 '&' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x6c, /* 01101100 */
+ 0x6c, /* 01101100 */
+ 0x38, /* 00111000 */
+ 0x76, /* 01110110 */
+ 0xdc, /* 11011100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0x76, /* 01110110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 39 0x27 ''' */
+ 0x00, /* 00000000 */
+ 0x30, /* 00110000 */
+ 0x30, /* 00110000 */
+ 0x30, /* 00110000 */
+ 0x60, /* 01100000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 40 0x28 '(' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x0c, /* 00001100 */
+ 0x18, /* 00011000 */
+ 0x30, /* 00110000 */
+ 0x30, /* 00110000 */
+ 0x30, /* 00110000 */
+ 0x30, /* 00110000 */
+ 0x30, /* 00110000 */
+ 0x30, /* 00110000 */
+ 0x18, /* 00011000 */
+ 0x0c, /* 00001100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 41 0x29 ')' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x30, /* 00110000 */
+ 0x18, /* 00011000 */
+ 0x0c, /* 00001100 */
+ 0x0c, /* 00001100 */
+ 0x0c, /* 00001100 */
+ 0x0c, /* 00001100 */
+ 0x0c, /* 00001100 */
+ 0x0c, /* 00001100 */
+ 0x18, /* 00011000 */
+ 0x30, /* 00110000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 42 0x2a '*' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x66, /* 01100110 */
+ 0x3c, /* 00111100 */
+ 0xff, /* 11111111 */
+ 0x3c, /* 00111100 */
+ 0x66, /* 01100110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 43 0x2b '+' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x7e, /* 01111110 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 44 0x2c ',' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x30, /* 00110000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 45 0x2d '-' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xfe, /* 11111110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 46 0x2e '.' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 47 0x2f '/' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x02, /* 00000010 */
+ 0x06, /* 00000110 */
+ 0x0c, /* 00001100 */
+ 0x18, /* 00011000 */
+ 0x30, /* 00110000 */
+ 0x60, /* 01100000 */
+ 0xc0, /* 11000000 */
+ 0x80, /* 10000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 48 0x30 '0' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x6c, /* 01101100 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xd6, /* 11010110 */
+ 0xd6, /* 11010110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x6c, /* 01101100 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 49 0x31 '1' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x38, /* 00111000 */
+ 0x78, /* 01111000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x7e, /* 01111110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 50 0x32 '2' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0x06, /* 00000110 */
+ 0x0c, /* 00001100 */
+ 0x18, /* 00011000 */
+ 0x30, /* 00110000 */
+ 0x60, /* 01100000 */
+ 0xc0, /* 11000000 */
+ 0xc6, /* 11000110 */
+ 0xfe, /* 11111110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 51 0x33 '3' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0x06, /* 00000110 */
+ 0x06, /* 00000110 */
+ 0x3c, /* 00111100 */
+ 0x06, /* 00000110 */
+ 0x06, /* 00000110 */
+ 0x06, /* 00000110 */
+ 0xc6, /* 11000110 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 52 0x34 '4' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x0c, /* 00001100 */
+ 0x1c, /* 00011100 */
+ 0x3c, /* 00111100 */
+ 0x6c, /* 01101100 */
+ 0xcc, /* 11001100 */
+ 0xfe, /* 11111110 */
+ 0x0c, /* 00001100 */
+ 0x0c, /* 00001100 */
+ 0x0c, /* 00001100 */
+ 0x1e, /* 00011110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 53 0x35 '5' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xfe, /* 11111110 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0xfc, /* 11111100 */
+ 0x06, /* 00000110 */
+ 0x06, /* 00000110 */
+ 0x06, /* 00000110 */
+ 0xc6, /* 11000110 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 54 0x36 '6' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x60, /* 01100000 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0xfc, /* 11111100 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 55 0x37 '7' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xfe, /* 11111110 */
+ 0xc6, /* 11000110 */
+ 0x06, /* 00000110 */
+ 0x06, /* 00000110 */
+ 0x0c, /* 00001100 */
+ 0x18, /* 00011000 */
+ 0x30, /* 00110000 */
+ 0x30, /* 00110000 */
+ 0x30, /* 00110000 */
+ 0x30, /* 00110000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 56 0x38 '8' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 57 0x39 '9' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x7e, /* 01111110 */
+ 0x06, /* 00000110 */
+ 0x06, /* 00000110 */
+ 0x06, /* 00000110 */
+ 0x0c, /* 00001100 */
+ 0x78, /* 01111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 58 0x3a ':' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 59 0x3b ';' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x30, /* 00110000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 60 0x3c '<' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x06, /* 00000110 */
+ 0x0c, /* 00001100 */
+ 0x18, /* 00011000 */
+ 0x30, /* 00110000 */
+ 0x60, /* 01100000 */
+ 0x30, /* 00110000 */
+ 0x18, /* 00011000 */
+ 0x0c, /* 00001100 */
+ 0x06, /* 00000110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 61 0x3d '=' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7e, /* 01111110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7e, /* 01111110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 62 0x3e '>' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x60, /* 01100000 */
+ 0x30, /* 00110000 */
+ 0x18, /* 00011000 */
+ 0x0c, /* 00001100 */
+ 0x06, /* 00000110 */
+ 0x0c, /* 00001100 */
+ 0x18, /* 00011000 */
+ 0x30, /* 00110000 */
+ 0x60, /* 01100000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 63 0x3f '?' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x0c, /* 00001100 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 64 0x40 '@' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xde, /* 11011110 */
+ 0xde, /* 11011110 */
+ 0xde, /* 11011110 */
+ 0xdc, /* 11011100 */
+ 0xc0, /* 11000000 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 65 0x41 'A' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x10, /* 00010000 */
+ 0x38, /* 00111000 */
+ 0x6c, /* 01101100 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xfe, /* 11111110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 66 0x42 'B' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xfc, /* 11111100 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x7c, /* 01111100 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0xfc, /* 11111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 67 0x43 'C' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x3c, /* 00111100 */
+ 0x66, /* 01100110 */
+ 0xc2, /* 11000010 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0xc2, /* 11000010 */
+ 0x66, /* 01100110 */
+ 0x3c, /* 00111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 68 0x44 'D' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xf8, /* 11111000 */
+ 0x6c, /* 01101100 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x6c, /* 01101100 */
+ 0xf8, /* 11111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 69 0x45 'E' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xfe, /* 11111110 */
+ 0x66, /* 01100110 */
+ 0x62, /* 01100010 */
+ 0x68, /* 01101000 */
+ 0x78, /* 01111000 */
+ 0x68, /* 01101000 */
+ 0x60, /* 01100000 */
+ 0x62, /* 01100010 */
+ 0x66, /* 01100110 */
+ 0xfe, /* 11111110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 70 0x46 'F' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xfe, /* 11111110 */
+ 0x66, /* 01100110 */
+ 0x62, /* 01100010 */
+ 0x68, /* 01101000 */
+ 0x78, /* 01111000 */
+ 0x68, /* 01101000 */
+ 0x60, /* 01100000 */
+ 0x60, /* 01100000 */
+ 0x60, /* 01100000 */
+ 0xf0, /* 11110000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 71 0x47 'G' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x3c, /* 00111100 */
+ 0x66, /* 01100110 */
+ 0xc2, /* 11000010 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0xde, /* 11011110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x66, /* 01100110 */
+ 0x3a, /* 00111010 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 72 0x48 'H' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xfe, /* 11111110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 73 0x49 'I' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x3c, /* 00111100 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x3c, /* 00111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 74 0x4a 'J' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x1e, /* 00011110 */
+ 0x0c, /* 00001100 */
+ 0x0c, /* 00001100 */
+ 0x0c, /* 00001100 */
+ 0x0c, /* 00001100 */
+ 0x0c, /* 00001100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0x78, /* 01111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 75 0x4b 'K' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xe6, /* 11100110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x6c, /* 01101100 */
+ 0x78, /* 01111000 */
+ 0x78, /* 01111000 */
+ 0x6c, /* 01101100 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0xe6, /* 11100110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 76 0x4c 'L' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xf0, /* 11110000 */
+ 0x60, /* 01100000 */
+ 0x60, /* 01100000 */
+ 0x60, /* 01100000 */
+ 0x60, /* 01100000 */
+ 0x60, /* 01100000 */
+ 0x60, /* 01100000 */
+ 0x62, /* 01100010 */
+ 0x66, /* 01100110 */
+ 0xfe, /* 11111110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 77 0x4d 'M' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xc6, /* 11000110 */
+ 0xee, /* 11101110 */
+ 0xfe, /* 11111110 */
+ 0xfe, /* 11111110 */
+ 0xd6, /* 11010110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 78 0x4e 'N' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xc6, /* 11000110 */
+ 0xe6, /* 11100110 */
+ 0xf6, /* 11110110 */
+ 0xfe, /* 11111110 */
+ 0xde, /* 11011110 */
+ 0xce, /* 11001110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 79 0x4f 'O' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 80 0x50 'P' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xfc, /* 11111100 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x7c, /* 01111100 */
+ 0x60, /* 01100000 */
+ 0x60, /* 01100000 */
+ 0x60, /* 01100000 */
+ 0x60, /* 01100000 */
+ 0xf0, /* 11110000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 81 0x51 'Q' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xd6, /* 11010110 */
+ 0xde, /* 11011110 */
+ 0x7c, /* 01111100 */
+ 0x0c, /* 00001100 */
+ 0x0e, /* 00001110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 82 0x52 'R' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xfc, /* 11111100 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x7c, /* 01111100 */
+ 0x6c, /* 01101100 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0xe6, /* 11100110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 83 0x53 'S' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x60, /* 01100000 */
+ 0x38, /* 00111000 */
+ 0x0c, /* 00001100 */
+ 0x06, /* 00000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 84 0x54 'T' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7e, /* 01111110 */
+ 0x7e, /* 01111110 */
+ 0x5a, /* 01011010 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x3c, /* 00111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 85 0x55 'U' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 86 0x56 'V' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x6c, /* 01101100 */
+ 0x38, /* 00111000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 87 0x57 'W' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xd6, /* 11010110 */
+ 0xd6, /* 11010110 */
+ 0xd6, /* 11010110 */
+ 0xfe, /* 11111110 */
+ 0xee, /* 11101110 */
+ 0x6c, /* 01101100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 88 0x58 'X' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x6c, /* 01101100 */
+ 0x7c, /* 01111100 */
+ 0x38, /* 00111000 */
+ 0x38, /* 00111000 */
+ 0x7c, /* 01111100 */
+ 0x6c, /* 01101100 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 89 0x59 'Y' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x3c, /* 00111100 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x3c, /* 00111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 90 0x5a 'Z' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xfe, /* 11111110 */
+ 0xc6, /* 11000110 */
+ 0x86, /* 10000110 */
+ 0x0c, /* 00001100 */
+ 0x18, /* 00011000 */
+ 0x30, /* 00110000 */
+ 0x60, /* 01100000 */
+ 0xc2, /* 11000010 */
+ 0xc6, /* 11000110 */
+ 0xfe, /* 11111110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 91 0x5b '[' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x3c, /* 00111100 */
+ 0x30, /* 00110000 */
+ 0x30, /* 00110000 */
+ 0x30, /* 00110000 */
+ 0x30, /* 00110000 */
+ 0x30, /* 00110000 */
+ 0x30, /* 00110000 */
+ 0x30, /* 00110000 */
+ 0x30, /* 00110000 */
+ 0x3c, /* 00111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 92 0x5c '\' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x80, /* 10000000 */
+ 0xc0, /* 11000000 */
+ 0xe0, /* 11100000 */
+ 0x70, /* 01110000 */
+ 0x38, /* 00111000 */
+ 0x1c, /* 00011100 */
+ 0x0e, /* 00001110 */
+ 0x06, /* 00000110 */
+ 0x02, /* 00000010 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 93 0x5d ']' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x3c, /* 00111100 */
+ 0x0c, /* 00001100 */
+ 0x0c, /* 00001100 */
+ 0x0c, /* 00001100 */
+ 0x0c, /* 00001100 */
+ 0x0c, /* 00001100 */
+ 0x0c, /* 00001100 */
+ 0x0c, /* 00001100 */
+ 0x0c, /* 00001100 */
+ 0x3c, /* 00111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 94 0x5e '^' */
+ 0x10, /* 00010000 */
+ 0x38, /* 00111000 */
+ 0x6c, /* 01101100 */
+ 0xc6, /* 11000110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 95 0x5f '_' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xff, /* 11111111 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 96 0x60 '`' */
+ 0x00, /* 00000000 */
+ 0x30, /* 00110000 */
+ 0x18, /* 00011000 */
+ 0x0c, /* 00001100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 97 0x61 'a' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x78, /* 01111000 */
+ 0x0c, /* 00001100 */
+ 0x7c, /* 01111100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0x76, /* 01110110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 98 0x62 'b' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xe0, /* 11100000 */
+ 0x60, /* 01100000 */
+ 0x60, /* 01100000 */
+ 0x78, /* 01111000 */
+ 0x6c, /* 01101100 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 99 0x63 'c' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0xc6, /* 11000110 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 100 0x64 'd' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x1c, /* 00011100 */
+ 0x0c, /* 00001100 */
+ 0x0c, /* 00001100 */
+ 0x3c, /* 00111100 */
+ 0x6c, /* 01101100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0x76, /* 01110110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 101 0x65 'e' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xfe, /* 11111110 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0xc6, /* 11000110 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 102 0x66 'f' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x1c, /* 00011100 */
+ 0x36, /* 00110110 */
+ 0x32, /* 00110010 */
+ 0x30, /* 00110000 */
+ 0x78, /* 01111000 */
+ 0x30, /* 00110000 */
+ 0x30, /* 00110000 */
+ 0x30, /* 00110000 */
+ 0x30, /* 00110000 */
+ 0x78, /* 01111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 103 0x67 'g' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x76, /* 01110110 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0x7c, /* 01111100 */
+ 0x0c, /* 00001100 */
+ 0xcc, /* 11001100 */
+ 0x78, /* 01111000 */
+ 0x00, /* 00000000 */
+
+ /* 104 0x68 'h' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xe0, /* 11100000 */
+ 0x60, /* 01100000 */
+ 0x60, /* 01100000 */
+ 0x6c, /* 01101100 */
+ 0x76, /* 01110110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0xe6, /* 11100110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 105 0x69 'i' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x3c, /* 00111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 106 0x6a 'j' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x06, /* 00000110 */
+ 0x06, /* 00000110 */
+ 0x00, /* 00000000 */
+ 0x0e, /* 00001110 */
+ 0x06, /* 00000110 */
+ 0x06, /* 00000110 */
+ 0x06, /* 00000110 */
+ 0x06, /* 00000110 */
+ 0x06, /* 00000110 */
+ 0x06, /* 00000110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x3c, /* 00111100 */
+ 0x00, /* 00000000 */
+
+ /* 107 0x6b 'k' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xe0, /* 11100000 */
+ 0x60, /* 01100000 */
+ 0x60, /* 01100000 */
+ 0x66, /* 01100110 */
+ 0x6c, /* 01101100 */
+ 0x78, /* 01111000 */
+ 0x78, /* 01111000 */
+ 0x6c, /* 01101100 */
+ 0x66, /* 01100110 */
+ 0xe6, /* 11100110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 108 0x6c 'l' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x3c, /* 00111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 109 0x6d 'm' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xec, /* 11101100 */
+ 0xfe, /* 11111110 */
+ 0xd6, /* 11010110 */
+ 0xd6, /* 11010110 */
+ 0xd6, /* 11010110 */
+ 0xd6, /* 11010110 */
+ 0xc6, /* 11000110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 110 0x6e 'n' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xdc, /* 11011100 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 111 0x6f 'o' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 112 0x70 'p' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xdc, /* 11011100 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x7c, /* 01111100 */
+ 0x60, /* 01100000 */
+ 0x60, /* 01100000 */
+ 0xf0, /* 11110000 */
+ 0x00, /* 00000000 */
+
+ /* 113 0x71 'q' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x76, /* 01110110 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0x7c, /* 01111100 */
+ 0x0c, /* 00001100 */
+ 0x0c, /* 00001100 */
+ 0x1e, /* 00011110 */
+ 0x00, /* 00000000 */
+
+ /* 114 0x72 'r' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xdc, /* 11011100 */
+ 0x76, /* 01110110 */
+ 0x66, /* 01100110 */
+ 0x60, /* 01100000 */
+ 0x60, /* 01100000 */
+ 0x60, /* 01100000 */
+ 0xf0, /* 11110000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 115 0x73 's' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0x60, /* 01100000 */
+ 0x38, /* 00111000 */
+ 0x0c, /* 00001100 */
+ 0xc6, /* 11000110 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 116 0x74 't' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x10, /* 00010000 */
+ 0x30, /* 00110000 */
+ 0x30, /* 00110000 */
+ 0xfc, /* 11111100 */
+ 0x30, /* 00110000 */
+ 0x30, /* 00110000 */
+ 0x30, /* 00110000 */
+ 0x30, /* 00110000 */
+ 0x36, /* 00110110 */
+ 0x1c, /* 00011100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 117 0x75 'u' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0x76, /* 01110110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 118 0x76 'v' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x6c, /* 01101100 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 119 0x77 'w' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xd6, /* 11010110 */
+ 0xd6, /* 11010110 */
+ 0xd6, /* 11010110 */
+ 0xfe, /* 11111110 */
+ 0x6c, /* 01101100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 120 0x78 'x' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xc6, /* 11000110 */
+ 0x6c, /* 01101100 */
+ 0x38, /* 00111000 */
+ 0x38, /* 00111000 */
+ 0x38, /* 00111000 */
+ 0x6c, /* 01101100 */
+ 0xc6, /* 11000110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 121 0x79 'y' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x7e, /* 01111110 */
+ 0x06, /* 00000110 */
+ 0x0c, /* 00001100 */
+ 0xf8, /* 11111000 */
+ 0x00, /* 00000000 */
+
+ /* 122 0x7a 'z' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xfe, /* 11111110 */
+ 0xcc, /* 11001100 */
+ 0x18, /* 00011000 */
+ 0x30, /* 00110000 */
+ 0x60, /* 01100000 */
+ 0xc6, /* 11000110 */
+ 0xfe, /* 11111110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 123 0x7b '{' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x0e, /* 00001110 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x70, /* 01110000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x0e, /* 00001110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 124 0x7c '|' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 125 0x7d '}' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x70, /* 01110000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x0e, /* 00001110 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x70, /* 01110000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 126 0x7e '~' */
+ 0x00, /* 00000000 */
+ 0x76, /* 01110110 */
+ 0xdc, /* 11011100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 127 0x7f '^?' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x10, /* 00010000 */
+ 0x38, /* 00111000 */
+ 0x6c, /* 01101100 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xfe, /* 11111110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 128 0x80 'Ç' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x3c, /* 00111100 */
+ 0x66, /* 01100110 */
+ 0xc2, /* 11000010 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0xc2, /* 11000010 */
+ 0x66, /* 01100110 */
+ 0x3c, /* 00111100 */
+ 0x18, /* 00011000 */
+ 0x70, /* 01110000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 129 0x81 'ü' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xcc, /* 11001100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0x76, /* 01110110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 130 0x82 'é' */
+ 0x00, /* 00000000 */
+ 0x0c, /* 00001100 */
+ 0x18, /* 00011000 */
+ 0x30, /* 00110000 */
+ 0x00, /* 00000000 */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xfe, /* 11111110 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0xc6, /* 11000110 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 131 0x83 'â' */
+ 0x00, /* 00000000 */
+ 0x10, /* 00010000 */
+ 0x38, /* 00111000 */
+ 0x6c, /* 01101100 */
+ 0x00, /* 00000000 */
+ 0x78, /* 01111000 */
+ 0x0c, /* 00001100 */
+ 0x7c, /* 01111100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0x76, /* 01110110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 132 0x84 'ä' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xcc, /* 11001100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x78, /* 01111000 */
+ 0x0c, /* 00001100 */
+ 0x7c, /* 01111100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0x76, /* 01110110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 133 0x85 'à' */
+ 0x00, /* 00000000 */
+ 0x60, /* 01100000 */
+ 0x30, /* 00110000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x78, /* 01111000 */
+ 0x0c, /* 00001100 */
+ 0x7c, /* 01111100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0x76, /* 01110110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 134 0x86 'å' */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x6c, /* 01101100 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x78, /* 01111000 */
+ 0x0c, /* 00001100 */
+ 0x7c, /* 01111100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0x76, /* 01110110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 135 0x87 'ç' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0xc6, /* 11000110 */
+ 0x7c, /* 01111100 */
+ 0x18, /* 00011000 */
+ 0x70, /* 01110000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 136 0x88 'ê' */
+ 0x00, /* 00000000 */
+ 0x10, /* 00010000 */
+ 0x38, /* 00111000 */
+ 0x6c, /* 01101100 */
+ 0x00, /* 00000000 */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xfe, /* 11111110 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0xc6, /* 11000110 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 137 0x89 'ë' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xc6, /* 11000110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xfe, /* 11111110 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0xc6, /* 11000110 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 138 0x8a 'è' */
+ 0x00, /* 00000000 */
+ 0x60, /* 01100000 */
+ 0x30, /* 00110000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xfe, /* 11111110 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0xc6, /* 11000110 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 139 0x8b 'ï' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x66, /* 01100110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x3c, /* 00111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 140 0x8c 'î' */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x3c, /* 00111100 */
+ 0x66, /* 01100110 */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x3c, /* 00111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 141 0x8d 'ì' */
+ 0x00, /* 00000000 */
+ 0x60, /* 01100000 */
+ 0x30, /* 00110000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x3c, /* 00111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 142 0x8e 'Ä' */
+ 0x00, /* 00000000 */
+ 0xc6, /* 11000110 */
+ 0x00, /* 00000000 */
+ 0x10, /* 00010000 */
+ 0x38, /* 00111000 */
+ 0x6c, /* 01101100 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xfe, /* 11111110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 143 0x8f 'Å' */
+ 0x38, /* 00111000 */
+ 0x6c, /* 01101100 */
+ 0x38, /* 00111000 */
+ 0x10, /* 00010000 */
+ 0x38, /* 00111000 */
+ 0x6c, /* 01101100 */
+ 0xc6, /* 11000110 */
+ 0xfe, /* 11111110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 144 0x90 'É' */
+ 0x0c, /* 00001100 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0xfe, /* 11111110 */
+ 0x66, /* 01100110 */
+ 0x62, /* 01100010 */
+ 0x68, /* 01101000 */
+ 0x78, /* 01111000 */
+ 0x68, /* 01101000 */
+ 0x62, /* 01100010 */
+ 0x66, /* 01100110 */
+ 0xfe, /* 11111110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 145 0x91 'æ' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xec, /* 11101100 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x7e, /* 01111110 */
+ 0xd8, /* 11011000 */
+ 0xd8, /* 11011000 */
+ 0x6e, /* 01101110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 146 0x92 'Æ' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x3e, /* 00111110 */
+ 0x6c, /* 01101100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xfe, /* 11111110 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xce, /* 11001110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 147 0x93 'ô' */
+ 0x00, /* 00000000 */
+ 0x10, /* 00010000 */
+ 0x38, /* 00111000 */
+ 0x6c, /* 01101100 */
+ 0x00, /* 00000000 */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 148 0x94 'ö' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xc6, /* 11000110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 149 0x95 'ò' */
+ 0x00, /* 00000000 */
+ 0x60, /* 01100000 */
+ 0x30, /* 00110000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 150 0x96 'û' */
+ 0x00, /* 00000000 */
+ 0x30, /* 00110000 */
+ 0x78, /* 01111000 */
+ 0xcc, /* 11001100 */
+ 0x00, /* 00000000 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0x76, /* 01110110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 151 0x97 'ù' */
+ 0x00, /* 00000000 */
+ 0x60, /* 01100000 */
+ 0x30, /* 00110000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0x76, /* 01110110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 152 0x98 'ÿ' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xc6, /* 11000110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x7e, /* 01111110 */
+ 0x06, /* 00000110 */
+ 0x0c, /* 00001100 */
+ 0x78, /* 01111000 */
+ 0x00, /* 00000000 */
+
+ /* 153 0x99 'Ö' */
+ 0x00, /* 00000000 */
+ 0xc6, /* 11000110 */
+ 0x00, /* 00000000 */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 154 0x9a 'Ü' */
+ 0x00, /* 00000000 */
+ 0xc6, /* 11000110 */
+ 0x00, /* 00000000 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 155 0x9b '¢' */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0xc6, /* 11000110 */
+ 0x7c, /* 01111100 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 156 0x9c '£' */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x6c, /* 01101100 */
+ 0x64, /* 01100100 */
+ 0x60, /* 01100000 */
+ 0xf0, /* 11110000 */
+ 0x60, /* 01100000 */
+ 0x60, /* 01100000 */
+ 0x60, /* 01100000 */
+ 0x60, /* 01100000 */
+ 0xe6, /* 11100110 */
+ 0xfc, /* 11111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 157 0x9d '¥' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x3c, /* 00111100 */
+ 0x18, /* 00011000 */
+ 0x7e, /* 01111110 */
+ 0x18, /* 00011000 */
+ 0x7e, /* 01111110 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 158 0x9e '₧' */
+ 0x00, /* 00000000 */
+ 0xf8, /* 11111000 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xf8, /* 11111000 */
+ 0xc4, /* 11000100 */
+ 0xcc, /* 11001100 */
+ 0xde, /* 11011110 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xc6, /* 11000110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 159 0x9f 'ƒ' */
+ 0x00, /* 00000000 */
+ 0x0e, /* 00001110 */
+ 0x1b, /* 00011011 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x7e, /* 01111110 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0xd8, /* 11011000 */
+ 0x70, /* 01110000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 160 0xa0 'á' */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x30, /* 00110000 */
+ 0x60, /* 01100000 */
+ 0x00, /* 00000000 */
+ 0x78, /* 01111000 */
+ 0x0c, /* 00001100 */
+ 0x7c, /* 01111100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0x76, /* 01110110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 161 0xa1 'í' */
+ 0x00, /* 00000000 */
+ 0x0c, /* 00001100 */
+ 0x18, /* 00011000 */
+ 0x30, /* 00110000 */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x3c, /* 00111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 162 0xa2 'ó' */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x30, /* 00110000 */
+ 0x60, /* 01100000 */
+ 0x00, /* 00000000 */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 163 0xa3 'ú' */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x30, /* 00110000 */
+ 0x60, /* 01100000 */
+ 0x00, /* 00000000 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0x76, /* 01110110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 164 0xa4 'ñ' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x76, /* 01110110 */
+ 0xdc, /* 11011100 */
+ 0x00, /* 00000000 */
+ 0xdc, /* 11011100 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 165 0xa5 'Ñ' */
+ 0x76, /* 01110110 */
+ 0xdc, /* 11011100 */
+ 0x00, /* 00000000 */
+ 0xc6, /* 11000110 */
+ 0xe6, /* 11100110 */
+ 0xf6, /* 11110110 */
+ 0xfe, /* 11111110 */
+ 0xde, /* 11011110 */
+ 0xce, /* 11001110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 166 0xa6 'ª' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x3c, /* 00111100 */
+ 0x6c, /* 01101100 */
+ 0x6c, /* 01101100 */
+ 0x3e, /* 00111110 */
+ 0x00, /* 00000000 */
+ 0x7e, /* 01111110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 167 0xa7 'º' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x6c, /* 01101100 */
+ 0x6c, /* 01101100 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 168 0xa8 '¿' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x30, /* 00110000 */
+ 0x30, /* 00110000 */
+ 0x00, /* 00000000 */
+ 0x30, /* 00110000 */
+ 0x30, /* 00110000 */
+ 0x60, /* 01100000 */
+ 0xc0, /* 11000000 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 169 0xa9 '⌐' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xfe, /* 11111110 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 170 0xaa '¬' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xfe, /* 11111110 */
+ 0x06, /* 00000110 */
+ 0x06, /* 00000110 */
+ 0x06, /* 00000110 */
+ 0x06, /* 00000110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 171 0xab '½' */
+ 0x00, /* 00000000 */
+ 0x60, /* 01100000 */
+ 0xe0, /* 11100000 */
+ 0x62, /* 01100010 */
+ 0x66, /* 01100110 */
+ 0x6c, /* 01101100 */
+ 0x18, /* 00011000 */
+ 0x30, /* 00110000 */
+ 0x60, /* 01100000 */
+ 0xdc, /* 11011100 */
+ 0x86, /* 10000110 */
+ 0x0c, /* 00001100 */
+ 0x18, /* 00011000 */
+ 0x3e, /* 00111110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 172 0xac '¼' */
+ 0x00, /* 00000000 */
+ 0x60, /* 01100000 */
+ 0xe0, /* 11100000 */
+ 0x62, /* 01100010 */
+ 0x66, /* 01100110 */
+ 0x6c, /* 01101100 */
+ 0x18, /* 00011000 */
+ 0x30, /* 00110000 */
+ 0x66, /* 01100110 */
+ 0xce, /* 11001110 */
+ 0x9a, /* 10011010 */
+ 0x3f, /* 00111111 */
+ 0x06, /* 00000110 */
+ 0x06, /* 00000110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 173 0xad '¡' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x3c, /* 00111100 */
+ 0x3c, /* 00111100 */
+ 0x3c, /* 00111100 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 174 0xae '«' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x36, /* 00110110 */
+ 0x6c, /* 01101100 */
+ 0xd8, /* 11011000 */
+ 0x6c, /* 01101100 */
+ 0x36, /* 00110110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 175 0xaf '»' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xd8, /* 11011000 */
+ 0x6c, /* 01101100 */
+ 0x36, /* 00110110 */
+ 0x6c, /* 01101100 */
+ 0xd8, /* 11011000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 176 0xb0 '░' */
+ 0x11, /* 00010001 */
+ 0x44, /* 01000100 */
+ 0x11, /* 00010001 */
+ 0x44, /* 01000100 */
+ 0x11, /* 00010001 */
+ 0x44, /* 01000100 */
+ 0x11, /* 00010001 */
+ 0x44, /* 01000100 */
+ 0x11, /* 00010001 */
+ 0x44, /* 01000100 */
+ 0x11, /* 00010001 */
+ 0x44, /* 01000100 */
+ 0x11, /* 00010001 */
+ 0x44, /* 01000100 */
+ 0x11, /* 00010001 */
+ 0x44, /* 01000100 */
+
+ /* 177 0xb1 '▒' */
+ 0x55, /* 01010101 */
+ 0xaa, /* 10101010 */
+ 0x55, /* 01010101 */
+ 0xaa, /* 10101010 */
+ 0x55, /* 01010101 */
+ 0xaa, /* 10101010 */
+ 0x55, /* 01010101 */
+ 0xaa, /* 10101010 */
+ 0x55, /* 01010101 */
+ 0xaa, /* 10101010 */
+ 0x55, /* 01010101 */
+ 0xaa, /* 10101010 */
+ 0x55, /* 01010101 */
+ 0xaa, /* 10101010 */
+ 0x55, /* 01010101 */
+ 0xaa, /* 10101010 */
+
+ /* 178 0xb2 '▓' */
+ 0xdd, /* 11011101 */
+ 0x77, /* 01110111 */
+ 0xdd, /* 11011101 */
+ 0x77, /* 01110111 */
+ 0xdd, /* 11011101 */
+ 0x77, /* 01110111 */
+ 0xdd, /* 11011101 */
+ 0x77, /* 01110111 */
+ 0xdd, /* 11011101 */
+ 0x77, /* 01110111 */
+ 0xdd, /* 11011101 */
+ 0x77, /* 01110111 */
+ 0xdd, /* 11011101 */
+ 0x77, /* 01110111 */
+ 0xdd, /* 11011101 */
+ 0x77, /* 01110111 */
+
+ /* 179 0xb3 '│' */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+
+ /* 180 0xb4 '┤' */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0xf8, /* 11111000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+
+ /* 181 0xb5 '╡' */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0xf8, /* 11111000 */
+ 0x18, /* 00011000 */
+ 0xf8, /* 11111000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+
+ /* 182 0xb6 '╢' */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0xf6, /* 11110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+
+ /* 183 0xb7 '╖' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xfe, /* 11111110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+
+ /* 184 0xb8 '╕' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xf8, /* 11111000 */
+ 0x18, /* 00011000 */
+ 0xf8, /* 11111000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+
+ /* 185 0xb9 '╣' */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0xf6, /* 11110110 */
+ 0x06, /* 00000110 */
+ 0xf6, /* 11110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+
+ /* 186 0xba '║' */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+
+ /* 187 0xbb '╗' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xfe, /* 11111110 */
+ 0x06, /* 00000110 */
+ 0xf6, /* 11110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+
+ /* 188 0xbc '╝' */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0xf6, /* 11110110 */
+ 0x06, /* 00000110 */
+ 0xfe, /* 11111110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 189 0xbd '╜' */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0xfe, /* 11111110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 190 0xbe '╛' */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0xf8, /* 11111000 */
+ 0x18, /* 00011000 */
+ 0xf8, /* 11111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 191 0xbf '┐' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xf8, /* 11111000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+
+ /* 192 0xc0 '└' */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x1f, /* 00011111 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 193 0xc1 '┴' */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0xff, /* 11111111 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 194 0xc2 '┬' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xff, /* 11111111 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+
+ /* 195 0xc3 '├' */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x1f, /* 00011111 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+
+ /* 196 0xc4 '─' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xff, /* 11111111 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 197 0xc5 '┼' */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0xff, /* 11111111 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+
+ /* 198 0xc6 '╞' */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x1f, /* 00011111 */
+ 0x18, /* 00011000 */
+ 0x1f, /* 00011111 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+
+ /* 199 0xc7 '╟' */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x37, /* 00110111 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+
+ /* 200 0xc8 '╚' */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x37, /* 00110111 */
+ 0x30, /* 00110000 */
+ 0x3f, /* 00111111 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 201 0xc9 '╔' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x3f, /* 00111111 */
+ 0x30, /* 00110000 */
+ 0x37, /* 00110111 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+
+ /* 202 0xca '╩' */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0xf7, /* 11110111 */
+ 0x00, /* 00000000 */
+ 0xff, /* 11111111 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 203 0xcb '╦' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xff, /* 11111111 */
+ 0x00, /* 00000000 */
+ 0xf7, /* 11110111 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+
+ /* 204 0xcc '╠' */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x37, /* 00110111 */
+ 0x30, /* 00110000 */
+ 0x37, /* 00110111 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+
+ /* 205 0xcd '═' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xff, /* 11111111 */
+ 0x00, /* 00000000 */
+ 0xff, /* 11111111 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 206 0xce '╬' */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0xf7, /* 11110111 */
+ 0x00, /* 00000000 */
+ 0xf7, /* 11110111 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+
+ /* 207 0xcf '╧' */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0xff, /* 11111111 */
+ 0x00, /* 00000000 */
+ 0xff, /* 11111111 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 208 0xd0 '╨' */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0xff, /* 11111111 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 209 0xd1 '╤' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xff, /* 11111111 */
+ 0x00, /* 00000000 */
+ 0xff, /* 11111111 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+
+ /* 210 0xd2 '╥' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xff, /* 11111111 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+
+ /* 211 0xd3 '╙' */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x3f, /* 00111111 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 212 0xd4 '╘' */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x1f, /* 00011111 */
+ 0x18, /* 00011000 */
+ 0x1f, /* 00011111 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 213 0xd5 '╒' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x1f, /* 00011111 */
+ 0x18, /* 00011000 */
+ 0x1f, /* 00011111 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+
+ /* 214 0xd6 '╓' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x3f, /* 00111111 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+
+ /* 215 0xd7 '╫' */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0xff, /* 11111111 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+
+ /* 216 0xd8 '╪' */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0xff, /* 11111111 */
+ 0x18, /* 00011000 */
+ 0xff, /* 11111111 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+
+ /* 217 0xd9 '┘' */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0xf8, /* 11111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 218 0xda '┌' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x1f, /* 00011111 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+
+ /* 219 0xdb '█' */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+
+ /* 220 0xdc '▄' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+
+ /* 221 0xdd '▌' */
+ 0xf0, /* 11110000 */
+ 0xf0, /* 11110000 */
+ 0xf0, /* 11110000 */
+ 0xf0, /* 11110000 */
+ 0xf0, /* 11110000 */
+ 0xf0, /* 11110000 */
+ 0xf0, /* 11110000 */
+ 0xf0, /* 11110000 */
+ 0xf0, /* 11110000 */
+ 0xf0, /* 11110000 */
+ 0xf0, /* 11110000 */
+ 0xf0, /* 11110000 */
+ 0xf0, /* 11110000 */
+ 0xf0, /* 11110000 */
+ 0xf0, /* 11110000 */
+ 0xf0, /* 11110000 */
+
+ /* 222 0xde '▐' */
+ 0x0f, /* 00001111 */
+ 0x0f, /* 00001111 */
+ 0x0f, /* 00001111 */
+ 0x0f, /* 00001111 */
+ 0x0f, /* 00001111 */
+ 0x0f, /* 00001111 */
+ 0x0f, /* 00001111 */
+ 0x0f, /* 00001111 */
+ 0x0f, /* 00001111 */
+ 0x0f, /* 00001111 */
+ 0x0f, /* 00001111 */
+ 0x0f, /* 00001111 */
+ 0x0f, /* 00001111 */
+ 0x0f, /* 00001111 */
+ 0x0f, /* 00001111 */
+ 0x0f, /* 00001111 */
+
+ /* 223 0xdf '▀' */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 224 0xe0 'α' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x76, /* 01110110 */
+ 0xdc, /* 11011100 */
+ 0xd8, /* 11011000 */
+ 0xd8, /* 11011000 */
+ 0xd8, /* 11011000 */
+ 0xdc, /* 11011100 */
+ 0x76, /* 01110110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 225 0xe1 'ß' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x78, /* 01111000 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xd8, /* 11011000 */
+ 0xcc, /* 11001100 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xcc, /* 11001100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 226 0xe2 'Γ' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xfe, /* 11111110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 227 0xe3 'π' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xfe, /* 11111110 */
+ 0x6c, /* 01101100 */
+ 0x6c, /* 01101100 */
+ 0x6c, /* 01101100 */
+ 0x6c, /* 01101100 */
+ 0x6c, /* 01101100 */
+ 0x6c, /* 01101100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 228 0xe4 'Σ' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xfe, /* 11111110 */
+ 0xc6, /* 11000110 */
+ 0x60, /* 01100000 */
+ 0x30, /* 00110000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x30, /* 00110000 */
+ 0x60, /* 01100000 */
+ 0xc6, /* 11000110 */
+ 0xfe, /* 11111110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 229 0xe5 'σ' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7e, /* 01111110 */
+ 0xd8, /* 11011000 */
+ 0xd8, /* 11011000 */
+ 0xd8, /* 11011000 */
+ 0xd8, /* 11011000 */
+ 0xd8, /* 11011000 */
+ 0x70, /* 01110000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 230 0xe6 'µ' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x7c, /* 01111100 */
+ 0x60, /* 01100000 */
+ 0x60, /* 01100000 */
+ 0xc0, /* 11000000 */
+ 0x00, /* 00000000 */
+
+ /* 231 0xe7 'τ' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x76, /* 01110110 */
+ 0xdc, /* 11011100 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 232 0xe8 'Φ' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7e, /* 01111110 */
+ 0x18, /* 00011000 */
+ 0x3c, /* 00111100 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x3c, /* 00111100 */
+ 0x18, /* 00011000 */
+ 0x7e, /* 01111110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 233 0xe9 'Θ' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x6c, /* 01101100 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xfe, /* 11111110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x6c, /* 01101100 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 234 0xea 'Ω' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x6c, /* 01101100 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x6c, /* 01101100 */
+ 0x6c, /* 01101100 */
+ 0x6c, /* 01101100 */
+ 0x6c, /* 01101100 */
+ 0xee, /* 11101110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 235 0xeb 'δ' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x1e, /* 00011110 */
+ 0x30, /* 00110000 */
+ 0x18, /* 00011000 */
+ 0x0c, /* 00001100 */
+ 0x3e, /* 00111110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x3c, /* 00111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 236 0xec '∞' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7e, /* 01111110 */
+ 0xdb, /* 11011011 */
+ 0xdb, /* 11011011 */
+ 0xdb, /* 11011011 */
+ 0x7e, /* 01111110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 237 0xed 'φ' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x03, /* 00000011 */
+ 0x06, /* 00000110 */
+ 0x7e, /* 01111110 */
+ 0xdb, /* 11011011 */
+ 0xdb, /* 11011011 */
+ 0xf3, /* 11110011 */
+ 0x7e, /* 01111110 */
+ 0x60, /* 01100000 */
+ 0xc0, /* 11000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 238 0xee 'ε' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x1c, /* 00011100 */
+ 0x30, /* 00110000 */
+ 0x60, /* 01100000 */
+ 0x60, /* 01100000 */
+ 0x7c, /* 01111100 */
+ 0x60, /* 01100000 */
+ 0x60, /* 01100000 */
+ 0x60, /* 01100000 */
+ 0x30, /* 00110000 */
+ 0x1c, /* 00011100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 239 0xef '∩' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 240 0xf0 '≡' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xfe, /* 11111110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xfe, /* 11111110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xfe, /* 11111110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 241 0xf1 '±' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x7e, /* 01111110 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7e, /* 01111110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 242 0xf2 '≥' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x30, /* 00110000 */
+ 0x18, /* 00011000 */
+ 0x0c, /* 00001100 */
+ 0x06, /* 00000110 */
+ 0x0c, /* 00001100 */
+ 0x18, /* 00011000 */
+ 0x30, /* 00110000 */
+ 0x00, /* 00000000 */
+ 0x7e, /* 01111110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 243 0xf3 '≤' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x0c, /* 00001100 */
+ 0x18, /* 00011000 */
+ 0x30, /* 00110000 */
+ 0x60, /* 01100000 */
+ 0x30, /* 00110000 */
+ 0x18, /* 00011000 */
+ 0x0c, /* 00001100 */
+ 0x00, /* 00000000 */
+ 0x7e, /* 01111110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 244 0xf4 '⌠' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x0e, /* 00001110 */
+ 0x1b, /* 00011011 */
+ 0x1b, /* 00011011 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+
+ /* 245 0xf5 '⌡' */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0xd8, /* 11011000 */
+ 0xd8, /* 11011000 */
+ 0xd8, /* 11011000 */
+ 0x70, /* 01110000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 246 0xf6 '÷' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x7e, /* 01111110 */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 247 0xf7 '≈' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x76, /* 01110110 */
+ 0xdc, /* 11011100 */
+ 0x00, /* 00000000 */
+ 0x76, /* 01110110 */
+ 0xdc, /* 11011100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 248 0xf8 '°' */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x6c, /* 01101100 */
+ 0x6c, /* 01101100 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 249 0xf9 '·' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 250 0xfa '•' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 251 0xfb '√' */
+ 0x00, /* 00000000 */
+ 0x0f, /* 00001111 */
+ 0x0c, /* 00001100 */
+ 0x0c, /* 00001100 */
+ 0x0c, /* 00001100 */
+ 0x0c, /* 00001100 */
+ 0x0c, /* 00001100 */
+ 0xec, /* 11101100 */
+ 0x6c, /* 01101100 */
+ 0x6c, /* 01101100 */
+ 0x3c, /* 00111100 */
+ 0x1c, /* 00011100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 252 0xfc 'ⁿ' */
+ 0x00, /* 00000000 */
+ 0x6c, /* 01101100 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 253 0xfd '²' */
+ 0x00, /* 00000000 */
+ 0x3c, /* 00111100 */
+ 0x66, /* 01100110 */
+ 0x0c, /* 00001100 */
+ 0x18, /* 00011000 */
+ 0x32, /* 00110010 */
+ 0x7e, /* 01111110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 254 0xfe '■' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7e, /* 01111110 */
+ 0x7e, /* 01111110 */
+ 0x7e, /* 01111110 */
+ 0x7e, /* 01111110 */
+ 0x7e, /* 01111110 */
+ 0x7e, /* 01111110 */
+ 0x7e, /* 01111110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 255 0xff ' ' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+} };
+
+
+const struct font_desc font_vga_8x16 = {
+ .idx = VGA8x16_IDX,
+ .name = "VGA8x16",
+ .width = 8,
+ .height = 16,
+ .charcount = 256,
+ .data = fontdata_8x16.data,
+ .pref = 0,
+};
+EXPORT_SYMBOL(font_vga_8x16);
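
[Illustrative sketch, not part of the patch: in these tables each glyph occupies font->height consecutive bytes, one byte per pixel row, with bit 7 (the MSB) as the leftmost pixel, which is exactly the layout spelled out by the binary comments above. Assuming an 8-pixel-wide font looked up via find_font("VGA8x16") and a hypothetical plot_pixel() output helper, a consumer could decode a glyph roughly like this:

	#include <linux/font.h>

	static void draw_glyph(const struct font_desc *font, unsigned char ch)
	{
		/* glyphs are stored back to back: ch * height bytes in */
		const unsigned char *glyph =
			(const unsigned char *)font->data + ch * font->height;
		int row, col;

		for (row = 0; row < font->height; row++)
			for (col = 0; col < font->width; col++)
				if (glyph[row] & (0x80 >> col))
					plot_pixel(col, row); /* hypothetical helper */
	}

The same indexing applies to the 8x8 table that follows, with font->height = 8.]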
diff --git a/lib/fonts/font_8x8.c b/lib/fonts/font_8x8.c
new file mode 100644
index 000000000..69570b8c3
--- /dev/null
+++ b/lib/fonts/font_8x8.c
@@ -0,0 +1,2584 @@
+// SPDX-License-Identifier: GPL-2.0
+/**********************************************/
+/* */
+/* Font file generated by cpi2fnt */
+/* */
+/**********************************************/
+
+#include <linux/font.h>
+
+#define FONTDATAMAX 2048
+
+static const struct font_data fontdata_8x8 = {
+ { 0, 0, FONTDATAMAX, 0 }, {
+ /* 0 0x00 '^@' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 1 0x01 '^A' */
+ 0x7e, /* 01111110 */
+ 0x81, /* 10000001 */
+ 0xa5, /* 10100101 */
+ 0x81, /* 10000001 */
+ 0xbd, /* 10111101 */
+ 0x99, /* 10011001 */
+ 0x81, /* 10000001 */
+ 0x7e, /* 01111110 */
+
+ /* 2 0x02 '^B' */
+ 0x7e, /* 01111110 */
+ 0xff, /* 11111111 */
+ 0xdb, /* 11011011 */
+ 0xff, /* 11111111 */
+ 0xc3, /* 11000011 */
+ 0xe7, /* 11100111 */
+ 0xff, /* 11111111 */
+ 0x7e, /* 01111110 */
+
+ /* 3 0x03 '^C' */
+ 0x6c, /* 01101100 */
+ 0xfe, /* 11111110 */
+ 0xfe, /* 11111110 */
+ 0xfe, /* 11111110 */
+ 0x7c, /* 01111100 */
+ 0x38, /* 00111000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+
+ /* 4 0x04 '^D' */
+ 0x10, /* 00010000 */
+ 0x38, /* 00111000 */
+ 0x7c, /* 01111100 */
+ 0xfe, /* 11111110 */
+ 0x7c, /* 01111100 */
+ 0x38, /* 00111000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+
+ /* 5 0x05 '^E' */
+ 0x38, /* 00111000 */
+ 0x7c, /* 01111100 */
+ 0x38, /* 00111000 */
+ 0xfe, /* 11111110 */
+ 0xfe, /* 11111110 */
+ 0xd6, /* 11010110 */
+ 0x10, /* 00010000 */
+ 0x38, /* 00111000 */
+
+ /* 6 0x06 '^F' */
+ 0x10, /* 00010000 */
+ 0x38, /* 00111000 */
+ 0x7c, /* 01111100 */
+ 0xfe, /* 11111110 */
+ 0xfe, /* 11111110 */
+ 0x7c, /* 01111100 */
+ 0x10, /* 00010000 */
+ 0x38, /* 00111000 */
+
+ /* 7 0x07 '^G' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x3c, /* 00111100 */
+ 0x3c, /* 00111100 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 8 0x08 '^H' */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xe7, /* 11100111 */
+ 0xc3, /* 11000011 */
+ 0xc3, /* 11000011 */
+ 0xe7, /* 11100111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+
+ /* 9 0x09 '^I' */
+ 0x00, /* 00000000 */
+ 0x3c, /* 00111100 */
+ 0x66, /* 01100110 */
+ 0x42, /* 01000010 */
+ 0x42, /* 01000010 */
+ 0x66, /* 01100110 */
+ 0x3c, /* 00111100 */
+ 0x00, /* 00000000 */
+
+ /* 10 0x0a '^J' */
+ 0xff, /* 11111111 */
+ 0xc3, /* 11000011 */
+ 0x99, /* 10011001 */
+ 0xbd, /* 10111101 */
+ 0xbd, /* 10111101 */
+ 0x99, /* 10011001 */
+ 0xc3, /* 11000011 */
+ 0xff, /* 11111111 */
+
+ /* 11 0x0b '^K' */
+ 0x0f, /* 00001111 */
+ 0x07, /* 00000111 */
+ 0x0f, /* 00001111 */
+ 0x7d, /* 01111101 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0x78, /* 01111000 */
+
+ /* 12 0x0c '^L' */
+ 0x3c, /* 00111100 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x3c, /* 00111100 */
+ 0x18, /* 00011000 */
+ 0x7e, /* 01111110 */
+ 0x18, /* 00011000 */
+
+ /* 13 0x0d '^M' */
+ 0x3f, /* 00111111 */
+ 0x33, /* 00110011 */
+ 0x3f, /* 00111111 */
+ 0x30, /* 00110000 */
+ 0x30, /* 00110000 */
+ 0x70, /* 01110000 */
+ 0xf0, /* 11110000 */
+ 0xe0, /* 11100000 */
+
+ /* 14 0x0e '^N' */
+ 0x7f, /* 01111111 */
+ 0x63, /* 01100011 */
+ 0x7f, /* 01111111 */
+ 0x63, /* 01100011 */
+ 0x63, /* 01100011 */
+ 0x67, /* 01100111 */
+ 0xe6, /* 11100110 */
+ 0xc0, /* 11000000 */
+
+ /* 15 0x0f '^O' */
+ 0x18, /* 00011000 */
+ 0xdb, /* 11011011 */
+ 0x3c, /* 00111100 */
+ 0xe7, /* 11100111 */
+ 0xe7, /* 11100111 */
+ 0x3c, /* 00111100 */
+ 0xdb, /* 11011011 */
+ 0x18, /* 00011000 */
+
+ /* 16 0x10 '^P' */
+ 0x80, /* 10000000 */
+ 0xe0, /* 11100000 */
+ 0xf8, /* 11111000 */
+ 0xfe, /* 11111110 */
+ 0xf8, /* 11111000 */
+ 0xe0, /* 11100000 */
+ 0x80, /* 10000000 */
+ 0x00, /* 00000000 */
+
+ /* 17 0x11 '^Q' */
+ 0x02, /* 00000010 */
+ 0x0e, /* 00001110 */
+ 0x3e, /* 00111110 */
+ 0xfe, /* 11111110 */
+ 0x3e, /* 00111110 */
+ 0x0e, /* 00001110 */
+ 0x02, /* 00000010 */
+ 0x00, /* 00000000 */
+
+ /* 18 0x12 '^R' */
+ 0x18, /* 00011000 */
+ 0x3c, /* 00111100 */
+ 0x7e, /* 01111110 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x7e, /* 01111110 */
+ 0x3c, /* 00111100 */
+ 0x18, /* 00011000 */
+
+ /* 19 0x13 '^S' */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x00, /* 00000000 */
+ 0x66, /* 01100110 */
+ 0x00, /* 00000000 */
+
+ /* 20 0x14 '^T' */
+ 0x7f, /* 01111111 */
+ 0xdb, /* 11011011 */
+ 0xdb, /* 11011011 */
+ 0x7b, /* 01111011 */
+ 0x1b, /* 00011011 */
+ 0x1b, /* 00011011 */
+ 0x1b, /* 00011011 */
+ 0x00, /* 00000000 */
+
+ /* 21 0x15 '^U' */
+ 0x3e, /* 00111110 */
+ 0x61, /* 01100001 */
+ 0x3c, /* 00111100 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x3c, /* 00111100 */
+ 0x86, /* 10000110 */
+ 0x7c, /* 01111100 */
+
+ /* 22 0x16 '^V' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7e, /* 01111110 */
+ 0x7e, /* 01111110 */
+ 0x7e, /* 01111110 */
+ 0x00, /* 00000000 */
+
+ /* 23 0x17 '^W' */
+ 0x18, /* 00011000 */
+ 0x3c, /* 00111100 */
+ 0x7e, /* 01111110 */
+ 0x18, /* 00011000 */
+ 0x7e, /* 01111110 */
+ 0x3c, /* 00111100 */
+ 0x18, /* 00011000 */
+ 0xff, /* 11111111 */
+
+ /* 24 0x18 '^X' */
+ 0x18, /* 00011000 */
+ 0x3c, /* 00111100 */
+ 0x7e, /* 01111110 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+
+ /* 25 0x19 '^Y' */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x7e, /* 01111110 */
+ 0x3c, /* 00111100 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+
+ /* 26 0x1a '^Z' */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x0c, /* 00001100 */
+ 0xfe, /* 11111110 */
+ 0x0c, /* 00001100 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 27 0x1b '^[' */
+ 0x00, /* 00000000 */
+ 0x30, /* 00110000 */
+ 0x60, /* 01100000 */
+ 0xfe, /* 11111110 */
+ 0x60, /* 01100000 */
+ 0x30, /* 00110000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 28 0x1c '^\' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0xfe, /* 11111110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 29 0x1d '^]' */
+ 0x00, /* 00000000 */
+ 0x24, /* 00100100 */
+ 0x66, /* 01100110 */
+ 0xff, /* 11111111 */
+ 0x66, /* 01100110 */
+ 0x24, /* 00100100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 30 0x1e '^^' */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x3c, /* 00111100 */
+ 0x7e, /* 01111110 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 31 0x1f '^_' */
+ 0x00, /* 00000000 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0x7e, /* 01111110 */
+ 0x3c, /* 00111100 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 32 0x20 ' ' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 33 0x21 '!' */
+ 0x18, /* 00011000 */
+ 0x3c, /* 00111100 */
+ 0x3c, /* 00111100 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+
+ /* 34 0x22 '"' */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x24, /* 00100100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 35 0x23 '#' */
+ 0x6c, /* 01101100 */
+ 0x6c, /* 01101100 */
+ 0xfe, /* 11111110 */
+ 0x6c, /* 01101100 */
+ 0xfe, /* 11111110 */
+ 0x6c, /* 01101100 */
+ 0x6c, /* 01101100 */
+ 0x00, /* 00000000 */
+
+ /* 36 0x24 '$' */
+ 0x18, /* 00011000 */
+ 0x3e, /* 00111110 */
+ 0x60, /* 01100000 */
+ 0x3c, /* 00111100 */
+ 0x06, /* 00000110 */
+ 0x7c, /* 01111100 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+
+ /* 37 0x25 '%' */
+ 0x00, /* 00000000 */
+ 0xc6, /* 11000110 */
+ 0xcc, /* 11001100 */
+ 0x18, /* 00011000 */
+ 0x30, /* 00110000 */
+ 0x66, /* 01100110 */
+ 0xc6, /* 11000110 */
+ 0x00, /* 00000000 */
+
+ /* 38 0x26 '&' */
+ 0x38, /* 00111000 */
+ 0x6c, /* 01101100 */
+ 0x38, /* 00111000 */
+ 0x76, /* 01110110 */
+ 0xdc, /* 11011100 */
+ 0xcc, /* 11001100 */
+ 0x76, /* 01110110 */
+ 0x00, /* 00000000 */
+
+ /* 39 0x27 ''' */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x30, /* 00110000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 40 0x28 '(' */
+ 0x0c, /* 00001100 */
+ 0x18, /* 00011000 */
+ 0x30, /* 00110000 */
+ 0x30, /* 00110000 */
+ 0x30, /* 00110000 */
+ 0x18, /* 00011000 */
+ 0x0c, /* 00001100 */
+ 0x00, /* 00000000 */
+
+ /* 41 0x29 ')' */
+ 0x30, /* 00110000 */
+ 0x18, /* 00011000 */
+ 0x0c, /* 00001100 */
+ 0x0c, /* 00001100 */
+ 0x0c, /* 00001100 */
+ 0x18, /* 00011000 */
+ 0x30, /* 00110000 */
+ 0x00, /* 00000000 */
+
+ /* 42 0x2a '*' */
+ 0x00, /* 00000000 */
+ 0x66, /* 01100110 */
+ 0x3c, /* 00111100 */
+ 0xff, /* 11111111 */
+ 0x3c, /* 00111100 */
+ 0x66, /* 01100110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 43 0x2b '+' */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x7e, /* 01111110 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 44 0x2c ',' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x30, /* 00110000 */
+
+ /* 45 0x2d '-' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7e, /* 01111110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 46 0x2e '.' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+
+ /* 47 0x2f '/' */
+ 0x06, /* 00000110 */
+ 0x0c, /* 00001100 */
+ 0x18, /* 00011000 */
+ 0x30, /* 00110000 */
+ 0x60, /* 01100000 */
+ 0xc0, /* 11000000 */
+ 0x80, /* 10000000 */
+ 0x00, /* 00000000 */
+
+ /* 48 0x30 '0' */
+ 0x38, /* 00111000 */
+ 0x6c, /* 01101100 */
+ 0xc6, /* 11000110 */
+ 0xd6, /* 11010110 */
+ 0xc6, /* 11000110 */
+ 0x6c, /* 01101100 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+
+ /* 49 0x31 '1' */
+ 0x18, /* 00011000 */
+ 0x38, /* 00111000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x7e, /* 01111110 */
+ 0x00, /* 00000000 */
+
+ /* 50 0x32 '2' */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0x06, /* 00000110 */
+ 0x1c, /* 00011100 */
+ 0x30, /* 00110000 */
+ 0x66, /* 01100110 */
+ 0xfe, /* 11111110 */
+ 0x00, /* 00000000 */
+
+ /* 51 0x33 '3' */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0x06, /* 00000110 */
+ 0x3c, /* 00111100 */
+ 0x06, /* 00000110 */
+ 0xc6, /* 11000110 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+
+ /* 52 0x34 '4' */
+ 0x1c, /* 00011100 */
+ 0x3c, /* 00111100 */
+ 0x6c, /* 01101100 */
+ 0xcc, /* 11001100 */
+ 0xfe, /* 11111110 */
+ 0x0c, /* 00001100 */
+ 0x1e, /* 00011110 */
+ 0x00, /* 00000000 */
+
+ /* 53 0x35 '5' */
+ 0xfe, /* 11111110 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0xfc, /* 11111100 */
+ 0x06, /* 00000110 */
+ 0xc6, /* 11000110 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+
+ /* 54 0x36 '6' */
+ 0x38, /* 00111000 */
+ 0x60, /* 01100000 */
+ 0xc0, /* 11000000 */
+ 0xfc, /* 11111100 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+
+ /* 55 0x37 '7' */
+ 0xfe, /* 11111110 */
+ 0xc6, /* 11000110 */
+ 0x0c, /* 00001100 */
+ 0x18, /* 00011000 */
+ 0x30, /* 00110000 */
+ 0x30, /* 00110000 */
+ 0x30, /* 00110000 */
+ 0x00, /* 00000000 */
+
+ /* 56 0x38 '8' */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+
+ /* 57 0x39 '9' */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x7e, /* 01111110 */
+ 0x06, /* 00000110 */
+ 0x0c, /* 00001100 */
+ 0x78, /* 01111000 */
+ 0x00, /* 00000000 */
+
+ /* 58 0x3a ':' */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+
+ /* 59 0x3b ';' */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x30, /* 00110000 */
+
+ /* 60 0x3c '<' */
+ 0x06, /* 00000110 */
+ 0x0c, /* 00001100 */
+ 0x18, /* 00011000 */
+ 0x30, /* 00110000 */
+ 0x18, /* 00011000 */
+ 0x0c, /* 00001100 */
+ 0x06, /* 00000110 */
+ 0x00, /* 00000000 */
+
+ /* 61 0x3d '=' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7e, /* 01111110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7e, /* 01111110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 62 0x3e '>' */
+ 0x60, /* 01100000 */
+ 0x30, /* 00110000 */
+ 0x18, /* 00011000 */
+ 0x0c, /* 00001100 */
+ 0x18, /* 00011000 */
+ 0x30, /* 00110000 */
+ 0x60, /* 01100000 */
+ 0x00, /* 00000000 */
+
+ /* 63 0x3f '?' */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0x0c, /* 00001100 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+
+ /* 64 0x40 '@' */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xde, /* 11011110 */
+ 0xde, /* 11011110 */
+ 0xde, /* 11011110 */
+ 0xc0, /* 11000000 */
+ 0x78, /* 01111000 */
+ 0x00, /* 00000000 */
+
+ /* 65 0x41 'A' */
+ 0x38, /* 00111000 */
+ 0x6c, /* 01101100 */
+ 0xc6, /* 11000110 */
+ 0xfe, /* 11111110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x00, /* 00000000 */
+
+ /* 66 0x42 'B' */
+ 0xfc, /* 11111100 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x7c, /* 01111100 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0xfc, /* 11111100 */
+ 0x00, /* 00000000 */
+
+ /* 67 0x43 'C' */
+ 0x3c, /* 00111100 */
+ 0x66, /* 01100110 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0x66, /* 01100110 */
+ 0x3c, /* 00111100 */
+ 0x00, /* 00000000 */
+
+ /* 68 0x44 'D' */
+ 0xf8, /* 11111000 */
+ 0x6c, /* 01101100 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x6c, /* 01101100 */
+ 0xf8, /* 11111000 */
+ 0x00, /* 00000000 */
+
+ /* 69 0x45 'E' */
+ 0xfe, /* 11111110 */
+ 0x62, /* 01100010 */
+ 0x68, /* 01101000 */
+ 0x78, /* 01111000 */
+ 0x68, /* 01101000 */
+ 0x62, /* 01100010 */
+ 0xfe, /* 11111110 */
+ 0x00, /* 00000000 */
+
+ /* 70 0x46 'F' */
+ 0xfe, /* 11111110 */
+ 0x62, /* 01100010 */
+ 0x68, /* 01101000 */
+ 0x78, /* 01111000 */
+ 0x68, /* 01101000 */
+ 0x60, /* 01100000 */
+ 0xf0, /* 11110000 */
+ 0x00, /* 00000000 */
+
+ /* 71 0x47 'G' */
+ 0x3c, /* 00111100 */
+ 0x66, /* 01100110 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0xce, /* 11001110 */
+ 0x66, /* 01100110 */
+ 0x3a, /* 00111010 */
+ 0x00, /* 00000000 */
+
+ /* 72 0x48 'H' */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xfe, /* 11111110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x00, /* 00000000 */
+
+ /* 73 0x49 'I' */
+ 0x3c, /* 00111100 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x3c, /* 00111100 */
+ 0x00, /* 00000000 */
+
+ /* 74 0x4a 'J' */
+ 0x1e, /* 00011110 */
+ 0x0c, /* 00001100 */
+ 0x0c, /* 00001100 */
+ 0x0c, /* 00001100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0x78, /* 01111000 */
+ 0x00, /* 00000000 */
+
+ /* 75 0x4b 'K' */
+ 0xe6, /* 11100110 */
+ 0x66, /* 01100110 */
+ 0x6c, /* 01101100 */
+ 0x78, /* 01111000 */
+ 0x6c, /* 01101100 */
+ 0x66, /* 01100110 */
+ 0xe6, /* 11100110 */
+ 0x00, /* 00000000 */
+
+ /* 76 0x4c 'L' */
+ 0xf0, /* 11110000 */
+ 0x60, /* 01100000 */
+ 0x60, /* 01100000 */
+ 0x60, /* 01100000 */
+ 0x62, /* 01100010 */
+ 0x66, /* 01100110 */
+ 0xfe, /* 11111110 */
+ 0x00, /* 00000000 */
+
+ /* 77 0x4d 'M' */
+ 0xc6, /* 11000110 */
+ 0xee, /* 11101110 */
+ 0xfe, /* 11111110 */
+ 0xfe, /* 11111110 */
+ 0xd6, /* 11010110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x00, /* 00000000 */
+
+ /* 78 0x4e 'N' */
+ 0xc6, /* 11000110 */
+ 0xe6, /* 11100110 */
+ 0xf6, /* 11110110 */
+ 0xde, /* 11011110 */
+ 0xce, /* 11001110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x00, /* 00000000 */
+
+ /* 79 0x4f 'O' */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+
+ /* 80 0x50 'P' */
+ 0xfc, /* 11111100 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x7c, /* 01111100 */
+ 0x60, /* 01100000 */
+ 0x60, /* 01100000 */
+ 0xf0, /* 11110000 */
+ 0x00, /* 00000000 */
+
+ /* 81 0x51 'Q' */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xce, /* 11001110 */
+ 0x7c, /* 01111100 */
+ 0x0e, /* 00001110 */
+
+ /* 82 0x52 'R' */
+ 0xfc, /* 11111100 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x7c, /* 01111100 */
+ 0x6c, /* 01101100 */
+ 0x66, /* 01100110 */
+ 0xe6, /* 11100110 */
+ 0x00, /* 00000000 */
+
+ /* 83 0x53 'S' */
+ 0x3c, /* 00111100 */
+ 0x66, /* 01100110 */
+ 0x30, /* 00110000 */
+ 0x18, /* 00011000 */
+ 0x0c, /* 00001100 */
+ 0x66, /* 01100110 */
+ 0x3c, /* 00111100 */
+ 0x00, /* 00000000 */
+
+ /* 84 0x54 'T' */
+ 0x7e, /* 01111110 */
+ 0x7e, /* 01111110 */
+ 0x5a, /* 01011010 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x3c, /* 00111100 */
+ 0x00, /* 00000000 */
+
+ /* 85 0x55 'U' */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+
+ /* 86 0x56 'V' */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x6c, /* 01101100 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+
+ /* 87 0x57 'W' */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xd6, /* 11010110 */
+ 0xd6, /* 11010110 */
+ 0xfe, /* 11111110 */
+ 0x6c, /* 01101100 */
+ 0x00, /* 00000000 */
+
+ /* 88 0x58 'X' */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x6c, /* 01101100 */
+ 0x38, /* 00111000 */
+ 0x6c, /* 01101100 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x00, /* 00000000 */
+
+ /* 89 0x59 'Y' */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x3c, /* 00111100 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x3c, /* 00111100 */
+ 0x00, /* 00000000 */
+
+ /* 90 0x5a 'Z' */
+ 0xfe, /* 11111110 */
+ 0xc6, /* 11000110 */
+ 0x8c, /* 10001100 */
+ 0x18, /* 00011000 */
+ 0x32, /* 00110010 */
+ 0x66, /* 01100110 */
+ 0xfe, /* 11111110 */
+ 0x00, /* 00000000 */
+
+ /* 91 0x5b '[' */
+ 0x3c, /* 00111100 */
+ 0x30, /* 00110000 */
+ 0x30, /* 00110000 */
+ 0x30, /* 00110000 */
+ 0x30, /* 00110000 */
+ 0x30, /* 00110000 */
+ 0x3c, /* 00111100 */
+ 0x00, /* 00000000 */
+
+ /* 92 0x5c '\' */
+ 0xc0, /* 11000000 */
+ 0x60, /* 01100000 */
+ 0x30, /* 00110000 */
+ 0x18, /* 00011000 */
+ 0x0c, /* 00001100 */
+ 0x06, /* 00000110 */
+ 0x02, /* 00000010 */
+ 0x00, /* 00000000 */
+
+ /* 93 0x5d ']' */
+ 0x3c, /* 00111100 */
+ 0x0c, /* 00001100 */
+ 0x0c, /* 00001100 */
+ 0x0c, /* 00001100 */
+ 0x0c, /* 00001100 */
+ 0x0c, /* 00001100 */
+ 0x3c, /* 00111100 */
+ 0x00, /* 00000000 */
+
+ /* 94 0x5e '^' */
+ 0x10, /* 00010000 */
+ 0x38, /* 00111000 */
+ 0x6c, /* 01101100 */
+ 0xc6, /* 11000110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 95 0x5f '_' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xff, /* 11111111 */
+
+ /* 96 0x60 '`' */
+ 0x30, /* 00110000 */
+ 0x18, /* 00011000 */
+ 0x0c, /* 00001100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 97 0x61 'a' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x78, /* 01111000 */
+ 0x0c, /* 00001100 */
+ 0x7c, /* 01111100 */
+ 0xcc, /* 11001100 */
+ 0x76, /* 01110110 */
+ 0x00, /* 00000000 */
+
+ /* 98 0x62 'b' */
+ 0xe0, /* 11100000 */
+ 0x60, /* 01100000 */
+ 0x7c, /* 01111100 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0xdc, /* 11011100 */
+ 0x00, /* 00000000 */
+
+ /* 99 0x63 'c' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xc0, /* 11000000 */
+ 0xc6, /* 11000110 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+
+ /* 100 0x64 'd' */
+ 0x1c, /* 00011100 */
+ 0x0c, /* 00001100 */
+ 0x7c, /* 01111100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0x76, /* 01110110 */
+ 0x00, /* 00000000 */
+
+ /* 101 0x65 'e' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xfe, /* 11111110 */
+ 0xc0, /* 11000000 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+
+ /* 102 0x66 'f' */
+ 0x3c, /* 00111100 */
+ 0x66, /* 01100110 */
+ 0x60, /* 01100000 */
+ 0xf8, /* 11111000 */
+ 0x60, /* 01100000 */
+ 0x60, /* 01100000 */
+ 0xf0, /* 11110000 */
+ 0x00, /* 00000000 */
+
+ /* 103 0x67 'g' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x76, /* 01110110 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0x7c, /* 01111100 */
+ 0x0c, /* 00001100 */
+ 0xf8, /* 11111000 */
+
+ /* 104 0x68 'h' */
+ 0xe0, /* 11100000 */
+ 0x60, /* 01100000 */
+ 0x6c, /* 01101100 */
+ 0x76, /* 01110110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0xe6, /* 11100110 */
+ 0x00, /* 00000000 */
+
+ /* 105 0x69 'i' */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x3c, /* 00111100 */
+ 0x00, /* 00000000 */
+
+ /* 106 0x6a 'j' */
+ 0x06, /* 00000110 */
+ 0x00, /* 00000000 */
+ 0x06, /* 00000110 */
+ 0x06, /* 00000110 */
+ 0x06, /* 00000110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x3c, /* 00111100 */
+
+ /* 107 0x6b 'k' */
+ 0xe0, /* 11100000 */
+ 0x60, /* 01100000 */
+ 0x66, /* 01100110 */
+ 0x6c, /* 01101100 */
+ 0x78, /* 01111000 */
+ 0x6c, /* 01101100 */
+ 0xe6, /* 11100110 */
+ 0x00, /* 00000000 */
+
+ /* 108 0x6c 'l' */
+ 0x38, /* 00111000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x3c, /* 00111100 */
+ 0x00, /* 00000000 */
+
+ /* 109 0x6d 'm' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xec, /* 11101100 */
+ 0xfe, /* 11111110 */
+ 0xd6, /* 11010110 */
+ 0xd6, /* 11010110 */
+ 0xd6, /* 11010110 */
+ 0x00, /* 00000000 */
+
+ /* 110 0x6e 'n' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xdc, /* 11011100 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x00, /* 00000000 */
+
+ /* 111 0x6f 'o' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+
+ /* 112 0x70 'p' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xdc, /* 11011100 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x7c, /* 01111100 */
+ 0x60, /* 01100000 */
+ 0xf0, /* 11110000 */
+
+ /* 113 0x71 'q' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x76, /* 01110110 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0x7c, /* 01111100 */
+ 0x0c, /* 00001100 */
+ 0x1e, /* 00011110 */
+
+ /* 114 0x72 'r' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xdc, /* 11011100 */
+ 0x76, /* 01110110 */
+ 0x60, /* 01100000 */
+ 0x60, /* 01100000 */
+ 0xf0, /* 11110000 */
+ 0x00, /* 00000000 */
+
+ /* 115 0x73 's' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7e, /* 01111110 */
+ 0xc0, /* 11000000 */
+ 0x7c, /* 01111100 */
+ 0x06, /* 00000110 */
+ 0xfc, /* 11111100 */
+ 0x00, /* 00000000 */
+
+ /* 116 0x74 't' */
+ 0x30, /* 00110000 */
+ 0x30, /* 00110000 */
+ 0xfc, /* 11111100 */
+ 0x30, /* 00110000 */
+ 0x30, /* 00110000 */
+ 0x36, /* 00110110 */
+ 0x1c, /* 00011100 */
+ 0x00, /* 00000000 */
+
+ /* 117 0x75 'u' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0x76, /* 01110110 */
+ 0x00, /* 00000000 */
+
+ /* 118 0x76 'v' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x6c, /* 01101100 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+
+ /* 119 0x77 'w' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xc6, /* 11000110 */
+ 0xd6, /* 11010110 */
+ 0xd6, /* 11010110 */
+ 0xfe, /* 11111110 */
+ 0x6c, /* 01101100 */
+ 0x00, /* 00000000 */
+
+ /* 120 0x78 'x' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xc6, /* 11000110 */
+ 0x6c, /* 01101100 */
+ 0x38, /* 00111000 */
+ 0x6c, /* 01101100 */
+ 0xc6, /* 11000110 */
+ 0x00, /* 00000000 */
+
+ /* 121 0x79 'y' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x7e, /* 01111110 */
+ 0x06, /* 00000110 */
+ 0xfc, /* 11111100 */
+
+ /* 122 0x7a 'z' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7e, /* 01111110 */
+ 0x4c, /* 01001100 */
+ 0x18, /* 00011000 */
+ 0x32, /* 00110010 */
+ 0x7e, /* 01111110 */
+ 0x00, /* 00000000 */
+
+ /* 123 0x7b '{' */
+ 0x0e, /* 00001110 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x70, /* 01110000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x0e, /* 00001110 */
+ 0x00, /* 00000000 */
+
+ /* 124 0x7c '|' */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+
+ /* 125 0x7d '}' */
+ 0x70, /* 01110000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x0e, /* 00001110 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x70, /* 01110000 */
+ 0x00, /* 00000000 */
+
+ /* 126 0x7e '~' */
+ 0x76, /* 01110110 */
+ 0xdc, /* 11011100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+	/* 127 0x7f '⌂' */
+ 0x00, /* 00000000 */
+ 0x10, /* 00010000 */
+ 0x38, /* 00111000 */
+ 0x6c, /* 01101100 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xfe, /* 11111110 */
+ 0x00, /* 00000000 */
+
+ /* 128 0x80 'Ç' */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0xc6, /* 11000110 */
+ 0x7c, /* 01111100 */
+ 0x0c, /* 00001100 */
+ 0x78, /* 01111000 */
+
+ /* 129 0x81 'ü' */
+ 0xcc, /* 11001100 */
+ 0x00, /* 00000000 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0x76, /* 01110110 */
+ 0x00, /* 00000000 */
+
+ /* 130 0x82 'é' */
+ 0x0c, /* 00001100 */
+ 0x18, /* 00011000 */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xfe, /* 11111110 */
+ 0xc0, /* 11000000 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+
+ /* 131 0x83 'â' */
+ 0x7c, /* 01111100 */
+ 0x82, /* 10000010 */
+ 0x78, /* 01111000 */
+ 0x0c, /* 00001100 */
+ 0x7c, /* 01111100 */
+ 0xcc, /* 11001100 */
+ 0x76, /* 01110110 */
+ 0x00, /* 00000000 */
+
+ /* 132 0x84 'ä' */
+ 0xc6, /* 11000110 */
+ 0x00, /* 00000000 */
+ 0x78, /* 01111000 */
+ 0x0c, /* 00001100 */
+ 0x7c, /* 01111100 */
+ 0xcc, /* 11001100 */
+ 0x76, /* 01110110 */
+ 0x00, /* 00000000 */
+
+ /* 133 0x85 'à' */
+ 0x30, /* 00110000 */
+ 0x18, /* 00011000 */
+ 0x78, /* 01111000 */
+ 0x0c, /* 00001100 */
+ 0x7c, /* 01111100 */
+ 0xcc, /* 11001100 */
+ 0x76, /* 01110110 */
+ 0x00, /* 00000000 */
+
+ /* 134 0x86 'å' */
+ 0x30, /* 00110000 */
+ 0x30, /* 00110000 */
+ 0x78, /* 01111000 */
+ 0x0c, /* 00001100 */
+ 0x7c, /* 01111100 */
+ 0xcc, /* 11001100 */
+ 0x76, /* 01110110 */
+ 0x00, /* 00000000 */
+
+ /* 135 0x87 'ç' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7e, /* 01111110 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0x7e, /* 01111110 */
+ 0x0c, /* 00001100 */
+ 0x38, /* 00111000 */
+
+ /* 136 0x88 'ê' */
+ 0x7c, /* 01111100 */
+ 0x82, /* 10000010 */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xfe, /* 11111110 */
+ 0xc0, /* 11000000 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+
+ /* 137 0x89 'ë' */
+ 0xc6, /* 11000110 */
+ 0x00, /* 00000000 */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xfe, /* 11111110 */
+ 0xc0, /* 11000000 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+
+ /* 138 0x8a 'è' */
+ 0x30, /* 00110000 */
+ 0x18, /* 00011000 */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xfe, /* 11111110 */
+ 0xc0, /* 11000000 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+
+ /* 139 0x8b 'ï' */
+ 0x66, /* 01100110 */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x3c, /* 00111100 */
+ 0x00, /* 00000000 */
+
+ /* 140 0x8c 'î' */
+ 0x7c, /* 01111100 */
+ 0x82, /* 10000010 */
+ 0x38, /* 00111000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x3c, /* 00111100 */
+ 0x00, /* 00000000 */
+
+ /* 141 0x8d 'ì' */
+ 0x30, /* 00110000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x3c, /* 00111100 */
+ 0x00, /* 00000000 */
+
+ /* 142 0x8e 'Ä' */
+ 0xc6, /* 11000110 */
+ 0x38, /* 00111000 */
+ 0x6c, /* 01101100 */
+ 0xc6, /* 11000110 */
+ 0xfe, /* 11111110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x00, /* 00000000 */
+
+ /* 143 0x8f 'Å' */
+ 0x38, /* 00111000 */
+ 0x6c, /* 01101100 */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xfe, /* 11111110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x00, /* 00000000 */
+
+ /* 144 0x90 'É' */
+ 0x18, /* 00011000 */
+ 0x30, /* 00110000 */
+ 0xfe, /* 11111110 */
+ 0xc0, /* 11000000 */
+ 0xf8, /* 11111000 */
+ 0xc0, /* 11000000 */
+ 0xfe, /* 11111110 */
+ 0x00, /* 00000000 */
+
+ /* 145 0x91 'æ' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7e, /* 01111110 */
+ 0x18, /* 00011000 */
+ 0x7e, /* 01111110 */
+ 0xd8, /* 11011000 */
+ 0x7e, /* 01111110 */
+ 0x00, /* 00000000 */
+
+ /* 146 0x92 'Æ' */
+ 0x3e, /* 00111110 */
+ 0x6c, /* 01101100 */
+ 0xcc, /* 11001100 */
+ 0xfe, /* 11111110 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xce, /* 11001110 */
+ 0x00, /* 00000000 */
+
+ /* 147 0x93 'ô' */
+ 0x7c, /* 01111100 */
+ 0x82, /* 10000010 */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+
+ /* 148 0x94 'ö' */
+ 0xc6, /* 11000110 */
+ 0x00, /* 00000000 */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+
+ /* 149 0x95 'ò' */
+ 0x30, /* 00110000 */
+ 0x18, /* 00011000 */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+
+ /* 150 0x96 'û' */
+ 0x78, /* 01111000 */
+ 0x84, /* 10000100 */
+ 0x00, /* 00000000 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0x76, /* 01110110 */
+ 0x00, /* 00000000 */
+
+ /* 151 0x97 'ù' */
+ 0x60, /* 01100000 */
+ 0x30, /* 00110000 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0x76, /* 01110110 */
+ 0x00, /* 00000000 */
+
+ /* 152 0x98 'ÿ' */
+ 0xc6, /* 11000110 */
+ 0x00, /* 00000000 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x7e, /* 01111110 */
+ 0x06, /* 00000110 */
+ 0xfc, /* 11111100 */
+
+ /* 153 0x99 'Ö' */
+ 0xc6, /* 11000110 */
+ 0x38, /* 00111000 */
+ 0x6c, /* 01101100 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x6c, /* 01101100 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+
+ /* 154 0x9a 'Ü' */
+ 0xc6, /* 11000110 */
+ 0x00, /* 00000000 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+
+ /* 155 0x9b '¢' */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x7e, /* 01111110 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0x7e, /* 01111110 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+
+ /* 156 0x9c '£' */
+ 0x38, /* 00111000 */
+ 0x6c, /* 01101100 */
+ 0x64, /* 01100100 */
+ 0xf0, /* 11110000 */
+ 0x60, /* 01100000 */
+ 0x66, /* 01100110 */
+ 0xfc, /* 11111100 */
+ 0x00, /* 00000000 */
+
+ /* 157 0x9d '¥' */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x3c, /* 00111100 */
+ 0x7e, /* 01111110 */
+ 0x18, /* 00011000 */
+ 0x7e, /* 01111110 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+
+ /* 158 0x9e '₧' */
+ 0xf8, /* 11111000 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xfa, /* 11111010 */
+ 0xc6, /* 11000110 */
+ 0xcf, /* 11001111 */
+ 0xc6, /* 11000110 */
+ 0xc7, /* 11000111 */
+
+ /* 159 0x9f 'ƒ' */
+ 0x0e, /* 00001110 */
+ 0x1b, /* 00011011 */
+ 0x18, /* 00011000 */
+ 0x3c, /* 00111100 */
+ 0x18, /* 00011000 */
+ 0xd8, /* 11011000 */
+ 0x70, /* 01110000 */
+ 0x00, /* 00000000 */
+
+ /* 160 0xa0 'á' */
+ 0x18, /* 00011000 */
+ 0x30, /* 00110000 */
+ 0x78, /* 01111000 */
+ 0x0c, /* 00001100 */
+ 0x7c, /* 01111100 */
+ 0xcc, /* 11001100 */
+ 0x76, /* 01110110 */
+ 0x00, /* 00000000 */
+
+ /* 161 0xa1 'í' */
+ 0x0c, /* 00001100 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x3c, /* 00111100 */
+ 0x00, /* 00000000 */
+
+ /* 162 0xa2 'ó' */
+ 0x0c, /* 00001100 */
+ 0x18, /* 00011000 */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+
+ /* 163 0xa3 'ú' */
+ 0x18, /* 00011000 */
+ 0x30, /* 00110000 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0x76, /* 01110110 */
+ 0x00, /* 00000000 */
+
+ /* 164 0xa4 'ñ' */
+ 0x76, /* 01110110 */
+ 0xdc, /* 11011100 */
+ 0x00, /* 00000000 */
+ 0xdc, /* 11011100 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x00, /* 00000000 */
+
+ /* 165 0xa5 'Ñ' */
+ 0x76, /* 01110110 */
+ 0xdc, /* 11011100 */
+ 0x00, /* 00000000 */
+ 0xe6, /* 11100110 */
+ 0xf6, /* 11110110 */
+ 0xde, /* 11011110 */
+ 0xce, /* 11001110 */
+ 0x00, /* 00000000 */
+
+ /* 166 0xa6 'ª' */
+ 0x3c, /* 00111100 */
+ 0x6c, /* 01101100 */
+ 0x6c, /* 01101100 */
+ 0x3e, /* 00111110 */
+ 0x00, /* 00000000 */
+ 0x7e, /* 01111110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 167 0xa7 'º' */
+ 0x38, /* 00111000 */
+ 0x6c, /* 01101100 */
+ 0x6c, /* 01101100 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 168 0xa8 '¿' */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x30, /* 00110000 */
+ 0x63, /* 01100011 */
+ 0x3e, /* 00111110 */
+ 0x00, /* 00000000 */
+
+ /* 169 0xa9 '⌐' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xfe, /* 11111110 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 170 0xaa '¬' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xfe, /* 11111110 */
+ 0x06, /* 00000110 */
+ 0x06, /* 00000110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 171 0xab '½' */
+ 0x63, /* 01100011 */
+ 0xe6, /* 11100110 */
+ 0x6c, /* 01101100 */
+ 0x7e, /* 01111110 */
+ 0x33, /* 00110011 */
+ 0x66, /* 01100110 */
+ 0xcc, /* 11001100 */
+ 0x0f, /* 00001111 */
+
+ /* 172 0xac '¼' */
+ 0x63, /* 01100011 */
+ 0xe6, /* 11100110 */
+ 0x6c, /* 01101100 */
+ 0x7a, /* 01111010 */
+ 0x36, /* 00110110 */
+ 0x6a, /* 01101010 */
+ 0xdf, /* 11011111 */
+ 0x06, /* 00000110 */
+
+ /* 173 0xad '¡' */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x3c, /* 00111100 */
+ 0x3c, /* 00111100 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+
+ /* 174 0xae '«' */
+ 0x00, /* 00000000 */
+ 0x33, /* 00110011 */
+ 0x66, /* 01100110 */
+ 0xcc, /* 11001100 */
+ 0x66, /* 01100110 */
+ 0x33, /* 00110011 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 175 0xaf '»' */
+ 0x00, /* 00000000 */
+ 0xcc, /* 11001100 */
+ 0x66, /* 01100110 */
+ 0x33, /* 00110011 */
+ 0x66, /* 01100110 */
+ 0xcc, /* 11001100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 176 0xb0 '░' */
+ 0x22, /* 00100010 */
+ 0x88, /* 10001000 */
+ 0x22, /* 00100010 */
+ 0x88, /* 10001000 */
+ 0x22, /* 00100010 */
+ 0x88, /* 10001000 */
+ 0x22, /* 00100010 */
+ 0x88, /* 10001000 */
+
+ /* 177 0xb1 '▒' */
+ 0x55, /* 01010101 */
+ 0xaa, /* 10101010 */
+ 0x55, /* 01010101 */
+ 0xaa, /* 10101010 */
+ 0x55, /* 01010101 */
+ 0xaa, /* 10101010 */
+ 0x55, /* 01010101 */
+ 0xaa, /* 10101010 */
+
+ /* 178 0xb2 '▓' */
+ 0x77, /* 01110111 */
+ 0xdd, /* 11011101 */
+ 0x77, /* 01110111 */
+ 0xdd, /* 11011101 */
+ 0x77, /* 01110111 */
+ 0xdd, /* 11011101 */
+ 0x77, /* 01110111 */
+ 0xdd, /* 11011101 */
+
+ /* 179 0xb3 '│' */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+
+ /* 180 0xb4 '┤' */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0xf8, /* 11111000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+
+ /* 181 0xb5 '╡' */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0xf8, /* 11111000 */
+ 0x18, /* 00011000 */
+ 0xf8, /* 11111000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+
+ /* 182 0xb6 '╢' */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0xf6, /* 11110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+
+ /* 183 0xb7 '╖' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xfe, /* 11111110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+
+ /* 184 0xb8 '╕' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xf8, /* 11111000 */
+ 0x18, /* 00011000 */
+ 0xf8, /* 11111000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+
+ /* 185 0xb9 '╣' */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0xf6, /* 11110110 */
+ 0x06, /* 00000110 */
+ 0xf6, /* 11110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+
+ /* 186 0xba '║' */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+
+ /* 187 0xbb '╗' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xfe, /* 11111110 */
+ 0x06, /* 00000110 */
+ 0xf6, /* 11110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+
+ /* 188 0xbc '╝' */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0xf6, /* 11110110 */
+ 0x06, /* 00000110 */
+ 0xfe, /* 11111110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 189 0xbd '╜' */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0xfe, /* 11111110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 190 0xbe '╛' */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0xf8, /* 11111000 */
+ 0x18, /* 00011000 */
+ 0xf8, /* 11111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 191 0xbf '┐' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xf8, /* 11111000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+
+ /* 192 0xc0 '└' */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x1f, /* 00011111 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 193 0xc1 '┴' */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0xff, /* 11111111 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 194 0xc2 '┬' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xff, /* 11111111 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+
+ /* 195 0xc3 '├' */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x1f, /* 00011111 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+
+ /* 196 0xc4 '─' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xff, /* 11111111 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 197 0xc5 '┼' */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0xff, /* 11111111 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+
+ /* 198 0xc6 '╞' */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x1f, /* 00011111 */
+ 0x18, /* 00011000 */
+ 0x1f, /* 00011111 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+
+ /* 199 0xc7 '╟' */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x37, /* 00110111 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+
+ /* 200 0xc8 '╚' */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x37, /* 00110111 */
+ 0x30, /* 00110000 */
+ 0x3f, /* 00111111 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 201 0xc9 '╔' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x3f, /* 00111111 */
+ 0x30, /* 00110000 */
+ 0x37, /* 00110111 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+
+ /* 202 0xca '╩' */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0xf7, /* 11110111 */
+ 0x00, /* 00000000 */
+ 0xff, /* 11111111 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 203 0xcb '╦' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xff, /* 11111111 */
+ 0x00, /* 00000000 */
+ 0xf7, /* 11110111 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+
+ /* 204 0xcc '╠' */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x37, /* 00110111 */
+ 0x30, /* 00110000 */
+ 0x37, /* 00110111 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+
+ /* 205 0xcd '═' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xff, /* 11111111 */
+ 0x00, /* 00000000 */
+ 0xff, /* 11111111 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 206 0xce '╬' */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0xf7, /* 11110111 */
+ 0x00, /* 00000000 */
+ 0xf7, /* 11110111 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+
+ /* 207 0xcf '╧' */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0xff, /* 11111111 */
+ 0x00, /* 00000000 */
+ 0xff, /* 11111111 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 208 0xd0 '╨' */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0xff, /* 11111111 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 209 0xd1 '╤' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xff, /* 11111111 */
+ 0x00, /* 00000000 */
+ 0xff, /* 11111111 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+
+ /* 210 0xd2 '╥' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xff, /* 11111111 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+
+ /* 211 0xd3 '╙' */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x3f, /* 00111111 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 212 0xd4 '╘' */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x1f, /* 00011111 */
+ 0x18, /* 00011000 */
+ 0x1f, /* 00011111 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 213 0xd5 '╒' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x1f, /* 00011111 */
+ 0x18, /* 00011000 */
+ 0x1f, /* 00011111 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+
+ /* 214 0xd6 '╓' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x3f, /* 00111111 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+
+ /* 215 0xd7 '╫' */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0xff, /* 11111111 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+
+ /* 216 0xd8 '╪' */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0xff, /* 11111111 */
+ 0x18, /* 00011000 */
+ 0xff, /* 11111111 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+
+ /* 217 0xd9 '┘' */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0xf8, /* 11111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 218 0xda '┌' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x1f, /* 00011111 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+
+ /* 219 0xdb '█' */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+
+ /* 220 0xdc '▄' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+
+ /* 221 0xdd '▌' */
+ 0xf0, /* 11110000 */
+ 0xf0, /* 11110000 */
+ 0xf0, /* 11110000 */
+ 0xf0, /* 11110000 */
+ 0xf0, /* 11110000 */
+ 0xf0, /* 11110000 */
+ 0xf0, /* 11110000 */
+ 0xf0, /* 11110000 */
+
+ /* 222 0xde '▐' */
+ 0x0f, /* 00001111 */
+ 0x0f, /* 00001111 */
+ 0x0f, /* 00001111 */
+ 0x0f, /* 00001111 */
+ 0x0f, /* 00001111 */
+ 0x0f, /* 00001111 */
+ 0x0f, /* 00001111 */
+ 0x0f, /* 00001111 */
+
+ /* 223 0xdf '▀' */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 224 0xe0 'α' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x76, /* 01110110 */
+ 0xdc, /* 11011100 */
+ 0xc8, /* 11001000 */
+ 0xdc, /* 11011100 */
+ 0x76, /* 01110110 */
+ 0x00, /* 00000000 */
+
+ /* 225 0xe1 'ß' */
+ 0x78, /* 01111000 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xd8, /* 11011000 */
+ 0xcc, /* 11001100 */
+ 0xc6, /* 11000110 */
+ 0xcc, /* 11001100 */
+ 0x00, /* 00000000 */
+
+ /* 226 0xe2 'Γ' */
+ 0xfe, /* 11111110 */
+ 0xc6, /* 11000110 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0x00, /* 00000000 */
+
+ /* 227 0xe3 'π' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xfe, /* 11111110 */
+ 0x6c, /* 01101100 */
+ 0x6c, /* 01101100 */
+ 0x6c, /* 01101100 */
+ 0x6c, /* 01101100 */
+ 0x00, /* 00000000 */
+
+ /* 228 0xe4 'Σ' */
+ 0xfe, /* 11111110 */
+ 0xc6, /* 11000110 */
+ 0x60, /* 01100000 */
+ 0x30, /* 00110000 */
+ 0x60, /* 01100000 */
+ 0xc6, /* 11000110 */
+ 0xfe, /* 11111110 */
+ 0x00, /* 00000000 */
+
+ /* 229 0xe5 'σ' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7e, /* 01111110 */
+ 0xd8, /* 11011000 */
+ 0xd8, /* 11011000 */
+ 0xd8, /* 11011000 */
+ 0x70, /* 01110000 */
+ 0x00, /* 00000000 */
+
+ /* 230 0xe6 'µ' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x7c, /* 01111100 */
+ 0xc0, /* 11000000 */
+
+ /* 231 0xe7 'τ' */
+ 0x00, /* 00000000 */
+ 0x76, /* 01110110 */
+ 0xdc, /* 11011100 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+
+ /* 232 0xe8 'Φ' */
+ 0x7e, /* 01111110 */
+ 0x18, /* 00011000 */
+ 0x3c, /* 00111100 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x3c, /* 00111100 */
+ 0x18, /* 00011000 */
+ 0x7e, /* 01111110 */
+
+ /* 233 0xe9 'Θ' */
+ 0x38, /* 00111000 */
+ 0x6c, /* 01101100 */
+ 0xc6, /* 11000110 */
+ 0xfe, /* 11111110 */
+ 0xc6, /* 11000110 */
+ 0x6c, /* 01101100 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+
+ /* 234 0xea 'Ω' */
+ 0x38, /* 00111000 */
+ 0x6c, /* 01101100 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x6c, /* 01101100 */
+ 0x6c, /* 01101100 */
+ 0xee, /* 11101110 */
+ 0x00, /* 00000000 */
+
+ /* 235 0xeb 'δ' */
+ 0x0e, /* 00001110 */
+ 0x18, /* 00011000 */
+ 0x0c, /* 00001100 */
+ 0x3e, /* 00111110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x3c, /* 00111100 */
+ 0x00, /* 00000000 */
+
+ /* 236 0xec '∞' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7e, /* 01111110 */
+ 0xdb, /* 11011011 */
+ 0xdb, /* 11011011 */
+ 0x7e, /* 01111110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 237 0xed 'φ' */
+ 0x06, /* 00000110 */
+ 0x0c, /* 00001100 */
+ 0x7e, /* 01111110 */
+ 0xdb, /* 11011011 */
+ 0xdb, /* 11011011 */
+ 0x7e, /* 01111110 */
+ 0x60, /* 01100000 */
+ 0xc0, /* 11000000 */
+
+ /* 238 0xee 'ε' */
+ 0x1e, /* 00011110 */
+ 0x30, /* 00110000 */
+ 0x60, /* 01100000 */
+ 0x7e, /* 01111110 */
+ 0x60, /* 01100000 */
+ 0x30, /* 00110000 */
+ 0x1e, /* 00011110 */
+ 0x00, /* 00000000 */
+
+ /* 239 0xef '∩' */
+ 0x00, /* 00000000 */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x00, /* 00000000 */
+
+ /* 240 0xf0 '≡' */
+ 0x00, /* 00000000 */
+ 0xfe, /* 11111110 */
+ 0x00, /* 00000000 */
+ 0xfe, /* 11111110 */
+ 0x00, /* 00000000 */
+ 0xfe, /* 11111110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 241 0xf1 '±' */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x7e, /* 01111110 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x7e, /* 01111110 */
+ 0x00, /* 00000000 */
+
+ /* 242 0xf2 '≥' */
+ 0x30, /* 00110000 */
+ 0x18, /* 00011000 */
+ 0x0c, /* 00001100 */
+ 0x18, /* 00011000 */
+ 0x30, /* 00110000 */
+ 0x00, /* 00000000 */
+ 0x7e, /* 01111110 */
+ 0x00, /* 00000000 */
+
+ /* 243 0xf3 '≤' */
+ 0x0c, /* 00001100 */
+ 0x18, /* 00011000 */
+ 0x30, /* 00110000 */
+ 0x18, /* 00011000 */
+ 0x0c, /* 00001100 */
+ 0x00, /* 00000000 */
+ 0x7e, /* 01111110 */
+ 0x00, /* 00000000 */
+
+ /* 244 0xf4 '⌠' */
+ 0x0e, /* 00001110 */
+ 0x1b, /* 00011011 */
+ 0x1b, /* 00011011 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+
+ /* 245 0xf5 '⌡' */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0xd8, /* 11011000 */
+ 0xd8, /* 11011000 */
+ 0x70, /* 01110000 */
+
+ /* 246 0xf6 '÷' */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x7e, /* 01111110 */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 247 0xf7 '≈' */
+ 0x00, /* 00000000 */
+ 0x76, /* 01110110 */
+ 0xdc, /* 11011100 */
+ 0x00, /* 00000000 */
+ 0x76, /* 01110110 */
+ 0xdc, /* 11011100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 248 0xf8 '°' */
+ 0x38, /* 00111000 */
+ 0x6c, /* 01101100 */
+ 0x6c, /* 01101100 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 249 0xf9 '·' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 250 0xfa '•' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 251 0xfb '√' */
+ 0x0f, /* 00001111 */
+ 0x0c, /* 00001100 */
+ 0x0c, /* 00001100 */
+ 0x0c, /* 00001100 */
+ 0xec, /* 11101100 */
+ 0x6c, /* 01101100 */
+ 0x3c, /* 00111100 */
+ 0x1c, /* 00011100 */
+
+ /* 252 0xfc 'ⁿ' */
+ 0x6c, /* 01101100 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 253 0xfd '²' */
+ 0x78, /* 01111000 */
+ 0x0c, /* 00001100 */
+ 0x18, /* 00011000 */
+ 0x30, /* 00110000 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 254 0xfe '■' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x3c, /* 00111100 */
+ 0x3c, /* 00111100 */
+ 0x3c, /* 00111100 */
+ 0x3c, /* 00111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 255 0xff ' ' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+} };
+
+
+const struct font_desc font_vga_8x8 = {
+ .idx = VGA8x8_IDX,
+ .name = "VGA8x8",
+ .width = 8,
+ .height = 8,
+ .charcount = 256,
+ .data = fontdata_8x8.data,
+ .pref = 0,
+};
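
The descriptor above is the generic handle on the raw bitmap: `data` points at `charcount * height` row bytes (eight per glyph here), and the most significant bit of each row byte is the leftmost pixel, exactly as the binary comments in the table show. A minimal userspace sketch of that addressing follows; the function and its ASCII output are illustrative only, not kernel API:

    #include <stdio.h>

    /*
     * Illustrative only: dumps one glyph of an 8x8 font laid out as in
     * fontdata_8x8.data above -- eight row bytes per character, MSB first.
     */
    static void dump_glyph(const unsigned char *font, unsigned char c)
    {
            const unsigned char *row = font + c * 8;  /* 8 bytes per glyph */

            for (int y = 0; y < 8; y++) {
                    for (int x = 0; x < 8; x++)
                            putchar(row[y] & (0x80 >> x) ? '#' : '.');
                    putchar('\n');
            }
    }

Called with `fontdata_8x8.data` and `'A'` (0x41), this prints the same 8x8 pattern spelled out bit-by-bit in the comments above.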
diff --git a/lib/fonts/font_acorn_8x8.c b/lib/fonts/font_acorn_8x8.c
new file mode 100644
index 000000000..18755c33d
--- /dev/null
+++ b/lib/fonts/font_acorn_8x8.c
@@ -0,0 +1,280 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Acorn-like font definition, with PC graphics characters */
+
+#include <linux/font.h>
+
+#define FONTDATAMAX 2048
+
+static const struct font_data acorndata_8x8 = {
+{ 0, 0, FONTDATAMAX, 0 }, {
+/* 00 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* ^@ */
+/* 01 */ 0x7e, 0x81, 0xa5, 0x81, 0xbd, 0x99, 0x81, 0x7e, /* ^A */
+/* 02 */ 0x7e, 0xff, 0xbd, 0xff, 0xc3, 0xe7, 0xff, 0x7e, /* ^B */
+/* 03 */ 0x6c, 0xfe, 0xfe, 0xfe, 0x7c, 0x38, 0x10, 0x00, /* ^C */
+/* 04 */ 0x10, 0x38, 0x7c, 0xfe, 0x7c, 0x38, 0x10, 0x00, /* ^D */
+/* 05 */ 0x00, 0x18, 0x3c, 0xe7, 0xe7, 0x3c, 0x18, 0x00, /* ^E */
+/* 06 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
+/* 07 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
+/* 08 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
+/* 09 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
+/* 0A */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
+/* 0B */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
+/* 0C */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
+/* 0D */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
+/* 0E */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
+/* 0F */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
+/* 10 */ 0x00, 0x60, 0x78, 0x7e, 0x7e, 0x78, 0x60, 0x00, /* |> */
+/* 11 */ 0x00, 0x06, 0x1e, 0x7e, 0x7e, 0x1e, 0x06, 0x00, /* <| */
+/* 12 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
+/* 13 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
+/* 14 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
+/* 15 */ 0x3c, 0x60, 0x3c, 0x66, 0x3c, 0x06, 0x3c, 0x00,
+/* 16 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
+/* 17 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
+/* 18 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
+/* 19 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
+/* 1A */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
+/* 1B */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
+/* 1C */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
+/* 1D */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
+/* 1E */ 0x00, 0x18, 0x18, 0x3c, 0x3c, 0x7e, 0x7e, 0x00, /* /\ */
+/* 1F */ 0x00, 0x7e, 0x7e, 0x3c, 0x3c, 0x18, 0x18, 0x00, /* \/ */
+/* 20 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* */
+/* 21 */ 0x18, 0x3c, 0x3c, 0x18, 0x18, 0x00, 0x18, 0x00, /* ! */
+/* 22 */ 0x6C, 0x6C, 0x6C, 0x00, 0x00, 0x00, 0x00, 0x00, /* " */
+/* 23 */ 0x36, 0x36, 0x7F, 0x36, 0x7F, 0x36, 0x36, 0x00, /* # */
+/* 24 */ 0x0C, 0x3F, 0x68, 0x3E, 0x0B, 0x7E, 0x18, 0x00, /* $ */
+/* 25 */ 0x60, 0x66, 0x0C, 0x18, 0x30, 0x66, 0x06, 0x00, /* % */
+/* 26 */ 0x38, 0x6C, 0x6C, 0x38, 0x6D, 0x66, 0x3B, 0x00, /* & */
+/* 27 */ 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, /* ' */
+/* 28 */ 0x0C, 0x18, 0x30, 0x30, 0x30, 0x18, 0x0C, 0x00, /* ( */
+/* 29 */ 0x30, 0x18, 0x0C, 0x0C, 0x0C, 0x18, 0x30, 0x00, /* ) */
+/* 2A */ 0x00, 0x18, 0x7E, 0x3C, 0x7E, 0x18, 0x00, 0x00, /* * */
+/* 2B */ 0x00, 0x18, 0x18, 0x7E, 0x18, 0x18, 0x00, 0x00, /* + */
+/* 2C */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x30, /* , */
+/* 2D */ 0x00, 0x00, 0x00, 0x7E, 0x00, 0x00, 0x00, 0x00, /* - */
+/* 2E */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, /* . */
+/* 2F */ 0x00, 0x06, 0x0C, 0x18, 0x30, 0x60, 0x00, 0x00, /* / */
+/* 30 */ 0x3C, 0x66, 0x6E, 0x7E, 0x76, 0x66, 0x3C, 0x00, /* 0 */
+/* 31 */ 0x18, 0x38, 0x18, 0x18, 0x18, 0x18, 0x7E, 0x00, /* 1 */
+/* 32 */ 0x3C, 0x66, 0x06, 0x0C, 0x18, 0x30, 0x7E, 0x00, /* 2 */
+/* 33 */ 0x3C, 0x66, 0x06, 0x1C, 0x06, 0x66, 0x3C, 0x00, /* 3 */
+/* 34 */ 0x0C, 0x1C, 0x3C, 0x6C, 0x7E, 0x0C, 0x0C, 0x00, /* 4 */
+/* 35 */ 0x7E, 0x60, 0x7C, 0x06, 0x06, 0x66, 0x3C, 0x00, /* 5 */
+/* 36 */ 0x1C, 0x30, 0x60, 0x7C, 0x66, 0x66, 0x3C, 0x00, /* 6 */
+/* 37 */ 0x7E, 0x06, 0x0C, 0x18, 0x30, 0x30, 0x30, 0x00, /* 7 */
+/* 38 */ 0x3C, 0x66, 0x66, 0x3C, 0x66, 0x66, 0x3C, 0x00, /* 8 */
+/* 39 */ 0x3C, 0x66, 0x66, 0x3E, 0x06, 0x0C, 0x38, 0x00, /* 9 */
+/* 3A */ 0x00, 0x00, 0x18, 0x18, 0x00, 0x18, 0x18, 0x00, /* : */
+/* 3B */ 0x00, 0x00, 0x18, 0x18, 0x00, 0x18, 0x18, 0x30, /* ; */
+/* 3C */ 0x0C, 0x18, 0x30, 0x60, 0x30, 0x18, 0x0C, 0x00, /* < */
+/* 3D */ 0x00, 0x00, 0x7E, 0x00, 0x7E, 0x00, 0x00, 0x00, /* = */
+/* 3E */ 0x30, 0x18, 0x0C, 0x06, 0x0C, 0x18, 0x30, 0x00, /* > */
+/* 3F */ 0x3C, 0x66, 0x0C, 0x18, 0x18, 0x00, 0x18, 0x00, /* ? */
+/* 40 */ 0x3C, 0x66, 0x6E, 0x6A, 0x6E, 0x60, 0x3C, 0x00, /* @ */
+/* 41 */ 0x3C, 0x66, 0x66, 0x7E, 0x66, 0x66, 0x66, 0x00, /* A */
+/* 42 */ 0x7C, 0x66, 0x66, 0x7C, 0x66, 0x66, 0x7C, 0x00, /* B */
+/* 43 */ 0x3C, 0x66, 0x60, 0x60, 0x60, 0x66, 0x3C, 0x00, /* C */
+/* 44 */ 0x78, 0x6C, 0x66, 0x66, 0x66, 0x6C, 0x78, 0x00, /* D */
+/* 45 */ 0x7E, 0x60, 0x60, 0x7C, 0x60, 0x60, 0x7E, 0x00, /* E */
+/* 46 */ 0x7E, 0x60, 0x60, 0x7C, 0x60, 0x60, 0x60, 0x00, /* F */
+/* 47 */ 0x3C, 0x66, 0x60, 0x6E, 0x66, 0x66, 0x3C, 0x00, /* G */
+/* 48 */ 0x66, 0x66, 0x66, 0x7E, 0x66, 0x66, 0x66, 0x00, /* H */
+/* 49 */ 0x7E, 0x18, 0x18, 0x18, 0x18, 0x18, 0x7E, 0x00, /* I */
+/* 4A */ 0x3E, 0x0C, 0x0C, 0x0C, 0x0C, 0x6C, 0x38, 0x00, /* J */
+/* 4B */ 0x66, 0x6C, 0x78, 0x70, 0x78, 0x6C, 0x66, 0x00, /* K */
+/* 4C */ 0x60, 0x60, 0x60, 0x60, 0x60, 0x60, 0x7E, 0x00, /* L */
+/* 4D */ 0x63, 0x77, 0x7F, 0x6B, 0x6B, 0x63, 0x63, 0x00, /* M */
+/* 4E */ 0x66, 0x66, 0x76, 0x7E, 0x6E, 0x66, 0x66, 0x00, /* N */
+/* 4F */ 0x3C, 0x66, 0x66, 0x66, 0x66, 0x66, 0x3C, 0x00, /* O */
+/* 50 */ 0x7C, 0x66, 0x66, 0x7C, 0x60, 0x60, 0x60, 0x00, /* P */
+/* 51 */ 0x3C, 0x66, 0x66, 0x66, 0x6A, 0x6C, 0x36, 0x00, /* Q */
+/* 52 */ 0x7C, 0x66, 0x66, 0x7C, 0x6C, 0x66, 0x66, 0x00, /* R */
+/* 53 */ 0x3C, 0x66, 0x60, 0x3C, 0x06, 0x66, 0x3C, 0x00, /* S */
+/* 54 */ 0x7E, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x00, /* T */
+/* 55 */ 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x3C, 0x00, /* U */
+/* 56 */ 0x66, 0x66, 0x66, 0x66, 0x66, 0x3C, 0x18, 0x00, /* V */
+/* 57 */ 0x63, 0x63, 0x6B, 0x6B, 0x7F, 0x77, 0x63, 0x00, /* W */
+/* 58 */ 0x66, 0x66, 0x3C, 0x18, 0x3C, 0x66, 0x66, 0x00, /* X */
+/* 59 */ 0x66, 0x66, 0x66, 0x3C, 0x18, 0x18, 0x18, 0x00, /* Y */
+/* 5A */ 0x7E, 0x06, 0x0C, 0x18, 0x30, 0x60, 0x7E, 0x00, /* Z */
+/* 5B */ 0x7C, 0x60, 0x60, 0x60, 0x60, 0x60, 0x7C, 0x00, /* [ */
+/* 5C */ 0x00, 0x60, 0x30, 0x18, 0x0C, 0x06, 0x00, 0x00, /* \ */
+/* 5D */ 0x3E, 0x06, 0x06, 0x06, 0x06, 0x06, 0x3E, 0x00, /* ] */
+/* 5E */ 0x3C, 0x66, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* ^ */
+/* 5F */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, /* _ */
+/* 60 */ 0x30, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* ` */
+/* 61 */ 0x00, 0x00, 0x3C, 0x06, 0x3E, 0x66, 0x3E, 0x00, /* a */
+/* 62 */ 0x60, 0x60, 0x7C, 0x66, 0x66, 0x66, 0x7C, 0x00, /* b */
+/* 63 */ 0x00, 0x00, 0x3C, 0x66, 0x60, 0x66, 0x3C, 0x00, /* c */
+/* 64 */ 0x06, 0x06, 0x3E, 0x66, 0x66, 0x66, 0x3E, 0x00, /* d */
+/* 65 */ 0x00, 0x00, 0x3C, 0x66, 0x7E, 0x60, 0x3C, 0x00, /* e */
+/* 66 */ 0x1C, 0x30, 0x30, 0x7C, 0x30, 0x30, 0x30, 0x00, /* f */
+/* 67 */ 0x00, 0x00, 0x3E, 0x66, 0x66, 0x3E, 0x06, 0x3C, /* g */
+/* 68 */ 0x60, 0x60, 0x7C, 0x66, 0x66, 0x66, 0x66, 0x00, /* h */
+/* 69 */ 0x18, 0x00, 0x38, 0x18, 0x18, 0x18, 0x3C, 0x00, /* i */
+/* 6A */ 0x18, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x70, /* j */
+/* 6B */ 0x60, 0x60, 0x66, 0x6C, 0x78, 0x6C, 0x66, 0x00, /* k */
+/* 6C */ 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3C, 0x00, /* l */
+/* 6D */ 0x00, 0x00, 0x36, 0x7F, 0x6B, 0x6B, 0x63, 0x00, /* m */
+/* 6E */ 0x00, 0x00, 0x7C, 0x66, 0x66, 0x66, 0x66, 0x00, /* n */
+/* 6F */ 0x00, 0x00, 0x3C, 0x66, 0x66, 0x66, 0x3C, 0x00, /* o */
+/* 70 */ 0x00, 0x00, 0x7C, 0x66, 0x66, 0x7C, 0x60, 0x60, /* p */
+/* 71 */ 0x00, 0x00, 0x3E, 0x66, 0x66, 0x3E, 0x06, 0x07, /* q */
+/* 72 */ 0x00, 0x00, 0x6C, 0x76, 0x60, 0x60, 0x60, 0x00, /* r */
+/* 73 */ 0x00, 0x00, 0x3E, 0x60, 0x3C, 0x06, 0x7C, 0x00, /* s */
+/* 74 */ 0x30, 0x30, 0x7C, 0x30, 0x30, 0x30, 0x1C, 0x00, /* t */
+/* 75 */ 0x00, 0x00, 0x66, 0x66, 0x66, 0x66, 0x3E, 0x00, /* u */
+/* 76 */ 0x00, 0x00, 0x66, 0x66, 0x66, 0x3C, 0x18, 0x00, /* v */
+/* 77 */ 0x00, 0x00, 0x63, 0x6B, 0x6B, 0x7F, 0x36, 0x00, /* w */
+/* 78 */ 0x00, 0x00, 0x66, 0x3C, 0x18, 0x3C, 0x66, 0x00, /* x */
+/* 79 */ 0x00, 0x00, 0x66, 0x66, 0x66, 0x3E, 0x06, 0x3C, /* y */
+/* 7A */ 0x00, 0x00, 0x7E, 0x0C, 0x18, 0x30, 0x7E, 0x00, /* z */
+/* 7B */ 0x0C, 0x18, 0x18, 0x70, 0x18, 0x18, 0x0C, 0x00, /* { */
+/* 7C */ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x00, /* | */
+/* 7D */ 0x30, 0x18, 0x18, 0x0E, 0x18, 0x18, 0x30, 0x00, /* } */
+/* 7E */ 0x31, 0x6B, 0x46, 0x00, 0x00, 0x00, 0x00, 0x00, /* ~ */
+/* 7F */ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, /*  */
+/* 80 */ 0x3c, 0x66, 0x60, 0x60, 0x66, 0x3c, 0x30, 0x60,
+/* 81 */ 0x66, 0x00, 0x66, 0x66, 0x66, 0x66, 0x3e, 0x00,
+/* 82 */ 0x0c, 0x18, 0x3c, 0x66, 0x7e, 0x60, 0x3c, 0x00,
+/* 83 */ 0x18, 0x66, 0x3c, 0x06, 0x3e, 0x66, 0x3e, 0x00,
+/* 84 */ 0x66, 0x00, 0x3c, 0x06, 0x3e, 0x66, 0x3e, 0x00,
+/* 85 */ 0x30, 0x18, 0x3c, 0x06, 0x3e, 0x66, 0x3e, 0x00,
+/* 86 */ 0x3c, 0x66, 0x3c, 0x06, 0x3e, 0x66, 0x3e, 0x00,
+/* 87 */ 0x00, 0x00, 0x3c, 0x66, 0x60, 0x66, 0x3c, 0x60,
+/* 88 */ 0x3c, 0x66, 0x3c, 0x66, 0x7e, 0x60, 0x3c, 0x00,
+/* 89 */ 0x66, 0x00, 0x3c, 0x66, 0x7e, 0x60, 0x3c, 0x00,
+/* 8A */ 0x30, 0x18, 0x3c, 0x66, 0x7e, 0x60, 0x3c, 0x00,
+/* 8B */ 0x66, 0x00, 0x00, 0x38, 0x18, 0x18, 0x3c, 0x00,
+/* 8C */ 0x3c, 0x66, 0x00, 0x38, 0x18, 0x18, 0x3c, 0x00,
+/* 8D */ 0x30, 0x18, 0x00, 0x38, 0x18, 0x18, 0x3c, 0x00,
+/* 8E */ 0x66, 0x66, 0x00, 0x3c, 0x66, 0x7e, 0x66, 0x00,
+/* 8F */ 0x18, 0x66, 0x00, 0x3c, 0x66, 0x7e, 0x66, 0x00,
+/* 90 */ 0x0c, 0x18, 0x7e, 0x60, 0x7c, 0x60, 0x7e, 0x00,
+/* 91 */ 0x00, 0x00, 0x3f, 0x0d, 0x3f, 0x6c, 0x3f, 0x00,
+/* 92 */ 0x3f, 0x66, 0x66, 0x7f, 0x66, 0x66, 0x67, 0x00,
+/* 93 */ 0x3c, 0x66, 0x00, 0x3c, 0x66, 0x66, 0x3c, 0x00,
+/* 94 */ 0x66, 0x00, 0x00, 0x3c, 0x66, 0x66, 0x3c, 0x00,
+/* 95 */ 0x30, 0x18, 0x00, 0x3c, 0x66, 0x66, 0x3c, 0x00,
+/* 96 */ 0x3c, 0x66, 0x00, 0x66, 0x66, 0x66, 0x3e, 0x00,
+/* 97 */ 0x30, 0x18, 0x00, 0x66, 0x66, 0x66, 0x3e, 0x00,
+/* 98 */ 0x66, 0x00, 0x66, 0x66, 0x66, 0x3e, 0x06, 0x3c,
+/* 99 */ 0x66, 0x00, 0x3c, 0x66, 0x66, 0x66, 0x3c, 0x00,
+/* 9A */ 0x66, 0x00, 0x66, 0x66, 0x66, 0x66, 0x3c, 0x00,
+/* 9B */ 0x08, 0x3e, 0x6b, 0x68, 0x6b, 0x3e, 0x08, 0x00,
+/* 9C */ 0x1c, 0x36, 0x30, 0x7c, 0x30, 0x30, 0x7e, 0x00,
+/* 9D */ 0x66, 0x3c, 0x18, 0x18, 0x7e, 0x18, 0x18, 0x00,
+/* 9E */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
+/* 9F */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
+/* A0 */ 0x0c, 0x18, 0x3c, 0x06, 0x3e, 0x66, 0x3e, 0x00,
+/* A1 */ 0x0c, 0x18, 0x00, 0x38, 0x18, 0x18, 0x3c, 0x00,
+/* A2 */ 0x0c, 0x18, 0x00, 0x3c, 0x66, 0x66, 0x3c, 0x00,
+/* A3 */ 0x0c, 0x18, 0x00, 0x66, 0x66, 0x66, 0x3e, 0x00,
+/* A4 */ 0x36, 0x6c, 0x00, 0x7c, 0x66, 0x66, 0x66, 0x00,
+/* A5 */ 0x36, 0x6c, 0x00, 0x66, 0x76, 0x6e, 0x66, 0x00,
+/* A6 */ 0x1c, 0x06, 0x1e, 0x36, 0x1e, 0x00, 0x3e, 0x00,
+/* A7 */ 0x1c, 0x36, 0x36, 0x36, 0x1c, 0x00, 0x3e, 0x00,
+/* A8 */ 0x18, 0x00, 0x18, 0x18, 0x30, 0x66, 0x3c, 0x00,
+/* A9 */ 0x7e, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+/* AA */ 0x7e, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+/* AB */ 0x40, 0xc0, 0x40, 0x4f, 0x41, 0x0f, 0x08, 0x0f,
+/* AC */ 0x40, 0xc0, 0x40, 0x48, 0x48, 0x0a, 0x0f, 0x02,
+/* AD */ 0x18, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x00,
+/* AE */ 0x00, 0x33, 0x66, 0xcc, 0xcc, 0x66, 0x33, 0x00,
+/* AF */ 0x00, 0xcc, 0x66, 0x33, 0x33, 0x66, 0xcc, 0x00,
+/* B0 */ 0x22, 0x88, 0x22, 0x88, 0x22, 0x88, 0x22, 0x88,
+/* B1 */ 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa,
+/* B2 */ 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77,
+/* B3 */ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+/* B4 */ 0x18, 0x18, 0x18, 0xf8, 0x18, 0x18, 0x18, 0x18,
+/* B5 */ 0x18, 0x18, 0xf8, 0x18, 0xf8, 0x18, 0x18, 0x18,
+/* B6 */ 0x66, 0x66, 0x66, 0xe6, 0x66, 0x66, 0x66, 0x66,
+/* B7 */ 0x00, 0x00, 0x00, 0xfe, 0x66, 0x66, 0x66, 0x66,
+/* B8 */ 0x00, 0x00, 0xf8, 0x18, 0xf8, 0x18, 0x18, 0x18,
+/* B9 */ 0x66, 0x66, 0xe6, 0x06, 0xe6, 0x66, 0x66, 0x66,
+/* BA */ 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
+/* BB */ 0x00, 0x00, 0xfe, 0x06, 0xe6, 0x66, 0x66, 0x66,
+/* BC */ 0x66, 0x66, 0xe6, 0x06, 0xfe, 0x00, 0x00, 0x00,
+/* BD */ 0x66, 0x66, 0x66, 0xfe, 0x00, 0x00, 0x00, 0x00,
+/* BE */ 0x18, 0x18, 0xf8, 0x18, 0xf8, 0x00, 0x00, 0x00,
+/* BF */ 0x00, 0x00, 0x00, 0xf8, 0x18, 0x18, 0x18, 0x18,
+/* C0 */ 0x18, 0x18, 0x18, 0x1f, 0x00, 0x00, 0x00, 0x00,
+/* C1 */ 0x18, 0x18, 0x18, 0xff, 0x00, 0x00, 0x00, 0x00,
+/* C2 */ 0x00, 0x00, 0x00, 0xff, 0x18, 0x18, 0x18, 0x18,
+/* C3 */ 0x18, 0x18, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18,
+/* C4 */ 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00,
+/* C5 */ 0x18, 0x18, 0x18, 0xff, 0x18, 0x18, 0x18, 0x18,
+/* C6 */ 0x18, 0x18, 0x1f, 0x18, 0x1f, 0x18, 0x18, 0x18,
+/* C7 */ 0x66, 0x66, 0x66, 0x67, 0x66, 0x66, 0x66, 0x66,
+/* C8 */ 0x66, 0x66, 0x67, 0x60, 0x7f, 0x00, 0x00, 0x00,
+/* C9 */ 0x00, 0x00, 0x7f, 0x60, 0x67, 0x66, 0x66, 0x66,
+/* CA */ 0x66, 0x66, 0xe7, 0x00, 0xff, 0x00, 0x00, 0x00,
+/* CB */ 0x00, 0x00, 0xff, 0x00, 0xe7, 0x66, 0x66, 0x66,
+/* CC */ 0x66, 0x66, 0x67, 0x60, 0x67, 0x66, 0x66, 0x66,
+/* CD */ 0x00, 0x00, 0xff, 0x00, 0xff, 0x00, 0x00, 0x00,
+/* CE */ 0x66, 0x66, 0xe7, 0x00, 0xe7, 0x66, 0x66, 0x66,
+/* CF */ 0x18, 0x18, 0xff, 0x00, 0xff, 0x00, 0x00, 0x00,
+/* D0 */ 0x66, 0x66, 0x66, 0xff, 0x00, 0x00, 0x00, 0x00,
+/* D1 */ 0x00, 0x00, 0xff, 0x00, 0xff, 0x18, 0x18, 0x18,
+/* D2 */ 0x00, 0x00, 0x00, 0xff, 0x66, 0x66, 0x66, 0x66,
+/* D3 */ 0x66, 0x66, 0x66, 0x7f, 0x00, 0x00, 0x00, 0x00,
+/* D4 */ 0x18, 0x18, 0x1f, 0x18, 0x1f, 0x00, 0x00, 0x00,
+/* D5 */ 0x00, 0x00, 0x1f, 0x18, 0x1f, 0x18, 0x18, 0x18,
+/* D6 */ 0x00, 0x00, 0x00, 0x7f, 0x66, 0x66, 0x66, 0x66,
+/* D7 */ 0x66, 0x66, 0x66, 0xff, 0x66, 0x66, 0x66, 0x66,
+/* D8 */ 0x18, 0x18, 0xff, 0x18, 0xff, 0x18, 0x18, 0x18,
+/* D9 */ 0x18, 0x18, 0x18, 0xf8, 0x00, 0x00, 0x00, 0x00,
+/* DA */ 0x00, 0x00, 0x00, 0x1f, 0x18, 0x18, 0x18, 0x18,
+/* DB */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+/* DC */ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+/* DD */ 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0,
+/* DE */ 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
+/* DF */ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+/* E0 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
+/* E1 */ 0x3c, 0x66, 0x66, 0x6c, 0x66, 0x66, 0x6c, 0xc0,
+/* E2 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
+/* E3 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
+/* E4 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
+/* E5 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
+/* E6 */ 0x00, 0x00, 0x33, 0x33, 0x33, 0x33, 0x3e, 0x60,
+/* E7 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
+/* E8 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
+/* E9 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
+/* EA */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
+/* EB */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
+/* EC */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
+/* ED */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
+/* EE */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
+/* EF */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
+/* F0 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
+/* F1 */ 0x18, 0x18, 0x7e, 0x18, 0x18, 0x00, 0x7e, 0x00,
+/* F2 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
+/* F3 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
+/* F4 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
+/* F5 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
+/* F6 */ 0x00, 0x18, 0x00, 0xff, 0x00, 0x18, 0x00, 0x00,
+/* F7 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
+/* F8 */ 0x3c, 0x66, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00,
+/* F9 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
+/* FA */ 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00,
+/* FB */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
+/* FC */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
+/* FD */ 0x38, 0x04, 0x18, 0x20, 0x3c, 0x00, 0x00, 0x00,
+/* FE */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00,
+/* FF */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+} };
+
+const struct font_desc font_acorn_8x8 = {
+ .idx = ACORN8x8_IDX,
+ .name = "Acorn8x8",
+ .width = 8,
+ .height = 8,
+ .charcount = 256,
+ .data = acorndata_8x8.data,
+#ifdef CONFIG_ARCH_ACORN
+ .pref = 20,
+#else
+ .pref = 0,
+#endif
+};
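+
+/*
+ * The .pref field above is what lets one descriptor outrank another
+ * when a driver asks for a default font: on Acorn hardware this font
+ * advertises 20, elsewhere 0.  A minimal sketch of that idea, assuming
+ * a caller-supplied descriptor table (pick_preferred_font() is a
+ * hypothetical helper; the kernel's real selection logic, which also
+ * weighs the display size, lives in lib/fonts/fonts.c):
+ */
+static inline const struct font_desc *
+pick_preferred_font(const struct font_desc * const *fonts, unsigned int num)
+{
+	const struct font_desc *best = NULL;
+	unsigned int i;
+
+	for (i = 0; i < num; i++)	/* highest .pref wins */
+		if (!best || fonts[i]->pref > best->pref)
+			best = fonts[i];
+	return best;
+}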
diff --git a/lib/fonts/font_mini_4x6.c b/lib/fonts/font_mini_4x6.c
new file mode 100644
index 000000000..8d39fd447
--- /dev/null
+++ b/lib/fonts/font_mini_4x6.c
@@ -0,0 +1,2159 @@
+
+/* Hand composed "Minuscule" 4x6 font, with binary data generated
+ * using a Perl stub.
+ *
+ * Use 'perl -x mini_4x6.c < mini_4x6.c > new_version.c' to regenerate
+ * binary data.
+ *
+ * Created by Kenneth Albanowski.
+ * No rights reserved, released to the public domain.
+ *
+ * Version 1.0
+ */
+
+/*
+
+#!/usr/bin/perl -pn
+
+s{((0x)?[0-9a-fA-F]+)(.*\[([\*\ ]{4})\])}{
+
+ ($num,$pat,$bits) = ($1,$3,$4);
+
+ $bits =~ s/([^\s0])|(.)/ defined($1) + 0 /ge;
+
+ $num = ord(pack("B8", $bits));
+ $num |= $num >> 4;
+ $num = sprintf("0x%.2x", $num);
+
+ #print "$num,$pat,$bits\n";
+
+ $num . $pat;
+}ge;
+
+__END__;
+*/
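+
+/* Worked example of the stub above: a row drawn as [*** ] yields the
+ * bit string "1110"; pack("B8", ...) zero-pads that to the byte 0xe0,
+ * and "$num |= $num >> 4" mirrors the nybble, producing the 0xee
+ * stored in the table below.
+ */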
+
+/* Note: binary data consists of one byte for each row of each character top
+ to bottom, character 0 to character 255, six bytes per character. Each
+ byte contains the same four character bits in both nybbles.
+ MSBit to LSBit = left to right.
+ */
+
+#include <linux/font.h>
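+
+/*
+ * A minimal decoding sketch for the layout described in the note
+ * above: six row bytes per glyph, the four pixel bits live in the high
+ * nybble (mirrored in the low one), and the MSBit is the leftmost
+ * pixel.  mini_4x6_render() is a hypothetical helper shown only to
+ * illustrate the encoding; it is not part of this file's interface.
+ */
+static inline void mini_4x6_render(const unsigned char rows[6], char out[6][5])
+{
+	int y, x;
+
+	for (y = 0; y < 6; y++) {
+		for (x = 0; x < 4; x++)		/* bit 7 = leftmost pixel */
+			out[y][x] = (rows[y] & (0x80 >> x)) ? '*' : ' ';
+		out[y][4] = '\0';		/* one 4-char row string */
+	}
+}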
+
+#define FONTDATAMAX 1536
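+/* 256 glyphs * 6 row bytes per glyph = 1536 bytes of bitmap data */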
+
+static const struct font_data fontdata_mini_4x6 = {
+ { 0, 0, FONTDATAMAX, 0 }, {
+ /*{*/
+ /* Char 0: ' ' */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/
+ /* Char 1: ' ' */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/
+ /* Char 2: ' ' */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/
+ /* Char 3: ' ' */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/
+ /* Char 4: ' ' */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/
+ /* Char 5: ' ' */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/
+ /* Char 6: ' ' */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/
+ /* Char 7: ' ' */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/
+ /* Char 8: ' ' */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/
+ /* Char 9: ' ' */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/
+	/* Char 10: ' ' */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/
+ /* Char 11: ' ' */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/
+ /* Char 12: ' ' */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/
+ /* Char 13: ' ' */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/
+ /* Char 14: ' ' */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/
+ /* Char 15: ' ' */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/
+ /* Char 16: ' ' */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/
+ /* Char 17: ' ' */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/
+ /* Char 18: ' ' */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/
+ /* Char 19: ' ' */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/
+ /* Char 20: ' ' */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/
+ /* Char 21: ' ' */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/
+ /* Char 22: ' ' */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/
+ /* Char 23: ' ' */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/
+ /* Char 24: ' ' */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/
+ /* Char 25: ' ' */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/
+ /* Char 26: ' ' */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/
+ /* Char 27: ' ' */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/
+ /* Char 28: ' ' */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/
+ /* Char 29: ' ' */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/
+ /* Char 30: ' ' */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/
+ /* Char 31: ' ' */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/
+ /* Char 32: ' ' */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/
+ /* Char 33: '!' */
+ 0x44, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ 0x00, /*= [ ] */
+ 0x44, /*= [ * ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/
+ /* Char 34: '"' */
+ 0xaa, /*= [* * ] */
+ 0xaa, /*= [* * ] */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/
+ /* Char 35: '#' */
+ 0xaa, /*= [* * ] */
+ 0xff, /*= [****] */
+ 0xff, /*= [****] */
+ 0xaa, /*= [* * ] */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/
+ /* Char 36: '$' */
+ 0x44, /*= [ * ] */
+ 0x66, /*= [ ** ] */
+ 0xee, /*= [*** ] */
+ 0xcc, /*= [** ] */
+ 0x44, /*= [ * ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/
+ /* Char 37: '%' */
+ 0xaa, /*= [* * ] */
+ 0x22, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ 0x88, /*= [* ] */
+ 0xaa, /*= [* * ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/
+ /* Char 38: '&' */
+ 0x66, /*= [ ** ] */
+ 0x99, /*= [* *] */
+ 0x66, /*= [ ** ] */
+ 0xaa, /*= [* * ] */
+ 0xdd, /*= [** *] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/
+ /* Char 39: ''' */
+ 0x22, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/
+ /* Char 40: '(' */
+ 0x22, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ 0x22, /*= [ * ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/
+ /* Char 41: ')' */
+ 0x44, /*= [ * ] */
+ 0x22, /*= [ * ] */
+ 0x22, /*= [ * ] */
+ 0x22, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/
+ /* Char 42: '*' */
+ 0x00, /*= [ ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/
+ /* Char 43: '+' */
+ 0x00, /*= [ ] */
+ 0x44, /*= [ * ] */
+ 0xee, /*= [*** ] */
+ 0x44, /*= [ * ] */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/
+ /* Char 44: ',' */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ 0x44, /*= [ * ] */
+ 0x88, /*= [* ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/
+ /* Char 45: '-' */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/
+ /* Char 46: '.' */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ 0x44, /*= [ * ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/
+ /* Char 47: '/' */
+ 0x00, /*= [ ] */
+ 0x22, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ 0x88, /*= [* ] */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/
+ /* Char 48: '0' */
+ 0x44, /*= [ * ] */
+ 0xaa, /*= [* * ] */
+ 0xaa, /*= [* * ] */
+ 0xaa, /*= [* * ] */
+ 0x44, /*= [ * ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/
+ /* Char 49: '1' */
+ 0x44, /*= [ * ] */
+ 0xcc, /*= [** ] */
+ 0x44, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/
+ /* Char 50: '2' */
+ 0xcc, /*= [** ] */
+ 0x22, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ 0x88, /*= [* ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/
+ /* Char 51: '3' */
+ 0xee, /*= [*** ] */
+ 0x22, /*= [ * ] */
+ 0x66, /*= [ ** ] */
+ 0x22, /*= [ * ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 52: '4' */
+ 0xaa, /*= [* * ] */
+ 0xaa, /*= [* * ] */
+ 0xee, /*= [*** ] */
+ 0x22, /*= [ * ] */
+ 0x22, /*= [ * ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 53: '5' */
+ 0xee, /*= [*** ] */
+ 0x88, /*= [* ] */
+ 0xee, /*= [*** ] */
+ 0x22, /*= [ * ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 54: '6' */
+ 0xee, /*= [*** ] */
+ 0x88, /*= [* ] */
+ 0xee, /*= [*** ] */
+ 0xaa, /*= [* * ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 55: '7' */
+ 0xee, /*= [*** ] */
+ 0x22, /*= [ * ] */
+ 0x22, /*= [ * ] */
+ 0x22, /*= [ * ] */
+ 0x22, /*= [ * ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 56: '8' */
+ 0xee, /*= [*** ] */
+ 0xaa, /*= [* * ] */
+ 0xee, /*= [*** ] */
+ 0xaa, /*= [* * ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 57: '9' */
+ 0xee, /*= [*** ] */
+ 0xaa, /*= [* * ] */
+ 0xee, /*= [*** ] */
+ 0x22, /*= [ * ] */
+ 0x22, /*= [ * ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 58: ':' */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ 0x44, /*= [ * ] */
+ 0x00, /*= [ ] */
+ 0x44, /*= [ * ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 59: ';' */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ 0x44, /*= [ * ] */
+ 0x00, /*= [ ] */
+ 0x44, /*= [ * ] */
+ 0x88, /*= [* ] */
+ /*}*/
+ /*{*/ /* Char 60: '<' */
+ 0x22, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ 0x88, /*= [* ] */
+ 0x44, /*= [ * ] */
+ 0x22, /*= [ * ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 61: '=' */
+ 0x00, /*= [ ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 62: '>' */
+ 0x88, /*= [* ] */
+ 0x44, /*= [ * ] */
+ 0x22, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ 0x88, /*= [* ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 63: '?' */
+ 0xee, /*= [*** ] */
+ 0x22, /*= [ * ] */
+ 0x66, /*= [ ** ] */
+ 0x00, /*= [ ] */
+ 0x44, /*= [ * ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 64: '@' */
+ 0x44, /*= [ * ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x88, /*= [* ] */
+ 0x44, /*= [ * ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 65: 'A' */
+ 0x44, /*= [ * ] */
+ 0xaa, /*= [* * ] */
+ 0xee, /*= [*** ] */
+ 0xaa, /*= [* * ] */
+ 0xaa, /*= [* * ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 66: 'B' */
+ 0xcc, /*= [** ] */
+ 0xaa, /*= [* * ] */
+ 0xcc, /*= [** ] */
+ 0xaa, /*= [* * ] */
+ 0xcc, /*= [** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 67: 'C' */
+ 0x66, /*= [ ** ] */
+ 0x88, /*= [* ] */
+ 0x88, /*= [* ] */
+ 0x88, /*= [* ] */
+ 0x66, /*= [ ** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 68: 'D' */
+ 0xcc, /*= [** ] */
+ 0xaa, /*= [* * ] */
+ 0xaa, /*= [* * ] */
+ 0xaa, /*= [* * ] */
+ 0xcc, /*= [** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 69: 'E' */
+ 0xee, /*= [*** ] */
+ 0x88, /*= [* ] */
+ 0xee, /*= [*** ] */
+ 0x88, /*= [* ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 70: 'F' */
+ 0xee, /*= [*** ] */
+ 0x88, /*= [* ] */
+ 0xee, /*= [*** ] */
+ 0x88, /*= [* ] */
+ 0x88, /*= [* ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 71: 'G' */
+ 0x66, /*= [ ** ] */
+ 0x88, /*= [* ] */
+ 0xee, /*= [*** ] */
+ 0xaa, /*= [* * ] */
+ 0x66, /*= [ ** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 72: 'H' */
+ 0xaa, /*= [* * ] */
+ 0xaa, /*= [* * ] */
+ 0xee, /*= [*** ] */
+ 0xaa, /*= [* * ] */
+ 0xaa, /*= [* * ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 73: 'I' */
+ 0xee, /*= [*** ] */
+ 0x44, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 74: 'J' */
+ 0x22, /*= [ * ] */
+ 0x22, /*= [ * ] */
+ 0x22, /*= [ * ] */
+ 0xaa, /*= [* * ] */
+ 0x44, /*= [ * ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 75: 'K' */
+ 0xaa, /*= [* * ] */
+ 0xaa, /*= [* * ] */
+ 0xcc, /*= [** ] */
+ 0xaa, /*= [* * ] */
+ 0xaa, /*= [* * ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 76: 'L' */
+ 0x88, /*= [* ] */
+ 0x88, /*= [* ] */
+ 0x88, /*= [* ] */
+ 0x88, /*= [* ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 77: 'M' */
+ 0xaa, /*= [* * ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xaa, /*= [* * ] */
+ 0xaa, /*= [* * ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 78: 'N' */
+ 0xaa, /*= [* * ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xaa, /*= [* * ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 79: 'O' */
+ 0x44, /*= [ * ] */
+ 0xaa, /*= [* * ] */
+ 0xaa, /*= [* * ] */
+ 0xaa, /*= [* * ] */
+ 0x44, /*= [ * ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 80: 'P' */
+ 0xcc, /*= [** ] */
+ 0xaa, /*= [* * ] */
+ 0xcc, /*= [** ] */
+ 0x88, /*= [* ] */
+ 0x88, /*= [* ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 81: 'Q' */
+ 0x44, /*= [ * ] */
+ 0xaa, /*= [* * ] */
+ 0xaa, /*= [* * ] */
+ 0xee, /*= [*** ] */
+ 0x66, /*= [ ** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 82: 'R' */
+ 0xcc, /*= [** ] */
+ 0xaa, /*= [* * ] */
+ 0xee, /*= [*** ] */
+ 0xcc, /*= [** ] */
+ 0xaa, /*= [* * ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 83: 'S' */
+ 0x66, /*= [ ** ] */
+ 0x88, /*= [* ] */
+ 0x44, /*= [ * ] */
+ 0x22, /*= [ * ] */
+ 0xcc, /*= [** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 84: 'T' */
+ 0xee, /*= [*** ] */
+ 0x44, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 85: 'U' */
+ 0xaa, /*= [* * ] */
+ 0xaa, /*= [* * ] */
+ 0xaa, /*= [* * ] */
+ 0xaa, /*= [* * ] */
+ 0x66, /*= [ ** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 86: 'V' */
+ 0xaa, /*= [* * ] */
+ 0xaa, /*= [* * ] */
+ 0xaa, /*= [* * ] */
+ 0x44, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 87: 'W' */
+ 0xaa, /*= [* * ] */
+ 0xaa, /*= [* * ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xaa, /*= [* * ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 88: 'X' */
+ 0xaa, /*= [* * ] */
+ 0xaa, /*= [* * ] */
+ 0x44, /*= [ * ] */
+ 0xaa, /*= [* * ] */
+ 0xaa, /*= [* * ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 89: 'Y' */
+ 0xaa, /*= [* * ] */
+ 0xaa, /*= [* * ] */
+ 0x44, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 90: 'Z' */
+ 0xee, /*= [*** ] */
+ 0x22, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ 0x88, /*= [* ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 91: '[' */
+ 0x66, /*= [ ** ] */
+ 0x44, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ 0x66, /*= [ ** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 92: '\' */
+ 0x00, /*= [ ] */
+ 0x88, /*= [* ] */
+ 0x44, /*= [ * ] */
+ 0x22, /*= [ * ] */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 93: ']' */
+ 0x66, /*= [ ** ] */
+ 0x22, /*= [ * ] */
+ 0x22, /*= [ * ] */
+ 0x22, /*= [ * ] */
+ 0x66, /*= [ ** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 94: '^' */
+ 0x44, /*= [ * ] */
+ 0xaa, /*= [* * ] */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 95: '_' */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ 0xff, /*= [****] */
+ /*}*/
+ /*{*/ /* Char 96: '`' */
+ 0x88, /*= [* ] */
+ 0x44, /*= [ * ] */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 97: 'a' */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ 0x66, /*= [ ** ] */
+ 0xaa, /*= [* * ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 98: 'b' */
+ 0x88, /*= [* ] */
+ 0x88, /*= [* ] */
+ 0xcc, /*= [** ] */
+ 0xaa, /*= [* * ] */
+ 0xcc, /*= [** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 99: 'c' */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ 0x66, /*= [ ** ] */
+ 0x88, /*= [* ] */
+ 0x66, /*= [ ** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 100: 'd' */
+ 0x22, /*= [ * ] */
+ 0x22, /*= [ * ] */
+ 0x66, /*= [ ** ] */
+ 0xaa, /*= [* * ] */
+ 0x66, /*= [ ** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 101: 'e' */
+ 0x00, /*= [ ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x88, /*= [* ] */
+ 0x66, /*= [ ** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 102: 'f' */
+ 0x22, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ 0xee, /*= [*** ] */
+ 0x44, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 103: 'g' */
+ 0x00, /*= [ ] */
+ 0x66, /*= [ ** ] */
+ 0xaa, /*= [* * ] */
+ 0x66, /*= [ ** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 104: 'h' */
+ 0x88, /*= [* ] */
+ 0x88, /*= [* ] */
+ 0xcc, /*= [** ] */
+ 0xaa, /*= [* * ] */
+ 0xaa, /*= [* * ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 105: 'i' */
+ 0x44, /*= [ * ] */
+ 0x00, /*= [ ] */
+ 0x44, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 106: 'j' */
+ 0x44, /*= [ * ] */
+ 0x00, /*= [ ] */
+ 0x44, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ 0x88, /*= [* ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 107: 'k' */
+ 0x00, /*= [ ] */
+ 0x88, /*= [* ] */
+ 0xaa, /*= [* * ] */
+ 0xcc, /*= [** ] */
+ 0xaa, /*= [* * ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 108: 'l' */
+ 0x00, /*= [ ] */
+ 0xcc, /*= [** ] */
+ 0x44, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 109: 'm' */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xaa, /*= [* * ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 110: 'n' */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ 0xcc, /*= [** ] */
+ 0xaa, /*= [* * ] */
+ 0xaa, /*= [* * ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 111: 'o' */
+ 0x00, /*= [ ] */
+ 0x44, /*= [ * ] */
+ 0xaa, /*= [* * ] */
+ 0xaa, /*= [* * ] */
+ 0x44, /*= [ * ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 112: 'p' */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ 0xcc, /*= [** ] */
+ 0xaa, /*= [* * ] */
+ 0xcc, /*= [** ] */
+ 0x88, /*= [* ] */
+ /*}*/
+ /*{*/ /* Char 113: 'q' */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ 0x66, /*= [ ** ] */
+ 0xaa, /*= [* * ] */
+ 0x66, /*= [ ** ] */
+ 0x22, /*= [ * ] */
+ /*}*/
+ /*{*/ /* Char 114: 'r' */
+ 0x00, /*= [ ] */
+ 0xcc, /*= [** ] */
+ 0xaa, /*= [* * ] */
+ 0x88, /*= [* ] */
+ 0x88, /*= [* ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 115: 's' */
+ 0x00, /*= [ ] */
+ 0x66, /*= [ ** ] */
+ 0xcc, /*= [** ] */
+ 0x22, /*= [ * ] */
+ 0xcc, /*= [** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 116: 't' */
+ 0x00, /*= [ ] */
+ 0x44, /*= [ * ] */
+ 0xee, /*= [*** ] */
+ 0x44, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 117: 'u' */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ 0xaa, /*= [* * ] */
+ 0xaa, /*= [* * ] */
+ 0x66, /*= [ ** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 118: 'v' */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ 0xaa, /*= [* * ] */
+ 0xee, /*= [*** ] */
+ 0x44, /*= [ * ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 119: 'w' */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ 0xaa, /*= [* * ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 120: 'x' */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ 0xaa, /*= [* * ] */
+ 0x44, /*= [ * ] */
+ 0xaa, /*= [* * ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 121: 'y' */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ 0xaa, /*= [* * ] */
+ 0xee, /*= [*** ] */
+ 0x22, /*= [ * ] */
+ 0xcc, /*= [** ] */
+ /*}*/
+ /*{*/ /* Char 122: 'z' */
+ 0x00, /*= [ ] */
+ 0xee, /*= [*** ] */
+ 0x66, /*= [ ** ] */
+ 0xcc, /*= [** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 123: '{' */
+ 0x22, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ 0xcc, /*= [** ] */
+ 0x44, /*= [ * ] */
+ 0x22, /*= [ * ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 124: '|' */
+ 0x44, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 125: '}' */
+ 0x88, /*= [* ] */
+ 0x44, /*= [ * ] */
+ 0x66, /*= [ ** ] */
+ 0x44, /*= [ * ] */
+ 0x88, /*= [* ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 126: '~' */
+ 0x55, /*= [ * *] */
+ 0xaa, /*= [* * ] */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ /*}*/
+	/*{*/ /* Char 127: ' ' */
+ 0x44, /*= [ * ] */
+ 0xaa, /*= [* * ] */
+ 0xaa, /*= [* * ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 128: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 129: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 130: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 131: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 132: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 133: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 134: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 135: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 136: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 137: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 138: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 139: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 140: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 141: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 142: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 143: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 144: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 145: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 146: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 147: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 148: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 149: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 150: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 151: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 152: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 153: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 154: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 155: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 156: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 157: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 158: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 159: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 160: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 161: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 162: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 163: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 164: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 165: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 166: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 167: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 168: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 169: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 170: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 171: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 172: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 173: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 174: */
+ 0x00, /*= [ ] */
+ 0x66, /*= [ ** ] */
+ 0xcc, /*= [** ] */
+ 0x66, /*= [ ** ] */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 175: */
+ 0x00, /*= [ ] */
+ 0xcc, /*= [** ] */
+ 0x66, /*= [ ** ] */
+ 0xcc, /*= [** ] */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 176: */
+ 0x88, /*= [* ] */
+ 0x22, /*= [ * ] */
+ 0x88, /*= [* ] */
+ 0x22, /*= [ * ] */
+ 0x88, /*= [* ] */
+ 0x22, /*= [ * ] */
+ /*}*/
+ /*{*/ /* Char 177: */
+ 0xaa, /*= [* * ] */
+ 0x55, /*= [ * *] */
+ 0xaa, /*= [* * ] */
+ 0x55, /*= [ * *] */
+ 0xaa, /*= [* * ] */
+ 0x55, /*= [ * *] */
+ /*}*/
+ /*{*/ /* Char 178: */
+ 0xdd, /*= [** *] */
+ 0xbb, /*= [* **] */
+ 0xdd, /*= [** *] */
+ 0xbb, /*= [* **] */
+ 0xdd, /*= [** *] */
+ 0xbb, /*= [* **] */
+ /*}*/
+ /*{*/ /* Char 179: */
+ 0x44, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ /*}*/
+ /*{*/ /* Char 180: */
+ 0x44, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ 0xcc, /*= [** ] */
+ 0x44, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ /*}*/
+ /*{*/ /* Char 181: */
+ 0x44, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ 0xcc, /*= [** ] */
+ 0xcc, /*= [** ] */
+ 0x44, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ /*}*/
+ /*{*/ /* Char 182: */
+ 0x66, /*= [ ** ] */
+ 0x66, /*= [ ** ] */
+ 0xee, /*= [*** ] */
+ 0x66, /*= [ ** ] */
+ 0x66, /*= [ ** ] */
+ 0x66, /*= [ ** ] */
+ /*}*/
+ /*{*/ /* Char 183: */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ 0xee, /*= [*** ] */
+ 0x66, /*= [ ** ] */
+ 0x66, /*= [ ** ] */
+ 0x66, /*= [ ** ] */
+ /*}*/
+ /*{*/ /* Char 184: */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ 0xcc, /*= [** ] */
+ 0xcc, /*= [** ] */
+ 0x44, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ /*}*/
+ /*{*/ /* Char 185: */
+ 0x66, /*= [ ** ] */
+ 0x66, /*= [ ** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x66, /*= [ ** ] */
+ 0x66, /*= [ ** ] */
+ /*}*/
+ /*{*/ /* Char 186: */
+ 0x66, /*= [ ** ] */
+ 0x66, /*= [ ** ] */
+ 0x66, /*= [ ** ] */
+ 0x66, /*= [ ** ] */
+ 0x66, /*= [ ** ] */
+ 0x66, /*= [ ** ] */
+ /*}*/
+ /*{*/ /* Char 187: */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x66, /*= [ ** ] */
+ 0x66, /*= [ ** ] */
+ /*}*/
+ /*{*/ /* Char 188: */
+ 0x66, /*= [ ** ] */
+ 0x66, /*= [ ** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 189: */
+ 0x66, /*= [ ** ] */
+ 0x66, /*= [ ** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 190: */
+ 0x44, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ 0xcc, /*= [** ] */
+ 0xcc, /*= [** ] */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 191: */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ 0xcc, /*= [** ] */
+ 0x44, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ /*}*/
+ /*{*/ /* Char 192: */
+ 0x44, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ 0x77, /*= [ ***] */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 193: */
+ 0x44, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ 0xff, /*= [****] */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 194: */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ 0xff, /*= [****] */
+ 0x44, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ /*}*/
+ /*{*/ /* Char 195: */
+ 0x44, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ 0x77, /*= [ ***] */
+ 0x44, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ /*}*/
+ /*{*/ /* Char 196: */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ 0xff, /*= [****] */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 197: */
+ 0x44, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ 0xff, /*= [****] */
+ 0x44, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ /*}*/
+ /*{*/ /* Char 198: */
+ 0x44, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ 0x77, /*= [ ***] */
+ 0x77, /*= [ ***] */
+ 0x44, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ /*}*/
+ /*{*/ /* Char 199: */
+ 0x66, /*= [ ** ] */
+ 0x66, /*= [ ** ] */
+ 0x77, /*= [ ***] */
+ 0x66, /*= [ ** ] */
+ 0x66, /*= [ ** ] */
+ 0x66, /*= [ ** ] */
+ /*}*/
+ /*{*/ /* Char 200: */
+ 0x66, /*= [ ** ] */
+ 0x66, /*= [ ** ] */
+ 0x77, /*= [ ***] */
+ 0x77, /*= [ ***] */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 201: */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ 0x77, /*= [ ***] */
+ 0x77, /*= [ ***] */
+ 0x66, /*= [ ** ] */
+ 0x66, /*= [ ** ] */
+ /*}*/
+ /*{*/ /* Char 202: */
+ 0x66, /*= [ ** ] */
+ 0x66, /*= [ ** ] */
+ 0xff, /*= [****] */
+ 0xff, /*= [****] */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 203: */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ 0xff, /*= [****] */
+ 0xff, /*= [****] */
+ 0x66, /*= [ ** ] */
+ 0x66, /*= [ ** ] */
+ /*}*/
+ /*{*/ /* Char 204: */
+ 0x66, /*= [ ** ] */
+ 0x66, /*= [ ** ] */
+ 0x77, /*= [ ***] */
+ 0x77, /*= [ ***] */
+ 0x66, /*= [ ** ] */
+ 0x66, /*= [ ** ] */
+ /*}*/
+ /*{*/ /* Char 205: */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ 0xff, /*= [****] */
+ 0xff, /*= [****] */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 206: */
+ 0x66, /*= [ ** ] */
+ 0x66, /*= [ ** ] */
+ 0xff, /*= [****] */
+ 0xff, /*= [****] */
+ 0x66, /*= [ ** ] */
+ 0x66, /*= [ ** ] */
+ /*}*/
+ /*{*/ /* Char 207: */
+ 0x44, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ 0xff, /*= [****] */
+ 0xff, /*= [****] */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 208: */
+ 0x66, /*= [ ** ] */
+ 0x66, /*= [ ** ] */
+ 0xff, /*= [****] */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 209: */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ 0xff, /*= [****] */
+ 0xff, /*= [****] */
+ 0x44, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ /*}*/
+ /*{*/ /* Char 210: */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ 0xff, /*= [****] */
+ 0x66, /*= [ ** ] */
+ 0x66, /*= [ ** ] */
+ 0x66, /*= [ ** ] */
+ /*}*/
+ /*{*/ /* Char 211: */
+ 0x66, /*= [ ** ] */
+ 0x66, /*= [ ** ] */
+ 0x77, /*= [ ***] */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 212: */
+ 0x44, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ 0x77, /*= [ ***] */
+ 0x77, /*= [ ***] */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 213: */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ 0x77, /*= [ ***] */
+ 0x77, /*= [ ***] */
+ 0x44, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ /*}*/
+ /*{*/ /* Char 214: */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ 0x77, /*= [ ***] */
+ 0x66, /*= [ ** ] */
+ 0x66, /*= [ ** ] */
+ 0x66, /*= [ ** ] */
+ /*}*/
+ /*{*/ /* Char 215: */
+ 0x66, /*= [ ** ] */
+ 0x66, /*= [ ** ] */
+ 0xff, /*= [****] */
+ 0x66, /*= [ ** ] */
+ 0x66, /*= [ ** ] */
+ 0x66, /*= [ ** ] */
+ /*}*/
+ /*{*/ /* Char 216: */
+ 0x44, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ 0xff, /*= [****] */
+ 0xff, /*= [****] */
+ 0x44, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ /*}*/
+ /*{*/ /* Char 217: */
+ 0x44, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ 0xcc, /*= [** ] */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 218: */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ 0x77, /*= [ ***] */
+ 0x44, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ 0x44, /*= [ * ] */
+ /*}*/
+ /*{*/ /* Char 219: */
+ 0xff, /*= [****] */
+ 0xff, /*= [****] */
+ 0xff, /*= [****] */
+ 0xff, /*= [****] */
+ 0xff, /*= [****] */
+ 0xff, /*= [****] */
+ /*}*/
+ /*{*/ /* Char 220: */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ 0xff, /*= [****] */
+ 0xff, /*= [****] */
+ 0xff, /*= [****] */
+ /*}*/
+ /*{*/ /* Char 221: */
+ 0xcc, /*= [** ] */
+ 0xcc, /*= [** ] */
+ 0xcc, /*= [** ] */
+ 0xcc, /*= [** ] */
+ 0xcc, /*= [** ] */
+ 0xcc, /*= [** ] */
+ /*}*/
+ /*{*/ /* Char 222: */
+ 0x33, /*= [ **] */
+ 0x33, /*= [ **] */
+ 0x33, /*= [ **] */
+ 0x33, /*= [ **] */
+ 0x33, /*= [ **] */
+ 0x33, /*= [ **] */
+ /*}*/
+ /*{*/ /* Char 223: */
+ 0xff, /*= [****] */
+ 0xff, /*= [****] */
+ 0xff, /*= [****] */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 224: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 225: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 226: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 227: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 228: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 229: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 230: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 231: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 232: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 233: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 234: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 235: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 236: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 237: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 238: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 239: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 240: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 241: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 242: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 243: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 244: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 245: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 246: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 247: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 248: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 249: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 250: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 251: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 252: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 253: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 254: */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ 0x66, /*= [ ** ] */
+ 0x66, /*= [ ** ] */
+ 0x00, /*= [ ] */
+ 0x00, /*= [ ] */
+ /*}*/
+ /*{*/ /* Char 255: */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0xee, /*= [*** ] */
+ 0x00, /*= [ ] */
+ /*}*/
+} };
+
+const struct font_desc font_mini_4x6 = {
+ .idx = MINI4x6_IDX,
+ .name = "MINI4x6",
+ .width = 4,
+ .height = 6,
+ .charcount = 256,
+ .data = fontdata_mini_4x6.data,
+ .pref = 3,
+};
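+
+/*
+ * Consumers look this descriptor up by its .name string, e.g.:
+ *
+ *	const struct font_desc *font = find_font("MINI4x6");
+ *	if (font)
+ *		pr_info("%ux%u font, %u glyphs\n",
+ *			font->width, font->height, font->charcount);
+ *
+ * find_font() is exported by lib/fonts/fonts.c.
+ */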
+
diff --git a/lib/fonts/font_pearl_8x8.c b/lib/fonts/font_pearl_8x8.c
new file mode 100644
index 000000000..ae98ca179
--- /dev/null
+++ b/lib/fonts/font_pearl_8x8.c
@@ -0,0 +1,2588 @@
+// SPDX-License-Identifier: GPL-2.0
+/**********************************************/
+/* */
+/* Font file generated by cpi2fnt */
+/* ------------------------------ */
+/* Combined with the alphanumeric */
+/* portion of Greg Harp's old PEARL */
+/* font (from earlier versions of */
+/* linux-m68k) by John Shifflett */
+/* */
+/**********************************************/
+
+#include <linux/font.h>
+
+#define FONTDATAMAX 2048
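+/* 256 glyphs * 8 row bytes per glyph = 2048 bytes of bitmap data */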
+
+static const struct font_data fontdata_pearl8x8 = {
+ { 0, 0, FONTDATAMAX, 0 }, {
+ /* 0 0x00 '^@' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 1 0x01 '^A' */
+ 0x7e, /* 01111110 */
+ 0x81, /* 10000001 */
+ 0xa5, /* 10100101 */
+ 0x81, /* 10000001 */
+ 0xbd, /* 10111101 */
+ 0x99, /* 10011001 */
+ 0x81, /* 10000001 */
+ 0x7e, /* 01111110 */
+
+ /* 2 0x02 '^B' */
+ 0x7e, /* 01111110 */
+ 0xff, /* 11111111 */
+ 0xdb, /* 11011011 */
+ 0xff, /* 11111111 */
+ 0xc3, /* 11000011 */
+ 0xe7, /* 11100111 */
+ 0xff, /* 11111111 */
+ 0x7e, /* 01111110 */
+
+ /* 3 0x03 '^C' */
+ 0x6c, /* 01101100 */
+ 0xfe, /* 11111110 */
+ 0xfe, /* 11111110 */
+ 0xfe, /* 11111110 */
+ 0x7c, /* 01111100 */
+ 0x38, /* 00111000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+
+ /* 4 0x04 '^D' */
+ 0x10, /* 00010000 */
+ 0x38, /* 00111000 */
+ 0x7c, /* 01111100 */
+ 0xfe, /* 11111110 */
+ 0x7c, /* 01111100 */
+ 0x38, /* 00111000 */
+ 0x10, /* 00010000 */
+ 0x00, /* 00000000 */
+
+ /* 5 0x05 '^E' */
+ 0x38, /* 00111000 */
+ 0x7c, /* 01111100 */
+ 0x38, /* 00111000 */
+ 0xfe, /* 11111110 */
+ 0xfe, /* 11111110 */
+ 0xd6, /* 11010110 */
+ 0x10, /* 00010000 */
+ 0x38, /* 00111000 */
+
+ /* 6 0x06 '^F' */
+ 0x10, /* 00010000 */
+ 0x38, /* 00111000 */
+ 0x7c, /* 01111100 */
+ 0xfe, /* 11111110 */
+ 0xfe, /* 11111110 */
+ 0x7c, /* 01111100 */
+ 0x10, /* 00010000 */
+ 0x38, /* 00111000 */
+
+ /* 7 0x07 '^G' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x3c, /* 00111100 */
+ 0x3c, /* 00111100 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 8 0x08 '^H' */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xe7, /* 11100111 */
+ 0xc3, /* 11000011 */
+ 0xc3, /* 11000011 */
+ 0xe7, /* 11100111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+
+ /* 9 0x09 '^I' */
+ 0x00, /* 00000000 */
+ 0x3c, /* 00111100 */
+ 0x66, /* 01100110 */
+ 0x42, /* 01000010 */
+ 0x42, /* 01000010 */
+ 0x66, /* 01100110 */
+ 0x3c, /* 00111100 */
+ 0x00, /* 00000000 */
+
+ /* 10 0x0a '^J' */
+ 0xff, /* 11111111 */
+ 0xc3, /* 11000011 */
+ 0x99, /* 10011001 */
+ 0xbd, /* 10111101 */
+ 0xbd, /* 10111101 */
+ 0x99, /* 10011001 */
+ 0xc3, /* 11000011 */
+ 0xff, /* 11111111 */
+
+ /* 11 0x0b '^K' */
+ 0x0f, /* 00001111 */
+ 0x07, /* 00000111 */
+ 0x0f, /* 00001111 */
+ 0x7d, /* 01111101 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0x78, /* 01111000 */
+
+ /* 12 0x0c '^L' */
+ 0x3c, /* 00111100 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x3c, /* 00111100 */
+ 0x18, /* 00011000 */
+ 0x7e, /* 01111110 */
+ 0x18, /* 00011000 */
+
+ /* 13 0x0d '^M' */
+ 0x3f, /* 00111111 */
+ 0x33, /* 00110011 */
+ 0x3f, /* 00111111 */
+ 0x30, /* 00110000 */
+ 0x30, /* 00110000 */
+ 0x70, /* 01110000 */
+ 0xf0, /* 11110000 */
+ 0xe0, /* 11100000 */
+
+ /* 14 0x0e '^N' */
+ 0x7f, /* 01111111 */
+ 0x63, /* 01100011 */
+ 0x7f, /* 01111111 */
+ 0x63, /* 01100011 */
+ 0x63, /* 01100011 */
+ 0x67, /* 01100111 */
+ 0xe6, /* 11100110 */
+ 0xc0, /* 11000000 */
+
+ /* 15 0x0f '^O' */
+ 0x18, /* 00011000 */
+ 0xdb, /* 11011011 */
+ 0x3c, /* 00111100 */
+ 0xe7, /* 11100111 */
+ 0xe7, /* 11100111 */
+ 0x3c, /* 00111100 */
+ 0xdb, /* 11011011 */
+ 0x18, /* 00011000 */
+
+ /* 16 0x10 '^P' */
+ 0x80, /* 10000000 */
+ 0xe0, /* 11100000 */
+ 0xf8, /* 11111000 */
+ 0xfe, /* 11111110 */
+ 0xf8, /* 11111000 */
+ 0xe0, /* 11100000 */
+ 0x80, /* 10000000 */
+ 0x00, /* 00000000 */
+
+ /* 17 0x11 '^Q' */
+ 0x02, /* 00000010 */
+ 0x0e, /* 00001110 */
+ 0x3e, /* 00111110 */
+ 0xfe, /* 11111110 */
+ 0x3e, /* 00111110 */
+ 0x0e, /* 00001110 */
+ 0x02, /* 00000010 */
+ 0x00, /* 00000000 */
+
+ /* 18 0x12 '^R' */
+ 0x18, /* 00011000 */
+ 0x3c, /* 00111100 */
+ 0x7e, /* 01111110 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x7e, /* 01111110 */
+ 0x3c, /* 00111100 */
+ 0x18, /* 00011000 */
+
+ /* 19 0x13 '^S' */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x00, /* 00000000 */
+ 0x66, /* 01100110 */
+ 0x00, /* 00000000 */
+
+ /* 20 0x14 '^T' */
+ 0x7f, /* 01111111 */
+ 0xdb, /* 11011011 */
+ 0xdb, /* 11011011 */
+ 0x7b, /* 01111011 */
+ 0x1b, /* 00011011 */
+ 0x1b, /* 00011011 */
+ 0x1b, /* 00011011 */
+ 0x00, /* 00000000 */
+
+ /* 21 0x15 '^U' */
+ 0x3e, /* 00111110 */
+ 0x61, /* 01100001 */
+ 0x3c, /* 00111100 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x3c, /* 00111100 */
+ 0x86, /* 10000110 */
+ 0x7c, /* 01111100 */
+
+ /* 22 0x16 '^V' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7e, /* 01111110 */
+ 0x7e, /* 01111110 */
+ 0x7e, /* 01111110 */
+ 0x00, /* 00000000 */
+
+ /* 23 0x17 '^W' */
+ 0x18, /* 00011000 */
+ 0x3c, /* 00111100 */
+ 0x7e, /* 01111110 */
+ 0x18, /* 00011000 */
+ 0x7e, /* 01111110 */
+ 0x3c, /* 00111100 */
+ 0x18, /* 00011000 */
+ 0xff, /* 11111111 */
+
+ /* 24 0x18 '^X' */
+ 0x18, /* 00011000 */
+ 0x3c, /* 00111100 */
+ 0x7e, /* 01111110 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+
+ /* 25 0x19 '^Y' */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x7e, /* 01111110 */
+ 0x3c, /* 00111100 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+
+ /* 26 0x1a '^Z' */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x0c, /* 00001100 */
+ 0xfe, /* 11111110 */
+ 0x0c, /* 00001100 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 27 0x1b '^[' */
+ 0x00, /* 00000000 */
+ 0x30, /* 00110000 */
+ 0x60, /* 01100000 */
+ 0xfe, /* 11111110 */
+ 0x60, /* 01100000 */
+ 0x30, /* 00110000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 28 0x1c '^\' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0xfe, /* 11111110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 29 0x1d '^]' */
+ 0x00, /* 00000000 */
+ 0x24, /* 00100100 */
+ 0x66, /* 01100110 */
+ 0xff, /* 11111111 */
+ 0x66, /* 01100110 */
+ 0x24, /* 00100100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 30 0x1e '^^' */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x3c, /* 00111100 */
+ 0x7e, /* 01111110 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 31 0x1f '^_' */
+ 0x00, /* 00000000 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0x7e, /* 01111110 */
+ 0x3c, /* 00111100 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 32 0x20 ' ' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 33 0x21 '!' */
+ 0x18, /* 00011000 */
+ 0x3c, /* 00111100 */
+ 0x3c, /* 00111100 */
+ 0x3c, /* 00111100 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+
+ /* 34 0x22 '"' */
+ 0x6c, /* 01101100 */
+ 0x6c, /* 01101100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 35 0x23 '#' */
+ 0x6c, /* 01101100 */
+ 0x6c, /* 01101100 */
+ 0xfe, /* 11111110 */
+ 0x6c, /* 01101100 */
+ 0xfe, /* 11111110 */
+ 0x6c, /* 01101100 */
+ 0x6c, /* 01101100 */
+ 0x00, /* 00000000 */
+
+ /* 36 0x24 '$' */
+ 0x18, /* 00011000 */
+ 0x3e, /* 00111110 */
+ 0x60, /* 01100000 */
+ 0x3c, /* 00111100 */
+ 0x06, /* 00000110 */
+ 0x7c, /* 01111100 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+
+ /* 37 0x25 '%' */
+ 0x00, /* 00000000 */
+ 0xc6, /* 11000110 */
+ 0xcc, /* 11001100 */
+ 0x18, /* 00011000 */
+ 0x30, /* 00110000 */
+ 0x66, /* 01100110 */
+ 0xc6, /* 11000110 */
+ 0x00, /* 00000000 */
+
+ /* 38 0x26 '&' */
+ 0x38, /* 00111000 */
+ 0x6c, /* 01101100 */
+ 0x68, /* 01101000 */
+ 0x76, /* 01110110 */
+ 0xdc, /* 11011100 */
+ 0xcc, /* 11001100 */
+ 0x76, /* 01110110 */
+ 0x00, /* 00000000 */
+
+ /* 39 0x27 ''' */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x30, /* 00110000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 40 0x28 '(' */
+ 0x0c, /* 00001100 */
+ 0x18, /* 00011000 */
+ 0x30, /* 00110000 */
+ 0x30, /* 00110000 */
+ 0x30, /* 00110000 */
+ 0x18, /* 00011000 */
+ 0x0c, /* 00001100 */
+ 0x00, /* 00000000 */
+
+ /* 41 0x29 ')' */
+ 0x30, /* 00110000 */
+ 0x18, /* 00011000 */
+ 0x0c, /* 00001100 */
+ 0x0c, /* 00001100 */
+ 0x0c, /* 00001100 */
+ 0x18, /* 00011000 */
+ 0x30, /* 00110000 */
+ 0x00, /* 00000000 */
+
+ /* 42 0x2a '*' */
+ 0x00, /* 00000000 */
+ 0x66, /* 01100110 */
+ 0x3c, /* 00111100 */
+ 0xff, /* 11111111 */
+ 0x3c, /* 00111100 */
+ 0x66, /* 01100110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 43 0x2b '+' */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x7e, /* 01111110 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 44 0x2c ',' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x30, /* 00110000 */
+
+ /* 45 0x2d '-' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7e, /* 01111110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 46 0x2e '.' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+
+ /* 47 0x2f '/' */
+ 0x03, /* 00000011 */
+ 0x06, /* 00000110 */
+ 0x0c, /* 00001100 */
+ 0x18, /* 00011000 */
+ 0x30, /* 00110000 */
+ 0x60, /* 01100000 */
+ 0xc0, /* 11000000 */
+ 0x00, /* 00000000 */
+
+ /* 48 0x30 '0' */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xde, /* 11011110 */
+ 0xfe, /* 11111110 */
+ 0xf6, /* 11110110 */
+ 0xc6, /* 11000110 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+
+ /* 49 0x31 '1' */
+ 0x18, /* 00011000 */
+ 0x78, /* 01111000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+
+ /* 50 0x32 '2' */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0x0c, /* 00001100 */
+ 0x18, /* 00011000 */
+ 0x30, /* 00110000 */
+ 0x60, /* 01100000 */
+ 0xfe, /* 11111110 */
+ 0x00, /* 00000000 */
+
+ /* 51 0x33 '3' */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0x06, /* 00000110 */
+ 0x1c, /* 00011100 */
+ 0x06, /* 00000110 */
+ 0xc6, /* 11000110 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+
+ /* 52 0x34 '4' */
+ 0x1c, /* 00011100 */
+ 0x3c, /* 00111100 */
+ 0x6c, /* 01101100 */
+ 0xcc, /* 11001100 */
+ 0xfe, /* 11111110 */
+ 0x0c, /* 00001100 */
+ 0x0c, /* 00001100 */
+ 0x00, /* 00000000 */
+
+ /* 53 0x35 '5' */
+ 0xfe, /* 11111110 */
+ 0xc0, /* 11000000 */
+ 0xfc, /* 11111100 */
+ 0x06, /* 00000110 */
+ 0x06, /* 00000110 */
+ 0xc6, /* 11000110 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+
+ /* 54 0x36 '6' */
+ 0x38, /* 00111000 */
+ 0x60, /* 01100000 */
+ 0xc0, /* 11000000 */
+ 0xfc, /* 11111100 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+
+ /* 55 0x37 '7' */
+ 0xfe, /* 11111110 */
+ 0x06, /* 00000110 */
+ 0x0c, /* 00001100 */
+ 0x18, /* 00011000 */
+ 0x30, /* 00110000 */
+ 0x60, /* 01100000 */
+ 0x60, /* 01100000 */
+ 0x00, /* 00000000 */
+
+ /* 56 0x38 '8' */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+
+ /* 57 0x39 '9' */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x7e, /* 01111110 */
+ 0x06, /* 00000110 */
+ 0x0c, /* 00001100 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+
+ /* 58 0x3a ':' */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+
+ /* 59 0x3b ';' */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x30, /* 00110000 */
+
+ /* 60 0x3c '<' */
+ 0x0c, /* 00001100 */
+ 0x18, /* 00011000 */
+ 0x30, /* 00110000 */
+ 0x60, /* 01100000 */
+ 0x30, /* 00110000 */
+ 0x18, /* 00011000 */
+ 0x0c, /* 00001100 */
+ 0x00, /* 00000000 */
+
+ /* 61 0x3d '=' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7e, /* 01111110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7e, /* 01111110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 62 0x3e '>' */
+ 0x30, /* 00110000 */
+ 0x18, /* 00011000 */
+ 0x0c, /* 00001100 */
+ 0x06, /* 00000110 */
+ 0x0c, /* 00001100 */
+ 0x18, /* 00011000 */
+ 0x30, /* 00110000 */
+ 0x00, /* 00000000 */
+
+ /* 63 0x3f '?' */
+ 0x3c, /* 00111100 */
+ 0x66, /* 01100110 */
+ 0x06, /* 00000110 */
+ 0x0c, /* 00001100 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+
+ /* 64 0x40 '@' */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xde, /* 11011110 */
+ 0xde, /* 11011110 */
+ 0xde, /* 11011110 */
+ 0xc0, /* 11000000 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+
+ /* 65 0x41 'A' */
+ 0x10, /* 00010000 */
+ 0x38, /* 00111000 */
+ 0x6c, /* 01101100 */
+ 0xc6, /* 11000110 */
+ 0xfe, /* 11111110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x00, /* 00000000 */
+
+ /* 66 0x42 'B' */
+ 0xfc, /* 11111100 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xfc, /* 11111100 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xfc, /* 11111100 */
+ 0x00, /* 00000000 */
+
+ /* 67 0x43 'C' */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0xc6, /* 11000110 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+
+ /* 68 0x44 'D' */
+ 0xfc, /* 11111100 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xfc, /* 11111100 */
+ 0x00, /* 00000000 */
+
+ /* 69 0x45 'E' */
+ 0xfe, /* 11111110 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0xf8, /* 11111000 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0xfe, /* 11111110 */
+ 0x00, /* 00000000 */
+
+ /* 70 0x46 'F' */
+ 0xfe, /* 11111110 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0xf8, /* 11111000 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0x00, /* 00000000 */
+
+ /* 71 0x47 'G' */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xc0, /* 11000000 */
+ 0xce, /* 11001110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+
+ /* 72 0x48 'H' */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xfe, /* 11111110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x00, /* 00000000 */
+
+ /* 73 0x49 'I' */
+ 0x7e, /* 01111110 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x7e, /* 01111110 */
+ 0x00, /* 00000000 */
+
+ /* 74 0x4a 'J' */
+ 0x06, /* 00000110 */
+ 0x06, /* 00000110 */
+ 0x06, /* 00000110 */
+ 0x06, /* 00000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+
+ /* 75 0x4b 'K' */
+ 0xc6, /* 11000110 */
+ 0xcc, /* 11001100 */
+ 0xd8, /* 11011000 */
+ 0xf0, /* 11110000 */
+ 0xd8, /* 11011000 */
+ 0xcc, /* 11001100 */
+ 0xc6, /* 11000110 */
+ 0x00, /* 00000000 */
+
+ /* 76 0x4c 'L' */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0xfe, /* 11111110 */
+ 0x00, /* 00000000 */
+
+ /* 77 0x4d 'M' */
+ 0x82, /* 10000010 */
+ 0xc6, /* 11000110 */
+ 0xee, /* 11101110 */
+ 0xfe, /* 11111110 */
+ 0xd6, /* 11010110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x00, /* 00000000 */
+
+ /* 78 0x4e 'N' */
+ 0xc6, /* 11000110 */
+ 0xe6, /* 11100110 */
+ 0xf6, /* 11110110 */
+ 0xde, /* 11011110 */
+ 0xce, /* 11001110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x00, /* 00000000 */
+
+ /* 79 0x4f 'O' */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+
+ /* 80 0x50 'P' */
+ 0xfc, /* 11111100 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xfc, /* 11111100 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0x00, /* 00000000 */
+
+ /* 81 0x51 'Q' */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xf6, /* 11110110 */
+ 0xde, /* 11011110 */
+ 0x7c, /* 01111100 */
+ 0x06, /* 00000110 */
+
+ /* 82 0x52 'R' */
+ 0xfc, /* 11111100 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xfc, /* 11111100 */
+ 0xd8, /* 11011000 */
+ 0xcc, /* 11001100 */
+ 0xc6, /* 11000110 */
+ 0x00, /* 00000000 */
+
+ /* 83 0x53 'S' */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0x60, /* 01100000 */
+ 0x38, /* 00111000 */
+ 0x0c, /* 00001100 */
+ 0xc6, /* 11000110 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+
+ /* 84 0x54 'T' */
+ 0x7e, /* 01111110 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+
+ /* 85 0x55 'U' */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+
+ /* 86 0x56 'V' */
+ 0xc3, /* 11000011 */
+ 0xc3, /* 11000011 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x3c, /* 00111100 */
+ 0x3c, /* 00111100 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+
+ /* 87 0x57 'W' */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xd6, /* 11010110 */
+ 0xfe, /* 11111110 */
+ 0xee, /* 11101110 */
+ 0xc6, /* 11000110 */
+ 0x00, /* 00000000 */
+
+ /* 88 0x58 'X' */
+ 0xc3, /* 11000011 */
+ 0x66, /* 01100110 */
+ 0x3c, /* 00111100 */
+ 0x18, /* 00011000 */
+ 0x3c, /* 00111100 */
+ 0x66, /* 01100110 */
+ 0xc3, /* 11000011 */
+ 0x00, /* 00000000 */
+
+ /* 89 0x59 'Y' */
+ 0xc3, /* 11000011 */
+ 0xc3, /* 11000011 */
+ 0x66, /* 01100110 */
+ 0x3c, /* 00111100 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+
+ /* 90 0x5a 'Z' */
+ 0xfe, /* 11111110 */
+ 0x06, /* 00000110 */
+ 0x0c, /* 00001100 */
+ 0x18, /* 00011000 */
+ 0x30, /* 00110000 */
+ 0x60, /* 01100000 */
+ 0xfe, /* 11111110 */
+ 0x00, /* 00000000 */
+
+ /* 91 0x5b '[' */
+ 0x3c, /* 00111100 */
+ 0x30, /* 00110000 */
+ 0x30, /* 00110000 */
+ 0x30, /* 00110000 */
+ 0x30, /* 00110000 */
+ 0x30, /* 00110000 */
+ 0x3c, /* 00111100 */
+ 0x00, /* 00000000 */
+
+ /* 92 0x5c '\' */
+ 0xc0, /* 11000000 */
+ 0x60, /* 01100000 */
+ 0x30, /* 00110000 */
+ 0x18, /* 00011000 */
+ 0x0c, /* 00001100 */
+ 0x06, /* 00000110 */
+ 0x03, /* 00000011 */
+ 0x00, /* 00000000 */
+
+ /* 93 0x5d ']' */
+ 0x3c, /* 00111100 */
+ 0x0c, /* 00001100 */
+ 0x0c, /* 00001100 */
+ 0x0c, /* 00001100 */
+ 0x0c, /* 00001100 */
+ 0x0c, /* 00001100 */
+ 0x3c, /* 00111100 */
+ 0x00, /* 00000000 */
+
+ /* 94 0x5e '^' */
+ 0x10, /* 00010000 */
+ 0x38, /* 00111000 */
+ 0x6c, /* 01101100 */
+ 0xc6, /* 11000110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 95 0x5f '_' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xfe, /* 11111110 */
+
+ /* 96 0x60 '`' */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x0c, /* 00001100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 97 0x61 'a' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7c, /* 01111100 */
+ 0x06, /* 00000110 */
+ 0x7e, /* 01111110 */
+ 0xc6, /* 11000110 */
+ 0x7e, /* 01111110 */
+ 0x00, /* 00000000 */
+
+ /* 98 0x62 'b' */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0xfc, /* 11111100 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xfc, /* 11111100 */
+ 0x00, /* 00000000 */
+
+ /* 99 0x63 'c' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xc0, /* 11000000 */
+ 0xc6, /* 11000110 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+
+ /* 100 0x64 'd' */
+ 0x06, /* 00000110 */
+ 0x06, /* 00000110 */
+ 0x7e, /* 01111110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x7e, /* 01111110 */
+ 0x00, /* 00000000 */
+
+ /* 101 0x65 'e' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xfe, /* 11111110 */
+ 0xc0, /* 11000000 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+
+ /* 102 0x66 'f' */
+ 0x3c, /* 00111100 */
+ 0x66, /* 01100110 */
+ 0x60, /* 01100000 */
+ 0xf0, /* 11110000 */
+ 0x60, /* 01100000 */
+ 0x60, /* 01100000 */
+ 0x60, /* 01100000 */
+ 0x00, /* 00000000 */
+
+ /* 103 0x67 'g' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7e, /* 01111110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x7e, /* 01111110 */
+ 0x06, /* 00000110 */
+ 0x7c, /* 01111100 */
+
+ /* 104 0x68 'h' */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0xfc, /* 11111100 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x00, /* 00000000 */
+
+ /* 105 0x69 'i' */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+
+ /* 106 0x6a 'j' */
+ 0x06, /* 00000110 */
+ 0x00, /* 00000000 */
+ 0x06, /* 00000110 */
+ 0x06, /* 00000110 */
+ 0x06, /* 00000110 */
+ 0x06, /* 00000110 */
+ 0xc6, /* 11000110 */
+ 0x7c, /* 01111100 */
+
+ /* 107 0x6b 'k' */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0xcc, /* 11001100 */
+ 0xd8, /* 11011000 */
+ 0xf0, /* 11110000 */
+ 0xd8, /* 11011000 */
+ 0xcc, /* 11001100 */
+ 0x00, /* 00000000 */
+
+ /* 108 0x6c 'l' */
+ 0x38, /* 00111000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+
+ /* 109 0x6d 'm' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xec, /* 11101100 */
+ 0xfe, /* 11111110 */
+ 0xd6, /* 11010110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x00, /* 00000000 */
+
+ /* 110 0x6e 'n' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xfc, /* 11111100 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x00, /* 00000000 */
+
+ /* 111 0x6f 'o' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+
+ /* 112 0x70 'p' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xfc, /* 11111100 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xfc, /* 11111100 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+
+ /* 113 0x71 'q' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7e, /* 01111110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x7e, /* 01111110 */
+ 0x06, /* 00000110 */
+ 0x06, /* 00000110 */
+
+ /* 114 0x72 'r' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xdc, /* 11011100 */
+ 0xe6, /* 11100110 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0x00, /* 00000000 */
+
+ /* 115 0x73 's' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7e, /* 01111110 */
+ 0xc0, /* 11000000 */
+ 0x7c, /* 01111100 */
+ 0x06, /* 00000110 */
+ 0xfc, /* 11111100 */
+ 0x00, /* 00000000 */
+
+ /* 116 0x74 't' */
+ 0x30, /* 00110000 */
+ 0x30, /* 00110000 */
+ 0x7c, /* 01111100 */
+ 0x30, /* 00110000 */
+ 0x30, /* 00110000 */
+ 0x36, /* 00110110 */
+ 0x1c, /* 00011100 */
+ 0x00, /* 00000000 */
+
+ /* 117 0x75 'u' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+
+ /* 118 0x76 'v' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x6c, /* 01101100 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+
+ /* 119 0x77 'w' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xd6, /* 11010110 */
+ 0xfe, /* 11111110 */
+ 0x6c, /* 01101100 */
+ 0x00, /* 00000000 */
+
+ /* 120 0x78 'x' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xc6, /* 11000110 */
+ 0x6c, /* 01101100 */
+ 0x38, /* 00111000 */
+ 0x6c, /* 01101100 */
+ 0xc6, /* 11000110 */
+ 0x00, /* 00000000 */
+
+ /* 121 0x79 'y' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xc3, /* 11000011 */
+ 0x66, /* 01100110 */
+ 0x3c, /* 00111100 */
+ 0x18, /* 00011000 */
+ 0x30, /* 00110000 */
+ 0x60, /* 01100000 */
+
+ /* 122 0x7a 'z' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xfe, /* 11111110 */
+ 0x0c, /* 00001100 */
+ 0x38, /* 00111000 */
+ 0x60, /* 01100000 */
+ 0xfe, /* 11111110 */
+ 0x00, /* 00000000 */
+
+ /* 123 0x7b '{' */
+ 0x0e, /* 00001110 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x70, /* 01110000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x0e, /* 00001110 */
+ 0x00, /* 00000000 */
+
+ /* 124 0x7c '|' */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+
+ /* 125 0x7d '}' */
+ 0x70, /* 01110000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x0e, /* 00001110 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x70, /* 01110000 */
+ 0x00, /* 00000000 */
+
+ /* 126 0x7e '~' */
+ 0x72, /* 01110010 */
+ 0x9c, /* 10011100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 127 0x7f '⌂' */
+ 0x00, /* 00000000 */
+ 0x10, /* 00010000 */
+ 0x38, /* 00111000 */
+ 0x6c, /* 01101100 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xfe, /* 11111110 */
+ 0x00, /* 00000000 */
+
+ /* 128 0x80 'Ç' */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0xc6, /* 11000110 */
+ 0x7c, /* 01111100 */
+ 0x0c, /* 00001100 */
+ 0x78, /* 01111000 */
+
+ /* 129 0x81 'ü' */
+ 0xcc, /* 11001100 */
+ 0x00, /* 00000000 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0x76, /* 01110110 */
+ 0x00, /* 00000000 */
+
+ /* 130 0x82 'é' */
+ 0x0c, /* 00001100 */
+ 0x18, /* 00011000 */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xfe, /* 11111110 */
+ 0xc0, /* 11000000 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+
+ /* 131 0x83 'â' */
+ 0x7c, /* 01111100 */
+ 0x82, /* 10000010 */
+ 0x78, /* 01111000 */
+ 0x0c, /* 00001100 */
+ 0x7c, /* 01111100 */
+ 0xcc, /* 11001100 */
+ 0x76, /* 01110110 */
+ 0x00, /* 00000000 */
+
+ /* 132 0x84 'ä' */
+ 0xc6, /* 11000110 */
+ 0x00, /* 00000000 */
+ 0x78, /* 01111000 */
+ 0x0c, /* 00001100 */
+ 0x7c, /* 01111100 */
+ 0xcc, /* 11001100 */
+ 0x76, /* 01110110 */
+ 0x00, /* 00000000 */
+
+ /* 133 0x85 'à' */
+ 0x30, /* 00110000 */
+ 0x18, /* 00011000 */
+ 0x78, /* 01111000 */
+ 0x0c, /* 00001100 */
+ 0x7c, /* 01111100 */
+ 0xcc, /* 11001100 */
+ 0x76, /* 01110110 */
+ 0x00, /* 00000000 */
+
+ /* 134 0x86 'å' */
+ 0x30, /* 00110000 */
+ 0x30, /* 00110000 */
+ 0x78, /* 01111000 */
+ 0x0c, /* 00001100 */
+ 0x7c, /* 01111100 */
+ 0xcc, /* 11001100 */
+ 0x76, /* 01110110 */
+ 0x00, /* 00000000 */
+
+ /* 135 0x87 'ç' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7e, /* 01111110 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0x7e, /* 01111110 */
+ 0x0c, /* 00001100 */
+ 0x38, /* 00111000 */
+
+ /* 136 0x88 'ê' */
+ 0x7c, /* 01111100 */
+ 0x82, /* 10000010 */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xfe, /* 11111110 */
+ 0xc0, /* 11000000 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+
+ /* 137 0x89 'ë' */
+ 0xc6, /* 11000110 */
+ 0x00, /* 00000000 */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xfe, /* 11111110 */
+ 0xc0, /* 11000000 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+
+ /* 138 0x8a 'è' */
+ 0x30, /* 00110000 */
+ 0x18, /* 00011000 */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xfe, /* 11111110 */
+ 0xc0, /* 11000000 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+
+ /* 139 0x8b 'ï' */
+ 0x66, /* 01100110 */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x3c, /* 00111100 */
+ 0x00, /* 00000000 */
+
+ /* 140 0x8c 'î' */
+ 0x7c, /* 01111100 */
+ 0x82, /* 10000010 */
+ 0x38, /* 00111000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x3c, /* 00111100 */
+ 0x00, /* 00000000 */
+
+ /* 141 0x8d 'ì' */
+ 0x30, /* 00110000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x3c, /* 00111100 */
+ 0x00, /* 00000000 */
+
+ /* 142 0x8e 'Ä' */
+ 0xc6, /* 11000110 */
+ 0x38, /* 00111000 */
+ 0x6c, /* 01101100 */
+ 0xc6, /* 11000110 */
+ 0xfe, /* 11111110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x00, /* 00000000 */
+
+ /* 143 0x8f 'Å' */
+ 0x38, /* 00111000 */
+ 0x6c, /* 01101100 */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xfe, /* 11111110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x00, /* 00000000 */
+
+ /* 144 0x90 'É' */
+ 0x18, /* 00011000 */
+ 0x30, /* 00110000 */
+ 0xfe, /* 11111110 */
+ 0xc0, /* 11000000 */
+ 0xf8, /* 11111000 */
+ 0xc0, /* 11000000 */
+ 0xfe, /* 11111110 */
+ 0x00, /* 00000000 */
+
+ /* 145 0x91 'æ' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7e, /* 01111110 */
+ 0x18, /* 00011000 */
+ 0x7e, /* 01111110 */
+ 0xd8, /* 11011000 */
+ 0x7e, /* 01111110 */
+ 0x00, /* 00000000 */
+
+ /* 146 0x92 'Æ' */
+ 0x3e, /* 00111110 */
+ 0x6c, /* 01101100 */
+ 0xcc, /* 11001100 */
+ 0xfe, /* 11111110 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xce, /* 11001110 */
+ 0x00, /* 00000000 */
+
+ /* 147 0x93 'ô' */
+ 0x7c, /* 01111100 */
+ 0x82, /* 10000010 */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+
+ /* 148 0x94 'ö' */
+ 0xc6, /* 11000110 */
+ 0x00, /* 00000000 */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+
+ /* 149 0x95 'ò' */
+ 0x30, /* 00110000 */
+ 0x18, /* 00011000 */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+
+ /* 150 0x96 'û' */
+ 0x78, /* 01111000 */
+ 0x84, /* 10000100 */
+ 0x00, /* 00000000 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0x76, /* 01110110 */
+ 0x00, /* 00000000 */
+
+ /* 151 0x97 'ù' */
+ 0x60, /* 01100000 */
+ 0x30, /* 00110000 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0x76, /* 01110110 */
+ 0x00, /* 00000000 */
+
+ /* 152 0x98 'ÿ' */
+ 0xc6, /* 11000110 */
+ 0x00, /* 00000000 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x7e, /* 01111110 */
+ 0x06, /* 00000110 */
+ 0xfc, /* 11111100 */
+
+ /* 153 0x99 'Ö' */
+ 0xc6, /* 11000110 */
+ 0x38, /* 00111000 */
+ 0x6c, /* 01101100 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x6c, /* 01101100 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+
+ /* 154 0x9a 'Ü' */
+ 0xc6, /* 11000110 */
+ 0x00, /* 00000000 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+
+ /* 155 0x9b '¢' */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x7e, /* 01111110 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0x7e, /* 01111110 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+
+ /* 156 0x9c '£' */
+ 0x38, /* 00111000 */
+ 0x6c, /* 01101100 */
+ 0x64, /* 01100100 */
+ 0xf0, /* 11110000 */
+ 0x60, /* 01100000 */
+ 0x66, /* 01100110 */
+ 0xfc, /* 11111100 */
+ 0x00, /* 00000000 */
+
+ /* 157 0x9d '¥' */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x3c, /* 00111100 */
+ 0x7e, /* 01111110 */
+ 0x18, /* 00011000 */
+ 0x7e, /* 01111110 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+
+ /* 158 0x9e '₧' */
+ 0xf8, /* 11111000 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xfa, /* 11111010 */
+ 0xc6, /* 11000110 */
+ 0xcf, /* 11001111 */
+ 0xc6, /* 11000110 */
+ 0xc7, /* 11000111 */
+
+ /* 159 0x9f 'ƒ' */
+ 0x0e, /* 00001110 */
+ 0x1b, /* 00011011 */
+ 0x18, /* 00011000 */
+ 0x3c, /* 00111100 */
+ 0x18, /* 00011000 */
+ 0xd8, /* 11011000 */
+ 0x70, /* 01110000 */
+ 0x00, /* 00000000 */
+
+ /* 160 0xa0 'á' */
+ 0x18, /* 00011000 */
+ 0x30, /* 00110000 */
+ 0x78, /* 01111000 */
+ 0x0c, /* 00001100 */
+ 0x7c, /* 01111100 */
+ 0xcc, /* 11001100 */
+ 0x76, /* 01110110 */
+ 0x00, /* 00000000 */
+
+ /* 161 0xa1 'í' */
+ 0x0c, /* 00001100 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x38, /* 00111000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x3c, /* 00111100 */
+ 0x00, /* 00000000 */
+
+ /* 162 0xa2 'ó' */
+ 0x0c, /* 00001100 */
+ 0x18, /* 00011000 */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+
+ /* 163 0xa3 'ú' */
+ 0x18, /* 00011000 */
+ 0x30, /* 00110000 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0x76, /* 01110110 */
+ 0x00, /* 00000000 */
+
+ /* 164 0xa4 'ñ' */
+ 0x76, /* 01110110 */
+ 0xdc, /* 11011100 */
+ 0x00, /* 00000000 */
+ 0xdc, /* 11011100 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x00, /* 00000000 */
+
+ /* 165 0xa5 'Ñ' */
+ 0x76, /* 01110110 */
+ 0xdc, /* 11011100 */
+ 0x00, /* 00000000 */
+ 0xe6, /* 11100110 */
+ 0xf6, /* 11110110 */
+ 0xde, /* 11011110 */
+ 0xce, /* 11001110 */
+ 0x00, /* 00000000 */
+
+ /* 166 0xa6 'ª' */
+ 0x3c, /* 00111100 */
+ 0x6c, /* 01101100 */
+ 0x6c, /* 01101100 */
+ 0x3e, /* 00111110 */
+ 0x00, /* 00000000 */
+ 0x7e, /* 01111110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 167 0xa7 'º' */
+ 0x38, /* 00111000 */
+ 0x6c, /* 01101100 */
+ 0x6c, /* 01101100 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 168 0xa8 '¿' */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x30, /* 00110000 */
+ 0x63, /* 01100011 */
+ 0x3e, /* 00111110 */
+ 0x00, /* 00000000 */
+
+ /* 169 0xa9 '⌐' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xfe, /* 11111110 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 170 0xaa '¬' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xfe, /* 11111110 */
+ 0x06, /* 00000110 */
+ 0x06, /* 00000110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 171 0xab '½' */
+ 0x63, /* 01100011 */
+ 0xe6, /* 11100110 */
+ 0x6c, /* 01101100 */
+ 0x7e, /* 01111110 */
+ 0x33, /* 00110011 */
+ 0x66, /* 01100110 */
+ 0xcc, /* 11001100 */
+ 0x0f, /* 00001111 */
+
+ /* 172 0xac '¼' */
+ 0x63, /* 01100011 */
+ 0xe6, /* 11100110 */
+ 0x6c, /* 01101100 */
+ 0x7a, /* 01111010 */
+ 0x36, /* 00110110 */
+ 0x6a, /* 01101010 */
+ 0xdf, /* 11011111 */
+ 0x06, /* 00000110 */
+
+ /* 173 0xad '¡' */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x3c, /* 00111100 */
+ 0x3c, /* 00111100 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+
+ /* 174 0xae '«' */
+ 0x00, /* 00000000 */
+ 0x33, /* 00110011 */
+ 0x66, /* 01100110 */
+ 0xcc, /* 11001100 */
+ 0x66, /* 01100110 */
+ 0x33, /* 00110011 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 175 0xaf '»' */
+ 0x00, /* 00000000 */
+ 0xcc, /* 11001100 */
+ 0x66, /* 01100110 */
+ 0x33, /* 00110011 */
+ 0x66, /* 01100110 */
+ 0xcc, /* 11001100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 176 0xb0 '░' */
+ 0x22, /* 00100010 */
+ 0x88, /* 10001000 */
+ 0x22, /* 00100010 */
+ 0x88, /* 10001000 */
+ 0x22, /* 00100010 */
+ 0x88, /* 10001000 */
+ 0x22, /* 00100010 */
+ 0x88, /* 10001000 */
+
+ /* 177 0xb1 '▒' */
+ 0x55, /* 01010101 */
+ 0xaa, /* 10101010 */
+ 0x55, /* 01010101 */
+ 0xaa, /* 10101010 */
+ 0x55, /* 01010101 */
+ 0xaa, /* 10101010 */
+ 0x55, /* 01010101 */
+ 0xaa, /* 10101010 */
+
+ /* 178 0xb2 '▓' */
+ 0x77, /* 01110111 */
+ 0xdd, /* 11011101 */
+ 0x77, /* 01110111 */
+ 0xdd, /* 11011101 */
+ 0x77, /* 01110111 */
+ 0xdd, /* 11011101 */
+ 0x77, /* 01110111 */
+ 0xdd, /* 11011101 */
+
+ /* 179 0xb3 '│' */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+
+ /* 180 0xb4 '┤' */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0xf8, /* 11111000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+
+ /* 181 0xb5 '╡' */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0xf8, /* 11111000 */
+ 0x18, /* 00011000 */
+ 0xf8, /* 11111000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+
+ /* 182 0xb6 '╢' */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0xf6, /* 11110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+
+ /* 183 0xb7 '╖' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xfe, /* 11111110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+
+ /* 184 0xb8 '╕' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xf8, /* 11111000 */
+ 0x18, /* 00011000 */
+ 0xf8, /* 11111000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+
+ /* 185 0xb9 '╣' */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0xf6, /* 11110110 */
+ 0x06, /* 00000110 */
+ 0xf6, /* 11110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+
+ /* 186 0xba '║' */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+
+ /* 187 0xbb '╗' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xfe, /* 11111110 */
+ 0x06, /* 00000110 */
+ 0xf6, /* 11110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+
+ /* 188 0xbc '╝' */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0xf6, /* 11110110 */
+ 0x06, /* 00000110 */
+ 0xfe, /* 11111110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 189 0xbd '╜' */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0xfe, /* 11111110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 190 0xbe '╛' */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0xf8, /* 11111000 */
+ 0x18, /* 00011000 */
+ 0xf8, /* 11111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 191 0xbf '┐' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xf8, /* 11111000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+
+ /* 192 0xc0 '└' */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x1f, /* 00011111 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 193 0xc1 '┴' */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0xff, /* 11111111 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 194 0xc2 '┬' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xff, /* 11111111 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+
+ /* 195 0xc3 '├' */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x1f, /* 00011111 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+
+ /* 196 0xc4 '─' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xff, /* 11111111 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 197 0xc5 '┼' */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0xff, /* 11111111 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+
+ /* 198 0xc6 '╞' */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x1f, /* 00011111 */
+ 0x18, /* 00011000 */
+ 0x1f, /* 00011111 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+
+ /* 199 0xc7 '╟' */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x37, /* 00110111 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+
+ /* 200 0xc8 '╚' */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x37, /* 00110111 */
+ 0x30, /* 00110000 */
+ 0x3f, /* 00111111 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 201 0xc9 '╔' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x3f, /* 00111111 */
+ 0x30, /* 00110000 */
+ 0x37, /* 00110111 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+
+ /* 202 0xca '╩' */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0xf7, /* 11110111 */
+ 0x00, /* 00000000 */
+ 0xff, /* 11111111 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 203 0xcb '╦' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xff, /* 11111111 */
+ 0x00, /* 00000000 */
+ 0xf7, /* 11110111 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+
+ /* 204 0xcc '╠' */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x37, /* 00110111 */
+ 0x30, /* 00110000 */
+ 0x37, /* 00110111 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+
+ /* 205 0xcd '═' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xff, /* 11111111 */
+ 0x00, /* 00000000 */
+ 0xff, /* 11111111 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 206 0xce '╬' */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0xf7, /* 11110111 */
+ 0x00, /* 00000000 */
+ 0xf7, /* 11110111 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+
+ /* 207 0xcf '╧' */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0xff, /* 11111111 */
+ 0x00, /* 00000000 */
+ 0xff, /* 11111111 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 208 0xd0 '╨' */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0xff, /* 11111111 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 209 0xd1 '╤' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xff, /* 11111111 */
+ 0x00, /* 00000000 */
+ 0xff, /* 11111111 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+
+ /* 210 0xd2 '╥' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xff, /* 11111111 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+
+ /* 211 0xd3 '╙' */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x3f, /* 00111111 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 212 0xd4 '╘' */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x1f, /* 00011111 */
+ 0x18, /* 00011000 */
+ 0x1f, /* 00011111 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 213 0xd5 '╒' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x1f, /* 00011111 */
+ 0x18, /* 00011000 */
+ 0x1f, /* 00011111 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+
+ /* 214 0xd6 '╓' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x3f, /* 00111111 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+
+ /* 215 0xd7 '╫' */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0xff, /* 11111111 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+
+ /* 216 0xd8 '╪' */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0xff, /* 11111111 */
+ 0x18, /* 00011000 */
+ 0xff, /* 11111111 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+
+ /* 217 0xd9 '┘' */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0xf8, /* 11111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 218 0xda '┌' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x1f, /* 00011111 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+
+ /* 219 0xdb '█' */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+
+ /* 220 0xdc '▄' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+
+ /* 221 0xdd '▌' */
+ 0xf0, /* 11110000 */
+ 0xf0, /* 11110000 */
+ 0xf0, /* 11110000 */
+ 0xf0, /* 11110000 */
+ 0xf0, /* 11110000 */
+ 0xf0, /* 11110000 */
+ 0xf0, /* 11110000 */
+ 0xf0, /* 11110000 */
+
+ /* 222 0xde '▐' */
+ 0x0f, /* 00001111 */
+ 0x0f, /* 00001111 */
+ 0x0f, /* 00001111 */
+ 0x0f, /* 00001111 */
+ 0x0f, /* 00001111 */
+ 0x0f, /* 00001111 */
+ 0x0f, /* 00001111 */
+ 0x0f, /* 00001111 */
+
+ /* 223 0xdf '▀' */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0xff, /* 11111111 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 224 0xe0 'α' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x76, /* 01110110 */
+ 0xdc, /* 11011100 */
+ 0xc8, /* 11001000 */
+ 0xdc, /* 11011100 */
+ 0x76, /* 01110110 */
+ 0x00, /* 00000000 */
+
+ /* 225 0xe1 'ß' */
+ 0x78, /* 01111000 */
+ 0xcc, /* 11001100 */
+ 0xcc, /* 11001100 */
+ 0xd8, /* 11011000 */
+ 0xcc, /* 11001100 */
+ 0xc6, /* 11000110 */
+ 0xcc, /* 11001100 */
+ 0x00, /* 00000000 */
+
+ /* 226 0xe2 'Γ' */
+ 0xfe, /* 11111110 */
+ 0xc6, /* 11000110 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0xc0, /* 11000000 */
+ 0x00, /* 00000000 */
+
+ /* 227 0xe3 'π' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0xfe, /* 11111110 */
+ 0x6c, /* 01101100 */
+ 0x6c, /* 01101100 */
+ 0x6c, /* 01101100 */
+ 0x6c, /* 01101100 */
+ 0x00, /* 00000000 */
+
+ /* 228 0xe4 'Σ' */
+ 0xfe, /* 11111110 */
+ 0xc6, /* 11000110 */
+ 0x60, /* 01100000 */
+ 0x30, /* 00110000 */
+ 0x60, /* 01100000 */
+ 0xc6, /* 11000110 */
+ 0xfe, /* 11111110 */
+ 0x00, /* 00000000 */
+
+ /* 229 0xe5 'σ' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7e, /* 01111110 */
+ 0xd8, /* 11011000 */
+ 0xd8, /* 11011000 */
+ 0xd8, /* 11011000 */
+ 0x70, /* 01110000 */
+ 0x00, /* 00000000 */
+
+ /* 230 0xe6 'µ' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x7c, /* 01111100 */
+ 0xc0, /* 11000000 */
+
+ /* 231 0xe7 'τ' */
+ 0x00, /* 00000000 */
+ 0x76, /* 01110110 */
+ 0xdc, /* 11011100 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+
+ /* 232 0xe8 'Φ' */
+ 0x7e, /* 01111110 */
+ 0x18, /* 00011000 */
+ 0x3c, /* 00111100 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x3c, /* 00111100 */
+ 0x18, /* 00011000 */
+ 0x7e, /* 01111110 */
+
+ /* 233 0xe9 'Θ' */
+ 0x38, /* 00111000 */
+ 0x6c, /* 01101100 */
+ 0xc6, /* 11000110 */
+ 0xfe, /* 11111110 */
+ 0xc6, /* 11000110 */
+ 0x6c, /* 01101100 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+
+ /* 234 0xea 'Ω' */
+ 0x38, /* 00111000 */
+ 0x6c, /* 01101100 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x6c, /* 01101100 */
+ 0x6c, /* 01101100 */
+ 0xee, /* 11101110 */
+ 0x00, /* 00000000 */
+
+ /* 235 0xeb 'δ' */
+ 0x0e, /* 00001110 */
+ 0x18, /* 00011000 */
+ 0x0c, /* 00001100 */
+ 0x3e, /* 00111110 */
+ 0x66, /* 01100110 */
+ 0x66, /* 01100110 */
+ 0x3c, /* 00111100 */
+ 0x00, /* 00000000 */
+
+ /* 236 0xec '∞' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x7e, /* 01111110 */
+ 0xdb, /* 11011011 */
+ 0xdb, /* 11011011 */
+ 0x7e, /* 01111110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 237 0xed 'φ' */
+ 0x06, /* 00000110 */
+ 0x0c, /* 00001100 */
+ 0x7e, /* 01111110 */
+ 0xdb, /* 11011011 */
+ 0xdb, /* 11011011 */
+ 0x7e, /* 01111110 */
+ 0x60, /* 01100000 */
+ 0xc0, /* 11000000 */
+
+ /* 238 0xee 'ε' */
+ 0x1e, /* 00011110 */
+ 0x30, /* 00110000 */
+ 0x60, /* 01100000 */
+ 0x7e, /* 01111110 */
+ 0x60, /* 01100000 */
+ 0x30, /* 00110000 */
+ 0x1e, /* 00011110 */
+ 0x00, /* 00000000 */
+
+ /* 239 0xef '∩' */
+ 0x00, /* 00000000 */
+ 0x7c, /* 01111100 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0xc6, /* 11000110 */
+ 0x00, /* 00000000 */
+
+ /* 240 0xf0 '≡' */
+ 0x00, /* 00000000 */
+ 0xfe, /* 11111110 */
+ 0x00, /* 00000000 */
+ 0xfe, /* 11111110 */
+ 0x00, /* 00000000 */
+ 0xfe, /* 11111110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 241 0xf1 '±' */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x7e, /* 01111110 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x7e, /* 01111110 */
+ 0x00, /* 00000000 */
+
+ /* 242 0xf2 '≥' */
+ 0x30, /* 00110000 */
+ 0x18, /* 00011000 */
+ 0x0c, /* 00001100 */
+ 0x18, /* 00011000 */
+ 0x30, /* 00110000 */
+ 0x00, /* 00000000 */
+ 0x7e, /* 01111110 */
+ 0x00, /* 00000000 */
+
+ /* 243 0xf3 '≤' */
+ 0x0c, /* 00001100 */
+ 0x18, /* 00011000 */
+ 0x30, /* 00110000 */
+ 0x18, /* 00011000 */
+ 0x0c, /* 00001100 */
+ 0x00, /* 00000000 */
+ 0x7e, /* 01111110 */
+ 0x00, /* 00000000 */
+
+ /* 244 0xf4 '⌠' */
+ 0x0e, /* 00001110 */
+ 0x1b, /* 00011011 */
+ 0x1b, /* 00011011 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+
+ /* 245 0xf5 '⌡' */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0xd8, /* 11011000 */
+ 0xd8, /* 11011000 */
+ 0x70, /* 01110000 */
+
+ /* 246 0xf6 '÷' */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x7e, /* 01111110 */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 247 0xf7 '≈' */
+ 0x00, /* 00000000 */
+ 0x76, /* 01110110 */
+ 0xdc, /* 11011100 */
+ 0x00, /* 00000000 */
+ 0x76, /* 01110110 */
+ 0xdc, /* 11011100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 248 0xf8 '°' */
+ 0x38, /* 00111000 */
+ 0x6c, /* 01101100 */
+ 0x6c, /* 01101100 */
+ 0x38, /* 00111000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 249 0xf9 '·' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 250 0xfa '•' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x18, /* 00011000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 251 0xfb '√' */
+ 0x0f, /* 00001111 */
+ 0x0c, /* 00001100 */
+ 0x0c, /* 00001100 */
+ 0x0c, /* 00001100 */
+ 0xec, /* 11101100 */
+ 0x6c, /* 01101100 */
+ 0x3c, /* 00111100 */
+ 0x1c, /* 00011100 */
+
+ /* 252 0xfc 'ⁿ' */
+ 0x6c, /* 01101100 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x36, /* 00110110 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 253 0xfd '²' */
+ 0x78, /* 01111000 */
+ 0x0c, /* 00001100 */
+ 0x18, /* 00011000 */
+ 0x30, /* 00110000 */
+ 0x7c, /* 01111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 254 0xfe '■' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x3c, /* 00111100 */
+ 0x3c, /* 00111100 */
+ 0x3c, /* 00111100 */
+ 0x3c, /* 00111100 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+
+ /* 255 0xff ' ' */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+ 0x00, /* 00000000 */
+} };
+
+const struct font_desc font_pearl_8x8 = {
+ .idx = PEARL8x8_IDX,
+ .name = "PEARL8x8",
+ .width = 8,
+ .height = 8,
+ .charcount = 256,
+ .data = fontdata_pearl8x8.data,
+ .pref = 2,
+};
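
The descriptor above is what consumers of the font see: `.data` points at the 256-glyph bitmap table, and with an 8-pixel width each glyph is simply eight consecutive bytes, one per pixel row, most significant bit leftmost. In-kernel users locate the table by name through find_font("PEARL8x8"), declared in <linux/font.h>. A minimal user-space sketch of that layout (the helper and the copied 'A' glyph below are illustrative only, not kernel code):

    #include <stdio.h>

    /*
     * Illustrative sketch, assuming the layout shown above: one byte per
     * pixel row, MSB = leftmost pixel, eight rows per glyph, 256 glyphs
     * stored contiguously in code-point order (8 x 256 = 2048 data bytes).
     */
    static void dump_glyph_8x8(const unsigned char *data, unsigned int ch)
    {
            const unsigned char *glyph = data + ch * 8; /* 8 bytes/glyph */
            unsigned int row, bit;

            for (row = 0; row < 8; row++) {
                    for (bit = 0; bit < 8; bit++)
                            putchar((glyph[row] & (0x80u >> bit)) ? '#' : '.');
                    putchar('\n');
            }
    }

    int main(void)
    {
            /* The 'A' glyph (index 65), copied from the table above. */
            static const unsigned char pearl_A[8] = {
                    0x10, 0x38, 0x6c, 0xc6, 0xfe, 0xc6, 0xc6, 0x00
            };

            dump_glyph_8x8(pearl_A, 0);
            return 0;
    }

The output reproduces the binary comments next to each byte. Row-major, MSB-first packing is what lets the console code blit a glyph with plain byte loads and shifts; fonts wider than 8 pixels, such as the 12x22 one added next, pad each row out to two bytes.
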
diff --git a/lib/fonts/font_sun12x22.c b/lib/fonts/font_sun12x22.c
new file mode 100644
index 000000000..91daf5ab8
--- /dev/null
+++ b/lib/fonts/font_sun12x22.c
@@ -0,0 +1,6166 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/font.h>
+
+#define FONTDATAMAX 11264
+
+static const struct font_data fontdata_sun12x22 = {
+ { 0, 0, FONTDATAMAX, 0 }, {
+ /* 0 0x00 '^@' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
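	/*
	 * Layout note (illustrative, not part of the table): each 12-pixel
	 * row above is stored as two bytes, MSB first, with only the top
	 * twelve bits significant -- the low nibble of the second byte is
	 * padding.  A hypothetical decode helper:
	 *
	 *	unsigned int row_bits(unsigned char hi, unsigned char lo)
	 *	{
	 *		return ((unsigned int)hi << 4) | (lo >> 4);
	 *	}
	 *
	 * so row_bits(0x1f, 0xc0) == 0x1fc == 000111111100, matching the
	 * binary comments.  One glyph is 22 rows x 2 bytes = 44 bytes, and
	 * 256 x 44 = 11264 = FONTDATAMAX above.
	 */
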
+ /* 1 0x01 '^A' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x1f, 0xc0, /* 000111111100 */
+ 0x30, 0x60, /* 001100000110 */
+ 0x65, 0x30, /* 011001010011 */
+ 0x6d, 0xb0, /* 011011011011 */
+ 0x60, 0x30, /* 011000000011 */
+ 0x62, 0x30, /* 011000100011 */
+ 0x62, 0x30, /* 011000100011 */
+ 0x60, 0x30, /* 011000000011 */
+ 0x6f, 0xb0, /* 011011111011 */
+ 0x67, 0x30, /* 011001110011 */
+ 0x30, 0x60, /* 001100000110 */
+ 0x1f, 0xc0, /* 000111111100 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 2 0x02 '^B' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x1f, 0xc0, /* 000111111100 */
+ 0x3f, 0xe0, /* 001111111110 */
+ 0x7a, 0xf0, /* 011110101111 */
+ 0x72, 0x70, /* 011100100111 */
+ 0x7f, 0xf0, /* 011111111111 */
+ 0x7d, 0xf0, /* 011111011111 */
+ 0x7d, 0xf0, /* 011111011111 */
+ 0x7f, 0xf0, /* 011111111111 */
+ 0x70, 0x70, /* 011100000111 */
+ 0x78, 0xf0, /* 011110001111 */
+ 0x3f, 0xe0, /* 001111111110 */
+ 0x1f, 0xc0, /* 000111111100 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 3 0x03 '^C' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x3f, 0xc0, /* 001111111100 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x3f, 0xc0, /* 001111111100 */
+ 0x3f, 0xc0, /* 001111111100 */
+ 0x1f, 0x80, /* 000111111000 */
+ 0x1f, 0x80, /* 000111111000 */
+ 0x0f, 0x00, /* 000011110000 */
+ 0x0f, 0x00, /* 000011110000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 4 0x04 '^D' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x02, 0x00, /* 000000100000 */
+ 0x07, 0x00, /* 000001110000 */
+ 0x0f, 0x80, /* 000011111000 */
+ 0x0f, 0x80, /* 000011111000 */
+ 0x1f, 0xc0, /* 000111111100 */
+ 0x1f, 0xc0, /* 000111111100 */
+ 0x3f, 0xe0, /* 001111111110 */
+ 0x1f, 0xc0, /* 000111111100 */
+ 0x1f, 0xc0, /* 000111111100 */
+ 0x0f, 0x80, /* 000011111000 */
+ 0x0f, 0x80, /* 000011111000 */
+ 0x07, 0x00, /* 000001110000 */
+ 0x02, 0x00, /* 000000100000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 5 0x05 '^E' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x02, 0x00, /* 000000100000 */
+ 0x07, 0x00, /* 000001110000 */
+ 0x07, 0x00, /* 000001110000 */
+ 0x02, 0x00, /* 000000100000 */
+ 0x18, 0xc0, /* 000110001100 */
+ 0x3d, 0xe0, /* 001111011110 */
+ 0x3d, 0xe0, /* 001111011110 */
+ 0x1a, 0xc0, /* 000110101100 */
+ 0x02, 0x00, /* 000000100000 */
+ 0x07, 0x00, /* 000001110000 */
+ 0x0f, 0x80, /* 000011111000 */
+ 0x1f, 0xc0, /* 000111111100 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 6 0x06 '^F' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x0f, 0x00, /* 000011110000 */
+ 0x1f, 0x80, /* 000111111000 */
+ 0x1f, 0x80, /* 000111111000 */
+ 0x3f, 0xc0, /* 001111111100 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x36, 0xc0, /* 001101101100 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x0f, 0x00, /* 000011110000 */
+ 0x1f, 0x80, /* 000111111000 */
+ 0x3f, 0xc0, /* 001111111100 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 7 0x07 '^G' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x0f, 0x00, /* 000011110000 */
+ 0x0f, 0x00, /* 000011110000 */
+ 0x1f, 0x80, /* 000111111000 */
+ 0x1f, 0x80, /* 000111111000 */
+ 0x3f, 0xc0, /* 001111111100 */
+ 0x3f, 0xc0, /* 001111111100 */
+ 0x1f, 0x80, /* 000111111000 */
+ 0x1f, 0x80, /* 000111111000 */
+ 0x0f, 0x00, /* 000011110000 */
+ 0x0f, 0x00, /* 000011110000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 8 0x08 '^H' */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xf9, 0xf0, /* 111110011111 */
+ 0xf0, 0xf0, /* 111100001111 */
+ 0xf0, 0xf0, /* 111100001111 */
+ 0xe0, 0x70, /* 111000000111 */
+ 0xe0, 0x70, /* 111000000111 */
+ 0xc0, 0x30, /* 110000000011 */
+ 0xc0, 0x30, /* 110000000011 */
+ 0xe0, 0x70, /* 111000000111 */
+ 0xe0, 0x70, /* 111000000111 */
+ 0xf0, 0xf0, /* 111100001111 */
+ 0xf0, 0xf0, /* 111100001111 */
+ 0xf9, 0xf0, /* 111110011111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+
+ /* 9 0x09 '^I' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x0f, 0x00, /* 000011110000 */
+ 0x0f, 0x00, /* 000011110000 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x0f, 0x00, /* 000011110000 */
+ 0x0f, 0x00, /* 000011110000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 10 0x0a '^J' */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xf9, 0xf0, /* 111110011111 */
+ 0xf0, 0xf0, /* 111100001111 */
+ 0xf0, 0xf0, /* 111100001111 */
+ 0xe6, 0x70, /* 111001100111 */
+ 0xe6, 0x70, /* 111001100111 */
+ 0xcf, 0x30, /* 110011110011 */
+ 0xcf, 0x30, /* 110011110011 */
+ 0xe6, 0x70, /* 111001100111 */
+ 0xe6, 0x70, /* 111001100111 */
+ 0xf0, 0xf0, /* 111100001111 */
+ 0xf0, 0xf0, /* 111100001111 */
+ 0xf9, 0xf0, /* 111110011111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+
+ /* 11 0x0b '^K' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x0f, 0xe0, /* 000011111110 */
+ 0x0f, 0xe0, /* 000011111110 */
+ 0x01, 0xe0, /* 000000011110 */
+ 0x03, 0x60, /* 000000110110 */
+ 0x06, 0x60, /* 000001100110 */
+ 0x1e, 0x00, /* 000111100000 */
+ 0x33, 0x00, /* 001100110000 */
+ 0x33, 0x00, /* 001100110000 */
+ 0x61, 0x80, /* 011000011000 */
+ 0x61, 0x80, /* 011000011000 */
+ 0x33, 0x00, /* 001100110000 */
+ 0x33, 0x00, /* 001100110000 */
+ 0x1e, 0x00, /* 000111100000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 12 0x0c '^L' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x0f, 0x00, /* 000011110000 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x0f, 0x00, /* 000011110000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x3f, 0xc0, /* 001111111100 */
+ 0x3f, 0xc0, /* 001111111100 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 13 0x0d '^M' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x0f, 0xe0, /* 000011111110 */
+ 0x0c, 0x60, /* 000011000110 */
+ 0x0c, 0x60, /* 000011000110 */
+ 0x0f, 0xe0, /* 000011111110 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x3c, 0x00, /* 001111000000 */
+ 0x7c, 0x00, /* 011111000000 */
+ 0x78, 0x00, /* 011110000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 14 0x0e '^N' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x1f, 0xe0, /* 000111111110 */
+ 0x18, 0x60, /* 000110000110 */
+ 0x18, 0x60, /* 000110000110 */
+ 0x1f, 0xe0, /* 000111111110 */
+ 0x18, 0x60, /* 000110000110 */
+ 0x18, 0x60, /* 000110000110 */
+ 0x18, 0x60, /* 000110000110 */
+ 0x18, 0x60, /* 000110000110 */
+ 0x18, 0x60, /* 000110000110 */
+ 0x18, 0x60, /* 000110000110 */
+ 0x19, 0xe0, /* 000110011110 */
+ 0x1b, 0xe0, /* 000110111110 */
+ 0x1b, 0xc0, /* 000110111100 */
+ 0x79, 0x80, /* 011110011000 */
+ 0xf8, 0x00, /* 111110000000 */
+ 0xf0, 0x00, /* 111100000000 */
+ 0x60, 0x00, /* 011000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 15 0x0f '^O' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x18, 0xc0, /* 000110001100 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x6d, 0xb0, /* 011011011011 */
+ 0x3d, 0xe0, /* 001111011110 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x3d, 0xe0, /* 001111011110 */
+ 0x6d, 0xb0, /* 011011011011 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x18, 0xc0, /* 000110001100 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 16 0x10 '^P' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x20, /* 000000000010 */
+ 0x00, 0x60, /* 000000000110 */
+ 0x00, 0xe0, /* 000000001110 */
+ 0x01, 0xe0, /* 000000011110 */
+ 0x03, 0xe0, /* 000000111110 */
+ 0x07, 0xe0, /* 000001111110 */
+ 0x0f, 0xe0, /* 000011111110 */
+ 0x1f, 0xe0, /* 000111111110 */
+ 0x3f, 0xe0, /* 001111111110 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x3f, 0xe0, /* 001111111110 */
+ 0x1f, 0xe0, /* 000111111110 */
+ 0x0f, 0xe0, /* 000011111110 */
+ 0x07, 0xe0, /* 000001111110 */
+ 0x03, 0xe0, /* 000000111110 */
+ 0x01, 0xe0, /* 000000011110 */
+ 0x00, 0xe0, /* 000000001110 */
+ 0x00, 0x60, /* 000000000110 */
+ 0x00, 0x20, /* 000000000010 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 17 0x11 '^Q' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x40, 0x00, /* 010000000000 */
+ 0x60, 0x00, /* 011000000000 */
+ 0x70, 0x00, /* 011100000000 */
+ 0x78, 0x00, /* 011110000000 */
+ 0x7c, 0x00, /* 011111000000 */
+ 0x7e, 0x00, /* 011111100000 */
+ 0x7f, 0x00, /* 011111110000 */
+ 0x7f, 0x80, /* 011111111000 */
+ 0x7f, 0xc0, /* 011111111100 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x7f, 0xc0, /* 011111111100 */
+ 0x7f, 0x80, /* 011111111000 */
+ 0x7f, 0x00, /* 011111110000 */
+ 0x7e, 0x00, /* 011111100000 */
+ 0x7c, 0x00, /* 011111000000 */
+ 0x78, 0x00, /* 011110000000 */
+ 0x70, 0x00, /* 011100000000 */
+ 0x60, 0x00, /* 011000000000 */
+ 0x40, 0x00, /* 010000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 18 0x12 '^R' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x04, 0x00, /* 000001000000 */
+ 0x0e, 0x00, /* 000011100000 */
+ 0x1f, 0x00, /* 000111110000 */
+ 0x3f, 0x80, /* 001111111000 */
+ 0x7f, 0xc0, /* 011111111100 */
+ 0x0e, 0x00, /* 000011100000 */
+ 0x0e, 0x00, /* 000011100000 */
+ 0x7f, 0xc0, /* 011111111100 */
+ 0x3f, 0x80, /* 001111111000 */
+ 0x1f, 0x00, /* 000111110000 */
+ 0x0e, 0x00, /* 000011100000 */
+ 0x04, 0x00, /* 000001000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 19 0x13 '^S' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x31, 0x80, /* 001100011000 */
+ 0x31, 0x80, /* 001100011000 */
+ 0x31, 0x80, /* 001100011000 */
+ 0x31, 0x80, /* 001100011000 */
+ 0x31, 0x80, /* 001100011000 */
+ 0x31, 0x80, /* 001100011000 */
+ 0x31, 0x80, /* 001100011000 */
+ 0x31, 0x80, /* 001100011000 */
+ 0x31, 0x80, /* 001100011000 */
+ 0x31, 0x80, /* 001100011000 */
+ 0x31, 0x80, /* 001100011000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x31, 0x80, /* 001100011000 */
+ 0x31, 0x80, /* 001100011000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 20 0x14 '^T' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x1f, 0xf0, /* 000111111111 */
+ 0x3c, 0xc0, /* 001111001100 */
+ 0x7c, 0xc0, /* 011111001100 */
+ 0x7c, 0xc0, /* 011111001100 */
+ 0x7c, 0xc0, /* 011111001100 */
+ 0x3c, 0xc0, /* 001111001100 */
+ 0x1c, 0xc0, /* 000111001100 */
+ 0x0c, 0xc0, /* 000011001100 */
+ 0x0c, 0xc0, /* 000011001100 */
+ 0x0c, 0xc0, /* 000011001100 */
+ 0x0c, 0xc0, /* 000011001100 */
+ 0x0c, 0xc0, /* 000011001100 */
+ 0x0c, 0xc0, /* 000011001100 */
+ 0x1c, 0xe0, /* 000111001110 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 21 0x15 '^U' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x1f, 0x00, /* 000111110000 */
+ 0x31, 0x80, /* 001100011000 */
+ 0x31, 0x80, /* 001100011000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x1f, 0x00, /* 000111110000 */
+ 0x31, 0x80, /* 001100011000 */
+ 0x31, 0x80, /* 001100011000 */
+ 0x1f, 0x00, /* 000111110000 */
+ 0x01, 0x80, /* 000000011000 */
+ 0x01, 0x80, /* 000000011000 */
+ 0x31, 0x80, /* 001100011000 */
+ 0x31, 0x80, /* 001100011000 */
+ 0x1f, 0x00, /* 000111110000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 22 0x16 '^V' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 23 0x17 '^W' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x04, 0x00, /* 000001000000 */
+ 0x0e, 0x00, /* 000011100000 */
+ 0x1f, 0x00, /* 000111110000 */
+ 0x3f, 0x80, /* 001111111000 */
+ 0x7f, 0xc0, /* 011111111100 */
+ 0x0e, 0x00, /* 000011100000 */
+ 0x0e, 0x00, /* 000011100000 */
+ 0x7f, 0xc0, /* 011111111100 */
+ 0x3f, 0x80, /* 001111111000 */
+ 0x1f, 0x00, /* 000111110000 */
+ 0x0e, 0x00, /* 000011100000 */
+ 0x04, 0x00, /* 000001000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 24 0x18 '^X' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x04, 0x00, /* 000001000000 */
+ 0x0e, 0x00, /* 000011100000 */
+ 0x1f, 0x00, /* 000111110000 */
+ 0x3f, 0x80, /* 001111111000 */
+ 0x7f, 0xc0, /* 011111111100 */
+ 0x0e, 0x00, /* 000011100000 */
+ 0x0e, 0x00, /* 000011100000 */
+ 0x0e, 0x00, /* 000011100000 */
+ 0x0e, 0x00, /* 000011100000 */
+ 0x0e, 0x00, /* 000011100000 */
+ 0x0e, 0x00, /* 000011100000 */
+ 0x0e, 0x00, /* 000011100000 */
+ 0x0e, 0x00, /* 000011100000 */
+ 0x0e, 0x00, /* 000011100000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 25 0x19 '^Y' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x0e, 0x00, /* 000011100000 */
+ 0x0e, 0x00, /* 000011100000 */
+ 0x0e, 0x00, /* 000011100000 */
+ 0x0e, 0x00, /* 000011100000 */
+ 0x0e, 0x00, /* 000011100000 */
+ 0x0e, 0x00, /* 000011100000 */
+ 0x0e, 0x00, /* 000011100000 */
+ 0x0e, 0x00, /* 000011100000 */
+ 0x0e, 0x00, /* 000011100000 */
+ 0x0e, 0x00, /* 000011100000 */
+ 0x7f, 0xc0, /* 011111111100 */
+ 0x3f, 0x80, /* 001111111000 */
+ 0x1f, 0x00, /* 000111110000 */
+ 0x0e, 0x00, /* 000011100000 */
+ 0x04, 0x00, /* 000001000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 26 0x1a '^Z' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x08, 0x00, /* 000010000000 */
+ 0x18, 0x00, /* 000110000000 */
+ 0x38, 0x00, /* 001110000000 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0xff, 0xe0, /* 111111111110 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x38, 0x00, /* 001110000000 */
+ 0x18, 0x00, /* 000110000000 */
+ 0x08, 0x00, /* 000010000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 27 0x1b '^[' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x01, 0x00, /* 000000010000 */
+ 0x01, 0x80, /* 000000011000 */
+ 0x01, 0xc0, /* 000000011100 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x7f, 0xf0, /* 011111111111 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x01, 0xc0, /* 000000011100 */
+ 0x01, 0x80, /* 000000011000 */
+ 0x01, 0x00, /* 000000010000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 28 0x1c '^\' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x3f, 0xe0, /* 001111111110 */
+ 0x3f, 0xe0, /* 001111111110 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 29 0x1d '^]' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x09, 0x00, /* 000010010000 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x39, 0xc0, /* 001110011100 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x39, 0xc0, /* 001110011100 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x09, 0x00, /* 000010010000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 30 0x1e '^^' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x04, 0x00, /* 000001000000 */
+ 0x04, 0x00, /* 000001000000 */
+ 0x0e, 0x00, /* 000011100000 */
+ 0x0e, 0x00, /* 000011100000 */
+ 0x1f, 0x00, /* 000111110000 */
+ 0x1f, 0x00, /* 000111110000 */
+ 0x3f, 0x80, /* 001111111000 */
+ 0x3f, 0x80, /* 001111111000 */
+ 0x7f, 0xc0, /* 011111111100 */
+ 0x7f, 0xc0, /* 011111111100 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 31 0x1f '^_' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x7f, 0xc0, /* 011111111100 */
+ 0x7f, 0xc0, /* 011111111100 */
+ 0x3f, 0x80, /* 001111111000 */
+ 0x3f, 0x80, /* 001111111000 */
+ 0x1f, 0x00, /* 000111110000 */
+ 0x1f, 0x00, /* 000111110000 */
+ 0x0e, 0x00, /* 000011100000 */
+ 0x0e, 0x00, /* 000011100000 */
+ 0x04, 0x00, /* 000001000000 */
+ 0x04, 0x00, /* 000001000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 32 0x20 ' ' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 33 0x21 '!' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 34 0x22 '"' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 35 0x23 '#' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x03, 0x30, /* 000000110011 */
+ 0x03, 0x30, /* 000000110011 */
+ 0x03, 0x30, /* 000000110011 */
+ 0x06, 0x60, /* 000001100110 */
+ 0x1f, 0xf0, /* 000111111111 */
+ 0x1f, 0xf0, /* 000111111111 */
+ 0x0c, 0xc0, /* 000011001100 */
+ 0x0c, 0xc0, /* 000011001100 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x7f, 0xc0, /* 011111111100 */
+ 0x7f, 0xc0, /* 011111111100 */
+ 0x33, 0x00, /* 001100110000 */
+ 0x66, 0x00, /* 011001100000 */
+ 0x66, 0x00, /* 011001100000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 36 0x24 '$' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x1f, 0x80, /* 000111111000 */
+ 0x3f, 0xc0, /* 001111111100 */
+ 0x66, 0xe0, /* 011001101110 */
+ 0x66, 0x60, /* 011001100110 */
+ 0x66, 0x00, /* 011001100000 */
+ 0x3e, 0x00, /* 001111100000 */
+ 0x1f, 0x80, /* 000111111000 */
+ 0x07, 0xc0, /* 000001111100 */
+ 0x06, 0x60, /* 000001100110 */
+ 0x06, 0x60, /* 000001100110 */
+ 0x66, 0x60, /* 011001100110 */
+ 0x7f, 0xc0, /* 011111111100 */
+ 0x3f, 0x80, /* 001111111000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 37 0x25 '%' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x38, 0xc0, /* 001110001100 */
+ 0x4c, 0xc0, /* 010011001100 */
+ 0x45, 0x80, /* 010001011000 */
+ 0x65, 0x80, /* 011001011000 */
+ 0x3b, 0x00, /* 001110110000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x0d, 0xc0, /* 000011011100 */
+ 0x1a, 0x60, /* 000110100110 */
+ 0x1a, 0x20, /* 000110100010 */
+ 0x33, 0x20, /* 001100110010 */
+ 0x31, 0xc0, /* 001100011100 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 38 0x26 '&' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x07, 0x00, /* 000001110000 */
+ 0x0f, 0x80, /* 000011111000 */
+ 0x18, 0xc0, /* 000110001100 */
+ 0x18, 0xc0, /* 000110001100 */
+ 0x18, 0xc0, /* 000110001100 */
+ 0x0f, 0x80, /* 000011111000 */
+ 0x1e, 0x00, /* 000111100000 */
+ 0x3e, 0x00, /* 001111100000 */
+ 0x77, 0x00, /* 011101110000 */
+ 0x63, 0x60, /* 011000110110 */
+ 0x61, 0xe0, /* 011000011110 */
+ 0x61, 0xc0, /* 011000011100 */
+ 0x61, 0x80, /* 011000011000 */
+ 0x3f, 0xe0, /* 001111111110 */
+ 0x1e, 0x60, /* 000111100110 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 39 0x27 ''' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x1e, 0x00, /* 000111100000 */
+ 0x1e, 0x00, /* 000111100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x18, 0x00, /* 000110000000 */
+ 0x10, 0x00, /* 000100000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 40 0x28 '(' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x01, 0x80, /* 000000011000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x01, 0x80, /* 000000011000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 41 0x29 ')' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x18, 0x00, /* 000110000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x18, 0x00, /* 000110000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 42 0x2a '*' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x0f, 0x00, /* 000011110000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x66, 0x60, /* 011001100110 */
+ 0x76, 0xe0, /* 011101101110 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x76, 0xe0, /* 011101101110 */
+ 0x66, 0x60, /* 011001100110 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x0f, 0x00, /* 000011110000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 43 0x2b '+' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 44 0x2c ',' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x1e, 0x00, /* 000111100000 */
+ 0x1e, 0x00, /* 000111100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x18, 0x00, /* 000110000000 */
+ 0x10, 0x00, /* 000100000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 45 0x2d '-' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 46 0x2e '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x1e, 0x00, /* 000111100000 */
+ 0x1e, 0x00, /* 000111100000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 47 0x2f '/' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x60, /* 000000000110 */
+ 0x00, 0xc0, /* 000000001100 */
+ 0x00, 0xc0, /* 000000001100 */
+ 0x01, 0x80, /* 000000011000 */
+ 0x01, 0x80, /* 000000011000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x18, 0x00, /* 000110000000 */
+ 0x18, 0x00, /* 000110000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x60, 0x00, /* 011000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 48 0x30 '0' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x07, 0x00, /* 000001110000 */
+ 0x0f, 0x80, /* 000011111000 */
+ 0x11, 0x80, /* 000100011000 */
+ 0x10, 0xc0, /* 000100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0x80, /* 001100001000 */
+ 0x18, 0x80, /* 000110001000 */
+ 0x1f, 0x00, /* 000111110000 */
+ 0x0e, 0x00, /* 000011100000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 49 0x31 '1' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x02, 0x00, /* 000000100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x0e, 0x00, /* 000011100000 */
+ 0x1e, 0x00, /* 000111100000 */
+ 0x36, 0x00, /* 001101100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x3f, 0xc0, /* 001111111100 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 50 0x32 '2' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x1f, 0x00, /* 000111110000 */
+ 0x3f, 0x80, /* 001111111000 */
+ 0x61, 0xc0, /* 011000011100 */
+ 0x40, 0xc0, /* 010000001100 */
+ 0x00, 0xc0, /* 000000001100 */
+ 0x00, 0xc0, /* 000000001100 */
+ 0x00, 0xc0, /* 000000001100 */
+ 0x01, 0x80, /* 000000011000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x18, 0x00, /* 000110000000 */
+ 0x30, 0x20, /* 001100000010 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 51 0x33 '3' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x0f, 0x80, /* 000011111000 */
+ 0x1f, 0xc0, /* 000111111100 */
+ 0x20, 0xe0, /* 001000001110 */
+ 0x40, 0x60, /* 010000000110 */
+ 0x00, 0x60, /* 000000000110 */
+ 0x00, 0xe0, /* 000000001110 */
+ 0x07, 0xc0, /* 000001111100 */
+ 0x0f, 0xc0, /* 000011111100 */
+ 0x00, 0xe0, /* 000000001110 */
+ 0x00, 0x60, /* 000000000110 */
+ 0x00, 0x60, /* 000000000110 */
+ 0x40, 0x60, /* 010000000110 */
+ 0x60, 0x40, /* 011000000100 */
+ 0x3f, 0x80, /* 001111111000 */
+ 0x1f, 0x00, /* 000111110000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 52 0x34 '4' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x01, 0x80, /* 000000011000 */
+ 0x03, 0x80, /* 000000111000 */
+ 0x03, 0x80, /* 000000111000 */
+ 0x05, 0x80, /* 000001011000 */
+ 0x05, 0x80, /* 000001011000 */
+ 0x09, 0x80, /* 000010011000 */
+ 0x09, 0x80, /* 000010011000 */
+ 0x11, 0x80, /* 000100011000 */
+ 0x11, 0x80, /* 000100011000 */
+ 0x21, 0x80, /* 001000011000 */
+ 0x3f, 0xe0, /* 001111111110 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x01, 0x80, /* 000000011000 */
+ 0x01, 0x80, /* 000000011000 */
+ 0x01, 0x80, /* 000000011000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 53 0x35 '5' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x0f, 0xc0, /* 000011111100 */
+ 0x0f, 0xc0, /* 000011111100 */
+ 0x10, 0x00, /* 000100000000 */
+ 0x10, 0x00, /* 000100000000 */
+ 0x20, 0x00, /* 001000000000 */
+ 0x3f, 0x80, /* 001111111000 */
+ 0x31, 0xc0, /* 001100011100 */
+ 0x00, 0xe0, /* 000000001110 */
+ 0x00, 0x60, /* 000000000110 */
+ 0x00, 0x60, /* 000000000110 */
+ 0x00, 0x60, /* 000000000110 */
+ 0x40, 0x60, /* 010000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x1f, 0x80, /* 000111111000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 54 0x36 '6' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x07, 0x00, /* 000001110000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x18, 0x00, /* 000110000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x60, 0x00, /* 011000000000 */
+ 0x67, 0x80, /* 011001111000 */
+ 0x6f, 0xc0, /* 011011111100 */
+ 0x70, 0xe0, /* 011100001110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x70, 0x40, /* 011100000100 */
+ 0x3f, 0x80, /* 001111111000 */
+ 0x1f, 0x00, /* 000111110000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 55 0x37 '7' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x1f, 0xe0, /* 000111111110 */
+ 0x3f, 0xe0, /* 001111111110 */
+ 0x60, 0x40, /* 011000000100 */
+ 0x00, 0x40, /* 000000000100 */
+ 0x00, 0xc0, /* 000000001100 */
+ 0x00, 0x80, /* 000000001000 */
+ 0x00, 0x80, /* 000000001000 */
+ 0x01, 0x80, /* 000000011000 */
+ 0x01, 0x00, /* 000000010000 */
+ 0x01, 0x00, /* 000000010000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x02, 0x00, /* 000000100000 */
+ 0x02, 0x00, /* 000000100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x04, 0x00, /* 000001000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 56 0x38 '8' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x0f, 0x00, /* 000011110000 */
+ 0x11, 0x80, /* 000100011000 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x18, 0x80, /* 000110001000 */
+ 0x0d, 0x00, /* 000011010000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x0b, 0x00, /* 000010110000 */
+ 0x11, 0x80, /* 000100011000 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x18, 0x80, /* 000110001000 */
+ 0x0f, 0x00, /* 000011110000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 57 0x39 '9' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x0f, 0x80, /* 000011111000 */
+ 0x11, 0xc0, /* 000100011100 */
+ 0x20, 0xe0, /* 001000001110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x70, 0xe0, /* 011100001110 */
+ 0x3f, 0x60, /* 001111110110 */
+ 0x1e, 0x60, /* 000111100110 */
+ 0x00, 0x60, /* 000000000110 */
+ 0x00, 0xc0, /* 000000001100 */
+ 0x00, 0xc0, /* 000000001100 */
+ 0x01, 0x80, /* 000000011000 */
+ 0x07, 0x00, /* 000001110000 */
+ 0x3c, 0x00, /* 001111000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 58 0x3a ':' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x1e, 0x00, /* 000111100000 */
+ 0x1e, 0x00, /* 000111100000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x1e, 0x00, /* 000111100000 */
+ 0x1e, 0x00, /* 000111100000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 59 0x3b ';' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x1e, 0x00, /* 000111100000 */
+ 0x1e, 0x00, /* 000111100000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x1e, 0x00, /* 000111100000 */
+ 0x1e, 0x00, /* 000111100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x18, 0x00, /* 000110000000 */
+ 0x10, 0x00, /* 000100000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 60 0x3c '<' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x60, /* 000000000110 */
+ 0x01, 0xc0, /* 000000011100 */
+ 0x07, 0x00, /* 000001110000 */
+ 0x1c, 0x00, /* 000111000000 */
+ 0x70, 0x00, /* 011100000000 */
+ 0x70, 0x00, /* 011100000000 */
+ 0x1c, 0x00, /* 000111000000 */
+ 0x07, 0x00, /* 000001110000 */
+ 0x01, 0xc0, /* 000000011100 */
+ 0x00, 0x60, /* 000000000110 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 61 0x3d '=' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 62 0x3e '>' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x60, 0x00, /* 011000000000 */
+ 0x38, 0x00, /* 001110000000 */
+ 0x0e, 0x00, /* 000011100000 */
+ 0x03, 0x80, /* 000000111000 */
+ 0x00, 0xe0, /* 000000001110 */
+ 0x00, 0xe0, /* 000000001110 */
+ 0x03, 0x80, /* 000000111000 */
+ 0x0e, 0x00, /* 000011100000 */
+ 0x38, 0x00, /* 001110000000 */
+ 0x60, 0x00, /* 011000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 63 0x3f '?' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x0f, 0x00, /* 000011110000 */
+ 0x1f, 0x80, /* 000111111000 */
+ 0x39, 0xc0, /* 001110011100 */
+ 0x20, 0xc0, /* 001000001100 */
+ 0x00, 0xc0, /* 000000001100 */
+ 0x00, 0xc0, /* 000000001100 */
+ 0x01, 0x80, /* 000000011000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 64 0x40 '@' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x0f, 0x80, /* 000011111000 */
+ 0x3f, 0xc0, /* 001111111100 */
+ 0x30, 0x60, /* 001100000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x67, 0x20, /* 011001110010 */
+ 0x6f, 0xa0, /* 011011111010 */
+ 0x6c, 0xa0, /* 011011001010 */
+ 0x6c, 0xa0, /* 011011001010 */
+ 0x67, 0xe0, /* 011001111110 */
+ 0x60, 0x00, /* 011000000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x3f, 0xe0, /* 001111111110 */
+ 0x0f, 0xe0, /* 000011111110 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 65 0x41 'A' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x0b, 0x00, /* 000010110000 */
+ 0x0b, 0x00, /* 000010110000 */
+ 0x09, 0x00, /* 000010010000 */
+ 0x11, 0x80, /* 000100011000 */
+ 0x11, 0x80, /* 000100011000 */
+ 0x10, 0x80, /* 000100001000 */
+ 0x3f, 0xc0, /* 001111111100 */
+ 0x20, 0xc0, /* 001000001100 */
+ 0x20, 0x40, /* 001000000100 */
+ 0x40, 0x60, /* 010000000110 */
+ 0x40, 0x60, /* 010000000110 */
+ 0xe0, 0xf0, /* 111000001111 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 66 0x42 'B' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0xff, 0x00, /* 111111110000 */
+ 0x60, 0x80, /* 011000001000 */
+ 0x60, 0xc0, /* 011000001100 */
+ 0x60, 0xc0, /* 011000001100 */
+ 0x60, 0xc0, /* 011000001100 */
+ 0x61, 0x80, /* 011000011000 */
+ 0x7f, 0x80, /* 011111111000 */
+ 0x60, 0xc0, /* 011000001100 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0xc0, /* 011000001100 */
+ 0xff, 0x80, /* 111111111000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 67 0x43 'C' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x0f, 0xc0, /* 000011111100 */
+ 0x10, 0x60, /* 000100000110 */
+ 0x20, 0x20, /* 001000000010 */
+ 0x20, 0x00, /* 001000000000 */
+ 0x60, 0x00, /* 011000000000 */
+ 0x60, 0x00, /* 011000000000 */
+ 0x60, 0x00, /* 011000000000 */
+ 0x60, 0x00, /* 011000000000 */
+ 0x60, 0x00, /* 011000000000 */
+ 0x60, 0x00, /* 011000000000 */
+ 0x20, 0x00, /* 001000000000 */
+ 0x30, 0x20, /* 001100000010 */
+ 0x18, 0x40, /* 000110000100 */
+ 0x0f, 0x80, /* 000011111000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 68 0x44 'D' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0xff, 0x00, /* 111111110000 */
+ 0x61, 0xc0, /* 011000011100 */
+ 0x60, 0xc0, /* 011000001100 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x40, /* 011000000100 */
+ 0x61, 0x80, /* 011000011000 */
+ 0xfe, 0x00, /* 111111100000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 69 0x45 'E' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x7f, 0xc0, /* 011111111100 */
+ 0x30, 0x40, /* 001100000100 */
+ 0x30, 0x40, /* 001100000100 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x30, 0x80, /* 001100001000 */
+ 0x3f, 0x80, /* 001111111000 */
+ 0x30, 0x80, /* 001100001000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x30, 0x20, /* 001100000010 */
+ 0x30, 0x20, /* 001100000010 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 70 0x46 'F' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x7f, 0xc0, /* 011111111100 */
+ 0x30, 0x40, /* 001100000100 */
+ 0x30, 0x40, /* 001100000100 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x30, 0x80, /* 001100001000 */
+ 0x3f, 0x80, /* 001111111000 */
+ 0x30, 0x80, /* 001100001000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x78, 0x00, /* 011110000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 71 0x47 'G' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x0f, 0xc0, /* 000011111100 */
+ 0x10, 0x60, /* 000100000110 */
+ 0x20, 0x20, /* 001000000010 */
+ 0x20, 0x00, /* 001000000000 */
+ 0x60, 0x00, /* 011000000000 */
+ 0x60, 0x00, /* 011000000000 */
+ 0x60, 0x00, /* 011000000000 */
+ 0x60, 0x00, /* 011000000000 */
+ 0x61, 0xf0, /* 011000011111 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x20, 0x60, /* 001000000110 */
+ 0x30, 0x60, /* 001100000110 */
+ 0x18, 0x60, /* 000110000110 */
+ 0x0f, 0x80, /* 000011111000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 72 0x48 'H' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0xf0, 0xf0, /* 111100001111 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0xf0, 0xf0, /* 111100001111 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 73 0x49 'I' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x1f, 0x80, /* 000111111000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x1f, 0x80, /* 000111111000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 74 0x4a 'J' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x1f, 0x80, /* 000111111000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x04, 0x00, /* 000001000000 */
+ 0x38, 0x00, /* 001110000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 75 0x4b 'K' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0xf0, 0xe0, /* 111100001110 */
+ 0x61, 0x80, /* 011000011000 */
+ 0x63, 0x00, /* 011000110000 */
+ 0x66, 0x00, /* 011001100000 */
+ 0x6c, 0x00, /* 011011000000 */
+ 0x78, 0x00, /* 011110000000 */
+ 0x78, 0x00, /* 011110000000 */
+ 0x7c, 0x00, /* 011111000000 */
+ 0x6e, 0x00, /* 011011100000 */
+ 0x67, 0x00, /* 011001110000 */
+ 0x63, 0x80, /* 011000111000 */
+ 0x61, 0xc0, /* 011000011100 */
+ 0x60, 0xe0, /* 011000001110 */
+ 0xf0, 0x70, /* 111100000111 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 76 0x4c 'L' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x78, 0x00, /* 011110000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x30, 0x20, /* 001100000010 */
+ 0x30, 0x20, /* 001100000010 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 77 0x4d 'M' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0xe0, 0x70, /* 111000000111 */
+ 0x60, 0xe0, /* 011000001110 */
+ 0x70, 0xe0, /* 011100001110 */
+ 0x70, 0xe0, /* 011100001110 */
+ 0x70, 0xe0, /* 011100001110 */
+ 0x59, 0x60, /* 010110010110 */
+ 0x59, 0x60, /* 010110010110 */
+ 0x59, 0x60, /* 010110010110 */
+ 0x4d, 0x60, /* 010011010110 */
+ 0x4e, 0x60, /* 010011100110 */
+ 0x4e, 0x60, /* 010011100110 */
+ 0x44, 0x60, /* 010001000110 */
+ 0x44, 0x60, /* 010001000110 */
+ 0xe4, 0xf0, /* 111001001111 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 78 0x4e 'N' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0xc0, 0x70, /* 110000000111 */
+ 0x60, 0x20, /* 011000000010 */
+ 0x70, 0x20, /* 011100000010 */
+ 0x78, 0x20, /* 011110000010 */
+ 0x58, 0x20, /* 010110000010 */
+ 0x4c, 0x20, /* 010011000010 */
+ 0x46, 0x20, /* 010001100010 */
+ 0x47, 0x20, /* 010001110010 */
+ 0x43, 0x20, /* 010000110010 */
+ 0x41, 0xa0, /* 010000011010 */
+ 0x40, 0xe0, /* 010000001110 */
+ 0x40, 0xe0, /* 010000001110 */
+ 0x40, 0x60, /* 010000000110 */
+ 0xe0, 0x30, /* 111000000011 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 79 0x4f 'O' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x0f, 0x00, /* 000011110000 */
+ 0x11, 0xc0, /* 000100011100 */
+ 0x20, 0xc0, /* 001000001100 */
+ 0x20, 0x60, /* 001000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x20, 0x40, /* 001000000100 */
+ 0x30, 0x40, /* 001100000100 */
+ 0x18, 0x80, /* 000110001000 */
+ 0x0f, 0x00, /* 000011110000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 80 0x50 'P' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x7f, 0x80, /* 011111111000 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0x60, /* 001100000110 */
+ 0x30, 0x60, /* 001100000110 */
+ 0x30, 0x60, /* 001100000110 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x37, 0x80, /* 001101111000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x78, 0x00, /* 011110000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 81 0x51 'Q' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x0f, 0x00, /* 000011110000 */
+ 0x11, 0xc0, /* 000100011100 */
+ 0x20, 0xc0, /* 001000001100 */
+ 0x20, 0x60, /* 001000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x30, 0x40, /* 001100000100 */
+ 0x38, 0x40, /* 001110000100 */
+ 0x1f, 0x80, /* 000111111000 */
+ 0x0e, 0x00, /* 000011100000 */
+ 0x1f, 0x00, /* 000111110000 */
+ 0x23, 0x90, /* 001000111001 */
+ 0x01, 0xe0, /* 000000011110 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 82 0x52 'R' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0xff, 0x00, /* 111111110000 */
+ 0x61, 0x80, /* 011000011000 */
+ 0x60, 0xc0, /* 011000001100 */
+ 0x60, 0xc0, /* 011000001100 */
+ 0x60, 0xc0, /* 011000001100 */
+ 0x60, 0x80, /* 011000001000 */
+ 0x7f, 0x00, /* 011111110000 */
+ 0x7c, 0x00, /* 011111000000 */
+ 0x6e, 0x00, /* 011011100000 */
+ 0x67, 0x00, /* 011001110000 */
+ 0x63, 0x80, /* 011000111000 */
+ 0x61, 0xc0, /* 011000011100 */
+ 0x60, 0xe0, /* 011000001110 */
+ 0xf0, 0x70, /* 111100000111 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 83 0x53 'S' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x1f, 0xe0, /* 000111111110 */
+ 0x30, 0x60, /* 001100000110 */
+ 0x60, 0x20, /* 011000000010 */
+ 0x60, 0x20, /* 011000000010 */
+ 0x70, 0x00, /* 011100000000 */
+ 0x3c, 0x00, /* 001111000000 */
+ 0x1e, 0x00, /* 000111100000 */
+ 0x07, 0x80, /* 000001111000 */
+ 0x01, 0xc0, /* 000000011100 */
+ 0x00, 0xe0, /* 000000001110 */
+ 0x40, 0x60, /* 010000000110 */
+ 0x40, 0x60, /* 010000000110 */
+ 0x60, 0xc0, /* 011000001100 */
+ 0x7f, 0x80, /* 011111111000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 84 0x54 'T' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x46, 0x20, /* 010001100010 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x1f, 0x80, /* 000111111000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 85 0x55 'U' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0xf0, 0x70, /* 111100000111 */
+ 0x60, 0x20, /* 011000000010 */
+ 0x60, 0x20, /* 011000000010 */
+ 0x60, 0x20, /* 011000000010 */
+ 0x60, 0x20, /* 011000000010 */
+ 0x60, 0x20, /* 011000000010 */
+ 0x60, 0x20, /* 011000000010 */
+ 0x60, 0x20, /* 011000000010 */
+ 0x60, 0x20, /* 011000000010 */
+ 0x60, 0x20, /* 011000000010 */
+ 0x60, 0x20, /* 011000000010 */
+ 0x70, 0x40, /* 011100000100 */
+ 0x3f, 0xc0, /* 001111111100 */
+ 0x1f, 0x80, /* 000111111000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 86 0x56 'V' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0xe0, 0xe0, /* 111000001110 */
+ 0x60, 0x40, /* 011000000100 */
+ 0x30, 0x80, /* 001100001000 */
+ 0x30, 0x80, /* 001100001000 */
+ 0x30, 0x80, /* 001100001000 */
+ 0x19, 0x00, /* 000110010000 */
+ 0x19, 0x00, /* 000110010000 */
+ 0x19, 0x00, /* 000110010000 */
+ 0x0a, 0x00, /* 000010100000 */
+ 0x0e, 0x00, /* 000011100000 */
+ 0x0e, 0x00, /* 000011100000 */
+ 0x04, 0x00, /* 000001000000 */
+ 0x04, 0x00, /* 000001000000 */
+ 0x04, 0x00, /* 000001000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 87 0x57 'W' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0xfe, 0xf0, /* 111111101111 */
+ 0x66, 0x20, /* 011001100010 */
+ 0x66, 0x20, /* 011001100010 */
+ 0x66, 0x20, /* 011001100010 */
+ 0x76, 0x20, /* 011101100010 */
+ 0x77, 0x40, /* 011101110100 */
+ 0x33, 0x40, /* 001100110100 */
+ 0x37, 0x40, /* 001101110100 */
+ 0x3b, 0xc0, /* 001110111100 */
+ 0x3b, 0x80, /* 001110111000 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 88 0x58 'X' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0xf0, 0x70, /* 111100000111 */
+ 0x60, 0x20, /* 011000000010 */
+ 0x30, 0x40, /* 001100000100 */
+ 0x38, 0x80, /* 001110001000 */
+ 0x18, 0x80, /* 000110001000 */
+ 0x0d, 0x00, /* 000011010000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x0b, 0x00, /* 000010110000 */
+ 0x11, 0x80, /* 000100011000 */
+ 0x11, 0xc0, /* 000100011100 */
+ 0x20, 0xc0, /* 001000001100 */
+ 0x40, 0x60, /* 010000000110 */
+ 0xe0, 0xf0, /* 111000001111 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 89 0x59 'Y' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0xf0, 0x70, /* 111100000111 */
+ 0x60, 0x20, /* 011000000010 */
+ 0x30, 0x40, /* 001100000100 */
+ 0x18, 0x80, /* 000110001000 */
+ 0x18, 0x80, /* 000110001000 */
+ 0x0d, 0x00, /* 000011010000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x0f, 0x00, /* 000011110000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 90 0x5a 'Z' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x3f, 0xe0, /* 001111111110 */
+ 0x20, 0xc0, /* 001000001100 */
+ 0x00, 0xc0, /* 000000001100 */
+ 0x01, 0x80, /* 000000011000 */
+ 0x01, 0x80, /* 000000011000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x18, 0x00, /* 000110000000 */
+ 0x18, 0x20, /* 000110000010 */
+ 0x3f, 0xe0, /* 001111111110 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 91 0x5b '[' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x0f, 0x80, /* 000011111000 */
+ 0x0f, 0x80, /* 000011111000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x0f, 0x80, /* 000011111000 */
+ 0x0f, 0x80, /* 000011111000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 92 0x5c '\' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x60, 0x00, /* 011000000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x18, 0x00, /* 000110000000 */
+ 0x18, 0x00, /* 000110000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x01, 0x80, /* 000000011000 */
+ 0x01, 0x80, /* 000000011000 */
+ 0x00, 0xc0, /* 000000001100 */
+ 0x00, 0xc0, /* 000000001100 */
+ 0x00, 0x60, /* 000000000110 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 93 0x5d ']' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x1f, 0x00, /* 000111110000 */
+ 0x1f, 0x00, /* 000111110000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x1f, 0x00, /* 000111110000 */
+ 0x1f, 0x00, /* 000111110000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 94 0x5e '^' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x04, 0x00, /* 000001000000 */
+ 0x0e, 0x00, /* 000011100000 */
+ 0x1b, 0x00, /* 000110110000 */
+ 0x31, 0x80, /* 001100011000 */
+ 0x60, 0xc0, /* 011000001100 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 95 0x5f '_' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 96 0x60 '`' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x01, 0x00, /* 000000010000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x07, 0x80, /* 000001111000 */
+ 0x07, 0x80, /* 000001111000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 97 0x61 'a' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x0f, 0x80, /* 000011111000 */
+ 0x18, 0xc0, /* 000110001100 */
+ 0x10, 0xc0, /* 000100001100 */
+ 0x03, 0xc0, /* 000000111100 */
+ 0x1c, 0xc0, /* 000111001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x39, 0xc0, /* 001110011100 */
+ 0x1e, 0xe0, /* 000111101110 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 98 0x62 'b' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x20, 0x00, /* 001000000000 */
+ 0x60, 0x00, /* 011000000000 */
+ 0xe0, 0x00, /* 111000000000 */
+ 0x60, 0x00, /* 011000000000 */
+ 0x60, 0x00, /* 011000000000 */
+ 0x67, 0x80, /* 011001111000 */
+ 0x6f, 0xc0, /* 011011111100 */
+ 0x70, 0xe0, /* 011100001110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x70, 0x60, /* 011100000110 */
+ 0x78, 0xc0, /* 011110001100 */
+ 0x4f, 0x80, /* 010011111000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 99 0x63 'c' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x1f, 0x80, /* 000111111000 */
+ 0x31, 0xc0, /* 001100011100 */
+ 0x20, 0xc0, /* 001000001100 */
+ 0x60, 0x00, /* 011000000000 */
+ 0x60, 0x00, /* 011000000000 */
+ 0x60, 0x00, /* 011000000000 */
+ 0x60, 0x00, /* 011000000000 */
+ 0x70, 0x40, /* 011100000100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x1f, 0x80, /* 000111111000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 100 0x64 'd' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x60, /* 000000000110 */
+ 0x00, 0xe0, /* 000000001110 */
+ 0x00, 0x60, /* 000000000110 */
+ 0x00, 0x60, /* 000000000110 */
+ 0x00, 0x60, /* 000000000110 */
+ 0x0f, 0x60, /* 000011110110 */
+ 0x31, 0xe0, /* 001100011110 */
+ 0x20, 0xe0, /* 001000001110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x70, 0xe0, /* 011100001110 */
+ 0x39, 0x60, /* 001110010110 */
+ 0x1e, 0x70, /* 000111100111 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 101 0x65 'e' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x0f, 0x00, /* 000011110000 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x60, 0x00, /* 011000000000 */
+ 0x60, 0x00, /* 011000000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x18, 0x60, /* 000110000110 */
+ 0x0f, 0x80, /* 000011111000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 102 0x66 'f' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x03, 0x80, /* 000000111000 */
+ 0x04, 0xc0, /* 000001001100 */
+ 0x04, 0xc0, /* 000001001100 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x3f, 0x80, /* 001111111000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x1e, 0x00, /* 000111100000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 103 0x67 'g' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x1f, 0x20, /* 000111110010 */
+ 0x31, 0xe0, /* 001100011110 */
+ 0x60, 0xc0, /* 011000001100 */
+ 0x60, 0xc0, /* 011000001100 */
+ 0x60, 0xc0, /* 011000001100 */
+ 0x31, 0x80, /* 001100011000 */
+ 0x3f, 0x00, /* 001111110000 */
+ 0x60, 0x00, /* 011000000000 */
+ 0x7f, 0xc0, /* 011111111100 */
+ 0x3f, 0xe0, /* 001111111110 */
+ 0x20, 0x60, /* 001000000110 */
+ 0x40, 0x20, /* 010000000010 */
+ 0x40, 0x20, /* 010000000010 */
+ 0x7f, 0xc0, /* 011111111100 */
+ 0x3f, 0x80, /* 001111111000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 104 0x68 'h' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x10, 0x00, /* 000100000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x70, 0x00, /* 011100000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x37, 0x80, /* 001101111000 */
+ 0x39, 0xc0, /* 001110011100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x79, 0xe0, /* 011110011110 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 105 0x69 'i' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x1e, 0x00, /* 000111100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x1f, 0x80, /* 000111111000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 106 0x6a 'j' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0xc0, /* 000000001100 */
+ 0x00, 0xc0, /* 000000001100 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x03, 0xc0, /* 000000111100 */
+ 0x00, 0xc0, /* 000000001100 */
+ 0x00, 0xc0, /* 000000001100 */
+ 0x00, 0xc0, /* 000000001100 */
+ 0x00, 0xc0, /* 000000001100 */
+ 0x00, 0xc0, /* 000000001100 */
+ 0x00, 0xc0, /* 000000001100 */
+ 0x00, 0xc0, /* 000000001100 */
+ 0x00, 0xc0, /* 000000001100 */
+ 0x00, 0xc0, /* 000000001100 */
+ 0x20, 0xc0, /* 001000001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x38, 0x80, /* 001110001000 */
+ 0x1f, 0x00, /* 000111110000 */
+ 0x0e, 0x00, /* 000011100000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 107 0x6b 'k' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x60, 0x00, /* 011000000000 */
+ 0xe0, 0x00, /* 111000000000 */
+ 0x60, 0x00, /* 011000000000 */
+ 0x60, 0x00, /* 011000000000 */
+ 0x60, 0x00, /* 011000000000 */
+ 0x61, 0xc0, /* 011000011100 */
+ 0x63, 0x00, /* 011000110000 */
+ 0x66, 0x00, /* 011001100000 */
+ 0x7c, 0x00, /* 011111000000 */
+ 0x78, 0x00, /* 011110000000 */
+ 0x7c, 0x00, /* 011111000000 */
+ 0x6e, 0x00, /* 011011100000 */
+ 0x67, 0x00, /* 011001110000 */
+ 0x63, 0x80, /* 011000111000 */
+ 0xf1, 0xe0, /* 111100011110 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 108 0x6c 'l' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x1e, 0x00, /* 000111100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x1f, 0x80, /* 000111111000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 109 0x6d 'm' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0xdd, 0xc0, /* 110111011100 */
+ 0x6e, 0xe0, /* 011011101110 */
+ 0x66, 0x60, /* 011001100110 */
+ 0x66, 0x60, /* 011001100110 */
+ 0x66, 0x60, /* 011001100110 */
+ 0x66, 0x60, /* 011001100110 */
+ 0x66, 0x60, /* 011001100110 */
+ 0x66, 0x60, /* 011001100110 */
+ 0x66, 0x60, /* 011001100110 */
+ 0xef, 0x70, /* 111011110111 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 110 0x6e 'n' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x27, 0x80, /* 001001111000 */
+ 0x79, 0xc0, /* 011110011100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x79, 0xe0, /* 011110011110 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 111 0x6f 'o' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x0f, 0x80, /* 000011111000 */
+ 0x11, 0xc0, /* 000100011100 */
+ 0x20, 0xe0, /* 001000001110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x70, 0x40, /* 011100000100 */
+ 0x38, 0x80, /* 001110001000 */
+ 0x1f, 0x00, /* 000111110000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 112 0x70 'p' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0xef, 0x80, /* 111011111000 */
+ 0x71, 0xc0, /* 011100011100 */
+ 0x60, 0xe0, /* 011000001110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x40, /* 011000000100 */
+ 0x70, 0x80, /* 011100001000 */
+ 0x7f, 0x00, /* 011111110000 */
+ 0x60, 0x00, /* 011000000000 */
+ 0x60, 0x00, /* 011000000000 */
+ 0x60, 0x00, /* 011000000000 */
+ 0x60, 0x00, /* 011000000000 */
+ 0xf0, 0x00, /* 111100000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 113 0x71 'q' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x0f, 0x20, /* 000011110010 */
+ 0x11, 0xe0, /* 000100011110 */
+ 0x20, 0xe0, /* 001000001110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x70, 0x60, /* 011100000110 */
+ 0x38, 0xe0, /* 001110001110 */
+ 0x1f, 0xe0, /* 000111111110 */
+ 0x00, 0x60, /* 000000000110 */
+ 0x00, 0x60, /* 000000000110 */
+ 0x00, 0x60, /* 000000000110 */
+ 0x00, 0x60, /* 000000000110 */
+ 0x00, 0xf0, /* 000000001111 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 114 0x72 'r' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x73, 0x80, /* 011100111000 */
+ 0x34, 0xc0, /* 001101001100 */
+ 0x38, 0xc0, /* 001110001100 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x78, 0x00, /* 011110000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 115 0x73 's' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x1f, 0xc0, /* 000111111100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0x40, /* 001100000100 */
+ 0x38, 0x00, /* 001110000000 */
+ 0x1e, 0x00, /* 000111100000 */
+ 0x07, 0x80, /* 000001111000 */
+ 0x01, 0xc0, /* 000000011100 */
+ 0x20, 0xc0, /* 001000001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x3f, 0x80, /* 001111111000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 116 0x74 't' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x04, 0x00, /* 000001000000 */
+ 0x04, 0x00, /* 000001000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x7f, 0xc0, /* 011111111100 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x0c, 0x20, /* 000011000010 */
+ 0x0e, 0x40, /* 000011100100 */
+ 0x07, 0x80, /* 000001111000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 117 0x75 'u' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x79, 0xe0, /* 011110011110 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x39, 0xc0, /* 001110011100 */
+ 0x1e, 0x60, /* 000111100110 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 118 0x76 'v' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0xf0, 0x70, /* 111100000111 */
+ 0x60, 0x20, /* 011000000010 */
+ 0x30, 0x40, /* 001100000100 */
+ 0x30, 0x40, /* 001100000100 */
+ 0x18, 0x80, /* 000110001000 */
+ 0x18, 0x80, /* 000110001000 */
+ 0x0d, 0x00, /* 000011010000 */
+ 0x0d, 0x00, /* 000011010000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 119 0x77 'w' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0xff, 0x70, /* 111111110111 */
+ 0x66, 0x20, /* 011001100010 */
+ 0x66, 0x20, /* 011001100010 */
+ 0x66, 0x20, /* 011001100010 */
+ 0x37, 0x40, /* 001101110100 */
+ 0x3b, 0x40, /* 001110110100 */
+ 0x3b, 0x40, /* 001110110100 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 120 0x78 'x' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0xf8, 0xf0, /* 111110001111 */
+ 0x70, 0x40, /* 011100000100 */
+ 0x38, 0x80, /* 001110001000 */
+ 0x1d, 0x00, /* 000111010000 */
+ 0x0e, 0x00, /* 000011100000 */
+ 0x07, 0x00, /* 000001110000 */
+ 0x0b, 0x80, /* 000010111000 */
+ 0x11, 0xc0, /* 000100011100 */
+ 0x20, 0xe0, /* 001000001110 */
+ 0xf1, 0xf0, /* 111100011111 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 121 0x79 'y' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0xf0, 0xf0, /* 111100001111 */
+ 0x60, 0x20, /* 011000000010 */
+ 0x30, 0x40, /* 001100000100 */
+ 0x30, 0x40, /* 001100000100 */
+ 0x18, 0x80, /* 000110001000 */
+ 0x18, 0x80, /* 000110001000 */
+ 0x0d, 0x00, /* 000011010000 */
+ 0x0d, 0x00, /* 000011010000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x04, 0x00, /* 000001000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x08, 0x00, /* 000010000000 */
+ 0x78, 0x00, /* 011110000000 */
+ 0x70, 0x00, /* 011100000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 122 0x7a 'z' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x60, 0xe0, /* 011000001110 */
+ 0x41, 0xc0, /* 010000011100 */
+ 0x03, 0x80, /* 000000111000 */
+ 0x07, 0x00, /* 000001110000 */
+ 0x0e, 0x00, /* 000011100000 */
+ 0x1c, 0x00, /* 000111000000 */
+ 0x38, 0x20, /* 001110000010 */
+ 0x70, 0x60, /* 011100000110 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 123 0x7b '{' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x03, 0x80, /* 000000111000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x38, 0x00, /* 001110000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x03, 0x80, /* 000000111000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 124 0x7c '|' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 125 0x7d '}' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x1c, 0x00, /* 000111000000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x01, 0xc0, /* 000000011100 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x1c, 0x00, /* 000111000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 126 0x7e '~' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x1c, 0x20, /* 000111000010 */
+ 0x3e, 0x60, /* 001111100110 */
+ 0x67, 0xc0, /* 011001111100 */
+ 0x43, 0x80, /* 010000111000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 127 0x7f '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 128 0x80 '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x0f, 0xc0, /* 000011111100 */
+ 0x10, 0x60, /* 000100000110 */
+ 0x20, 0x20, /* 001000000010 */
+ 0x20, 0x00, /* 001000000000 */
+ 0x60, 0x00, /* 011000000000 */
+ 0x60, 0x00, /* 011000000000 */
+ 0x60, 0x00, /* 011000000000 */
+ 0x60, 0x00, /* 011000000000 */
+ 0x60, 0x00, /* 011000000000 */
+ 0x60, 0x00, /* 011000000000 */
+ 0x20, 0x00, /* 001000000000 */
+ 0x30, 0x20, /* 001100000010 */
+ 0x18, 0x40, /* 000110000100 */
+ 0x0f, 0x80, /* 000011111000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x01, 0x80, /* 000000011000 */
+ 0x0f, 0x00, /* 000011110000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 129 0x81 '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x79, 0xe0, /* 011110011110 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x39, 0xc0, /* 001110011100 */
+ 0x1e, 0x60, /* 000111100110 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 130 0x82 '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x01, 0x80, /* 000000011000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x0f, 0x00, /* 000011110000 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x60, 0x00, /* 011000000000 */
+ 0x60, 0x00, /* 011000000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x18, 0x60, /* 000110000110 */
+ 0x0f, 0x80, /* 000011111000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 131 0x83 '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x02, 0x00, /* 000000100000 */
+ 0x07, 0x00, /* 000001110000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x18, 0xc0, /* 000110001100 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x0f, 0x80, /* 000011111000 */
+ 0x18, 0xc0, /* 000110001100 */
+ 0x10, 0xc0, /* 000100001100 */
+ 0x03, 0xc0, /* 000000111100 */
+ 0x1c, 0xc0, /* 000111001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x39, 0xc0, /* 001110011100 */
+ 0x1e, 0xe0, /* 000111101110 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 132 0x84 '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x0f, 0x80, /* 000011111000 */
+ 0x18, 0xc0, /* 000110001100 */
+ 0x10, 0xc0, /* 000100001100 */
+ 0x03, 0xc0, /* 000000111100 */
+ 0x1c, 0xc0, /* 000111001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x39, 0xc0, /* 001110011100 */
+ 0x1e, 0xe0, /* 000111101110 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 133 0x85 '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x0f, 0x80, /* 000011111000 */
+ 0x18, 0xc0, /* 000110001100 */
+ 0x10, 0xc0, /* 000100001100 */
+ 0x03, 0xc0, /* 000000111100 */
+ 0x1c, 0xc0, /* 000111001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x39, 0xc0, /* 001110011100 */
+ 0x1e, 0xe0, /* 000111101110 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 134 0x86 '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x07, 0x00, /* 000001110000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x07, 0x00, /* 000001110000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x0f, 0x80, /* 000011111000 */
+ 0x18, 0xc0, /* 000110001100 */
+ 0x10, 0xc0, /* 000100001100 */
+ 0x03, 0xc0, /* 000000111100 */
+ 0x1c, 0xc0, /* 000111001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x39, 0xc0, /* 001110011100 */
+ 0x1e, 0xe0, /* 000111101110 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 135 0x87 '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x1f, 0x80, /* 000111111000 */
+ 0x31, 0xc0, /* 001100011100 */
+ 0x20, 0xc0, /* 001000001100 */
+ 0x60, 0x00, /* 011000000000 */
+ 0x60, 0x00, /* 011000000000 */
+ 0x60, 0x00, /* 011000000000 */
+ 0x60, 0x00, /* 011000000000 */
+ 0x70, 0x40, /* 011100000100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x1f, 0x80, /* 000111111000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x01, 0x80, /* 000000011000 */
+ 0x0f, 0x00, /* 000011110000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 136 0x88 '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x02, 0x00, /* 000000100000 */
+ 0x07, 0x00, /* 000001110000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x18, 0xc0, /* 000110001100 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x0f, 0x00, /* 000011110000 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x60, 0x00, /* 011000000000 */
+ 0x60, 0x00, /* 011000000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x18, 0x60, /* 000110000110 */
+ 0x0f, 0x80, /* 000011111000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 137 0x89 '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x0f, 0x00, /* 000011110000 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x60, 0x00, /* 011000000000 */
+ 0x60, 0x00, /* 011000000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x18, 0x60, /* 000110000110 */
+ 0x0f, 0x80, /* 000011111000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 138 0x8a '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x0f, 0x00, /* 000011110000 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x60, 0x00, /* 011000000000 */
+ 0x60, 0x00, /* 011000000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x18, 0x60, /* 000110000110 */
+ 0x0f, 0x80, /* 000011111000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 139 0x8b '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x1e, 0x00, /* 000111100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x1f, 0x80, /* 000111111000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 140 0x8c '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x04, 0x00, /* 000001000000 */
+ 0x0e, 0x00, /* 000011100000 */
+ 0x1b, 0x00, /* 000110110000 */
+ 0x31, 0x80, /* 001100011000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x1e, 0x00, /* 000111100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x1f, 0x80, /* 000111111000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 141 0x8d '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x18, 0x00, /* 000110000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x1e, 0x00, /* 000111100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x1f, 0x80, /* 000111111000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 142 0x8e '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x04, 0x00, /* 000001000000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x0b, 0x00, /* 000010110000 */
+ 0x0b, 0x00, /* 000010110000 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x11, 0x80, /* 000100011000 */
+ 0x3f, 0xc0, /* 001111111100 */
+ 0x20, 0xc0, /* 001000001100 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x40, 0x60, /* 010000000110 */
+ 0xe0, 0xf0, /* 111000001111 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 143 0x8f '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x0f, 0x00, /* 000011110000 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x0f, 0x00, /* 000011110000 */
+ 0x04, 0x00, /* 000001000000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x0b, 0x00, /* 000010110000 */
+ 0x0b, 0x00, /* 000010110000 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x11, 0x80, /* 000100011000 */
+ 0x3f, 0xc0, /* 001111111100 */
+ 0x20, 0xc0, /* 001000001100 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x40, 0x60, /* 010000000110 */
+ 0xe0, 0xf0, /* 111000001111 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 144 0x90 '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x08, 0x00, /* 000010000000 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x30, 0x20, /* 001100000010 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x30, 0x80, /* 001100001000 */
+ 0x3f, 0x80, /* 001111111000 */
+ 0x30, 0x80, /* 001100001000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x30, 0x20, /* 001100000010 */
+ 0x30, 0x20, /* 001100000010 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 145 0x91 '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x3d, 0xe0, /* 001111011110 */
+ 0x66, 0x30, /* 011001100011 */
+ 0x46, 0x30, /* 010001100011 */
+ 0x06, 0x30, /* 000001100011 */
+ 0x3f, 0xf0, /* 001111111111 */
+ 0x66, 0x00, /* 011001100000 */
+ 0xc6, 0x00, /* 110001100000 */
+ 0xc6, 0x00, /* 110001100000 */
+ 0xe7, 0x30, /* 111001110011 */
+ 0x7d, 0xe0, /* 011111011110 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 146 0x92 '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x03, 0xf0, /* 000000111111 */
+ 0x07, 0x10, /* 000001110001 */
+ 0x07, 0x10, /* 000001110001 */
+ 0x0b, 0x00, /* 000010110000 */
+ 0x0b, 0x00, /* 000010110000 */
+ 0x0b, 0x20, /* 000010110010 */
+ 0x13, 0xe0, /* 000100111110 */
+ 0x13, 0x20, /* 000100110010 */
+ 0x3f, 0x00, /* 001111110000 */
+ 0x23, 0x00, /* 001000110000 */
+ 0x23, 0x00, /* 001000110000 */
+ 0x43, 0x10, /* 010000110001 */
+ 0x43, 0x10, /* 010000110001 */
+ 0xe7, 0xf0, /* 111001111111 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 147 0x93 '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x02, 0x00, /* 000000100000 */
+ 0x07, 0x00, /* 000001110000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x18, 0xc0, /* 000110001100 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x0f, 0x80, /* 000011111000 */
+ 0x11, 0xc0, /* 000100011100 */
+ 0x20, 0xe0, /* 001000001110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x70, 0x40, /* 011100000100 */
+ 0x38, 0x80, /* 001110001000 */
+ 0x1f, 0x00, /* 000111110000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 148 0x94 '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x0f, 0x80, /* 000011111000 */
+ 0x11, 0xc0, /* 000100011100 */
+ 0x20, 0xe0, /* 001000001110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x70, 0x40, /* 011100000100 */
+ 0x38, 0x80, /* 001110001000 */
+ 0x1f, 0x00, /* 000111110000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 149 0x95 '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x0f, 0x80, /* 000011111000 */
+ 0x11, 0xc0, /* 000100011100 */
+ 0x20, 0xe0, /* 001000001110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x70, 0x40, /* 011100000100 */
+ 0x38, 0x80, /* 001110001000 */
+ 0x1f, 0x00, /* 000111110000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 150 0x96 '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x02, 0x00, /* 000000100000 */
+ 0x07, 0x00, /* 000001110000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x18, 0xc0, /* 000110001100 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x79, 0xe0, /* 011110011110 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x39, 0xc0, /* 001110011100 */
+ 0x1e, 0x60, /* 000111100110 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 151 0x97 '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x18, 0x00, /* 000110000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x79, 0xe0, /* 011110011110 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x39, 0xc0, /* 001110011100 */
+ 0x1e, 0x60, /* 000111100110 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 152 0x98 '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0xf0, 0xf0, /* 111100001111 */
+ 0x60, 0x20, /* 011000000010 */
+ 0x30, 0x40, /* 001100000100 */
+ 0x30, 0x40, /* 001100000100 */
+ 0x18, 0x80, /* 000110001000 */
+ 0x18, 0x80, /* 000110001000 */
+ 0x0d, 0x00, /* 000011010000 */
+ 0x0d, 0x00, /* 000011010000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x04, 0x00, /* 000001000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x08, 0x00, /* 000010000000 */
+ 0x78, 0x00, /* 011110000000 */
+ 0x70, 0x00, /* 011100000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 153 0x99 '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x0f, 0x80, /* 000011111000 */
+ 0x11, 0xc0, /* 000100011100 */
+ 0x20, 0xc0, /* 001000001100 */
+ 0x20, 0x60, /* 001000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x20, 0x40, /* 001000000100 */
+ 0x30, 0x40, /* 001100000100 */
+ 0x18, 0x80, /* 000110001000 */
+ 0x0f, 0x00, /* 000011110000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 154 0x9a '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x19, 0x80, /* 000110011000 */
+ 0xe0, 0x30, /* 111000000011 */
+ 0x60, 0x20, /* 011000000010 */
+ 0x60, 0x20, /* 011000000010 */
+ 0x60, 0x20, /* 011000000010 */
+ 0x60, 0x20, /* 011000000010 */
+ 0x60, 0x20, /* 011000000010 */
+ 0x60, 0x20, /* 011000000010 */
+ 0x60, 0x20, /* 011000000010 */
+ 0x60, 0x20, /* 011000000010 */
+ 0x60, 0x20, /* 011000000010 */
+ 0x70, 0x40, /* 011100000100 */
+ 0x3f, 0xc0, /* 001111111100 */
+ 0x1f, 0x80, /* 000111111000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 155 0x9b '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x1f, 0x80, /* 000111111000 */
+ 0x36, 0xc0, /* 001101101100 */
+ 0x26, 0xc0, /* 001001101100 */
+ 0x66, 0x00, /* 011001100000 */
+ 0x66, 0x00, /* 011001100000 */
+ 0x66, 0x00, /* 011001100000 */
+ 0x66, 0x00, /* 011001100000 */
+ 0x76, 0x40, /* 011101100100 */
+ 0x36, 0xc0, /* 001101101100 */
+ 0x1f, 0x80, /* 000111111000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 156 0x9c '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x0f, 0x80, /* 000011111000 */
+ 0x1c, 0xc0, /* 000111001100 */
+ 0x18, 0xc0, /* 000110001100 */
+ 0x18, 0x00, /* 000110000000 */
+ 0x18, 0x00, /* 000110000000 */
+ 0x18, 0x00, /* 000110000000 */
+ 0x7e, 0x00, /* 011111100000 */
+ 0x7e, 0x00, /* 011111100000 */
+ 0x18, 0x00, /* 000110000000 */
+ 0x18, 0x00, /* 000110000000 */
+ 0x18, 0x00, /* 000110000000 */
+ 0x18, 0x00, /* 000110000000 */
+ 0x3e, 0x20, /* 001111100010 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x61, 0xc0, /* 011000011100 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 157 0x9d '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x0f, 0x00, /* 000011110000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x1f, 0x80, /* 000111111000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x1f, 0x80, /* 000111111000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 158 0x9e '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x7f, 0x80, /* 011111111000 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0x60, /* 001100000110 */
+ 0x30, 0x60, /* 001100000110 */
+ 0x30, 0x60, /* 001100000110 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x37, 0x80, /* 001101111000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x33, 0x00, /* 001100110000 */
+ 0x37, 0x80, /* 001101111000 */
+ 0x33, 0x00, /* 001100110000 */
+ 0x33, 0x00, /* 001100110000 */
+ 0x33, 0x30, /* 001100110011 */
+ 0x31, 0xe0, /* 001100011110 */
+ 0x78, 0xc0, /* 011110001100 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 159 0x9f '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0xc0, /* 000000001100 */
+ 0x01, 0xe0, /* 000000011110 */
+ 0x03, 0x30, /* 000000110011 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x3f, 0xe0, /* 001111111110 */
+ 0x7f, 0xc0, /* 011111111100 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0xcc, 0x00, /* 110011000000 */
+ 0x78, 0x00, /* 011110000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 160 0xa0 '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x01, 0x80, /* 000000011000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x0f, 0x80, /* 000011111000 */
+ 0x18, 0xc0, /* 000110001100 */
+ 0x10, 0xc0, /* 000100001100 */
+ 0x03, 0xc0, /* 000000111100 */
+ 0x1c, 0xc0, /* 000111001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x39, 0xc0, /* 001110011100 */
+ 0x1e, 0xe0, /* 000111101110 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 161 0xa1 '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x01, 0x80, /* 000000011000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x1e, 0x00, /* 000111100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x1f, 0x80, /* 000111111000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 162 0xa2 '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x01, 0x80, /* 000000011000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x0f, 0x80, /* 000011111000 */
+ 0x11, 0xc0, /* 000100011100 */
+ 0x20, 0xe0, /* 001000001110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x70, 0x40, /* 011100000100 */
+ 0x38, 0x80, /* 001110001000 */
+ 0x1f, 0x00, /* 000111110000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 163 0xa3 '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x01, 0x80, /* 000000011000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x79, 0xe0, /* 011110011110 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x39, 0xc0, /* 001110011100 */
+ 0x1e, 0x60, /* 000111100110 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 164 0xa4 '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x1c, 0x40, /* 000111000100 */
+ 0x3f, 0xc0, /* 001111111100 */
+ 0x23, 0x80, /* 001000111000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x27, 0x80, /* 001001111000 */
+ 0x79, 0xc0, /* 011110011100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x79, 0xe0, /* 011110011110 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 165 0xa5 '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x1c, 0x40, /* 000111000100 */
+ 0x3f, 0xc0, /* 001111111100 */
+ 0x23, 0x80, /* 001000111000 */
+ 0xc0, 0x70, /* 110000000111 */
+ 0x60, 0x20, /* 011000000010 */
+ 0x70, 0x20, /* 011100000010 */
+ 0x78, 0x20, /* 011110000010 */
+ 0x5c, 0x20, /* 010111000010 */
+ 0x4e, 0x20, /* 010011100010 */
+ 0x47, 0x20, /* 010001110010 */
+ 0x43, 0xa0, /* 010000111010 */
+ 0x41, 0xe0, /* 010000011110 */
+ 0x40, 0xe0, /* 010000001110 */
+ 0x40, 0x60, /* 010000000110 */
+ 0xe0, 0x30, /* 111000000011 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 166 0xa6 '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x1f, 0x00, /* 000111110000 */
+ 0x31, 0x80, /* 001100011000 */
+ 0x01, 0x80, /* 000000011000 */
+ 0x07, 0x80, /* 000001111000 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x31, 0x80, /* 001100011000 */
+ 0x31, 0x80, /* 001100011000 */
+ 0x33, 0x80, /* 001100111000 */
+ 0x1d, 0xc0, /* 000111011100 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x3f, 0xc0, /* 001111111100 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 167 0xa7 '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x07, 0x00, /* 000001110000 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x10, 0xc0, /* 000100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0x80, /* 001100001000 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x0e, 0x00, /* 000011100000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x3f, 0xc0, /* 001111111100 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 168 0xa8 '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x18, 0x00, /* 000110000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x30, 0x40, /* 001100000100 */
+ 0x39, 0xc0, /* 001110011100 */
+ 0x1f, 0x80, /* 000111111000 */
+ 0x0f, 0x00, /* 000011110000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 169 0xa9 '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x3f, 0xc0, /* 001111111100 */
+ 0x3f, 0xc0, /* 001111111100 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 170 0xaa '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x3f, 0xc0, /* 001111111100 */
+ 0x3f, 0xc0, /* 001111111100 */
+ 0x00, 0xc0, /* 000000001100 */
+ 0x00, 0xc0, /* 000000001100 */
+ 0x00, 0xc0, /* 000000001100 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 171 0xab '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x10, 0x00, /* 000100000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x10, 0x00, /* 000100000000 */
+ 0x10, 0x40, /* 000100000100 */
+ 0x10, 0x80, /* 000100001000 */
+ 0x11, 0x00, /* 000100010000 */
+ 0x3a, 0x00, /* 001110100000 */
+ 0x05, 0xc0, /* 000001011100 */
+ 0x0a, 0x20, /* 000010100010 */
+ 0x10, 0x20, /* 000100000010 */
+ 0x20, 0xc0, /* 001000001100 */
+ 0x41, 0x00, /* 010000010000 */
+ 0x02, 0x00, /* 000000100000 */
+ 0x03, 0xe0, /* 000000111110 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 172 0xac '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x10, 0x00, /* 000100000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x10, 0x00, /* 000100000000 */
+ 0x10, 0x40, /* 000100000100 */
+ 0x10, 0x80, /* 000100001000 */
+ 0x11, 0x00, /* 000100010000 */
+ 0x3a, 0x40, /* 001110100100 */
+ 0x04, 0xc0, /* 000001001100 */
+ 0x09, 0x40, /* 000010010100 */
+ 0x12, 0x40, /* 000100100100 */
+ 0x24, 0x40, /* 001001000100 */
+ 0x47, 0xe0, /* 010001111110 */
+ 0x00, 0x40, /* 000000000100 */
+ 0x00, 0x40, /* 000000000100 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 173 0xad '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 174 0xae '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x06, 0x60, /* 000001100110 */
+ 0x0c, 0xc0, /* 000011001100 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x33, 0x00, /* 001100110000 */
+ 0x66, 0x00, /* 011001100000 */
+ 0x33, 0x00, /* 001100110000 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x0c, 0xc0, /* 000011001100 */
+ 0x06, 0x60, /* 000001100110 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 175 0xaf '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x66, 0x00, /* 011001100000 */
+ 0x33, 0x00, /* 001100110000 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x0c, 0xc0, /* 000011001100 */
+ 0x06, 0x60, /* 000001100110 */
+ 0x0c, 0xc0, /* 000011001100 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x33, 0x00, /* 001100110000 */
+ 0x66, 0x00, /* 011001100000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 176 0xb0 '.' */
+ 0x0c, 0x30, /* 000011000011 */
+ 0x08, 0x20, /* 000010000010 */
+ 0x61, 0x80, /* 011000011000 */
+ 0x20, 0x80, /* 001000001000 */
+ 0x0c, 0x30, /* 000011000011 */
+ 0x08, 0x20, /* 000010000010 */
+ 0x61, 0x80, /* 011000011000 */
+ 0x20, 0x80, /* 001000001000 */
+ 0x0c, 0x30, /* 000011000011 */
+ 0x08, 0x20, /* 000010000010 */
+ 0x61, 0x80, /* 011000011000 */
+ 0x20, 0x80, /* 001000001000 */
+ 0x0c, 0x30, /* 000011000011 */
+ 0x08, 0x20, /* 000010000010 */
+ 0x61, 0x80, /* 011000011000 */
+ 0x20, 0x80, /* 001000001000 */
+ 0x0c, 0x30, /* 000011000011 */
+ 0x08, 0x20, /* 000010000010 */
+ 0x61, 0x80, /* 011000011000 */
+ 0x20, 0x80, /* 001000001000 */
+ 0x0c, 0x30, /* 000011000011 */
+ 0x08, 0x20, /* 000010000010 */
+
+ /* 177 0xb1 '.' */
+ 0x77, 0x70, /* 011101110111 */
+ 0x22, 0x20, /* 001000100010 */
+ 0x88, 0x80, /* 100010001000 */
+ 0xdd, 0xd0, /* 110111011101 */
+ 0x88, 0x80, /* 100010001000 */
+ 0x22, 0x20, /* 001000100010 */
+ 0x77, 0x70, /* 011101110111 */
+ 0x22, 0x20, /* 001000100010 */
+ 0x88, 0x80, /* 100010001000 */
+ 0xdd, 0xd0, /* 110111011101 */
+ 0x88, 0x80, /* 100010001000 */
+ 0x22, 0x20, /* 001000100010 */
+ 0x77, 0x70, /* 011101110111 */
+ 0x22, 0x20, /* 001000100010 */
+ 0x88, 0x80, /* 100010001000 */
+ 0xdd, 0xd0, /* 110111011101 */
+ 0x88, 0x80, /* 100010001000 */
+ 0x22, 0x20, /* 001000100010 */
+ 0x77, 0x70, /* 011101110111 */
+ 0x22, 0x20, /* 001000100010 */
+ 0x88, 0x80, /* 100010001000 */
+ 0xdd, 0xd0, /* 110111011101 */
+
+ /* 178 0xb2 '.' */
+ 0xf3, 0xc0, /* 111100111100 */
+ 0xf7, 0xd0, /* 111101111101 */
+ 0x9e, 0x70, /* 100111100111 */
+ 0xdf, 0x70, /* 110111110111 */
+ 0xf3, 0xc0, /* 111100111100 */
+ 0xf7, 0xd0, /* 111101111101 */
+ 0x9e, 0x70, /* 100111100111 */
+ 0xdf, 0x70, /* 110111110111 */
+ 0xf3, 0xc0, /* 111100111100 */
+ 0xf7, 0xd0, /* 111101111101 */
+ 0x9e, 0x70, /* 100111100111 */
+ 0xdf, 0x70, /* 110111110111 */
+ 0xf3, 0xc0, /* 111100111100 */
+ 0xf7, 0xd0, /* 111101111101 */
+ 0x9e, 0x70, /* 100111100111 */
+ 0xdf, 0x70, /* 110111110111 */
+ 0xf3, 0xc0, /* 111100111100 */
+ 0xf7, 0xd0, /* 111101111101 */
+ 0x9e, 0x70, /* 100111100111 */
+ 0xdf, 0x70, /* 110111110111 */
+ 0xf3, 0xc0, /* 111100111100 */
+ 0xf7, 0xd0, /* 111101111101 */
+
+ /* 179 0xb3 '.' */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+
+ /* 180 0xb4 '.' */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0xfe, 0x00, /* 111111100000 */
+ 0xfe, 0x00, /* 111111100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+
+ /* 181 0xb5 '.' */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0xfe, 0x00, /* 111111100000 */
+ 0xfe, 0x00, /* 111111100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0xfe, 0x00, /* 111111100000 */
+ 0xfe, 0x00, /* 111111100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+
+ /* 182 0xb6 '.' */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0xfd, 0x80, /* 111111011000 */
+ 0xfd, 0x80, /* 111111011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+
+ /* 183 0xb7 '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0xff, 0x80, /* 111111111000 */
+ 0xff, 0x80, /* 111111111000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+
+ /* 184 0xb8 '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0xfe, 0x00, /* 111111100000 */
+ 0xfe, 0x00, /* 111111100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0xfe, 0x00, /* 111111100000 */
+ 0xfe, 0x00, /* 111111100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+
+ /* 185 0xb9 '.' */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0xfd, 0x80, /* 111111011000 */
+ 0xfd, 0x80, /* 111111011000 */
+ 0x01, 0x80, /* 000000011000 */
+ 0xfd, 0x80, /* 111111011000 */
+ 0xfd, 0x80, /* 111111011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+
+ /* 186 0xba '.' */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+
+ /* 187 0xbb '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0xff, 0x80, /* 111111111000 */
+ 0xff, 0x80, /* 111111111000 */
+ 0x01, 0x80, /* 000000011000 */
+ 0xfd, 0x80, /* 111111011000 */
+ 0xfd, 0x80, /* 111111011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+
+ /* 188 0xbc '.' */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0xfd, 0x80, /* 111111011000 */
+ 0xfd, 0x80, /* 111111011000 */
+ 0x01, 0x80, /* 000000011000 */
+ 0xff, 0x80, /* 111111111000 */
+ 0xff, 0x80, /* 111111111000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 189 0xbd '.' */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0xff, 0x80, /* 111111111000 */
+ 0xff, 0x80, /* 111111111000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 190 0xbe '.' */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0xfe, 0x00, /* 111111100000 */
+ 0xfe, 0x00, /* 111111100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0xfe, 0x00, /* 111111100000 */
+ 0xfe, 0x00, /* 111111100000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 191 0xbf '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0xfe, 0x00, /* 111111100000 */
+ 0xfe, 0x00, /* 111111100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+
+ /* 192 0xc0 '.' */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x07, 0xf0, /* 000001111111 */
+ 0x07, 0xf0, /* 000001111111 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 193 0xc1 '.' */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 194 0xc2 '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+
+ /* 195 0xc3 '.' */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x07, 0xf0, /* 000001111111 */
+ 0x07, 0xf0, /* 000001111111 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+
+ /* 196 0xc4 '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 197 0xc5 '.' */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+
+ /* 198 0xc6 '.' */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x07, 0xf0, /* 000001111111 */
+ 0x07, 0xf0, /* 000001111111 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x07, 0xf0, /* 000001111111 */
+ 0x07, 0xf0, /* 000001111111 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+
+ /* 199 0xc7 '.' */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0xf0, /* 000011011111 */
+ 0x0d, 0xf0, /* 000011011111 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+
+ /* 200 0xc8 '.' */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0xf0, /* 000011011111 */
+ 0x0d, 0xf0, /* 000011011111 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x0f, 0xf0, /* 000011111111 */
+ 0x0f, 0xf0, /* 000011111111 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 201 0xc9 '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x0f, 0xf0, /* 000011111111 */
+ 0x0f, 0xf0, /* 000011111111 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x0d, 0xf0, /* 000011011111 */
+ 0x0d, 0xf0, /* 000011011111 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+
+ /* 202 0xca '.' */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0xfd, 0xf0, /* 111111011111 */
+ 0xfd, 0xf0, /* 111111011111 */
+ 0x00, 0x00, /* 000000000000 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 203 0xcb '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0x00, 0x00, /* 000000000000 */
+ 0xfd, 0xf0, /* 111111011111 */
+ 0xfd, 0xf0, /* 111111011111 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+
+ /* 204 0xcc '.' */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0xf0, /* 000011011111 */
+ 0x0d, 0xf0, /* 000011011111 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x0d, 0xf0, /* 000011011111 */
+ 0x0d, 0xf0, /* 000011011111 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+
+ /* 205 0xcd '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0x00, 0x00, /* 000000000000 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 206 0xce '.' */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0xfd, 0xf0, /* 111111011111 */
+ 0xfd, 0xf0, /* 111111011111 */
+ 0x00, 0x00, /* 000000000000 */
+ 0xfd, 0xf0, /* 111111011111 */
+ 0xfd, 0xf0, /* 111111011111 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+
+ /* 207 0xcf '.' */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0x00, 0x00, /* 000000000000 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 208 0xd0 '.' */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 209 0xd1 '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0x00, 0x00, /* 000000000000 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+
+ /* 210 0xd2 '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+
+ /* 211 0xd3 '.' */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0f, 0xf0, /* 000011111111 */
+ 0x0f, 0xf0, /* 000011111111 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 212 0xd4 '.' */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x07, 0xf0, /* 000001111111 */
+ 0x07, 0xf0, /* 000001111111 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x07, 0xf0, /* 000001111111 */
+ 0x07, 0xf0, /* 000001111111 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 213 0xd5 '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x07, 0xf0, /* 000001111111 */
+ 0x07, 0xf0, /* 000001111111 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x07, 0xf0, /* 000001111111 */
+ 0x07, 0xf0, /* 000001111111 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+
+ /* 214 0xd6 '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x0f, 0xf0, /* 000011111111 */
+ 0x0f, 0xf0, /* 000011111111 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+
+ /* 215 0xd7 '.' */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+ 0x0d, 0x80, /* 000011011000 */
+
+ /* 216 0xd8 '.' */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0x06, 0x00, /* 000001100000 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+
+ /* 217 0xd9 '.' */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0xfe, 0x00, /* 111111100000 */
+ 0xfe, 0x00, /* 111111100000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 218 0xda '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x07, 0xf0, /* 000001111111 */
+ 0x07, 0xf0, /* 000001111111 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+
+ /* 219 0xdb '.' */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+
+ /* 220 0xdc '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+
+ /* 221 0xdd '.' */
+ 0xfc, 0x00, /* 111111000000 */
+ 0xfc, 0x00, /* 111111000000 */
+ 0xfc, 0x00, /* 111111000000 */
+ 0xfc, 0x00, /* 111111000000 */
+ 0xfc, 0x00, /* 111111000000 */
+ 0xfc, 0x00, /* 111111000000 */
+ 0xfc, 0x00, /* 111111000000 */
+ 0xfc, 0x00, /* 111111000000 */
+ 0xfc, 0x00, /* 111111000000 */
+ 0xfc, 0x00, /* 111111000000 */
+ 0xfc, 0x00, /* 111111000000 */
+ 0xfc, 0x00, /* 111111000000 */
+ 0xfc, 0x00, /* 111111000000 */
+ 0xfc, 0x00, /* 111111000000 */
+ 0xfc, 0x00, /* 111111000000 */
+ 0xfc, 0x00, /* 111111000000 */
+ 0xfc, 0x00, /* 111111000000 */
+ 0xfc, 0x00, /* 111111000000 */
+ 0xfc, 0x00, /* 111111000000 */
+ 0xfc, 0x00, /* 111111000000 */
+ 0xfc, 0x00, /* 111111000000 */
+ 0xfc, 0x00, /* 111111000000 */
+
+ /* 222 0xde '.' */
+ 0x03, 0xf0, /* 000000111111 */
+ 0x03, 0xf0, /* 000000111111 */
+ 0x03, 0xf0, /* 000000111111 */
+ 0x03, 0xf0, /* 000000111111 */
+ 0x03, 0xf0, /* 000000111111 */
+ 0x03, 0xf0, /* 000000111111 */
+ 0x03, 0xf0, /* 000000111111 */
+ 0x03, 0xf0, /* 000000111111 */
+ 0x03, 0xf0, /* 000000111111 */
+ 0x03, 0xf0, /* 000000111111 */
+ 0x03, 0xf0, /* 000000111111 */
+ 0x03, 0xf0, /* 000000111111 */
+ 0x03, 0xf0, /* 000000111111 */
+ 0x03, 0xf0, /* 000000111111 */
+ 0x03, 0xf0, /* 000000111111 */
+ 0x03, 0xf0, /* 000000111111 */
+ 0x03, 0xf0, /* 000000111111 */
+ 0x03, 0xf0, /* 000000111111 */
+ 0x03, 0xf0, /* 000000111111 */
+ 0x03, 0xf0, /* 000000111111 */
+ 0x03, 0xf0, /* 000000111111 */
+ 0x03, 0xf0, /* 000000111111 */
+
+ /* 223 0xdf '.' */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0xff, 0xf0, /* 111111111111 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 224 0xe0 '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x0f, 0x60, /* 000011110110 */
+ 0x13, 0xe0, /* 000100111110 */
+ 0x21, 0xc0, /* 001000011100 */
+ 0x60, 0xc0, /* 011000001100 */
+ 0x60, 0xc0, /* 011000001100 */
+ 0x60, 0xc0, /* 011000001100 */
+ 0x60, 0xc0, /* 011000001100 */
+ 0x70, 0x80, /* 011100001000 */
+ 0x39, 0xc0, /* 001110011100 */
+ 0x1f, 0x60, /* 000111110110 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 225 0xe1 '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x0f, 0x00, /* 000011110000 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x31, 0x80, /* 001100011000 */
+ 0x37, 0x80, /* 001101111000 */
+ 0x31, 0x80, /* 001100011000 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x31, 0x80, /* 001100011000 */
+ 0x77, 0x00, /* 011101110000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 226 0xe2 '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x3f, 0xe0, /* 001111111110 */
+ 0x3f, 0xe0, /* 001111111110 */
+ 0x30, 0x60, /* 001100000110 */
+ 0x30, 0x60, /* 001100000110 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 227 0xe3 '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 228 0xe4 '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x30, 0x60, /* 001100000110 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x18, 0x00, /* 000110000000 */
+ 0x18, 0x00, /* 000110000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x18, 0x00, /* 000110000000 */
+ 0x18, 0x00, /* 000110000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x30, 0x60, /* 001100000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 229 0xe5 '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x07, 0xe0, /* 000001111110 */
+ 0x0f, 0xe0, /* 000011111110 */
+ 0x13, 0x80, /* 000100111000 */
+ 0x21, 0xc0, /* 001000011100 */
+ 0x60, 0xc0, /* 011000001100 */
+ 0x60, 0xc0, /* 011000001100 */
+ 0x60, 0xc0, /* 011000001100 */
+ 0x60, 0xc0, /* 011000001100 */
+ 0x70, 0x80, /* 011100001000 */
+ 0x39, 0x00, /* 001110010000 */
+ 0x1e, 0x00, /* 000111100000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 230 0xe6 '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x39, 0xc0, /* 001110011100 */
+ 0x36, 0xe0, /* 001101101110 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x60, 0x00, /* 011000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 231 0xe7 '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x3f, 0xc0, /* 001111111100 */
+ 0x66, 0x60, /* 011001100110 */
+ 0x66, 0x60, /* 011001100110 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 232 0xe8 '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x0f, 0x00, /* 000011110000 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x0f, 0x00, /* 000011110000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 233 0xe9 '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x0f, 0x00, /* 000011110000 */
+ 0x1f, 0x80, /* 000111111000 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x1f, 0x80, /* 000111111000 */
+ 0x0f, 0x00, /* 000011110000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 234 0xea '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x1f, 0x00, /* 000111110000 */
+ 0x31, 0x80, /* 001100011000 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x19, 0x80, /* 000110011000 */
+ 0xd9, 0xb0, /* 110110011011 */
+ 0x79, 0xe0, /* 011110011110 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 235 0xeb '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x07, 0x80, /* 000001111000 */
+ 0x0c, 0xc0, /* 000011001100 */
+ 0x18, 0x60, /* 000110000110 */
+ 0x18, 0x00, /* 000110000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x0f, 0x80, /* 000011111000 */
+ 0x11, 0xc0, /* 000100011100 */
+ 0x20, 0xe0, /* 001000001110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x60, 0x60, /* 011000000110 */
+ 0x70, 0x40, /* 011100000100 */
+ 0x38, 0x80, /* 001110001000 */
+ 0x1f, 0x00, /* 000111110000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 236 0xec '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x39, 0xc0, /* 001110011100 */
+ 0x6f, 0x60, /* 011011110110 */
+ 0x66, 0x60, /* 011001100110 */
+ 0xc6, 0x30, /* 110001100011 */
+ 0xc6, 0x30, /* 110001100011 */
+ 0x66, 0x60, /* 011001100110 */
+ 0x6f, 0x60, /* 011011110110 */
+ 0x39, 0xc0, /* 001110011100 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 237 0xed '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0xc0, /* 000000001100 */
+ 0x00, 0xc0, /* 000000001100 */
+ 0x01, 0x80, /* 000000011000 */
+ 0x01, 0x80, /* 000000011000 */
+ 0x3b, 0xc0, /* 001110111100 */
+ 0x6f, 0x60, /* 011011110110 */
+ 0x66, 0x60, /* 011001100110 */
+ 0xc6, 0x30, /* 110001100011 */
+ 0xc6, 0x30, /* 110001100011 */
+ 0x66, 0x60, /* 011001100110 */
+ 0x6f, 0x60, /* 011011110110 */
+ 0x3d, 0xc0, /* 001111011100 */
+ 0x18, 0x00, /* 000110000000 */
+ 0x18, 0x00, /* 000110000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x30, 0x00, /* 001100000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 238 0xee '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x01, 0xc0, /* 000000011100 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x18, 0x00, /* 000110000000 */
+ 0x1f, 0xc0, /* 000111111100 */
+ 0x18, 0x00, /* 000110000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x01, 0xc0, /* 000000011100 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 239 0xef '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x0f, 0x00, /* 000011110000 */
+ 0x1f, 0x80, /* 000111111000 */
+ 0x39, 0xc0, /* 001110011100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x30, 0xc0, /* 001100001100 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 240 0xf0 '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 241 0xf1 '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 242 0xf2 '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x60, 0x00, /* 011000000000 */
+ 0x38, 0x00, /* 001110000000 */
+ 0x0e, 0x00, /* 000011100000 */
+ 0x03, 0x80, /* 000000111000 */
+ 0x00, 0xe0, /* 000000001110 */
+ 0x00, 0xe0, /* 000000001110 */
+ 0x03, 0x80, /* 000000111000 */
+ 0x0e, 0x00, /* 000011100000 */
+ 0x38, 0x00, /* 001110000000 */
+ 0x60, 0x00, /* 011000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 243 0xf3 '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x60, /* 000000000110 */
+ 0x01, 0xc0, /* 000000011100 */
+ 0x07, 0x00, /* 000001110000 */
+ 0x1c, 0x00, /* 000111000000 */
+ 0x70, 0x00, /* 011100000000 */
+ 0x70, 0x00, /* 011100000000 */
+ 0x1c, 0x00, /* 000111000000 */
+ 0x07, 0x00, /* 000001110000 */
+ 0x01, 0xc0, /* 000000011100 */
+ 0x00, 0x60, /* 000000000110 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 244 0xf4 '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x03, 0x80, /* 000000111000 */
+ 0x07, 0xc0, /* 000001111100 */
+ 0x0c, 0x60, /* 000011000110 */
+ 0x0c, 0x60, /* 000011000110 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x0c, 0x00, /* 000011000000 */
+
+ /* 245 0xf5 '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x1c, 0x00, /* 000111000000 */
+ 0x3e, 0x00, /* 001111100000 */
+ 0x63, 0x00, /* 011000110000 */
+ 0x63, 0x00, /* 011000110000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x03, 0x00, /* 000000110000 */
+
+ /* 246 0xf6 '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x7f, 0xe0, /* 011111111110 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 247 0xf7 '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x38, 0x00, /* 001110000000 */
+ 0x6c, 0x00, /* 011011000000 */
+ 0x06, 0x30, /* 000001100011 */
+ 0x03, 0x60, /* 000000110110 */
+ 0x39, 0xc0, /* 001110011100 */
+ 0x6c, 0x00, /* 011011000000 */
+ 0x06, 0x30, /* 000001100011 */
+ 0x03, 0x60, /* 000000110110 */
+ 0x01, 0xc0, /* 000000011100 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 248 0xf8 '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x0f, 0x00, /* 000011110000 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x19, 0x80, /* 000110011000 */
+ 0x0f, 0x00, /* 000011110000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 249 0xf9 '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x1c, 0x00, /* 000111000000 */
+ 0x3e, 0x00, /* 001111100000 */
+ 0x3e, 0x00, /* 001111100000 */
+ 0x3e, 0x00, /* 001111100000 */
+ 0x1c, 0x00, /* 000111000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 250 0xfa '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x18, 0x00, /* 000110000000 */
+ 0x3c, 0x00, /* 001111000000 */
+ 0x3c, 0x00, /* 001111000000 */
+ 0x18, 0x00, /* 000110000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 251 0xfb '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x07, 0xe0, /* 000001111110 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0xc6, 0x00, /* 110001100000 */
+ 0x66, 0x00, /* 011001100000 */
+ 0x36, 0x00, /* 001101100000 */
+ 0x1e, 0x00, /* 000111100000 */
+ 0x0e, 0x00, /* 000011100000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x02, 0x00, /* 000000100000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 252 0xfc '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x13, 0x80, /* 000100111000 */
+ 0x3d, 0xc0, /* 001111011100 */
+ 0x18, 0xc0, /* 000110001100 */
+ 0x18, 0xc0, /* 000110001100 */
+ 0x18, 0xc0, /* 000110001100 */
+ 0x18, 0xc0, /* 000110001100 */
+ 0x3d, 0xe0, /* 001111011110 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 253 0xfd '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x0f, 0x00, /* 000011110000 */
+ 0x1f, 0x80, /* 000111111000 */
+ 0x31, 0x80, /* 001100011000 */
+ 0x21, 0x80, /* 001000011000 */
+ 0x03, 0x00, /* 000000110000 */
+ 0x06, 0x00, /* 000001100000 */
+ 0x0c, 0x00, /* 000011000000 */
+ 0x18, 0x40, /* 000110000100 */
+ 0x3f, 0xc0, /* 001111111100 */
+ 0x3f, 0xc0, /* 001111111100 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 254 0xfe '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x3f, 0xc0, /* 001111111100 */
+ 0x3f, 0xc0, /* 001111111100 */
+ 0x3f, 0xc0, /* 001111111100 */
+ 0x3f, 0xc0, /* 001111111100 */
+ 0x3f, 0xc0, /* 001111111100 */
+ 0x3f, 0xc0, /* 001111111100 */
+ 0x3f, 0xc0, /* 001111111100 */
+ 0x3f, 0xc0, /* 001111111100 */
+ 0x3f, 0xc0, /* 001111111100 */
+ 0x3f, 0xc0, /* 001111111100 */
+ 0x3f, 0xc0, /* 001111111100 */
+ 0x3f, 0xc0, /* 001111111100 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+
+ /* 255 0xff '.' */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+ 0x00, 0x00, /* 000000000000 */
+} };
+
+
+const struct font_desc font_sun_12x22 = {
+ .idx = SUN12x22_IDX,
+ .name = "SUN12x22",
+ .width = 12,
+ .height = 22,
+ .charcount = 256,
+ .data = fontdata_sun12x22.data,
+#ifdef __sparc__
+ .pref = 5,
+#else
+ .pref = -1,
+#endif
+};
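
[Editorial note, not part of the patch: nothing outside these files references the bitmap tables directly; each file only exports its struct font_desc, which the lib/fonts registry hands out by name or by preference score. A minimal sketch of a name-based lookup using the find_font() helper declared in <linux/font.h>; the wrapper name pick_sun_font() is hypothetical:

	#include <linux/font.h>

	/* Look up the Sun 12x22 console font added above by its
	 * registered name.  Returns NULL when the font is compiled
	 * out (CONFIG_FONT_SUN12x22=n). */
	static const struct font_desc *pick_sun_font(void)
	{
		return find_font("SUN12x22");
	}

The .pref field feeds the fallback path instead: get_default_font() scores each registered descriptor partly by its preference value, which is why the Sun fonts advertise a positive pref only when built for sparc and -1 everywhere else.]
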
diff --git a/lib/fonts/font_sun8x16.c b/lib/fonts/font_sun8x16.c
new file mode 100644
index 000000000..81bb4eeae
--- /dev/null
+++ b/lib/fonts/font_sun8x16.c
@@ -0,0 +1,278 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/font.h>
+
+#define FONTDATAMAX 4096
+
+static const struct font_data fontdata_sun8x16 = {
+{ 0, 0, FONTDATAMAX, 0 }, {
+/* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x7e,0x81,0xa5,0x81,0x81,0xbd,0x99,0x81,0x81,0x7e,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x7e,0xff,0xdb,0xff,0xff,0xc3,0xe7,0xff,0xff,0x7e,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x00,0x00,0x6c,0xfe,0xfe,0xfe,0xfe,0x7c,0x38,0x10,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x00,0x00,0x10,0x38,0x7c,0xfe,0x7c,0x38,0x10,0x00,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x00,0x18,0x3c,0x3c,0xe7,0xe7,0xe7,0x18,0x18,0x3c,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x00,0x18,0x3c,0x7e,0xff,0xff,0x7e,0x18,0x18,0x3c,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x18,0x3c,0x3c,0x18,0x00,0x00,0x00,0x00,0x00,0x00,
+/* */ 0xff,0xff,0xff,0xff,0xff,0xff,0xe7,0xc3,0xc3,0xe7,0xff,0xff,0xff,0xff,0xff,0xff,
+/* */ 0x00,0x00,0x00,0x00,0x00,0x3c,0x66,0x42,0x42,0x66,0x3c,0x00,0x00,0x00,0x00,0x00,
+/* */ 0xff,0xff,0xff,0xff,0xff,0xc3,0x99,0xbd,0xbd,0x99,0xc3,0xff,0xff,0xff,0xff,0xff,
+/* */ 0x00,0x00,0x1e,0x0e,0x1a,0x32,0x78,0xcc,0xcc,0xcc,0xcc,0x78,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x3c,0x66,0x66,0x66,0x66,0x3c,0x18,0x7e,0x18,0x18,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x3f,0x33,0x3f,0x30,0x30,0x30,0x30,0x70,0xf0,0xe0,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x7f,0x63,0x7f,0x63,0x63,0x63,0x63,0x67,0xe7,0xe6,0xc0,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x00,0x18,0x18,0xdb,0x3c,0xe7,0x3c,0xdb,0x18,0x18,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x80,0xc0,0xe0,0xf0,0xf8,0xfe,0xf8,0xf0,0xe0,0xc0,0x80,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x02,0x06,0x0e,0x1e,0x3e,0xfe,0x3e,0x1e,0x0e,0x06,0x02,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x18,0x3c,0x7e,0x18,0x18,0x18,0x7e,0x3c,0x18,0x00,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x66,0x66,0x66,0x66,0x66,0x66,0x66,0x00,0x66,0x66,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x7f,0xdb,0xdb,0xdb,0x7b,0x1b,0x1b,0x1b,0x1b,0x1b,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x7c,0xc6,0x60,0x38,0x6c,0xc6,0xc6,0x6c,0x38,0x0c,0xc6,0x7c,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xfe,0xfe,0xfe,0xfe,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x18,0x3c,0x7e,0x18,0x18,0x18,0x7e,0x3c,0x18,0x7e,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x18,0x3c,0x7e,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x7e,0x3c,0x18,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x00,0x00,0x00,0x18,0x0c,0xfe,0x0c,0x18,0x00,0x00,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x00,0x00,0x00,0x30,0x60,0xfe,0x60,0x30,0x00,0x00,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x00,0x00,0x00,0x00,0xc0,0xc0,0xc0,0xfe,0x00,0x00,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x00,0x00,0x00,0x24,0x66,0xff,0x66,0x24,0x00,0x00,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x00,0x00,0x10,0x38,0x38,0x7c,0x7c,0xfe,0xfe,0x00,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x00,0x00,0xfe,0xfe,0x7c,0x7c,0x38,0x38,0x10,0x00,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+/*!*/ 0x00,0x00,0x18,0x3c,0x3c,0x3c,0x18,0x18,0x18,0x00,0x18,0x18,0x00,0x00,0x00,0x00,
+/*"*/ 0x00,0x66,0x66,0x66,0x24,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+/*#*/ 0x00,0x00,0x00,0x6c,0x6c,0xfe,0x6c,0x6c,0x6c,0xfe,0x6c,0x6c,0x00,0x00,0x00,0x00,
+/*$*/ 0x18,0x18,0x7c,0xc6,0xc2,0xc0,0x7c,0x06,0x06,0x86,0xc6,0x7c,0x18,0x18,0x00,0x00,
+/*%*/ 0x00,0x00,0x00,0x00,0xc2,0xc6,0x0c,0x18,0x30,0x60,0xc6,0x86,0x00,0x00,0x00,0x00,
+/*&*/ 0x00,0x00,0x38,0x6c,0x6c,0x38,0x76,0xdc,0xcc,0xcc,0xcc,0x76,0x00,0x00,0x00,0x00,
+/*'*/ 0x00,0x30,0x30,0x30,0x60,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+/*(*/ 0x00,0x00,0x0c,0x18,0x30,0x30,0x30,0x30,0x30,0x30,0x18,0x0c,0x00,0x00,0x00,0x00,
+/*)*/ 0x00,0x00,0x30,0x18,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x18,0x30,0x00,0x00,0x00,0x00,
+/***/ 0x00,0x00,0x00,0x00,0x00,0x66,0x3c,0xff,0x3c,0x66,0x00,0x00,0x00,0x00,0x00,0x00,
+/*+*/ 0x00,0x00,0x00,0x00,0x00,0x18,0x18,0x7e,0x18,0x18,0x00,0x00,0x00,0x00,0x00,0x00,
+/*,*/ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x18,0x18,0x18,0x30,0x00,0x00,0x00,
+/*-*/ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x7e,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+/*.*/ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x18,0x18,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x00,0x00,0x02,0x06,0x0c,0x18,0x30,0x60,0xc0,0x80,0x00,0x00,0x00,0x00,
+/*0*/ 0x00,0x00,0x7c,0xc6,0xc6,0xce,0xde,0xf6,0xe6,0xc6,0xc6,0x7c,0x00,0x00,0x00,0x00,
+/*1*/ 0x00,0x00,0x18,0x38,0x78,0x18,0x18,0x18,0x18,0x18,0x18,0x7e,0x00,0x00,0x00,0x00,
+/*2*/ 0x00,0x00,0x7c,0xc6,0x06,0x0c,0x18,0x30,0x60,0xc0,0xc6,0xfe,0x00,0x00,0x00,0x00,
+/*3*/ 0x00,0x00,0x7c,0xc6,0x06,0x06,0x3c,0x06,0x06,0x06,0xc6,0x7c,0x00,0x00,0x00,0x00,
+/*4*/ 0x00,0x00,0x0c,0x1c,0x3c,0x6c,0xcc,0xfe,0x0c,0x0c,0x0c,0x1e,0x00,0x00,0x00,0x00,
+/*5*/ 0x00,0x00,0xfe,0xc0,0xc0,0xc0,0xfc,0x06,0x06,0x06,0xc6,0x7c,0x00,0x00,0x00,0x00,
+/*6*/ 0x00,0x00,0x38,0x60,0xc0,0xc0,0xfc,0xc6,0xc6,0xc6,0xc6,0x7c,0x00,0x00,0x00,0x00,
+/*7*/ 0x00,0x00,0xfe,0xc6,0x06,0x06,0x0c,0x18,0x30,0x30,0x30,0x30,0x00,0x00,0x00,0x00,
+/*8*/ 0x00,0x00,0x7c,0xc6,0xc6,0xc6,0x7c,0xc6,0xc6,0xc6,0xc6,0x7c,0x00,0x00,0x00,0x00,
+/*9*/ 0x00,0x00,0x7c,0xc6,0xc6,0xc6,0x7e,0x06,0x06,0x06,0x0c,0x78,0x00,0x00,0x00,0x00,
+/*:*/ 0x00,0x00,0x00,0x00,0x18,0x18,0x00,0x00,0x00,0x18,0x18,0x00,0x00,0x00,0x00,0x00,
+/*;*/ 0x00,0x00,0x00,0x00,0x18,0x18,0x00,0x00,0x00,0x18,0x18,0x30,0x00,0x00,0x00,0x00,
+/*<*/ 0x00,0x00,0x00,0x06,0x0c,0x18,0x30,0x60,0x30,0x18,0x0c,0x06,0x00,0x00,0x00,0x00,
+/*=*/ 0x00,0x00,0x00,0x00,0x00,0x7e,0x00,0x00,0x7e,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+/*>*/ 0x00,0x00,0x00,0x60,0x30,0x18,0x0c,0x06,0x0c,0x18,0x30,0x60,0x00,0x00,0x00,0x00,
+/*?*/ 0x00,0x00,0x7c,0xc6,0xc6,0x0c,0x18,0x18,0x18,0x00,0x18,0x18,0x00,0x00,0x00,0x00,
+/*@*/ 0x00,0x00,0x7c,0xc6,0xc6,0xc6,0xde,0xde,0xde,0xdc,0xc0,0x7c,0x00,0x00,0x00,0x00,
+/*A*/ 0x00,0x00,0x10,0x38,0x6c,0xc6,0xc6,0xfe,0xc6,0xc6,0xc6,0xc6,0x00,0x00,0x00,0x00,
+/*B*/ 0x00,0x00,0xfc,0x66,0x66,0x66,0x7c,0x66,0x66,0x66,0x66,0xfc,0x00,0x00,0x00,0x00,
+/*C*/ 0x00,0x00,0x3c,0x66,0xc2,0xc0,0xc0,0xc0,0xc0,0xc2,0x66,0x3c,0x00,0x00,0x00,0x00,
+/*D*/ 0x00,0x00,0xf8,0x6c,0x66,0x66,0x66,0x66,0x66,0x66,0x6c,0xf8,0x00,0x00,0x00,0x00,
+/*E*/ 0x00,0x00,0xfe,0x66,0x62,0x68,0x78,0x68,0x60,0x62,0x66,0xfe,0x00,0x00,0x00,0x00,
+/*F*/ 0x00,0x00,0xfe,0x66,0x62,0x68,0x78,0x68,0x60,0x60,0x60,0xf0,0x00,0x00,0x00,0x00,
+/*G*/ 0x00,0x00,0x3c,0x66,0xc2,0xc0,0xc0,0xde,0xc6,0xc6,0x66,0x3a,0x00,0x00,0x00,0x00,
+/*H*/ 0x00,0x00,0xc6,0xc6,0xc6,0xc6,0xfe,0xc6,0xc6,0xc6,0xc6,0xc6,0x00,0x00,0x00,0x00,
+/*I*/ 0x00,0x00,0x3c,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x3c,0x00,0x00,0x00,0x00,
+/*J*/ 0x00,0x00,0x1e,0x0c,0x0c,0x0c,0x0c,0x0c,0xcc,0xcc,0xcc,0x78,0x00,0x00,0x00,0x00,
+/*K*/ 0x00,0x00,0xe6,0x66,0x66,0x6c,0x78,0x78,0x6c,0x66,0x66,0xe6,0x00,0x00,0x00,0x00,
+/*L*/ 0x00,0x00,0xf0,0x60,0x60,0x60,0x60,0x60,0x60,0x62,0x66,0xfe,0x00,0x00,0x00,0x00,
+/*M*/ 0x00,0x00,0xc3,0xe7,0xff,0xff,0xdb,0xc3,0xc3,0xc3,0xc3,0xc3,0x00,0x00,0x00,0x00,
+/*N*/ 0x00,0x00,0xc6,0xe6,0xf6,0xfe,0xde,0xce,0xc6,0xc6,0xc6,0xc6,0x00,0x00,0x00,0x00,
+/*O*/ 0x00,0x00,0x7c,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0x7c,0x00,0x00,0x00,0x00,
+/*P*/ 0x00,0x00,0xfc,0x66,0x66,0x66,0x7c,0x60,0x60,0x60,0x60,0xf0,0x00,0x00,0x00,0x00,
+/*Q*/ 0x00,0x00,0x7c,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0xd6,0xde,0x7c,0x0c,0x0e,0x00,0x00,
+/*R*/ 0x00,0x00,0xfc,0x66,0x66,0x66,0x7c,0x6c,0x66,0x66,0x66,0xe6,0x00,0x00,0x00,0x00,
+/*S*/ 0x00,0x00,0x7c,0xc6,0xc6,0x60,0x38,0x0c,0x06,0xc6,0xc6,0x7c,0x00,0x00,0x00,0x00,
+/*T*/ 0x00,0x00,0xff,0xdb,0x99,0x18,0x18,0x18,0x18,0x18,0x18,0x3c,0x00,0x00,0x00,0x00,
+/*U*/ 0x00,0x00,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0x7c,0x00,0x00,0x00,0x00,
+/*V*/ 0x00,0x00,0xc3,0xc3,0xc3,0xc3,0xc3,0xc3,0xc3,0x66,0x3c,0x18,0x00,0x00,0x00,0x00,
+/*W*/ 0x00,0x00,0xc3,0xc3,0xc3,0xc3,0xc3,0xdb,0xdb,0xff,0x66,0x66,0x00,0x00,0x00,0x00,
+/*X*/ 0x00,0x00,0xc3,0xc3,0x66,0x3c,0x18,0x18,0x3c,0x66,0xc3,0xc3,0x00,0x00,0x00,0x00,
+/*Y*/ 0x00,0x00,0xc3,0xc3,0xc3,0x66,0x3c,0x18,0x18,0x18,0x18,0x3c,0x00,0x00,0x00,0x00,
+/*Z*/ 0x00,0x00,0xff,0xc3,0x86,0x0c,0x18,0x30,0x60,0xc1,0xc3,0xff,0x00,0x00,0x00,0x00,
+/*[*/ 0x00,0x00,0x3c,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x3c,0x00,0x00,0x00,0x00,
+/*\*/ 0x00,0x00,0x00,0x80,0xc0,0xe0,0x70,0x38,0x1c,0x0e,0x06,0x02,0x00,0x00,0x00,0x00,
+/*]*/ 0x00,0x00,0x3c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x3c,0x00,0x00,0x00,0x00,
+/*^*/ 0x10,0x38,0x6c,0xc6,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+/*_*/ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xff,0x00,0x00,
+/*`*/ 0x30,0x30,0x18,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+/*a*/ 0x00,0x00,0x00,0x00,0x00,0x78,0x0c,0x7c,0xcc,0xcc,0xcc,0x76,0x00,0x00,0x00,0x00,
+/*b*/ 0x00,0x00,0xe0,0x60,0x60,0x78,0x6c,0x66,0x66,0x66,0x66,0x7c,0x00,0x00,0x00,0x00,
+/*c*/ 0x00,0x00,0x00,0x00,0x00,0x7c,0xc6,0xc0,0xc0,0xc0,0xc6,0x7c,0x00,0x00,0x00,0x00,
+/*d*/ 0x00,0x00,0x1c,0x0c,0x0c,0x3c,0x6c,0xcc,0xcc,0xcc,0xcc,0x76,0x00,0x00,0x00,0x00,
+/*e*/ 0x00,0x00,0x00,0x00,0x00,0x7c,0xc6,0xfe,0xc0,0xc0,0xc6,0x7c,0x00,0x00,0x00,0x00,
+/*f*/ 0x00,0x00,0x38,0x6c,0x64,0x60,0xf0,0x60,0x60,0x60,0x60,0xf0,0x00,0x00,0x00,0x00,
+/*g*/ 0x00,0x00,0x00,0x00,0x00,0x76,0xcc,0xcc,0xcc,0xcc,0xcc,0x7c,0x0c,0xcc,0x78,0x00,
+/*h*/ 0x00,0x00,0xe0,0x60,0x60,0x6c,0x76,0x66,0x66,0x66,0x66,0xe6,0x00,0x00,0x00,0x00,
+/*i*/ 0x00,0x00,0x18,0x18,0x00,0x38,0x18,0x18,0x18,0x18,0x18,0x3c,0x00,0x00,0x00,0x00,
+/*j*/ 0x00,0x00,0x06,0x06,0x00,0x0e,0x06,0x06,0x06,0x06,0x06,0x06,0x66,0x66,0x3c,0x00,
+/*k*/ 0x00,0x00,0xe0,0x60,0x60,0x66,0x6c,0x78,0x78,0x6c,0x66,0xe6,0x00,0x00,0x00,0x00,
+/*l*/ 0x00,0x00,0x38,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x3c,0x00,0x00,0x00,0x00,
+/*m*/ 0x00,0x00,0x00,0x00,0x00,0xe6,0xff,0xdb,0xdb,0xdb,0xdb,0xdb,0x00,0x00,0x00,0x00,
+/*n*/ 0x00,0x00,0x00,0x00,0x00,0xdc,0x66,0x66,0x66,0x66,0x66,0x66,0x00,0x00,0x00,0x00,
+/*o*/ 0x00,0x00,0x00,0x00,0x00,0x7c,0xc6,0xc6,0xc6,0xc6,0xc6,0x7c,0x00,0x00,0x00,0x00,
+/*p*/ 0x00,0x00,0x00,0x00,0x00,0xdc,0x66,0x66,0x66,0x66,0x66,0x7c,0x60,0x60,0xf0,0x00,
+/*q*/ 0x00,0x00,0x00,0x00,0x00,0x76,0xcc,0xcc,0xcc,0xcc,0xcc,0x7c,0x0c,0x0c,0x1e,0x00,
+/*r*/ 0x00,0x00,0x00,0x00,0x00,0xdc,0x76,0x66,0x60,0x60,0x60,0xf0,0x00,0x00,0x00,0x00,
+/*s*/ 0x00,0x00,0x00,0x00,0x00,0x7c,0xc6,0x60,0x38,0x0c,0xc6,0x7c,0x00,0x00,0x00,0x00,
+/*t*/ 0x00,0x00,0x10,0x30,0x30,0xfc,0x30,0x30,0x30,0x30,0x36,0x1c,0x00,0x00,0x00,0x00,
+/*u*/ 0x00,0x00,0x00,0x00,0x00,0xcc,0xcc,0xcc,0xcc,0xcc,0xcc,0x76,0x00,0x00,0x00,0x00,
+/*v*/ 0x00,0x00,0x00,0x00,0x00,0xc3,0xc3,0xc3,0xc3,0x66,0x3c,0x18,0x00,0x00,0x00,0x00,
+/*w*/ 0x00,0x00,0x00,0x00,0x00,0xc3,0xc3,0xc3,0xdb,0xdb,0xff,0x66,0x00,0x00,0x00,0x00,
+/*x*/ 0x00,0x00,0x00,0x00,0x00,0xc3,0x66,0x3c,0x18,0x3c,0x66,0xc3,0x00,0x00,0x00,0x00,
+/*y*/ 0x00,0x00,0x00,0x00,0x00,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0x7e,0x06,0x0c,0xf8,0x00,
+/*z*/ 0x00,0x00,0x00,0x00,0x00,0xfe,0xcc,0x18,0x30,0x60,0xc6,0xfe,0x00,0x00,0x00,0x00,
+/*{*/ 0x00,0x00,0x0e,0x18,0x18,0x18,0x70,0x18,0x18,0x18,0x18,0x0e,0x00,0x00,0x00,0x00,
+/*|*/ 0x00,0x00,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x00,0x00,0x00,0x00,
+/*}*/ 0x00,0x00,0x70,0x18,0x18,0x18,0x0e,0x18,0x18,0x18,0x18,0x70,0x00,0x00,0x00,0x00,
+/*~*/ 0x00,0x00,0x76,0xdc,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x00,0x00,0x10,0x38,0x6c,0xc6,0xc6,0xc6,0xfe,0x00,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x3c,0x66,0xc2,0xc0,0xc0,0xc0,0xc2,0x66,0x3c,0x0c,0x06,0x7c,0x00,0x00,
+/* */ 0x00,0x00,0xcc,0x00,0x00,0xcc,0xcc,0xcc,0xcc,0xcc,0xcc,0x76,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x0c,0x18,0x30,0x00,0x7c,0xc6,0xfe,0xc0,0xc0,0xc6,0x7c,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x10,0x38,0x6c,0x00,0x78,0x0c,0x7c,0xcc,0xcc,0xcc,0x76,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0xcc,0x00,0x00,0x78,0x0c,0x7c,0xcc,0xcc,0xcc,0x76,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x60,0x30,0x18,0x00,0x78,0x0c,0x7c,0xcc,0xcc,0xcc,0x76,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x38,0x6c,0x38,0x00,0x78,0x0c,0x7c,0xcc,0xcc,0xcc,0x76,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x00,0x00,0x3c,0x66,0x60,0x60,0x66,0x3c,0x0c,0x06,0x3c,0x00,0x00,0x00,
+/* */ 0x00,0x10,0x38,0x6c,0x00,0x7c,0xc6,0xfe,0xc0,0xc0,0xc6,0x7c,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0xc6,0x00,0x00,0x7c,0xc6,0xfe,0xc0,0xc0,0xc6,0x7c,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x60,0x30,0x18,0x00,0x7c,0xc6,0xfe,0xc0,0xc0,0xc6,0x7c,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x66,0x00,0x00,0x38,0x18,0x18,0x18,0x18,0x18,0x3c,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x18,0x3c,0x66,0x00,0x38,0x18,0x18,0x18,0x18,0x18,0x3c,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x60,0x30,0x18,0x00,0x38,0x18,0x18,0x18,0x18,0x18,0x3c,0x00,0x00,0x00,0x00,
+/* */ 0x00,0xc6,0x00,0x10,0x38,0x6c,0xc6,0xc6,0xfe,0xc6,0xc6,0xc6,0x00,0x00,0x00,0x00,
+/* */ 0x38,0x6c,0x38,0x00,0x38,0x6c,0xc6,0xc6,0xfe,0xc6,0xc6,0xc6,0x00,0x00,0x00,0x00,
+/* */ 0x18,0x30,0x60,0x00,0xfe,0x66,0x60,0x7c,0x60,0x60,0x66,0xfe,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x00,0x00,0x00,0x6e,0x3b,0x1b,0x7e,0xd8,0xdc,0x77,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x3e,0x6c,0xcc,0xcc,0xfe,0xcc,0xcc,0xcc,0xcc,0xce,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x10,0x38,0x6c,0x00,0x7c,0xc6,0xc6,0xc6,0xc6,0xc6,0x7c,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0xc6,0x00,0x00,0x7c,0xc6,0xc6,0xc6,0xc6,0xc6,0x7c,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x60,0x30,0x18,0x00,0x7c,0xc6,0xc6,0xc6,0xc6,0xc6,0x7c,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x30,0x78,0xcc,0x00,0xcc,0xcc,0xcc,0xcc,0xcc,0xcc,0x76,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x60,0x30,0x18,0x00,0xcc,0xcc,0xcc,0xcc,0xcc,0xcc,0x76,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0xc6,0x00,0x00,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0x7e,0x06,0x0c,0x78,0x00,
+/* */ 0x00,0xc6,0x00,0x7c,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0x7c,0x00,0x00,0x00,0x00,
+/* */ 0x00,0xc6,0x00,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0x7c,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x18,0x18,0x7e,0xc3,0xc0,0xc0,0xc0,0xc3,0x7e,0x18,0x18,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x38,0x6c,0x64,0x60,0xf0,0x60,0x60,0x60,0x60,0xe6,0xfc,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0xc3,0x66,0x3c,0x18,0xff,0x18,0xff,0x18,0x18,0x18,0x00,0x00,0x00,0x00,
+/* */ 0x00,0xfc,0x66,0x66,0x7c,0x62,0x66,0x6f,0x66,0x66,0x66,0xf3,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x0e,0x1b,0x18,0x18,0x18,0x7e,0x18,0x18,0x18,0x18,0x18,0xd8,0x70,0x00,0x00,
+/* */ 0x00,0x18,0x30,0x60,0x00,0x78,0x0c,0x7c,0xcc,0xcc,0xcc,0x76,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x0c,0x18,0x30,0x00,0x38,0x18,0x18,0x18,0x18,0x18,0x3c,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x18,0x30,0x60,0x00,0x7c,0xc6,0xc6,0xc6,0xc6,0xc6,0x7c,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x18,0x30,0x60,0x00,0xcc,0xcc,0xcc,0xcc,0xcc,0xcc,0x76,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x76,0xdc,0x00,0xdc,0x66,0x66,0x66,0x66,0x66,0x66,0x00,0x00,0x00,0x00,
+/* */ 0x76,0xdc,0x00,0xc6,0xe6,0xf6,0xfe,0xde,0xce,0xc6,0xc6,0xc6,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x3c,0x6c,0x6c,0x3e,0x00,0x7e,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x38,0x6c,0x6c,0x38,0x00,0x7c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x30,0x30,0x00,0x30,0x30,0x60,0xc0,0xc6,0xc6,0x7c,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x00,0x00,0x00,0x00,0xfe,0xc0,0xc0,0xc0,0xc0,0x00,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x00,0x00,0x00,0x00,0xfe,0x06,0x06,0x06,0x06,0x00,0x00,0x00,0x00,0x00,
+/* */ 0x00,0xc0,0xc0,0xc2,0xc6,0xcc,0x18,0x30,0x60,0xce,0x9b,0x06,0x0c,0x1f,0x00,0x00,
+/* */ 0x00,0xc0,0xc0,0xc2,0xc6,0xcc,0x18,0x30,0x66,0xce,0x96,0x3e,0x06,0x06,0x00,0x00,
+/* */ 0x00,0x00,0x18,0x18,0x00,0x18,0x18,0x18,0x3c,0x3c,0x3c,0x18,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x00,0x00,0x00,0x36,0x6c,0xd8,0x6c,0x36,0x00,0x00,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x00,0x00,0x00,0xd8,0x6c,0x36,0x6c,0xd8,0x00,0x00,0x00,0x00,0x00,0x00,
+/* */ 0x11,0x44,0x11,0x44,0x11,0x44,0x11,0x44,0x11,0x44,0x11,0x44,0x11,0x44,0x11,0x44,
+/* */ 0x55,0xaa,0x55,0xaa,0x55,0xaa,0x55,0xaa,0x55,0xaa,0x55,0xaa,0x55,0xaa,0x55,0xaa,
+/* */ 0xdd,0x77,0xdd,0x77,0xdd,0x77,0xdd,0x77,0xdd,0x77,0xdd,0x77,0xdd,0x77,0xdd,0x77,
+/* */ 0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
+/* */ 0x18,0x18,0x18,0x18,0x18,0x18,0x18,0xf8,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
+/* */ 0x18,0x18,0x18,0x18,0x18,0xf8,0x18,0xf8,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
+/* */ 0x36,0x36,0x36,0x36,0x36,0x36,0x36,0xf6,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36,
+/* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xfe,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36,
+/* */ 0x00,0x00,0x00,0x00,0x00,0xf8,0x18,0xf8,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
+/* */ 0x36,0x36,0x36,0x36,0x36,0xf6,0x06,0xf6,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36,
+/* */ 0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36,
+/* */ 0x00,0x00,0x00,0x00,0x00,0xfe,0x06,0xf6,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36,
+/* */ 0x36,0x36,0x36,0x36,0x36,0xf6,0x06,0xfe,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+/* */ 0x36,0x36,0x36,0x36,0x36,0x36,0x36,0xfe,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+/* */ 0x18,0x18,0x18,0x18,0x18,0xf8,0x18,0xf8,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xf8,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
+/* */ 0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x1f,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+/* */ 0x18,0x18,0x18,0x18,0x18,0x18,0x18,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xff,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
+/* */ 0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x1f,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
+/* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+/* */ 0x18,0x18,0x18,0x18,0x18,0x18,0x18,0xff,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
+/* */ 0x18,0x18,0x18,0x18,0x18,0x1f,0x18,0x1f,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
+/* */ 0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x37,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36,
+/* */ 0x36,0x36,0x36,0x36,0x36,0x37,0x30,0x3f,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x00,0x00,0x00,0x3f,0x30,0x37,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36,
+/* */ 0x36,0x36,0x36,0x36,0x36,0xf7,0x00,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x00,0x00,0x00,0xff,0x00,0xf7,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36,
+/* */ 0x36,0x36,0x36,0x36,0x36,0x37,0x30,0x37,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36,
+/* */ 0x00,0x00,0x00,0x00,0x00,0xff,0x00,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+/* */ 0x36,0x36,0x36,0x36,0x36,0xf7,0x00,0xf7,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36,
+/* */ 0x18,0x18,0x18,0x18,0x18,0xff,0x00,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+/* */ 0x36,0x36,0x36,0x36,0x36,0x36,0x36,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x00,0x00,0x00,0xff,0x00,0xff,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
+/* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xff,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36,
+/* */ 0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x3f,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+/* */ 0x18,0x18,0x18,0x18,0x18,0x1f,0x18,0x1f,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x00,0x00,0x00,0x1f,0x18,0x1f,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
+/* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x3f,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36,
+/* */ 0x36,0x36,0x36,0x36,0x36,0x36,0x36,0xff,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36,
+/* */ 0x18,0x18,0x18,0x18,0x18,0xff,0x18,0xff,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
+/* */ 0x18,0x18,0x18,0x18,0x18,0x18,0x18,0xf8,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x1f,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
+/* */ 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+/* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
+/* */ 0xf0,0xf0,0xf0,0xf0,0xf0,0xf0,0xf0,0xf0,0xf0,0xf0,0xf0,0xf0,0xf0,0xf0,0xf0,0xf0,
+/* */ 0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,
+/* */ 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x00,0x00,0x00,0x76,0xdc,0xd8,0xd8,0xd8,0xdc,0x76,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x78,0xcc,0xcc,0xcc,0xd8,0xcc,0xc6,0xc6,0xc6,0xcc,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0xfe,0xc6,0xc6,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x00,0x00,0xfe,0x6c,0x6c,0x6c,0x6c,0x6c,0x6c,0x6c,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x00,0xfe,0xc6,0x60,0x30,0x18,0x30,0x60,0xc6,0xfe,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x00,0x00,0x00,0x7e,0xd8,0xd8,0xd8,0xd8,0xd8,0x70,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x00,0x00,0x66,0x66,0x66,0x66,0x66,0x7c,0x60,0x60,0xc0,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x00,0x00,0x76,0xdc,0x18,0x18,0x18,0x18,0x18,0x18,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x00,0x7e,0x18,0x3c,0x66,0x66,0x66,0x3c,0x18,0x7e,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x00,0x38,0x6c,0xc6,0xc6,0xfe,0xc6,0xc6,0x6c,0x38,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x38,0x6c,0xc6,0xc6,0xc6,0x6c,0x6c,0x6c,0x6c,0xee,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x1e,0x30,0x18,0x0c,0x3e,0x66,0x66,0x66,0x66,0x3c,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x00,0x00,0x00,0x7e,0xdb,0xdb,0xdb,0x7e,0x00,0x00,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x00,0x03,0x06,0x7e,0xdb,0xdb,0xf3,0x7e,0x60,0xc0,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x1c,0x30,0x60,0x60,0x7c,0x60,0x60,0x60,0x30,0x1c,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x00,0x7c,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x00,0x00,0xfe,0x00,0x00,0xfe,0x00,0x00,0xfe,0x00,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x00,0x00,0x18,0x18,0x7e,0x18,0x18,0x00,0x00,0xff,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x00,0x30,0x18,0x0c,0x06,0x0c,0x18,0x30,0x00,0x7e,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x00,0x0c,0x18,0x30,0x60,0x30,0x18,0x0c,0x00,0x7e,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x0e,0x1b,0x1b,0x1b,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,
+/* */ 0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0xd8,0xd8,0xd8,0x70,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x00,0x00,0x18,0x18,0x00,0x7e,0x00,0x18,0x18,0x00,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x00,0x00,0x00,0x76,0xdc,0x00,0x76,0xdc,0x00,0x00,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x38,0x6c,0x6c,0x38,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x18,0x18,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x18,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x0f,0x0c,0x0c,0x0c,0x0c,0x0c,0xec,0x6c,0x6c,0x3c,0x1c,0x00,0x00,0x00,0x00,
+/* */ 0x00,0xd8,0x6c,0x6c,0x6c,0x6c,0x6c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x70,0xd8,0x30,0x60,0xc8,0xf8,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x00,0x00,0x7c,0x7c,0x7c,0x7c,0x7c,0x7c,0x7c,0x00,0x00,0x00,0x00,0x00,
+/* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+} };
+
+const struct font_desc font_sun_8x16 = {
+ .idx = SUN8x16_IDX,
+ .name = "SUN8x16",
+ .width = 8,
+ .height = 16,
+ .charcount = 256,
+ .data = fontdata_sun8x16.data,
+#ifdef __sparc__
+ .pref = 10,
+#else
+ .pref = -1,
+#endif
+};
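
[Editorial note, not part of the patch: the bitmaps in these files are packed row-major, leftmost pixel in the most significant bit, with each row padded to whole bytes. For this 8x16 face that is 1 byte per row and 16 bytes per glyph, so FONTDATAMAX (4096) is exactly 256 glyphs; the 12x22 face above needs 2 bytes per row (44 bytes per glyph), and the 16x32 face below 64 bytes per glyph, matching its FONTDATAMAX of 16384. A small sketch of the addressing math; glyph_bitmap() is a hypothetical helper, not an existing kernel API:

	#include <linux/font.h>
	#include <linux/math.h>		/* DIV_ROUND_UP() */

	/* Address of character 'c' inside a packed font bitmap:
	 * rows are MSB-first and padded to a whole number of bytes. */
	static const u8 *glyph_bitmap(const struct font_desc *f,
				      unsigned char c)
	{
		unsigned int stride = DIV_ROUND_UP(f->width, 8); /* bytes/row */

		return (const u8 *)f->data + (size_t)c * stride * f->height;
	}

For font_sun_8x16 this reduces to data + c * 16, which is how console drivers index the table.]
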
diff --git a/lib/fonts/font_ter16x32.c b/lib/fonts/font_ter16x32.c
new file mode 100644
index 000000000..5baedc573
--- /dev/null
+++ b/lib/fonts/font_ter16x32.c
@@ -0,0 +1,2072 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/font.h>
+#include <linux/module.h>
+
+#define FONTDATAMAX 16384
+
+static const struct font_data fontdata_ter16x32 = {
+ { 0, 0, FONTDATAMAX, 0 }, {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x3f, 0xf8, 0x7f, 0xfc,
+ 0xf0, 0x1e, 0xe0, 0x0e, 0xe0, 0x0e, 0xe0, 0x0e,
+ 0xee, 0xee, 0xee, 0xee, 0xe0, 0x0e, 0xe0, 0x0e,
+ 0xe0, 0x0e, 0xe0, 0x0e, 0xef, 0xee, 0xe7, 0xce,
+ 0xe0, 0x0e, 0xe0, 0x0e, 0xe0, 0x0e, 0xf0, 0x1e,
+ 0x7f, 0xfc, 0x3f, 0xf8, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 1 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x3f, 0xf8, 0x7f, 0xfc,
+ 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe,
+ 0xe3, 0x8e, 0xe3, 0x8e, 0xff, 0xfe, 0xff, 0xfe,
+ 0xff, 0xfe, 0xff, 0xfe, 0xe0, 0x0e, 0xf0, 0x1e,
+ 0xf8, 0x3e, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe,
+ 0x7f, 0xfc, 0x3f, 0xf8, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 2 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x78, 0x3c, 0xfc, 0x7e, 0xfe, 0xfe, 0xff, 0xfe,
+ 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe,
+ 0x7f, 0xfc, 0x7f, 0xfc, 0x3f, 0xf8, 0x1f, 0xf0,
+ 0x0f, 0xe0, 0x07, 0xc0, 0x03, 0x80, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 3 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x03, 0x80, 0x07, 0xc0, 0x0f, 0xe0,
+ 0x1f, 0xf0, 0x3f, 0xf8, 0x7f, 0xfc, 0xff, 0xfe,
+ 0xff, 0xfe, 0x7f, 0xfc, 0x3f, 0xf8, 0x1f, 0xf0,
+ 0x0f, 0xe0, 0x07, 0xc0, 0x03, 0x80, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 4 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x07, 0xc0, 0x0f, 0xe0,
+ 0x0f, 0xe0, 0x0f, 0xe0, 0x0f, 0xe0, 0x0f, 0xe0,
+ 0x07, 0xc0, 0x03, 0x80, 0x3b, 0xb8, 0x7f, 0xfc,
+ 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe,
+ 0x7f, 0xfc, 0x3b, 0xb8, 0x03, 0x80, 0x03, 0x80,
+ 0x0f, 0xe0, 0x0f, 0xe0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 5 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80,
+ 0x07, 0xc0, 0x0f, 0xe0, 0x1f, 0xf0, 0x3f, 0xf8,
+ 0x7f, 0xfc, 0x7f, 0xfc, 0xff, 0xfe, 0xff, 0xfe,
+ 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, 0x7b, 0xbc,
+ 0x3b, 0xb8, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x0f, 0xe0, 0x0f, 0xe0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 6 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x03, 0xc0, 0x07, 0xe0, 0x0f, 0xf0, 0x0f, 0xf0,
+ 0x0f, 0xf0, 0x0f, 0xf0, 0x07, 0xe0, 0x03, 0xc0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xfc, 0x3f, 0xf8, 0x1f, 0xf0, 0x0f, 0xf0, 0x0f,
+ 0xf0, 0x0f, 0xf0, 0x0f, 0xf8, 0x1f, 0xfc, 0x3f,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 8 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x03, 0xc0, 0x07, 0xe0, 0x0e, 0x70, 0x0c, 0x30,
+ 0x0c, 0x30, 0x0e, 0x70, 0x07, 0xe0, 0x03, 0xc0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 9 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xfc, 0x3f, 0xf8, 0x1f, 0xf1, 0x8f, 0xf3, 0xcf,
+ 0xf3, 0xcf, 0xf1, 0x8f, 0xf8, 0x1f, 0xfc, 0x3f,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 10 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0xfe, 0x03, 0xfe,
+ 0x00, 0x1e, 0x00, 0x3e, 0x00, 0x76, 0x00, 0xe6,
+ 0x01, 0xc6, 0x03, 0x86, 0x3f, 0xe0, 0x7f, 0xf0,
+ 0xf0, 0x78, 0xe0, 0x38, 0xe0, 0x38, 0xe0, 0x38,
+ 0xe0, 0x38, 0xe0, 0x38, 0xe0, 0x38, 0xf0, 0x78,
+ 0x7f, 0xf0, 0x3f, 0xe0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 11 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8,
+ 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c, 0x3f, 0xf8,
+ 0x1f, 0xf0, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x7f, 0xfc, 0x7f, 0xfc, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 12 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x3f, 0xfc, 0x3f, 0xfc,
+ 0x38, 0x1c, 0x38, 0x1c, 0x38, 0x1c, 0x38, 0x1c,
+ 0x3f, 0xfc, 0x3f, 0xfc, 0x38, 0x00, 0x38, 0x00,
+ 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00,
+ 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00,
+ 0xf8, 0x00, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 13 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfe, 0x7f, 0xfe,
+ 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e,
+ 0x7f, 0xfe, 0x7f, 0xfe, 0x70, 0x0e, 0x70, 0x0e,
+ 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e,
+ 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x3e,
+ 0xf0, 0x3c, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 14 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x73, 0x9c, 0x73, 0x9c,
+ 0x3b, 0xb8, 0x1f, 0xf0, 0x0f, 0xe0, 0x7c, 0x7c,
+ 0x7c, 0x7c, 0x0f, 0xe0, 0x1f, 0xf0, 0x3b, 0xb8,
+ 0x73, 0x9c, 0x73, 0x9c, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 15 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xc0, 0x00, 0xf0, 0x00, 0xfc, 0x00, 0xff, 0x00,
+ 0xff, 0xc0, 0xff, 0xf0, 0xff, 0xfc, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xfc, 0xff, 0xf0, 0xff, 0xc0,
+ 0xff, 0x00, 0xfc, 0x00, 0xf0, 0x00, 0xc0, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 16 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x03, 0x00, 0x0f, 0x00, 0x3f, 0x00, 0xff,
+ 0x03, 0xff, 0x0f, 0xff, 0x3f, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0x3f, 0xff, 0x0f, 0xff, 0x03, 0xff,
+ 0x00, 0xff, 0x00, 0x3f, 0x00, 0x0f, 0x00, 0x03,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 17 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x07, 0xc0,
+ 0x0f, 0xe0, 0x1f, 0xf0, 0x3b, 0xb8, 0x73, 0x9c,
+ 0x63, 0x8c, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x63, 0x8c,
+ 0x73, 0x9c, 0x3b, 0xb8, 0x1f, 0xf0, 0x0f, 0xe0,
+ 0x07, 0xc0, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 18 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1c, 0x70, 0x1c, 0x70,
+ 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70,
+ 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70,
+ 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1c, 0x70, 0x1c, 0x70,
+ 0x1c, 0x70, 0x1c, 0x70, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 19 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1f, 0xfe, 0x3f, 0xfe,
+ 0x79, 0xce, 0x71, 0xce, 0x71, 0xce, 0x71, 0xce,
+ 0x71, 0xce, 0x71, 0xce, 0x79, 0xce, 0x3f, 0xce,
+ 0x1f, 0xce, 0x01, 0xce, 0x01, 0xce, 0x01, 0xce,
+ 0x01, 0xce, 0x01, 0xce, 0x01, 0xce, 0x01, 0xce,
+ 0x01, 0xce, 0x01, 0xce, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 20 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x07, 0xe0, 0x0f, 0xf0, 0x1e, 0x78, 0x1c, 0x38,
+ 0x1c, 0x00, 0x1e, 0x00, 0x0f, 0xc0, 0x0f, 0xe0,
+ 0x1c, 0xf0, 0x1c, 0x78, 0x1c, 0x38, 0x1c, 0x38,
+ 0x1c, 0x38, 0x1e, 0x38, 0x0f, 0x38, 0x07, 0xf0,
+ 0x03, 0xf0, 0x00, 0x78, 0x00, 0x38, 0x1c, 0x38,
+ 0x1e, 0x78, 0x0f, 0xf0, 0x07, 0xe0, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 21 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfe, 0x7f, 0xfe,
+ 0x7f, 0xfe, 0x7f, 0xfe, 0x7f, 0xfe, 0x7f, 0xfe,
+ 0x7f, 0xfe, 0x7f, 0xfe, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 22 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x07, 0xc0,
+ 0x0f, 0xe0, 0x1f, 0xf0, 0x3b, 0xb8, 0x73, 0x9c,
+ 0x63, 0x8c, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x63, 0x8c, 0x73, 0x9c, 0x3b, 0xb8,
+ 0x1f, 0xf0, 0x0f, 0xe0, 0x07, 0xc0, 0x03, 0x80,
+ 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 23 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x07, 0xc0,
+ 0x0f, 0xe0, 0x1f, 0xf0, 0x3b, 0xb8, 0x73, 0x9c,
+ 0x63, 0x8c, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 24 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x63, 0x8c,
+ 0x73, 0x9c, 0x3b, 0xb8, 0x1f, 0xf0, 0x0f, 0xe0,
+ 0x07, 0xc0, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 25 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0xc0, 0x00, 0xe0, 0x00, 0x70,
+ 0x00, 0x38, 0x00, 0x1c, 0x7f, 0xfe, 0x7f, 0xfe,
+ 0x7f, 0xfe, 0x00, 0x1c, 0x00, 0x38, 0x00, 0x70,
+ 0x00, 0xe0, 0x00, 0xc0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 26 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x03, 0x00, 0x07, 0x00, 0x0e, 0x00,
+ 0x1c, 0x00, 0x38, 0x00, 0x7f, 0xfe, 0x7f, 0xfe,
+ 0x7f, 0xfe, 0x38, 0x00, 0x1c, 0x00, 0x0e, 0x00,
+ 0x07, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 28 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x06, 0x60, 0x0e, 0x70, 0x1c, 0x38,
+ 0x38, 0x1c, 0x70, 0x0e, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0x70, 0x0e, 0x38, 0x1c, 0x1c, 0x38,
+ 0x0e, 0x70, 0x06, 0x60, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 29 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x01, 0x80, 0x01, 0x80, 0x03, 0xc0, 0x03, 0xc0,
+ 0x07, 0xe0, 0x07, 0xe0, 0x0f, 0xf0, 0x0f, 0xf0,
+ 0x1f, 0xf8, 0x1f, 0xf8, 0x3f, 0xfc, 0x3f, 0xfc,
+ 0x7f, 0xfe, 0x7f, 0xfe, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 30 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x7f, 0xfe, 0x7f, 0xfe,
+ 0x3f, 0xfc, 0x3f, 0xfc, 0x1f, 0xf8, 0x1f, 0xf8,
+ 0x0f, 0xf0, 0x0f, 0xf0, 0x07, 0xe0, 0x07, 0xe0,
+ 0x03, 0xc0, 0x03, 0xc0, 0x01, 0x80, 0x01, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 31 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 32 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 33 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70,
+ 0x1c, 0x70, 0x1c, 0x70, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 34 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1c, 0x70, 0x1c, 0x70,
+ 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x7f, 0xfc,
+ 0x7f, 0xfc, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70,
+ 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x7f, 0xfc,
+ 0x7f, 0xfc, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70,
+ 0x1c, 0x70, 0x1c, 0x70, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 35 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x1f, 0xf0,
+ 0x3f, 0xf8, 0x7b, 0xbc, 0x73, 0x9c, 0x73, 0x80,
+ 0x73, 0x80, 0x73, 0x80, 0x7b, 0x80, 0x3f, 0xf0,
+ 0x1f, 0xf8, 0x03, 0xbc, 0x03, 0x9c, 0x03, 0x9c,
+ 0x03, 0x9c, 0x73, 0x9c, 0x7b, 0xbc, 0x3f, 0xf8,
+ 0x1f, 0xf0, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 36 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1f, 0x1c, 0x3f, 0x9c,
+ 0x3b, 0xb8, 0x3b, 0xb8, 0x3f, 0xf0, 0x1f, 0x70,
+ 0x00, 0xe0, 0x00, 0xe0, 0x01, 0xc0, 0x01, 0xc0,
+ 0x03, 0x80, 0x03, 0x80, 0x07, 0x00, 0x07, 0x00,
+ 0x0e, 0xf8, 0x0f, 0xfc, 0x1d, 0xdc, 0x1d, 0xdc,
+ 0x39, 0xfc, 0x38, 0xf8, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x0f, 0xc0, 0x1f, 0xe0,
+ 0x38, 0x70, 0x38, 0x70, 0x38, 0x70, 0x38, 0x70,
+ 0x38, 0x70, 0x1c, 0xe0, 0x0f, 0xc0, 0x0f, 0x80,
+ 0x1f, 0xce, 0x38, 0xee, 0x70, 0x7c, 0x70, 0x38,
+ 0x70, 0x38, 0x70, 0x38, 0x70, 0x38, 0x78, 0x7c,
+ 0x3f, 0xee, 0x1f, 0xce, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 38 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 39 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xe0, 0x01, 0xc0,
+ 0x03, 0x80, 0x07, 0x00, 0x07, 0x00, 0x0e, 0x00,
+ 0x0e, 0x00, 0x0e, 0x00, 0x0e, 0x00, 0x0e, 0x00,
+ 0x0e, 0x00, 0x0e, 0x00, 0x0e, 0x00, 0x0e, 0x00,
+ 0x0e, 0x00, 0x07, 0x00, 0x07, 0x00, 0x03, 0x80,
+ 0x01, 0xc0, 0x00, 0xe0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 40 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x07, 0x00,
+ 0x03, 0x80, 0x01, 0xc0, 0x01, 0xc0, 0x00, 0xe0,
+ 0x00, 0xe0, 0x00, 0xe0, 0x00, 0xe0, 0x00, 0xe0,
+ 0x00, 0xe0, 0x00, 0xe0, 0x00, 0xe0, 0x00, 0xe0,
+ 0x00, 0xe0, 0x01, 0xc0, 0x01, 0xc0, 0x03, 0x80,
+ 0x07, 0x00, 0x0e, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 41 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x38, 0x38, 0x1c, 0x70,
+ 0x0e, 0xe0, 0x07, 0xc0, 0x03, 0x80, 0x7f, 0xfc,
+ 0x7f, 0xfc, 0x03, 0x80, 0x07, 0xc0, 0x0e, 0xe0,
+ 0x1c, 0x70, 0x38, 0x38, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 42 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x7f, 0xfc,
+ 0x7f, 0xfc, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 43 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x07, 0x00, 0x0e, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 44 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc,
+ 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 45 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 46 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x1c,
+ 0x00, 0x38, 0x00, 0x38, 0x00, 0x70, 0x00, 0x70,
+ 0x00, 0xe0, 0x00, 0xe0, 0x01, 0xc0, 0x01, 0xc0,
+ 0x03, 0x80, 0x03, 0x80, 0x07, 0x00, 0x07, 0x00,
+ 0x0e, 0x00, 0x0e, 0x00, 0x1c, 0x00, 0x1c, 0x00,
+ 0x38, 0x00, 0x38, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8,
+ 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x3c,
+ 0x70, 0x7c, 0x70, 0xfc, 0x71, 0xdc, 0x73, 0x9c,
+ 0x77, 0x1c, 0x7e, 0x1c, 0x7c, 0x1c, 0x78, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c,
+ 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 48 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x07, 0x80,
+ 0x0f, 0x80, 0x1f, 0x80, 0x1f, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x1f, 0xf0, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 49 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8,
+ 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x00, 0x1c, 0x00, 0x38, 0x00, 0x70,
+ 0x00, 0xe0, 0x01, 0xc0, 0x03, 0x80, 0x07, 0x00,
+ 0x0e, 0x00, 0x1c, 0x00, 0x38, 0x00, 0x70, 0x00,
+ 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 50 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8,
+ 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x00, 0x1c,
+ 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x3c, 0x0f, 0xf8,
+ 0x0f, 0xf8, 0x00, 0x3c, 0x00, 0x1c, 0x00, 0x1c,
+ 0x00, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c,
+ 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 51 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x3c,
+ 0x00, 0x7c, 0x00, 0xfc, 0x01, 0xdc, 0x03, 0x9c,
+ 0x07, 0x1c, 0x0e, 0x1c, 0x1c, 0x1c, 0x38, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x7f, 0xfc,
+ 0x7f, 0xfc, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x1c,
+ 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 52 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x7f, 0xf0, 0x7f, 0xf8,
+ 0x00, 0x3c, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x1c,
+ 0x00, 0x1c, 0x00, 0x1c, 0x70, 0x1c, 0x78, 0x1c,
+ 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 53 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf8, 0x3f, 0xf8,
+ 0x78, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x7f, 0xf0, 0x7f, 0xf8,
+ 0x70, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c,
+ 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 54 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x38,
+ 0x00, 0x38, 0x00, 0x70, 0x00, 0x70, 0x00, 0xe0,
+ 0x00, 0xe0, 0x01, 0xc0, 0x01, 0xc0, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 55 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8,
+ 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c, 0x3f, 0xf8,
+ 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c,
+ 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 56 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8,
+ 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c,
+ 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x1c, 0x00, 0x1c,
+ 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x3c,
+ 0x3f, 0xf8, 0x3f, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 58 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x07, 0x00, 0x0e, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 59 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x38,
+ 0x00, 0x70, 0x00, 0xe0, 0x01, 0xc0, 0x03, 0x80,
+ 0x07, 0x00, 0x0e, 0x00, 0x1c, 0x00, 0x38, 0x00,
+ 0x38, 0x00, 0x1c, 0x00, 0x0e, 0x00, 0x07, 0x00,
+ 0x03, 0x80, 0x01, 0xc0, 0x00, 0xe0, 0x00, 0x70,
+ 0x00, 0x38, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 60 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 61 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x38, 0x00, 0x1c, 0x00,
+ 0x0e, 0x00, 0x07, 0x00, 0x03, 0x80, 0x01, 0xc0,
+ 0x00, 0xe0, 0x00, 0x70, 0x00, 0x38, 0x00, 0x1c,
+ 0x00, 0x1c, 0x00, 0x38, 0x00, 0x70, 0x00, 0xe0,
+ 0x01, 0xc0, 0x03, 0x80, 0x07, 0x00, 0x0e, 0x00,
+ 0x1c, 0x00, 0x38, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 62 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8,
+ 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x00, 0x38, 0x00, 0x70, 0x00, 0xe0,
+ 0x01, 0xc0, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 63 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf8, 0x3f, 0xfc,
+ 0x78, 0x0e, 0x70, 0x06, 0x71, 0xfe, 0x73, 0xfe,
+ 0x77, 0x8e, 0x77, 0x0e, 0x77, 0x0e, 0x77, 0x0e,
+ 0x77, 0x0e, 0x77, 0x0e, 0x77, 0x0e, 0x77, 0x9e,
+ 0x73, 0xfe, 0x71, 0xf6, 0x70, 0x00, 0x78, 0x00,
+ 0x3f, 0xfe, 0x1f, 0xfe, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 64 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8,
+ 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x7f, 0xfc, 0x7f, 0xfc, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 65 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x7f, 0xf0, 0x7f, 0xf8,
+ 0x70, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x38, 0x7f, 0xf0, 0x7f, 0xf0,
+ 0x70, 0x38, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x3c,
+ 0x7f, 0xf8, 0x7f, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 66 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8,
+ 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c,
+ 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x7f, 0xc0, 0x7f, 0xf0,
+ 0x70, 0x78, 0x70, 0x38, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x38, 0x70, 0x78,
+ 0x7f, 0xf0, 0x7f, 0xc0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 68 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x7f, 0xe0,
+ 0x7f, 0xe0, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 69 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x7f, 0xe0,
+ 0x7f, 0xe0, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 70 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8,
+ 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x71, 0xfc,
+ 0x71, 0xfc, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c,
+ 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 71 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x7f, 0xfc,
+ 0x7f, 0xfc, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 72 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x0f, 0xe0, 0x0f, 0xe0,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x0f, 0xe0, 0x0f, 0xe0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 73 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x00, 0xfe,
+ 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38,
+ 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38,
+ 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38,
+ 0x70, 0x38, 0x70, 0x38, 0x70, 0x38, 0x78, 0x78,
+ 0x3f, 0xf0, 0x1f, 0xe0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 74 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x70, 0x0c, 0x70, 0x1c,
+ 0x70, 0x38, 0x70, 0x70, 0x70, 0xe0, 0x71, 0xc0,
+ 0x73, 0x80, 0x77, 0x00, 0x7e, 0x00, 0x7c, 0x00,
+ 0x7c, 0x00, 0x7e, 0x00, 0x77, 0x00, 0x73, 0x80,
+ 0x71, 0xc0, 0x70, 0xe0, 0x70, 0x70, 0x70, 0x38,
+ 0x70, 0x1c, 0x70, 0x0c, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 75 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 76 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x70, 0x0e, 0x70, 0x0e,
+ 0x78, 0x1e, 0x7c, 0x3e, 0x7e, 0x7e, 0x7e, 0x7e,
+ 0x77, 0xee, 0x73, 0xce, 0x73, 0xce, 0x71, 0x8e,
+ 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e,
+ 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e,
+ 0x70, 0x0e, 0x70, 0x0e, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c,
+ 0x7c, 0x1c, 0x7e, 0x1c, 0x77, 0x1c, 0x73, 0x9c,
+ 0x71, 0xdc, 0x70, 0xfc, 0x70, 0x7c, 0x70, 0x3c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 78 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8,
+ 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c,
+ 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 79 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x7f, 0xf0, 0x7f, 0xf8,
+ 0x70, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x3c,
+ 0x7f, 0xf8, 0x7f, 0xf0, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 80 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8,
+ 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x73, 0x9c, 0x79, 0xfc,
+ 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x38, 0x00, 0x1c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 81 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x7f, 0xf0, 0x7f, 0xf8,
+ 0x70, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x3c,
+ 0x7f, 0xf8, 0x7f, 0xf0, 0x7e, 0x00, 0x77, 0x00,
+ 0x73, 0x80, 0x71, 0xc0, 0x70, 0xe0, 0x70, 0x70,
+ 0x70, 0x38, 0x70, 0x1c, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 82 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8,
+ 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x78, 0x00, 0x3f, 0xf0,
+ 0x1f, 0xf8, 0x00, 0x3c, 0x00, 0x1c, 0x00, 0x1c,
+ 0x00, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c,
+ 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 83 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 84 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c,
+ 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 85 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x38, 0x38,
+ 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38,
+ 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70,
+ 0x0e, 0xe0, 0x0e, 0xe0, 0x0e, 0xe0, 0x07, 0xc0,
+ 0x07, 0xc0, 0x07, 0xc0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 86 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x70, 0x0e, 0x70, 0x0e,
+ 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e,
+ 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e,
+ 0x71, 0x8e, 0x73, 0xce, 0x73, 0xce, 0x77, 0xee,
+ 0x7e, 0x7e, 0x7e, 0x7e, 0x7c, 0x3e, 0x78, 0x1e,
+ 0x70, 0x0e, 0x70, 0x0e, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x70, 0x1c, 0x70, 0x1c,
+ 0x38, 0x38, 0x38, 0x38, 0x1c, 0x70, 0x1c, 0x70,
+ 0x0e, 0xe0, 0x0e, 0xe0, 0x07, 0xc0, 0x07, 0xc0,
+ 0x07, 0xc0, 0x07, 0xc0, 0x0e, 0xe0, 0x0e, 0xe0,
+ 0x1c, 0x70, 0x1c, 0x70, 0x38, 0x38, 0x38, 0x38,
+ 0x70, 0x1c, 0x70, 0x1c, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 88 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x38, 0x38, 0x38, 0x38, 0x1c, 0x70,
+ 0x1c, 0x70, 0x0e, 0xe0, 0x0e, 0xe0, 0x07, 0xc0,
+ 0x07, 0xc0, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 89 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc,
+ 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x38,
+ 0x00, 0x70, 0x00, 0xe0, 0x01, 0xc0, 0x03, 0x80,
+ 0x07, 0x00, 0x0e, 0x00, 0x1c, 0x00, 0x38, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 90 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x0f, 0xf0, 0x0f, 0xf0,
+ 0x0e, 0x00, 0x0e, 0x00, 0x0e, 0x00, 0x0e, 0x00,
+ 0x0e, 0x00, 0x0e, 0x00, 0x0e, 0x00, 0x0e, 0x00,
+ 0x0e, 0x00, 0x0e, 0x00, 0x0e, 0x00, 0x0e, 0x00,
+ 0x0e, 0x00, 0x0e, 0x00, 0x0e, 0x00, 0x0e, 0x00,
+ 0x0f, 0xf0, 0x0f, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 91 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x38, 0x00, 0x38, 0x00,
+ 0x1c, 0x00, 0x1c, 0x00, 0x0e, 0x00, 0x0e, 0x00,
+ 0x07, 0x00, 0x07, 0x00, 0x03, 0x80, 0x03, 0x80,
+ 0x01, 0xc0, 0x01, 0xc0, 0x00, 0xe0, 0x00, 0xe0,
+ 0x00, 0x70, 0x00, 0x70, 0x00, 0x38, 0x00, 0x38,
+ 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 92 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x0f, 0xf0, 0x0f, 0xf0,
+ 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70,
+ 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70,
+ 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70,
+ 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70,
+ 0x0f, 0xf0, 0x0f, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 93 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x03, 0x80, 0x07, 0xc0, 0x0e, 0xe0, 0x1c, 0x70,
+ 0x38, 0x38, 0x70, 0x1c, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 94 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc,
+ 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 95 */
+ 0x00, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x0e, 0x00,
+ 0x07, 0x00, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 96 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x3f, 0xf0, 0x3f, 0xf8, 0x00, 0x3c, 0x00, 0x1c,
+ 0x00, 0x1c, 0x1f, 0xfc, 0x3f, 0xfc, 0x78, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c,
+ 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x7f, 0xf0, 0x7f, 0xf8, 0x70, 0x3c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x3c,
+ 0x7f, 0xf8, 0x7f, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 98 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x1c, 0x78, 0x3c,
+ 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 99 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x1c,
+ 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x1c,
+ 0x1f, 0xfc, 0x3f, 0xfc, 0x78, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c,
+ 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 100 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x7f, 0xfc, 0x7f, 0xfc,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x78, 0x1c,
+ 0x3f, 0xfc, 0x1f, 0xf8, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 101 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x01, 0xfe,
+ 0x03, 0xc0, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x3f, 0xf8, 0x3f, 0xf8, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 102 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x1f, 0xfc, 0x3f, 0xfc, 0x78, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c,
+ 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x1c, 0x00, 0x1c,
+ 0x00, 0x3c, 0x3f, 0xf8, 0x3f, 0xf0, 0x00, 0x00, /* 103 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x7f, 0xf0, 0x7f, 0xf8, 0x70, 0x3c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 104 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x0f, 0x80, 0x0f, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x0f, 0xe0, 0x0f, 0xe0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 105 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x00, 0x38,
+ 0x00, 0x38, 0x00, 0x38, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0xf8, 0x00, 0xf8, 0x00, 0x38, 0x00, 0x38,
+ 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38,
+ 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38,
+ 0x00, 0x38, 0x00, 0x38, 0x38, 0x38, 0x38, 0x38,
+ 0x3c, 0x78, 0x1f, 0xf0, 0x0f, 0xe0, 0x00, 0x00, /* 106 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x38, 0x00, 0x38, 0x00,
+ 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00,
+ 0x38, 0x1c, 0x38, 0x38, 0x38, 0x70, 0x38, 0xe0,
+ 0x39, 0xc0, 0x3b, 0x80, 0x3f, 0x00, 0x3f, 0x00,
+ 0x3b, 0x80, 0x39, 0xc0, 0x38, 0xe0, 0x38, 0x70,
+ 0x38, 0x38, 0x38, 0x1c, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 107 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x0f, 0x80, 0x0f, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x0f, 0xe0, 0x0f, 0xe0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 108 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x7f, 0xf0, 0x7f, 0xf8, 0x73, 0xbc, 0x73, 0x9c,
+ 0x73, 0x9c, 0x73, 0x9c, 0x73, 0x9c, 0x73, 0x9c,
+ 0x73, 0x9c, 0x73, 0x9c, 0x73, 0x9c, 0x73, 0x9c,
+ 0x73, 0x9c, 0x73, 0x9c, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 109 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x7f, 0xf0, 0x7f, 0xf8, 0x70, 0x3c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 110 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c,
+ 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 111 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x7f, 0xf0, 0x7f, 0xf8, 0x70, 0x3c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x3c,
+ 0x7f, 0xf8, 0x7f, 0xf0, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x00, 0x00, /* 112 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x1f, 0xfc, 0x3f, 0xfc, 0x78, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c,
+ 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x1c, 0x00, 0x1c,
+ 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x00, /* 113 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x73, 0xfc, 0x77, 0xfc, 0x7e, 0x00, 0x7c, 0x00,
+ 0x78, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 114 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x00,
+ 0x70, 0x00, 0x78, 0x00, 0x3f, 0xf0, 0x1f, 0xf8,
+ 0x00, 0x3c, 0x00, 0x1c, 0x00, 0x1c, 0x78, 0x3c,
+ 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 115 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x07, 0x00, 0x07, 0x00,
+ 0x07, 0x00, 0x07, 0x00, 0x07, 0x00, 0x07, 0x00,
+ 0x7f, 0xf0, 0x7f, 0xf0, 0x07, 0x00, 0x07, 0x00,
+ 0x07, 0x00, 0x07, 0x00, 0x07, 0x00, 0x07, 0x00,
+ 0x07, 0x00, 0x07, 0x00, 0x07, 0x00, 0x07, 0x80,
+ 0x03, 0xfc, 0x01, 0xfc, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 116 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c,
+ 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 117 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x38, 0x38,
+ 0x38, 0x38, 0x38, 0x38, 0x1c, 0x70, 0x1c, 0x70,
+ 0x1c, 0x70, 0x0e, 0xe0, 0x0e, 0xe0, 0x07, 0xc0,
+ 0x07, 0xc0, 0x07, 0xc0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 118 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x73, 0x9c, 0x73, 0x9c, 0x73, 0x9c, 0x73, 0x9c,
+ 0x73, 0x9c, 0x73, 0x9c, 0x73, 0x9c, 0x7b, 0xbc,
+ 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 119 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x38, 0x38,
+ 0x1c, 0x70, 0x0e, 0xe0, 0x07, 0xc0, 0x07, 0xc0,
+ 0x0e, 0xe0, 0x1c, 0x70, 0x38, 0x38, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 120 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c,
+ 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x1c, 0x00, 0x1c,
+ 0x00, 0x3c, 0x3f, 0xf8, 0x3f, 0xf0, 0x00, 0x00, /* 121 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x38, 0x00, 0x70,
+ 0x00, 0xe0, 0x01, 0xc0, 0x03, 0x80, 0x07, 0x00,
+ 0x0e, 0x00, 0x1c, 0x00, 0x38, 0x00, 0x70, 0x00,
+ 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 122 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xf0, 0x03, 0xf0,
+ 0x07, 0x80, 0x07, 0x00, 0x07, 0x00, 0x07, 0x00,
+ 0x07, 0x00, 0x07, 0x00, 0x07, 0x00, 0x3e, 0x00,
+ 0x3e, 0x00, 0x07, 0x00, 0x07, 0x00, 0x07, 0x00,
+ 0x07, 0x00, 0x07, 0x00, 0x07, 0x00, 0x07, 0x80,
+ 0x03, 0xf0, 0x01, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 123 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 124 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x3e, 0x00, 0x3f, 0x00,
+ 0x07, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x01, 0xf0,
+ 0x01, 0xf0, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x07, 0x80,
+ 0x3f, 0x00, 0x3e, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 125 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x1e, 0x1c, 0x3f, 0x1c, 0x77, 0x9c, 0x73, 0xdc,
+ 0x71, 0xf8, 0x70, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 126 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x07, 0xc0,
+ 0x0f, 0xe0, 0x1e, 0xf0, 0x3c, 0x78, 0x78, 0x3c,
+ 0xf0, 0x1e, 0xe0, 0x0e, 0xe0, 0x0e, 0xe0, 0x0e,
+ 0xe0, 0x0e, 0xe0, 0x0e, 0xe0, 0x0e, 0xe0, 0x0e,
+ 0xff, 0xfe, 0xff, 0xfe, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 127 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8,
+ 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c,
+ 0x3f, 0xf8, 0x1f, 0xf0, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x07, 0x00, 0x0e, 0x00, 0x00, 0x00, /* 128 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1c, 0x70, 0x1c, 0x70,
+ 0x1c, 0x70, 0x1c, 0x70, 0x00, 0x00, 0x00, 0x00,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c,
+ 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 129 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0xe0,
+ 0x01, 0xc0, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x7f, 0xfc, 0x7f, 0xfc,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x78, 0x1c,
+ 0x3f, 0xfc, 0x1f, 0xf8, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 130 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x07, 0xc0,
+ 0x0e, 0xe0, 0x1c, 0x70, 0x00, 0x00, 0x00, 0x00,
+ 0x3f, 0xf0, 0x3f, 0xf8, 0x00, 0x3c, 0x00, 0x1c,
+ 0x00, 0x1c, 0x1f, 0xfc, 0x3f, 0xfc, 0x78, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c,
+ 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 131 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1c, 0x70, 0x1c, 0x70,
+ 0x1c, 0x70, 0x1c, 0x70, 0x00, 0x00, 0x00, 0x00,
+ 0x3f, 0xf0, 0x3f, 0xf8, 0x00, 0x3c, 0x00, 0x1c,
+ 0x00, 0x1c, 0x1f, 0xfc, 0x3f, 0xfc, 0x78, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c,
+ 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 132 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x0e, 0x00,
+ 0x07, 0x00, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x3f, 0xf0, 0x3f, 0xf8, 0x00, 0x3c, 0x00, 0x1c,
+ 0x00, 0x1c, 0x1f, 0xfc, 0x3f, 0xfc, 0x78, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c,
+ 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 133 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x07, 0xc0, 0x0e, 0xe0,
+ 0x0e, 0xe0, 0x0e, 0xe0, 0x07, 0xc0, 0x00, 0x00,
+ 0x3f, 0xf0, 0x3f, 0xf8, 0x00, 0x3c, 0x00, 0x1c,
+ 0x00, 0x1c, 0x1f, 0xfc, 0x3f, 0xfc, 0x78, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c,
+ 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 134 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x1c, 0x78, 0x3c,
+ 0x3f, 0xf8, 0x1f, 0xf0, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x07, 0x00, 0x0e, 0x00, 0x00, 0x00, /* 135 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x07, 0xc0,
+ 0x0e, 0xe0, 0x1c, 0x70, 0x00, 0x00, 0x00, 0x00,
+ 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x7f, 0xfc, 0x7f, 0xfc,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x78, 0x1c,
+ 0x3f, 0xfc, 0x1f, 0xf8, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 136 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1c, 0x70, 0x1c, 0x70,
+ 0x1c, 0x70, 0x1c, 0x70, 0x00, 0x00, 0x00, 0x00,
+ 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x7f, 0xfc, 0x7f, 0xfc,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x78, 0x1c,
+ 0x3f, 0xfc, 0x1f, 0xf8, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 137 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x0e, 0x00,
+ 0x07, 0x00, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x7f, 0xfc, 0x7f, 0xfc,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x78, 0x1c,
+ 0x3f, 0xfc, 0x1f, 0xf8, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 138 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1c, 0x70, 0x1c, 0x70,
+ 0x1c, 0x70, 0x1c, 0x70, 0x00, 0x00, 0x00, 0x00,
+ 0x0f, 0x80, 0x0f, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x0f, 0xe0, 0x0f, 0xe0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 139 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x07, 0xc0,
+ 0x0e, 0xe0, 0x1c, 0x70, 0x00, 0x00, 0x00, 0x00,
+ 0x0f, 0x80, 0x0f, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x0f, 0xe0, 0x0f, 0xe0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 140 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x0e, 0x00,
+ 0x07, 0x00, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x0f, 0x80, 0x0f, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x0f, 0xe0, 0x0f, 0xe0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 141 */
+ 0x00, 0x00, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70,
+ 0x1c, 0x70, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8,
+ 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x7f, 0xfc, 0x7f, 0xfc, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 142 */
+ 0x00, 0x00, 0x07, 0xc0, 0x0e, 0xe0, 0x0e, 0xe0,
+ 0x0e, 0xe0, 0x07, 0xc0, 0x1f, 0xf0, 0x3f, 0xf8,
+ 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x7f, 0xfc, 0x7f, 0xfc, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 143 */
+ 0x00, 0x00, 0x00, 0x70, 0x00, 0xe0, 0x01, 0xc0,
+ 0x03, 0x80, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x7f, 0xe0,
+ 0x7f, 0xe0, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 144 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x7e, 0xf8, 0x7f, 0xfc, 0x03, 0x9e, 0x03, 0x8e,
+ 0x03, 0x8e, 0x3f, 0x8e, 0x7f, 0xfe, 0xf3, 0xfe,
+ 0xe3, 0x80, 0xe3, 0x80, 0xe3, 0x80, 0xf3, 0xce,
+ 0x7f, 0xfe, 0x3e, 0xfc, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 145 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x3f, 0xfe, 0x7f, 0xfe,
+ 0xf1, 0xc0, 0xe1, 0xc0, 0xe1, 0xc0, 0xe1, 0xc0,
+ 0xe1, 0xc0, 0xe1, 0xc0, 0xe1, 0xc0, 0xff, 0xfe,
+ 0xff, 0xfe, 0xe1, 0xc0, 0xe1, 0xc0, 0xe1, 0xc0,
+ 0xe1, 0xc0, 0xe1, 0xc0, 0xe1, 0xc0, 0xe1, 0xc0,
+ 0xe1, 0xfe, 0xe1, 0xfe, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 146 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x07, 0xc0,
+ 0x0e, 0xe0, 0x1c, 0x70, 0x00, 0x00, 0x00, 0x00,
+ 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c,
+ 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 147 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1c, 0x70, 0x1c, 0x70,
+ 0x1c, 0x70, 0x1c, 0x70, 0x00, 0x00, 0x00, 0x00,
+ 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c,
+ 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 148 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x0e, 0x00,
+ 0x07, 0x00, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c,
+ 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 149 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x07, 0xc0,
+ 0x0e, 0xe0, 0x1c, 0x70, 0x00, 0x00, 0x00, 0x00,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c,
+ 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 150 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x0e, 0x00,
+ 0x07, 0x00, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c,
+ 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 151 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1c, 0x70, 0x1c, 0x70,
+ 0x1c, 0x70, 0x1c, 0x70, 0x00, 0x00, 0x00, 0x00,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c,
+ 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x1c, 0x00, 0x1c,
+ 0x00, 0x3c, 0x3f, 0xf8, 0x3f, 0xf0, 0x00, 0x00, /* 152 */
+ 0x00, 0x00, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70,
+ 0x1c, 0x70, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8,
+ 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c,
+ 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 153 */
+ 0x00, 0x00, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70,
+ 0x1c, 0x70, 0x00, 0x00, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c,
+ 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 154 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x1f, 0xf0, 0x3f, 0xf8, 0x7b, 0xbc, 0x73, 0x9c,
+ 0x73, 0x80, 0x73, 0x80, 0x73, 0x80, 0x73, 0x80,
+ 0x73, 0x80, 0x73, 0x80, 0x73, 0x9c, 0x7b, 0xbc,
+ 0x3f, 0xf8, 0x1f, 0xf0, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 155 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x07, 0xe0, 0x0f, 0xf0,
+ 0x1e, 0x78, 0x1c, 0x38, 0x1c, 0x00, 0x1c, 0x00,
+ 0x1c, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x7f, 0xe0,
+ 0x7f, 0xe0, 0x1c, 0x00, 0x1c, 0x00, 0x1c, 0x00,
+ 0x1c, 0x00, 0x1c, 0x00, 0x1c, 0x1c, 0x1c, 0x1c,
+ 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 156 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x70, 0x1c, 0x70, 0x1c,
+ 0x38, 0x38, 0x38, 0x38, 0x1c, 0x70, 0x1c, 0x70,
+ 0x0e, 0xe0, 0x0e, 0xe0, 0x07, 0xc0, 0x07, 0xc0,
+ 0x03, 0x80, 0x03, 0x80, 0x3f, 0xf8, 0x3f, 0xf8,
+ 0x03, 0x80, 0x03, 0x80, 0x3f, 0xf8, 0x3f, 0xf8,
+ 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 157 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0xff, 0x80,
+ 0xe3, 0xc0, 0xe1, 0xc0, 0xe1, 0xc0, 0xe1, 0xc0,
+ 0xe1, 0xc0, 0xe1, 0xc0, 0xe3, 0xc0, 0xff, 0xf0,
+ 0xff, 0x70, 0xe0, 0x70, 0xe3, 0xfe, 0xe3, 0xfe,
+ 0xe0, 0x70, 0xe0, 0x70, 0xe0, 0x70, 0xe0, 0x70,
+ 0xe0, 0x7e, 0xe0, 0x3e, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 158 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xf8, 0x03, 0xfc,
+ 0x03, 0x9c, 0x03, 0x9c, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x1f, 0xf0, 0x1f, 0xf0,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x73, 0x80, 0x73, 0x80,
+ 0x7f, 0x80, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, /* 159 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0xe0,
+ 0x01, 0xc0, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x3f, 0xf0, 0x3f, 0xf8, 0x00, 0x3c, 0x00, 0x1c,
+ 0x00, 0x1c, 0x1f, 0xfc, 0x3f, 0xfc, 0x78, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c,
+ 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 160 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0xe0,
+ 0x01, 0xc0, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x0f, 0x80, 0x0f, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x0f, 0xe0, 0x0f, 0xe0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 161 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0xe0,
+ 0x01, 0xc0, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c,
+ 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 162 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0xe0,
+ 0x01, 0xc0, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c,
+ 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 163 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1f, 0x38, 0x3b, 0xb8,
+ 0x3b, 0xb8, 0x39, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x7f, 0xf0, 0x7f, 0xf8, 0x70, 0x3c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 164 */
+ 0x00, 0x00, 0x1f, 0x38, 0x3b, 0xb8, 0x3b, 0xb8,
+ 0x39, 0xf0, 0x00, 0x00, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c,
+ 0x7c, 0x1c, 0x7e, 0x1c, 0x77, 0x1c, 0x73, 0x9c,
+ 0x71, 0xdc, 0x70, 0xfc, 0x70, 0x7c, 0x70, 0x3c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 165 */
+ 0x00, 0x00, 0x00, 0x00, 0x1f, 0xe0, 0x1f, 0xf0,
+ 0x00, 0x38, 0x00, 0x38, 0x0f, 0xf8, 0x1f, 0xf8,
+ 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x1f, 0xf8,
+ 0x0f, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x3f, 0xf8,
+ 0x3f, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 166 */
+ 0x00, 0x00, 0x00, 0x00, 0x0f, 0xe0, 0x1f, 0xf0,
+ 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38,
+ 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x1f, 0xf0,
+ 0x0f, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x3f, 0xf8,
+ 0x3f, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 167 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x07, 0x00,
+ 0x0e, 0x00, 0x1c, 0x00, 0x38, 0x00, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c,
+ 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 168 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x7f, 0xfc, 0x7f, 0xfc, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 169 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x1c, 0x00, 0x1c,
+ 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x1c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 170 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x3c, 0x00,
+ 0x7c, 0x06, 0x1c, 0x0e, 0x1c, 0x1c, 0x1c, 0x38,
+ 0x1c, 0x70, 0x1c, 0xe0, 0x1d, 0xc0, 0x03, 0x80,
+ 0x07, 0x00, 0x0e, 0xfc, 0x1d, 0xfe, 0x39, 0xce,
+ 0x71, 0xce, 0x60, 0x1c, 0x00, 0x38, 0x00, 0x70,
+ 0x00, 0xfe, 0x01, 0xfe, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 171 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x1e, 0x00,
+ 0x3e, 0x00, 0x0e, 0x00, 0x0e, 0x06, 0x0e, 0x0e,
+ 0x0e, 0x1c, 0x0e, 0x38, 0x0e, 0x70, 0x00, 0xe0,
+ 0x01, 0xce, 0x03, 0x9e, 0x07, 0x3e, 0x0e, 0x7e,
+ 0x1c, 0xee, 0x39, 0xce, 0x73, 0xfe, 0x63, 0xfe,
+ 0x00, 0x0e, 0x00, 0x0e, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 172 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 173 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x01, 0xce, 0x03, 0x9c, 0x07, 0x38, 0x0e, 0x70,
+ 0x1c, 0xe0, 0x39, 0xc0, 0x73, 0x80, 0x73, 0x80,
+ 0x39, 0xc0, 0x1c, 0xe0, 0x0e, 0x70, 0x07, 0x38,
+ 0x03, 0x9c, 0x01, 0xce, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 174 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x73, 0x80, 0x39, 0xc0, 0x1c, 0xe0, 0x0e, 0x70,
+ 0x07, 0x38, 0x03, 0x9c, 0x01, 0xce, 0x01, 0xce,
+ 0x03, 0x9c, 0x07, 0x38, 0x0e, 0x70, 0x1c, 0xe0,
+ 0x39, 0xc0, 0x73, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 175 */
+ 0xaa, 0xaa, 0x00, 0x00, 0xaa, 0xaa, 0x00, 0x00,
+ 0xaa, 0xaa, 0x00, 0x00, 0xaa, 0xaa, 0x00, 0x00,
+ 0xaa, 0xaa, 0x00, 0x00, 0xaa, 0xaa, 0x00, 0x00,
+ 0xaa, 0xaa, 0x00, 0x00, 0xaa, 0xaa, 0x00, 0x00,
+ 0xaa, 0xaa, 0x00, 0x00, 0xaa, 0xaa, 0x00, 0x00,
+ 0xaa, 0xaa, 0x00, 0x00, 0xaa, 0xaa, 0x00, 0x00,
+ 0xaa, 0xaa, 0x00, 0x00, 0xaa, 0xaa, 0x00, 0x00,
+ 0xaa, 0xaa, 0x00, 0x00, 0xaa, 0xaa, 0x00, 0x00, /* 176 */
+ 0xaa, 0xaa, 0x55, 0x55, 0xaa, 0xaa, 0x55, 0x55,
+ 0xaa, 0xaa, 0x55, 0x55, 0xaa, 0xaa, 0x55, 0x55,
+ 0xaa, 0xaa, 0x55, 0x55, 0xaa, 0xaa, 0x55, 0x55,
+ 0xaa, 0xaa, 0x55, 0x55, 0xaa, 0xaa, 0x55, 0x55,
+ 0xaa, 0xaa, 0x55, 0x55, 0xaa, 0xaa, 0x55, 0x55,
+ 0xaa, 0xaa, 0x55, 0x55, 0xaa, 0xaa, 0x55, 0x55,
+ 0xaa, 0xaa, 0x55, 0x55, 0xaa, 0xaa, 0x55, 0x55,
+ 0xaa, 0xaa, 0x55, 0x55, 0xaa, 0xaa, 0x55, 0x55, /* 177 */
+ 0xff, 0xff, 0xaa, 0xaa, 0xff, 0xff, 0xaa, 0xaa,
+ 0xff, 0xff, 0xaa, 0xaa, 0xff, 0xff, 0xaa, 0xaa,
+ 0xff, 0xff, 0xaa, 0xaa, 0xff, 0xff, 0xaa, 0xaa,
+ 0xff, 0xff, 0xaa, 0xaa, 0xff, 0xff, 0xaa, 0xaa,
+ 0xff, 0xff, 0xaa, 0xaa, 0xff, 0xff, 0xaa, 0xaa,
+ 0xff, 0xff, 0xaa, 0xaa, 0xff, 0xff, 0xaa, 0xaa,
+ 0xff, 0xff, 0xaa, 0xaa, 0xff, 0xff, 0xaa, 0xaa,
+ 0xff, 0xff, 0xaa, 0xaa, 0xff, 0xff, 0xaa, 0xaa, /* 178 */
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, /* 179 */
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0xff, 0x80, 0xff, 0x80,
+ 0xff, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, /* 180 */
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0xff, 0x80, 0xff, 0x80, 0xff, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0xff, 0x80, 0xff, 0x80, 0xff, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, /* 181 */
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0xfe, 0x70, 0xfe, 0x70,
+ 0xfe, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, /* 182 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xf0, 0xff, 0xf0,
+ 0xff, 0xf0, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, /* 183 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0x80, 0xff, 0x80, 0xff, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0xff, 0x80, 0xff, 0x80, 0xff, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, /* 184 */
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0xfe, 0x70, 0xfe, 0x70, 0xfe, 0x70, 0x00, 0x70,
+ 0x00, 0x70, 0xfe, 0x70, 0xfe, 0x70, 0xfe, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, /* 185 */
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, /* 186 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xf0, 0xff, 0xf0, 0xff, 0xf0, 0x00, 0x70,
+ 0x00, 0x70, 0xfe, 0x70, 0xfe, 0x70, 0xfe, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, /* 187 */
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0xfe, 0x70, 0xfe, 0x70, 0xfe, 0x70, 0x00, 0x70,
+ 0x00, 0x70, 0xff, 0xf0, 0xff, 0xf0, 0xff, 0xf0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 188 */
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0xff, 0xf0, 0xff, 0xf0,
+ 0xff, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 189 */
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0xff, 0x80, 0xff, 0x80, 0xff, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0xff, 0x80, 0xff, 0x80, 0xff, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 190 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0x80, 0xff, 0x80,
+ 0xff, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, /* 191 */
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0xff, 0x03, 0xff,
+ 0x03, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 192 */
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 193 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, /* 194 */
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0xff, 0x03, 0xff,
+ 0x03, 0xff, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, /* 195 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 196 */
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, /* 197 */
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0xff, 0x03, 0xff, 0x03, 0xff, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0xff, 0x03, 0xff, 0x03, 0xff,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, /* 198 */
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x7f, 0x0e, 0x7f,
+ 0x0e, 0x7f, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, /* 199 */
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x7f, 0x0e, 0x7f, 0x0e, 0x7f, 0x0e, 0x00,
+ 0x0e, 0x00, 0x0f, 0xff, 0x0f, 0xff, 0x0f, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 200 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x0f, 0xff, 0x0f, 0xff, 0x0f, 0xff, 0x0e, 0x00,
+ 0x0e, 0x00, 0x0e, 0x7f, 0x0e, 0x7f, 0x0e, 0x7f,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, /* 201 */
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0xfe, 0x7f, 0xfe, 0x7f, 0xfe, 0x7f, 0x00, 0x00,
+ 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 202 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00,
+ 0x00, 0x00, 0xfe, 0x7f, 0xfe, 0x7f, 0xfe, 0x7f,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, /* 203 */
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x7f, 0x0e, 0x7f, 0x0e, 0x7f, 0x0e, 0x00,
+ 0x0e, 0x00, 0x0e, 0x7f, 0x0e, 0x7f, 0x0e, 0x7f,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, /* 204 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00,
+ 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 205 */
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0xfe, 0x7f, 0xfe, 0x7f, 0xfe, 0x7f, 0x00, 0x00,
+ 0x00, 0x00, 0xfe, 0x7f, 0xfe, 0x7f, 0xfe, 0x7f,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, /* 206 */
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00,
+ 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 207 */
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 208 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00,
+ 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, /* 209 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, /* 210 */
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0f, 0xff, 0x0f, 0xff,
+ 0x0f, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 211 */
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0xff, 0x03, 0xff, 0x03, 0xff, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0xff, 0x03, 0xff, 0x03, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 212 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x03, 0xff, 0x03, 0xff, 0x03, 0xff, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0xff, 0x03, 0xff, 0x03, 0xff,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, /* 213 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x0f, 0xff, 0x0f, 0xff,
+ 0x0f, 0xff, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, /* 214 */
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, /* 215 */
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x03, 0x80,
+ 0x03, 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, /* 216 */
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0xff, 0x80, 0xff, 0x80,
+ 0xff, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 217 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0xff, 0x03, 0xff,
+ 0x03, 0xff, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, /* 218 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 219 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 220 */
+ 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00,
+ 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00,
+ 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00,
+ 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00,
+ 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00,
+ 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00,
+ 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00,
+ 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, /* 221 */
+ 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff,
+ 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff,
+ 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff,
+ 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff,
+ 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff,
+ 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff,
+ 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff,
+ 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, /* 222 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 223 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x1f, 0xee, 0x3f, 0xfe, 0x78, 0x3c, 0x70, 0x38,
+ 0x70, 0x38, 0x70, 0x38, 0x70, 0x38, 0x70, 0x38,
+ 0x70, 0x38, 0x70, 0x38, 0x70, 0x38, 0x78, 0x3c,
+ 0x3f, 0xfe, 0x1f, 0xee, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 224 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x3f, 0xe0, 0x7f, 0xf0,
+ 0x70, 0x78, 0x70, 0x38, 0x70, 0x38, 0x70, 0x38,
+ 0x70, 0x38, 0x70, 0x70, 0x7f, 0xf0, 0x7f, 0xf0,
+ 0x70, 0x38, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x3c,
+ 0x7f, 0xf8, 0x7f, 0xf0, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x00, 0x00, /* 225 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 226 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x7f, 0xfc, 0x7f, 0xfc, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 227 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc,
+ 0x70, 0x00, 0x38, 0x00, 0x1c, 0x00, 0x0e, 0x00,
+ 0x07, 0x00, 0x03, 0x80, 0x01, 0xc0, 0x00, 0xe0,
+ 0x00, 0xe0, 0x01, 0xc0, 0x03, 0x80, 0x07, 0x00,
+ 0x0e, 0x00, 0x1c, 0x00, 0x38, 0x00, 0x70, 0x00,
+ 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 228 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x1f, 0xfe, 0x3f, 0xfe, 0x78, 0xf0, 0x70, 0x78,
+ 0x70, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c,
+ 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 229 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x3c, 0x70, 0x7c, 0x70, 0xfc,
+ 0x7f, 0xdc, 0x7f, 0x9c, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x00, 0x00, /* 230 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x7f, 0xfc, 0x7f, 0xfc, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0xc0,
+ 0x01, 0xf8, 0x00, 0xf8, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 231 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80,
+ 0x1f, 0xf0, 0x3f, 0xf8, 0x7b, 0xbc, 0x73, 0x9c,
+ 0x73, 0x9c, 0x73, 0x9c, 0x73, 0x9c, 0x73, 0x9c,
+ 0x73, 0x9c, 0x73, 0x9c, 0x73, 0x9c, 0x73, 0x9c,
+ 0x73, 0x9c, 0x7b, 0xbc, 0x3f, 0xf8, 0x1f, 0xf0,
+ 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 232 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8,
+ 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x77, 0xdc,
+ 0x77, 0xdc, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c,
+ 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 233 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8,
+ 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x38, 0x38, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70,
+ 0x7c, 0x7c, 0x7c, 0x7c, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 234 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x1f, 0xf0,
+ 0x0e, 0x00, 0x07, 0x00, 0x03, 0x80, 0x01, 0xc0,
+ 0x0f, 0xe0, 0x1f, 0xf0, 0x38, 0x38, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c,
+ 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 235 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0xf8,
+ 0x7f, 0xfc, 0xe7, 0xce, 0xe3, 0x8e, 0xe3, 0x8e,
+ 0xe3, 0x8e, 0xe3, 0x8e, 0xe7, 0xce, 0x7f, 0xfc,
+ 0x3e, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 236 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x1c,
+ 0x00, 0x38, 0x00, 0x38, 0x0f, 0xf0, 0x1f, 0xf8,
+ 0x38, 0xfc, 0x38, 0xfc, 0x39, 0xdc, 0x39, 0xdc,
+ 0x3b, 0x9c, 0x3b, 0x9c, 0x3f, 0x1c, 0x3f, 0x1c,
+ 0x1f, 0xf8, 0x0f, 0xf0, 0x1c, 0x00, 0x1c, 0x00,
+ 0x38, 0x00, 0x38, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 237 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x07, 0xfc, 0x1f, 0xfc, 0x3c, 0x00,
+ 0x38, 0x00, 0x70, 0x00, 0x70, 0x00, 0x7f, 0xfc,
+ 0x7f, 0xfc, 0x70, 0x00, 0x70, 0x00, 0x38, 0x00,
+ 0x3c, 0x00, 0x1f, 0xfc, 0x07, 0xfc, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 238 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x07, 0xc0, 0x1f, 0xf0,
+ 0x3c, 0x78, 0x38, 0x38, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 239 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc,
+ 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 240 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x7f, 0xfc,
+ 0x7f, 0xfc, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 241 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x0e, 0x00,
+ 0x07, 0x00, 0x03, 0x80, 0x01, 0xc0, 0x00, 0xe0,
+ 0x00, 0x70, 0x00, 0x38, 0x00, 0x38, 0x00, 0x70,
+ 0x00, 0xe0, 0x01, 0xc0, 0x03, 0x80, 0x07, 0x00,
+ 0x0e, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x3f, 0xfc, 0x3f, 0xfc, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 242 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x00, 0x70,
+ 0x00, 0xe0, 0x01, 0xc0, 0x03, 0x80, 0x07, 0x00,
+ 0x0e, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x0e, 0x00,
+ 0x07, 0x00, 0x03, 0x80, 0x01, 0xc0, 0x00, 0xe0,
+ 0x00, 0x70, 0x00, 0x38, 0x00, 0x00, 0x00, 0x00,
+ 0x3f, 0xfc, 0x3f, 0xfc, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 243 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xf8, 0x03, 0xfc,
+ 0x03, 0x9c, 0x03, 0x9c, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, /* 244 */
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x73, 0x80, 0x73, 0x80,
+ 0x7f, 0x80, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 245 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc,
+ 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 246 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x1c,
+ 0x7f, 0xbc, 0x7b, 0xfc, 0x70, 0xf8, 0x00, 0x00,
+ 0x00, 0x00, 0x3e, 0x1c, 0x7f, 0xbc, 0x7b, 0xfc,
+ 0x70, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 247 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x0f, 0xe0, 0x1f, 0xf0, 0x1c, 0x70, 0x1c, 0x70,
+ 0x1c, 0x70, 0x1c, 0x70, 0x1f, 0xf0, 0x0f, 0xe0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 248 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x03, 0xc0, 0x07, 0xe0, 0x07, 0xe0,
+ 0x07, 0xe0, 0x07, 0xe0, 0x03, 0xc0, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 249 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 250 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e,
+ 0x00, 0x3e, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38,
+ 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38,
+ 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x70, 0x38,
+ 0x70, 0x38, 0x70, 0x38, 0x78, 0x38, 0x3c, 0x38,
+ 0x1e, 0x38, 0x0f, 0x38, 0x07, 0xb8, 0x03, 0xf8,
+ 0x01, 0xf8, 0x00, 0xf8, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 251 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x1f, 0xe0, 0x1f, 0xf0, 0x1c, 0x38, 0x1c, 0x38,
+ 0x1c, 0x38, 0x1c, 0x38, 0x1c, 0x38, 0x1c, 0x38,
+ 0x1c, 0x38, 0x1c, 0x38, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 252 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0xe0,
+ 0x1f, 0xf0, 0x1c, 0x70, 0x1c, 0x70, 0x00, 0xe0,
+ 0x01, 0xc0, 0x03, 0x80, 0x07, 0x00, 0x0e, 0x00,
+ 0x1f, 0xf0, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 253 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf8, 0x1f, 0xf8,
+ 0x1f, 0xf8, 0x1f, 0xf8, 0x1f, 0xf8, 0x1f, 0xf8,
+ 0x1f, 0xf8, 0x1f, 0xf8, 0x1f, 0xf8, 0x1f, 0xf8,
+ 0x1f, 0xf8, 0x1f, 0xf8, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 254 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 255 */
+} };
+
+
+const struct font_desc font_ter_16x32 = {
+ .idx = TER16x32_IDX,
+ .name = "TER16x32",
+ .width = 16,
+ .height = 32,
+ .charcount = 256,
+ .data = fontdata_ter16x32.data,
+#ifdef __sparc__
+ .pref = 5,
+#else
+ .pref = -1,
+#endif
+};
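Each TER16x32 glyph above is stored row-major: 32 rows of 2 bytes (16 pixels per row), so a glyph occupies 64 bytes and character c starts at byte offset c * 64 in the data array. A minimal sketch of probing one pixel — the helper is hypothetical, not part of this patch:

/* Hypothetical helper: test one pixel of glyph c in the 16x32 font. */
static bool ter16x32_pixel_set(const unsigned char *data,
			       unsigned int c, unsigned int x, unsigned int y)
{
	/* Two bytes per row; bit 7 of the first byte is pixel x == 0. */
	const unsigned char *row = data + c * 64 + y * 2;

	return row[x / 8] & (0x80 >> (x % 8));
}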
diff --git a/lib/fonts/fonts.c b/lib/fonts/fonts.c
new file mode 100644
index 000000000..973866438
--- /dev/null
+++ b/lib/fonts/fonts.c
@@ -0,0 +1,153 @@
+/*
+ * `Soft' font definitions
+ *
+ * Created 1995 by Geert Uytterhoeven
+ * Rewritten 1998 by Martin Mares <mj@ucw.cz>
+ *
+ * 2001 - Documented with DocBook
+ * - Brad Douglas <brad@neruo.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#if defined(__mc68000__)
+#include <asm/setup.h>
+#endif
+#include <linux/font.h>
+
+static const struct font_desc *fonts[] = {
+#ifdef CONFIG_FONT_8x8
+ &font_vga_8x8,
+#endif
+#ifdef CONFIG_FONT_8x16
+ &font_vga_8x16,
+#endif
+#ifdef CONFIG_FONT_6x11
+ &font_vga_6x11,
+#endif
+#ifdef CONFIG_FONT_7x14
+ &font_7x14,
+#endif
+#ifdef CONFIG_FONT_SUN8x16
+ &font_sun_8x16,
+#endif
+#ifdef CONFIG_FONT_SUN12x22
+ &font_sun_12x22,
+#endif
+#ifdef CONFIG_FONT_10x18
+ &font_10x18,
+#endif
+#ifdef CONFIG_FONT_ACORN_8x8
+ &font_acorn_8x8,
+#endif
+#ifdef CONFIG_FONT_PEARL_8x8
+ &font_pearl_8x8,
+#endif
+#ifdef CONFIG_FONT_MINI_4x6
+ &font_mini_4x6,
+#endif
+#ifdef CONFIG_FONT_6x10
+ &font_6x10,
+#endif
+#ifdef CONFIG_FONT_TER16x32
+ &font_ter_16x32,
+#endif
+#ifdef CONFIG_FONT_6x8
+ &font_6x8,
+#endif
+};
+
+#define num_fonts ARRAY_SIZE(fonts)
+
+#ifdef NO_FONTS
+#error No fonts configured.
+#endif
+
+
+/**
+ * find_font - find a font
+ * @name: string name of a font
+ *
+ * Find the font with string name @name.
+ *
+ * Returns %NULL if no font is found, or a pointer to the
+ * matching font.
+ *
+ */
+const struct font_desc *find_font(const char *name)
+{
+ unsigned int i;
+
+ BUILD_BUG_ON(!num_fonts);
+ for (i = 0; i < num_fonts; i++)
+ if (!strcmp(fonts[i]->name, name))
+ return fonts[i];
+ return NULL;
+}
+EXPORT_SYMBOL(find_font);
+
+
+/**
+ * get_default_font - get default font
+ * @xres: screen width in pixels
+ * @yres: screen height in pixels
+ * @font_w: bit array of supported widths (1 - 32)
+ * @font_h: bit array of supported heights (1 - 32)
+ *
+ * Get the default font for a specified screen size.
+ * Dimensions are in pixels.
+ *
+ * Returns %NULL if no font is found, or a pointer to the
+ * chosen font.
+ *
+ */
+const struct font_desc *get_default_font(int xres, int yres, u32 font_w,
+ u32 font_h)
+{
+ int i, c, cc, res;
+ const struct font_desc *f, *g;
+
+ g = NULL;
+ cc = -10000;
+ for (i = 0; i < num_fonts; i++) {
+ f = fonts[i];
+ c = f->pref;
+#if defined(__mc68000__)
+#ifdef CONFIG_FONT_PEARL_8x8
+ if (MACH_IS_AMIGA && f->idx == PEARL8x8_IDX)
+ c = 100;
+#endif
+#ifdef CONFIG_FONT_6x11
+ if (MACH_IS_MAC && xres < 640 && f->idx == VGA6x11_IDX)
+ c = 100;
+#endif
+#endif
+ if ((yres < 400) == (f->height <= 8))
+ c += 1000;
+
+ /* prefer a bigger font for high resolution */
+ res = (xres / f->width) * (yres / f->height) / 1000;
+ if (res > 20)
+ c += 20 - res;
+
+ if ((font_w & (1U << (f->width - 1))) &&
+ (font_h & (1U << (f->height - 1))))
+ c += 1000;
+
+ if (c > cc) {
+ cc = c;
+ g = f;
+ }
+ }
+ return g;
+}
+EXPORT_SYMBOL(get_default_font);
+
+MODULE_AUTHOR("James Simmons <jsimmons@users.sf.net>");
+MODULE_DESCRIPTION("Console Fonts");
+MODULE_LICENSE("GPL");
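A minimal sketch of how a console driver might use these two lookups; the font name and resolution are illustrative only, and bit (n - 1) of the font_w/font_h masks marks size n as acceptable:

/* Sketch only: pick a font by name, else let the scorer choose. */
static const struct font_desc *pick_console_font(void)
{
	const struct font_desc *font = find_font("TER16x32");

	/* Fallback: accept only 16-pixel-wide, 32-pixel-high fonts. */
	if (!font)
		font = get_default_font(1920, 1080, BIT(15), BIT(31));
	return font;
}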
diff --git a/lib/fortify_kunit.c b/lib/fortify_kunit.c
new file mode 100644
index 000000000..c8c33cbaa
--- /dev/null
+++ b/lib/fortify_kunit.c
@@ -0,0 +1,331 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Runtime test cases for CONFIG_FORTIFY_SOURCE that aren't expected to
+ * Oops the kernel on success. (For those, see drivers/misc/lkdtm/fortify.c)
+ *
+ * For corner cases with UBSAN, try testing with:
+ *
+ * ./tools/testing/kunit/kunit.py run --arch=x86_64 \
+ * --kconfig_add CONFIG_FORTIFY_SOURCE=y \
+ * --kconfig_add CONFIG_UBSAN=y \
+ * --kconfig_add CONFIG_UBSAN_TRAP=y \
+ * --kconfig_add CONFIG_UBSAN_BOUNDS=y \
+ * --kconfig_add CONFIG_UBSAN_LOCAL_BOUNDS=y \
+ * --make_options LLVM=1 fortify
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <kunit/test.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+
+static const char array_of_10[] = "this is 10";
+static const char *ptr_of_11 = "this is 11!";
+static char array_unknown[] = "compiler thinks I might change";
+
+static void known_sizes_test(struct kunit *test)
+{
+ KUNIT_EXPECT_EQ(test, __compiletime_strlen("88888888"), 8);
+ KUNIT_EXPECT_EQ(test, __compiletime_strlen(array_of_10), 10);
+ KUNIT_EXPECT_EQ(test, __compiletime_strlen(ptr_of_11), 11);
+
+ KUNIT_EXPECT_EQ(test, __compiletime_strlen(array_unknown), SIZE_MAX);
+ /* Externally defined and dynamically sized string pointer: */
+ KUNIT_EXPECT_EQ(test, __compiletime_strlen(test->name), SIZE_MAX);
+}
+
+/* This is volatile so the optimizer can't perform DCE below. */
+static volatile int pick;
+
+/* Not inline to keep optimizer from figuring out which string we want. */
+static noinline size_t want_minus_one(int pick)
+{
+ const char *str;
+
+ switch (pick) {
+ case 1:
+ str = "4444";
+ break;
+ case 2:
+ str = "333";
+ break;
+ default:
+ str = "1";
+ break;
+ }
+ return __compiletime_strlen(str);
+}
+
+static void control_flow_split_test(struct kunit *test)
+{
+ KUNIT_EXPECT_EQ(test, want_minus_one(pick), SIZE_MAX);
+}
+
+#define KUNIT_EXPECT_BOS(test, p, expected, name) \
+ KUNIT_EXPECT_EQ_MSG(test, __builtin_object_size(p, 1), \
+ expected, \
+ "__alloc_size() not working with __bos on " name "\n")
+
+#if !__has_builtin(__builtin_dynamic_object_size)
+#define KUNIT_EXPECT_BDOS(test, p, expected, name) \
+ /* Silence "unused variable 'expected'" warning. */ \
+ KUNIT_EXPECT_EQ(test, expected, expected)
+#else
+#define KUNIT_EXPECT_BDOS(test, p, expected, name) \
+ KUNIT_EXPECT_EQ_MSG(test, __builtin_dynamic_object_size(p, 1), \
+ expected, \
+ "__alloc_size() not working with __bdos on " name "\n")
+#endif
+
+/* If the expected size is a constant value, __bos can see it. */
+#define check_const(_expected, alloc, free) do { \
+ size_t expected = (_expected); \
+ void *p = alloc; \
+ KUNIT_EXPECT_TRUE_MSG(test, p != NULL, #alloc " failed?!\n"); \
+ KUNIT_EXPECT_BOS(test, p, expected, #alloc); \
+ KUNIT_EXPECT_BDOS(test, p, expected, #alloc); \
+ free; \
+} while (0)
+
+/* If the expected size is NOT a constant value, __bos CANNOT see it. */
+#define check_dynamic(_expected, alloc, free) do { \
+ size_t expected = (_expected); \
+ void *p = alloc; \
+ KUNIT_EXPECT_TRUE_MSG(test, p != NULL, #alloc " failed?!\n"); \
+ KUNIT_EXPECT_BOS(test, p, SIZE_MAX, #alloc); \
+ KUNIT_EXPECT_BDOS(test, p, expected, #alloc); \
+ free; \
+} while (0)
+
+/* Assortment of constant-value kinda-edge cases. */
+#define CONST_TEST_BODY(TEST_alloc) do { \
+ /* Special-case vmalloc()-family to skip 0-sized allocs. */ \
+ if (strcmp(#TEST_alloc, "TEST_vmalloc") != 0) \
+ TEST_alloc(check_const, 0, 0); \
+ TEST_alloc(check_const, 1, 1); \
+ TEST_alloc(check_const, 128, 128); \
+ TEST_alloc(check_const, 1023, 1023); \
+ TEST_alloc(check_const, 1025, 1025); \
+ TEST_alloc(check_const, 4096, 4096); \
+ TEST_alloc(check_const, 4097, 4097); \
+} while (0)
+
+static volatile size_t zero_size;
+static volatile size_t unknown_size = 50;
+
+#if !__has_builtin(__builtin_dynamic_object_size)
+#define DYNAMIC_TEST_BODY(TEST_alloc) \
+ kunit_skip(test, "Compiler is missing __builtin_dynamic_object_size() support\n")
+#else
+#define DYNAMIC_TEST_BODY(TEST_alloc) do { \
+ size_t size = unknown_size; \
+ \
+ /* \
+ * Expected size is "size" in each test, before it is then \
+ * internally incremented in each test. Requires we disable \
+ * -Wunsequenced. \
+ */ \
+ TEST_alloc(check_dynamic, size, size++); \
+ /* Make sure incrementing actually happened. */ \
+ KUNIT_EXPECT_NE(test, size, unknown_size); \
+} while (0)
+#endif
+
+#define DEFINE_ALLOC_SIZE_TEST_PAIR(allocator) \
+static void alloc_size_##allocator##_const_test(struct kunit *test) \
+{ \
+ CONST_TEST_BODY(TEST_##allocator); \
+} \
+static void alloc_size_##allocator##_dynamic_test(struct kunit *test) \
+{ \
+ DYNAMIC_TEST_BODY(TEST_##allocator); \
+}
+
+#define TEST_kmalloc(checker, expected_size, alloc_size) do { \
+ gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; \
+ void *orig; \
+ size_t len; \
+ \
+ checker(expected_size, kmalloc(alloc_size, gfp), \
+ kfree(p)); \
+ checker(expected_size, \
+ kmalloc_node(alloc_size, gfp, NUMA_NO_NODE), \
+ kfree(p)); \
+ checker(expected_size, kzalloc(alloc_size, gfp), \
+ kfree(p)); \
+ checker(expected_size, \
+ kzalloc_node(alloc_size, gfp, NUMA_NO_NODE), \
+ kfree(p)); \
+ checker(expected_size, kcalloc(1, alloc_size, gfp), \
+ kfree(p)); \
+ checker(expected_size, kcalloc(alloc_size, 1, gfp), \
+ kfree(p)); \
+ checker(expected_size, \
+ kcalloc_node(1, alloc_size, gfp, NUMA_NO_NODE), \
+ kfree(p)); \
+ checker(expected_size, \
+ kcalloc_node(alloc_size, 1, gfp, NUMA_NO_NODE), \
+ kfree(p)); \
+ checker(expected_size, kmalloc_array(1, alloc_size, gfp), \
+ kfree(p)); \
+ checker(expected_size, kmalloc_array(alloc_size, 1, gfp), \
+ kfree(p)); \
+ checker(expected_size, \
+ kmalloc_array_node(1, alloc_size, gfp, NUMA_NO_NODE), \
+ kfree(p)); \
+ checker(expected_size, \
+ kmalloc_array_node(alloc_size, 1, gfp, NUMA_NO_NODE), \
+ kfree(p)); \
+ checker(expected_size, __kmalloc(alloc_size, gfp), \
+ kfree(p)); \
+ checker(expected_size, \
+ __kmalloc_node(alloc_size, gfp, NUMA_NO_NODE), \
+ kfree(p)); \
+ \
+ orig = kmalloc(alloc_size, gfp); \
+ KUNIT_EXPECT_TRUE(test, orig != NULL); \
+ checker((expected_size) * 2, \
+ krealloc(orig, (alloc_size) * 2, gfp), \
+ kfree(p)); \
+ orig = kmalloc(alloc_size, gfp); \
+ KUNIT_EXPECT_TRUE(test, orig != NULL); \
+ checker((expected_size) * 2, \
+ krealloc_array(orig, 1, (alloc_size) * 2, gfp), \
+ kfree(p)); \
+ orig = kmalloc(alloc_size, gfp); \
+ KUNIT_EXPECT_TRUE(test, orig != NULL); \
+ checker((expected_size) * 2, \
+ krealloc_array(orig, (alloc_size) * 2, 1, gfp), \
+ kfree(p)); \
+ \
+ len = 11; \
+ /* Using memdup() with fixed size, so force unknown length. */ \
+ if (!__builtin_constant_p(expected_size)) \
+ len += zero_size; \
+ checker(len, kmemdup("hello there", len, gfp), kfree(p)); \
+} while (0)
+DEFINE_ALLOC_SIZE_TEST_PAIR(kmalloc)
+
+/* Sizes are in pages, not bytes. */
+#define TEST_vmalloc(checker, expected_pages, alloc_pages) do { \
+ gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; \
+ checker((expected_pages) * PAGE_SIZE, \
+ vmalloc((alloc_pages) * PAGE_SIZE), vfree(p)); \
+ checker((expected_pages) * PAGE_SIZE, \
+ vzalloc((alloc_pages) * PAGE_SIZE), vfree(p)); \
+ checker((expected_pages) * PAGE_SIZE, \
+ __vmalloc((alloc_pages) * PAGE_SIZE, gfp), vfree(p)); \
+} while (0)
+DEFINE_ALLOC_SIZE_TEST_PAIR(vmalloc)
+
+/* Sizes are in pages (and open-coded for side-effects), not bytes. */
+#define TEST_kvmalloc(checker, expected_pages, alloc_pages) do { \
+ gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; \
+ size_t prev_size; \
+ void *orig; \
+ \
+ checker((expected_pages) * PAGE_SIZE, \
+ kvmalloc((alloc_pages) * PAGE_SIZE, gfp), \
+ vfree(p)); \
+ checker((expected_pages) * PAGE_SIZE, \
+ kvmalloc_node((alloc_pages) * PAGE_SIZE, gfp, NUMA_NO_NODE), \
+ vfree(p)); \
+ checker((expected_pages) * PAGE_SIZE, \
+ kvzalloc((alloc_pages) * PAGE_SIZE, gfp), \
+ vfree(p)); \
+ checker((expected_pages) * PAGE_SIZE, \
+ kvzalloc_node((alloc_pages) * PAGE_SIZE, gfp, NUMA_NO_NODE), \
+ vfree(p)); \
+ checker((expected_pages) * PAGE_SIZE, \
+ kvcalloc(1, (alloc_pages) * PAGE_SIZE, gfp), \
+ vfree(p)); \
+ checker((expected_pages) * PAGE_SIZE, \
+ kvcalloc((alloc_pages) * PAGE_SIZE, 1, gfp), \
+ vfree(p)); \
+ checker((expected_pages) * PAGE_SIZE, \
+ kvmalloc_array(1, (alloc_pages) * PAGE_SIZE, gfp), \
+ vfree(p)); \
+ checker((expected_pages) * PAGE_SIZE, \
+ kvmalloc_array((alloc_pages) * PAGE_SIZE, 1, gfp), \
+ vfree(p)); \
+ \
+ prev_size = (expected_pages) * PAGE_SIZE; \
+ orig = kvmalloc(prev_size, gfp); \
+ KUNIT_EXPECT_TRUE(test, orig != NULL); \
+ checker(((expected_pages) * PAGE_SIZE) * 2, \
+ kvrealloc(orig, prev_size, \
+ ((alloc_pages) * PAGE_SIZE) * 2, gfp), \
+ kvfree(p)); \
+} while (0)
+DEFINE_ALLOC_SIZE_TEST_PAIR(kvmalloc)
+
+#define TEST_devm_kmalloc(checker, expected_size, alloc_size) do { \
+ gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; \
+ const char dev_name[] = "fortify-test"; \
+ struct device *dev; \
+ void *orig; \
+ size_t len; \
+ \
+ /* Create dummy device for devm_kmalloc()-family tests. */ \
+ dev = root_device_register(dev_name); \
+ KUNIT_ASSERT_FALSE_MSG(test, IS_ERR(dev), \
+ "Cannot register test device\n"); \
+ \
+ checker(expected_size, devm_kmalloc(dev, alloc_size, gfp), \
+ devm_kfree(dev, p)); \
+ checker(expected_size, devm_kzalloc(dev, alloc_size, gfp), \
+ devm_kfree(dev, p)); \
+ checker(expected_size, \
+ devm_kmalloc_array(dev, 1, alloc_size, gfp), \
+ devm_kfree(dev, p)); \
+ checker(expected_size, \
+ devm_kmalloc_array(dev, alloc_size, 1, gfp), \
+ devm_kfree(dev, p)); \
+ checker(expected_size, \
+ devm_kcalloc(dev, 1, alloc_size, gfp), \
+ devm_kfree(dev, p)); \
+ checker(expected_size, \
+ devm_kcalloc(dev, alloc_size, 1, gfp), \
+ devm_kfree(dev, p)); \
+ \
+ orig = devm_kmalloc(dev, alloc_size, gfp); \
+ KUNIT_EXPECT_TRUE(test, orig != NULL); \
+ checker((expected_size) * 2, \
+ devm_krealloc(dev, orig, (alloc_size) * 2, gfp), \
+ devm_kfree(dev, p)); \
+ \
+ len = 4; \
+ /* Using memdup() with fixed size, so force unknown length. */ \
+ if (!__builtin_constant_p(expected_size)) \
+ len += zero_size; \
+ checker(len, devm_kmemdup(dev, "Ohai", len, gfp), \
+ devm_kfree(dev, p)); \
+ \
+ device_unregister(dev); \
+} while (0)
+DEFINE_ALLOC_SIZE_TEST_PAIR(devm_kmalloc)
+
+static struct kunit_case fortify_test_cases[] = {
+ KUNIT_CASE(known_sizes_test),
+ KUNIT_CASE(control_flow_split_test),
+ KUNIT_CASE(alloc_size_kmalloc_const_test),
+ KUNIT_CASE(alloc_size_kmalloc_dynamic_test),
+ KUNIT_CASE(alloc_size_vmalloc_const_test),
+ KUNIT_CASE(alloc_size_vmalloc_dynamic_test),
+ KUNIT_CASE(alloc_size_kvmalloc_const_test),
+ KUNIT_CASE(alloc_size_kvmalloc_dynamic_test),
+ KUNIT_CASE(alloc_size_devm_kmalloc_const_test),
+ KUNIT_CASE(alloc_size_devm_kmalloc_dynamic_test),
+ {}
+};
+
+static struct kunit_suite fortify_test_suite = {
+ .name = "fortify",
+ .test_cases = fortify_test_cases,
+};
+
+kunit_test_suite(fortify_test_suite);
+
+MODULE_LICENSE("GPL");
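The const/dynamic split these tests verify is easy to reproduce outside KUnit. A standalone sketch, assuming GCC 12+ or Clang (for __builtin_dynamic_object_size) and -O2 so the builtins can fold:

#include <stdio.h>
#include <stdlib.h>

int main(int argc, char *argv[])
{
	char fixed[16];
	size_t n = (size_t)argc * 16;	/* runtime-only value */
	char *dyn = malloc(n);

	/* Statically sized object: __bos resolves to 16. */
	printf("fixed: %zu\n", __builtin_object_size(fixed, 1));
	/* Runtime-sized object: __bos gives up, returning (size_t)-1... */
	printf("dyn (bos):  %zu\n", __builtin_object_size(dyn, 1));
#if __has_builtin(__builtin_dynamic_object_size)
	/* ...while __bdos can still track the value of n. */
	printf("dyn (bdos): %zu\n", __builtin_dynamic_object_size(dyn, 1));
#endif
	free(dyn);
	return 0;
}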
diff --git a/lib/gen_crc32table.c b/lib/gen_crc32table.c
new file mode 100644
index 000000000..f755b997b
--- /dev/null
+++ b/lib/gen_crc32table.c
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stdio.h>
+#include "../include/linux/crc32poly.h"
+#include "../include/generated/autoconf.h"
+#include "crc32defs.h"
+#include <inttypes.h>
+
+#define ENTRIES_PER_LINE 4
+
+#if CRC_LE_BITS > 8
+# define LE_TABLE_ROWS (CRC_LE_BITS/8)
+# define LE_TABLE_SIZE 256
+#else
+# define LE_TABLE_ROWS 1
+# define LE_TABLE_SIZE (1 << CRC_LE_BITS)
+#endif
+
+#if CRC_BE_BITS > 8
+# define BE_TABLE_ROWS (CRC_BE_BITS/8)
+# define BE_TABLE_SIZE 256
+#else
+# define BE_TABLE_ROWS 1
+# define BE_TABLE_SIZE (1 << CRC_BE_BITS)
+#endif
+
+static uint32_t crc32table_le[LE_TABLE_ROWS][256];
+static uint32_t crc32table_be[BE_TABLE_ROWS][256];
+static uint32_t crc32ctable_le[LE_TABLE_ROWS][256];
+
+/**
+ * crc32init_le() - allocate and initialize LE table data
+ *
+ * crc is the crc of byte i; other entries are filled in based on the
+ * fact that crctable[i^j] = crctable[i] ^ crctable[j].
+ *
+ */
+static void crc32init_le_generic(const uint32_t polynomial,
+ uint32_t (*tab)[256])
+{
+ unsigned i, j;
+ uint32_t crc = 1;
+
+ tab[0][0] = 0;
+
+ for (i = LE_TABLE_SIZE >> 1; i; i >>= 1) {
+ crc = (crc >> 1) ^ ((crc & 1) ? polynomial : 0);
+ for (j = 0; j < LE_TABLE_SIZE; j += 2 * i)
+ tab[0][i + j] = crc ^ tab[0][j];
+ }
+ for (i = 0; i < LE_TABLE_SIZE; i++) {
+ crc = tab[0][i];
+ for (j = 1; j < LE_TABLE_ROWS; j++) {
+ crc = tab[0][crc & 0xff] ^ (crc >> 8);
+ tab[j][i] = crc;
+ }
+ }
+}
+
+static void crc32init_le(void)
+{
+ crc32init_le_generic(CRC32_POLY_LE, crc32table_le);
+}
+
+static void crc32cinit_le(void)
+{
+ crc32init_le_generic(CRC32C_POLY_LE, crc32ctable_le);
+}
+
+/**
+ * crc32init_be() - allocate and initialize BE table data
+ */
+static void crc32init_be(void)
+{
+ unsigned i, j;
+ uint32_t crc = 0x80000000;
+
+ crc32table_be[0][0] = 0;
+
+ for (i = 1; i < BE_TABLE_SIZE; i <<= 1) {
+ crc = (crc << 1) ^ ((crc & 0x80000000) ? CRC32_POLY_BE : 0);
+ for (j = 0; j < i; j++)
+ crc32table_be[0][i + j] = crc ^ crc32table_be[0][j];
+ }
+ for (i = 0; i < BE_TABLE_SIZE; i++) {
+ crc = crc32table_be[0][i];
+ for (j = 1; j < BE_TABLE_ROWS; j++) {
+ crc = crc32table_be[0][(crc >> 24) & 0xff] ^ (crc << 8);
+ crc32table_be[j][i] = crc;
+ }
+ }
+}
+
+static void output_table(uint32_t (*table)[256], int rows, int len, char *trans)
+{
+ int i, j;
+
+ for (j = 0 ; j < rows; j++) {
+ printf("{");
+ for (i = 0; i < len - 1; i++) {
+ if (i % ENTRIES_PER_LINE == 0)
+ printf("\n");
+ printf("%s(0x%8.8xL), ", trans, table[j][i]);
+ }
+ printf("%s(0x%8.8xL)},\n", trans, table[j][len - 1]);
+ }
+}
+
+int main(int argc, char** argv)
+{
+ printf("/* this file is generated - do not edit */\n\n");
+
+ if (CRC_LE_BITS > 1) {
+ crc32init_le();
+ printf("static const u32 ____cacheline_aligned "
+ "crc32table_le[%d][%d] = {",
+ LE_TABLE_ROWS, LE_TABLE_SIZE);
+ output_table(crc32table_le, LE_TABLE_ROWS,
+ LE_TABLE_SIZE, "tole");
+ printf("};\n");
+ }
+
+ if (CRC_BE_BITS > 1) {
+ crc32init_be();
+ printf("static const u32 ____cacheline_aligned "
+ "crc32table_be[%d][%d] = {",
+ BE_TABLE_ROWS, BE_TABLE_SIZE);
+ output_table(crc32table_be, BE_TABLE_ROWS,
+ BE_TABLE_SIZE, "tobe");
+ printf("};\n");
+ }
+ if (CRC_LE_BITS > 1) {
+ crc32cinit_le();
+ printf("static const u32 ____cacheline_aligned "
+ "crc32ctable_le[%d][%d] = {",
+ LE_TABLE_ROWS, LE_TABLE_SIZE);
+ output_table(crc32ctable_le, LE_TABLE_ROWS,
+ LE_TABLE_SIZE, "tole");
+ printf("};\n");
+ }
+
+ return 0;
+}
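For context, this is roughly how lib/crc32.c consumes row 0 of the generated little-endian table in the byte-at-a-time case; the extra rows feed its 4- and 8-byte slicing variants. A sketch, not the patch's own code:

/* Byte-wise little-endian CRC using one 256-entry table row. */
static uint32_t crc32_le_bytewise(uint32_t crc, const unsigned char *p,
				  size_t len, const uint32_t tab[256])
{
	while (len--)
		crc = tab[(crc ^ *p++) & 0xff] ^ (crc >> 8);
	return crc;
}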
diff --git a/lib/gen_crc64table.c b/lib/gen_crc64table.c
new file mode 100644
index 000000000..55e222acd
--- /dev/null
+++ b/lib/gen_crc64table.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Generate lookup table for the table-driven CRC64 calculation.
+ *
+ * gen_crc64table is executed at kernel build time and generates
+ * lib/crc64table.h. This header is included by lib/crc64.c for
+ * the table-driven CRC64 calculation.
+ *
+ * See lib/crc64.c for more information about the specification
+ * and polynomial arithmetic that gen_crc64table.c follows to
+ * generate the lookup table.
+ *
+ * Copyright 2018 SUSE Linux.
+ * Author: Coly Li <colyli@suse.de>
+ */
+#include <inttypes.h>
+#include <stdio.h>
+
+#define CRC64_ECMA182_POLY 0x42F0E1EBA9EA3693ULL
+#define CRC64_ROCKSOFT_POLY 0x9A6C9329AC4BC9B5ULL
+
+static uint64_t crc64_table[256] = {0};
+static uint64_t crc64_rocksoft_table[256] = {0};
+
+static void generate_reflected_crc64_table(uint64_t table[256], uint64_t poly)
+{
+ uint64_t i, j, c, crc;
+
+ for (i = 0; i < 256; i++) {
+ crc = 0ULL;
+ c = i;
+
+ for (j = 0; j < 8; j++) {
+ if ((crc ^ (c >> j)) & 1)
+ crc = (crc >> 1) ^ poly;
+ else
+ crc >>= 1;
+ }
+ table[i] = crc;
+ }
+}
+
+static void generate_crc64_table(uint64_t table[256], uint64_t poly)
+{
+ uint64_t i, j, c, crc;
+
+ for (i = 0; i < 256; i++) {
+ crc = 0;
+ c = i << 56;
+
+ for (j = 0; j < 8; j++) {
+ if ((crc ^ c) & 0x8000000000000000ULL)
+ crc = (crc << 1) ^ poly;
+ else
+ crc <<= 1;
+ c <<= 1;
+ }
+
+ table[i] = crc;
+ }
+}
+
+static void output_table(uint64_t table[256])
+{
+ int i;
+
+ for (i = 0; i < 256; i++) {
+ printf("\t0x%016" PRIx64 "ULL", table[i]);
+ if (i & 0x1)
+ printf(",\n");
+ else
+ printf(", ");
+ }
+ printf("};\n");
+}
+
+static void print_crc64_tables(void)
+{
+ printf("/* this file is generated - do not edit */\n\n");
+ printf("#include <linux/types.h>\n");
+ printf("#include <linux/cache.h>\n\n");
+ printf("static const u64 ____cacheline_aligned crc64table[256] = {\n");
+ output_table(crc64_table);
+
+ printf("\nstatic const u64 ____cacheline_aligned crc64rocksofttable[256] = {\n");
+ output_table(crc64_rocksoft_table);
+}
+
+int main(int argc, char *argv[])
+{
+ generate_crc64_table(crc64_table, CRC64_ECMA182_POLY);
+ generate_reflected_crc64_table(crc64_rocksoft_table, CRC64_ROCKSOFT_POLY);
+ print_crc64_tables();
+ return 0;
+}
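The two generators correspond to two update loops in lib/crc64.c: the plain ECMA-182 table is walked MSB-first, the reflected Rocksoft table LSB-first. A sketch of the MSB-first case, not the patch's own code:

/* MSB-first CRC64 update over the non-reflected (ECMA-182) table. */
static uint64_t crc64_be_update(uint64_t crc, const unsigned char *p,
				size_t len, const uint64_t tab[256])
{
	while (len--)
		crc = tab[(crc >> 56) ^ *p++] ^ (crc << 8);
	return crc;
}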
diff --git a/lib/genalloc.c b/lib/genalloc.c
new file mode 100644
index 000000000..4fa5635bf
--- /dev/null
+++ b/lib/genalloc.c
@@ -0,0 +1,909 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Basic general purpose allocator for managing special purpose
+ * memory, for example, memory that is not managed by the regular
+ * kmalloc/kfree interface. Uses for this include on-device special
+ * memory, uncached memory etc.
+ *
+ * It is safe to use the allocator in NMI handlers and other special
+ * unblockable contexts that could otherwise deadlock on locks. This
+ * is implemented by using atomic operations and retries on any
+ * conflicts. The disadvantage is that there may be livelocks in
+ * extreme cases. For better scalability, one allocator can be used
+ * for each CPU.
+ *
+ * The lockless operation only works if there is enough memory
+ * available. If new memory is added to the pool, a lock still has
+ * to be taken. So any user relying on locklessness has to ensure
+ * that sufficient memory is preallocated.
+ *
+ * The basic atomic operation of this allocator is cmpxchg on long.
+ * On architectures that don't have an NMI-safe cmpxchg implementation,
+ * the allocator can NOT be used in NMI handlers. So code that uses
+ * the allocator in an NMI handler should depend on
+ * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
+ *
+ * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
+ */
+
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <linux/bitmap.h>
+#include <linux/rculist.h>
+#include <linux/interrupt.h>
+#include <linux/genalloc.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/vmalloc.h>
+
+static inline size_t chunk_size(const struct gen_pool_chunk *chunk)
+{
+ return chunk->end_addr - chunk->start_addr + 1;
+}
+
+static inline int
+set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
+{
+ unsigned long val = READ_ONCE(*addr);
+
+ do {
+ if (val & mask_to_set)
+ return -EBUSY;
+ cpu_relax();
+ } while (!try_cmpxchg(addr, &val, val | mask_to_set));
+
+ return 0;
+}
+
+static inline int
+clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear)
+{
+ unsigned long val = READ_ONCE(*addr);
+
+ do {
+ if ((val & mask_to_clear) != mask_to_clear)
+ return -EBUSY;
+ cpu_relax();
+ } while (!try_cmpxchg(addr, &val, val & ~mask_to_clear));
+
+ return 0;
+}
+
+/*
+ * bitmap_set_ll - set the specified number of bits at the specified position
+ * @map: pointer to a bitmap
+ * @start: a bit position in @map
+ * @nr: number of bits to set
+ *
+ * Set @nr bits starting from @start in @map locklessly. Several users
+ * can set/clear the same bitmap simultaneously without a lock. If two
+ * users set the same bit, the loser of the race is returned the number
+ * of bits that remain to be set; on success, 0 is returned.
+ */
+static unsigned long
+bitmap_set_ll(unsigned long *map, unsigned long start, unsigned long nr)
+{
+ unsigned long *p = map + BIT_WORD(start);
+ const unsigned long size = start + nr;
+ int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
+ unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);
+
+ while (nr >= bits_to_set) {
+ if (set_bits_ll(p, mask_to_set))
+ return nr;
+ nr -= bits_to_set;
+ bits_to_set = BITS_PER_LONG;
+ mask_to_set = ~0UL;
+ p++;
+ }
+ if (nr) {
+ mask_to_set &= BITMAP_LAST_WORD_MASK(size);
+ if (set_bits_ll(p, mask_to_set))
+ return nr;
+ }
+
+ return 0;
+}
+
+/*
+ * bitmap_clear_ll - clear the specified number of bits at the specified position
+ * @map: pointer to a bitmap
+ * @start: a bit position in @map
+ * @nr: number of bits to clear
+ *
+ * Clear @nr bits starting from @start in @map locklessly. Several users
+ * can set/clear the same bitmap simultaneously without a lock. If two
+ * users clear the same bit, the loser of the race is returned the
+ * number of bits that remain to be cleared; on success, 0 is returned.
+ */
+static unsigned long
+bitmap_clear_ll(unsigned long *map, unsigned long start, unsigned long nr)
+{
+ unsigned long *p = map + BIT_WORD(start);
+ const unsigned long size = start + nr;
+ int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
+ unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);
+
+ while (nr >= bits_to_clear) {
+ if (clear_bits_ll(p, mask_to_clear))
+ return nr;
+ nr -= bits_to_clear;
+ bits_to_clear = BITS_PER_LONG;
+ mask_to_clear = ~0UL;
+ p++;
+ }
+ if (nr) {
+ mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
+ if (clear_bits_ll(p, mask_to_clear))
+ return nr;
+ }
+
+ return 0;
+}
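/*
 * Editorial worked example, not part of the patch: with
 * BITS_PER_LONG == 64, bitmap_set_ll(map, 60, 8) first tries the top
 * four bits of word 0 using BITMAP_FIRST_WORD_MASK(60) ==
 * 0xf000000000000000UL, then the low four bits of word 1 using
 * BITMAP_LAST_WORD_MASK(68) == 0xfUL. If either cmpxchg loop finds a
 * bit already set, the number of bits not yet committed (8 or 4 here)
 * is returned so the caller can roll the partial set back with
 * bitmap_clear_ll().
 */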
+
+/**
+ * gen_pool_create - create a new special memory pool
+ * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
+ * @nid: node id of the node the pool structure should be allocated on, or -1
+ *
+ * Create a new special memory pool that can be used to manage special purpose
+ * memory not managed by the regular kmalloc/kfree interface.
+ */
+struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
+{
+ struct gen_pool *pool;
+
+ pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
+ if (pool != NULL) {
+ spin_lock_init(&pool->lock);
+ INIT_LIST_HEAD(&pool->chunks);
+ pool->min_alloc_order = min_alloc_order;
+ pool->algo = gen_pool_first_fit;
+ pool->data = NULL;
+ pool->name = NULL;
+ }
+ return pool;
+}
+EXPORT_SYMBOL(gen_pool_create);
+
+/**
+ * gen_pool_add_owner - add a new chunk of special memory to the pool
+ * @pool: pool to add new memory chunk to
+ * @virt: virtual starting address of memory chunk to add to pool
+ * @phys: physical starting address of memory chunk to add to pool
+ * @size: size in bytes of the memory chunk to add to pool
+ * @nid: node id of the node the chunk structure and bitmap should be
+ * allocated on, or -1
+ * @owner: private data the publisher would like to recall at alloc time
+ *
+ * Add a new chunk of special memory to the specified pool.
+ *
+ * Returns 0 on success or a negative errno on failure.
+ */
+int gen_pool_add_owner(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
+ size_t size, int nid, void *owner)
+{
+ struct gen_pool_chunk *chunk;
+ unsigned long nbits = size >> pool->min_alloc_order;
+ unsigned long nbytes = sizeof(struct gen_pool_chunk) +
+ BITS_TO_LONGS(nbits) * sizeof(long);
+
+ chunk = vzalloc_node(nbytes, nid);
+ if (unlikely(chunk == NULL))
+ return -ENOMEM;
+
+ chunk->phys_addr = phys;
+ chunk->start_addr = virt;
+ chunk->end_addr = virt + size - 1;
+ chunk->owner = owner;
+ atomic_long_set(&chunk->avail, size);
+
+ spin_lock(&pool->lock);
+ list_add_rcu(&chunk->next_chunk, &pool->chunks);
+ spin_unlock(&pool->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(gen_pool_add_owner);
+
+/**
+ * gen_pool_virt_to_phys - return the physical address of memory
+ * @pool: pool to allocate from
+ * @addr: starting address of memory
+ *
+ * Returns the physical address on success, or -1 on error.
+ */
+phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
+{
+ struct gen_pool_chunk *chunk;
+ phys_addr_t paddr = -1;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
+ if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
+ paddr = chunk->phys_addr + (addr - chunk->start_addr);
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return paddr;
+}
+EXPORT_SYMBOL(gen_pool_virt_to_phys);
+
+/**
+ * gen_pool_destroy - destroy a special memory pool
+ * @pool: pool to destroy
+ *
+ * Destroy the specified special memory pool. Verifies that there are no
+ * outstanding allocations.
+ */
+void gen_pool_destroy(struct gen_pool *pool)
+{
+ struct list_head *_chunk, *_next_chunk;
+ struct gen_pool_chunk *chunk;
+ int order = pool->min_alloc_order;
+ unsigned long bit, end_bit;
+
+ list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
+ chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
+ list_del(&chunk->next_chunk);
+
+ end_bit = chunk_size(chunk) >> order;
+ bit = find_first_bit(chunk->bits, end_bit);
+ BUG_ON(bit < end_bit);
+
+ vfree(chunk);
+ }
+ kfree_const(pool->name);
+ kfree(pool);
+}
+EXPORT_SYMBOL(gen_pool_destroy);
+
+/**
+ * gen_pool_alloc_algo_owner - allocate special memory from the pool
+ * @pool: pool to allocate from
+ * @size: number of bytes to allocate from the pool
+ * @algo: algorithm passed from caller
+ * @data: data passed to algorithm
+ * @owner: optionally retrieve the chunk owner
+ *
+ * Allocate the requested number of bytes from the specified pool.
+ * Uses the pool allocation function (with first-fit algorithm by default).
+ * Can not be used in NMI handler on architectures without
+ * NMI-safe cmpxchg implementation.
+ */
+unsigned long gen_pool_alloc_algo_owner(struct gen_pool *pool, size_t size,
+ genpool_algo_t algo, void *data, void **owner)
+{
+ struct gen_pool_chunk *chunk;
+ unsigned long addr = 0;
+ int order = pool->min_alloc_order;
+ unsigned long nbits, start_bit, end_bit, remain;
+
+#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
+ BUG_ON(in_nmi());
+#endif
+
+ if (owner)
+ *owner = NULL;
+
+ if (size == 0)
+ return 0;
+
+ nbits = (size + (1UL << order) - 1) >> order;
+ rcu_read_lock();
+ list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
+ if (size > atomic_long_read(&chunk->avail))
+ continue;
+
+ start_bit = 0;
+ end_bit = chunk_size(chunk) >> order;
+retry:
+ start_bit = algo(chunk->bits, end_bit, start_bit,
+ nbits, data, pool, chunk->start_addr);
+ if (start_bit >= end_bit)
+ continue;
+ remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
+ if (remain) {
+ remain = bitmap_clear_ll(chunk->bits, start_bit,
+ nbits - remain);
+ BUG_ON(remain);
+ goto retry;
+ }
+
+ addr = chunk->start_addr + ((unsigned long)start_bit << order);
+ size = nbits << order;
+ atomic_long_sub(size, &chunk->avail);
+ if (owner)
+ *owner = chunk->owner;
+ break;
+ }
+ rcu_read_unlock();
+ return addr;
+}
+EXPORT_SYMBOL(gen_pool_alloc_algo_owner);
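/*
 * Editorial sketch of the typical lifecycle, not part of the patch;
 * gen_pool_add_virt(), gen_pool_alloc() and gen_pool_free() are the
 * <linux/genalloc.h> wrappers around the *_owner functions above.
 */
static unsigned long sram_smoke_test(unsigned long virt, phys_addr_t phys,
				     size_t size)
{
	struct gen_pool *pool;
	unsigned long addr;

	pool = gen_pool_create(5, -1);		/* 2^5 = 32-byte granules */
	if (!pool)
		return 0;

	if (gen_pool_add_virt(pool, virt, phys, size, -1)) {
		gen_pool_destroy(pool);
		return 0;
	}

	addr = gen_pool_alloc(pool, 256);	/* first-fit by default */
	if (addr)
		gen_pool_free(pool, addr, 256);

	gen_pool_destroy(pool);	/* would BUG() on outstanding allocations */
	return addr;
}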
+
+/**
+ * gen_pool_dma_alloc - allocate special memory from the pool for DMA usage
+ * @pool: pool to allocate from
+ * @size: number of bytes to allocate from the pool
+ * @dma: DMA-view physical address return value. Use %NULL if unneeded.
+ *
+ * Allocate the requested number of bytes from the specified pool.
+ * Uses the pool allocation function (with first-fit algorithm by default).
+ * Can not be used in NMI handler on architectures without
+ * NMI-safe cmpxchg implementation.
+ *
+ * Return: virtual address of the allocated memory, or %NULL on failure
+ */
+void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
+{
+ return gen_pool_dma_alloc_algo(pool, size, dma, pool->algo, pool->data);
+}
+EXPORT_SYMBOL(gen_pool_dma_alloc);
+
+/**
+ * gen_pool_dma_alloc_algo - allocate special memory from the pool for DMA
+ * usage with the given pool algorithm
+ * @pool: pool to allocate from
+ * @size: number of bytes to allocate from the pool
+ * @dma: DMA-view physical address return value. Use %NULL if unneeded.
+ * @algo: algorithm passed from caller
+ * @data: data passed to algorithm
+ *
+ * Allocate the requested number of bytes from the specified pool. Uses the
+ * given pool allocation function. Can not be used in NMI handler on
+ * architectures without NMI-safe cmpxchg implementation.
+ *
+ * Return: virtual address of the allocated memory, or %NULL on failure
+ */
+void *gen_pool_dma_alloc_algo(struct gen_pool *pool, size_t size,
+ dma_addr_t *dma, genpool_algo_t algo, void *data)
+{
+ unsigned long vaddr;
+
+ if (!pool)
+ return NULL;
+
+ vaddr = gen_pool_alloc_algo(pool, size, algo, data);
+ if (!vaddr)
+ return NULL;
+
+ if (dma)
+ *dma = gen_pool_virt_to_phys(pool, vaddr);
+
+ return (void *)vaddr;
+}
+EXPORT_SYMBOL(gen_pool_dma_alloc_algo);
+
+/**
+ * gen_pool_dma_alloc_align - allocate special memory from the pool for DMA
+ * usage with the given alignment
+ * @pool: pool to allocate from
+ * @size: number of bytes to allocate from the pool
+ * @dma: DMA-view physical address return value. Use %NULL if unneeded.
+ * @align: alignment in bytes for starting address
+ *
+ * Allocate the requested number of bytes from the specified pool, with the given
+ * alignment restriction. Can not be used in NMI handler on architectures
+ * without NMI-safe cmpxchg implementation.
+ *
+ * Return: virtual address of the allocated memory, or %NULL on failure
+ */
+void *gen_pool_dma_alloc_align(struct gen_pool *pool, size_t size,
+ dma_addr_t *dma, int align)
+{
+ struct genpool_data_align data = { .align = align };
+
+ return gen_pool_dma_alloc_algo(pool, size, dma,
+ gen_pool_first_fit_align, &data);
+}
+EXPORT_SYMBOL(gen_pool_dma_alloc_align);
+
+/**
+ * gen_pool_dma_zalloc - allocate special zeroed memory from the pool for
+ * DMA usage
+ * @pool: pool to allocate from
+ * @size: number of bytes to allocate from the pool
+ * @dma: DMA-view physical address return value. Use %NULL if unneeded.
+ *
+ * Allocate the requested number of zeroed bytes from the specified pool.
+ * Uses the pool allocation function (with first-fit algorithm by default).
+ * Can not be used in NMI handler on architectures without
+ * NMI-safe cmpxchg implementation.
+ *
+ * Return: virtual address of the allocated zeroed memory, or %NULL on failure
+ */
+void *gen_pool_dma_zalloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
+{
+ return gen_pool_dma_zalloc_algo(pool, size, dma, pool->algo, pool->data);
+}
+EXPORT_SYMBOL(gen_pool_dma_zalloc);
+
+/**
+ * gen_pool_dma_zalloc_algo - allocate special zeroed memory from the pool for
+ * DMA usage with the given pool algorithm
+ * @pool: pool to allocate from
+ * @size: number of bytes to allocate from the pool
+ * @dma: DMA-view physical address return value. Use %NULL if unneeded.
+ * @algo: algorithm passed from caller
+ * @data: data passed to algorithm
+ *
+ * Allocate the requested number of zeroed bytes from the specified pool. Uses
+ * the given pool allocation function. Can not be used in NMI handler on
+ * architectures without NMI-safe cmpxchg implementation.
+ *
+ * Return: virtual address of the allocated zeroed memory, or %NULL on failure
+ */
+void *gen_pool_dma_zalloc_algo(struct gen_pool *pool, size_t size,
+ dma_addr_t *dma, genpool_algo_t algo, void *data)
+{
+ void *vaddr = gen_pool_dma_alloc_algo(pool, size, dma, algo, data);
+
+ if (vaddr)
+ memset(vaddr, 0, size);
+
+ return vaddr;
+}
+EXPORT_SYMBOL(gen_pool_dma_zalloc_algo);
+
+/**
+ * gen_pool_dma_zalloc_align - allocate special zeroed memory from the pool for
+ * DMA usage with the given alignment
+ * @pool: pool to allocate from
+ * @size: number of bytes to allocate from the pool
+ * @dma: DMA-view physical address return value. Use %NULL if unneeded.
+ * @align: alignment in bytes for starting address
+ *
+ * Allocate the requested number of zeroed bytes from the specified pool,
+ * with the given alignment restriction. Can not be used in NMI handler on
+ * architectures without NMI-safe cmpxchg implementation.
+ *
+ * Return: virtual address of the allocated zeroed memory, or %NULL on failure
+ */
+void *gen_pool_dma_zalloc_align(struct gen_pool *pool, size_t size,
+ dma_addr_t *dma, int align)
+{
+ struct genpool_data_align data = { .align = align };
+
+ return gen_pool_dma_zalloc_algo(pool, size, dma,
+ gen_pool_first_fit_align, &data);
+}
+EXPORT_SYMBOL(gen_pool_dma_zalloc_align);
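/*
 * Editorial sketch, not part of the patch: a zeroed, 256-byte-aligned
 * DMA buffer from an already-populated pool. *dma receives the
 * device-view address; the return value is the CPU virtual address.
 */
static void *sram_dma_buf(struct gen_pool *pool, dma_addr_t *dma)
{
	return gen_pool_dma_zalloc_align(pool, 512, dma, 256);
}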
+
+/**
+ * gen_pool_free_owner - free allocated special memory back to the pool
+ * @pool: pool to free to
+ * @addr: starting address of memory to free back to pool
+ * @size: size in bytes of memory to free
+ * @owner: private data stashed at gen_pool_add() time
+ *
+ * Free previously allocated special memory back to the specified
+ * pool. Can not be used in NMI handler on architectures without
+ * NMI-safe cmpxchg implementation.
+ */
+void gen_pool_free_owner(struct gen_pool *pool, unsigned long addr, size_t size,
+ void **owner)
+{
+ struct gen_pool_chunk *chunk;
+ int order = pool->min_alloc_order;
+ unsigned long start_bit, nbits, remain;
+
+#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
+ BUG_ON(in_nmi());
+#endif
+
+ if (owner)
+ *owner = NULL;
+
+ nbits = (size + (1UL << order) - 1) >> order;
+ rcu_read_lock();
+ list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
+ if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
+ BUG_ON(addr + size - 1 > chunk->end_addr);
+ start_bit = (addr - chunk->start_addr) >> order;
+ remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
+ BUG_ON(remain);
+ size = nbits << order;
+ atomic_long_add(size, &chunk->avail);
+ if (owner)
+ *owner = chunk->owner;
+ rcu_read_unlock();
+ return;
+ }
+ }
+ rcu_read_unlock();
+ BUG();
+}
+EXPORT_SYMBOL(gen_pool_free_owner);
+
+/**
+ * gen_pool_for_each_chunk - call func for every chunk of generic memory pool
+ * @pool: the generic memory pool
+ * @func: func to call
+ * @data: additional data used by @func
+ *
+ * Call @func for every chunk of generic memory pool. The @func is
+ * called with rcu_read_lock held.
+ */
+void gen_pool_for_each_chunk(struct gen_pool *pool,
+ void (*func)(struct gen_pool *pool, struct gen_pool_chunk *chunk, void *data),
+ void *data)
+{
+ struct gen_pool_chunk *chunk;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk)
+ func(pool, chunk, data);
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL(gen_pool_for_each_chunk);
+
+/**
+ * gen_pool_has_addr - checks if an address falls within the range of a pool
+ * @pool: the generic memory pool
+ * @start: start address
+ * @size: size of the region
+ *
+ * Check if the range of addresses falls within the specified pool. Returns
+ * true if the entire range is contained in the pool and false otherwise.
+ */
+bool gen_pool_has_addr(struct gen_pool *pool, unsigned long start,
+ size_t size)
+{
+ bool found = false;
+ unsigned long end = start + size - 1;
+ struct gen_pool_chunk *chunk;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) {
+ if (start >= chunk->start_addr && start <= chunk->end_addr) {
+ if (end <= chunk->end_addr) {
+ found = true;
+ break;
+ }
+ }
+ }
+ rcu_read_unlock();
+ return found;
+}
+EXPORT_SYMBOL(gen_pool_has_addr);
+
+/**
+ * gen_pool_avail - get available free space of the pool
+ * @pool: pool to get available free space
+ *
+ * Return available free space of the specified pool.
+ */
+size_t gen_pool_avail(struct gen_pool *pool)
+{
+ struct gen_pool_chunk *chunk;
+ size_t avail = 0;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
+ avail += atomic_long_read(&chunk->avail);
+ rcu_read_unlock();
+ return avail;
+}
+EXPORT_SYMBOL_GPL(gen_pool_avail);
+
+/**
+ * gen_pool_size - get size in bytes of memory managed by the pool
+ * @pool: pool to get size
+ *
+ * Return size in bytes of memory managed by the pool.
+ */
+size_t gen_pool_size(struct gen_pool *pool)
+{
+ struct gen_pool_chunk *chunk;
+ size_t size = 0;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
+ size += chunk_size(chunk);
+ rcu_read_unlock();
+ return size;
+}
+EXPORT_SYMBOL_GPL(gen_pool_size);
+
+/**
+ * gen_pool_set_algo - set the allocation algorithm
+ * @pool: pool to change allocation algorithm
+ * @algo: custom algorithm function
+ * @data: additional data used by @algo
+ *
+ * Call @algo for each memory allocation in the pool.
+ * If @algo is NULL, use gen_pool_first_fit as the default
+ * memory allocation function.
+ */
+void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, void *data)
+{
+ rcu_read_lock();
+
+ pool->algo = algo;
+ if (!pool->algo)
+ pool->algo = gen_pool_first_fit;
+
+ pool->data = data;
+
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL(gen_pool_set_algo);
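+
+/*
+ * Example (sketch): switching an existing pool to best-fit allocation,
+ * which needs no extra algorithm data:
+ *
+ *	gen_pool_set_algo(pool, gen_pool_best_fit, NULL);
+ */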
+
+/**
+ * gen_pool_first_fit - find the first available region
+ * of memory matching the size requirement (no alignment constraint)
+ * @map: The address to base the search on
+ * @size: The bitmap size in bits
+ * @start: The bitnumber to start searching at
+ * @nr: The number of zeroed bits we're looking for
+ * @data: additional data - unused
+ * @pool: pool to find the fit region memory from
+ * @start_addr: not used in this function
+ */
+unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
+ unsigned long start, unsigned int nr, void *data,
+ struct gen_pool *pool, unsigned long start_addr)
+{
+ return bitmap_find_next_zero_area(map, size, start, nr, 0);
+}
+EXPORT_SYMBOL(gen_pool_first_fit);
+
+/**
+ * gen_pool_first_fit_align - find the first available region
+ * of memory matching the size requirement (alignment constraint)
+ * @map: The address to base the search on
+ * @size: The bitmap size in bits
+ * @start: The bitnumber to start searching at
+ * @nr: The number of zeroed bits we're looking for
+ * @data: data for alignment
+ * @pool: pool to get order from
+ * @start_addr: start address of the allocation chunk
+ */
+unsigned long gen_pool_first_fit_align(unsigned long *map, unsigned long size,
+ unsigned long start, unsigned int nr, void *data,
+ struct gen_pool *pool, unsigned long start_addr)
+{
+ struct genpool_data_align *alignment;
+ unsigned long align_mask, align_off;
+ int order;
+
+ alignment = data;
+ order = pool->min_alloc_order;
+ align_mask = ((alignment->align + (1UL << order) - 1) >> order) - 1;
+ align_off = (start_addr & (alignment->align - 1)) >> order;
+
+ return bitmap_find_next_zero_area_off(map, size, start, nr,
+ align_mask, align_off);
+}
+EXPORT_SYMBOL(gen_pool_first_fit_align);
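+
+/*
+ * Worked example (illustrative values): with min_alloc_order = 5
+ * (32-byte granules) and a requested align of 256 bytes,
+ * align_mask = ((256 + 31) >> 5) - 1 = 7, so candidate regions are
+ * constrained to multiples of 8 granules (256 bytes); align_off then
+ * compensates for a chunk whose start_addr is not itself 256-byte
+ * aligned.
+ */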
+
+/**
+ * gen_pool_fixed_alloc - reserve a specific region
+ * @map: The address to base the search on
+ * @size: The bitmap size in bits
+ * @start: The bitnumber to start searching at
+ * @nr: The number of zeroed bits we're looking for
+ * @data: data for alignment
+ * @pool: pool to get order from
+ * @start_addr: not used in this function
+ */
+unsigned long gen_pool_fixed_alloc(unsigned long *map, unsigned long size,
+ unsigned long start, unsigned int nr, void *data,
+ struct gen_pool *pool, unsigned long start_addr)
+{
+ struct genpool_data_fixed *fixed_data;
+ int order;
+ unsigned long offset_bit;
+ unsigned long start_bit;
+
+ fixed_data = data;
+ order = pool->min_alloc_order;
+ offset_bit = fixed_data->offset >> order;
+ if (WARN_ON(fixed_data->offset & ((1UL << order) - 1)))
+ return size;
+
+ start_bit = bitmap_find_next_zero_area(map, size,
+ start + offset_bit, nr, 0);
+ if (start_bit != offset_bit)
+ start_bit = size;
+ return start_bit;
+}
+EXPORT_SYMBOL(gen_pool_fixed_alloc);
+
+/**
+ * gen_pool_first_fit_order_align - find the first available region
+ * of memory matching the size requirement. The region will be aligned
+ * to the order of the size specified.
+ * @map: The address to base the search on
+ * @size: The bitmap size in bits
+ * @start: The bitnumber to start searching at
+ * @nr: The number of zeroed bits we're looking for
+ * @data: additional data - unused
+ * @pool: pool to find the fit region memory from
+ * @start_addr: not used in this function
+ */
+unsigned long gen_pool_first_fit_order_align(unsigned long *map,
+ unsigned long size, unsigned long start,
+ unsigned int nr, void *data, struct gen_pool *pool,
+ unsigned long start_addr)
+{
+ unsigned long align_mask = roundup_pow_of_two(nr) - 1;
+
+ return bitmap_find_next_zero_area(map, size, start, nr, align_mask);
+}
+EXPORT_SYMBOL(gen_pool_first_fit_order_align);
+
+/**
+ * gen_pool_best_fit - find the best fitting region of memory
+ * matching the size requirement (no alignment constraint)
+ * @map: The address to base the search on
+ * @size: The bitmap size in bits
+ * @start: The bitnumber to start searching at
+ * @nr: The number of zeroed bits we're looking for
+ * @data: additional data - unused
+ * @pool: pool to find the fit region memory from
+ * @start_addr: not used in this function
+ *
+ * Iterate over the bitmap to find the smallest free region
+ * from which the requested memory can be allocated.
+ */
+unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
+ unsigned long start, unsigned int nr, void *data,
+ struct gen_pool *pool, unsigned long start_addr)
+{
+ unsigned long start_bit = size;
+ unsigned long len = size + 1;
+ unsigned long index;
+
+ index = bitmap_find_next_zero_area(map, size, start, nr, 0);
+
+ while (index < size) {
+ unsigned long next_bit = find_next_bit(map, size, index + nr);
+ if ((next_bit - index) < len) {
+ len = next_bit - index;
+ start_bit = index;
+ if (len == nr)
+ return start_bit;
+ }
+ index = bitmap_find_next_zero_area(map, size,
+ next_bit + 1, nr, 0);
+ }
+
+ return start_bit;
+}
+EXPORT_SYMBOL(gen_pool_best_fit);
+
+static void devm_gen_pool_release(struct device *dev, void *res)
+{
+ gen_pool_destroy(*(struct gen_pool **)res);
+}
+
+static int devm_gen_pool_match(struct device *dev, void *res, void *data)
+{
+ struct gen_pool **p = res;
+
+ /* NULL data matches only a pool without an assigned name */
+ if (!data && !(*p)->name)
+ return 1;
+
+ if (!data || !(*p)->name)
+ return 0;
+
+ return !strcmp((*p)->name, data);
+}
+
+/**
+ * gen_pool_get - Obtain the gen_pool (if any) for a device
+ * @dev: device to retrieve the gen_pool from
+ * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device
+ *
+ * Returns the gen_pool for the device if one is present, or NULL.
+ */
+struct gen_pool *gen_pool_get(struct device *dev, const char *name)
+{
+ struct gen_pool **p;
+
+ p = devres_find(dev, devm_gen_pool_release, devm_gen_pool_match,
+ (void *)name);
+ if (!p)
+ return NULL;
+ return *p;
+}
+EXPORT_SYMBOL_GPL(gen_pool_get);
+
+/**
+ * devm_gen_pool_create - managed gen_pool_create
+ * @dev: device that provides the gen_pool
+ * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
+ * @nid: node selector for allocated gen_pool, %NUMA_NO_NODE for all nodes
+ * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device
+ *
+ * Create a new special memory pool that can be used to manage special purpose
+ * memory not managed by the regular kmalloc/kfree interface. The pool will be
+ * automatically destroyed by the device management code.
+ */
+struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order,
+ int nid, const char *name)
+{
+ struct gen_pool **ptr, *pool;
+ const char *pool_name = NULL;
+
+	/* Check that the genpool to be created is uniquely addressed on the device */
+ if (gen_pool_get(dev, name))
+ return ERR_PTR(-EINVAL);
+
+ if (name) {
+ pool_name = kstrdup_const(name, GFP_KERNEL);
+ if (!pool_name)
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ptr = devres_alloc(devm_gen_pool_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ goto free_pool_name;
+
+ pool = gen_pool_create(min_alloc_order, nid);
+ if (!pool)
+ goto free_devres;
+
+ *ptr = pool;
+ pool->name = pool_name;
+ devres_add(dev, ptr);
+
+ return pool;
+
+free_devres:
+ devres_free(ptr);
+free_pool_name:
+ kfree_const(pool_name);
+
+ return ERR_PTR(-ENOMEM);
+}
+EXPORT_SYMBOL(devm_gen_pool_create);
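+
+/*
+ * Example (sketch; "sram" is an illustrative name): a probe routine
+ * creating a managed pool with 32-byte allocation granularity:
+ *
+ *	struct gen_pool *pool =
+ *		devm_gen_pool_create(dev, 5, NUMA_NO_NODE, "sram");
+ *
+ *	if (IS_ERR(pool))
+ *		return PTR_ERR(pool);
+ */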
+
+#ifdef CONFIG_OF
+/**
+ * of_gen_pool_get - find a pool by phandle property
+ * @np: device node
+ * @propname: property name containing phandle(s)
+ * @index: index into the phandle array
+ *
+ * Returns the pool that contains the chunk starting at the physical
+ * address of the device tree node pointed at by the phandle property,
+ * or NULL if not found.
+ */
+struct gen_pool *of_gen_pool_get(struct device_node *np,
+ const char *propname, int index)
+{
+ struct platform_device *pdev;
+ struct device_node *np_pool, *parent;
+ const char *name = NULL;
+ struct gen_pool *pool = NULL;
+
+ np_pool = of_parse_phandle(np, propname, index);
+ if (!np_pool)
+ return NULL;
+
+ pdev = of_find_device_by_node(np_pool);
+ if (!pdev) {
+ /* Check if named gen_pool is created by parent node device */
+ parent = of_get_parent(np_pool);
+ pdev = of_find_device_by_node(parent);
+ of_node_put(parent);
+
+ of_property_read_string(np_pool, "label", &name);
+ if (!name)
+ name = of_node_full_name(np_pool);
+ }
+ if (pdev)
+ pool = gen_pool_get(&pdev->dev, name);
+ of_node_put(np_pool);
+
+ return pool;
+}
+EXPORT_SYMBOL_GPL(of_gen_pool_get);
+#endif /* CONFIG_OF */
diff --git a/lib/generic-radix-tree.c b/lib/generic-radix-tree.c
new file mode 100644
index 000000000..7dfa88282
--- /dev/null
+++ b/lib/generic-radix-tree.c
@@ -0,0 +1,248 @@
+
+#include <linux/export.h>
+#include <linux/generic-radix-tree.h>
+#include <linux/gfp.h>
+#include <linux/kmemleak.h>
+
+#define GENRADIX_ARY (PAGE_SIZE / sizeof(struct genradix_node *))
+#define GENRADIX_ARY_SHIFT ilog2(GENRADIX_ARY)
+
+struct genradix_node {
+ union {
+ /* Interior node: */
+ struct genradix_node *children[GENRADIX_ARY];
+
+ /* Leaf: */
+ u8 data[PAGE_SIZE];
+ };
+};
+
+static inline int genradix_depth_shift(unsigned depth)
+{
+ return PAGE_SHIFT + GENRADIX_ARY_SHIFT * depth;
+}
+
+/*
+ * Returns the size (of data, in bytes) that a tree of the given depth holds:
+ */
+static inline size_t genradix_depth_size(unsigned depth)
+{
+ return 1UL << genradix_depth_shift(depth);
+}
+
+/* depth that's needed for a genradix that can address up to ULONG_MAX: */
+#define GENRADIX_MAX_DEPTH \
+ DIV_ROUND_UP(BITS_PER_LONG - PAGE_SHIFT, GENRADIX_ARY_SHIFT)
+
+#define GENRADIX_DEPTH_MASK \
+ ((unsigned long) (roundup_pow_of_two(GENRADIX_MAX_DEPTH + 1) - 1))
+
+static inline unsigned genradix_root_to_depth(struct genradix_root *r)
+{
+ return (unsigned long) r & GENRADIX_DEPTH_MASK;
+}
+
+static inline struct genradix_node *genradix_root_to_node(struct genradix_root *r)
+{
+ return (void *) ((unsigned long) r & ~GENRADIX_DEPTH_MASK);
+}
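+
+/*
+ * Illustrative decoding of the tagged root pointer: nodes are
+ * page-aligned, so the low GENRADIX_DEPTH_MASK bits of the root are
+ * free to hold the tree depth. E.g. on a hypothetical 64-bit machine
+ * with 4K pages, a root value of 0xffff888012345002 decodes to the
+ * node at 0xffff888012345000 with depth 2.
+ */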
+
+/*
+ * Returns pointer to the specified byte @offset within @radix, or NULL if not
+ * allocated
+ */
+void *__genradix_ptr(struct __genradix *radix, size_t offset)
+{
+ struct genradix_root *r = READ_ONCE(radix->root);
+ struct genradix_node *n = genradix_root_to_node(r);
+ unsigned level = genradix_root_to_depth(r);
+
+ if (ilog2(offset) >= genradix_depth_shift(level))
+ return NULL;
+
+ while (1) {
+ if (!n)
+ return NULL;
+ if (!level)
+ break;
+
+ level--;
+
+ n = n->children[offset >> genradix_depth_shift(level)];
+ offset &= genradix_depth_size(level) - 1;
+ }
+
+ return &n->data[offset];
+}
+EXPORT_SYMBOL(__genradix_ptr);
+
+static inline struct genradix_node *genradix_alloc_node(gfp_t gfp_mask)
+{
+ struct genradix_node *node;
+
+ node = (struct genradix_node *)__get_free_page(gfp_mask|__GFP_ZERO);
+
+ /*
+ * We're using pages (not slab allocations) directly for kernel data
+ * structures, so we need to explicitly inform kmemleak of them in order
+ * to avoid false positive memory leak reports.
+ */
+ kmemleak_alloc(node, PAGE_SIZE, 1, gfp_mask);
+ return node;
+}
+
+static inline void genradix_free_node(struct genradix_node *node)
+{
+ kmemleak_free(node);
+ free_page((unsigned long)node);
+}
+
+/*
+ * Returns pointer to the specified byte @offset within @radix, allocating it if
+ * necessary - newly allocated slots are always zeroed out:
+ */
+void *__genradix_ptr_alloc(struct __genradix *radix, size_t offset,
+ gfp_t gfp_mask)
+{
+ struct genradix_root *v = READ_ONCE(radix->root);
+ struct genradix_node *n, *new_node = NULL;
+ unsigned level;
+
+ /* Increase tree depth if necessary: */
+ while (1) {
+ struct genradix_root *r = v, *new_root;
+
+ n = genradix_root_to_node(r);
+ level = genradix_root_to_depth(r);
+
+ if (n && ilog2(offset) < genradix_depth_shift(level))
+ break;
+
+ if (!new_node) {
+ new_node = genradix_alloc_node(gfp_mask);
+ if (!new_node)
+ return NULL;
+ }
+
+ new_node->children[0] = n;
+ new_root = ((struct genradix_root *)
+ ((unsigned long) new_node | (n ? level + 1 : 0)));
+
+ if ((v = cmpxchg_release(&radix->root, r, new_root)) == r) {
+ v = new_root;
+ new_node = NULL;
+ }
+ }
+
+ while (level--) {
+ struct genradix_node **p =
+ &n->children[offset >> genradix_depth_shift(level)];
+ offset &= genradix_depth_size(level) - 1;
+
+ n = READ_ONCE(*p);
+ if (!n) {
+ if (!new_node) {
+ new_node = genradix_alloc_node(gfp_mask);
+ if (!new_node)
+ return NULL;
+ }
+
+ if (!(n = cmpxchg_release(p, NULL, new_node)))
+ swap(n, new_node);
+ }
+ }
+
+ if (new_node)
+ genradix_free_node(new_node);
+
+ return &n->data[offset];
+}
+EXPORT_SYMBOL(__genradix_ptr_alloc);
+
+void *__genradix_iter_peek(struct genradix_iter *iter,
+ struct __genradix *radix,
+ size_t objs_per_page)
+{
+ struct genradix_root *r;
+ struct genradix_node *n;
+ unsigned level, i;
+
+ if (iter->offset == SIZE_MAX)
+ return NULL;
+
+restart:
+ r = READ_ONCE(radix->root);
+ if (!r)
+ return NULL;
+
+ n = genradix_root_to_node(r);
+ level = genradix_root_to_depth(r);
+
+ if (ilog2(iter->offset) >= genradix_depth_shift(level))
+ return NULL;
+
+ while (level) {
+ level--;
+
+ i = (iter->offset >> genradix_depth_shift(level)) &
+ (GENRADIX_ARY - 1);
+
+ while (!n->children[i]) {
+ size_t objs_per_ptr = genradix_depth_size(level);
+
+ if (iter->offset + objs_per_ptr < iter->offset) {
+ iter->offset = SIZE_MAX;
+ iter->pos = SIZE_MAX;
+ return NULL;
+ }
+
+ i++;
+ iter->offset = round_down(iter->offset + objs_per_ptr,
+ objs_per_ptr);
+ iter->pos = (iter->offset >> PAGE_SHIFT) *
+ objs_per_page;
+ if (i == GENRADIX_ARY)
+ goto restart;
+ }
+
+ n = n->children[i];
+ }
+
+ return &n->data[iter->offset & (PAGE_SIZE - 1)];
+}
+EXPORT_SYMBOL(__genradix_iter_peek);
+
+static void genradix_free_recurse(struct genradix_node *n, unsigned level)
+{
+ if (level) {
+ unsigned i;
+
+ for (i = 0; i < GENRADIX_ARY; i++)
+ if (n->children[i])
+ genradix_free_recurse(n->children[i], level - 1);
+ }
+
+ genradix_free_node(n);
+}
+
+int __genradix_prealloc(struct __genradix *radix, size_t size,
+ gfp_t gfp_mask)
+{
+ size_t offset;
+
+ for (offset = 0; offset < size; offset += PAGE_SIZE)
+ if (!__genradix_ptr_alloc(radix, offset, gfp_mask))
+ return -ENOMEM;
+
+ return 0;
+}
+EXPORT_SYMBOL(__genradix_prealloc);
+
+void __genradix_free(struct __genradix *radix)
+{
+ struct genradix_root *r = xchg(&radix->root, NULL);
+
+ genradix_free_recurse(genradix_root_to_node(r),
+ genradix_root_to_depth(r));
+}
+EXPORT_SYMBOL(__genradix_free);
diff --git a/lib/glob.c b/lib/glob.c
new file mode 100644
index 000000000..15b73f490
--- /dev/null
+++ b/lib/glob.c
@@ -0,0 +1,123 @@
+#include <linux/module.h>
+#include <linux/glob.h>
+
+/*
+ * The only reason this code can be compiled as a module is because the
+ * ATA code that depends on it can be as well. In practice, they're
+ * both usually compiled in and the module overhead goes away.
+ */
+MODULE_DESCRIPTION("glob(7) matching");
+MODULE_LICENSE("Dual MIT/GPL");
+
+/**
+ * glob_match - Shell-style pattern matching, like !fnmatch(pat, str, 0)
+ * @pat: Shell-style pattern to match, e.g. "*.[ch]".
+ * @str: String to match. The pattern must match the entire string.
+ *
+ * Perform shell-style glob matching, returning true (1) if the match
+ * succeeds, or false (0) if it fails. Equivalent to !fnmatch(@pat, @str, 0).
+ *
+ * Pattern metacharacters are ?, *, [ and \.
+ * (And, inside character classes, !, - and ].)
+ *
+ * This is a small and simple implementation intended for device blacklists
+ * where a string is matched against a number of patterns. Thus, it
+ * does not preprocess the patterns. It is non-recursive, and run-time
+ * is at most quadratic: strlen(@str)*strlen(@pat).
+ *
+ * An example of the worst case is glob_match("*aaaaa", "aaaaaaaaaa");
+ * it takes 6 passes over the pattern before matching the string.
+ *
+ * Like !fnmatch(@pat, @str, 0) and unlike the shell, this does NOT
+ * treat / or leading . specially; it isn't actually used for pathnames.
+ *
+ * Note that according to glob(7) (and unlike bash), character classes
+ * are complemented by a leading !; this does not support the regex-style
+ * [^a-z] syntax.
+ *
+ * An opening bracket without a matching close is matched literally.
+ */
+bool __pure glob_match(char const *pat, char const *str)
+{
+ /*
+ * Backtrack to previous * on mismatch and retry starting one
+ * character later in the string. Because * matches all characters
+ * (no exception for /), it can be easily proved that there's
+ * never a need to backtrack multiple levels.
+ */
+ char const *back_pat = NULL, *back_str;
+
+ /*
+ * Loop over each token (character or class) in pat, matching
+ * it against the remaining unmatched tail of str. Return false
+ * on mismatch, or true after matching the trailing nul bytes.
+ */
+ for (;;) {
+ unsigned char c = *str++;
+ unsigned char d = *pat++;
+
+ switch (d) {
+ case '?': /* Wildcard: anything but nul */
+ if (c == '\0')
+ return false;
+ break;
+ case '*': /* Any-length wildcard */
+ if (*pat == '\0') /* Optimize trailing * case */
+ return true;
+ back_pat = pat;
+ back_str = --str; /* Allow zero-length match */
+ break;
+ case '[': { /* Character class */
+ bool match = false, inverted = (*pat == '!');
+ char const *class = pat + inverted;
+ unsigned char a = *class++;
+
+ /*
+ * Iterate over each span in the character class.
+ * A span is either a single character a, or a
+ * range a-b. The first span may begin with ']'.
+ */
+ do {
+ unsigned char b = a;
+
+ if (a == '\0') /* Malformed */
+ goto literal;
+
+ if (class[0] == '-' && class[1] != ']') {
+ b = class[1];
+
+ if (b == '\0')
+ goto literal;
+
+ class += 2;
+ /* Any special action if a > b? */
+ }
+ match |= (a <= c && c <= b);
+ } while ((a = *class++) != ']');
+
+ if (match == inverted)
+ goto backtrack;
+ pat = class;
+ }
+ break;
+ case '\\':
+ d = *pat++;
+ fallthrough;
+ default: /* Literal character */
+literal:
+ if (c == d) {
+ if (d == '\0')
+ return true;
+ break;
+ }
+backtrack:
+ if (c == '\0' || !back_pat)
+ return false; /* No point continuing */
+ /* Try again from last *, one character later in str. */
+ pat = back_pat;
+ str = ++back_str;
+ break;
+ }
+ }
+}
+EXPORT_SYMBOL(glob_match);
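+
+/*
+ * Illustrative calls: glob_match("*.[ch]", "glob.c") returns true,
+ * glob_match("*.[ch]", "glob.o") returns false, and
+ * glob_match("ab?d*", "abcdefg") returns true (? matches exactly one
+ * character, * matches the rest).
+ */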
diff --git a/lib/globtest.c b/lib/globtest.c
new file mode 100644
index 000000000..d8e97d43b
--- /dev/null
+++ b/lib/globtest.c
@@ -0,0 +1,167 @@
+/*
+ * Extracted from glob.c
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/glob.h>
+#include <linux/printk.h>
+
+/* Boot with "glob.verbose=1" to show successful tests, too */
+static bool verbose = false;
+module_param(verbose, bool, 0);
+
+struct glob_test {
+ char const *pat, *str;
+ bool expected;
+};
+
+static bool __pure __init test(char const *pat, char const *str, bool expected)
+{
+ bool match = glob_match(pat, str);
+ bool success = match == expected;
+
+ /* Can't get string literals into a particular section, so... */
+ static char const msg_error[] __initconst =
+ KERN_ERR "glob: \"%s\" vs. \"%s\": %s *** ERROR ***\n";
+ static char const msg_ok[] __initconst =
+ KERN_DEBUG "glob: \"%s\" vs. \"%s\": %s OK\n";
+ static char const mismatch[] __initconst = "mismatch";
+ char const *message;
+
+ if (!success)
+ message = msg_error;
+ else if (verbose)
+ message = msg_ok;
+ else
+ return success;
+
+ printk(message, pat, str, mismatch + 3*match);
+ return success;
+}
+
+/*
+ * The tests are all jammed together in one array to make it simpler
+ * to place that array in the .init.rodata section. The obvious
+ * "array of structures containing char *" has no way to force the
+ * pointed-to strings to be in a particular section.
+ *
+ * Anyway, a test consists of:
+ * 1. Expected glob_match result: '1' or '0'.
+ * 2. Pattern to match: null-terminated string
+ * 3. String to match against: null-terminated string
+ *
+ * The list of tests is terminated with a final '\0' instead of
+ * a glob_match result character.
+ */
+static char const glob_tests[] __initconst =
+ /* Some basic tests */
+ "1" "a\0" "a\0"
+ "0" "a\0" "b\0"
+ "0" "a\0" "aa\0"
+ "0" "a\0" "\0"
+ "1" "\0" "\0"
+ "0" "\0" "a\0"
+ /* Simple character class tests */
+ "1" "[a]\0" "a\0"
+ "0" "[a]\0" "b\0"
+ "0" "[!a]\0" "a\0"
+ "1" "[!a]\0" "b\0"
+ "1" "[ab]\0" "a\0"
+ "1" "[ab]\0" "b\0"
+ "0" "[ab]\0" "c\0"
+ "1" "[!ab]\0" "c\0"
+ "1" "[a-c]\0" "b\0"
+ "0" "[a-c]\0" "d\0"
+ /* Corner cases in character class parsing */
+ "1" "[a-c-e-g]\0" "-\0"
+ "0" "[a-c-e-g]\0" "d\0"
+ "1" "[a-c-e-g]\0" "f\0"
+ "1" "[]a-ceg-ik[]\0" "a\0"
+ "1" "[]a-ceg-ik[]\0" "]\0"
+ "1" "[]a-ceg-ik[]\0" "[\0"
+ "1" "[]a-ceg-ik[]\0" "h\0"
+ "0" "[]a-ceg-ik[]\0" "f\0"
+ "0" "[!]a-ceg-ik[]\0" "h\0"
+ "0" "[!]a-ceg-ik[]\0" "]\0"
+ "1" "[!]a-ceg-ik[]\0" "f\0"
+ /* Simple wild cards */
+ "1" "?\0" "a\0"
+ "0" "?\0" "aa\0"
+ "0" "??\0" "a\0"
+ "1" "?x?\0" "axb\0"
+ "0" "?x?\0" "abx\0"
+ "0" "?x?\0" "xab\0"
+ /* Asterisk wild cards (backtracking) */
+ "0" "*??\0" "a\0"
+ "1" "*??\0" "ab\0"
+ "1" "*??\0" "abc\0"
+ "1" "*??\0" "abcd\0"
+ "0" "??*\0" "a\0"
+ "1" "??*\0" "ab\0"
+ "1" "??*\0" "abc\0"
+ "1" "??*\0" "abcd\0"
+ "0" "?*?\0" "a\0"
+ "1" "?*?\0" "ab\0"
+ "1" "?*?\0" "abc\0"
+ "1" "?*?\0" "abcd\0"
+ "1" "*b\0" "b\0"
+ "1" "*b\0" "ab\0"
+ "0" "*b\0" "ba\0"
+ "1" "*b\0" "bb\0"
+ "1" "*b\0" "abb\0"
+ "1" "*b\0" "bab\0"
+ "1" "*bc\0" "abbc\0"
+ "1" "*bc\0" "bc\0"
+ "1" "*bc\0" "bbc\0"
+ "1" "*bc\0" "bcbc\0"
+ /* Multiple asterisks (complex backtracking) */
+ "1" "*ac*\0" "abacadaeafag\0"
+ "1" "*ac*ae*ag*\0" "abacadaeafag\0"
+ "1" "*a*b*[bc]*[ef]*g*\0" "abacadaeafag\0"
+ "0" "*a*b*[ef]*[cd]*g*\0" "abacadaeafag\0"
+ "1" "*abcd*\0" "abcabcabcabcdefg\0"
+ "1" "*ab*cd*\0" "abcabcabcabcdefg\0"
+ "1" "*abcd*abcdef*\0" "abcabcdabcdeabcdefg\0"
+ "0" "*abcd*\0" "abcabcabcabcefg\0"
+ "0" "*ab*cd*\0" "abcabcabcabcefg\0";
+
+static int __init glob_init(void)
+{
+ unsigned successes = 0;
+ unsigned n = 0;
+ char const *p = glob_tests;
+ static char const message[] __initconst =
+ KERN_INFO "glob: %u self-tests passed, %u failed\n";
+
+ /*
+ * Tests are jammed together in a string. The first byte is '1'
+ * or '0' to indicate the expected outcome, or '\0' to indicate the
+ * end of the tests. Then come two null-terminated strings: the
+ * pattern and the string to match it against.
+ */
+ while (*p) {
+ bool expected = *p++ & 1;
+ char const *pat = p;
+
+ p += strlen(p) + 1;
+ successes += test(pat, p, expected);
+ p += strlen(p) + 1;
+ n++;
+ }
+
+ n -= successes;
+ printk(message, successes, n);
+
+ /* What's the errno for "kernel bug detected"? Guess... */
+ return n ? -ECANCELED : 0;
+}
+
+/* We need a dummy exit function to allow unload */
+static void __exit glob_fini(void) { }
+
+module_init(glob_init);
+module_exit(glob_fini);
+
+MODULE_DESCRIPTION("glob(7) matching tests");
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/lib/group_cpus.c b/lib/group_cpus.c
new file mode 100644
index 000000000..ee272c4ce
--- /dev/null
+++ b/lib/group_cpus.c
@@ -0,0 +1,439 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2016 Thomas Gleixner.
+ * Copyright (C) 2016-2017 Christoph Hellwig.
+ */
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/cpu.h>
+#include <linux/sort.h>
+#include <linux/group_cpus.h>
+
+#ifdef CONFIG_SMP
+
+static void grp_spread_init_one(struct cpumask *irqmsk, struct cpumask *nmsk,
+ unsigned int cpus_per_grp)
+{
+ const struct cpumask *siblmsk;
+ int cpu, sibl;
+
+ for ( ; cpus_per_grp > 0; ) {
+ cpu = cpumask_first(nmsk);
+
+ /* Should not happen, but I'm too lazy to think about it */
+ if (cpu >= nr_cpu_ids)
+ return;
+
+ cpumask_clear_cpu(cpu, nmsk);
+ cpumask_set_cpu(cpu, irqmsk);
+ cpus_per_grp--;
+
+ /* If the cpu has siblings, use them first */
+ siblmsk = topology_sibling_cpumask(cpu);
+ for (sibl = -1; cpus_per_grp > 0; ) {
+ sibl = cpumask_next(sibl, siblmsk);
+ if (sibl >= nr_cpu_ids)
+ break;
+ if (!cpumask_test_and_clear_cpu(sibl, nmsk))
+ continue;
+ cpumask_set_cpu(sibl, irqmsk);
+ cpus_per_grp--;
+ }
+ }
+}
+
+static cpumask_var_t *alloc_node_to_cpumask(void)
+{
+ cpumask_var_t *masks;
+ int node;
+
+ masks = kcalloc(nr_node_ids, sizeof(cpumask_var_t), GFP_KERNEL);
+ if (!masks)
+ return NULL;
+
+ for (node = 0; node < nr_node_ids; node++) {
+ if (!zalloc_cpumask_var(&masks[node], GFP_KERNEL))
+ goto out_unwind;
+ }
+
+ return masks;
+
+out_unwind:
+ while (--node >= 0)
+ free_cpumask_var(masks[node]);
+ kfree(masks);
+ return NULL;
+}
+
+static void free_node_to_cpumask(cpumask_var_t *masks)
+{
+ int node;
+
+ for (node = 0; node < nr_node_ids; node++)
+ free_cpumask_var(masks[node]);
+ kfree(masks);
+}
+
+static void build_node_to_cpumask(cpumask_var_t *masks)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu)
+ cpumask_set_cpu(cpu, masks[cpu_to_node(cpu)]);
+}
+
+static int get_nodes_in_cpumask(cpumask_var_t *node_to_cpumask,
+ const struct cpumask *mask, nodemask_t *nodemsk)
+{
+ int n, nodes = 0;
+
+ /* Calculate the number of nodes in the supplied affinity mask */
+ for_each_node(n) {
+ if (cpumask_intersects(mask, node_to_cpumask[n])) {
+ node_set(n, *nodemsk);
+ nodes++;
+ }
+ }
+ return nodes;
+}
+
+struct node_groups {
+ unsigned id;
+
+ union {
+ unsigned ngroups;
+ unsigned ncpus;
+ };
+};
+
+static int ncpus_cmp_func(const void *l, const void *r)
+{
+ const struct node_groups *ln = l;
+ const struct node_groups *rn = r;
+
+ return ln->ncpus - rn->ncpus;
+}
+
+/*
+ * Allocate a group count for each node, so that for each node:
+ *
+ * 1) the allocated count is >= 1
+ *
+ * 2) the allocated count is <= the number of active CPUs on this node
+ *
+ * The total number of groups actually allocated may be less than
+ * @numgrps when the total number of active CPUs is less than @numgrps.
+ *
+ * Active CPUs means the CPUs in '@cpu_mask AND @node_to_cpumask[]'
+ * for each node.
+ */
+static void alloc_nodes_groups(unsigned int numgrps,
+ cpumask_var_t *node_to_cpumask,
+ const struct cpumask *cpu_mask,
+ const nodemask_t nodemsk,
+ struct cpumask *nmsk,
+ struct node_groups *node_groups)
+{
+ unsigned n, remaining_ncpus = 0;
+
+ for (n = 0; n < nr_node_ids; n++) {
+ node_groups[n].id = n;
+ node_groups[n].ncpus = UINT_MAX;
+ }
+
+ for_each_node_mask(n, nodemsk) {
+ unsigned ncpus;
+
+ cpumask_and(nmsk, cpu_mask, node_to_cpumask[n]);
+ ncpus = cpumask_weight(nmsk);
+
+ if (!ncpus)
+ continue;
+ remaining_ncpus += ncpus;
+ node_groups[n].ncpus = ncpus;
+ }
+
+ numgrps = min_t(unsigned, remaining_ncpus, numgrps);
+
+ sort(node_groups, nr_node_ids, sizeof(node_groups[0]),
+ ncpus_cmp_func, NULL);
+
+ /*
+	 * Allocate groups for each node according to the ratio of this
+	 * node's nr_cpus to the remaining un-assigned ncpus. 'numgrps' is
+	 * bigger than the number of active NUMA nodes. Always start the
+	 * allocation from the node with the smallest nr_cpus.
+	 *
+	 * This guarantees that each active node is allocated at least one
+	 * group, and the reasoning is simple: over-allocation only happens
+	 * when a node is assigned exactly one group, so every other node
+	 * will be allocated >= 1 group, since 'numgrps' is bigger than the
+	 * number of NUMA nodes.
+ *
+ * One perfect invariant is that number of allocated groups for
+ * each node is <= CPU count of this node:
+ *
+ * 1) suppose there are two nodes: A and B
+ * ncpu(X) is CPU count of node X
+ * grps(X) is the group count allocated to node X via this
+ * algorithm
+ *
+ * ncpu(A) <= ncpu(B)
+ * ncpu(A) + ncpu(B) = N
+ * grps(A) + grps(B) = G
+ *
+ * grps(A) = max(1, round_down(G * ncpu(A) / N))
+ * grps(B) = G - grps(A)
+ *
+ * both N and G are integer, and 2 <= G <= N, suppose
+ * G = N - delta, and 0 <= delta <= N - 2
+ *
+ * 2) obviously grps(A) <= ncpu(A) because:
+ *
+ * if grps(A) is 1, then grps(A) <= ncpu(A) given
+ * ncpu(A) >= 1
+ *
+ * otherwise,
+ * grps(A) <= G * ncpu(A) / N <= ncpu(A), given G <= N
+ *
+ * 3) prove how grps(B) <= ncpu(B):
+ *
+	 *	if round_down(G * ncpu(A) / N) == 0, grps(B) won't be
+ * over-allocated, so grps(B) <= ncpu(B),
+ *
+ * otherwise:
+ *
+ * grps(A) =
+ * round_down(G * ncpu(A) / N) =
+ * round_down((N - delta) * ncpu(A) / N) =
+ * round_down((N * ncpu(A) - delta * ncpu(A)) / N) >=
+ * round_down((N * ncpu(A) - delta * N) / N) =
+	 *		ncpu(A) - delta
+ *
+ * then:
+ *
+ * grps(A) - G >= ncpu(A) - delta - G
+ * =>
+ * G - grps(A) <= G + delta - ncpu(A)
+ * =>
+ * grps(B) <= N - ncpu(A)
+ * =>
+	 *		grps(B) <= ncpu(B)
+ *
+	 * For nodes >= 3, the case can be viewed as one node versus one
+	 * combined big node, since that is exactly how this algorithm is
+	 * implemented; we always re-calculate 'remaining_ncpus' & 'numgrps', and
+ * finally for each node X: grps(X) <= ncpu(X).
+ *
+ */
+ for (n = 0; n < nr_node_ids; n++) {
+ unsigned ngroups, ncpus;
+
+ if (node_groups[n].ncpus == UINT_MAX)
+ continue;
+
+ WARN_ON_ONCE(numgrps == 0);
+
+ ncpus = node_groups[n].ncpus;
+ ngroups = max_t(unsigned, 1,
+ numgrps * ncpus / remaining_ncpus);
+ WARN_ON_ONCE(ngroups > ncpus);
+
+ node_groups[n].ngroups = ngroups;
+
+ remaining_ncpus -= ncpus;
+ numgrps -= ngroups;
+ }
+}
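+
+/*
+ * Numeric example (illustrative): two active nodes with ncpu(A) = 2,
+ * ncpu(B) = 6 and numgrps = 4. A is visited first (fewest CPUs) and
+ * gets max(1, 4 * 2 / 8) = 1 group; after re-calculation B gets
+ * max(1, 3 * 6 / 6) = 3 groups, so grps(X) <= ncpu(X) holds for both.
+ */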
+
+static int __group_cpus_evenly(unsigned int startgrp, unsigned int numgrps,
+ cpumask_var_t *node_to_cpumask,
+ const struct cpumask *cpu_mask,
+ struct cpumask *nmsk, struct cpumask *masks)
+{
+ unsigned int i, n, nodes, cpus_per_grp, extra_grps, done = 0;
+ unsigned int last_grp = numgrps;
+ unsigned int curgrp = startgrp;
+ nodemask_t nodemsk = NODE_MASK_NONE;
+ struct node_groups *node_groups;
+
+ if (cpumask_empty(cpu_mask))
+ return 0;
+
+ nodes = get_nodes_in_cpumask(node_to_cpumask, cpu_mask, &nodemsk);
+
+ /*
+	 * If the number of nodes in the mask is greater than or equal to the
+	 * number of groups, we just spread the groups across the nodes.
+ */
+ if (numgrps <= nodes) {
+ for_each_node_mask(n, nodemsk) {
+ /* Ensure that only CPUs which are in both masks are set */
+ cpumask_and(nmsk, cpu_mask, node_to_cpumask[n]);
+ cpumask_or(&masks[curgrp], &masks[curgrp], nmsk);
+ if (++curgrp == last_grp)
+ curgrp = 0;
+ }
+ return numgrps;
+ }
+
+ node_groups = kcalloc(nr_node_ids,
+ sizeof(struct node_groups),
+ GFP_KERNEL);
+ if (!node_groups)
+ return -ENOMEM;
+
+ /* allocate group number for each node */
+ alloc_nodes_groups(numgrps, node_to_cpumask, cpu_mask,
+ nodemsk, nmsk, node_groups);
+ for (i = 0; i < nr_node_ids; i++) {
+ unsigned int ncpus, v;
+ struct node_groups *nv = &node_groups[i];
+
+ if (nv->ngroups == UINT_MAX)
+ continue;
+
+ /* Get the cpus on this node which are in the mask */
+ cpumask_and(nmsk, cpu_mask, node_to_cpumask[nv->id]);
+ ncpus = cpumask_weight(nmsk);
+ if (!ncpus)
+ continue;
+
+ WARN_ON_ONCE(nv->ngroups > ncpus);
+
+ /* Account for rounding errors */
+ extra_grps = ncpus - nv->ngroups * (ncpus / nv->ngroups);
+
+ /* Spread allocated groups on CPUs of the current node */
+ for (v = 0; v < nv->ngroups; v++, curgrp++) {
+ cpus_per_grp = ncpus / nv->ngroups;
+
+ /* Account for extra groups to compensate rounding errors */
+ if (extra_grps) {
+ cpus_per_grp++;
+ --extra_grps;
+ }
+
+ /*
+ * wrapping has to be considered given 'startgrp'
+ * may start anywhere
+ */
+ if (curgrp >= last_grp)
+ curgrp = 0;
+ grp_spread_init_one(&masks[curgrp], nmsk,
+ cpus_per_grp);
+ }
+ done += nv->ngroups;
+ }
+ kfree(node_groups);
+ return done;
+}
+
+/**
+ * group_cpus_evenly - Group all CPUs evenly per NUMA/CPU locality
+ * @numgrps: number of groups
+ *
+ * Return: cpumask array if successful, NULL otherwise. Each element
+ * holds the CPUs assigned to that group.
+ *
+ * Try to put CPUs that are close in terms of CPU and NUMA locality
+ * into the same group, using two-stage grouping:
+ * 1) spread present CPUs across the groups evenly first
+ * 2) spread the remaining possible CPUs across the groups evenly
+ *
+ * The resulting grouping guarantees that all CPUs are covered and
+ * that no CPU is assigned to more than one group.
+ */
+struct cpumask *group_cpus_evenly(unsigned int numgrps)
+{
+ unsigned int curgrp = 0, nr_present = 0, nr_others = 0;
+ cpumask_var_t *node_to_cpumask;
+ cpumask_var_t nmsk, npresmsk;
+ int ret = -ENOMEM;
+ struct cpumask *masks = NULL;
+
+ if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
+ return NULL;
+
+ if (!zalloc_cpumask_var(&npresmsk, GFP_KERNEL))
+ goto fail_nmsk;
+
+ node_to_cpumask = alloc_node_to_cpumask();
+ if (!node_to_cpumask)
+ goto fail_npresmsk;
+
+ masks = kcalloc(numgrps, sizeof(*masks), GFP_KERNEL);
+ if (!masks)
+ goto fail_node_to_cpumask;
+
+ build_node_to_cpumask(node_to_cpumask);
+
+ /*
+	 * Make a local cache of 'cpu_present_mask' so that both spreading
+	 * stages observe a consistent 'cpu_present_mask' without holding
+	 * the CPU hotplug lock, reducing the deadlock risk with the CPU
+	 * hotplug code.
+	 *
+	 * A CPU hotplug event may occur while 'cpu_present_mask' is being
+	 * read, but we can live with that: it only affects whether the
+	 * hotplugged CPU is handled in the 1st or the 2nd stage, and either
+	 * way is correct from the API user's viewpoint, since the two-stage
+	 * spread is only an optimization.
+ */
+ cpumask_copy(npresmsk, data_race(cpu_present_mask));
+
+ /* grouping present CPUs first */
+ ret = __group_cpus_evenly(curgrp, numgrps, node_to_cpumask,
+ npresmsk, nmsk, masks);
+ if (ret < 0)
+ goto fail_build_affinity;
+ nr_present = ret;
+
+ /*
+	 * Allocate non-present CPUs starting from the next group to be
+	 * handled. If the grouping of present CPUs already exhausted the
+	 * group space, assign the non-present CPUs to the groups that have
+	 * already been allocated.
+ */
+ if (nr_present >= numgrps)
+ curgrp = 0;
+ else
+ curgrp = nr_present;
+ cpumask_andnot(npresmsk, cpu_possible_mask, npresmsk);
+ ret = __group_cpus_evenly(curgrp, numgrps, node_to_cpumask,
+ npresmsk, nmsk, masks);
+ if (ret >= 0)
+ nr_others = ret;
+
+ fail_build_affinity:
+ if (ret >= 0)
+ WARN_ON(nr_present + nr_others < numgrps);
+
+ fail_node_to_cpumask:
+ free_node_to_cpumask(node_to_cpumask);
+
+ fail_npresmsk:
+ free_cpumask_var(npresmsk);
+
+ fail_nmsk:
+ free_cpumask_var(nmsk);
+ if (ret < 0) {
+ kfree(masks);
+ return NULL;
+ }
+ return masks;
+}
+#else /* CONFIG_SMP */
+struct cpumask *group_cpus_evenly(unsigned int numgrps)
+{
+ struct cpumask *masks = kcalloc(numgrps, sizeof(*masks), GFP_KERNEL);
+
+ if (!masks)
+ return NULL;
+
+	/* assign all CPUs (cpu 0) to the 1st group only */
+ cpumask_copy(&masks[0], cpu_possible_mask);
+ return masks;
+}
+#endif /* CONFIG_SMP */
+EXPORT_SYMBOL_GPL(group_cpus_evenly);
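+
+/*
+ * Example (sketch): a driver spreading eight queue interrupts over the
+ * system might do:
+ *
+ *	struct cpumask *masks = group_cpus_evenly(8);
+ *
+ * and, if masks is non-NULL, bind queue i to the CPUs in masks[i]
+ * before kfree()-ing the array.
+ */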
diff --git a/lib/hashtable_test.c b/lib/hashtable_test.c
new file mode 100644
index 000000000..1d1b3288d
--- /dev/null
+++ b/lib/hashtable_test.c
@@ -0,0 +1,317 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit test for the Kernel Hashtable structures.
+ *
+ * Copyright (C) 2022, Google LLC.
+ * Author: Rae Moar <rmoar@google.com>
+ */
+#include <kunit/test.h>
+
+#include <linux/hashtable.h>
+
+struct hashtable_test_entry {
+ int key;
+ int data;
+ struct hlist_node node;
+ int visited;
+};
+
+static void hashtable_test_hash_init(struct kunit *test)
+{
+ /* Test the different ways of initialising a hashtable. */
+ DEFINE_HASHTABLE(hash1, 2);
+ DECLARE_HASHTABLE(hash2, 3);
+
+	/* When using DECLARE_HASHTABLE, hash_init must be called to
+	 * initialize the hashtable.
+ */
+ hash_init(hash2);
+
+ KUNIT_EXPECT_TRUE(test, hash_empty(hash1));
+ KUNIT_EXPECT_TRUE(test, hash_empty(hash2));
+}
+
+static void hashtable_test_hash_empty(struct kunit *test)
+{
+ struct hashtable_test_entry a;
+ DEFINE_HASHTABLE(hash, 1);
+
+ KUNIT_EXPECT_TRUE(test, hash_empty(hash));
+
+ a.key = 1;
+ a.data = 13;
+ hash_add(hash, &a.node, a.key);
+
+ /* Hashtable should no longer be empty. */
+ KUNIT_EXPECT_FALSE(test, hash_empty(hash));
+}
+
+static void hashtable_test_hash_hashed(struct kunit *test)
+{
+ struct hashtable_test_entry a, b;
+ DEFINE_HASHTABLE(hash, 4);
+
+ a.key = 1;
+ a.data = 13;
+ hash_add(hash, &a.node, a.key);
+ b.key = 1;
+ b.data = 2;
+ hash_add(hash, &b.node, b.key);
+
+ KUNIT_EXPECT_TRUE(test, hash_hashed(&a.node));
+ KUNIT_EXPECT_TRUE(test, hash_hashed(&b.node));
+}
+
+static void hashtable_test_hash_add(struct kunit *test)
+{
+ struct hashtable_test_entry a, b, *x;
+ int bkt;
+ DEFINE_HASHTABLE(hash, 3);
+
+ a.key = 1;
+ a.data = 13;
+ a.visited = 0;
+ hash_add(hash, &a.node, a.key);
+ b.key = 2;
+ b.data = 10;
+ b.visited = 0;
+ hash_add(hash, &b.node, b.key);
+
+ hash_for_each(hash, bkt, x, node) {
+ x->visited++;
+ if (x->key == a.key)
+ KUNIT_EXPECT_EQ(test, x->data, 13);
+ else if (x->key == b.key)
+ KUNIT_EXPECT_EQ(test, x->data, 10);
+ else
+ KUNIT_FAIL(test, "Unexpected key in hashtable.");
+ }
+
+ /* Both entries should have been visited exactly once. */
+ KUNIT_EXPECT_EQ(test, a.visited, 1);
+ KUNIT_EXPECT_EQ(test, b.visited, 1);
+}
+
+static void hashtable_test_hash_del(struct kunit *test)
+{
+ struct hashtable_test_entry a, b, *x;
+ DEFINE_HASHTABLE(hash, 6);
+
+ a.key = 1;
+ a.data = 13;
+ hash_add(hash, &a.node, a.key);
+ b.key = 2;
+ b.data = 10;
+ b.visited = 0;
+ hash_add(hash, &b.node, b.key);
+
+ hash_del(&b.node);
+ hash_for_each_possible(hash, x, node, b.key) {
+ x->visited++;
+ KUNIT_EXPECT_NE(test, x->key, b.key);
+ }
+
+ /* The deleted entry should not have been visited. */
+ KUNIT_EXPECT_EQ(test, b.visited, 0);
+
+ hash_del(&a.node);
+
+ /* The hashtable should be empty. */
+ KUNIT_EXPECT_TRUE(test, hash_empty(hash));
+}
+
+static void hashtable_test_hash_for_each(struct kunit *test)
+{
+ struct hashtable_test_entry entries[3];
+ struct hashtable_test_entry *x;
+ int bkt, i, j, count;
+ DEFINE_HASHTABLE(hash, 3);
+
+ /* Add three entries to the hashtable. */
+ for (i = 0; i < 3; i++) {
+ entries[i].key = i;
+ entries[i].data = i + 10;
+ entries[i].visited = 0;
+ hash_add(hash, &entries[i].node, entries[i].key);
+ }
+
+ count = 0;
+ hash_for_each(hash, bkt, x, node) {
+ x->visited += 1;
+ KUNIT_ASSERT_GE_MSG(test, x->key, 0, "Unexpected key in hashtable.");
+ KUNIT_ASSERT_LT_MSG(test, x->key, 3, "Unexpected key in hashtable.");
+ count++;
+ }
+
+ /* Should have visited each entry exactly once. */
+ KUNIT_EXPECT_EQ(test, count, 3);
+ for (j = 0; j < 3; j++)
+ KUNIT_EXPECT_EQ(test, entries[j].visited, 1);
+}
+
+static void hashtable_test_hash_for_each_safe(struct kunit *test)
+{
+ struct hashtable_test_entry entries[3];
+ struct hashtable_test_entry *x;
+ struct hlist_node *tmp;
+ int bkt, i, j, count;
+ DEFINE_HASHTABLE(hash, 3);
+
+ /* Add three entries to the hashtable. */
+ for (i = 0; i < 3; i++) {
+ entries[i].key = i;
+ entries[i].data = i + 10;
+ entries[i].visited = 0;
+ hash_add(hash, &entries[i].node, entries[i].key);
+ }
+
+ count = 0;
+ hash_for_each_safe(hash, bkt, tmp, x, node) {
+ x->visited += 1;
+ KUNIT_ASSERT_GE_MSG(test, x->key, 0, "Unexpected key in hashtable.");
+ KUNIT_ASSERT_LT_MSG(test, x->key, 3, "Unexpected key in hashtable.");
+ count++;
+
+ /* Delete entry during loop. */
+ hash_del(&x->node);
+ }
+
+ /* Should have visited each entry exactly once. */
+ KUNIT_EXPECT_EQ(test, count, 3);
+ for (j = 0; j < 3; j++)
+ KUNIT_EXPECT_EQ(test, entries[j].visited, 1);
+}
+
+static void hashtable_test_hash_for_each_possible(struct kunit *test)
+{
+ struct hashtable_test_entry entries[4];
+ struct hashtable_test_entry *x, *y;
+ int buckets[2];
+ int bkt, i, j, count;
+ DEFINE_HASHTABLE(hash, 5);
+
+ /* Add three entries with key = 0 to the hashtable. */
+ for (i = 0; i < 3; i++) {
+ entries[i].key = 0;
+ entries[i].data = i;
+ entries[i].visited = 0;
+ hash_add(hash, &entries[i].node, entries[i].key);
+ }
+
+ /* Add an entry with key = 1. */
+ entries[3].key = 1;
+ entries[3].data = 3;
+ entries[3].visited = 0;
+ hash_add(hash, &entries[3].node, entries[3].key);
+
+ count = 0;
+ hash_for_each_possible(hash, x, node, 0) {
+ x->visited += 1;
+ KUNIT_ASSERT_GE_MSG(test, x->data, 0, "Unexpected data in hashtable.");
+ KUNIT_ASSERT_LT_MSG(test, x->data, 4, "Unexpected data in hashtable.");
+ count++;
+ }
+
+ /* Should have visited each entry with key = 0 exactly once. */
+ for (j = 0; j < 3; j++)
+ KUNIT_EXPECT_EQ(test, entries[j].visited, 1);
+
+ /* Save the buckets for the different keys. */
+ hash_for_each(hash, bkt, y, node) {
+ KUNIT_ASSERT_GE_MSG(test, y->key, 0, "Unexpected key in hashtable.");
+ KUNIT_ASSERT_LE_MSG(test, y->key, 1, "Unexpected key in hashtable.");
+ buckets[y->key] = bkt;
+ }
+
+ /* If entry with key = 1 is in the same bucket as the entries with
+ * key = 0, check it was visited. Otherwise ensure that only three
+ * entries were visited.
+ */
+ if (buckets[0] == buckets[1]) {
+ KUNIT_EXPECT_EQ(test, count, 4);
+ KUNIT_EXPECT_EQ(test, entries[3].visited, 1);
+ } else {
+ KUNIT_EXPECT_EQ(test, count, 3);
+ KUNIT_EXPECT_EQ(test, entries[3].visited, 0);
+ }
+}
+
+static void hashtable_test_hash_for_each_possible_safe(struct kunit *test)
+{
+ struct hashtable_test_entry entries[4];
+ struct hashtable_test_entry *x, *y;
+ struct hlist_node *tmp;
+ int buckets[2];
+ int bkt, i, j, count;
+ DEFINE_HASHTABLE(hash, 5);
+
+ /* Add three entries with key = 0 to the hashtable. */
+ for (i = 0; i < 3; i++) {
+ entries[i].key = 0;
+ entries[i].data = i;
+ entries[i].visited = 0;
+ hash_add(hash, &entries[i].node, entries[i].key);
+ }
+
+ /* Add an entry with key = 1. */
+ entries[3].key = 1;
+ entries[3].data = 3;
+ entries[3].visited = 0;
+ hash_add(hash, &entries[3].node, entries[3].key);
+
+ count = 0;
+ hash_for_each_possible_safe(hash, x, tmp, node, 0) {
+ x->visited += 1;
+ KUNIT_ASSERT_GE_MSG(test, x->data, 0, "Unexpected data in hashtable.");
+ KUNIT_ASSERT_LT_MSG(test, x->data, 4, "Unexpected data in hashtable.");
+ count++;
+
+ /* Delete entry during loop. */
+ hash_del(&x->node);
+ }
+
+ /* Should have visited each entry with key = 0 exactly once. */
+ for (j = 0; j < 3; j++)
+ KUNIT_EXPECT_EQ(test, entries[j].visited, 1);
+
+ /* Save the buckets for the different keys. */
+ hash_for_each(hash, bkt, y, node) {
+ KUNIT_ASSERT_GE_MSG(test, y->key, 0, "Unexpected key in hashtable.");
+ KUNIT_ASSERT_LE_MSG(test, y->key, 1, "Unexpected key in hashtable.");
+ buckets[y->key] = bkt;
+ }
+
+ /* If entry with key = 1 is in the same bucket as the entries with
+ * key = 0, check it was visited. Otherwise ensure that only three
+ * entries were visited.
+ */
+ if (buckets[0] == buckets[1]) {
+ KUNIT_EXPECT_EQ(test, count, 4);
+ KUNIT_EXPECT_EQ(test, entries[3].visited, 1);
+ } else {
+ KUNIT_EXPECT_EQ(test, count, 3);
+ KUNIT_EXPECT_EQ(test, entries[3].visited, 0);
+ }
+}
+
+static struct kunit_case hashtable_test_cases[] = {
+ KUNIT_CASE(hashtable_test_hash_init),
+ KUNIT_CASE(hashtable_test_hash_empty),
+ KUNIT_CASE(hashtable_test_hash_hashed),
+ KUNIT_CASE(hashtable_test_hash_add),
+ KUNIT_CASE(hashtable_test_hash_del),
+ KUNIT_CASE(hashtable_test_hash_for_each),
+ KUNIT_CASE(hashtable_test_hash_for_each_safe),
+ KUNIT_CASE(hashtable_test_hash_for_each_possible),
+ KUNIT_CASE(hashtable_test_hash_for_each_possible_safe),
+ {},
+};
+
+static struct kunit_suite hashtable_test_module = {
+ .name = "hashtable",
+ .test_cases = hashtable_test_cases,
+};
+
+kunit_test_suites(&hashtable_test_module);
+
+MODULE_LICENSE("GPL");
diff --git a/lib/hexdump.c b/lib/hexdump.c
new file mode 100644
index 000000000..06833d404
--- /dev/null
+++ b/lib/hexdump.c
@@ -0,0 +1,295 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * lib/hexdump.c
+ */
+
+#include <linux/types.h>
+#include <linux/ctype.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/minmax.h>
+#include <linux/export.h>
+#include <asm/unaligned.h>
+
+const char hex_asc[] = "0123456789abcdef";
+EXPORT_SYMBOL(hex_asc);
+const char hex_asc_upper[] = "0123456789ABCDEF";
+EXPORT_SYMBOL(hex_asc_upper);
+
+/**
+ * hex_to_bin - convert a hex digit to its real value
+ * @ch: ASCII character representing a hex digit
+ *
+ * hex_to_bin() converts one hex digit to its actual value or -1 in case of bad
+ * input.
+ *
+ * This function is used to load cryptographic keys, so it is coded in such a
+ * way that there are no conditions or memory accesses that depend on data.
+ *
+ * Explanation of the logic:
+ * (ch - '9' - 1) is negative if ch <= '9'
+ * ('0' - 1 - ch) is negative if ch >= '0'
+ * we "and" these two values, so the result is negative if ch is in the range
+ * '0' ... '9'
+ * we are only interested in the sign, so we do a shift ">> 8"; note that right
+ * shift of a negative value is implementation-defined, so we cast the
+ * value to (unsigned) before the shift --- we have 0xffffff if ch is in
+ * the range '0' ... '9', 0 otherwise
+ * we "and" this value with (ch - '0' + 1) --- we have a value 1 ... 10 if ch is
+ * in the range '0' ... '9', 0 otherwise
+ * we add this value to -1 --- we have a value 0 ... 9 if ch is in the range '0'
+ * ... '9', -1 otherwise
+ * the next line is similar to the previous one, but we need to decode both
+ * uppercase and lowercase letters, so we use (ch & 0xdf), which converts
+ * lowercase to uppercase
+ */
+int hex_to_bin(unsigned char ch)
+{
+ unsigned char cu = ch & 0xdf;
+ return -1 +
+ ((ch - '0' + 1) & (unsigned)((ch - '9' - 1) & ('0' - 1 - ch)) >> 8) +
+ ((cu - 'A' + 11) & (unsigned)((cu - 'F' - 1) & ('A' - 1 - cu)) >> 8);
+}
+EXPORT_SYMBOL(hex_to_bin);
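+
+/*
+ * Worked example: for ch = 'b' (0x62), cu = 0x42 ('B'); the digit term
+ * masks to 0 ('b' is not in '0'..'9') while the letter term keeps
+ * (cu - 'A' + 11) = 12, giving -1 + 0 + 12 = 11 = 0xb.
+ */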
+
+/**
+ * hex2bin - convert an ascii hexadecimal string to its binary representation
+ * @dst: binary result
+ * @src: ascii hexadecimal string
+ * @count: result length
+ *
+ * Return 0 on success, -EINVAL in case of bad input.
+ */
+int hex2bin(u8 *dst, const char *src, size_t count)
+{
+ while (count--) {
+ int hi, lo;
+
+ hi = hex_to_bin(*src++);
+ if (unlikely(hi < 0))
+ return -EINVAL;
+ lo = hex_to_bin(*src++);
+ if (unlikely(lo < 0))
+ return -EINVAL;
+
+ *dst++ = (hi << 4) | lo;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(hex2bin);
+
+/**
+ * bin2hex - convert binary data to an ascii hexadecimal string
+ * @dst: ascii hexadecimal result
+ * @src: binary data
+ * @count: binary data length
+ */
+char *bin2hex(char *dst, const void *src, size_t count)
+{
+ const unsigned char *_src = src;
+
+ while (count--)
+ dst = hex_byte_pack(dst, *_src++);
+ return dst;
+}
+EXPORT_SYMBOL(bin2hex);
+
+/**
+ * hex_dump_to_buffer - convert a blob of data to "hex ASCII" in memory
+ * @buf: data blob to dump
+ * @len: number of bytes in the @buf
+ * @rowsize: number of bytes to print per line; must be 16 or 32
+ * @groupsize: number of bytes to print at a time (1, 2, 4, 8; default = 1)
+ * @linebuf: where to put the converted data
+ * @linebuflen: total size of @linebuf, including space for terminating NUL
+ * @ascii: include ASCII after the hex output
+ *
+ * hex_dump_to_buffer() works on one "line" of output at a time, i.e.,
+ * 16 or 32 bytes of input data converted to hex + ASCII output.
+ *
+ * Given a buffer of u8 data, hex_dump_to_buffer() converts the input data
+ * to a hex + ASCII dump at the supplied memory location.
+ * The converted output is always NUL-terminated.
+ *
+ * E.g.:
+ * hex_dump_to_buffer(frame->data, frame->len, 16, 1,
+ * linebuf, sizeof(linebuf), true);
+ *
+ * example output buffer:
+ * 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f @ABCDEFGHIJKLMNO
+ *
+ * Return:
+ * The number of bytes placed in the buffer, excluding the terminating NUL. If the
+ * output was truncated, then the return value is the number of bytes
+ * (excluding the terminating NUL) which would have been written to the final
+ * string if enough space had been available.
+ */
+int hex_dump_to_buffer(const void *buf, size_t len, int rowsize, int groupsize,
+ char *linebuf, size_t linebuflen, bool ascii)
+{
+ const u8 *ptr = buf;
+ int ngroups;
+ u8 ch;
+ int j, lx = 0;
+ int ascii_column;
+ int ret;
+
+ if (rowsize != 16 && rowsize != 32)
+ rowsize = 16;
+
+ if (len > rowsize) /* limit to one line at a time */
+ len = rowsize;
+ if (!is_power_of_2(groupsize) || groupsize > 8)
+ groupsize = 1;
+ if ((len % groupsize) != 0) /* no mixed size output */
+ groupsize = 1;
+
+ ngroups = len / groupsize;
+ ascii_column = rowsize * 2 + rowsize / groupsize + 1;
+
+ if (!linebuflen)
+ goto overflow1;
+
+ if (!len)
+ goto nil;
+
+ if (groupsize == 8) {
+ const u64 *ptr8 = buf;
+
+ for (j = 0; j < ngroups; j++) {
+ ret = snprintf(linebuf + lx, linebuflen - lx,
+ "%s%16.16llx", j ? " " : "",
+ get_unaligned(ptr8 + j));
+ if (ret >= linebuflen - lx)
+ goto overflow1;
+ lx += ret;
+ }
+ } else if (groupsize == 4) {
+ const u32 *ptr4 = buf;
+
+ for (j = 0; j < ngroups; j++) {
+ ret = snprintf(linebuf + lx, linebuflen - lx,
+ "%s%8.8x", j ? " " : "",
+ get_unaligned(ptr4 + j));
+ if (ret >= linebuflen - lx)
+ goto overflow1;
+ lx += ret;
+ }
+ } else if (groupsize == 2) {
+ const u16 *ptr2 = buf;
+
+ for (j = 0; j < ngroups; j++) {
+ ret = snprintf(linebuf + lx, linebuflen - lx,
+ "%s%4.4x", j ? " " : "",
+ get_unaligned(ptr2 + j));
+ if (ret >= linebuflen - lx)
+ goto overflow1;
+ lx += ret;
+ }
+ } else {
+ for (j = 0; j < len; j++) {
+ if (linebuflen < lx + 2)
+ goto overflow2;
+ ch = ptr[j];
+ linebuf[lx++] = hex_asc_hi(ch);
+ if (linebuflen < lx + 2)
+ goto overflow2;
+ linebuf[lx++] = hex_asc_lo(ch);
+ if (linebuflen < lx + 2)
+ goto overflow2;
+ linebuf[lx++] = ' ';
+ }
+ if (j)
+ lx--;
+ }
+ if (!ascii)
+ goto nil;
+
+ while (lx < ascii_column) {
+ if (linebuflen < lx + 2)
+ goto overflow2;
+ linebuf[lx++] = ' ';
+ }
+ for (j = 0; j < len; j++) {
+ if (linebuflen < lx + 2)
+ goto overflow2;
+ ch = ptr[j];
+ linebuf[lx++] = (isascii(ch) && isprint(ch)) ? ch : '.';
+ }
+nil:
+ linebuf[lx] = '\0';
+ return lx;
+overflow2:
+ linebuf[lx++] = '\0';
+overflow1:
+ return ascii ? ascii_column + len : (groupsize * 2 + 1) * ngroups - 1;
+}
+EXPORT_SYMBOL(hex_dump_to_buffer);
+
+#ifdef CONFIG_PRINTK
+/**
+ * print_hex_dump - print a text hex dump to syslog for a binary blob of data
+ * @level: kernel log level (e.g. KERN_DEBUG)
+ * @prefix_str: string to prefix each line with;
+ * caller supplies trailing spaces for alignment if desired
+ * @prefix_type: controls whether prefix of an offset, address, or none
+ * is printed (%DUMP_PREFIX_OFFSET, %DUMP_PREFIX_ADDRESS, %DUMP_PREFIX_NONE)
+ * @rowsize: number of bytes to print per line; must be 16 or 32
+ * @groupsize: number of bytes to print at a time (1, 2, 4, 8; default = 1)
+ * @buf: data blob to dump
+ * @len: number of bytes in the @buf
+ * @ascii: include ASCII after the hex output
+ *
+ * Given a buffer of u8 data, print_hex_dump() prints a hex + ASCII dump
+ * to the kernel log at the specified kernel log level, with an optional
+ * leading prefix.
+ *
+ * print_hex_dump() works on one "line" of output at a time, i.e.,
+ * 16 or 32 bytes of input data converted to hex + ASCII output.
+ * print_hex_dump() iterates over the entire input @buf, breaking it into
+ * "line size" chunks to format and print.
+ *
+ * E.g.:
+ * print_hex_dump(KERN_DEBUG, "raw data: ", DUMP_PREFIX_ADDRESS,
+ * 16, 1, frame->data, frame->len, true);
+ *
+ * Example output using %DUMP_PREFIX_OFFSET and 1-byte mode:
+ * 0009ab42: 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f @ABCDEFGHIJKLMNO
+ * Example output using %DUMP_PREFIX_ADDRESS and 4-byte mode:
+ * ffffffff88089af0: 73727170 77767574 7b7a7978 7f7e7d7c pqrstuvwxyz{|}~.
+ */
+void print_hex_dump(const char *level, const char *prefix_str, int prefix_type,
+ int rowsize, int groupsize,
+ const void *buf, size_t len, bool ascii)
+{
+ const u8 *ptr = buf;
+ int i, linelen, remaining = len;
+ unsigned char linebuf[32 * 3 + 2 + 32 + 1];
+
+ if (rowsize != 16 && rowsize != 32)
+ rowsize = 16;
+
+ for (i = 0; i < len; i += rowsize) {
+ linelen = min(remaining, rowsize);
+ remaining -= rowsize;
+
+ hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize,
+ linebuf, sizeof(linebuf), ascii);
+
+ switch (prefix_type) {
+ case DUMP_PREFIX_ADDRESS:
+ printk("%s%s%p: %s\n",
+ level, prefix_str, ptr + i, linebuf);
+ break;
+ case DUMP_PREFIX_OFFSET:
+ printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf);
+ break;
+ default:
+ printk("%s%s%s\n", level, prefix_str, linebuf);
+ break;
+ }
+ }
+}
+EXPORT_SYMBOL(print_hex_dump);
+
+#endif /* defined(CONFIG_PRINTK) */
diff --git a/lib/hweight.c b/lib/hweight.c
new file mode 100644
index 000000000..c94586b62
--- /dev/null
+++ b/lib/hweight.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/export.h>
+#include <linux/bitops.h>
+#include <asm/types.h>
+
+/**
+ * hweightN - returns the Hamming weight of an N-bit word
+ * @x: the word to weigh
+ *
+ * The Hamming Weight of a number is the total number of bits set in it.
+ */
+
+unsigned int __sw_hweight32(unsigned int w)
+{
+#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
+ w -= (w >> 1) & 0x55555555;
+ w = (w & 0x33333333) + ((w >> 2) & 0x33333333);
+ w = (w + (w >> 4)) & 0x0f0f0f0f;
+ return (w * 0x01010101) >> 24;
+#else
+ unsigned int res = w - ((w >> 1) & 0x55555555);
+ res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
+ res = (res + (res >> 4)) & 0x0F0F0F0F;
+ res = res + (res >> 8);
+ return (res + (res >> 16)) & 0x000000FF;
+#endif
+}
+EXPORT_SYMBOL(__sw_hweight32);
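+
+/*
+ * Worked example for the multiplier variant: w = 0xf0 reduces to 0xa0
+ * (per-2-bit counts), then 0x40 (per-nibble counts), then 0x04
+ * (per-byte count); multiplying by 0x01010101 sums the byte counts
+ * into the top byte, so ">> 24" yields the population count 4.
+ */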
+
+unsigned int __sw_hweight16(unsigned int w)
+{
+ unsigned int res = w - ((w >> 1) & 0x5555);
+ res = (res & 0x3333) + ((res >> 2) & 0x3333);
+ res = (res + (res >> 4)) & 0x0F0F;
+ return (res + (res >> 8)) & 0x00FF;
+}
+EXPORT_SYMBOL(__sw_hweight16);
+
+unsigned int __sw_hweight8(unsigned int w)
+{
+ unsigned int res = w - ((w >> 1) & 0x55);
+ res = (res & 0x33) + ((res >> 2) & 0x33);
+ return (res + (res >> 4)) & 0x0F;
+}
+EXPORT_SYMBOL(__sw_hweight8);
+
+unsigned long __sw_hweight64(__u64 w)
+{
+#if BITS_PER_LONG == 32
+ return __sw_hweight32((unsigned int)(w >> 32)) +
+ __sw_hweight32((unsigned int)w);
+#elif BITS_PER_LONG == 64
+#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
+ w -= (w >> 1) & 0x5555555555555555ul;
+ w = (w & 0x3333333333333333ul) + ((w >> 2) & 0x3333333333333333ul);
+ w = (w + (w >> 4)) & 0x0f0f0f0f0f0f0f0ful;
+ return (w * 0x0101010101010101ul) >> 56;
+#else
+ __u64 res = w - ((w >> 1) & 0x5555555555555555ul);
+ res = (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul);
+ res = (res + (res >> 4)) & 0x0F0F0F0F0F0F0F0Ful;
+ res = res + (res >> 8);
+ res = res + (res >> 16);
+ return (res + (res >> 32)) & 0x00000000000000FFul;
+#endif
+#endif
+}
+EXPORT_SYMBOL(__sw_hweight64);
diff --git a/lib/idr.c b/lib/idr.c
new file mode 100644
index 000000000..da36054c3
--- /dev/null
+++ b/lib/idr.c
@@ -0,0 +1,599 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/bitmap.h>
+#include <linux/bug.h>
+#include <linux/export.h>
+#include <linux/idr.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/xarray.h>
+
+/**
+ * idr_alloc_u32() - Allocate an ID.
+ * @idr: IDR handle.
+ * @ptr: Pointer to be associated with the new ID.
+ * @nextid: Pointer to an ID.
+ * @max: The maximum ID to allocate (inclusive).
+ * @gfp: Memory allocation flags.
+ *
+ * Allocates an unused ID in the range specified by @nextid and @max.
+ * Note that @max is inclusive whereas the @end parameter to idr_alloc()
+ * is exclusive. The new ID is assigned to @nextid before the pointer
+ * is inserted into the IDR, so if @nextid points into the object pointed
+ * to by @ptr, a concurrent lookup will not find an uninitialised ID.
+ *
+ * The caller should provide their own locking to ensure that two
+ * concurrent modifications to the IDR are not possible. Read-only
+ * accesses to the IDR may be done under the RCU read lock or may
+ * exclude simultaneous writers.
+ *
+ * Return: 0 if an ID was allocated, -ENOMEM if memory allocation failed,
+ * or -ENOSPC if no free IDs could be found. If an error occurred,
+ * @nextid is unchanged.
+ */
+int idr_alloc_u32(struct idr *idr, void *ptr, u32 *nextid,
+ unsigned long max, gfp_t gfp)
+{
+ struct radix_tree_iter iter;
+ void __rcu **slot;
+ unsigned int base = idr->idr_base;
+ unsigned int id = *nextid;
+
+ if (WARN_ON_ONCE(!(idr->idr_rt.xa_flags & ROOT_IS_IDR)))
+ idr->idr_rt.xa_flags |= IDR_RT_MARKER;
+
+ id = (id < base) ? 0 : id - base;
+ radix_tree_iter_init(&iter, id);
+ slot = idr_get_free(&idr->idr_rt, &iter, gfp, max - base);
+ if (IS_ERR(slot))
+ return PTR_ERR(slot);
+
+ *nextid = iter.index + base;
+ /* there is a memory barrier inside radix_tree_iter_replace() */
+ radix_tree_iter_replace(&idr->idr_rt, &iter, slot, ptr);
+ radix_tree_iter_tag_clear(&idr->idr_rt, &iter, IDR_FREE);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(idr_alloc_u32);
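+
+/*
+ * A minimal usage sketch (illustrative only; the structure, lock and
+ * function names are hypothetical):
+ *
+ *	struct my_obj {
+ *		u32 id;
+ *	};
+ *
+ *	static DEFINE_IDR(my_idr);
+ *	static DEFINE_SPINLOCK(my_lock);
+ *
+ *	static int my_obj_register(struct my_obj *obj)
+ *	{
+ *		int err;
+ *
+ *		obj->id = 0;
+ *		spin_lock(&my_lock);
+ *		err = idr_alloc_u32(&my_idr, obj, &obj->id, UINT_MAX,
+ *				    GFP_ATOMIC);
+ *		spin_unlock(&my_lock);
+ *		return err;
+ *	}
+ *
+ * Because @nextid points into @obj, a concurrent idr_find() never sees
+ * the object with an uninitialised ID.
+ */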
+
+/**
+ * idr_alloc() - Allocate an ID.
+ * @idr: IDR handle.
+ * @ptr: Pointer to be associated with the new ID.
+ * @start: The minimum ID (inclusive).
+ * @end: The maximum ID (exclusive).
+ * @gfp: Memory allocation flags.
+ *
+ * Allocates an unused ID in the range specified by @start and @end. If
+ * @end is <= 0, it is treated as one larger than %INT_MAX. This allows
+ * callers to use @start + N as @end as long as N is within integer range.
+ *
+ * The caller should provide their own locking to ensure that two
+ * concurrent modifications to the IDR are not possible. Read-only
+ * accesses to the IDR may be done under the RCU read lock or may
+ * exclude simultaneous writers.
+ *
+ * Return: The newly allocated ID, -ENOMEM if memory allocation failed,
+ * or -ENOSPC if no free IDs could be found.
+ */
+int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp)
+{
+ u32 id = start;
+ int ret;
+
+ if (WARN_ON_ONCE(start < 0))
+ return -EINVAL;
+
+ ret = idr_alloc_u32(idr, ptr, &id, end > 0 ? end - 1 : INT_MAX, gfp);
+ if (ret)
+ return ret;
+
+ return id;
+}
+EXPORT_SYMBOL_GPL(idr_alloc);
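+
+/*
+ * Sketch (illustrative; my_idr and ptr are stand-ins): allocate any free
+ * ID in [1, 100), then release it again:
+ *
+ *	id = idr_alloc(&my_idr, ptr, 1, 100, GFP_KERNEL);
+ *	if (id < 0)
+ *		return id;
+ *	...
+ *	idr_remove(&my_idr, id);
+ */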
+
+/**
+ * idr_alloc_cyclic() - Allocate an ID cyclically.
+ * @idr: IDR handle.
+ * @ptr: Pointer to be associated with the new ID.
+ * @start: The minimum ID (inclusive).
+ * @end: The maximum ID (exclusive).
+ * @gfp: Memory allocation flags.
+ *
+ * Allocates an unused ID in the range specified by @start and @end. If
+ * @end is <= 0, it is treated as one larger than %INT_MAX. This allows
+ * callers to use @start + N as @end as long as N is within integer range.
+ * The search for an unused ID will start at the last ID allocated and will
+ * wrap around to @start if no free IDs are found before reaching @end.
+ *
+ * The caller should provide their own locking to ensure that two
+ * concurrent modifications to the IDR are not possible. Read-only
+ * accesses to the IDR may be done under the RCU read lock or may
+ * exclude simultaneous writers.
+ *
+ * Return: The newly allocated ID, -ENOMEM if memory allocation failed,
+ * or -ENOSPC if no free IDs could be found.
+ */
+int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp)
+{
+ u32 id = idr->idr_next;
+ int err, max = end > 0 ? end - 1 : INT_MAX;
+
+ if ((int)id < start)
+ id = start;
+
+ err = idr_alloc_u32(idr, ptr, &id, max, gfp);
+ if ((err == -ENOSPC) && (id > start)) {
+ id = start;
+ err = idr_alloc_u32(idr, ptr, &id, max, gfp);
+ }
+ if (err)
+ return err;
+
+ idr->idr_next = id + 1;
+ return id;
+}
+EXPORT_SYMBOL(idr_alloc_cyclic);
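+
+/*
+ * Illustration: with @start == 0 and @end == 4, successive calls return
+ * 0, 1, 2 and 3. Once the search reaches @end it wraps: if ID 1 has been
+ * freed while 0, 2 and 3 are still held, the next call returns 1 rather
+ * than -ENOSPC.
+ */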
+
+/**
+ * idr_remove() - Remove an ID from the IDR.
+ * @idr: IDR handle.
+ * @id: Pointer ID.
+ *
+ * Removes this ID from the IDR. If the ID was not previously in the IDR,
+ * this function returns %NULL.
+ *
+ * Since this function modifies the IDR, the caller should provide their
+ * own locking to ensure that concurrent modification of the same IDR is
+ * not possible.
+ *
+ * Return: The pointer formerly associated with this ID.
+ */
+void *idr_remove(struct idr *idr, unsigned long id)
+{
+ return radix_tree_delete_item(&idr->idr_rt, id - idr->idr_base, NULL);
+}
+EXPORT_SYMBOL_GPL(idr_remove);
+
+/**
+ * idr_find() - Return pointer for given ID.
+ * @idr: IDR handle.
+ * @id: Pointer ID.
+ *
+ * Looks up the pointer associated with this ID. A %NULL pointer may
+ * indicate that @id is not allocated or that the %NULL pointer was
+ * associated with this ID.
+ *
+ * This function can be called under rcu_read_lock(), given that the leaf
+ * pointers' lifetimes are correctly managed.
+ *
+ * Return: The pointer associated with this ID.
+ */
+void *idr_find(const struct idr *idr, unsigned long id)
+{
+ return radix_tree_lookup(&idr->idr_rt, id - idr->idr_base);
+}
+EXPORT_SYMBOL_GPL(idr_find);
+
+/**
+ * idr_for_each() - Iterate through all stored pointers.
+ * @idr: IDR handle.
+ * @fn: Function to be called for each pointer.
+ * @data: Data passed to callback function.
+ *
+ * The callback function will be called for each entry in @idr, passing
+ * the ID, the entry and @data.
+ *
+ * If @fn returns anything other than %0, the iteration stops and that
+ * value is returned from this function.
+ *
+ * idr_for_each() can be called concurrently with idr_alloc() and
+ * idr_remove() if protected by RCU. Newly added entries may not be
+ * seen and deleted entries may be seen, but adding and removing entries
+ * will not cause other entries to be skipped, nor spurious ones to be seen.
+ */
+int idr_for_each(const struct idr *idr,
+ int (*fn)(int id, void *p, void *data), void *data)
+{
+ struct radix_tree_iter iter;
+ void __rcu **slot;
+ int base = idr->idr_base;
+
+ radix_tree_for_each_slot(slot, &idr->idr_rt, &iter, 0) {
+ int ret;
+ unsigned long id = iter.index + base;
+
+ if (WARN_ON_ONCE(id > INT_MAX))
+ break;
+ ret = fn(id, rcu_dereference_raw(*slot), data);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(idr_for_each);
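+
+/*
+ * Sketch of a callback (illustrative; the seq_file is a stand-in for
+ * whatever @data carries):
+ *
+ *	static int my_show(int id, void *p, void *data)
+ *	{
+ *		struct seq_file *s = data;
+ *
+ *		seq_printf(s, "%d -> %p\n", id, p);
+ *		return 0;	returning non-zero here stops the walk
+ *	}
+ *
+ *	ret = idr_for_each(&my_idr, my_show, s);
+ */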
+
+/**
+ * idr_get_next_ul() - Find next populated entry.
+ * @idr: IDR handle.
+ * @nextid: Pointer to an ID.
+ *
+ * Returns the next populated entry in the tree with an ID greater than
+ * or equal to the value pointed to by @nextid. On exit, @nextid is updated
+ * to the ID of the found value. To use in a loop, the value pointed to
+ * by @nextid must be incremented by the caller between calls.
+ */
+void *idr_get_next_ul(struct idr *idr, unsigned long *nextid)
+{
+ struct radix_tree_iter iter;
+ void __rcu **slot;
+ void *entry = NULL;
+ unsigned long base = idr->idr_base;
+ unsigned long id = *nextid;
+
+ id = (id < base) ? 0 : id - base;
+ radix_tree_for_each_slot(slot, &idr->idr_rt, &iter, id) {
+ entry = rcu_dereference_raw(*slot);
+ if (!entry)
+ continue;
+ if (!xa_is_internal(entry))
+ break;
+ if (slot != &idr->idr_rt.xa_head && !xa_is_retry(entry))
+ break;
+ slot = radix_tree_iter_retry(&iter);
+ }
+ if (!slot)
+ return NULL;
+
+ *nextid = iter.index + base;
+ return entry;
+}
+EXPORT_SYMBOL(idr_get_next_ul);
+
+/**
+ * idr_get_next() - Find next populated entry.
+ * @idr: IDR handle.
+ * @nextid: Pointer to an ID.
+ *
+ * Returns the next populated entry in the tree with an ID greater than
+ * or equal to the value pointed to by @nextid. On exit, @nextid is updated
+ * to the ID of the found value. To use in a loop, the value pointed to
+ * by @nextid must be incremented by the caller between calls.
+ */
+void *idr_get_next(struct idr *idr, int *nextid)
+{
+ unsigned long id = *nextid;
+ void *entry = idr_get_next_ul(idr, &id);
+
+ if (WARN_ON_ONCE(id > INT_MAX))
+ return NULL;
+ *nextid = id;
+ return entry;
+}
+EXPORT_SYMBOL(idr_get_next);
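+
+/*
+ * Typical loop (illustrative; do_something() is a stand-in), following
+ * the increment rule from the kernel-doc above:
+ *
+ *	int id = 0;
+ *	void *entry;
+ *
+ *	for (entry = idr_get_next(idr, &id); entry;
+ *	     id++, entry = idr_get_next(idr, &id))
+ *		do_something(entry);
+ *
+ * The idr_for_each_entry() helper in <linux/idr.h> wraps this pattern.
+ */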
+
+/**
+ * idr_replace() - replace pointer for given ID.
+ * @idr: IDR handle.
+ * @ptr: New pointer to associate with the ID.
+ * @id: ID to change.
+ *
+ * Replace the pointer registered with an ID and return the old value.
+ * This function can be called under the RCU read lock concurrently with
+ * idr_alloc() and idr_remove() (as long as the ID being removed is not
+ * the one being replaced!).
+ *
+ * Return: the old value on success. %-ENOENT indicates that @id was not
+ * found. %-EINVAL indicates that @ptr was not valid.
+ */
+void *idr_replace(struct idr *idr, void *ptr, unsigned long id)
+{
+ struct radix_tree_node *node;
+ void __rcu **slot = NULL;
+ void *entry;
+
+ id -= idr->idr_base;
+
+ entry = __radix_tree_lookup(&idr->idr_rt, id, &node, &slot);
+ if (!slot || radix_tree_tag_get(&idr->idr_rt, id, IDR_FREE))
+ return ERR_PTR(-ENOENT);
+
+ __radix_tree_replace(&idr->idr_rt, node, slot, ptr);
+
+ return entry;
+}
+EXPORT_SYMBOL(idr_replace);
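+
+/*
+ * Sketch (illustrative): swap the object bound to @id and check the
+ * result, since errors come back as an ERR_PTR():
+ *
+ *	old = idr_replace(&my_idr, new_ptr, id);
+ *	if (IS_ERR(old))
+ *		return PTR_ERR(old);
+ */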
+
+/**
+ * DOC: IDA description
+ *
+ * The IDA is an ID allocator which does not provide the ability to
+ * associate an ID with a pointer. As such, it only needs to store one
+ * bit per ID, and so is more space efficient than an IDR. To use an IDA,
+ * define it using DEFINE_IDA() (or embed a &struct ida in a data structure,
+ * then initialise it using ida_init()). To allocate a new ID, call
+ * ida_alloc(), ida_alloc_min(), ida_alloc_max() or ida_alloc_range().
+ * To free an ID, call ida_free().
+ *
+ * ida_destroy() can be used to dispose of an IDA without needing to
+ * free the individual IDs in it. You can use ida_is_empty() to find
+ * out whether the IDA has any IDs currently allocated.
+ *
+ * The IDA handles its own locking. It is safe to call any of the IDA
+ * functions without synchronisation in your code.
+ *
+ * IDs are currently limited to the range [0-INT_MAX]. If this is an awkward
+ * limitation, it should be quite straightforward to raise the maximum.
+ */
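+
+/*
+ * A minimal IDA usage sketch (illustrative only):
+ *
+ *	static DEFINE_IDA(my_ida);
+ *
+ *	int id = ida_alloc(&my_ida, GFP_KERNEL);
+ *
+ *	if (id < 0)
+ *		return id;
+ *	...use the ID...
+ *	ida_free(&my_ida, id);
+ */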
+
+/*
+ * Developer's notes:
+ *
+ * The IDA uses the functionality provided by the XArray to store bitmaps in
+ * each entry. The XA_FREE_MARK is only cleared when all bits in the bitmap
+ * have been set.
+ *
+ * I considered telling the XArray that each slot is an order-10 node
+ * and indexing by bit number, but the XArray can't allow a single multi-index
+ * entry in the head, which would significantly increase memory consumption
+ * for the IDA. So instead we divide the index by the number of bits in the
+ * leaf bitmap before doing a radix tree lookup.
+ *
+ * As an optimisation, if there are only a few low bits set in any given
+ * leaf, instead of allocating a 128-byte bitmap, we store the bits
+ * as a value entry. Value entries never have the XA_FREE_MARK cleared
+ * because we can always convert them into a bitmap entry.
+ *
+ * It would be possible to optimise further; once we've run out of a
+ * single 128-byte bitmap, we currently switch to a 576-byte node, put
+ * the 128-byte bitmap in the first entry and then start allocating extra
+ * 128-byte entries. We could instead use the 512 bytes of the node's
+ * data as a bitmap before moving to that scheme. I do not believe this
+ * is a worthwhile optimisation; Rasmus Villemoes surveyed the current
+ * users of the IDA and almost none of them use more than 1024 entries.
+ * Those that do use more than the 8192 IDs that the 512 bytes would
+ * provide.
+ *
+ * The IDA always uses a lock to alloc/free. If we add a 'test_bit'
+ * equivalent, it will still need locking. Going to RCU lookup would require
+ * using RCU to free bitmaps, and that's not trivial without embedding an
+ * RCU head in the bitmap, which adds a 2-pointer overhead to each 128-byte
+ * bitmap, which is excessive.
+ */
+
+/**
+ * ida_alloc_range() - Allocate an unused ID.
+ * @ida: IDA handle.
+ * @min: Lowest ID to allocate.
+ * @max: Highest ID to allocate.
+ * @gfp: Memory allocation flags.
+ *
+ * Allocate an ID between @min and @max, inclusive. The allocated ID will
+ * not exceed %INT_MAX, even if @max is larger.
+ *
+ * Context: Any context. It is safe to call this function without
+ * locking in your code.
+ * Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
+ * or %-ENOSPC if there are no free IDs.
+ */
+int ida_alloc_range(struct ida *ida, unsigned int min, unsigned int max,
+ gfp_t gfp)
+{
+ XA_STATE(xas, &ida->xa, min / IDA_BITMAP_BITS);
+ unsigned bit = min % IDA_BITMAP_BITS;
+ unsigned long flags;
+ struct ida_bitmap *bitmap, *alloc = NULL;
+
+ if ((int)min < 0)
+ return -ENOSPC;
+
+ if ((int)max < 0)
+ max = INT_MAX;
+
+retry:
+ xas_lock_irqsave(&xas, flags);
+next:
+ bitmap = xas_find_marked(&xas, max / IDA_BITMAP_BITS, XA_FREE_MARK);
+ if (xas.xa_index > min / IDA_BITMAP_BITS)
+ bit = 0;
+ if (xas.xa_index * IDA_BITMAP_BITS + bit > max)
+ goto nospc;
+
+ if (xa_is_value(bitmap)) {
+ unsigned long tmp = xa_to_value(bitmap);
+
+ if (bit < BITS_PER_XA_VALUE) {
+ bit = find_next_zero_bit(&tmp, BITS_PER_XA_VALUE, bit);
+ if (xas.xa_index * IDA_BITMAP_BITS + bit > max)
+ goto nospc;
+ if (bit < BITS_PER_XA_VALUE) {
+ tmp |= 1UL << bit;
+ xas_store(&xas, xa_mk_value(tmp));
+ goto out;
+ }
+ }
+ bitmap = alloc;
+ if (!bitmap)
+ bitmap = kzalloc(sizeof(*bitmap), GFP_NOWAIT);
+ if (!bitmap)
+ goto alloc;
+ bitmap->bitmap[0] = tmp;
+ xas_store(&xas, bitmap);
+ if (xas_error(&xas)) {
+ bitmap->bitmap[0] = 0;
+ goto out;
+ }
+ }
+
+ if (bitmap) {
+ bit = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, bit);
+ if (xas.xa_index * IDA_BITMAP_BITS + bit > max)
+ goto nospc;
+ if (bit == IDA_BITMAP_BITS)
+ goto next;
+
+ __set_bit(bit, bitmap->bitmap);
+ if (bitmap_full(bitmap->bitmap, IDA_BITMAP_BITS))
+ xas_clear_mark(&xas, XA_FREE_MARK);
+ } else {
+ if (bit < BITS_PER_XA_VALUE) {
+ bitmap = xa_mk_value(1UL << bit);
+ } else {
+ bitmap = alloc;
+ if (!bitmap)
+ bitmap = kzalloc(sizeof(*bitmap), GFP_NOWAIT);
+ if (!bitmap)
+ goto alloc;
+ __set_bit(bit, bitmap->bitmap);
+ }
+ xas_store(&xas, bitmap);
+ }
+out:
+ xas_unlock_irqrestore(&xas, flags);
+ if (xas_nomem(&xas, gfp)) {
+ xas.xa_index = min / IDA_BITMAP_BITS;
+ bit = min % IDA_BITMAP_BITS;
+ goto retry;
+ }
+ if (bitmap != alloc)
+ kfree(alloc);
+ if (xas_error(&xas))
+ return xas_error(&xas);
+ return xas.xa_index * IDA_BITMAP_BITS + bit;
+alloc:
+ xas_unlock_irqrestore(&xas, flags);
+ alloc = kzalloc(sizeof(*bitmap), gfp);
+ if (!alloc)
+ return -ENOMEM;
+ xas_set(&xas, min / IDA_BITMAP_BITS);
+ bit = min % IDA_BITMAP_BITS;
+ goto retry;
+nospc:
+ xas_unlock_irqrestore(&xas, flags);
+ kfree(alloc);
+ return -ENOSPC;
+}
+EXPORT_SYMBOL(ida_alloc_range);
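+
+/*
+ * Sketch (illustrative): confine minor numbers to [8, 15]:
+ *
+ *	int minor = ida_alloc_range(&my_ida, 8, 15, GFP_KERNEL);
+ *
+ * Once all eight IDs are in use, further calls return -ENOSPC until one
+ * is ida_free()d.
+ */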
+
+/**
+ * ida_free() - Release an allocated ID.
+ * @ida: IDA handle.
+ * @id: Previously allocated ID.
+ *
+ * Context: Any context. It is safe to call this function without
+ * locking in your code.
+ */
+void ida_free(struct ida *ida, unsigned int id)
+{
+ XA_STATE(xas, &ida->xa, id / IDA_BITMAP_BITS);
+ unsigned bit = id % IDA_BITMAP_BITS;
+ struct ida_bitmap *bitmap;
+ unsigned long flags;
+
+ if ((int)id < 0)
+ return;
+
+ xas_lock_irqsave(&xas, flags);
+ bitmap = xas_load(&xas);
+
+ if (xa_is_value(bitmap)) {
+ unsigned long v = xa_to_value(bitmap);
+ if (bit >= BITS_PER_XA_VALUE)
+ goto err;
+ if (!(v & (1UL << bit)))
+ goto err;
+ v &= ~(1UL << bit);
+ if (!v)
+ goto delete;
+ xas_store(&xas, xa_mk_value(v));
+ } else {
+ if (!bitmap || !test_bit(bit, bitmap->bitmap))
+ goto err;
+ __clear_bit(bit, bitmap->bitmap);
+ xas_set_mark(&xas, XA_FREE_MARK);
+ if (bitmap_empty(bitmap->bitmap, IDA_BITMAP_BITS)) {
+ kfree(bitmap);
+delete:
+ xas_store(&xas, NULL);
+ }
+ }
+ xas_unlock_irqrestore(&xas, flags);
+ return;
+ err:
+ xas_unlock_irqrestore(&xas, flags);
+ WARN(1, "ida_free called for id=%d which is not allocated.\n", id);
+}
+EXPORT_SYMBOL(ida_free);
+
+/**
+ * ida_destroy() - Free all IDs.
+ * @ida: IDA handle.
+ *
+ * Calling this function frees all IDs and releases all resources used
+ * by an IDA. When this call returns, the IDA is empty and can be reused
+ * or freed. If the IDA is already empty, there is no need to call this
+ * function.
+ *
+ * Context: Any context. It is safe to call this function without
+ * locking in your code.
+ */
+void ida_destroy(struct ida *ida)
+{
+ XA_STATE(xas, &ida->xa, 0);
+ struct ida_bitmap *bitmap;
+ unsigned long flags;
+
+ xas_lock_irqsave(&xas, flags);
+ xas_for_each(&xas, bitmap, ULONG_MAX) {
+ if (!xa_is_value(bitmap))
+ kfree(bitmap);
+ xas_store(&xas, NULL);
+ }
+ xas_unlock_irqrestore(&xas, flags);
+}
+EXPORT_SYMBOL(ida_destroy);
+
+#ifndef __KERNEL__
+extern void xa_dump_index(unsigned long index, unsigned int shift);
+#define IDA_CHUNK_SHIFT ilog2(IDA_BITMAP_BITS)
+
+static void ida_dump_entry(void *entry, unsigned long index)
+{
+ unsigned long i;
+
+ if (!entry)
+ return;
+
+ if (xa_is_node(entry)) {
+ struct xa_node *node = xa_to_node(entry);
+ unsigned int shift = node->shift + IDA_CHUNK_SHIFT +
+ XA_CHUNK_SHIFT;
+
+ xa_dump_index(index * IDA_BITMAP_BITS, shift);
+ xa_dump_node(node);
+ for (i = 0; i < XA_CHUNK_SIZE; i++)
+ ida_dump_entry(node->slots[i],
+ index | (i << node->shift));
+ } else if (xa_is_value(entry)) {
+ xa_dump_index(index * IDA_BITMAP_BITS, ilog2(BITS_PER_LONG));
+ pr_cont("value: data %lx [%px]\n", xa_to_value(entry), entry);
+ } else {
+ struct ida_bitmap *bitmap = entry;
+
+ xa_dump_index(index * IDA_BITMAP_BITS, IDA_CHUNK_SHIFT);
+ pr_cont("bitmap: %p data", bitmap);
+ for (i = 0; i < IDA_BITMAP_LONGS; i++)
+ pr_cont(" %lx", bitmap->bitmap[i]);
+ pr_cont("\n");
+ }
+}
+
+static void ida_dump(struct ida *ida)
+{
+ struct xarray *xa = &ida->xa;
+ pr_debug("ida: %p node %p free %d\n", ida, xa->xa_head,
+ xa->xa_flags >> ROOT_TAG_SHIFT);
+ ida_dump_entry(xa->xa_head, 0);
+}
+#endif
diff --git a/lib/inflate.c b/lib/inflate.c
new file mode 100644
index 000000000..fbaf03c17
--- /dev/null
+++ b/lib/inflate.c
@@ -0,0 +1,1310 @@
+// SPDX-License-Identifier: GPL-2.0
+#define DEBG(x)
+#define DEBG1(x)
+/* inflate.c -- Not copyrighted 1992 by Mark Adler
+ version c10p1, 10 January 1993 */
+
+/*
+ * Adapted for booting Linux by Hannu Savolainen 1993
+ * based on gzip-1.0.3
+ *
+ * Nicolas Pitre <nico@fluxnic.net>, 1999/04/14 :
+ * Little mods for all variables to reside either in rodata or bss segments
+ * by marking constant variables with 'const' and initializing all the others
+ * at run-time only. This allows for the kernel uncompressor to run
+ * directly from Flash or ROM memory on embedded systems.
+ */
+
+/*
+ Inflate deflated (PKZIP's method 8 compressed) data. The compression
+ method searches for as much of the current string of bytes (up to a
+ length of 258) in the previous 32 K bytes. If it doesn't find any
+ matches (of at least length 3), it codes the next byte. Otherwise, it
+ codes the length of the matched string and its distance backwards from
+ the current position. There is a single Huffman code that codes both
+ single bytes (called "literals") and match lengths. A second Huffman
+ code codes the distance information, which follows a length code. Each
+ length or distance code actually represents a base value and a number
+ * of "extra" (sometimes zero) bits to read and add to the base value. At
+ the end of each deflated block is a special end-of-block (EOB) literal/
+ length code. The decoding process is basically: get a literal/length
+ code; if EOB then done; if a literal, emit the decoded byte; if a
+ length then get the distance and emit the referred-to bytes from the
+ sliding window of previously emitted data.
+
+ There are (currently) three kinds of inflate blocks: stored, fixed, and
+ dynamic. The compressor deals with some chunk of data at a time, and
+ decides which method to use on a chunk-by-chunk basis. A chunk might
+ typically be 32 K or 64 K. If the chunk is incompressible, then the
+ "stored" method is used. In this case, the bytes are simply stored as
+ is, eight bits per byte, with none of the above coding. The bytes are
+ preceded by a count, since there is no longer an EOB code.
+
+ If the data is compressible, then either the fixed or dynamic methods
+ are used. In the dynamic method, the compressed data is preceded by
+ an encoding of the literal/length and distance Huffman codes that are
+ to be used to decode this block. The representation is itself Huffman
+ coded, and so is preceded by a description of that code. These code
+ descriptions take up a little space, and so for small blocks, there is
+ a predefined set of codes, called the fixed codes. The fixed method is
+ used if the block codes up smaller that way (usually for quite small
+ chunks), otherwise the dynamic method is used. In the latter case, the
+ codes are customized to the probabilities in the current block, and so
+ can code it much better than the pre-determined fixed codes.
+
+ The Huffman codes themselves are decoded using a multi-level table
+ lookup, in order to maximize the speed of decoding plus the speed of
+ building the decoding tables. See the comments below that precede the
+ lbits and dbits tuning parameters.
+ */
+
+
+/*
+ Notes beyond the 1.93a appnote.txt:
+
+ 1. Distance pointers never point before the beginning of the output
+ stream.
+ 2. Distance pointers can point back across blocks, up to 32k away.
+ 3. There is an implied maximum of 7 bits for the bit length table and
+ 15 bits for the actual data.
+ 4. If only one code exists, then it is encoded using one bit. (Zero
+ would be more efficient, but perhaps a little confusing.) If two
+ codes exist, they are coded using one bit each (0 and 1).
+ 5. There is no way of sending zero distance codes--a dummy must be
+ sent if there are none. (History: a pre 2.0 version of PKZIP would
+ store blocks with no distance codes, but this was discovered to be
+ too harsh a criterion.) Valid only for 1.93a. 2.04c does allow
+ zero distance codes, which is sent as one code of zero bits in
+ length.
+ 6. There are up to 286 literal/length codes. Code 256 represents the
+ end-of-block. Note however that the static length tree defines
+ 288 codes just to fill out the Huffman codes. Codes 286 and 287
+ cannot be used though, since there is no length base or extra bits
+ defined for them. Similarly, there are up to 30 distance codes.
+ However, static trees define 32 codes (all 5 bits) to fill out the
+ Huffman codes, but the last two had better not show up in the data.
+ 7. Unzip can check dynamic Huffman blocks for complete code sets.
+ The exception is that a single code would not be complete (see #4).
+ 8. The five bits following the block type are really the number of
+ literal codes sent minus 257.
+ 9. Length codes 8,16,16 are interpreted as 13 length codes of 8 bits
+ (1+6+6). Therefore, to output three times the length, you output
+ three codes (1+1+1), whereas to output four times the same length,
+ you only need two codes (1+3). Hmm.
+ 10. In the tree reconstruction algorithm, Code = Code + Increment
+ only if BitLength(i) is not zero. (Pretty obvious.)
+ 11. Correction: 4 Bits: # of Bit Length codes - 4 (4 - 19)
+ 12. Note: length code 284 can represent 227-258, but length code 285
+ really is 258. The last length deserves its own, short code
+ since it gets used a lot in very redundant files. The length
+ 258 is special since 258 - 3 (the min match length) is 255.
+ 13. The literal/length and distance code bit lengths are read as a
+ single stream of lengths. It is possible (and advantageous) for
+ a repeat code (16, 17, or 18) to go across the boundary between
+ the two sets of lengths.
+ */
+#include <linux/compiler.h>
+#ifdef NO_INFLATE_MALLOC
+#include <linux/slab.h>
+#endif
+
+#ifdef RCSID
+static char rcsid[] = "#Id: inflate.c,v 0.14 1993/06/10 13:27:04 jloup Exp #";
+#endif
+
+#ifndef STATIC
+
+#if defined(STDC_HEADERS) || defined(HAVE_STDLIB_H)
+# include <sys/types.h>
+# include <stdlib.h>
+#endif
+
+#include "gzip.h"
+#define STATIC
+#endif /* !STATIC */
+
+#ifndef INIT
+#define INIT
+#endif
+
+#define slide window
+
+/* Huffman code lookup table entry--this entry is four bytes for machines
+ that have 16-bit pointers (e.g. PC's in the small or medium model).
+ Valid extra bits are 0..13. e == 15 is EOB (end of block), e == 16
+ means that v is a literal, 16 < e < 32 means that v is a pointer to
+ the next table, which codes e - 16 bits, and lastly e == 99 indicates
+ an unused code. If a code with e == 99 is looked up, this implies an
+ error in the data. */
+struct huft {
+ uch e; /* number of extra bits or operation */
+ uch b; /* number of bits in this code or subcode */
+ union {
+ ush n; /* literal, length base, or distance base */
+ struct huft *t; /* pointer to next level of table */
+ } v;
+};
+
+
+/* Function prototypes */
+STATIC int INIT huft_build OF((unsigned *, unsigned, unsigned,
+ const ush *, const ush *, struct huft **, int *));
+STATIC int INIT huft_free OF((struct huft *));
+STATIC int INIT inflate_codes OF((struct huft *, struct huft *, int, int));
+STATIC int INIT inflate_stored OF((void));
+STATIC int INIT inflate_fixed OF((void));
+STATIC int INIT inflate_dynamic OF((void));
+STATIC int INIT inflate_block OF((int *));
+STATIC int INIT inflate OF((void));
+
+
+/* The inflate algorithm uses a sliding 32 K byte window on the uncompressed
+ stream to find repeated byte strings. This is implemented here as a
+ circular buffer. The index is updated simply by incrementing and then
+ ANDing with 0x7fff (32K-1). */
+/* It is left to other modules to supply the 32 K area. It is assumed
+ to be usable as if it were declared "uch slide[32768];" or as just
+ "uch *slide;" and then malloc'ed in the latter case. The definition
+ must be in unzip.h, included above. */
+/* unsigned wp; current position in slide */
+#define wp outcnt
+#define flush_output(w) (wp=(w),flush_window())
+
+/* Tables for deflate from PKZIP's appnote.txt. */
+static const unsigned border[] = { /* Order of the bit length code lengths */
+ 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
+static const ush cplens[] = { /* Copy lengths for literal codes 257..285 */
+ 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
+ 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0};
+ /* note: see note #13 above about the 258 in this list. */
+static const ush cplext[] = { /* Extra bits for literal codes 257..285 */
+ 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2,
+ 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 99, 99}; /* 99==invalid */
+static const ush cpdist[] = { /* Copy offsets for distance codes 0..29 */
+ 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193,
+ 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145,
+ 8193, 12289, 16385, 24577};
+static const ush cpdext[] = { /* Extra bits for distance codes */
+ 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6,
+ 7, 7, 8, 8, 9, 9, 10, 10, 11, 11,
+ 12, 12, 13, 13};
+
+
+
+/* Macros for inflate() bit peeking and grabbing.
+ The usage is:
+
+ NEEDBITS(j)
+ x = b & mask_bits[j];
+ DUMPBITS(j)
+
+ where NEEDBITS makes sure that b has at least j bits in it, and
+ DUMPBITS removes the bits from b. The macros use the variable k
+ for the number of bits in b. Normally, b and k are register
+ variables for speed, and are initialized at the beginning of a
+ routine that uses these macros from a global bit buffer and count.
+
+ If we assume that EOB will be the longest code, then we will never
+ ask for bits with NEEDBITS that are beyond the end of the stream.
+ So, NEEDBITS should not read any more bytes than are needed to
+ meet the request. Then no bytes need to be "returned" to the buffer
+ at the end of the last block.
+
+ However, this assumption is not true for fixed blocks--the EOB code
+ is 7 bits, but the other literal/length codes can be 8 or 9 bits.
+ (The EOB code is shorter than other codes because fixed blocks are
+ generally short. So, while a block always has an EOB, many other
+ literal/length codes have a significantly lower probability of
+ showing up at all.) However, by making the first table have a
+ lookup of seven bits, the EOB code will be found in that first
+ lookup, and so will not require that too many bits be pulled from
+ the stream.
+ */
+
+STATIC ulg bb; /* bit buffer */
+STATIC unsigned bk; /* bits in bit buffer */
+
+STATIC const ush mask_bits[] = {
+ 0x0000,
+ 0x0001, 0x0003, 0x0007, 0x000f, 0x001f, 0x003f, 0x007f, 0x00ff,
+ 0x01ff, 0x03ff, 0x07ff, 0x0fff, 0x1fff, 0x3fff, 0x7fff, 0xffff
+};
+
+#define NEXTBYTE() ({ int v = get_byte(); if (v < 0) goto underrun; (uch)v; })
+#define NEEDBITS(n) {while(k<(n)){b|=((ulg)NEXTBYTE())<<k;k+=8;}}
+#define DUMPBITS(n) {b>>=(n);k-=(n);}
+
+#ifndef NO_INFLATE_MALLOC
+/* A trivial malloc implementation, adapted from
+ * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
+ */
+
+static unsigned long malloc_ptr;
+static int malloc_count;
+
+static void *malloc(int size)
+{
+ void *p;
+
+ if (size < 0)
+ error("Malloc error");
+ if (!malloc_ptr)
+ malloc_ptr = free_mem_ptr;
+
+ malloc_ptr = (malloc_ptr + 3) & ~3; /* Align */
+
+ p = (void *)malloc_ptr;
+ malloc_ptr += size;
+
+ if (free_mem_end_ptr && malloc_ptr >= free_mem_end_ptr)
+ error("Out of memory");
+
+ malloc_count++;
+ return p;
+}
+
+static void free(void *where)
+{
+ malloc_count--;
+ if (!malloc_count)
+ malloc_ptr = free_mem_ptr;
+}
+#else
+#define malloc(a) kmalloc(a, GFP_KERNEL)
+#define free(a) kfree(a)
+#endif
+
+/*
+ Huffman code decoding is performed using a multi-level table lookup.
+ The fastest way to decode is to simply build a lookup table whose
+ size is determined by the longest code. However, the time it takes
+ to build this table can also be a factor if the data being decoded
+ is not very long. The most common codes are necessarily the
+ shortest codes, so those codes dominate the decoding time, and hence
+ the speed. The idea is you can have a shorter table that decodes the
+ shorter, more probable codes, and then point to subsidiary tables for
+ the longer codes. The time it costs to decode the longer codes is
+ then traded against the time it takes to make longer tables.
+
+ The results of this trade are in the variables lbits and dbits
+ below. lbits is the number of bits the first level table for literal/
+ length codes can decode in one step, and dbits is the same thing for
+ the distance codes. Subsequent tables are also less than or equal to
+ those sizes. These values may be adjusted either when all of the
+ codes are shorter than that, in which case the longest code length in
+ bits is used, or when the shortest code is *longer* than the requested
+ table size, in which case the length of the shortest code in bits is
+ used.
+
+ There are two different values for the two tables, since they code a
+ different number of possibilities each. The literal/length table
+ codes 286 possible values, or in a flat code, a little over eight
+ bits. The distance table codes 30 possible values, or a little less
+ than five bits, flat. The optimum values for speed end up being
+ about one bit more than those, so lbits is 8+1 and dbits is 5+1.
+ The optimum values may differ though from machine to machine, and
+ possibly even between compilers. Your mileage may vary.
+ */
+
+
+STATIC const int lbits = 9; /* bits in base literal/length lookup table */
+STATIC const int dbits = 6; /* bits in base distance lookup table */
+
+
+/* If BMAX needs to be larger than 16, then h and x[] should be ulg. */
+#define BMAX 16 /* maximum bit length of any code (16 for explode) */
+#define N_MAX 288 /* maximum number of codes in any set */
+
+
+STATIC unsigned hufts; /* track memory usage */
+
+
+STATIC int INIT huft_build(
+ unsigned *b, /* code lengths in bits (all assumed <= BMAX) */
+ unsigned n, /* number of codes (assumed <= N_MAX) */
+ unsigned s, /* number of simple-valued codes (0..s-1) */
+ const ush *d, /* list of base values for non-simple codes */
+ const ush *e, /* list of extra bits for non-simple codes */
+ struct huft **t, /* result: starting table */
+ int *m /* maximum lookup bits, returns actual */
+ )
+/* Given a list of code lengths and a maximum table size, make a set of
+ tables to decode that set of codes. Return zero on success, one if
+ the given code set is incomplete (the tables are still built in this
+ case), two if the input is invalid (all zero length codes or an
+ oversubscribed set of lengths), and three if not enough memory. */
+{
+ unsigned a; /* counter for codes of length k */
+ unsigned f; /* i repeats in table every f entries */
+ int g; /* maximum code length */
+ int h; /* table level */
+ register unsigned i; /* counter, current code */
+ register unsigned j; /* counter */
+ register int k; /* number of bits in current code */
+ int l; /* bits per table (returned in m) */
+ register unsigned *p; /* pointer into c[], b[], or v[] */
+ register struct huft *q; /* points to current table */
+ struct huft r; /* table entry for structure assignment */
+ register int w; /* bits before this table == (l * h) */
+ unsigned *xp; /* pointer into x */
+ int y; /* number of dummy codes added */
+ unsigned z; /* number of entries in current table */
+ struct {
+ unsigned c[BMAX+1]; /* bit length count table */
+ struct huft *u[BMAX]; /* table stack */
+ unsigned v[N_MAX]; /* values in order of bit length */
+ unsigned x[BMAX+1]; /* bit offsets, then code stack */
+ } *stk;
+ unsigned *c, *v, *x;
+ struct huft **u;
+ int ret;
+
+DEBG("huft1 ");
+
+ stk = malloc(sizeof(*stk));
+ if (stk == NULL)
+ return 3; /* out of memory */
+
+ c = stk->c;
+ v = stk->v;
+ x = stk->x;
+ u = stk->u;
+
+ /* Generate counts for each bit length */
+ memzero(stk->c, sizeof(stk->c));
+ p = b; i = n;
+ do {
+ Tracecv(*p, (stderr, (n-i >= ' ' && n-i <= '~' ? "%c %d\n" : "0x%x %d\n"),
+ n-i, *p));
+ c[*p]++; /* assume all entries <= BMAX */
+ p++; /* Can't combine with above line (Solaris bug) */
+ } while (--i);
+ if (c[0] == n) /* null input--all zero length codes */
+ {
+ *t = (struct huft *)NULL;
+ *m = 0;
+ ret = 2;
+ goto out;
+ }
+
+DEBG("huft2 ");
+
+ /* Find minimum and maximum length, bound *m by those */
+ l = *m;
+ for (j = 1; j <= BMAX; j++)
+ if (c[j])
+ break;
+ k = j; /* minimum code length */
+ if ((unsigned)l < j)
+ l = j;
+ for (i = BMAX; i; i--)
+ if (c[i])
+ break;
+ g = i; /* maximum code length */
+ if ((unsigned)l > i)
+ l = i;
+ *m = l;
+
+DEBG("huft3 ");
+
+ /* Adjust last length count to fill out codes, if needed */
+ for (y = 1 << j; j < i; j++, y <<= 1)
+ if ((y -= c[j]) < 0) {
+ ret = 2; /* bad input: more codes than bits */
+ goto out;
+ }
+ if ((y -= c[i]) < 0) {
+ ret = 2;
+ goto out;
+ }
+ c[i] += y;
+
+DEBG("huft4 ");
+
+ /* Generate starting offsets into the value table for each length */
+ x[1] = j = 0;
+ p = c + 1; xp = x + 2;
+ while (--i) { /* note that i == g from above */
+ *xp++ = (j += *p++);
+ }
+
+DEBG("huft5 ");
+
+ /* Make a table of values in order of bit lengths */
+ p = b; i = 0;
+ do {
+ if ((j = *p++) != 0)
+ v[x[j]++] = i;
+ } while (++i < n);
+ n = x[g]; /* set n to length of v */
+
+DEBG("h6 ");
+
+ /* Generate the Huffman codes and for each, make the table entries */
+ x[0] = i = 0; /* first Huffman code is zero */
+ p = v; /* grab values in bit order */
+ h = -1; /* no tables yet--level -1 */
+ w = -l; /* bits decoded == (l * h) */
+ u[0] = (struct huft *)NULL; /* just to keep compilers happy */
+ q = (struct huft *)NULL; /* ditto */
+ z = 0; /* ditto */
+DEBG("h6a ");
+
+ /* go through the bit lengths (k already is bits in shortest code) */
+ for (; k <= g; k++)
+ {
+DEBG("h6b ");
+ a = c[k];
+ while (a--)
+ {
+DEBG("h6b1 ");
+ /* here i is the Huffman code of length k bits for value *p */
+ /* make tables up to required level */
+ while (k > w + l)
+ {
+DEBG1("1 ");
+ h++;
+ w += l; /* previous table always l bits */
+
+ /* compute minimum size table less than or equal to l bits */
+ z = (z = g - w) > (unsigned)l ? l : z; /* upper limit on table size */
+ if ((f = 1 << (j = k - w)) > a + 1) /* try a k-w bit table */
+ { /* too few codes for k-w bit table */
+DEBG1("2 ");
+ f -= a + 1; /* deduct codes from patterns left */
+ xp = c + k;
+ if (j < z)
+ while (++j < z) /* try smaller tables up to z bits */
+ {
+ if ((f <<= 1) <= *++xp)
+ break; /* enough codes to use up j bits */
+ f -= *xp; /* else deduct codes from patterns */
+ }
+ }
+DEBG1("3 ");
+ z = 1 << j; /* table entries for j-bit table */
+
+ /* allocate and link in new table */
+ if ((q = (struct huft *)malloc((z + 1)*sizeof(struct huft))) ==
+ (struct huft *)NULL)
+ {
+ if (h)
+ huft_free(u[0]);
+ ret = 3; /* not enough memory */
+ goto out;
+ }
+DEBG1("4 ");
+ hufts += z + 1; /* track memory usage */
+ *t = q + 1; /* link to list for huft_free() */
+ *(t = &(q->v.t)) = (struct huft *)NULL;
+ u[h] = ++q; /* table starts after link */
+
+DEBG1("5 ");
+ /* connect to last table, if there is one */
+ if (h)
+ {
+ x[h] = i; /* save pattern for backing up */
+ r.b = (uch)l; /* bits to dump before this table */
+ r.e = (uch)(16 + j); /* bits in this table */
+ r.v.t = q; /* pointer to this table */
+ j = i >> (w - l); /* (get around Turbo C bug) */
+ u[h-1][j] = r; /* connect to last table */
+ }
+DEBG1("6 ");
+ }
+DEBG("h6c ");
+
+ /* set up table entry in r */
+ r.b = (uch)(k - w);
+ if (p >= v + n)
+ r.e = 99; /* out of values--invalid code */
+ else if (*p < s)
+ {
+ r.e = (uch)(*p < 256 ? 16 : 15); /* 256 is end-of-block code */
+ r.v.n = (ush)(*p); /* simple code is just the value */
+ p++; /* one compiler does not like *p++ */
+ }
+ else
+ {
+ r.e = (uch)e[*p - s]; /* non-simple--look up in lists */
+ r.v.n = d[*p++ - s];
+ }
+DEBG("h6d ");
+
+ /* fill code-like entries with r */
+ f = 1 << (k - w);
+ for (j = i >> w; j < z; j += f)
+ q[j] = r;
+
+ /* backwards increment the k-bit code i */
+ for (j = 1 << (k - 1); i & j; j >>= 1)
+ i ^= j;
+ i ^= j;
+
+ /* backup over finished tables */
+ while ((i & ((1 << w) - 1)) != x[h])
+ {
+ h--; /* don't need to update q */
+ w -= l;
+ }
+DEBG("h6e ");
+ }
+DEBG("h6f ");
+ }
+
+DEBG("huft7 ");
+
+ /* Return true (1) if we were given an incomplete table */
+ ret = y != 0 && g != 1;
+
+ out:
+ free(stk);
+ return ret;
+}
+
+
+
+STATIC int INIT huft_free(
+ struct huft *t /* table to free */
+ )
+/* Free the malloc'ed tables built by huft_build(), which makes a linked
+ list of the tables it made, with the links in a dummy first entry of
+ each table. */
+{
+ register struct huft *p, *q;
+
+
+ /* Go through linked list, freeing from the malloced (t[-1]) address. */
+ p = t;
+ while (p != (struct huft *)NULL)
+ {
+ q = (--p)->v.t;
+ free((char*)p);
+ p = q;
+ }
+ return 0;
+}
+
+
+STATIC int INIT inflate_codes(
+ struct huft *tl, /* literal/length decoder tables */
+ struct huft *td, /* distance decoder tables */
+ int bl, /* number of bits decoded by tl[] */
+ int bd /* number of bits decoded by td[] */
+ )
+/* inflate (decompress) the codes in a deflated (compressed) block.
+ Return an error code or zero if it all goes ok. */
+{
+ register unsigned e; /* table entry flag/number of extra bits */
+ unsigned n, d; /* length and index for copy */
+ unsigned w; /* current window position */
+ struct huft *t; /* pointer to table entry */
+ unsigned ml, md; /* masks for bl and bd bits */
+ register ulg b; /* bit buffer */
+ register unsigned k; /* number of bits in bit buffer */
+
+
+ /* make local copies of globals */
+ b = bb; /* initialize bit buffer */
+ k = bk;
+ w = wp; /* initialize window position */
+
+ /* inflate the coded data */
+ ml = mask_bits[bl]; /* precompute masks for speed */
+ md = mask_bits[bd];
+ for (;;) /* do until end of block */
+ {
+ NEEDBITS((unsigned)bl)
+ if ((e = (t = tl + ((unsigned)b & ml))->e) > 16)
+ do {
+ if (e == 99)
+ return 1;
+ DUMPBITS(t->b)
+ e -= 16;
+ NEEDBITS(e)
+ } while ((e = (t = t->v.t + ((unsigned)b & mask_bits[e]))->e) > 16);
+ DUMPBITS(t->b)
+ if (e == 16) /* then it's a literal */
+ {
+ slide[w++] = (uch)t->v.n;
+ Tracevv((stderr, "%c", slide[w-1]));
+ if (w == WSIZE)
+ {
+ flush_output(w);
+ w = 0;
+ }
+ }
+ else /* it's an EOB or a length */
+ {
+ /* exit if end of block */
+ if (e == 15)
+ break;
+
+ /* get length of block to copy */
+ NEEDBITS(e)
+ n = t->v.n + ((unsigned)b & mask_bits[e]);
+ DUMPBITS(e);
+
+ /* decode distance of block to copy */
+ NEEDBITS((unsigned)bd)
+ if ((e = (t = td + ((unsigned)b & md))->e) > 16)
+ do {
+ if (e == 99)
+ return 1;
+ DUMPBITS(t->b)
+ e -= 16;
+ NEEDBITS(e)
+ } while ((e = (t = t->v.t + ((unsigned)b & mask_bits[e]))->e) > 16);
+ DUMPBITS(t->b)
+ NEEDBITS(e)
+ d = w - t->v.n - ((unsigned)b & mask_bits[e]);
+ DUMPBITS(e)
+ Tracevv((stderr,"\\[%d,%d]", w-d, n));
+
+ /* do the copy */
+ do {
+ n -= (e = (e = WSIZE - ((d &= WSIZE-1) > w ? d : w)) > n ? n : e);
+#if !defined(NOMEMCPY) && !defined(DEBUG)
+ if (w - d >= e) /* (this test assumes unsigned comparison) */
+ {
+ memcpy(slide + w, slide + d, e);
+ w += e;
+ d += e;
+ }
+ else /* do it slow to avoid memcpy() overlap */
+#endif /* !NOMEMCPY */
+ do {
+ slide[w++] = slide[d++];
+ Tracevv((stderr, "%c", slide[w-1]));
+ } while (--e);
+ if (w == WSIZE)
+ {
+ flush_output(w);
+ w = 0;
+ }
+ } while (n);
+ }
+ }
+
+
+ /* restore the globals from the locals */
+ wp = w; /* restore global window pointer */
+ bb = b; /* restore global bit buffer */
+ bk = k;
+
+ /* done */
+ return 0;
+
+ underrun:
+ return 4; /* Input underrun */
+}
+
+
+
+STATIC int INIT inflate_stored(void)
+/* "decompress" an inflated type 0 (stored) block. */
+{
+ unsigned n; /* number of bytes in block */
+ unsigned w; /* current window position */
+ register ulg b; /* bit buffer */
+ register unsigned k; /* number of bits in bit buffer */
+
+DEBG("<stor");
+
+ /* make local copies of globals */
+ b = bb; /* initialize bit buffer */
+ k = bk;
+ w = wp; /* initialize window position */
+
+
+ /* go to byte boundary */
+ n = k & 7;
+ DUMPBITS(n);
+
+
+ /* get the length and its complement */
+ NEEDBITS(16)
+ n = ((unsigned)b & 0xffff);
+ DUMPBITS(16)
+ NEEDBITS(16)
+ if (n != (unsigned)((~b) & 0xffff))
+ return 1; /* error in compressed data */
+ DUMPBITS(16)
+
+
+ /* read and output the compressed data */
+ while (n--)
+ {
+ NEEDBITS(8)
+ slide[w++] = (uch)b;
+ if (w == WSIZE)
+ {
+ flush_output(w);
+ w = 0;
+ }
+ DUMPBITS(8)
+ }
+
+
+ /* restore the globals from the locals */
+ wp = w; /* restore global window pointer */
+ bb = b; /* restore global bit buffer */
+ bk = k;
+
+ DEBG(">");
+ return 0;
+
+ underrun:
+ return 4; /* Input underrun */
+}
+
+
+/*
+ * We use `noinline' here to prevent gcc-3.5 from using too much stack space
+ */
+STATIC int noinline INIT inflate_fixed(void)
+/* decompress an inflated type 1 (fixed Huffman codes) block. We should
+ either replace this with a custom decoder, or at least precompute the
+ Huffman tables. */
+{
+ int i; /* temporary variable */
+ struct huft *tl; /* literal/length code table */
+ struct huft *td; /* distance code table */
+ int bl; /* lookup bits for tl */
+ int bd; /* lookup bits for td */
+ unsigned *l; /* length list for huft_build */
+
+DEBG("<fix");
+
+ l = malloc(sizeof(*l) * 288);
+ if (l == NULL)
+ return 3; /* out of memory */
+
+ /* set up literal table */
+ for (i = 0; i < 144; i++)
+ l[i] = 8;
+ for (; i < 256; i++)
+ l[i] = 9;
+ for (; i < 280; i++)
+ l[i] = 7;
+ for (; i < 288; i++) /* make a complete, but wrong code set */
+ l[i] = 8;
+ bl = 7;
+ if ((i = huft_build(l, 288, 257, cplens, cplext, &tl, &bl)) != 0) {
+ free(l);
+ return i;
+ }
+
+ /* set up distance table */
+ for (i = 0; i < 30; i++) /* make an incomplete code set */
+ l[i] = 5;
+ bd = 5;
+ if ((i = huft_build(l, 30, 0, cpdist, cpdext, &td, &bd)) > 1)
+ {
+ huft_free(tl);
+ free(l);
+
+ DEBG(">");
+ return i;
+ }
+
+
+ /* decompress until an end-of-block code */
+ if (inflate_codes(tl, td, bl, bd)) {
+ free(l);
+ return 1;
+ }
+
+ /* free the decoding tables, return */
+ free(l);
+ huft_free(tl);
+ huft_free(td);
+ return 0;
+}
+
+
+/*
+ * We use `noinline' here to prevent gcc-3.5 from using too much stack space
+ */
+STATIC int noinline INIT inflate_dynamic(void)
+/* decompress an inflated type 2 (dynamic Huffman codes) block. */
+{
+ int i; /* temporary variables */
+ unsigned j;
+ unsigned l; /* last length */
+ unsigned m; /* mask for bit lengths table */
+ unsigned n; /* number of lengths to get */
+ struct huft *tl; /* literal/length code table */
+ struct huft *td; /* distance code table */
+ int bl; /* lookup bits for tl */
+ int bd; /* lookup bits for td */
+ unsigned nb; /* number of bit length codes */
+ unsigned nl; /* number of literal/length codes */
+ unsigned nd; /* number of distance codes */
+ unsigned *ll; /* literal/length and distance code lengths */
+ register ulg b; /* bit buffer */
+ register unsigned k; /* number of bits in bit buffer */
+ int ret;
+
+DEBG("<dyn");
+
+#ifdef PKZIP_BUG_WORKAROUND
+ ll = malloc(sizeof(*ll) * (288+32)); /* literal/length and distance code lengths */
+#else
+ ll = malloc(sizeof(*ll) * (286+30)); /* literal/length and distance code lengths */
+#endif
+
+ if (ll == NULL)
+ return 1;
+
+ /* make local bit buffer */
+ b = bb;
+ k = bk;
+
+
+ /* read in table lengths */
+ NEEDBITS(5)
+ nl = 257 + ((unsigned)b & 0x1f); /* number of literal/length codes */
+ DUMPBITS(5)
+ NEEDBITS(5)
+ nd = 1 + ((unsigned)b & 0x1f); /* number of distance codes */
+ DUMPBITS(5)
+ NEEDBITS(4)
+ nb = 4 + ((unsigned)b & 0xf); /* number of bit length codes */
+ DUMPBITS(4)
+#ifdef PKZIP_BUG_WORKAROUND
+ if (nl > 288 || nd > 32)
+#else
+ if (nl > 286 || nd > 30)
+#endif
+ {
+ ret = 1; /* bad lengths */
+ goto out;
+ }
+
+DEBG("dyn1 ");
+
+ /* read in bit-length-code lengths */
+ for (j = 0; j < nb; j++)
+ {
+ NEEDBITS(3)
+ ll[border[j]] = (unsigned)b & 7;
+ DUMPBITS(3)
+ }
+ for (; j < 19; j++)
+ ll[border[j]] = 0;
+
+DEBG("dyn2 ");
+
+ /* build decoding table for trees--single level, 7 bit lookup */
+ bl = 7;
+ if ((i = huft_build(ll, 19, 19, NULL, NULL, &tl, &bl)) != 0)
+ {
+ if (i == 1)
+ huft_free(tl);
+ ret = i; /* incomplete code set */
+ goto out;
+ }
+
+DEBG("dyn3 ");
+
+ /* read in literal and distance code lengths */
+ n = nl + nd;
+ m = mask_bits[bl];
+ i = l = 0;
+ while ((unsigned)i < n)
+ {
+ NEEDBITS((unsigned)bl)
+ j = (td = tl + ((unsigned)b & m))->b;
+ DUMPBITS(j)
+ j = td->v.n;
+ if (j < 16) /* length of code in bits (0..15) */
+ ll[i++] = l = j; /* save last length in l */
+ else if (j == 16) /* repeat last length 3 to 6 times */
+ {
+ NEEDBITS(2)
+ j = 3 + ((unsigned)b & 3);
+ DUMPBITS(2)
+ if ((unsigned)i + j > n) {
+ ret = 1;
+ goto out;
+ }
+ while (j--)
+ ll[i++] = l;
+ }
+ else if (j == 17) /* 3 to 10 zero length codes */
+ {
+ NEEDBITS(3)
+ j = 3 + ((unsigned)b & 7);
+ DUMPBITS(3)
+ if ((unsigned)i + j > n) {
+ ret = 1;
+ goto out;
+ }
+ while (j--)
+ ll[i++] = 0;
+ l = 0;
+ }
+ else /* j == 18: 11 to 138 zero length codes */
+ {
+ NEEDBITS(7)
+ j = 11 + ((unsigned)b & 0x7f);
+ DUMPBITS(7)
+ if ((unsigned)i + j > n) {
+ ret = 1;
+ goto out;
+ }
+ while (j--)
+ ll[i++] = 0;
+ l = 0;
+ }
+ }
+
+DEBG("dyn4 ");
+
+ /* free decoding table for trees */
+ huft_free(tl);
+
+DEBG("dyn5 ");
+
+ /* restore the global bit buffer */
+ bb = b;
+ bk = k;
+
+DEBG("dyn5a ");
+
+ /* build the decoding tables for literal/length and distance codes */
+ bl = lbits;
+ if ((i = huft_build(ll, nl, 257, cplens, cplext, &tl, &bl)) != 0)
+ {
+DEBG("dyn5b ");
+ if (i == 1) {
+ error("incomplete literal tree");
+ huft_free(tl);
+ }
+ ret = i; /* incomplete code set */
+ goto out;
+ }
+DEBG("dyn5c ");
+ bd = dbits;
+ if ((i = huft_build(ll + nl, nd, 0, cpdist, cpdext, &td, &bd)) != 0)
+ {
+DEBG("dyn5d ");
+ if (i == 1) {
+ error("incomplete distance tree");
+#ifdef PKZIP_BUG_WORKAROUND
+ i = 0;
+ }
+#else
+ huft_free(td);
+ }
+ huft_free(tl);
+ ret = i; /* incomplete code set */
+ goto out;
+#endif
+ }
+
+DEBG("dyn6 ");
+
+ /* decompress until an end-of-block code */
+ if (inflate_codes(tl, td, bl, bd)) {
+ ret = 1;
+ goto out;
+ }
+
+DEBG("dyn7 ");
+
+ /* free the decoding tables, return */
+ huft_free(tl);
+ huft_free(td);
+
+ DEBG(">");
+ ret = 0;
+out:
+ free(ll);
+ return ret;
+
+underrun:
+ ret = 4; /* Input underrun */
+ goto out;
+}
+
+
+
+STATIC int INIT inflate_block(
+ int *e /* last block flag */
+ )
+/* decompress an inflated block */
+{
+ unsigned t; /* block type */
+ register ulg b; /* bit buffer */
+ register unsigned k; /* number of bits in bit buffer */
+
+ DEBG("<blk");
+
+ /* make local bit buffer */
+ b = bb;
+ k = bk;
+
+
+ /* read in last block bit */
+ NEEDBITS(1)
+ *e = (int)b & 1;
+ DUMPBITS(1)
+
+
+ /* read in block type */
+ NEEDBITS(2)
+ t = (unsigned)b & 3;
+ DUMPBITS(2)
+
+
+ /* restore the global bit buffer */
+ bb = b;
+ bk = k;
+
+ /* inflate that block type */
+ if (t == 2)
+ return inflate_dynamic();
+ if (t == 0)
+ return inflate_stored();
+ if (t == 1)
+ return inflate_fixed();
+
+ DEBG(">");
+
+ /* bad block type */
+ return 2;
+
+ underrun:
+ return 4; /* Input underrun */
+}
+
+
+
+STATIC int INIT inflate(void)
+/* decompress an inflated entry */
+{
+ int e; /* last block flag */
+ int r; /* result code */
+ unsigned h; /* maximum struct huft's malloc'ed */
+
+ /* initialize window, bit buffer */
+ wp = 0;
+ bk = 0;
+ bb = 0;
+
+
+ /* decompress until the last block */
+ h = 0;
+ do {
+ hufts = 0;
+#ifdef ARCH_HAS_DECOMP_WDOG
+ arch_decomp_wdog();
+#endif
+ r = inflate_block(&e);
+ if (r)
+ return r;
+ if (hufts > h)
+ h = hufts;
+ } while (!e);
+
+ /* Undo too much lookahead. The next read will be byte aligned so we
+ * can discard unused bits in the last meaningful byte.
+ */
+ while (bk >= 8) {
+ bk -= 8;
+ inptr--;
+ }
+
+ /* flush out slide */
+ flush_output(wp);
+
+
+ /* return success */
+#ifdef DEBUG
+ fprintf(stderr, "<%u> ", h);
+#endif /* DEBUG */
+ return 0;
+}
+
+/**********************************************************************
+ *
+ * The following are support routines for inflate.c
+ *
+ **********************************************************************/
+
+static ulg crc_32_tab[256];
+static ulg crc; /* initialized in makecrc() so it'll reside in bss */
+#define CRC_VALUE (crc ^ 0xffffffffUL)
+
+/*
+ * Code to compute the CRC-32 table. Borrowed from
+ * gzip-1.0.3/makecrc.c.
+ */
+
+static void INIT
+makecrc(void)
+{
+/* Not copyrighted 1990 Mark Adler */
+
+ unsigned long c; /* crc shift register */
+ unsigned long e; /* polynomial exclusive-or pattern */
+ int i; /* counter for all possible eight bit values */
+ int k; /* byte being shifted into crc apparatus */
+
+ /* terms of polynomial defining this crc (except x^32): */
+ static const int p[] = {0,1,2,4,5,7,8,10,11,12,16,22,23,26};
+
+ /* Make exclusive-or pattern from polynomial */
+ e = 0;
+ for (i = 0; i < sizeof(p)/sizeof(int); i++)
+ e |= 1L << (31 - p[i]);
+
+ crc_32_tab[0] = 0;
+
+ for (i = 1; i < 256; i++)
+ {
+ c = 0;
+ for (k = i | 256; k != 1; k >>= 1)
+ {
+ c = c & 1 ? (c >> 1) ^ e : c >> 1;
+ if (k & 1)
+ c ^= e;
+ }
+ crc_32_tab[i] = c;
+ }
+
+ /* this is initialized here so this code could reside in ROM */
+ crc = (ulg)0xffffffffUL; /* shift register contents */
+}
+
+/* gzip flag byte */
+#define ASCII_FLAG 0x01 /* bit 0 set: file probably ASCII text */
+#define CONTINUATION 0x02 /* bit 1 set: continuation of multi-part gzip file */
+#define EXTRA_FIELD 0x04 /* bit 2 set: extra field present */
+#define ORIG_NAME 0x08 /* bit 3 set: original file name present */
+#define COMMENT 0x10 /* bit 4 set: file comment present */
+#define ENCRYPTED 0x20 /* bit 5 set: file is encrypted */
+#define RESERVED 0xC0 /* bit 6,7: reserved */
+
+/*
+ * Do the uncompression!
+ */
+static int INIT gunzip(void)
+{
+ uch flags;
+ unsigned char magic[2]; /* magic header */
+ char method;
+ ulg orig_crc = 0; /* original crc */
+ ulg orig_len = 0; /* original uncompressed length */
+ int res;
+
+ magic[0] = NEXTBYTE();
+ magic[1] = NEXTBYTE();
+ method = NEXTBYTE();
+
+ if (magic[0] != 037 ||
+ ((magic[1] != 0213) && (magic[1] != 0236))) {
+ error("bad gzip magic numbers");
+ return -1;
+ }
+
+ /* We only support method #8, DEFLATED */
+ if (method != 8) {
+ error("internal error, invalid method");
+ return -1;
+ }
+
+ flags = (uch)get_byte();
+ if ((flags & ENCRYPTED) != 0) {
+ error("Input is encrypted");
+ return -1;
+ }
+ if ((flags & CONTINUATION) != 0) {
+ error("Multi part input");
+ return -1;
+ }
+ if ((flags & RESERVED) != 0) {
+ error("Input has invalid flags");
+ return -1;
+ }
+ NEXTBYTE(); /* Get timestamp */
+ NEXTBYTE();
+ NEXTBYTE();
+ NEXTBYTE();
+
+ (void)NEXTBYTE(); /* Ignore extra flags for the moment */
+ (void)NEXTBYTE(); /* Ignore OS type for the moment */
+
+ if ((flags & EXTRA_FIELD) != 0) {
+ unsigned len = (unsigned)NEXTBYTE();
+ len |= ((unsigned)NEXTBYTE())<<8;
+ while (len--) (void)NEXTBYTE();
+ }
+
+	/* Discard the original file name if present */
+ if ((flags & ORIG_NAME) != 0) {
+ /* Discard the old name */
+ while (NEXTBYTE() != 0) /* null */ ;
+ }
+
+ /* Discard file comment if any */
+ if ((flags & COMMENT) != 0) {
+ while (NEXTBYTE() != 0) /* null */ ;
+ }
+
+ /* Decompress */
+ if ((res = inflate())) {
+ switch (res) {
+ case 0:
+ break;
+ case 1:
+ error("invalid compressed format (err=1)");
+ break;
+ case 2:
+ error("invalid compressed format (err=2)");
+ break;
+ case 3:
+ error("out of memory");
+ break;
+ case 4:
+ error("out of input data");
+ break;
+ default:
+ error("invalid compressed format (other)");
+ }
+ return -1;
+ }
+
+ /* Get the crc and original length */
+ /* crc32 (see algorithm.doc)
+ * uncompressed input size modulo 2^32
+ */
+ orig_crc = (ulg) NEXTBYTE();
+ orig_crc |= (ulg) NEXTBYTE() << 8;
+ orig_crc |= (ulg) NEXTBYTE() << 16;
+ orig_crc |= (ulg) NEXTBYTE() << 24;
+
+ orig_len = (ulg) NEXTBYTE();
+ orig_len |= (ulg) NEXTBYTE() << 8;
+ orig_len |= (ulg) NEXTBYTE() << 16;
+ orig_len |= (ulg) NEXTBYTE() << 24;
+
+ /* Validate decompression */
+ if (orig_crc != CRC_VALUE) {
+ error("crc error");
+ return -1;
+ }
+ if (orig_len != bytes_out) {
+ error("length error");
+ return -1;
+ }
+ return 0;
+
+ underrun: /* NEXTBYTE() goto's here if needed */
+ error("out of input data");
+ return -1;
+}
+
+
diff --git a/lib/interval_tree.c b/lib/interval_tree.c
new file mode 100644
index 000000000..3412737ff
--- /dev/null
+++ b/lib/interval_tree.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/interval_tree.h>
+#include <linux/interval_tree_generic.h>
+#include <linux/compiler.h>
+#include <linux/export.h>
+
+#define START(node) ((node)->start)
+#define LAST(node) ((node)->last)
+
+INTERVAL_TREE_DEFINE(struct interval_tree_node, rb,
+ unsigned long, __subtree_last,
+ START, LAST,, interval_tree)
+
+EXPORT_SYMBOL_GPL(interval_tree_insert);
+EXPORT_SYMBOL_GPL(interval_tree_remove);
+EXPORT_SYMBOL_GPL(interval_tree_iter_first);
+EXPORT_SYMBOL_GPL(interval_tree_iter_next);
+
+#ifdef CONFIG_INTERVAL_TREE_SPAN_ITER
+/*
+ * Roll nodes[1] into nodes[0] by advancing nodes[1] to the end of a contiguous
+ * span of nodes. This makes nodes[0]->last the end of that contiguous used
+ * span of indexes that started at the original nodes[1]->start. nodes[1] is
+ * now the first node starting the next used span. A hole span lies between
+ * nodes[0]->last and nodes[1]->start. nodes[1] must be !NULL.
+ */
+static void
+interval_tree_span_iter_next_gap(struct interval_tree_span_iter *state)
+{
+ struct interval_tree_node *cur = state->nodes[1];
+
+ state->nodes[0] = cur;
+ do {
+ if (cur->last > state->nodes[0]->last)
+ state->nodes[0] = cur;
+ cur = interval_tree_iter_next(cur, state->first_index,
+ state->last_index);
+ } while (cur && (state->nodes[0]->last >= cur->start ||
+ state->nodes[0]->last + 1 == cur->start));
+ state->nodes[1] = cur;
+}
+
+void interval_tree_span_iter_first(struct interval_tree_span_iter *iter,
+ struct rb_root_cached *itree,
+ unsigned long first_index,
+ unsigned long last_index)
+{
+ iter->first_index = first_index;
+ iter->last_index = last_index;
+ iter->nodes[0] = NULL;
+ iter->nodes[1] =
+ interval_tree_iter_first(itree, first_index, last_index);
+ if (!iter->nodes[1]) {
+ /* No nodes intersect the span; the whole span is a hole */
+ iter->start_hole = first_index;
+ iter->last_hole = last_index;
+ iter->is_hole = 1;
+ return;
+ }
+ if (iter->nodes[1]->start > first_index) {
+ /* Leading hole on first iteration */
+ iter->start_hole = first_index;
+ iter->last_hole = iter->nodes[1]->start - 1;
+ iter->is_hole = 1;
+ interval_tree_span_iter_next_gap(iter);
+ return;
+ }
+
+ /* Starting inside a used range */
+ iter->start_used = first_index;
+ iter->is_hole = 0;
+ interval_tree_span_iter_next_gap(iter);
+ iter->last_used = iter->nodes[0]->last;
+ if (iter->last_used >= last_index) {
+ iter->last_used = last_index;
+ iter->nodes[0] = NULL;
+ iter->nodes[1] = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(interval_tree_span_iter_first);
+
+void interval_tree_span_iter_next(struct interval_tree_span_iter *iter)
+{
+ if (!iter->nodes[0] && !iter->nodes[1]) {
+ iter->is_hole = -1;
+ return;
+ }
+
+ if (iter->is_hole) {
+ iter->start_used = iter->last_hole + 1;
+ iter->last_used = iter->nodes[0]->last;
+ if (iter->last_used >= iter->last_index) {
+ iter->last_used = iter->last_index;
+ iter->nodes[0] = NULL;
+ iter->nodes[1] = NULL;
+ }
+ iter->is_hole = 0;
+ return;
+ }
+
+ if (!iter->nodes[1]) {
+ /* Trailing hole */
+ iter->start_hole = iter->nodes[0]->last + 1;
+ iter->last_hole = iter->last_index;
+ iter->nodes[0] = NULL;
+ iter->is_hole = 1;
+ return;
+ }
+
+ /* must have both nodes[0] and [1], interior hole */
+ iter->start_hole = iter->nodes[0]->last + 1;
+ iter->last_hole = iter->nodes[1]->start - 1;
+ iter->is_hole = 1;
+ interval_tree_span_iter_next_gap(iter);
+}
+EXPORT_SYMBOL_GPL(interval_tree_span_iter_next);
+
+/*
+ * Advance the iterator index to a specific position. The returned used/hole is
+ * updated to start at new_index. This is faster than calling
+ * interval_tree_span_iter_first() as it can avoid full searches in several
+ * cases where the iterator is already set.
+ */
+void interval_tree_span_iter_advance(struct interval_tree_span_iter *iter,
+ struct rb_root_cached *itree,
+ unsigned long new_index)
+{
+ if (iter->is_hole == -1)
+ return;
+
+ iter->first_index = new_index;
+ if (new_index > iter->last_index) {
+ iter->is_hole = -1;
+ return;
+ }
+
+ /* Rely on the union aliasing hole/used */
+ if (iter->start_hole <= new_index && new_index <= iter->last_hole) {
+ iter->start_hole = new_index;
+ return;
+ }
+ if (new_index == iter->last_hole + 1)
+ interval_tree_span_iter_next(iter);
+ else
+ interval_tree_span_iter_first(iter, itree, new_index,
+ iter->last_index);
+}
+EXPORT_SYMBOL_GPL(interval_tree_span_iter_advance);
+#endif
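The hole/used alternation that the span iterator produces can be modelled outside the kernel. The following toy sketch walks a plain array of intervals sorted by start (struct ival and the demo values are hypothetical, not the kernel API) and prints the same alternating spans over [first, last], merging overlapping or directly adjacent intervals exactly as interval_tree_span_iter_next_gap() does:

#include <stdio.h>

struct ival { unsigned long start, last; };

static void walk_spans(const struct ival *v, int n,
                       unsigned long first, unsigned long last)
{
        unsigned long pos = first;

        for (int i = 0; i < n && pos <= last; ) {
                unsigned long end;

                if (v[i].last < pos) {          /* entirely behind us */
                        i++;
                        continue;
                }
                if (v[i].start > pos) {         /* leading/interior hole */
                        unsigned long h = v[i].start - 1 < last ?
                                          v[i].start - 1 : last;

                        printf("hole [%lu, %lu]\n", pos, h);
                        pos = h + 1;
                        continue;
                }
                /* merge contiguous/overlapping intervals into one used span */
                end = v[i].last;
                while (i + 1 < n && v[i + 1].start <= end + 1) {
                        if (v[i + 1].last > end)
                                end = v[i + 1].last;
                        i++;
                }
                if (end > last)
                        end = last;
                printf("used [%lu, %lu]\n", pos, end);
                pos = end + 1;
                i++;
        }
        if (pos <= last)
                printf("hole [%lu, %lu]\n", pos, last);
}

int main(void)
{
        struct ival v[] = { { 5, 9 }, { 10, 12 }, { 20, 25 } };

        /* prints: hole 0-4, used 5-12, hole 13-19, used 20-25, hole 26-30 */
        walk_spans(v, 3, 0, 30);
        return 0;
}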
diff --git a/lib/interval_tree_test.c b/lib/interval_tree_test.c
new file mode 100644
index 000000000..f37f4d44f
--- /dev/null
+++ b/lib/interval_tree_test.c
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/interval_tree.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <asm/timex.h>
+
+#define __param(type, name, init, msg) \
+ static type name = init; \
+ module_param(name, type, 0444); \
+ MODULE_PARM_DESC(name, msg);
+
+__param(int, nnodes, 100, "Number of nodes in the interval tree");
+__param(int, perf_loops, 1000, "Number of iterations modifying the tree");
+
+__param(int, nsearches, 100, "Number of searches to the interval tree");
+__param(int, search_loops, 1000, "Number of iterations searching the tree");
+__param(bool, search_all, false, "Searches will iterate all nodes in the tree");
+
+__param(uint, max_endpoint, ~0, "Largest value for the interval's endpoint");
+
+static struct rb_root_cached root = RB_ROOT_CACHED;
+static struct interval_tree_node *nodes = NULL;
+static u32 *queries = NULL;
+
+static struct rnd_state rnd;
+
+static inline unsigned long
+search(struct rb_root_cached *root, unsigned long start, unsigned long last)
+{
+ struct interval_tree_node *node;
+ unsigned long results = 0;
+
+ for (node = interval_tree_iter_first(root, start, last); node;
+ node = interval_tree_iter_next(node, start, last))
+ results++;
+ return results;
+}
+
+static void init(void)
+{
+ int i;
+
+ for (i = 0; i < nnodes; i++) {
+ u32 b = (prandom_u32_state(&rnd) >> 4) % max_endpoint;
+ u32 a = (prandom_u32_state(&rnd) >> 4) % b;
+
+ nodes[i].start = a;
+ nodes[i].last = b;
+ }
+
+ /*
+ * Limit the search scope to what the user defined.
+ * Otherwise we are merely measuring empty walks,
+ * which is pointless.
+ */
+ for (i = 0; i < nsearches; i++)
+ queries[i] = (prandom_u32_state(&rnd) >> 4) % max_endpoint;
+}
+
+static int interval_tree_test_init(void)
+{
+ int i, j;
+ unsigned long results;
+ cycles_t time1, time2, time;
+
+ nodes = kmalloc_array(nnodes, sizeof(struct interval_tree_node),
+ GFP_KERNEL);
+ if (!nodes)
+ return -ENOMEM;
+
+ queries = kmalloc_array(nsearches, sizeof(int), GFP_KERNEL);
+ if (!queries) {
+ kfree(nodes);
+ return -ENOMEM;
+ }
+
+ printk(KERN_ALERT "interval tree insert/remove");
+
+ prandom_seed_state(&rnd, 3141592653589793238ULL);
+ init();
+
+ time1 = get_cycles();
+
+ for (i = 0; i < perf_loops; i++) {
+ for (j = 0; j < nnodes; j++)
+ interval_tree_insert(nodes + j, &root);
+ for (j = 0; j < nnodes; j++)
+ interval_tree_remove(nodes + j, &root);
+ }
+
+ time2 = get_cycles();
+ time = time2 - time1;
+
+ time = div_u64(time, perf_loops);
+ printk(" -> %llu cycles\n", (unsigned long long)time);
+
+ printk(KERN_ALERT "interval tree search");
+
+ for (j = 0; j < nnodes; j++)
+ interval_tree_insert(nodes + j, &root);
+
+ time1 = get_cycles();
+
+ results = 0;
+ for (i = 0; i < search_loops; i++)
+ for (j = 0; j < nsearches; j++) {
+ unsigned long start = search_all ? 0 : queries[j];
+ unsigned long last = search_all ? max_endpoint : queries[j];
+
+ results += search(&root, start, last);
+ }
+
+ time2 = get_cycles();
+ time = time2 - time1;
+
+ time = div_u64(time, search_loops);
+ results = div_u64(results, search_loops);
+ printk(" -> %llu cycles (%lu results)\n",
+ (unsigned long long)time, results);
+
+ kfree(queries);
+ kfree(nodes);
+
+ return -EAGAIN; /* Failing here makes insmod unload the module right away */
+}
+
+static void interval_tree_test_exit(void)
+{
+ printk(KERN_ALERT "test exit\n");
+}
+
+module_init(interval_tree_test_init)
+module_exit(interval_tree_test_exit)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Michel Lespinasse");
+MODULE_DESCRIPTION("Interval Tree test");
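For reference, each __param() invocation above is plain preprocessor sugar; nnodes, for example, expands to roughly the following:

static int nnodes = 100;
module_param(nnodes, int, 0444);
MODULE_PARM_DESC(nnodes, "Number of nodes in the interval tree");

The 0444 mode makes the values readable (but not writable) under /sys/module/interval_tree_test/parameters/, and they can be overridden at load time, e.g. with insmod interval_tree_test.ko nnodes=1000.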
diff --git a/lib/iomap.c b/lib/iomap.c
new file mode 100644
index 000000000..4f8b31baa
--- /dev/null
+++ b/lib/iomap.c
@@ -0,0 +1,429 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Implement the default iomap interfaces
+ *
+ * (C) Copyright 2004 Linus Torvalds
+ */
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/kmsan-checks.h>
+
+#include <linux/export.h>
+
+/*
+ * Read/write from/to an (offsettable) iomem cookie. It might be a PIO
+ * access or an MMIO access; these functions don't care. The info is
+ * encoded in the hardware mapping set up by the mapping functions
+ * (or the cookie itself, depending on implementation and hw).
+ *
+ * The generic routines don't assume any hardware mappings, and just
+ * encode the PIO/MMIO as part of the cookie. They coldly assume that
+ * the MMIO IO mappings are not in the low address range.
+ *
+ * Architectures for which this is not true can't use this generic
+ * implementation and should do their own copy.
+ */
+
+#ifndef HAVE_ARCH_PIO_SIZE
+/*
+ * We encode the physical PIO addresses (0-0xffff) into the
+ * pointer by offsetting them with a constant (0x10000) and
+ * assuming that all the low addresses are always PIO. That means
+ * we can do some sanity checks on the low bits, and don't
+ * need to just take things for granted.
+ */
+#define PIO_OFFSET 0x10000UL
+#define PIO_MASK 0x0ffffUL
+#define PIO_RESERVED 0x40000UL
+#endif
+
+static void bad_io_access(unsigned long port, const char *access)
+{
+ static int count = 10;
+ if (count) {
+ count--;
+ WARN(1, KERN_ERR "Bad IO access at port %#lx (%s)\n", port, access);
+ }
+}
+
+/*
+ * Ugly macros are a way of life.
+ */
+#define IO_COND(addr, is_pio, is_mmio) do { \
+ unsigned long port = (unsigned long __force)addr; \
+ if (port >= PIO_RESERVED) { \
+ is_mmio; \
+ } else if (port > PIO_OFFSET) { \
+ port &= PIO_MASK; \
+ is_pio; \
+ } else \
+ bad_io_access(port, #is_pio ); \
+} while (0)
+
+#ifndef pio_read16be
+#define pio_read16be(port) swab16(inw(port))
+#define pio_read32be(port) swab32(inl(port))
+#endif
+
+#ifndef mmio_read16be
+#define mmio_read16be(addr) swab16(readw(addr))
+#define mmio_read32be(addr) swab32(readl(addr))
+#define mmio_read64be(addr) swab64(readq(addr))
+#endif
+
+/*
+ * Here and below, we apply __no_kmsan_checks to functions reading data from
+ * hardware, to ensure that KMSAN marks their return values as initialized.
+ */
+__no_kmsan_checks
+unsigned int ioread8(const void __iomem *addr)
+{
+ IO_COND(addr, return inb(port), return readb(addr));
+ return 0xff;
+}
+__no_kmsan_checks
+unsigned int ioread16(const void __iomem *addr)
+{
+ IO_COND(addr, return inw(port), return readw(addr));
+ return 0xffff;
+}
+__no_kmsan_checks
+unsigned int ioread16be(const void __iomem *addr)
+{
+ IO_COND(addr, return pio_read16be(port), return mmio_read16be(addr));
+ return 0xffff;
+}
+__no_kmsan_checks
+unsigned int ioread32(const void __iomem *addr)
+{
+ IO_COND(addr, return inl(port), return readl(addr));
+ return 0xffffffff;
+}
+__no_kmsan_checks
+unsigned int ioread32be(const void __iomem *addr)
+{
+ IO_COND(addr, return pio_read32be(port), return mmio_read32be(addr));
+ return 0xffffffff;
+}
+EXPORT_SYMBOL(ioread8);
+EXPORT_SYMBOL(ioread16);
+EXPORT_SYMBOL(ioread16be);
+EXPORT_SYMBOL(ioread32);
+EXPORT_SYMBOL(ioread32be);
+
+#ifdef readq
+static u64 pio_read64_lo_hi(unsigned long port)
+{
+ u64 lo, hi;
+
+ lo = inl(port);
+ hi = inl(port + sizeof(u32));
+
+ return lo | (hi << 32);
+}
+
+static u64 pio_read64_hi_lo(unsigned long port)
+{
+ u64 lo, hi;
+
+ hi = inl(port + sizeof(u32));
+ lo = inl(port);
+
+ return lo | (hi << 32);
+}
+
+static u64 pio_read64be_lo_hi(unsigned long port)
+{
+ u64 lo, hi;
+
+ lo = pio_read32be(port + sizeof(u32));
+ hi = pio_read32be(port);
+
+ return lo | (hi << 32);
+}
+
+static u64 pio_read64be_hi_lo(unsigned long port)
+{
+ u64 lo, hi;
+
+ hi = pio_read32be(port);
+ lo = pio_read32be(port + sizeof(u32));
+
+ return lo | (hi << 32);
+}
+
+__no_kmsan_checks
+u64 ioread64_lo_hi(const void __iomem *addr)
+{
+ IO_COND(addr, return pio_read64_lo_hi(port), return readq(addr));
+ return 0xffffffffffffffffULL;
+}
+
+__no_kmsan_checks
+u64 ioread64_hi_lo(const void __iomem *addr)
+{
+ IO_COND(addr, return pio_read64_hi_lo(port), return readq(addr));
+ return 0xffffffffffffffffULL;
+}
+
+__no_kmsan_checks
+u64 ioread64be_lo_hi(const void __iomem *addr)
+{
+ IO_COND(addr, return pio_read64be_lo_hi(port),
+ return mmio_read64be(addr));
+ return 0xffffffffffffffffULL;
+}
+
+__no_kmsan_checks
+u64 ioread64be_hi_lo(const void __iomem *addr)
+{
+ IO_COND(addr, return pio_read64be_hi_lo(port),
+ return mmio_read64be(addr));
+ return 0xffffffffffffffffULL;
+}
+
+EXPORT_SYMBOL(ioread64_lo_hi);
+EXPORT_SYMBOL(ioread64_hi_lo);
+EXPORT_SYMBOL(ioread64be_lo_hi);
+EXPORT_SYMBOL(ioread64be_hi_lo);
+
+#endif /* readq */
+
+#ifndef pio_write16be
+#define pio_write16be(val,port) outw(swab16(val),port)
+#define pio_write32be(val,port) outl(swab32(val),port)
+#endif
+
+#ifndef mmio_write16be
+#define mmio_write16be(val,port) writew(swab16(val),port)
+#define mmio_write32be(val,port) writel(swab32(val),port)
+#define mmio_write64be(val,port) writeq(swab64(val),port)
+#endif
+
+void iowrite8(u8 val, void __iomem *addr)
+{
+ /* Make sure uninitialized memory isn't copied to devices. */
+ kmsan_check_memory(&val, sizeof(val));
+ IO_COND(addr, outb(val,port), writeb(val, addr));
+}
+void iowrite16(u16 val, void __iomem *addr)
+{
+ /* Make sure uninitialized memory isn't copied to devices. */
+ kmsan_check_memory(&val, sizeof(val));
+ IO_COND(addr, outw(val,port), writew(val, addr));
+}
+void iowrite16be(u16 val, void __iomem *addr)
+{
+ /* Make sure uninitialized memory isn't copied to devices. */
+ kmsan_check_memory(&val, sizeof(val));
+ IO_COND(addr, pio_write16be(val,port), mmio_write16be(val, addr));
+}
+void iowrite32(u32 val, void __iomem *addr)
+{
+ /* Make sure uninitialized memory isn't copied to devices. */
+ kmsan_check_memory(&val, sizeof(val));
+ IO_COND(addr, outl(val,port), writel(val, addr));
+}
+void iowrite32be(u32 val, void __iomem *addr)
+{
+ /* Make sure uninitialized memory isn't copied to devices. */
+ kmsan_check_memory(&val, sizeof(val));
+ IO_COND(addr, pio_write32be(val,port), mmio_write32be(val, addr));
+}
+EXPORT_SYMBOL(iowrite8);
+EXPORT_SYMBOL(iowrite16);
+EXPORT_SYMBOL(iowrite16be);
+EXPORT_SYMBOL(iowrite32);
+EXPORT_SYMBOL(iowrite32be);
+
+#ifdef writeq
+static void pio_write64_lo_hi(u64 val, unsigned long port)
+{
+ outl(val, port);
+ outl(val >> 32, port + sizeof(u32));
+}
+
+static void pio_write64_hi_lo(u64 val, unsigned long port)
+{
+ outl(val >> 32, port + sizeof(u32));
+ outl(val, port);
+}
+
+static void pio_write64be_lo_hi(u64 val, unsigned long port)
+{
+ pio_write32be(val, port + sizeof(u32));
+ pio_write32be(val >> 32, port);
+}
+
+static void pio_write64be_hi_lo(u64 val, unsigned long port)
+{
+ pio_write32be(val >> 32, port);
+ pio_write32be(val, port + sizeof(u32));
+}
+
+void iowrite64_lo_hi(u64 val, void __iomem *addr)
+{
+ /* Make sure uninitialized memory isn't copied to devices. */
+ kmsan_check_memory(&val, sizeof(val));
+ IO_COND(addr, pio_write64_lo_hi(val, port),
+ writeq(val, addr));
+}
+
+void iowrite64_hi_lo(u64 val, void __iomem *addr)
+{
+ /* Make sure uninitialized memory isn't copied to devices. */
+ kmsan_check_memory(&val, sizeof(val));
+ IO_COND(addr, pio_write64_hi_lo(val, port),
+ writeq(val, addr));
+}
+
+void iowrite64be_lo_hi(u64 val, void __iomem *addr)
+{
+ /* Make sure uninitialized memory isn't copied to devices. */
+ kmsan_check_memory(&val, sizeof(val));
+ IO_COND(addr, pio_write64be_lo_hi(val, port),
+ mmio_write64be(val, addr));
+}
+
+void iowrite64be_hi_lo(u64 val, void __iomem *addr)
+{
+ /* Make sure uninitialized memory isn't copied to devices. */
+ kmsan_check_memory(&val, sizeof(val));
+ IO_COND(addr, pio_write64be_hi_lo(val, port),
+ mmio_write64be(val, addr));
+}
+
+EXPORT_SYMBOL(iowrite64_lo_hi);
+EXPORT_SYMBOL(iowrite64_hi_lo);
+EXPORT_SYMBOL(iowrite64be_lo_hi);
+EXPORT_SYMBOL(iowrite64be_hi_lo);
+
+#endif /* writeq */
+
+/*
+ * These are the "repeat MMIO read/write" functions.
+ * Note the "__raw" accesses, since we don't want to
+ * convert to CPU byte order. We write in "IO byte
+ * order" (we also don't have IO barriers).
+ */
+#ifndef mmio_insb
+static inline void mmio_insb(const void __iomem *addr, u8 *dst, int count)
+{
+ while (--count >= 0) {
+ u8 data = __raw_readb(addr);
+ *dst = data;
+ dst++;
+ }
+}
+static inline void mmio_insw(const void __iomem *addr, u16 *dst, int count)
+{
+ while (--count >= 0) {
+ u16 data = __raw_readw(addr);
+ *dst = data;
+ dst++;
+ }
+}
+static inline void mmio_insl(const void __iomem *addr, u32 *dst, int count)
+{
+ while (--count >= 0) {
+ u32 data = __raw_readl(addr);
+ *dst = data;
+ dst++;
+ }
+}
+#endif
+
+#ifndef mmio_outsb
+static inline void mmio_outsb(void __iomem *addr, const u8 *src, int count)
+{
+ while (--count >= 0) {
+ __raw_writeb(*src, addr);
+ src++;
+ }
+}
+static inline void mmio_outsw(void __iomem *addr, const u16 *src, int count)
+{
+ while (--count >= 0) {
+ __raw_writew(*src, addr);
+ src++;
+ }
+}
+static inline void mmio_outsl(void __iomem *addr, const u32 *src, int count)
+{
+ while (--count >= 0) {
+ __raw_writel(*src, addr);
+ src++;
+ }
+}
+#endif
+
+void ioread8_rep(const void __iomem *addr, void *dst, unsigned long count)
+{
+ IO_COND(addr, insb(port,dst,count), mmio_insb(addr, dst, count));
+ /* KMSAN must treat values read from devices as initialized. */
+ kmsan_unpoison_memory(dst, count);
+}
+void ioread16_rep(const void __iomem *addr, void *dst, unsigned long count)
+{
+ IO_COND(addr, insw(port,dst,count), mmio_insw(addr, dst, count));
+ /* KMSAN must treat values read from devices as initialized. */
+ kmsan_unpoison_memory(dst, count * 2);
+}
+void ioread32_rep(const void __iomem *addr, void *dst, unsigned long count)
+{
+ IO_COND(addr, insl(port,dst,count), mmio_insl(addr, dst, count));
+ /* KMSAN must treat values read from devices as initialized. */
+ kmsan_unpoison_memory(dst, count * 4);
+}
+EXPORT_SYMBOL(ioread8_rep);
+EXPORT_SYMBOL(ioread16_rep);
+EXPORT_SYMBOL(ioread32_rep);
+
+void iowrite8_rep(void __iomem *addr, const void *src, unsigned long count)
+{
+ /* Make sure uninitialized memory isn't copied to devices. */
+ kmsan_check_memory(src, count);
+ IO_COND(addr, outsb(port, src, count), mmio_outsb(addr, src, count));
+}
+void iowrite16_rep(void __iomem *addr, const void *src, unsigned long count)
+{
+ /* Make sure uninitialized memory isn't copied to devices. */
+ kmsan_check_memory(src, count * 2);
+ IO_COND(addr, outsw(port, src, count), mmio_outsw(addr, src, count));
+}
+void iowrite32_rep(void __iomem *addr, const void *src, unsigned long count)
+{
+ /* Make sure uninitialized memory isn't copied to devices. */
+ kmsan_check_memory(src, count * 4);
+ IO_COND(addr, outsl(port, src,count), mmio_outsl(addr, src, count));
+}
+EXPORT_SYMBOL(iowrite8_rep);
+EXPORT_SYMBOL(iowrite16_rep);
+EXPORT_SYMBOL(iowrite32_rep);
+
+#ifdef CONFIG_HAS_IOPORT_MAP
+/* Create a virtual mapping cookie for an IO port range */
+void __iomem *ioport_map(unsigned long port, unsigned int nr)
+{
+ if (port > PIO_MASK)
+ return NULL;
+ return (void __iomem *) (unsigned long) (port + PIO_OFFSET);
+}
+
+void ioport_unmap(void __iomem *addr)
+{
+ /* Nothing to do */
+}
+EXPORT_SYMBOL(ioport_map);
+EXPORT_SYMBOL(ioport_unmap);
+#endif /* CONFIG_HAS_IOPORT_MAP */
+
+#ifdef CONFIG_PCI
+/*
+ * Hide whether this is an MMIO or a PIO address space and just do what
+ * the caller expects, in the correct way.
+ */
+void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
+{
+ IO_COND(addr, /* nothing */, iounmap(addr));
+}
+EXPORT_SYMBOL(pci_iounmap);
+#endif /* CONFIG_PCI */
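The cookie encoding that IO_COND depends on can be demonstrated in isolation. A userspace model of the classification follows (the cookie values are hypothetical demo inputs for a 64-bit build; the real macro dispatches to inb()/readb() and friends instead of printing):

#include <stdio.h>

#define PIO_OFFSET   0x10000UL
#define PIO_MASK     0x0ffffUL
#define PIO_RESERVED 0x40000UL

static void classify(unsigned long cookie)
{
        if (cookie >= PIO_RESERVED)
                printf("%#lx: MMIO pointer\n", cookie);
        else if (cookie > PIO_OFFSET)
                printf("%#lx: PIO port %#lx\n", cookie, cookie & PIO_MASK);
        else
                printf("%#lx: bad IO cookie\n", cookie);
}

int main(void)
{
        classify(0x10000UL + 0x3f8);    /* an ioport_map(0x3f8, ...) result */
        classify(0xffffc90000001000UL); /* a typical ioremap() pointer */
        classify(0x42);                 /* neither: rejected */
        return 0;
}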
diff --git a/lib/iomap_copy.c b/lib/iomap_copy.c
new file mode 100644
index 000000000..5de7c04e0
--- /dev/null
+++ b/lib/iomap_copy.c
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2006 PathScale, Inc. All Rights Reserved.
+ */
+
+#include <linux/export.h>
+#include <linux/io.h>
+
+/**
+ * __iowrite32_copy - copy data to MMIO space, in 32-bit units
+ * @to: destination, in MMIO space (must be 32-bit aligned)
+ * @from: source (must be 32-bit aligned)
+ * @count: number of 32-bit quantities to copy
+ *
+ * Copy data from kernel space to MMIO space, in units of 32 bits at a
+ * time. Order of access is not guaranteed, nor is a memory barrier
+ * performed afterwards.
+ */
+void __attribute__((weak)) __iowrite32_copy(void __iomem *to,
+ const void *from,
+ size_t count)
+{
+ u32 __iomem *dst = to;
+ const u32 *src = from;
+ const u32 *end = src + count;
+
+ while (src < end)
+ __raw_writel(*src++, dst++);
+}
+EXPORT_SYMBOL_GPL(__iowrite32_copy);
+
+/**
+ * __ioread32_copy - copy data from MMIO space, in 32-bit units
+ * @to: destination (must be 32-bit aligned)
+ * @from: source, in MMIO space (must be 32-bit aligned)
+ * @count: number of 32-bit quantities to copy
+ *
+ * Copy data from MMIO space to kernel space, in units of 32 bits at a
+ * time. Order of access is not guaranteed, nor is a memory barrier
+ * performed afterwards.
+ */
+void __ioread32_copy(void *to, const void __iomem *from, size_t count)
+{
+ u32 *dst = to;
+ const u32 __iomem *src = from;
+ const u32 __iomem *end = src + count;
+
+ while (src < end)
+ *dst++ = __raw_readl(src++);
+}
+EXPORT_SYMBOL_GPL(__ioread32_copy);
+
+/**
+ * __iowrite64_copy - copy data to MMIO space, in 64-bit or 32-bit units
+ * @to: destination, in MMIO space (must be 64-bit aligned)
+ * @from: source (must be 64-bit aligned)
+ * @count: number of 64-bit quantities to copy
+ *
+ * Copy data from kernel space to MMIO space, in units of 32 or 64 bits at a
+ * time. Order of access is not guaranteed, nor is a memory barrier
+ * performed afterwards.
+ */
+void __attribute__((weak)) __iowrite64_copy(void __iomem *to,
+ const void *from,
+ size_t count)
+{
+#ifdef CONFIG_64BIT
+ u64 __iomem *dst = to;
+ const u64 *src = from;
+ const u64 *end = src + count;
+
+ while (src < end)
+ __raw_writeq(*src++, dst++);
+#else
+ __iowrite32_copy(to, from, count * 2);
+#endif
+}
+
+EXPORT_SYMBOL_GPL(__iowrite64_copy);
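The point of these helpers is access-size discipline: device windows usually require each bus transaction to be exactly 32 (or 64) bits wide, a guarantee a generic memcpy() does not give. A userspace sketch of the fixed-width idiom, with volatile standing in for the MMIO accessors:

#include <stddef.h>
#include <stdint.h>

/* one aligned 32-bit store per word, never split into byte accesses */
static void copy32(volatile uint32_t *dst, const uint32_t *src, size_t count)
{
        while (count--)
                *dst++ = *src++;
}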
diff --git a/lib/iommu-helper.c b/lib/iommu-helper.c
new file mode 100644
index 000000000..92a9f243c
--- /dev/null
+++ b/lib/iommu-helper.c
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * IOMMU helper functions for the free area management
+ */
+
+#include <linux/bitmap.h>
+#include <linux/iommu-helper.h>
+
+unsigned long iommu_area_alloc(unsigned long *map, unsigned long size,
+ unsigned long start, unsigned int nr,
+ unsigned long shift, unsigned long boundary_size,
+ unsigned long align_mask)
+{
+ unsigned long index;
+
+ /* Exclude the very last entry of the range from allocation */
+ size -= 1;
+again:
+ index = bitmap_find_next_zero_area(map, size, start, nr, align_mask);
+ if (index < size) {
+ if (iommu_is_span_boundary(index, nr, shift, boundary_size)) {
+ start = ALIGN(shift + index, boundary_size) - shift;
+ goto again;
+ }
+ bitmap_set(map, index, nr);
+ return index;
+ }
+ return -1;
+}
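The goto-again retry above is driven by iommu_is_span_boundary(), an inline helper from include/linux/iommu-helper.h: an allocation of nr entries must not straddle a boundary_size-aligned segment. A userspace sketch of that test, modelled on the kernel helper and assuming boundary_size is a power of two:

#include <assert.h>
#include <stdio.h>

static int is_span_boundary(unsigned long index, unsigned long nr,
                            unsigned long shift, unsigned long boundary_size)
{
        unsigned long offset = (shift + index) & (boundary_size - 1);

        return offset + nr > boundary_size;
}

int main(void)
{
        /* 16-entry segments: [0,15], [16,31], ... */
        assert(!is_span_boundary(14, 2, 0, 16));        /* fits in 14..15 */
        assert(is_span_boundary(15, 2, 0, 16));         /* would span 15..16 */
        printf("ok\n");
        return 0;
}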
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
new file mode 100644
index 000000000..27234a820
--- /dev/null
+++ b/lib/iov_iter.c
@@ -0,0 +1,1862 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <crypto/hash.h>
+#include <linux/export.h>
+#include <linux/bvec.h>
+#include <linux/fault-inject-usercopy.h>
+#include <linux/uio.h>
+#include <linux/pagemap.h>
+#include <linux/highmem.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/splice.h>
+#include <linux/compat.h>
+#include <net/checksum.h>
+#include <linux/scatterlist.h>
+#include <linux/instrumented.h>
+
+/* covers ubuf and kbuf alike */
+#define iterate_buf(i, n, base, len, off, __p, STEP) { \
+ size_t __maybe_unused off = 0; \
+ len = n; \
+ base = __p + i->iov_offset; \
+ len -= (STEP); \
+ i->iov_offset += len; \
+ n = len; \
+}
+
+/* covers iovec and kvec alike */
+#define iterate_iovec(i, n, base, len, off, __p, STEP) { \
+ size_t off = 0; \
+ size_t skip = i->iov_offset; \
+ do { \
+ len = min(n, __p->iov_len - skip); \
+ if (likely(len)) { \
+ base = __p->iov_base + skip; \
+ len -= (STEP); \
+ off += len; \
+ skip += len; \
+ n -= len; \
+ if (skip < __p->iov_len) \
+ break; \
+ } \
+ __p++; \
+ skip = 0; \
+ } while (n); \
+ i->iov_offset = skip; \
+ n = off; \
+}
+
+#define iterate_bvec(i, n, base, len, off, p, STEP) { \
+ size_t off = 0; \
+ unsigned skip = i->iov_offset; \
+ while (n) { \
+ unsigned offset = p->bv_offset + skip; \
+ unsigned left; \
+ void *kaddr = kmap_local_page(p->bv_page + \
+ offset / PAGE_SIZE); \
+ base = kaddr + offset % PAGE_SIZE; \
+ len = min(min(n, (size_t)(p->bv_len - skip)), \
+ (size_t)(PAGE_SIZE - offset % PAGE_SIZE)); \
+ left = (STEP); \
+ kunmap_local(kaddr); \
+ len -= left; \
+ off += len; \
+ skip += len; \
+ if (skip == p->bv_len) { \
+ skip = 0; \
+ p++; \
+ } \
+ n -= len; \
+ if (left) \
+ break; \
+ } \
+ i->iov_offset = skip; \
+ n = off; \
+}
+
+#define iterate_xarray(i, n, base, len, __off, STEP) { \
+ __label__ __out; \
+ size_t __off = 0; \
+ struct folio *folio; \
+ loff_t start = i->xarray_start + i->iov_offset; \
+ pgoff_t index = start / PAGE_SIZE; \
+ XA_STATE(xas, i->xarray, index); \
+ \
+ len = PAGE_SIZE - offset_in_page(start); \
+ rcu_read_lock(); \
+ xas_for_each(&xas, folio, ULONG_MAX) { \
+ unsigned left; \
+ size_t offset; \
+ if (xas_retry(&xas, folio)) \
+ continue; \
+ if (WARN_ON(xa_is_value(folio))) \
+ break; \
+ if (WARN_ON(folio_test_hugetlb(folio))) \
+ break; \
+ offset = offset_in_folio(folio, start + __off); \
+ while (offset < folio_size(folio)) { \
+ base = kmap_local_folio(folio, offset); \
+ len = min(n, len); \
+ left = (STEP); \
+ kunmap_local(base); \
+ len -= left; \
+ __off += len; \
+ n -= len; \
+ if (left || n == 0) \
+ goto __out; \
+ offset += len; \
+ len = PAGE_SIZE; \
+ } \
+ } \
+__out: \
+ rcu_read_unlock(); \
+ i->iov_offset += __off; \
+ n = __off; \
+}
+
+#define __iterate_and_advance(i, n, base, len, off, I, K) { \
+ if (unlikely(i->count < n)) \
+ n = i->count; \
+ if (likely(n)) { \
+ if (likely(iter_is_ubuf(i))) { \
+ void __user *base; \
+ size_t len; \
+ iterate_buf(i, n, base, len, off, \
+ i->ubuf, (I)) \
+ } else if (likely(iter_is_iovec(i))) { \
+ const struct iovec *iov = iter_iov(i); \
+ void __user *base; \
+ size_t len; \
+ iterate_iovec(i, n, base, len, off, \
+ iov, (I)) \
+ i->nr_segs -= iov - iter_iov(i); \
+ i->__iov = iov; \
+ } else if (iov_iter_is_bvec(i)) { \
+ const struct bio_vec *bvec = i->bvec; \
+ void *base; \
+ size_t len; \
+ iterate_bvec(i, n, base, len, off, \
+ bvec, (K)) \
+ i->nr_segs -= bvec - i->bvec; \
+ i->bvec = bvec; \
+ } else if (iov_iter_is_kvec(i)) { \
+ const struct kvec *kvec = i->kvec; \
+ void *base; \
+ size_t len; \
+ iterate_iovec(i, n, base, len, off, \
+ kvec, (K)) \
+ i->nr_segs -= kvec - i->kvec; \
+ i->kvec = kvec; \
+ } else if (iov_iter_is_xarray(i)) { \
+ void *base; \
+ size_t len; \
+ iterate_xarray(i, n, base, len, off, \
+ (K)) \
+ } \
+ i->count -= n; \
+ } \
+}
+#define iterate_and_advance(i, n, base, len, off, I, K) \
+ __iterate_and_advance(i, n, base, len, off, I, ((void)(K),0))
+
+static int copyout(void __user *to, const void *from, size_t n)
+{
+ if (should_fail_usercopy())
+ return n;
+ if (access_ok(to, n)) {
+ instrument_copy_to_user(to, from, n);
+ n = raw_copy_to_user(to, from, n);
+ }
+ return n;
+}
+
+static int copyout_nofault(void __user *to, const void *from, size_t n)
+{
+ long res;
+
+ if (should_fail_usercopy())
+ return n;
+
+ res = copy_to_user_nofault(to, from, n);
+
+ return res < 0 ? n : res;
+}
+
+static int copyin(void *to, const void __user *from, size_t n)
+{
+ size_t res = n;
+
+ if (should_fail_usercopy())
+ return n;
+ if (access_ok(from, n)) {
+ instrument_copy_from_user_before(to, from, n);
+ res = raw_copy_from_user(to, from, n);
+ instrument_copy_from_user_after(to, from, n, res);
+ }
+ return res;
+}
+
+/*
+ * fault_in_iov_iter_readable - fault in iov iterator for reading
+ * @i: iterator
+ * @size: maximum length
+ *
+ * Fault in one or more iovecs of the given iov_iter, to a maximum length of
+ * @size. For each iovec, fault in each page that constitutes the iovec.
+ *
+ * Returns the number of bytes not faulted in (like copy_to_user() and
+ * copy_from_user()).
+ *
+ * Always returns 0 for non-userspace iterators.
+ */
+size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t size)
+{
+ if (iter_is_ubuf(i)) {
+ size_t n = min(size, iov_iter_count(i));
+ n -= fault_in_readable(i->ubuf + i->iov_offset, n);
+ return size - n;
+ } else if (iter_is_iovec(i)) {
+ size_t count = min(size, iov_iter_count(i));
+ const struct iovec *p;
+ size_t skip;
+
+ size -= count;
+ for (p = iter_iov(i), skip = i->iov_offset; count; p++, skip = 0) {
+ size_t len = min(count, p->iov_len - skip);
+ size_t ret;
+
+ if (unlikely(!len))
+ continue;
+ ret = fault_in_readable(p->iov_base + skip, len);
+ count -= len - ret;
+ if (ret)
+ break;
+ }
+ return count + size;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(fault_in_iov_iter_readable);
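A typical caller uses the return value to distinguish "retry after faulting the pages in" from "genuinely bad user address". A sketch of the common guard before entering a region where faults are disabled (for example while holding page locks); i, bytes, status and the out label are hypothetical surrounding context:

        if (unlikely(fault_in_iov_iter_readable(i, bytes) == bytes)) {
                /* not a single byte could be faulted in: bad address */
                status = -EFAULT;
                goto out;
        }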
+
+/*
+ * fault_in_iov_iter_writeable - fault in iov iterator for writing
+ * @i: iterator
+ * @size: maximum length
+ *
+ * Faults in the iterator using get_user_pages(), i.e., without triggering
+ * hardware page faults. This is primarily useful when we already know that
+ * some or all of the pages in @i aren't in memory.
+ *
+ * Returns the number of bytes not faulted in, like copy_to_user() and
+ * copy_from_user().
+ *
+ * Always returns 0 for non-userspace iterators.
+ */
+size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t size)
+{
+ if (iter_is_ubuf(i)) {
+ size_t n = min(size, iov_iter_count(i));
+ n -= fault_in_safe_writeable(i->ubuf + i->iov_offset, n);
+ return size - n;
+ } else if (iter_is_iovec(i)) {
+ size_t count = min(size, iov_iter_count(i));
+ const struct iovec *p;
+ size_t skip;
+
+ size -= count;
+ for (p = iter_iov(i), skip = i->iov_offset; count; p++, skip = 0) {
+ size_t len = min(count, p->iov_len - skip);
+ size_t ret;
+
+ if (unlikely(!len))
+ continue;
+ ret = fault_in_safe_writeable(p->iov_base + skip, len);
+ count -= len - ret;
+ if (ret)
+ break;
+ }
+ return count + size;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(fault_in_iov_iter_writeable);
+
+void iov_iter_init(struct iov_iter *i, unsigned int direction,
+ const struct iovec *iov, unsigned long nr_segs,
+ size_t count)
+{
+ WARN_ON(direction & ~(READ | WRITE));
+ *i = (struct iov_iter) {
+ .iter_type = ITER_IOVEC,
+ .copy_mc = false,
+ .nofault = false,
+ .user_backed = true,
+ .data_source = direction,
+ .__iov = iov,
+ .nr_segs = nr_segs,
+ .iov_offset = 0,
+ .count = count
+ };
+}
+EXPORT_SYMBOL(iov_iter_init);
+
+static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
+ __wsum sum, size_t off)
+{
+ __wsum next = csum_partial_copy_nocheck(from, to, len);
+ return csum_block_add(sum, next, off);
+}
+
+size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
+{
+ if (WARN_ON_ONCE(i->data_source))
+ return 0;
+ if (user_backed_iter(i))
+ might_fault();
+ iterate_and_advance(i, bytes, base, len, off,
+ copyout(base, addr + off, len),
+ memcpy(base, addr + off, len)
+ )
+
+ return bytes;
+}
+EXPORT_SYMBOL(_copy_to_iter);
+
+#ifdef CONFIG_ARCH_HAS_COPY_MC
+static int copyout_mc(void __user *to, const void *from, size_t n)
+{
+ if (access_ok(to, n)) {
+ instrument_copy_to_user(to, from, n);
+ n = copy_mc_to_user((__force void *) to, from, n);
+ }
+ return n;
+}
+
+/**
+ * _copy_mc_to_iter - copy to iter with source memory error exception handling
+ * @addr: source kernel address
+ * @bytes: total transfer length
+ * @i: destination iterator
+ *
+ * The pmem driver deploys this for the dax operation
+ * (dax_copy_to_iter()) for dax reads (bypassing the page cache and the
+ * block layer). Upon #MC, read(2) aborts and returns EIO or the number
+ * of bytes successfully copied.
+ *
+ * The main differences between this and the typical _copy_to_iter() are:
+ *
+ * * Typical tail/residue handling after a fault retries the copy
+ * byte-by-byte until the fault happens again. Re-triggering machine
+ * checks is potentially fatal so the implementation uses source
+ * alignment and poison alignment assumptions to avoid re-triggering
+ * hardware exceptions.
+ *
+ * * ITER_KVEC and ITER_BVEC can return short copies. Compare to
+ * copy_to_iter() where only ITER_IOVEC attempts might return a short copy.
+ *
+ * Return: number of bytes copied (may be %0)
+ */
+size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
+{
+ if (WARN_ON_ONCE(i->data_source))
+ return 0;
+ if (user_backed_iter(i))
+ might_fault();
+ __iterate_and_advance(i, bytes, base, len, off,
+ copyout_mc(base, addr + off, len),
+ copy_mc_to_kernel(base, addr + off, len)
+ )
+
+ return bytes;
+}
+EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
+#endif /* CONFIG_ARCH_HAS_COPY_MC */
+
+static void *memcpy_from_iter(struct iov_iter *i, void *to, const void *from,
+ size_t size)
+{
+ if (iov_iter_is_copy_mc(i))
+ return (void *)copy_mc_to_kernel(to, from, size);
+ return memcpy(to, from, size);
+}
+
+size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
+{
+ if (WARN_ON_ONCE(!i->data_source))
+ return 0;
+
+ if (user_backed_iter(i))
+ might_fault();
+ iterate_and_advance(i, bytes, base, len, off,
+ copyin(addr + off, base, len),
+ memcpy_from_iter(i, addr + off, base, len)
+ )
+
+ return bytes;
+}
+EXPORT_SYMBOL(_copy_from_iter);
+
+size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
+{
+ if (WARN_ON_ONCE(!i->data_source))
+ return 0;
+
+ iterate_and_advance(i, bytes, base, len, off,
+ __copy_from_user_inatomic_nocache(addr + off, base, len),
+ memcpy(addr + off, base, len)
+ )
+
+ return bytes;
+}
+EXPORT_SYMBOL(_copy_from_iter_nocache);
+
+#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
+/**
+ * _copy_from_iter_flushcache - write destination through cpu cache
+ * @addr: destination kernel address
+ * @bytes: total transfer length
+ * @i: source iterator
+ *
+ * The pmem driver arranges for filesystem-dax to use this facility via
+ * dax_copy_from_iter() for ensuring that writes to persistent memory
+ * are flushed through the CPU cache. It is differentiated from
+ * _copy_from_iter_nocache() in that it guarantees all data is flushed
+ * for all iterator types. _copy_from_iter_nocache() only attempts to
+ * bypass the cache for the ITER_IOVEC case, and on some archs may use
+ * instructions that strand dirty data in the cache.
+ *
+ * Return: number of bytes copied (may be %0)
+ */
+size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
+{
+ if (WARN_ON_ONCE(!i->data_source))
+ return 0;
+
+ iterate_and_advance(i, bytes, base, len, off,
+ __copy_from_user_flushcache(addr + off, base, len),
+ memcpy_flushcache(addr + off, base, len)
+ )
+
+ return bytes;
+}
+EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
+#endif
+
+static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
+{
+ struct page *head;
+ size_t v = n + offset;
+
+ /*
+ * The general case needs to access the page order to
+ * compute the page size.
+ * However, we mostly deal with order-0 pages and thus can
+ * avoid a possible cache line miss for requests that fit all
+ * page orders.
+ */
+ if (n <= v && v <= PAGE_SIZE)
+ return true;
+
+ head = compound_head(page);
+ v += (page - head) << PAGE_SHIFT;
+
+ if (WARN_ON(n > v || v > page_size(head)))
+ return false;
+ return true;
+}
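The fast path above leans on an overflow-safe bounds check: v = n + offset may wrap around, and the extra "n <= v" comparison rejects exactly the wrapped case. A standalone demonstration of the idiom, using a toy page size:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define TOY_PAGE_SIZE 4096u

static int fits_in_one_page(size_t offset, size_t n)
{
        size_t v = n + offset;

        return n <= v && v <= TOY_PAGE_SIZE;
}

int main(void)
{
        assert(fits_in_one_page(100, 200));
        assert(!fits_in_one_page(4000, 200));           /* spills over */
        assert(!fits_in_one_page(1, (size_t)-1));       /* v wraps to 0 */
        printf("ok\n");
        return 0;
}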
+
+size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
+ struct iov_iter *i)
+{
+ size_t res = 0;
+ if (!page_copy_sane(page, offset, bytes))
+ return 0;
+ if (WARN_ON_ONCE(i->data_source))
+ return 0;
+ page += offset / PAGE_SIZE; // first subpage
+ offset %= PAGE_SIZE;
+ while (1) {
+ void *kaddr = kmap_local_page(page);
+ size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
+ n = _copy_to_iter(kaddr + offset, n, i);
+ kunmap_local(kaddr);
+ res += n;
+ bytes -= n;
+ if (!bytes || !n)
+ break;
+ offset += n;
+ if (offset == PAGE_SIZE) {
+ page++;
+ offset = 0;
+ }
+ }
+ return res;
+}
+EXPORT_SYMBOL(copy_page_to_iter);
+
+size_t copy_page_to_iter_nofault(struct page *page, unsigned offset, size_t bytes,
+ struct iov_iter *i)
+{
+ size_t res = 0;
+
+ if (!page_copy_sane(page, offset, bytes))
+ return 0;
+ if (WARN_ON_ONCE(i->data_source))
+ return 0;
+ page += offset / PAGE_SIZE; // first subpage
+ offset %= PAGE_SIZE;
+ while (1) {
+ void *kaddr = kmap_local_page(page);
+ size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
+
+ iterate_and_advance(i, n, base, len, off,
+ copyout_nofault(base, kaddr + offset + off, len),
+ memcpy(base, kaddr + offset + off, len)
+ )
+ kunmap_local(kaddr);
+ res += n;
+ bytes -= n;
+ if (!bytes || !n)
+ break;
+ offset += n;
+ if (offset == PAGE_SIZE) {
+ page++;
+ offset = 0;
+ }
+ }
+ return res;
+}
+EXPORT_SYMBOL(copy_page_to_iter_nofault);
+
+size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
+ struct iov_iter *i)
+{
+ size_t res = 0;
+ if (!page_copy_sane(page, offset, bytes))
+ return 0;
+ page += offset / PAGE_SIZE; // first subpage
+ offset %= PAGE_SIZE;
+ while (1) {
+ void *kaddr = kmap_local_page(page);
+ size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
+ n = _copy_from_iter(kaddr + offset, n, i);
+ kunmap_local(kaddr);
+ res += n;
+ bytes -= n;
+ if (!bytes || !n)
+ break;
+ offset += n;
+ if (offset == PAGE_SIZE) {
+ page++;
+ offset = 0;
+ }
+ }
+ return res;
+}
+EXPORT_SYMBOL(copy_page_from_iter);
+
+size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
+{
+ iterate_and_advance(i, bytes, base, len, count,
+ clear_user(base, len),
+ memset(base, 0, len)
+ )
+
+ return bytes;
+}
+EXPORT_SYMBOL(iov_iter_zero);
+
+size_t copy_page_from_iter_atomic(struct page *page, size_t offset,
+ size_t bytes, struct iov_iter *i)
+{
+ size_t n, copied = 0;
+
+ if (!page_copy_sane(page, offset, bytes))
+ return 0;
+ if (WARN_ON_ONCE(!i->data_source))
+ return 0;
+
+ do {
+ char *p;
+
+ n = bytes - copied;
+ if (PageHighMem(page)) {
+ page += offset / PAGE_SIZE;
+ offset %= PAGE_SIZE;
+ n = min_t(size_t, n, PAGE_SIZE - offset);
+ }
+
+ p = kmap_atomic(page) + offset;
+ iterate_and_advance(i, n, base, len, off,
+ copyin(p + off, base, len),
+ memcpy_from_iter(i, p + off, base, len)
+ )
+ kunmap_atomic(p);
+ copied += n;
+ offset += n;
+ } while (PageHighMem(page) && copied != bytes && n > 0);
+
+ return copied;
+}
+EXPORT_SYMBOL(copy_page_from_iter_atomic);
+
+static void iov_iter_bvec_advance(struct iov_iter *i, size_t size)
+{
+ const struct bio_vec *bvec, *end;
+
+ if (!i->count)
+ return;
+ i->count -= size;
+
+ size += i->iov_offset;
+
+ for (bvec = i->bvec, end = bvec + i->nr_segs; bvec < end; bvec++) {
+ if (likely(size < bvec->bv_len))
+ break;
+ size -= bvec->bv_len;
+ }
+ i->iov_offset = size;
+ i->nr_segs -= bvec - i->bvec;
+ i->bvec = bvec;
+}
+
+static void iov_iter_iovec_advance(struct iov_iter *i, size_t size)
+{
+ const struct iovec *iov, *end;
+
+ if (!i->count)
+ return;
+ i->count -= size;
+
+ size += i->iov_offset; // from beginning of current segment
+ for (iov = iter_iov(i), end = iov + i->nr_segs; iov < end; iov++) {
+ if (likely(size < iov->iov_len))
+ break;
+ size -= iov->iov_len;
+ }
+ i->iov_offset = size;
+ i->nr_segs -= iov - iter_iov(i);
+ i->__iov = iov;
+}
+
+void iov_iter_advance(struct iov_iter *i, size_t size)
+{
+ if (unlikely(i->count < size))
+ size = i->count;
+ if (likely(iter_is_ubuf(i)) || unlikely(iov_iter_is_xarray(i))) {
+ i->iov_offset += size;
+ i->count -= size;
+ } else if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) {
+ /* iovec and kvec have identical layouts */
+ iov_iter_iovec_advance(i, size);
+ } else if (iov_iter_is_bvec(i)) {
+ iov_iter_bvec_advance(i, size);
+ } else if (iov_iter_is_discard(i)) {
+ i->count -= size;
+ }
+}
+EXPORT_SYMBOL(iov_iter_advance);
+
+void iov_iter_revert(struct iov_iter *i, size_t unroll)
+{
+ if (!unroll)
+ return;
+ if (WARN_ON(unroll > MAX_RW_COUNT))
+ return;
+ i->count += unroll;
+ if (unlikely(iov_iter_is_discard(i)))
+ return;
+ if (unroll <= i->iov_offset) {
+ i->iov_offset -= unroll;
+ return;
+ }
+ unroll -= i->iov_offset;
+ if (iov_iter_is_xarray(i) || iter_is_ubuf(i)) {
+ BUG(); /* We should never go beyond the start of the specified
+ * range since we might then be straying into pages that
+ * aren't pinned.
+ */
+ } else if (iov_iter_is_bvec(i)) {
+ const struct bio_vec *bvec = i->bvec;
+ while (1) {
+ size_t n = (--bvec)->bv_len;
+ i->nr_segs++;
+ if (unroll <= n) {
+ i->bvec = bvec;
+ i->iov_offset = n - unroll;
+ return;
+ }
+ unroll -= n;
+ }
+ } else { /* same logic for iovec and kvec */
+ const struct iovec *iov = iter_iov(i);
+ while (1) {
+ size_t n = (--iov)->iov_len;
+ i->nr_segs++;
+ if (unroll <= n) {
+ i->__iov = iov;
+ i->iov_offset = n - unroll;
+ return;
+ }
+ unroll -= n;
+ }
+ }
+}
+EXPORT_SYMBOL(iov_iter_revert);
+
+/*
+ * Return the count of just the current iov_iter segment.
+ */
+size_t iov_iter_single_seg_count(const struct iov_iter *i)
+{
+ if (i->nr_segs > 1) {
+ if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
+ return min(i->count, iter_iov(i)->iov_len - i->iov_offset);
+ if (iov_iter_is_bvec(i))
+ return min(i->count, i->bvec->bv_len - i->iov_offset);
+ }
+ return i->count;
+}
+EXPORT_SYMBOL(iov_iter_single_seg_count);
+
+void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
+ const struct kvec *kvec, unsigned long nr_segs,
+ size_t count)
+{
+ WARN_ON(direction & ~(READ | WRITE));
+ *i = (struct iov_iter){
+ .iter_type = ITER_KVEC,
+ .copy_mc = false,
+ .data_source = direction,
+ .kvec = kvec,
+ .nr_segs = nr_segs,
+ .iov_offset = 0,
+ .count = count
+ };
+}
+EXPORT_SYMBOL(iov_iter_kvec);
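For context, a kvec-backed iterator is the usual way to feed a kernel buffer to code written against iov_iter. A sketch of typical construction and use (buf, len and src are hypothetical; ITER_DEST is the modern spelling of the READ direction):

#include <linux/uio.h>

static size_t fill_from(void *buf, size_t len, const void *src)
{
        struct kvec vec = { .iov_base = buf, .iov_len = len };
        struct iov_iter iter;

        /* the iterator is a destination: data flows into the kvec */
        iov_iter_kvec(&iter, ITER_DEST, &vec, 1, len);
        return copy_to_iter(src, len, &iter);
}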
+
+void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
+ const struct bio_vec *bvec, unsigned long nr_segs,
+ size_t count)
+{
+ WARN_ON(direction & ~(READ | WRITE));
+ *i = (struct iov_iter){
+ .iter_type = ITER_BVEC,
+ .copy_mc = false,
+ .data_source = direction,
+ .bvec = bvec,
+ .nr_segs = nr_segs,
+ .iov_offset = 0,
+ .count = count
+ };
+}
+EXPORT_SYMBOL(iov_iter_bvec);
+
+/**
+ * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray
+ * @i: The iterator to initialise.
+ * @direction: The direction of the transfer.
+ * @xarray: The xarray to access.
+ * @start: The start file position.
+ * @count: The size of the I/O buffer in bytes.
+ *
+ * Set up an I/O iterator to either draw data out of the pages attached to an
+ * inode or to inject data into those pages. The caller *must* prevent the
+ * pages from evaporating, either by taking a ref on them or by locking
+ * them.
+ */
+void iov_iter_xarray(struct iov_iter *i, unsigned int direction,
+ struct xarray *xarray, loff_t start, size_t count)
+{
+ BUG_ON(direction & ~1);
+ *i = (struct iov_iter) {
+ .iter_type = ITER_XARRAY,
+ .copy_mc = false,
+ .data_source = direction,
+ .xarray = xarray,
+ .xarray_start = start,
+ .count = count,
+ .iov_offset = 0
+ };
+}
+EXPORT_SYMBOL(iov_iter_xarray);
+
+/**
+ * iov_iter_discard - Initialise an I/O iterator that discards data
+ * @i: The iterator to initialise.
+ * @direction: The direction of the transfer.
+ * @count: The size of the I/O buffer in bytes.
+ *
+ * Set up an I/O iterator that just discards everything that's written to it.
+ * It's only available as a READ iterator.
+ */
+void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
+{
+ BUG_ON(direction != READ);
+ *i = (struct iov_iter){
+ .iter_type = ITER_DISCARD,
+ .copy_mc = false,
+ .data_source = false,
+ .count = count,
+ .iov_offset = 0
+ };
+}
+EXPORT_SYMBOL(iov_iter_discard);
+
+static bool iov_iter_aligned_iovec(const struct iov_iter *i, unsigned addr_mask,
+ unsigned len_mask)
+{
+ size_t size = i->count;
+ size_t skip = i->iov_offset;
+ unsigned k;
+
+ for (k = 0; k < i->nr_segs; k++, skip = 0) {
+ const struct iovec *iov = iter_iov(i) + k;
+ size_t len = iov->iov_len - skip;
+
+ if (len > size)
+ len = size;
+ if (len & len_mask)
+ return false;
+ if ((unsigned long)(iov->iov_base + skip) & addr_mask)
+ return false;
+
+ size -= len;
+ if (!size)
+ break;
+ }
+ return true;
+}
+
+static bool iov_iter_aligned_bvec(const struct iov_iter *i, unsigned addr_mask,
+ unsigned len_mask)
+{
+ size_t size = i->count;
+ unsigned skip = i->iov_offset;
+ unsigned k;
+
+ for (k = 0; k < i->nr_segs; k++, skip = 0) {
+ size_t len = i->bvec[k].bv_len - skip;
+
+ if (len > size)
+ len = size;
+ if (len & len_mask)
+ return false;
+ if ((unsigned long)(i->bvec[k].bv_offset + skip) & addr_mask)
+ return false;
+
+ size -= len;
+ if (!size)
+ break;
+ }
+ return true;
+}
+
+/**
+ * iov_iter_is_aligned() - Check if the addresses and lengths of each segment
+ * are aligned to the parameters.
+ *
+ * @i: &struct iov_iter to check
+ * @addr_mask: bit mask to check against the iov element's addresses
+ * @len_mask: bit mask to check against the iov element's lengths
+ *
+ * Return: false if any addresses or lengths intersect with the provided masks
+ */
+bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask,
+ unsigned len_mask)
+{
+ if (likely(iter_is_ubuf(i))) {
+ if (i->count & len_mask)
+ return false;
+ if ((unsigned long)(i->ubuf + i->iov_offset) & addr_mask)
+ return false;
+ return true;
+ }
+
+ if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
+ return iov_iter_aligned_iovec(i, addr_mask, len_mask);
+
+ if (iov_iter_is_bvec(i))
+ return iov_iter_aligned_bvec(i, addr_mask, len_mask);
+
+ if (iov_iter_is_xarray(i)) {
+ if (i->count & len_mask)
+ return false;
+ if ((i->xarray_start + i->iov_offset) & addr_mask)
+ return false;
+ }
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(iov_iter_is_aligned);
+
+static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i)
+{
+ unsigned long res = 0;
+ size_t size = i->count;
+ size_t skip = i->iov_offset;
+ unsigned k;
+
+ for (k = 0; k < i->nr_segs; k++, skip = 0) {
+ const struct iovec *iov = iter_iov(i) + k;
+ size_t len = iov->iov_len - skip;
+ if (len) {
+ res |= (unsigned long)iov->iov_base + skip;
+ if (len > size)
+ len = size;
+ res |= len;
+ size -= len;
+ if (!size)
+ break;
+ }
+ }
+ return res;
+}
+
+static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i)
+{
+ unsigned res = 0;
+ size_t size = i->count;
+ unsigned skip = i->iov_offset;
+ unsigned k;
+
+ for (k = 0; k < i->nr_segs; k++, skip = 0) {
+ size_t len = i->bvec[k].bv_len - skip;
+ res |= (unsigned long)i->bvec[k].bv_offset + skip;
+ if (len > size)
+ len = size;
+ res |= len;
+ size -= len;
+ if (!size)
+ break;
+ }
+ return res;
+}
+
+unsigned long iov_iter_alignment(const struct iov_iter *i)
+{
+ if (likely(iter_is_ubuf(i))) {
+ size_t size = i->count;
+ if (size)
+ return ((unsigned long)i->ubuf + i->iov_offset) | size;
+ return 0;
+ }
+
+ /* iovec and kvec have identical layouts */
+ if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
+ return iov_iter_alignment_iovec(i);
+
+ if (iov_iter_is_bvec(i))
+ return iov_iter_alignment_bvec(i);
+
+ if (iov_iter_is_xarray(i))
+ return (i->xarray_start + i->iov_offset) | i->count;
+
+ return 0;
+}
+EXPORT_SYMBOL(iov_iter_alignment);
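The OR-accumulation above works because alignment is a property of the lowest set bit: OR every segment base and length together, and the lowest set bit of the result is the strictest alignment all segments satisfy. A userspace check with hypothetical segments:

#include <stdio.h>

struct seg { unsigned long base, len; };

int main(void)
{
        struct seg v[] = { { 0x1000, 0x200 }, { 0x3400, 0x100 } };
        unsigned long res = 0;

        for (int i = 0; i < 2; i++)
                res |= v[i].base | v[i].len;
        /* res & -res isolates the lowest set bit: here 0x100 = 256 */
        printf("alignment: %lu bytes\n", res & -res);
        return 0;
}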
+
+unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
+{
+ unsigned long res = 0;
+ unsigned long v = 0;
+ size_t size = i->count;
+ unsigned k;
+
+ if (iter_is_ubuf(i))
+ return 0;
+
+ if (WARN_ON(!iter_is_iovec(i)))
+ return ~0U;
+
+ for (k = 0; k < i->nr_segs; k++) {
+ const struct iovec *iov = iter_iov(i) + k;
+ if (iov->iov_len) {
+ unsigned long base = (unsigned long)iov->iov_base;
+ if (v) // if not the first one
+ res |= base | v; // this start | previous end
+ v = base + iov->iov_len;
+ if (size <= iov->iov_len)
+ break;
+ size -= iov->iov_len;
+ }
+ }
+ return res;
+}
+EXPORT_SYMBOL(iov_iter_gap_alignment);
+
+static int want_pages_array(struct page ***res, size_t size,
+ size_t start, unsigned int maxpages)
+{
+ unsigned int count = DIV_ROUND_UP(size + start, PAGE_SIZE);
+
+ if (count > maxpages)
+ count = maxpages;
+ WARN_ON(!count); // caller should've prevented that
+ if (!*res) {
+ *res = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL);
+ if (!*res)
+ return 0;
+ }
+ return count;
+}
+
+static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa,
+ pgoff_t index, unsigned int nr_pages)
+{
+ XA_STATE(xas, xa, index);
+ struct page *page;
+ unsigned int ret = 0;
+
+ rcu_read_lock();
+ for (page = xas_load(&xas); page; page = xas_next(&xas)) {
+ if (xas_retry(&xas, page))
+ continue;
+
+ /* Has the page moved or been split? */
+ if (unlikely(page != xas_reload(&xas))) {
+ xas_reset(&xas);
+ continue;
+ }
+
+ pages[ret] = find_subpage(page, xas.xa_index);
+ get_page(pages[ret]);
+ if (++ret == nr_pages)
+ break;
+ }
+ rcu_read_unlock();
+ return ret;
+}
+
+static ssize_t iter_xarray_get_pages(struct iov_iter *i,
+ struct page ***pages, size_t maxsize,
+ unsigned maxpages, size_t *_start_offset)
+{
+ unsigned nr, offset, count;
+ pgoff_t index;
+ loff_t pos;
+
+ pos = i->xarray_start + i->iov_offset;
+ index = pos >> PAGE_SHIFT;
+ offset = pos & ~PAGE_MASK;
+ *_start_offset = offset;
+
+ count = want_pages_array(pages, maxsize, offset, maxpages);
+ if (!count)
+ return -ENOMEM;
+ nr = iter_xarray_populate_pages(*pages, i->xarray, index, count);
+ if (nr == 0)
+ return 0;
+
+ maxsize = min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
+ i->iov_offset += maxsize;
+ i->count -= maxsize;
+ return maxsize;
+}
+
+/* must be done on a non-empty ITER_UBUF or ITER_IOVEC iterator */
+static unsigned long first_iovec_segment(const struct iov_iter *i, size_t *size)
+{
+ size_t skip;
+ long k;
+
+ if (iter_is_ubuf(i))
+ return (unsigned long)i->ubuf + i->iov_offset;
+
+ for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) {
+ const struct iovec *iov = iter_iov(i) + k;
+ size_t len = iov->iov_len - skip;
+
+ if (unlikely(!len))
+ continue;
+ if (*size > len)
+ *size = len;
+ return (unsigned long)iov->iov_base + skip;
+ }
+ BUG(); // if it had been empty, we wouldn't get called
+}
+
+/* must be done on a non-empty ITER_BVEC iterator */
+static struct page *first_bvec_segment(const struct iov_iter *i,
+ size_t *size, size_t *start)
+{
+ struct page *page;
+ size_t skip = i->iov_offset, len;
+
+ len = i->bvec->bv_len - skip;
+ if (*size > len)
+ *size = len;
+ skip += i->bvec->bv_offset;
+ page = i->bvec->bv_page + skip / PAGE_SIZE;
+ *start = skip % PAGE_SIZE;
+ return page;
+}
+
+static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i,
+ struct page ***pages, size_t maxsize,
+ unsigned int maxpages, size_t *start)
+{
+ unsigned int n, gup_flags = 0;
+
+ if (maxsize > i->count)
+ maxsize = i->count;
+ if (!maxsize)
+ return 0;
+ if (maxsize > MAX_RW_COUNT)
+ maxsize = MAX_RW_COUNT;
+
+ if (likely(user_backed_iter(i))) {
+ unsigned long addr;
+ int res;
+
+ if (iov_iter_rw(i) != WRITE)
+ gup_flags |= FOLL_WRITE;
+ if (i->nofault)
+ gup_flags |= FOLL_NOFAULT;
+
+ addr = first_iovec_segment(i, &maxsize);
+ *start = addr % PAGE_SIZE;
+ addr &= PAGE_MASK;
+ n = want_pages_array(pages, maxsize, *start, maxpages);
+ if (!n)
+ return -ENOMEM;
+ res = get_user_pages_fast(addr, n, gup_flags, *pages);
+ if (unlikely(res <= 0))
+ return res;
+ maxsize = min_t(size_t, maxsize, res * PAGE_SIZE - *start);
+ iov_iter_advance(i, maxsize);
+ return maxsize;
+ }
+ if (iov_iter_is_bvec(i)) {
+ struct page **p;
+ struct page *page;
+
+ page = first_bvec_segment(i, &maxsize, start);
+ n = want_pages_array(pages, maxsize, *start, maxpages);
+ if (!n)
+ return -ENOMEM;
+ p = *pages;
+ for (int k = 0; k < n; k++)
+ get_page(p[k] = page + k);
+ maxsize = min_t(size_t, maxsize, n * PAGE_SIZE - *start);
+ i->count -= maxsize;
+ i->iov_offset += maxsize;
+ if (i->iov_offset == i->bvec->bv_len) {
+ i->iov_offset = 0;
+ i->bvec++;
+ i->nr_segs--;
+ }
+ return maxsize;
+ }
+ if (iov_iter_is_xarray(i))
+ return iter_xarray_get_pages(i, pages, maxsize, maxpages, start);
+ return -EFAULT;
+}
+
+ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages,
+ size_t maxsize, unsigned maxpages, size_t *start)
+{
+ if (!maxpages)
+ return 0;
+ BUG_ON(!pages);
+
+ return __iov_iter_get_pages_alloc(i, &pages, maxsize, maxpages, start);
+}
+EXPORT_SYMBOL(iov_iter_get_pages2);
+
+ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i,
+ struct page ***pages, size_t maxsize, size_t *start)
+{
+ ssize_t len;
+
+ *pages = NULL;
+
+ len = __iov_iter_get_pages_alloc(i, pages, maxsize, ~0U, start);
+ if (len <= 0) {
+ kvfree(*pages);
+ *pages = NULL;
+ }
+ return len;
+}
+EXPORT_SYMBOL(iov_iter_get_pages_alloc2);
+
+size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
+ struct iov_iter *i)
+{
+ __wsum sum, next;
+ sum = *csum;
+ if (WARN_ON_ONCE(!i->data_source))
+ return 0;
+
+ iterate_and_advance(i, bytes, base, len, off, ({
+ next = csum_and_copy_from_user(base, addr + off, len);
+ sum = csum_block_add(sum, next, off);
+ next ? 0 : len;
+ }), ({
+ sum = csum_and_memcpy(addr + off, base, len, sum, off);
+ })
+ )
+ *csum = sum;
+ return bytes;
+}
+EXPORT_SYMBOL(csum_and_copy_from_iter);
+
+size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
+ struct iov_iter *i)
+{
+ struct csum_state *csstate = _csstate;
+ __wsum sum, next;
+
+ if (WARN_ON_ONCE(i->data_source))
+ return 0;
+ if (unlikely(iov_iter_is_discard(i))) {
+ // can't use csum_and_memcpy() for this one - the data is not copied
+ csstate->csum = csum_block_add(csstate->csum,
+ csum_partial(addr, bytes, 0),
+ csstate->off);
+ csstate->off += bytes;
+ return bytes;
+ }
+
+ sum = csum_shift(csstate->csum, csstate->off);
+ iterate_and_advance(i, bytes, base, len, off, ({
+ next = csum_and_copy_to_user(addr + off, base, len);
+ sum = csum_block_add(sum, next, off);
+ next ? 0 : len;
+ }), ({
+ sum = csum_and_memcpy(base, addr + off, len, sum, off);
+ })
+ )
+ csstate->csum = csum_shift(sum, csstate->off);
+ csstate->off += bytes;
+ return bytes;
+}
+EXPORT_SYMBOL(csum_and_copy_to_iter);
+
+size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
+ struct iov_iter *i)
+{
+#ifdef CONFIG_CRYPTO_HASH
+ struct ahash_request *hash = hashp;
+ struct scatterlist sg;
+ size_t copied;
+
+ copied = copy_to_iter(addr, bytes, i);
+ sg_init_one(&sg, addr, copied);
+ ahash_request_set_crypt(hash, &sg, NULL, copied);
+ crypto_ahash_update(hash);
+ return copied;
+#else
+ return 0;
+#endif
+}
+EXPORT_SYMBOL(hash_and_copy_to_iter);
+
+static int iov_npages(const struct iov_iter *i, int maxpages)
+{
+ size_t skip = i->iov_offset, size = i->count;
+ const struct iovec *p;
+ int npages = 0;
+
+ for (p = iter_iov(i); size; skip = 0, p++) {
+ unsigned offs = offset_in_page(p->iov_base + skip);
+ size_t len = min(p->iov_len - skip, size);
+
+ if (len) {
+ size -= len;
+ npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
+ if (unlikely(npages > maxpages))
+ return maxpages;
+ }
+ }
+ return npages;
+}
+
+static int bvec_npages(const struct iov_iter *i, int maxpages)
+{
+ size_t skip = i->iov_offset, size = i->count;
+ const struct bio_vec *p;
+ int npages = 0;
+
+ for (p = i->bvec; size; skip = 0, p++) {
+ unsigned offs = (p->bv_offset + skip) % PAGE_SIZE;
+ size_t len = min(p->bv_len - skip, size);
+
+ size -= len;
+ npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
+ if (unlikely(npages > maxpages))
+ return maxpages;
+ }
+ return npages;
+}
+
+int iov_iter_npages(const struct iov_iter *i, int maxpages)
+{
+ if (unlikely(!i->count))
+ return 0;
+ if (likely(iter_is_ubuf(i))) {
+ unsigned offs = offset_in_page(i->ubuf + i->iov_offset);
+ int npages = DIV_ROUND_UP(offs + i->count, PAGE_SIZE);
+ return min(npages, maxpages);
+ }
+ /* iovec and kvec have identical layouts */
+ if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
+ return iov_npages(i, maxpages);
+ if (iov_iter_is_bvec(i))
+ return bvec_npages(i, maxpages);
+ if (iov_iter_is_xarray(i)) {
+ unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE;
+ int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE);
+ return min(npages, maxpages);
+ }
+ return 0;
+}
+EXPORT_SYMBOL(iov_iter_npages);
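All the *_npages helpers share one piece of arithmetic: a buffer of count bytes that begins offs bytes into a page spans DIV_ROUND_UP(offs + count, PAGE_SIZE) pages. A toy check of the rounding:

#include <assert.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define TOY_PAGE 4096u

int main(void)
{
        assert(DIV_ROUND_UP(0 + 4096u, TOY_PAGE) == 1); /* page-aligned */
        assert(DIV_ROUND_UP(1 + 4096u, TOY_PAGE) == 2); /* straddles a boundary */
        printf("ok\n");
        return 0;
}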
+
+const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
+{
+ *new = *old;
+ if (iov_iter_is_bvec(new))
+ return new->bvec = kmemdup(new->bvec,
+ new->nr_segs * sizeof(struct bio_vec),
+ flags);
+ else if (iov_iter_is_kvec(new) || iter_is_iovec(new))
+ /* iovec and kvec have identical layout */
+ return new->__iov = kmemdup(new->__iov,
+ new->nr_segs * sizeof(struct iovec),
+ flags);
+ return NULL;
+}
+EXPORT_SYMBOL(dup_iter);
+
+static __noclone int copy_compat_iovec_from_user(struct iovec *iov,
+ const struct iovec __user *uvec, unsigned long nr_segs)
+{
+ const struct compat_iovec __user *uiov =
+ (const struct compat_iovec __user *)uvec;
+ int ret = -EFAULT, i;
+
+ if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
+ return -EFAULT;
+
+ for (i = 0; i < nr_segs; i++) {
+ compat_uptr_t buf;
+ compat_ssize_t len;
+
+ unsafe_get_user(len, &uiov[i].iov_len, uaccess_end);
+ unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end);
+
+ /* check for compat_size_t not fitting in compat_ssize_t .. */
+ if (len < 0) {
+ ret = -EINVAL;
+ goto uaccess_end;
+ }
+ iov[i].iov_base = compat_ptr(buf);
+ iov[i].iov_len = len;
+ }
+
+ ret = 0;
+uaccess_end:
+ user_access_end();
+ return ret;
+}
+
+static __noclone int copy_iovec_from_user(struct iovec *iov,
+ const struct iovec __user *uiov, unsigned long nr_segs)
+{
+ int ret = -EFAULT;
+
+ if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
+ return -EFAULT;
+
+ do {
+ void __user *buf;
+ ssize_t len;
+
+ unsafe_get_user(len, &uiov->iov_len, uaccess_end);
+ unsafe_get_user(buf, &uiov->iov_base, uaccess_end);
+
+ /* check for size_t not fitting in ssize_t .. */
+ if (unlikely(len < 0)) {
+ ret = -EINVAL;
+ goto uaccess_end;
+ }
+ iov->iov_base = buf;
+ iov->iov_len = len;
+
+ uiov++; iov++;
+ } while (--nr_segs);
+
+ ret = 0;
+uaccess_end:
+ user_access_end();
+ return ret;
+}
+
+struct iovec *iovec_from_user(const struct iovec __user *uvec,
+ unsigned long nr_segs, unsigned long fast_segs,
+ struct iovec *fast_iov, bool compat)
+{
+ struct iovec *iov = fast_iov;
+ int ret;
+
+ /*
+ * SuS says "The readv() function *may* fail if the iovcnt argument was
+	 * less than or equal to 0, or greater than {IOV_MAX}." Linux has
+ * traditionally returned zero for zero segments, so...
+ */
+ if (nr_segs == 0)
+ return iov;
+ if (nr_segs > UIO_MAXIOV)
+ return ERR_PTR(-EINVAL);
+ if (nr_segs > fast_segs) {
+ iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL);
+ if (!iov)
+ return ERR_PTR(-ENOMEM);
+ }
+
+ if (unlikely(compat))
+ ret = copy_compat_iovec_from_user(iov, uvec, nr_segs);
+ else
+ ret = copy_iovec_from_user(iov, uvec, nr_segs);
+ if (ret) {
+ if (iov != fast_iov)
+ kfree(iov);
+ return ERR_PTR(ret);
+ }
+
+ return iov;
+}
+
+/*
+ * Single segment iovec supplied by the user, import it as ITER_UBUF.
+ */
+static ssize_t __import_iovec_ubuf(int type, const struct iovec __user *uvec,
+ struct iovec **iovp, struct iov_iter *i,
+ bool compat)
+{
+ struct iovec *iov = *iovp;
+ ssize_t ret;
+
+ if (compat)
+ ret = copy_compat_iovec_from_user(iov, uvec, 1);
+ else
+ ret = copy_iovec_from_user(iov, uvec, 1);
+ if (unlikely(ret))
+ return ret;
+
+ ret = import_ubuf(type, iov->iov_base, iov->iov_len, i);
+ if (unlikely(ret))
+ return ret;
+ *iovp = NULL;
+ return i->count;
+}
+
+ssize_t __import_iovec(int type, const struct iovec __user *uvec,
+ unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
+ struct iov_iter *i, bool compat)
+{
+ ssize_t total_len = 0;
+ unsigned long seg;
+ struct iovec *iov;
+
+ if (nr_segs == 1)
+ return __import_iovec_ubuf(type, uvec, iovp, i, compat);
+
+ iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat);
+ if (IS_ERR(iov)) {
+ *iovp = NULL;
+ return PTR_ERR(iov);
+ }
+
+ /*
+ * According to the Single Unix Specification we should return EINVAL if
+ * an element length is < 0 when cast to ssize_t or if the total length
+ * would overflow the ssize_t return value of the system call.
+ *
+ * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the
+ * overflow case.
+ */
+ for (seg = 0; seg < nr_segs; seg++) {
+ ssize_t len = (ssize_t)iov[seg].iov_len;
+
+ if (!access_ok(iov[seg].iov_base, len)) {
+ if (iov != *iovp)
+ kfree(iov);
+ *iovp = NULL;
+ return -EFAULT;
+ }
+
+ if (len > MAX_RW_COUNT - total_len) {
+ len = MAX_RW_COUNT - total_len;
+ iov[seg].iov_len = len;
+ }
+ total_len += len;
+ }
+
+ iov_iter_init(i, type, iov, nr_segs, total_len);
+ if (iov == *iovp)
+ *iovp = NULL;
+ else
+ *iovp = iov;
+ return total_len;
+}
+
+/**
+ * import_iovec() - Copy an array of &struct iovec from userspace
+ * into the kernel, check that it is valid, and initialize a new
+ * &struct iov_iter iterator to access it.
+ *
+ * @type: One of %READ or %WRITE.
+ * @uvec: Pointer to the userspace array.
+ * @nr_segs: Number of elements in userspace array.
+ * @fast_segs: Number of elements in @iov.
+ * @iovp: (input and output parameter) Pointer to pointer to (usually small
+ * on-stack) kernel array.
+ * @i: Pointer to iterator that will be initialized on success.
+ *
+ * If the array pointed to by *@iovp is large enough to hold all @nr_segs,
+ * then this function places %NULL in *@iovp on return. Otherwise, a new
+ * array will be allocated and the result placed in *@iovp. This means that
+ * the caller may call kfree() on *@iovp regardless of whether the small
+ * on-stack array was used or not (and regardless of whether this function
+ * returns an error or not).
+ *
+ * Return: Negative error code on error, bytes imported on success
+ */
+ssize_t import_iovec(int type, const struct iovec __user *uvec,
+ unsigned nr_segs, unsigned fast_segs,
+ struct iovec **iovp, struct iov_iter *i)
+{
+ return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i,
+ in_compat_syscall());
+}
+EXPORT_SYMBOL(import_iovec);
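+
+/*
+ * Example (illustrative sketch): typical readv()-style use of
+ * import_iovec(); example_readv() is hypothetical.
+ *
+ *	static ssize_t example_readv(const struct iovec __user *uvec,
+ *				     unsigned long nr_segs)
+ *	{
+ *		struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
+ *		struct iov_iter iter;
+ *		ssize_t ret;
+ *
+ *		ret = import_iovec(ITER_DEST, uvec, nr_segs, UIO_FASTIOV,
+ *				   &iov, &iter);
+ *		if (ret < 0)
+ *			return ret;
+ *		// ... fill &iter, e.g. via copy_to_iter() ...
+ *		kfree(iov);	// safe: *iovp is NULL if iovstack was used
+ *		return ret;
+ *	}
+ */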
+
+int import_single_range(int rw, void __user *buf, size_t len,
+ struct iovec *iov, struct iov_iter *i)
+{
+ if (len > MAX_RW_COUNT)
+ len = MAX_RW_COUNT;
+ if (unlikely(!access_ok(buf, len)))
+ return -EFAULT;
+
+ iov_iter_ubuf(i, rw, buf, len);
+ return 0;
+}
+EXPORT_SYMBOL(import_single_range);
+
+int import_ubuf(int rw, void __user *buf, size_t len, struct iov_iter *i)
+{
+ if (len > MAX_RW_COUNT)
+ len = MAX_RW_COUNT;
+ if (unlikely(!access_ok(buf, len)))
+ return -EFAULT;
+
+ iov_iter_ubuf(i, rw, buf, len);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(import_ubuf);
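+
+/*
+ * Example (illustrative): importing a plain user buffer as the source of a
+ * write.
+ *
+ *	struct iov_iter iter;
+ *	int ret = import_ubuf(ITER_SOURCE, ubuf, len, &iter);
+ *	if (ret)
+ *		return ret;
+ *	// iter now covers min(len, MAX_RW_COUNT) bytes of ubuf
+ */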
+
+/**
+ * iov_iter_restore() - Restore a &struct iov_iter to the same state as when
+ * iov_iter_save_state() was called.
+ *
+ * @i: &struct iov_iter to restore
+ * @state: state to restore from
+ *
+ * Used after iov_iter_save_state() to restore @i, if operations may
+ * have advanced it.
+ *
+ * Note: only works on ITER_IOVEC, ITER_BVEC, ITER_KVEC and ITER_UBUF
+ */
+void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state)
+{
+ if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i) &&
+ !iter_is_ubuf(i)) && !iov_iter_is_kvec(i))
+ return;
+ i->iov_offset = state->iov_offset;
+ i->count = state->count;
+ if (iter_is_ubuf(i))
+ return;
+ /*
+ * For the *vec iters, nr_segs + iov is constant - if we increment
+ * the vec, then we also decrement the nr_segs count. Hence we don't
+ * need to track both of these, just one is enough and we can deduct
+ * the other from that. ITER_KVEC and ITER_IOVEC are the same struct
+	 * size, so we can just increment the iov pointer as they are unionized.
+ * ITER_BVEC _may_ be the same size on some archs, but on others it is
+ * not. Be safe and handle it separately.
+ */
+ BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec));
+ if (iov_iter_is_bvec(i))
+ i->bvec -= state->nr_segs - i->nr_segs;
+ else
+ i->__iov -= state->nr_segs - i->nr_segs;
+ i->nr_segs = state->nr_segs;
+}
+
+/*
+ * Extract a list of contiguous pages from an ITER_XARRAY iterator. This does not
+ * get references on the pages, nor does it get a pin on them.
+ */
+static ssize_t iov_iter_extract_xarray_pages(struct iov_iter *i,
+ struct page ***pages, size_t maxsize,
+ unsigned int maxpages,
+ iov_iter_extraction_t extraction_flags,
+ size_t *offset0)
+{
+ struct page *page, **p;
+ unsigned int nr = 0, offset;
+ loff_t pos = i->xarray_start + i->iov_offset;
+ pgoff_t index = pos >> PAGE_SHIFT;
+ XA_STATE(xas, i->xarray, index);
+
+ offset = pos & ~PAGE_MASK;
+ *offset0 = offset;
+
+ maxpages = want_pages_array(pages, maxsize, offset, maxpages);
+ if (!maxpages)
+ return -ENOMEM;
+ p = *pages;
+
+ rcu_read_lock();
+ for (page = xas_load(&xas); page; page = xas_next(&xas)) {
+ if (xas_retry(&xas, page))
+ continue;
+
+ /* Has the page moved or been split? */
+ if (unlikely(page != xas_reload(&xas))) {
+ xas_reset(&xas);
+ continue;
+ }
+
+ p[nr++] = find_subpage(page, xas.xa_index);
+ if (nr == maxpages)
+ break;
+ }
+ rcu_read_unlock();
+
+ maxsize = min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
+ iov_iter_advance(i, maxsize);
+ return maxsize;
+}
+
+/*
+ * Extract a list of contiguous pages from an ITER_BVEC iterator. This does
+ * not get references on the pages, nor does it get a pin on them.
+ */
+static ssize_t iov_iter_extract_bvec_pages(struct iov_iter *i,
+ struct page ***pages, size_t maxsize,
+ unsigned int maxpages,
+ iov_iter_extraction_t extraction_flags,
+ size_t *offset0)
+{
+ struct page **p, *page;
+ size_t skip = i->iov_offset, offset, size;
+ int k;
+
+ for (;;) {
+ if (i->nr_segs == 0)
+ return 0;
+ size = min(maxsize, i->bvec->bv_len - skip);
+ if (size)
+ break;
+ i->iov_offset = 0;
+ i->nr_segs--;
+ i->bvec++;
+ skip = 0;
+ }
+
+ skip += i->bvec->bv_offset;
+ page = i->bvec->bv_page + skip / PAGE_SIZE;
+ offset = skip % PAGE_SIZE;
+ *offset0 = offset;
+
+ maxpages = want_pages_array(pages, size, offset, maxpages);
+ if (!maxpages)
+ return -ENOMEM;
+ p = *pages;
+ for (k = 0; k < maxpages; k++)
+ p[k] = page + k;
+
+ size = min_t(size_t, size, maxpages * PAGE_SIZE - offset);
+ iov_iter_advance(i, size);
+ return size;
+}
+
+/*
+ * Extract a list of virtually contiguous pages from an ITER_KVEC iterator.
+ * This does not get references on the pages, nor does it get a pin on them.
+ */
+static ssize_t iov_iter_extract_kvec_pages(struct iov_iter *i,
+ struct page ***pages, size_t maxsize,
+ unsigned int maxpages,
+ iov_iter_extraction_t extraction_flags,
+ size_t *offset0)
+{
+ struct page **p, *page;
+ const void *kaddr;
+ size_t skip = i->iov_offset, offset, len, size;
+ int k;
+
+ for (;;) {
+ if (i->nr_segs == 0)
+ return 0;
+ size = min(maxsize, i->kvec->iov_len - skip);
+ if (size)
+ break;
+ i->iov_offset = 0;
+ i->nr_segs--;
+ i->kvec++;
+ skip = 0;
+ }
+
+ kaddr = i->kvec->iov_base + skip;
+ offset = (unsigned long)kaddr & ~PAGE_MASK;
+ *offset0 = offset;
+
+ maxpages = want_pages_array(pages, size, offset, maxpages);
+ if (!maxpages)
+ return -ENOMEM;
+ p = *pages;
+
+ kaddr -= offset;
+ len = offset + size;
+ for (k = 0; k < maxpages; k++) {
+ size_t seg = min_t(size_t, len, PAGE_SIZE);
+
+ if (is_vmalloc_or_module_addr(kaddr))
+ page = vmalloc_to_page(kaddr);
+ else
+ page = virt_to_page(kaddr);
+
+ p[k] = page;
+ len -= seg;
+ kaddr += PAGE_SIZE;
+ }
+
+ size = min_t(size_t, size, maxpages * PAGE_SIZE - offset);
+ iov_iter_advance(i, size);
+ return size;
+}
+
+/*
+ * Extract a list of contiguous pages from a user iterator and get a pin on
+ * each of them. This should only be used if the iterator is user-backed
+ * (ITER_IOVEC/ITER_UBUF).
+ *
+ * It does not get refs on the pages, but the pages must be unpinned by the
+ * caller once the transfer is complete.
+ *
+ * This is safe to be used where background IO/DMA *is* going to be modifying
+ * the buffer; using a pin rather than a ref forces fork() to give the
+ * child a copy of the page.
+ */
+static ssize_t iov_iter_extract_user_pages(struct iov_iter *i,
+ struct page ***pages,
+ size_t maxsize,
+ unsigned int maxpages,
+ iov_iter_extraction_t extraction_flags,
+ size_t *offset0)
+{
+ unsigned long addr;
+ unsigned int gup_flags = 0;
+ size_t offset;
+ int res;
+
+ if (i->data_source == ITER_DEST)
+ gup_flags |= FOLL_WRITE;
+ if (extraction_flags & ITER_ALLOW_P2PDMA)
+ gup_flags |= FOLL_PCI_P2PDMA;
+ if (i->nofault)
+ gup_flags |= FOLL_NOFAULT;
+
+ addr = first_iovec_segment(i, &maxsize);
+ *offset0 = offset = addr % PAGE_SIZE;
+ addr &= PAGE_MASK;
+ maxpages = want_pages_array(pages, maxsize, offset, maxpages);
+ if (!maxpages)
+ return -ENOMEM;
+ res = pin_user_pages_fast(addr, maxpages, gup_flags, *pages);
+ if (unlikely(res <= 0))
+ return res;
+ maxsize = min_t(size_t, maxsize, res * PAGE_SIZE - offset);
+ iov_iter_advance(i, maxsize);
+ return maxsize;
+}
+
+/**
+ * iov_iter_extract_pages - Extract a list of contiguous pages from an iterator
+ * @i: The iterator to extract from
+ * @pages: Where to return the list of pages
+ * @maxsize: The maximum amount of iterator to extract
+ * @maxpages: The maximum size of the list of pages
+ * @extraction_flags: Flags to qualify request
+ * @offset0: Where to return the starting offset into (*@pages)[0]
+ *
+ * Extract a list of contiguous pages from the current point of the iterator,
+ * advancing the iterator. The maximum number of pages and the maximum amount
+ * of page contents can be set.
+ *
+ * If *@pages is NULL, a page list will be allocated to the required size and
+ * *@pages will be set to its base. If *@pages is not NULL, it will be assumed
+ * that the caller allocated a page list at least @maxpages in size and this
+ * will be filled in.
+ *
+ * @extraction_flags can have ITER_ALLOW_P2PDMA set to request peer-to-peer DMA
+ * be allowed on the pages extracted.
+ *
+ * The iov_iter_extract_will_pin() function can be used to query how cleanup
+ * should be performed.
+ *
+ * Extra refs or pins on the pages may be obtained as follows:
+ *
+ * (*) If the iterator is user-backed (ITER_IOVEC/ITER_UBUF), pins will be
+ * added to the pages, but refs will not be taken.
+ * iov_iter_extract_will_pin() will return true.
+ *
+ * (*) If the iterator is ITER_KVEC, ITER_BVEC or ITER_XARRAY, the pages are
+ * merely listed; no extra refs or pins are obtained.
+ *     iov_iter_extract_will_pin() will return false.
+ *
+ * Note also:
+ *
+ * (*) Use with ITER_DISCARD is not supported as that has no content.
+ *
+ * On success, the function sets *@pages to the new pagelist, if allocated, and
+ * sets *offset0 to the offset into the first page.
+ *
+ * It may also return -ENOMEM and -EFAULT.
+ */
+ssize_t iov_iter_extract_pages(struct iov_iter *i,
+ struct page ***pages,
+ size_t maxsize,
+ unsigned int maxpages,
+ iov_iter_extraction_t extraction_flags,
+ size_t *offset0)
+{
+ maxsize = min_t(size_t, min_t(size_t, maxsize, i->count), MAX_RW_COUNT);
+ if (!maxsize)
+ return 0;
+
+ if (likely(user_backed_iter(i)))
+ return iov_iter_extract_user_pages(i, pages, maxsize,
+ maxpages, extraction_flags,
+ offset0);
+ if (iov_iter_is_kvec(i))
+ return iov_iter_extract_kvec_pages(i, pages, maxsize,
+ maxpages, extraction_flags,
+ offset0);
+ if (iov_iter_is_bvec(i))
+ return iov_iter_extract_bvec_pages(i, pages, maxsize,
+ maxpages, extraction_flags,
+ offset0);
+ if (iov_iter_is_xarray(i))
+ return iov_iter_extract_xarray_pages(i, pages, maxsize,
+ maxpages, extraction_flags,
+ offset0);
+ return -EFAULT;
+}
+EXPORT_SYMBOL_GPL(iov_iter_extract_pages);
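+
+/*
+ * Example (illustrative sketch): extracting pages for DMA and releasing
+ * them afterwards; example_extract() is hypothetical.  Cleanup follows the
+ * iov_iter_extract_will_pin() rule documented above.
+ *
+ *	static ssize_t example_extract(struct iov_iter *iter, size_t bytes)
+ *	{
+ *		struct page **pages = NULL;	// let the helper allocate
+ *		size_t offset;
+ *		ssize_t len;
+ *		int n, i;
+ *
+ *		len = iov_iter_extract_pages(iter, &pages, bytes, INT_MAX,
+ *					     0, &offset);
+ *		if (len <= 0)
+ *			return len;
+ *		n = DIV_ROUND_UP(offset + len, PAGE_SIZE);
+ *		// ... set up DMA on pages[0..n-1], starting at offset ...
+ *		if (iov_iter_extract_will_pin(iter))
+ *			for (i = 0; i < n; i++)
+ *				unpin_user_page(pages[i]);
+ *		kvfree(pages);
+ *		return len;
+ *	}
+ */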
diff --git a/lib/irq_poll.c b/lib/irq_poll.c
new file mode 100644
index 000000000..2d5329a42
--- /dev/null
+++ b/lib/irq_poll.c
@@ -0,0 +1,219 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Functions related to interrupt-poll handling in the block layer. This
+ * is similar to NAPI for network devices.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/bio.h>
+#include <linux/interrupt.h>
+#include <linux/cpu.h>
+#include <linux/irq_poll.h>
+#include <linux/delay.h>
+
+static unsigned int irq_poll_budget __read_mostly = 256;
+
+static DEFINE_PER_CPU(struct list_head, blk_cpu_iopoll);
+
+/**
+ * irq_poll_sched - Schedule a run of the iopoll handler
+ * @iop: The parent iopoll structure
+ *
+ * Description:
+ *     Add this irq_poll structure to the pending poll list and raise the
+ *     blk iopoll softirq.
+ **/
+void irq_poll_sched(struct irq_poll *iop)
+{
+ unsigned long flags;
+
+ if (test_bit(IRQ_POLL_F_DISABLE, &iop->state))
+ return;
+ if (test_and_set_bit(IRQ_POLL_F_SCHED, &iop->state))
+ return;
+
+ local_irq_save(flags);
+ list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll));
+ raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL(irq_poll_sched);
+
+/**
+ * __irq_poll_complete - Mark this @iop as un-polled again
+ * @iop: The parent iopoll structure
+ *
+ * Description:
+ * See irq_poll_complete(). This function must be called with interrupts
+ * disabled.
+ **/
+static void __irq_poll_complete(struct irq_poll *iop)
+{
+ list_del(&iop->list);
+ smp_mb__before_atomic();
+ clear_bit_unlock(IRQ_POLL_F_SCHED, &iop->state);
+}
+
+/**
+ * irq_poll_complete - Mark this @iop as un-polled again
+ * @iop: The parent iopoll structure
+ *
+ * Description:
+ * If a driver consumes less than the assigned budget in its run of the
+ * iopoll handler, it'll end the polled mode by calling this function. The
+ * iopoll handler will not be invoked again before irq_poll_sched()
+ * is called.
+ **/
+void irq_poll_complete(struct irq_poll *iop)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ __irq_poll_complete(iop);
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL(irq_poll_complete);
+
+static void __latent_entropy irq_poll_softirq(struct softirq_action *h)
+{
+ struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll);
+ int rearm = 0, budget = irq_poll_budget;
+ unsigned long start_time = jiffies;
+
+ local_irq_disable();
+
+ while (!list_empty(list)) {
+ struct irq_poll *iop;
+ int work, weight;
+
+ /*
+ * If softirq window is exhausted then punt.
+ */
+ if (budget <= 0 || time_after(jiffies, start_time)) {
+ rearm = 1;
+ break;
+ }
+
+ local_irq_enable();
+
+ /* Even though interrupts have been re-enabled, this
+ * access is safe because interrupts can only add new
+ * entries to the tail of this list, and only ->poll()
+ * calls can remove this head entry from the list.
+ */
+ iop = list_entry(list->next, struct irq_poll, list);
+
+ weight = iop->weight;
+ work = 0;
+ if (test_bit(IRQ_POLL_F_SCHED, &iop->state))
+ work = iop->poll(iop, weight);
+
+ budget -= work;
+
+ local_irq_disable();
+
+ /*
+		 * Drivers must not modify the iopoll state if they
+		 * consume their assigned weight (or more; some drivers can't
+		 * easily just stop processing, they have to complete an
+		 * entire mask of commands). In such cases this code
+ * still "owns" the iopoll instance and therefore can
+ * move the instance around on the list at-will.
+ */
+ if (work >= weight) {
+ if (test_bit(IRQ_POLL_F_DISABLE, &iop->state))
+ __irq_poll_complete(iop);
+ else
+ list_move_tail(&iop->list, list);
+ }
+ }
+
+ if (rearm)
+ __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
+
+ local_irq_enable();
+}
+
+/**
+ * irq_poll_disable - Disable iopoll on this @iop
+ * @iop: The parent iopoll structure
+ *
+ * Description:
+ * Disable io polling and wait for any pending callbacks to have completed.
+ **/
+void irq_poll_disable(struct irq_poll *iop)
+{
+ set_bit(IRQ_POLL_F_DISABLE, &iop->state);
+ while (test_and_set_bit(IRQ_POLL_F_SCHED, &iop->state))
+ msleep(1);
+ clear_bit(IRQ_POLL_F_DISABLE, &iop->state);
+}
+EXPORT_SYMBOL(irq_poll_disable);
+
+/**
+ * irq_poll_enable - Enable iopoll on this @iop
+ * @iop: The parent iopoll structure
+ *
+ * Description:
+ *     Enable iopoll on this @iop. Note that this will not schedule a
+ *     handler run; it only marks @iop as active again.
+ **/
+void irq_poll_enable(struct irq_poll *iop)
+{
+ BUG_ON(!test_bit(IRQ_POLL_F_SCHED, &iop->state));
+ smp_mb__before_atomic();
+ clear_bit_unlock(IRQ_POLL_F_SCHED, &iop->state);
+}
+EXPORT_SYMBOL(irq_poll_enable);
+
+/**
+ * irq_poll_init - Initialize this @iop
+ * @iop: The parent iopoll structure
+ * @weight: The default weight (or command completion budget)
+ * @poll_fn: The handler to invoke
+ *
+ * Description:
+ * Initialize and enable this irq_poll structure.
+ **/
+void irq_poll_init(struct irq_poll *iop, int weight, irq_poll_fn *poll_fn)
+{
+ memset(iop, 0, sizeof(*iop));
+ INIT_LIST_HEAD(&iop->list);
+ iop->weight = weight;
+ iop->poll = poll_fn;
+}
+EXPORT_SYMBOL(irq_poll_init);
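+
+/*
+ * Example (illustrative sketch): minimal driver usage.  struct my_dev,
+ * my_complete_one() and the weight of 32 are hypothetical.
+ *
+ *	static int my_poll(struct irq_poll *iop, int budget)
+ *	{
+ *		struct my_dev *dev = container_of(iop, struct my_dev, iop);
+ *		int done = 0;
+ *
+ *		while (done < budget && my_complete_one(dev))
+ *			done++;
+ *		if (done < budget)
+ *			irq_poll_complete(iop);	// ran out of work
+ *		return done;
+ *	}
+ *
+ *	// probe:    irq_poll_init(&dev->iop, 32, my_poll);
+ *	// ISR:      irq_poll_sched(&dev->iop);
+ *	// teardown: irq_poll_disable(&dev->iop);
+ */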
+
+static int irq_poll_cpu_dead(unsigned int cpu)
+{
+ /*
+ * If a CPU goes away, splice its entries to the current CPU and
+ * set the POLL softirq bit. The local_bh_disable()/enable() pair
+ * ensures that it is handled. Otherwise the current CPU could
+ * reach idle with the POLL softirq pending.
+ */
+ local_bh_disable();
+ local_irq_disable();
+ list_splice_init(&per_cpu(blk_cpu_iopoll, cpu),
+ this_cpu_ptr(&blk_cpu_iopoll));
+ __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
+ local_irq_enable();
+ local_bh_enable();
+
+ return 0;
+}
+
+static __init int irq_poll_setup(void)
+{
+ int i;
+
+ for_each_possible_cpu(i)
+ INIT_LIST_HEAD(&per_cpu(blk_cpu_iopoll, i));
+
+ open_softirq(IRQ_POLL_SOFTIRQ, irq_poll_softirq);
+ cpuhp_setup_state_nocalls(CPUHP_IRQ_POLL_DEAD, "irq_poll:dead", NULL,
+ irq_poll_cpu_dead);
+ return 0;
+}
+subsys_initcall(irq_poll_setup);
diff --git a/lib/irq_regs.c b/lib/irq_regs.c
new file mode 100644
index 000000000..0d545a930
--- /dev/null
+++ b/lib/irq_regs.c
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* saved per-CPU IRQ register pointer
+ *
+ * Copyright (C) 2006 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+#include <linux/export.h>
+#include <linux/percpu.h>
+#include <asm/irq_regs.h>
+
+#ifndef ARCH_HAS_OWN_IRQ_REGS
+DEFINE_PER_CPU(struct pt_regs *, __irq_regs);
+EXPORT_PER_CPU_SYMBOL(__irq_regs);
+#endif
diff --git a/lib/is_signed_type_kunit.c b/lib/is_signed_type_kunit.c
new file mode 100644
index 000000000..0a7f6ae62
--- /dev/null
+++ b/lib/is_signed_type_kunit.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * ./tools/testing/kunit/kunit.py run is_signed_type [--raw_output]
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <kunit/test.h>
+#include <linux/compiler.h>
+
+enum unsigned_enum {
+ constant_a = 3,
+};
+
+enum signed_enum {
+ constant_b = -1,
+ constant_c = 2,
+};
+
+static void is_signed_type_test(struct kunit *test)
+{
+ KUNIT_EXPECT_EQ(test, is_signed_type(bool), false);
+ KUNIT_EXPECT_EQ(test, is_signed_type(signed char), true);
+ KUNIT_EXPECT_EQ(test, is_signed_type(unsigned char), false);
+ KUNIT_EXPECT_EQ(test, is_signed_type(char), false);
+ KUNIT_EXPECT_EQ(test, is_signed_type(int), true);
+ KUNIT_EXPECT_EQ(test, is_signed_type(unsigned int), false);
+ KUNIT_EXPECT_EQ(test, is_signed_type(long), true);
+ KUNIT_EXPECT_EQ(test, is_signed_type(unsigned long), false);
+ KUNIT_EXPECT_EQ(test, is_signed_type(long long), true);
+ KUNIT_EXPECT_EQ(test, is_signed_type(unsigned long long), false);
+ KUNIT_EXPECT_EQ(test, is_signed_type(enum unsigned_enum), false);
+ KUNIT_EXPECT_EQ(test, is_signed_type(enum signed_enum), true);
+ KUNIT_EXPECT_EQ(test, is_signed_type(void *), false);
+ KUNIT_EXPECT_EQ(test, is_signed_type(const char *), false);
+}
+
+static struct kunit_case is_signed_type_test_cases[] = {
+ KUNIT_CASE(is_signed_type_test),
+ {}
+};
+
+static struct kunit_suite is_signed_type_test_suite = {
+ .name = "is_signed_type",
+ .test_cases = is_signed_type_test_cases,
+};
+
+kunit_test_suite(is_signed_type_test_suite);
+
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
new file mode 100644
index 000000000..8c98b20bf
--- /dev/null
+++ b/lib/is_single_threaded.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Function to determine if a thread group is single threaded or not
+ *
+ * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ * - Derived from security/selinux/hooks.c
+ */
+#include <linux/sched/signal.h>
+#include <linux/sched/task.h>
+#include <linux/sched/mm.h>
+
+/*
+ * Returns true if the task does not share ->mm with another thread/process.
+ */
+bool current_is_single_threaded(void)
+{
+ struct task_struct *task = current;
+ struct mm_struct *mm = task->mm;
+ struct task_struct *p, *t;
+ bool ret;
+
+ if (atomic_read(&task->signal->live) != 1)
+ return false;
+
+ if (atomic_read(&mm->mm_users) == 1)
+ return true;
+
+ ret = false;
+ rcu_read_lock();
+ for_each_process(p) {
+ if (unlikely(p->flags & PF_KTHREAD))
+ continue;
+ if (unlikely(p == task->group_leader))
+ continue;
+
+ for_each_thread(p, t) {
+ if (unlikely(t->mm == mm))
+ goto found;
+ if (likely(t->mm))
+ break;
+ /*
+ * t->mm == NULL. Make sure next_thread/next_task
+ * will see other CLONE_VM tasks which might be
+ * forked before exiting.
+ */
+ smp_rmb();
+ }
+ }
+ ret = true;
+found:
+ rcu_read_unlock();
+
+ return ret;
+}
diff --git a/lib/kasprintf.c b/lib/kasprintf.c
new file mode 100644
index 000000000..cd2f5974e
--- /dev/null
+++ b/lib/kasprintf.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * linux/lib/kasprintf.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
+#include <linux/stdarg.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/string.h>
+
+/* Simplified asprintf. */
+char *kvasprintf(gfp_t gfp, const char *fmt, va_list ap)
+{
+ unsigned int first, second;
+ char *p;
+ va_list aq;
+
+ va_copy(aq, ap);
+ first = vsnprintf(NULL, 0, fmt, aq);
+ va_end(aq);
+
+ p = kmalloc_track_caller(first+1, gfp);
+ if (!p)
+ return NULL;
+
+ second = vsnprintf(p, first+1, fmt, ap);
+ WARN(first != second, "different return values (%u and %u) from vsnprintf(\"%s\", ...)",
+ first, second, fmt);
+
+ return p;
+}
+EXPORT_SYMBOL(kvasprintf);
+
+/*
+ * If fmt contains no % (or is exactly %s), use kstrdup_const. If fmt
+ * (or the sole vararg) points to rodata, we will then save a memory
+ * allocation and string copy. In any case, the return value should be
+ * freed using kfree_const().
+ */
+const char *kvasprintf_const(gfp_t gfp, const char *fmt, va_list ap)
+{
+ if (!strchr(fmt, '%'))
+ return kstrdup_const(fmt, gfp);
+ if (!strcmp(fmt, "%s"))
+ return kstrdup_const(va_arg(ap, const char*), gfp);
+ return kvasprintf(gfp, fmt, ap);
+}
+EXPORT_SYMBOL(kvasprintf_const);
+
+char *kasprintf(gfp_t gfp, const char *fmt, ...)
+{
+ va_list ap;
+ char *p;
+
+ va_start(ap, fmt);
+ p = kvasprintf(gfp, fmt, ap);
+ va_end(ap);
+
+ return p;
+}
+EXPORT_SYMBOL(kasprintf);
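+
+/*
+ * Example (illustrative): building a heap-allocated name.  The caller owns
+ * the result and must kfree() it (kfree_const() for kvasprintf_const()).
+ *
+ *	char *name = kasprintf(GFP_KERNEL, "queue-%d", qid);
+ *	if (!name)
+ *		return -ENOMEM;
+ *	// ... use name ...
+ *	kfree(name);
+ */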
diff --git a/lib/kfifo.c b/lib/kfifo.c
new file mode 100644
index 000000000..12f5a347a
--- /dev/null
+++ b/lib/kfifo.c
@@ -0,0 +1,592 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * A generic kernel FIFO implementation
+ *
+ * Copyright (C) 2009/2010 Stefani Seibold <stefani@seibold.net>
+ */
+
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/log2.h>
+#include <linux/uaccess.h>
+#include <linux/kfifo.h>
+
+/*
+ * internal helper to calculate the unused elements in a fifo
+ */
+static inline unsigned int kfifo_unused(struct __kfifo *fifo)
+{
+ return (fifo->mask + 1) - (fifo->in - fifo->out);
+}
+
+int __kfifo_alloc(struct __kfifo *fifo, unsigned int size,
+ size_t esize, gfp_t gfp_mask)
+{
+ /*
+ * round up to the next power of 2, since our 'let the indices
+ * wrap' technique works only in this case.
+ */
+ size = roundup_pow_of_two(size);
+
+ fifo->in = 0;
+ fifo->out = 0;
+ fifo->esize = esize;
+
+ if (size < 2) {
+ fifo->data = NULL;
+ fifo->mask = 0;
+ return -EINVAL;
+ }
+
+ fifo->data = kmalloc_array(esize, size, gfp_mask);
+
+ if (!fifo->data) {
+ fifo->mask = 0;
+ return -ENOMEM;
+ }
+ fifo->mask = size - 1;
+
+ return 0;
+}
+EXPORT_SYMBOL(__kfifo_alloc);
+
+void __kfifo_free(struct __kfifo *fifo)
+{
+ kfree(fifo->data);
+ fifo->in = 0;
+ fifo->out = 0;
+ fifo->esize = 0;
+ fifo->data = NULL;
+ fifo->mask = 0;
+}
+EXPORT_SYMBOL(__kfifo_free);
+
+int __kfifo_init(struct __kfifo *fifo, void *buffer,
+ unsigned int size, size_t esize)
+{
+ size /= esize;
+
+ if (!is_power_of_2(size))
+ size = rounddown_pow_of_two(size);
+
+ fifo->in = 0;
+ fifo->out = 0;
+ fifo->esize = esize;
+ fifo->data = buffer;
+
+ if (size < 2) {
+ fifo->mask = 0;
+ return -EINVAL;
+ }
+ fifo->mask = size - 1;
+
+ return 0;
+}
+EXPORT_SYMBOL(__kfifo_init);
+
+static void kfifo_copy_in(struct __kfifo *fifo, const void *src,
+ unsigned int len, unsigned int off)
+{
+ unsigned int size = fifo->mask + 1;
+ unsigned int esize = fifo->esize;
+ unsigned int l;
+
+ off &= fifo->mask;
+ if (esize != 1) {
+ off *= esize;
+ size *= esize;
+ len *= esize;
+ }
+ l = min(len, size - off);
+
+ memcpy(fifo->data + off, src, l);
+ memcpy(fifo->data, src + l, len - l);
+ /*
+ * make sure that the data in the fifo is up to date before
+ * incrementing the fifo->in index counter
+ */
+ smp_wmb();
+}
+
+unsigned int __kfifo_in(struct __kfifo *fifo,
+ const void *buf, unsigned int len)
+{
+ unsigned int l;
+
+ l = kfifo_unused(fifo);
+ if (len > l)
+ len = l;
+
+ kfifo_copy_in(fifo, buf, len, fifo->in);
+ fifo->in += len;
+ return len;
+}
+EXPORT_SYMBOL(__kfifo_in);
+
+static void kfifo_copy_out(struct __kfifo *fifo, void *dst,
+ unsigned int len, unsigned int off)
+{
+ unsigned int size = fifo->mask + 1;
+ unsigned int esize = fifo->esize;
+ unsigned int l;
+
+ off &= fifo->mask;
+ if (esize != 1) {
+ off *= esize;
+ size *= esize;
+ len *= esize;
+ }
+ l = min(len, size - off);
+
+ memcpy(dst, fifo->data + off, l);
+ memcpy(dst + l, fifo->data, len - l);
+ /*
+ * make sure that the data is copied before
+ * incrementing the fifo->out index counter
+ */
+ smp_wmb();
+}
+
+unsigned int __kfifo_out_peek(struct __kfifo *fifo,
+ void *buf, unsigned int len)
+{
+ unsigned int l;
+
+ l = fifo->in - fifo->out;
+ if (len > l)
+ len = l;
+
+ kfifo_copy_out(fifo, buf, len, fifo->out);
+ return len;
+}
+EXPORT_SYMBOL(__kfifo_out_peek);
+
+unsigned int __kfifo_out(struct __kfifo *fifo,
+ void *buf, unsigned int len)
+{
+ len = __kfifo_out_peek(fifo, buf, len);
+ fifo->out += len;
+ return len;
+}
+EXPORT_SYMBOL(__kfifo_out);
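+
+/*
+ * Example (illustrative sketch): the __kfifo_* functions above are the
+ * backends of the macros in <linux/kfifo.h>, which is what callers use.
+ * A single-producer/single-consumer byte fifo might look like:
+ *
+ *	static DEFINE_KFIFO(my_fifo, unsigned char, 128);
+ *
+ *	// producer
+ *	unsigned char in[4] = { 1, 2, 3, 4 };
+ *	unsigned int n = kfifo_in(&my_fifo, in, sizeof(in));
+ *
+ *	// consumer
+ *	unsigned char out[4];
+ *	n = kfifo_out(&my_fifo, out, sizeof(out));
+ */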
+
+static unsigned long kfifo_copy_from_user(struct __kfifo *fifo,
+ const void __user *from, unsigned int len, unsigned int off,
+ unsigned int *copied)
+{
+ unsigned int size = fifo->mask + 1;
+ unsigned int esize = fifo->esize;
+ unsigned int l;
+ unsigned long ret;
+
+ off &= fifo->mask;
+ if (esize != 1) {
+ off *= esize;
+ size *= esize;
+ len *= esize;
+ }
+ l = min(len, size - off);
+
+ ret = copy_from_user(fifo->data + off, from, l);
+ if (unlikely(ret))
+ ret = DIV_ROUND_UP(ret + len - l, esize);
+ else {
+ ret = copy_from_user(fifo->data, from + l, len - l);
+ if (unlikely(ret))
+ ret = DIV_ROUND_UP(ret, esize);
+ }
+ /*
+ * make sure that the data in the fifo is up to date before
+ * incrementing the fifo->in index counter
+ */
+ smp_wmb();
+ *copied = len - ret * esize;
+ /* return the number of elements which are not copied */
+ return ret;
+}
+
+int __kfifo_from_user(struct __kfifo *fifo, const void __user *from,
+ unsigned long len, unsigned int *copied)
+{
+ unsigned int l;
+ unsigned long ret;
+ unsigned int esize = fifo->esize;
+ int err;
+
+ if (esize != 1)
+ len /= esize;
+
+ l = kfifo_unused(fifo);
+ if (len > l)
+ len = l;
+
+ ret = kfifo_copy_from_user(fifo, from, len, fifo->in, copied);
+ if (unlikely(ret)) {
+ len -= ret;
+ err = -EFAULT;
+ } else
+ err = 0;
+ fifo->in += len;
+ return err;
+}
+EXPORT_SYMBOL(__kfifo_from_user);
+
+static unsigned long kfifo_copy_to_user(struct __kfifo *fifo, void __user *to,
+ unsigned int len, unsigned int off, unsigned int *copied)
+{
+ unsigned int l;
+ unsigned long ret;
+ unsigned int size = fifo->mask + 1;
+ unsigned int esize = fifo->esize;
+
+ off &= fifo->mask;
+ if (esize != 1) {
+ off *= esize;
+ size *= esize;
+ len *= esize;
+ }
+ l = min(len, size - off);
+
+ ret = copy_to_user(to, fifo->data + off, l);
+ if (unlikely(ret))
+ ret = DIV_ROUND_UP(ret + len - l, esize);
+ else {
+ ret = copy_to_user(to + l, fifo->data, len - l);
+ if (unlikely(ret))
+ ret = DIV_ROUND_UP(ret, esize);
+ }
+ /*
+ * make sure that the data is copied before
+ * incrementing the fifo->out index counter
+ */
+ smp_wmb();
+ *copied = len - ret * esize;
+ /* return the number of elements which are not copied */
+ return ret;
+}
+
+int __kfifo_to_user(struct __kfifo *fifo, void __user *to,
+ unsigned long len, unsigned int *copied)
+{
+ unsigned int l;
+ unsigned long ret;
+ unsigned int esize = fifo->esize;
+ int err;
+
+ if (esize != 1)
+ len /= esize;
+
+ l = fifo->in - fifo->out;
+ if (len > l)
+ len = l;
+ ret = kfifo_copy_to_user(fifo, to, len, fifo->out, copied);
+ if (unlikely(ret)) {
+ len -= ret;
+ err = -EFAULT;
+ } else
+ err = 0;
+ fifo->out += len;
+ return err;
+}
+EXPORT_SYMBOL(__kfifo_to_user);
+
+static int setup_sgl_buf(struct scatterlist *sgl, void *buf,
+ int nents, unsigned int len)
+{
+ int n;
+ unsigned int l;
+ unsigned int off;
+ struct page *page;
+
+ if (!nents)
+ return 0;
+
+ if (!len)
+ return 0;
+
+ n = 0;
+ page = virt_to_page(buf);
+ off = offset_in_page(buf);
+ l = 0;
+
+ while (len >= l + PAGE_SIZE - off) {
+ struct page *npage;
+
+ l += PAGE_SIZE;
+ buf += PAGE_SIZE;
+ npage = virt_to_page(buf);
+ if (page_to_phys(page) != page_to_phys(npage) - l) {
+ sg_set_page(sgl, page, l - off, off);
+ sgl = sg_next(sgl);
+ if (++n == nents || sgl == NULL)
+ return n;
+ page = npage;
+ len -= l - off;
+ l = off = 0;
+ }
+ }
+ sg_set_page(sgl, page, len, off);
+ return n + 1;
+}
+
+static unsigned int setup_sgl(struct __kfifo *fifo, struct scatterlist *sgl,
+ int nents, unsigned int len, unsigned int off)
+{
+ unsigned int size = fifo->mask + 1;
+ unsigned int esize = fifo->esize;
+ unsigned int l;
+ unsigned int n;
+
+ off &= fifo->mask;
+ if (esize != 1) {
+ off *= esize;
+ size *= esize;
+ len *= esize;
+ }
+ l = min(len, size - off);
+
+ n = setup_sgl_buf(sgl, fifo->data + off, nents, l);
+ n += setup_sgl_buf(sgl + n, fifo->data, nents - n, len - l);
+
+ return n;
+}
+
+unsigned int __kfifo_dma_in_prepare(struct __kfifo *fifo,
+ struct scatterlist *sgl, int nents, unsigned int len)
+{
+ unsigned int l;
+
+ l = kfifo_unused(fifo);
+ if (len > l)
+ len = l;
+
+ return setup_sgl(fifo, sgl, nents, len, fifo->in);
+}
+EXPORT_SYMBOL(__kfifo_dma_in_prepare);
+
+unsigned int __kfifo_dma_out_prepare(struct __kfifo *fifo,
+ struct scatterlist *sgl, int nents, unsigned int len)
+{
+ unsigned int l;
+
+ l = fifo->in - fifo->out;
+ if (len > l)
+ len = l;
+
+ return setup_sgl(fifo, sgl, nents, len, fifo->out);
+}
+EXPORT_SYMBOL(__kfifo_dma_out_prepare);
+
+unsigned int __kfifo_max_r(unsigned int len, size_t recsize)
+{
+ unsigned int max = (1 << (recsize << 3)) - 1;
+
+ if (len > max)
+ return max;
+ return len;
+}
+EXPORT_SYMBOL(__kfifo_max_r);
+
+#define __KFIFO_PEEK(data, out, mask) \
+ ((data)[(out) & (mask)])
+/*
+ * __kfifo_peek_n internal helper function for determining the length of
+ * the next record in the fifo
+ */
+static unsigned int __kfifo_peek_n(struct __kfifo *fifo, size_t recsize)
+{
+ unsigned int l;
+ unsigned int mask = fifo->mask;
+ unsigned char *data = fifo->data;
+
+ l = __KFIFO_PEEK(data, fifo->out, mask);
+
+ if (--recsize)
+ l |= __KFIFO_PEEK(data, fifo->out + 1, mask) << 8;
+
+ return l;
+}
+
+#define __KFIFO_POKE(data, in, mask, val) \
+ ( \
+ (data)[(in) & (mask)] = (unsigned char)(val) \
+ )
+
+/*
+ * __kfifo_poke_n internal helper function for storing the length of
+ * the record into the fifo
+ */
+static void __kfifo_poke_n(struct __kfifo *fifo, unsigned int n, size_t recsize)
+{
+ unsigned int mask = fifo->mask;
+ unsigned char *data = fifo->data;
+
+ __KFIFO_POKE(data, fifo->in, mask, n);
+
+ if (recsize > 1)
+ __KFIFO_POKE(data, fifo->in + 1, mask, n >> 8);
+}
+
+unsigned int __kfifo_len_r(struct __kfifo *fifo, size_t recsize)
+{
+ return __kfifo_peek_n(fifo, recsize);
+}
+EXPORT_SYMBOL(__kfifo_len_r);
+
+unsigned int __kfifo_in_r(struct __kfifo *fifo, const void *buf,
+ unsigned int len, size_t recsize)
+{
+ if (len + recsize > kfifo_unused(fifo))
+ return 0;
+
+ __kfifo_poke_n(fifo, len, recsize);
+
+ kfifo_copy_in(fifo, buf, len, fifo->in + recsize);
+ fifo->in += len + recsize;
+ return len;
+}
+EXPORT_SYMBOL(__kfifo_in_r);
+
+static unsigned int kfifo_out_copy_r(struct __kfifo *fifo,
+ void *buf, unsigned int len, size_t recsize, unsigned int *n)
+{
+ *n = __kfifo_peek_n(fifo, recsize);
+
+ if (len > *n)
+ len = *n;
+
+ kfifo_copy_out(fifo, buf, len, fifo->out + recsize);
+ return len;
+}
+
+unsigned int __kfifo_out_peek_r(struct __kfifo *fifo, void *buf,
+ unsigned int len, size_t recsize)
+{
+ unsigned int n;
+
+ if (fifo->in == fifo->out)
+ return 0;
+
+ return kfifo_out_copy_r(fifo, buf, len, recsize, &n);
+}
+EXPORT_SYMBOL(__kfifo_out_peek_r);
+
+unsigned int __kfifo_out_r(struct __kfifo *fifo, void *buf,
+ unsigned int len, size_t recsize)
+{
+ unsigned int n;
+
+ if (fifo->in == fifo->out)
+ return 0;
+
+ len = kfifo_out_copy_r(fifo, buf, len, recsize, &n);
+ fifo->out += n + recsize;
+ return len;
+}
+EXPORT_SYMBOL(__kfifo_out_r);
+
+void __kfifo_skip_r(struct __kfifo *fifo, size_t recsize)
+{
+ unsigned int n;
+
+ n = __kfifo_peek_n(fifo, recsize);
+ fifo->out += n + recsize;
+}
+EXPORT_SYMBOL(__kfifo_skip_r);
+
+int __kfifo_from_user_r(struct __kfifo *fifo, const void __user *from,
+ unsigned long len, unsigned int *copied, size_t recsize)
+{
+ unsigned long ret;
+
+ len = __kfifo_max_r(len, recsize);
+
+ if (len + recsize > kfifo_unused(fifo)) {
+ *copied = 0;
+ return 0;
+ }
+
+ __kfifo_poke_n(fifo, len, recsize);
+
+ ret = kfifo_copy_from_user(fifo, from, len, fifo->in + recsize, copied);
+ if (unlikely(ret)) {
+ *copied = 0;
+ return -EFAULT;
+ }
+ fifo->in += len + recsize;
+ return 0;
+}
+EXPORT_SYMBOL(__kfifo_from_user_r);
+
+int __kfifo_to_user_r(struct __kfifo *fifo, void __user *to,
+ unsigned long len, unsigned int *copied, size_t recsize)
+{
+ unsigned long ret;
+ unsigned int n;
+
+ if (fifo->in == fifo->out) {
+ *copied = 0;
+ return 0;
+ }
+
+ n = __kfifo_peek_n(fifo, recsize);
+ if (len > n)
+ len = n;
+
+ ret = kfifo_copy_to_user(fifo, to, len, fifo->out + recsize, copied);
+ if (unlikely(ret)) {
+ *copied = 0;
+ return -EFAULT;
+ }
+ fifo->out += n + recsize;
+ return 0;
+}
+EXPORT_SYMBOL(__kfifo_to_user_r);
+
+unsigned int __kfifo_dma_in_prepare_r(struct __kfifo *fifo,
+ struct scatterlist *sgl, int nents, unsigned int len, size_t recsize)
+{
+ BUG_ON(!nents);
+
+ len = __kfifo_max_r(len, recsize);
+
+ if (len + recsize > kfifo_unused(fifo))
+ return 0;
+
+ return setup_sgl(fifo, sgl, nents, len, fifo->in + recsize);
+}
+EXPORT_SYMBOL(__kfifo_dma_in_prepare_r);
+
+void __kfifo_dma_in_finish_r(struct __kfifo *fifo,
+ unsigned int len, size_t recsize)
+{
+ len = __kfifo_max_r(len, recsize);
+ __kfifo_poke_n(fifo, len, recsize);
+ fifo->in += len + recsize;
+}
+EXPORT_SYMBOL(__kfifo_dma_in_finish_r);
+
+unsigned int __kfifo_dma_out_prepare_r(struct __kfifo *fifo,
+ struct scatterlist *sgl, int nents, unsigned int len, size_t recsize)
+{
+ BUG_ON(!nents);
+
+ len = __kfifo_max_r(len, recsize);
+
+ if (len + recsize > fifo->in - fifo->out)
+ return 0;
+
+ return setup_sgl(fifo, sgl, nents, len, fifo->out + recsize);
+}
+EXPORT_SYMBOL(__kfifo_dma_out_prepare_r);
+
+void __kfifo_dma_out_finish_r(struct __kfifo *fifo, size_t recsize)
+{
+ unsigned int len;
+
+ len = __kfifo_peek_n(fifo, recsize);
+ fifo->out += len + recsize;
+}
+EXPORT_SYMBOL(__kfifo_dma_out_finish_r);
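+
+/*
+ * Example (illustrative sketch, modelled on samples/kfifo/record-example.c):
+ * the *_r variants above implement variable-length records, each preceded
+ * by its recsize-byte length header.
+ *
+ *	typedef STRUCT_KFIFO_REC_1(128) my_rec_fifo;	// 1-byte length field
+ *	static my_rec_fifo fifo;
+ *
+ *	INIT_KFIFO(fifo);
+ *	kfifo_in(&fifo, "hello", 5);		  // stores one 5-byte record
+ *	len = kfifo_out(&fifo, buf, sizeof(buf)); // pops the whole record
+ */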
diff --git a/lib/klist.c b/lib/klist.c
new file mode 100644
index 000000000..332a4fbf1
--- /dev/null
+++ b/lib/klist.c
@@ -0,0 +1,407 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * klist.c - Routines for manipulating klists.
+ *
+ * Copyright (C) 2005 Patrick Mochel
+ *
+ * This klist interface provides a couple of structures that wrap around
+ * struct list_head to provide explicit list "head" (struct klist) and list
+ * "node" (struct klist_node) objects. For struct klist, a spinlock is
+ * included that protects access to the actual list itself. struct
+ * klist_node provides a pointer to the klist that owns it and a kref
+ * reference count that indicates the number of current users of that node
+ * in the list.
+ *
+ * The entire point is to provide an interface for iterating over a list
+ * that is safe and allows for modification of the list during the
+ * iteration (e.g. insertion and removal), including modification of the
+ * current node on the list.
+ *
+ * It works using a 3rd object type - struct klist_iter - that is declared
+ * and initialized before an iteration. klist_next() is used to acquire the
+ * next element in the list. It returns NULL if there are no more items.
+ * Internally, that routine takes the klist's lock, decrements the
+ * reference count of the previous klist_node and increments the count of
+ * the next klist_node. It then drops the lock and returns.
+ *
+ * There are primitives for adding and removing nodes to/from a klist.
+ * When deleting, klist_del() will simply decrement the reference count.
+ * Only when the count goes to 0 is the node removed from the list.
+ * klist_remove() will try to delete the node from the list and block until
+ * it is actually removed. This is useful for objects (like devices) that
+ * have been removed from the system and must be freed (but must wait until
+ * all accessors have finished).
+ */
+
+#include <linux/klist.h>
+#include <linux/export.h>
+#include <linux/sched.h>
+
+/*
+ * Use the lowest bit of n_klist to mark deleted nodes and exclude
+ * dead ones from iteration.
+ */
+#define KNODE_DEAD 1LU
+#define KNODE_KLIST_MASK ~KNODE_DEAD
+
+static struct klist *knode_klist(struct klist_node *knode)
+{
+ return (struct klist *)
+ ((unsigned long)knode->n_klist & KNODE_KLIST_MASK);
+}
+
+static bool knode_dead(struct klist_node *knode)
+{
+ return (unsigned long)knode->n_klist & KNODE_DEAD;
+}
+
+static void knode_set_klist(struct klist_node *knode, struct klist *klist)
+{
+ knode->n_klist = klist;
+ /* no knode deserves to start its life dead */
+ WARN_ON(knode_dead(knode));
+}
+
+static void knode_kill(struct klist_node *knode)
+{
+ /* and no knode should die twice ever either, see we're very humane */
+ WARN_ON(knode_dead(knode));
+ *(unsigned long *)&knode->n_klist |= KNODE_DEAD;
+}
+
+/**
+ * klist_init - Initialize a klist structure.
+ * @k: The klist we're initializing.
+ * @get: The get function for the embedding object (NULL if none)
+ * @put: The put function for the embedding object (NULL if none)
+ *
+ * Initialises the klist structure. If the klist_node structures are
+ * going to be embedded in refcounted objects (necessary for safe
+ * deletion) then the get/put arguments are used to initialise
+ * functions that take and release references on the embedding
+ * objects.
+ */
+void klist_init(struct klist *k, void (*get)(struct klist_node *),
+ void (*put)(struct klist_node *))
+{
+ INIT_LIST_HEAD(&k->k_list);
+ spin_lock_init(&k->k_lock);
+ k->get = get;
+ k->put = put;
+}
+EXPORT_SYMBOL_GPL(klist_init);
+
+static void add_head(struct klist *k, struct klist_node *n)
+{
+ spin_lock(&k->k_lock);
+ list_add(&n->n_node, &k->k_list);
+ spin_unlock(&k->k_lock);
+}
+
+static void add_tail(struct klist *k, struct klist_node *n)
+{
+ spin_lock(&k->k_lock);
+ list_add_tail(&n->n_node, &k->k_list);
+ spin_unlock(&k->k_lock);
+}
+
+static void klist_node_init(struct klist *k, struct klist_node *n)
+{
+ INIT_LIST_HEAD(&n->n_node);
+ kref_init(&n->n_ref);
+ knode_set_klist(n, k);
+ if (k->get)
+ k->get(n);
+}
+
+/**
+ * klist_add_head - Initialize a klist_node and add it to front.
+ * @n: node we're adding.
+ * @k: klist it's going on.
+ */
+void klist_add_head(struct klist_node *n, struct klist *k)
+{
+ klist_node_init(k, n);
+ add_head(k, n);
+}
+EXPORT_SYMBOL_GPL(klist_add_head);
+
+/**
+ * klist_add_tail - Initialize a klist_node and add it to back.
+ * @n: node we're adding.
+ * @k: klist it's going on.
+ */
+void klist_add_tail(struct klist_node *n, struct klist *k)
+{
+ klist_node_init(k, n);
+ add_tail(k, n);
+}
+EXPORT_SYMBOL_GPL(klist_add_tail);
+
+/**
+ * klist_add_behind - Init a klist_node and add it after an existing node
+ * @n: node we're adding.
+ * @pos: node to put @n after
+ */
+void klist_add_behind(struct klist_node *n, struct klist_node *pos)
+{
+ struct klist *k = knode_klist(pos);
+
+ klist_node_init(k, n);
+ spin_lock(&k->k_lock);
+ list_add(&n->n_node, &pos->n_node);
+ spin_unlock(&k->k_lock);
+}
+EXPORT_SYMBOL_GPL(klist_add_behind);
+
+/**
+ * klist_add_before - Init a klist_node and add it before an existing node
+ * @n: node we're adding.
+ * @pos: node to put @n before
+ */
+void klist_add_before(struct klist_node *n, struct klist_node *pos)
+{
+ struct klist *k = knode_klist(pos);
+
+ klist_node_init(k, n);
+ spin_lock(&k->k_lock);
+ list_add_tail(&n->n_node, &pos->n_node);
+ spin_unlock(&k->k_lock);
+}
+EXPORT_SYMBOL_GPL(klist_add_before);
+
+struct klist_waiter {
+ struct list_head list;
+ struct klist_node *node;
+ struct task_struct *process;
+ int woken;
+};
+
+static DEFINE_SPINLOCK(klist_remove_lock);
+static LIST_HEAD(klist_remove_waiters);
+
+static void klist_release(struct kref *kref)
+{
+ struct klist_waiter *waiter, *tmp;
+ struct klist_node *n = container_of(kref, struct klist_node, n_ref);
+
+ WARN_ON(!knode_dead(n));
+ list_del(&n->n_node);
+ spin_lock(&klist_remove_lock);
+ list_for_each_entry_safe(waiter, tmp, &klist_remove_waiters, list) {
+ if (waiter->node != n)
+ continue;
+
+ list_del(&waiter->list);
+ waiter->woken = 1;
+ mb();
+ wake_up_process(waiter->process);
+ }
+ spin_unlock(&klist_remove_lock);
+ knode_set_klist(n, NULL);
+}
+
+static int klist_dec_and_del(struct klist_node *n)
+{
+ return kref_put(&n->n_ref, klist_release);
+}
+
+static void klist_put(struct klist_node *n, bool kill)
+{
+ struct klist *k = knode_klist(n);
+ void (*put)(struct klist_node *) = k->put;
+
+ spin_lock(&k->k_lock);
+ if (kill)
+ knode_kill(n);
+ if (!klist_dec_and_del(n))
+ put = NULL;
+ spin_unlock(&k->k_lock);
+ if (put)
+ put(n);
+}
+
+/**
+ * klist_del - Decrement the reference count of node and try to remove.
+ * @n: node we're deleting.
+ */
+void klist_del(struct klist_node *n)
+{
+ klist_put(n, true);
+}
+EXPORT_SYMBOL_GPL(klist_del);
+
+/**
+ * klist_remove - Decrement the refcount of node and wait for it to go away.
+ * @n: node we're removing.
+ */
+void klist_remove(struct klist_node *n)
+{
+ struct klist_waiter waiter;
+
+ waiter.node = n;
+ waiter.process = current;
+ waiter.woken = 0;
+ spin_lock(&klist_remove_lock);
+ list_add(&waiter.list, &klist_remove_waiters);
+ spin_unlock(&klist_remove_lock);
+
+ klist_del(n);
+
+ for (;;) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ if (waiter.woken)
+ break;
+ schedule();
+ }
+ __set_current_state(TASK_RUNNING);
+}
+EXPORT_SYMBOL_GPL(klist_remove);
+
+/**
+ * klist_node_attached - Say whether a node is bound to a list or not.
+ * @n: Node that we're testing.
+ */
+int klist_node_attached(struct klist_node *n)
+{
+ return (n->n_klist != NULL);
+}
+EXPORT_SYMBOL_GPL(klist_node_attached);
+
+/**
+ * klist_iter_init_node - Initialize a klist_iter structure.
+ * @k: klist we're iterating.
+ * @i: klist_iter we're filling.
+ * @n: node to start with.
+ *
+ * Similar to klist_iter_init(), but starts the action off with @n,
+ * instead of with the list head.
+ */
+void klist_iter_init_node(struct klist *k, struct klist_iter *i,
+ struct klist_node *n)
+{
+ i->i_klist = k;
+ i->i_cur = NULL;
+ if (n && kref_get_unless_zero(&n->n_ref))
+ i->i_cur = n;
+}
+EXPORT_SYMBOL_GPL(klist_iter_init_node);
+
+/**
+ * klist_iter_init - Initialize a klist_iter structure.
+ * @k: klist we're iterating.
+ * @i: klist_iter structure we're filling.
+ *
+ * Similar to klist_iter_init_node(), but start with the list head.
+ */
+void klist_iter_init(struct klist *k, struct klist_iter *i)
+{
+ klist_iter_init_node(k, i, NULL);
+}
+EXPORT_SYMBOL_GPL(klist_iter_init);
+
+/**
+ * klist_iter_exit - Finish a list iteration.
+ * @i: Iterator structure.
+ *
+ * Must be called when done iterating over list, as it decrements the
+ * refcount of the current node. Necessary in case iteration exited before
+ * the end of the list was reached, and always good form.
+ */
+void klist_iter_exit(struct klist_iter *i)
+{
+ if (i->i_cur) {
+ klist_put(i->i_cur, false);
+ i->i_cur = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(klist_iter_exit);
+
+static struct klist_node *to_klist_node(struct list_head *n)
+{
+ return container_of(n, struct klist_node, n_node);
+}
+
+/**
+ * klist_prev - Ante up prev node in list.
+ * @i: Iterator structure.
+ *
+ * First grab list lock. Decrement the reference count of the previous
+ * node, if there was one. Grab the prev node, increment its reference
+ * count, drop the lock, and return that prev node.
+ */
+struct klist_node *klist_prev(struct klist_iter *i)
+{
+ void (*put)(struct klist_node *) = i->i_klist->put;
+ struct klist_node *last = i->i_cur;
+ struct klist_node *prev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&i->i_klist->k_lock, flags);
+
+ if (last) {
+ prev = to_klist_node(last->n_node.prev);
+ if (!klist_dec_and_del(last))
+ put = NULL;
+ } else
+ prev = to_klist_node(i->i_klist->k_list.prev);
+
+ i->i_cur = NULL;
+ while (prev != to_klist_node(&i->i_klist->k_list)) {
+ if (likely(!knode_dead(prev))) {
+ kref_get(&prev->n_ref);
+ i->i_cur = prev;
+ break;
+ }
+ prev = to_klist_node(prev->n_node.prev);
+ }
+
+ spin_unlock_irqrestore(&i->i_klist->k_lock, flags);
+
+ if (put && last)
+ put(last);
+ return i->i_cur;
+}
+EXPORT_SYMBOL_GPL(klist_prev);
+
+/**
+ * klist_next - Ante up next node in list.
+ * @i: Iterator structure.
+ *
+ * First grab list lock. Decrement the reference count of the previous
+ * node, if there was one. Grab the next node, increment its reference
+ * count, drop the lock, and return that next node.
+ */
+struct klist_node *klist_next(struct klist_iter *i)
+{
+ void (*put)(struct klist_node *) = i->i_klist->put;
+ struct klist_node *last = i->i_cur;
+ struct klist_node *next;
+ unsigned long flags;
+
+ spin_lock_irqsave(&i->i_klist->k_lock, flags);
+
+ if (last) {
+ next = to_klist_node(last->n_node.next);
+ if (!klist_dec_and_del(last))
+ put = NULL;
+ } else
+ next = to_klist_node(i->i_klist->k_list.next);
+
+ i->i_cur = NULL;
+ while (next != to_klist_node(&i->i_klist->k_list)) {
+ if (likely(!knode_dead(next))) {
+ kref_get(&next->n_ref);
+ i->i_cur = next;
+ break;
+ }
+ next = to_klist_node(next->n_node.next);
+ }
+
+ spin_unlock_irqrestore(&i->i_klist->k_lock, flags);
+
+ if (put && last)
+ put(last);
+ return i->i_cur;
+}
+EXPORT_SYMBOL_GPL(klist_next);
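+
+/*
+ * Example (illustrative sketch): safe iteration over a klist.  struct
+ * my_node and its knode member are hypothetical.
+ *
+ *	struct klist_iter iter;
+ *	struct klist_node *n;
+ *
+ *	klist_iter_init(&my_klist, &iter);
+ *	while ((n = klist_next(&iter))) {
+ *		struct my_node *m = container_of(n, struct my_node, knode);
+ *		// ... use m; the node is referenced while we hold it ...
+ *	}
+ *	klist_iter_exit(&iter);	// required even after an early break
+ */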
diff --git a/lib/kobject.c b/lib/kobject.c
new file mode 100644
index 000000000..59dbcbdb1
--- /dev/null
+++ b/lib/kobject.c
@@ -0,0 +1,1127 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * kobject.c - library routines for handling generic kernel objects
+ *
+ * Copyright (c) 2002-2003 Patrick Mochel <mochel@osdl.org>
+ * Copyright (c) 2006-2007 Greg Kroah-Hartman <greg@kroah.com>
+ * Copyright (c) 2006-2007 Novell Inc.
+ *
+ * Please see the file Documentation/core-api/kobject.rst for critical information
+ * about using the kobject interface.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kobject.h>
+#include <linux/string.h>
+#include <linux/export.h>
+#include <linux/stat.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+
+/**
+ * kobject_namespace() - Return @kobj's namespace tag.
+ * @kobj: kobject in question
+ *
+ * Returns namespace tag of @kobj if its parent has namespace ops enabled
+ * and thus @kobj should have a namespace tag associated with it. Returns
+ * %NULL otherwise.
+ */
+const void *kobject_namespace(const struct kobject *kobj)
+{
+ const struct kobj_ns_type_operations *ns_ops = kobj_ns_ops(kobj);
+
+ if (!ns_ops || ns_ops->type == KOBJ_NS_TYPE_NONE)
+ return NULL;
+
+ return kobj->ktype->namespace(kobj);
+}
+
+/**
+ * kobject_get_ownership() - Get sysfs ownership data for @kobj.
+ * @kobj: kobject in question
+ * @uid: kernel user ID for sysfs objects
+ * @gid: kernel group ID for sysfs objects
+ *
+ * Returns initial uid/gid pair that should be used when creating sysfs
+ * representation of given kobject. Normally used to adjust ownership of
+ * objects in a container.
+ */
+void kobject_get_ownership(const struct kobject *kobj, kuid_t *uid, kgid_t *gid)
+{
+ *uid = GLOBAL_ROOT_UID;
+ *gid = GLOBAL_ROOT_GID;
+
+ if (kobj->ktype->get_ownership)
+ kobj->ktype->get_ownership(kobj, uid, gid);
+}
+
+static bool kobj_ns_type_is_valid(enum kobj_ns_type type)
+{
+ if ((type <= KOBJ_NS_TYPE_NONE) || (type >= KOBJ_NS_TYPES))
+ return false;
+
+ return true;
+}
+
+static int create_dir(struct kobject *kobj)
+{
+ const struct kobj_type *ktype = get_ktype(kobj);
+ const struct kobj_ns_type_operations *ops;
+ int error;
+
+ error = sysfs_create_dir_ns(kobj, kobject_namespace(kobj));
+ if (error)
+ return error;
+
+ error = sysfs_create_groups(kobj, ktype->default_groups);
+ if (error) {
+ sysfs_remove_dir(kobj);
+ return error;
+ }
+
+ /*
+ * @kobj->sd may be deleted by an ancestor going away. Hold an
+ * extra reference so that it stays until @kobj is gone.
+ */
+ sysfs_get(kobj->sd);
+
+ /*
+ * If @kobj has ns_ops, its children need to be filtered based on
+ * their namespace tags. Enable namespace support on @kobj->sd.
+ */
+ ops = kobj_child_ns_ops(kobj);
+ if (ops) {
+ BUG_ON(!kobj_ns_type_is_valid(ops->type));
+ BUG_ON(!kobj_ns_type_registered(ops->type));
+
+ sysfs_enable_ns(kobj->sd);
+ }
+
+ return 0;
+}
+
+static int get_kobj_path_length(const struct kobject *kobj)
+{
+ int length = 1;
+ const struct kobject *parent = kobj;
+
+ /* walk up the ancestors until we hit the one pointing to the
+ * root.
+ * Add 1 to strlen for leading '/' of each level.
+ */
+ do {
+ if (kobject_name(parent) == NULL)
+ return 0;
+ length += strlen(kobject_name(parent)) + 1;
+ parent = parent->parent;
+ } while (parent);
+ return length;
+}
+
+static int fill_kobj_path(const struct kobject *kobj, char *path, int length)
+{
+ const struct kobject *parent;
+
+ --length;
+ for (parent = kobj; parent; parent = parent->parent) {
+ int cur = strlen(kobject_name(parent));
+ /* back up enough to print this name with '/' */
+ length -= cur;
+ if (length <= 0)
+ return -EINVAL;
+ memcpy(path + length, kobject_name(parent), cur);
+ *(path + --length) = '/';
+ }
+
+ pr_debug("'%s' (%p): %s: path = '%s'\n", kobject_name(kobj),
+ kobj, __func__, path);
+
+ return 0;
+}
+
+/**
+ * kobject_get_path() - Allocate memory and fill in the path for @kobj.
+ * @kobj: kobject in question, with which to build the path
+ * @gfp_mask: the allocation type used to allocate the path
+ *
+ * Return: The newly allocated memory, caller must free with kfree().
+ */
+char *kobject_get_path(const struct kobject *kobj, gfp_t gfp_mask)
+{
+ char *path;
+ int len;
+
+retry:
+ len = get_kobj_path_length(kobj);
+ if (len == 0)
+ return NULL;
+ path = kzalloc(len, gfp_mask);
+ if (!path)
+ return NULL;
+ if (fill_kobj_path(kobj, path, len)) {
+ kfree(path);
+ goto retry;
+ }
+
+ return path;
+}
+EXPORT_SYMBOL_GPL(kobject_get_path);
+
+/* add the kobject to its kset's list */
+static void kobj_kset_join(struct kobject *kobj)
+{
+ if (!kobj->kset)
+ return;
+
+ kset_get(kobj->kset);
+ spin_lock(&kobj->kset->list_lock);
+ list_add_tail(&kobj->entry, &kobj->kset->list);
+ spin_unlock(&kobj->kset->list_lock);
+}
+
+/* remove the kobject from its kset's list */
+static void kobj_kset_leave(struct kobject *kobj)
+{
+ if (!kobj->kset)
+ return;
+
+ spin_lock(&kobj->kset->list_lock);
+ list_del_init(&kobj->entry);
+ spin_unlock(&kobj->kset->list_lock);
+ kset_put(kobj->kset);
+}
+
+static void kobject_init_internal(struct kobject *kobj)
+{
+ if (!kobj)
+ return;
+ kref_init(&kobj->kref);
+ INIT_LIST_HEAD(&kobj->entry);
+ kobj->state_in_sysfs = 0;
+ kobj->state_add_uevent_sent = 0;
+ kobj->state_remove_uevent_sent = 0;
+ kobj->state_initialized = 1;
+}
+
+
+static int kobject_add_internal(struct kobject *kobj)
+{
+ int error = 0;
+ struct kobject *parent;
+
+ if (!kobj)
+ return -ENOENT;
+
+ if (!kobj->name || !kobj->name[0]) {
+ WARN(1,
+ "kobject: (%p): attempted to be registered with empty name!\n",
+ kobj);
+ return -EINVAL;
+ }
+
+ parent = kobject_get(kobj->parent);
+
+ /* join kset if set, use it as parent if we do not already have one */
+ if (kobj->kset) {
+ if (!parent)
+ parent = kobject_get(&kobj->kset->kobj);
+ kobj_kset_join(kobj);
+ kobj->parent = parent;
+ }
+
+ pr_debug("'%s' (%p): %s: parent: '%s', set: '%s'\n",
+ kobject_name(kobj), kobj, __func__,
+ parent ? kobject_name(parent) : "<NULL>",
+ kobj->kset ? kobject_name(&kobj->kset->kobj) : "<NULL>");
+
+ error = create_dir(kobj);
+ if (error) {
+ kobj_kset_leave(kobj);
+ kobject_put(parent);
+ kobj->parent = NULL;
+
+ /* be noisy on error issues */
+ if (error == -EEXIST)
+ pr_err("%s failed for %s with -EEXIST, don't try to register things with the same name in the same directory.\n",
+ __func__, kobject_name(kobj));
+ else
+ pr_err("%s failed for %s (error: %d parent: %s)\n",
+ __func__, kobject_name(kobj), error,
+ parent ? kobject_name(parent) : "'none'");
+ } else
+ kobj->state_in_sysfs = 1;
+
+ return error;
+}
+
+/**
+ * kobject_set_name_vargs() - Set the name of a kobject.
+ * @kobj: struct kobject to set the name of
+ * @fmt: format string used to build the name
+ * @vargs: vargs to format the string.
+ */
+int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
+ va_list vargs)
+{
+ const char *s;
+
+ if (kobj->name && !fmt)
+ return 0;
+
+ s = kvasprintf_const(GFP_KERNEL, fmt, vargs);
+ if (!s)
+ return -ENOMEM;
+
+ /*
+ * ewww... some of these buggers have '/' in the name ... If
+ * that's the case, we need to make sure we have an actual
+ * allocated copy to modify, since kvasprintf_const may have
+ * returned something from .rodata.
+ */
+ if (strchr(s, '/')) {
+ char *t;
+
+ t = kstrdup(s, GFP_KERNEL);
+ kfree_const(s);
+ if (!t)
+ return -ENOMEM;
+ s = strreplace(t, '/', '!');
+ }
+ kfree_const(kobj->name);
+ kobj->name = s;
+
+ return 0;
+}
+
+/**
+ * kobject_set_name() - Set the name of a kobject.
+ * @kobj: struct kobject to set the name of
+ * @fmt: format string used to build the name
+ *
+ * This sets the name of the kobject. If you have already added the
+ * kobject to the system, you must call kobject_rename() in order to
+ * change the name of the kobject.
+ */
+int kobject_set_name(struct kobject *kobj, const char *fmt, ...)
+{
+ va_list vargs;
+ int retval;
+
+ va_start(vargs, fmt);
+ retval = kobject_set_name_vargs(kobj, fmt, vargs);
+ va_end(vargs);
+
+ return retval;
+}
+EXPORT_SYMBOL(kobject_set_name);
+
+/**
+ * kobject_init() - Initialize a kobject structure.
+ * @kobj: pointer to the kobject to initialize
+ * @ktype: pointer to the ktype for this kobject.
+ *
+ * This function will properly initialize a kobject such that it can then
+ * be passed to the kobject_add() call.
+ *
+ * After this function is called, the kobject MUST be cleaned up by a call
+ * to kobject_put(), not by a call to kfree() directly, to ensure that all
+ * of the memory is cleaned up properly.
+ */
+void kobject_init(struct kobject *kobj, const struct kobj_type *ktype)
+{
+ char *err_str;
+
+ if (!kobj) {
+ err_str = "invalid kobject pointer!";
+ goto error;
+ }
+ if (!ktype) {
+ err_str = "must have a ktype to be initialized properly!\n";
+ goto error;
+ }
+ if (kobj->state_initialized) {
+ /* do not error out as sometimes we can recover */
+ pr_err("kobject (%p): tried to init an initialized object, something is seriously wrong.\n",
+ kobj);
+ dump_stack_lvl(KERN_ERR);
+ }
+
+ kobject_init_internal(kobj);
+ kobj->ktype = ktype;
+ return;
+
+error:
+ pr_err("kobject (%p): %s\n", kobj, err_str);
+ dump_stack_lvl(KERN_ERR);
+}
+EXPORT_SYMBOL(kobject_init);
+
+static __printf(3, 0) int kobject_add_varg(struct kobject *kobj,
+ struct kobject *parent,
+ const char *fmt, va_list vargs)
+{
+ int retval;
+
+ retval = kobject_set_name_vargs(kobj, fmt, vargs);
+ if (retval) {
+ pr_err("can not set name properly!\n");
+ return retval;
+ }
+ kobj->parent = parent;
+ return kobject_add_internal(kobj);
+}
+
+/**
+ * kobject_add() - The main kobject add function.
+ * @kobj: the kobject to add
+ * @parent: pointer to the parent of the kobject.
+ * @fmt: format to name the kobject with.
+ *
+ * The kobject name is set and added to the kobject hierarchy in this
+ * function.
+ *
+ * If @parent is set, then the parent of the @kobj will be set to it.
+ * If @parent is NULL, then the parent of the @kobj will be set to the
+ * kobject associated with the kset assigned to this kobject. If no kset
+ * is assigned to the kobject, then the kobject will be located in the
+ * root of the sysfs tree.
+ *
+ * Note, no "add" uevent will be created with this call, the caller should set
+ * up all of the necessary sysfs files for the object and then call
+ * kobject_uevent() with the UEVENT_ADD parameter to ensure that
+ * userspace is properly notified of this kobject's creation.
+ *
+ * Return: If this function returns an error, kobject_put() must be
+ * called to properly clean up the memory associated with the
+ * object. Under no instance should the kobject that is passed
+ * to this function be directly freed with a call to kfree(),
+ * that can leak memory.
+ *
+ * If this function returns success, kobject_put() must also be called
+ * in order to properly clean up the memory associated with the object.
+ *
+ * In short, once this function is called, kobject_put() MUST be called
+ * when the use of the object is finished in order to properly free
+ * everything.
+ */
+int kobject_add(struct kobject *kobj, struct kobject *parent,
+ const char *fmt, ...)
+{
+ va_list args;
+ int retval;
+
+ if (!kobj)
+ return -EINVAL;
+
+ if (!kobj->state_initialized) {
+ pr_err("kobject '%s' (%p): tried to add an uninitialized object, something is seriously wrong.\n",
+ kobject_name(kobj), kobj);
+ dump_stack_lvl(KERN_ERR);
+ return -EINVAL;
+ }
+ va_start(args, fmt);
+ retval = kobject_add_varg(kobj, parent, fmt, args);
+ va_end(args);
+
+ return retval;
+}
+EXPORT_SYMBOL(kobject_add);
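+
+/*
+ * Editor's sketch of the two-stage pattern described above (hypothetical
+ * "demo" names, illustrative only): initialize, add, then announce. On
+ * failure the kobject must still be released with kobject_put(), never
+ * kfree():
+ *
+ *	kobject_init(&demo->kobj, &demo_ktype);
+ *	retval = kobject_add(&demo->kobj, parent, "demo%d", id);
+ *	if (retval) {
+ *		kobject_put(&demo->kobj);
+ *		return retval;
+ *	}
+ *	set up the sysfs files here, then:
+ *	kobject_uevent(&demo->kobj, KOBJ_ADD);
+ */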
+
+/**
+ * kobject_init_and_add() - Initialize a kobject structure and add it to
+ * the kobject hierarchy.
+ * @kobj: pointer to the kobject to initialize
+ * @ktype: pointer to the ktype for this kobject.
+ * @parent: pointer to the parent of this kobject.
+ * @fmt: the name of the kobject.
+ *
+ * This function combines the call to kobject_init() and kobject_add().
+ *
+ * If this function returns an error, kobject_put() must be called to
+ * properly clean up the memory associated with the object. This is the
+ * same type of error handling after a call to kobject_add() and kobject
+ * lifetime rules are the same here.
+ */
+int kobject_init_and_add(struct kobject *kobj, const struct kobj_type *ktype,
+ struct kobject *parent, const char *fmt, ...)
+{
+ va_list args;
+ int retval;
+
+ kobject_init(kobj, ktype);
+
+ va_start(args, fmt);
+ retval = kobject_add_varg(kobj, parent, fmt, args);
+ va_end(args);
+
+ return retval;
+}
+EXPORT_SYMBOL_GPL(kobject_init_and_add);
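+
+/*
+ * Editor's sketch (hypothetical "foo" names, illustrative only): even
+ * when this function fails, the kobject has already been initialized,
+ * so the cleanup path must go through kobject_put():
+ *
+ *	retval = kobject_init_and_add(&foo->kobj, &foo_ktype, parent, "foo");
+ *	if (retval) {
+ *		kobject_put(&foo->kobj);
+ *		return retval;
+ *	}
+ */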
+
+/**
+ * kobject_rename() - Change the name of an object.
+ * @kobj: object in question.
+ * @new_name: object's new name
+ *
+ * It is the responsibility of the caller to provide mutual
+ * exclusion between two different calls of kobject_rename
+ * on the same kobject and to ensure that new_name is valid and
+ * won't conflict with other kobjects.
+ */
+int kobject_rename(struct kobject *kobj, const char *new_name)
+{
+ int error = 0;
+ const char *devpath = NULL;
+ const char *dup_name = NULL, *name;
+ char *devpath_string = NULL;
+ char *envp[2];
+
+ kobj = kobject_get(kobj);
+ if (!kobj)
+ return -EINVAL;
+ if (!kobj->parent) {
+ kobject_put(kobj);
+ return -EINVAL;
+ }
+
+ devpath = kobject_get_path(kobj, GFP_KERNEL);
+ if (!devpath) {
+ error = -ENOMEM;
+ goto out;
+ }
+ devpath_string = kmalloc(strlen(devpath) + 15, GFP_KERNEL);
+ if (!devpath_string) {
+ error = -ENOMEM;
+ goto out;
+ }
+ sprintf(devpath_string, "DEVPATH_OLD=%s", devpath);
+ envp[0] = devpath_string;
+ envp[1] = NULL;
+
+ name = dup_name = kstrdup_const(new_name, GFP_KERNEL);
+ if (!name) {
+ error = -ENOMEM;
+ goto out;
+ }
+
+ error = sysfs_rename_dir_ns(kobj, new_name, kobject_namespace(kobj));
+ if (error)
+ goto out;
+
+ /* Install the new kobject name */
+ dup_name = kobj->name;
+ kobj->name = name;
+
+ /* This function is mostly/only used for network interfaces.
+ * Some hotplug packages track interfaces by their name and
+ * therefore want to know when the name is changed by the user. */
+ kobject_uevent_env(kobj, KOBJ_MOVE, envp);
+
+out:
+ kfree_const(dup_name);
+ kfree(devpath_string);
+ kfree(devpath);
+ kobject_put(kobj);
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(kobject_rename);
+
+/**
+ * kobject_move() - Move object to another parent.
+ * @kobj: object in question.
+ * @new_parent: object's new parent (can be NULL)
+ */
+int kobject_move(struct kobject *kobj, struct kobject *new_parent)
+{
+ int error;
+ struct kobject *old_parent;
+ const char *devpath = NULL;
+ char *devpath_string = NULL;
+ char *envp[2];
+
+ kobj = kobject_get(kobj);
+ if (!kobj)
+ return -EINVAL;
+ new_parent = kobject_get(new_parent);
+ if (!new_parent) {
+ if (kobj->kset)
+ new_parent = kobject_get(&kobj->kset->kobj);
+ }
+
+ /* old object path */
+ devpath = kobject_get_path(kobj, GFP_KERNEL);
+ if (!devpath) {
+ error = -ENOMEM;
+ goto out;
+ }
+ devpath_string = kmalloc(strlen(devpath) + 15, GFP_KERNEL);
+ if (!devpath_string) {
+ error = -ENOMEM;
+ goto out;
+ }
+ sprintf(devpath_string, "DEVPATH_OLD=%s", devpath);
+ envp[0] = devpath_string;
+ envp[1] = NULL;
+ error = sysfs_move_dir_ns(kobj, new_parent, kobject_namespace(kobj));
+ if (error)
+ goto out;
+ old_parent = kobj->parent;
+ kobj->parent = new_parent;
+ new_parent = NULL;
+ kobject_put(old_parent);
+ kobject_uevent_env(kobj, KOBJ_MOVE, envp);
+out:
+ kobject_put(new_parent);
+ kobject_put(kobj);
+ kfree(devpath_string);
+ kfree(devpath);
+ return error;
+}
+EXPORT_SYMBOL_GPL(kobject_move);
+
+static void __kobject_del(struct kobject *kobj)
+{
+ struct kernfs_node *sd;
+ const struct kobj_type *ktype;
+
+ sd = kobj->sd;
+ ktype = get_ktype(kobj);
+
+ sysfs_remove_groups(kobj, ktype->default_groups);
+
+ /* send "remove" if the caller did not do it but sent "add" */
+ if (kobj->state_add_uevent_sent && !kobj->state_remove_uevent_sent) {
+ pr_debug("'%s' (%p): auto cleanup 'remove' event\n",
+ kobject_name(kobj), kobj);
+ kobject_uevent(kobj, KOBJ_REMOVE);
+ }
+
+ sysfs_remove_dir(kobj);
+ sysfs_put(sd);
+
+ kobj->state_in_sysfs = 0;
+ kobj_kset_leave(kobj);
+ kobj->parent = NULL;
+}
+
+/**
+ * kobject_del() - Unlink kobject from hierarchy.
+ * @kobj: object.
+ *
+ * This is the function that should be called to delete an object
+ * successfully added via kobject_add().
+ */
+void kobject_del(struct kobject *kobj)
+{
+ struct kobject *parent;
+
+ if (!kobj)
+ return;
+
+ parent = kobj->parent;
+ __kobject_del(kobj);
+ kobject_put(parent);
+}
+EXPORT_SYMBOL(kobject_del);
+
+/**
+ * kobject_get() - Increment refcount for object.
+ * @kobj: object.
+ */
+struct kobject *kobject_get(struct kobject *kobj)
+{
+ if (kobj) {
+ if (!kobj->state_initialized)
+ WARN(1, KERN_WARNING
+ "kobject: '%s' (%p): is not initialized, yet kobject_get() is being called.\n",
+ kobject_name(kobj), kobj);
+ kref_get(&kobj->kref);
+ }
+ return kobj;
+}
+EXPORT_SYMBOL(kobject_get);
+
+struct kobject * __must_check kobject_get_unless_zero(struct kobject *kobj)
+{
+ if (!kobj)
+ return NULL;
+ if (!kref_get_unless_zero(&kobj->kref))
+ kobj = NULL;
+ return kobj;
+}
+EXPORT_SYMBOL(kobject_get_unless_zero);
+
+/*
+ * kobject_cleanup - free kobject resources.
+ * @kobj: object to cleanup
+ */
+static void kobject_cleanup(struct kobject *kobj)
+{
+ struct kobject *parent = kobj->parent;
+ const struct kobj_type *t = get_ktype(kobj);
+ const char *name = kobj->name;
+
+ pr_debug("'%s' (%p): %s, parent %p\n",
+ kobject_name(kobj), kobj, __func__, kobj->parent);
+
+ /* remove from sysfs if the caller did not do it */
+ if (kobj->state_in_sysfs) {
+ pr_debug("'%s' (%p): auto cleanup kobject_del\n",
+ kobject_name(kobj), kobj);
+ __kobject_del(kobj);
+ } else {
+ /* avoid dropping the parent reference unnecessarily */
+ parent = NULL;
+ }
+
+ if (t->release) {
+ pr_debug("'%s' (%p): calling ktype release\n",
+ kobject_name(kobj), kobj);
+ t->release(kobj);
+ } else {
+ pr_debug("'%s' (%p): does not have a release() function, it is broken and must be fixed. See Documentation/core-api/kobject.rst.\n",
+ kobject_name(kobj), kobj);
+ }
+
+ /* free name if we allocated it */
+ if (name) {
+ pr_debug("'%s': free name\n", name);
+ kfree_const(name);
+ }
+
+ kobject_put(parent);
+}
+
+#ifdef CONFIG_DEBUG_KOBJECT_RELEASE
+static void kobject_delayed_cleanup(struct work_struct *work)
+{
+ kobject_cleanup(container_of(to_delayed_work(work),
+ struct kobject, release));
+}
+#endif
+
+static void kobject_release(struct kref *kref)
+{
+ struct kobject *kobj = container_of(kref, struct kobject, kref);
+#ifdef CONFIG_DEBUG_KOBJECT_RELEASE
+ unsigned long delay = HZ + HZ * get_random_u32_below(4);
+ pr_info("'%s' (%p): %s, parent %p (delayed %ld)\n",
+ kobject_name(kobj), kobj, __func__, kobj->parent, delay);
+ INIT_DELAYED_WORK(&kobj->release, kobject_delayed_cleanup);
+
+ schedule_delayed_work(&kobj->release, delay);
+#else
+ kobject_cleanup(kobj);
+#endif
+}
+
+/**
+ * kobject_put() - Decrement refcount for object.
+ * @kobj: object.
+ *
+ * Decrement the refcount, and if 0, call kobject_cleanup().
+ */
+void kobject_put(struct kobject *kobj)
+{
+ if (kobj) {
+ if (!kobj->state_initialized)
+ WARN(1, KERN_WARNING
+ "kobject: '%s' (%p): is not initialized, yet kobject_put() is being called.\n",
+ kobject_name(kobj), kobj);
+ kref_put(&kobj->kref, kobject_release);
+ }
+}
+EXPORT_SYMBOL(kobject_put);
+
+static void dynamic_kobj_release(struct kobject *kobj)
+{
+ pr_debug("(%p): %s\n", kobj, __func__);
+ kfree(kobj);
+}
+
+static const struct kobj_type dynamic_kobj_ktype = {
+ .release = dynamic_kobj_release,
+ .sysfs_ops = &kobj_sysfs_ops,
+};
+
+/**
+ * kobject_create() - Create a struct kobject dynamically.
+ *
+ * This function creates a kobject structure dynamically and sets it up
+ * to be a "dynamic" kobject with a default release function set up.
+ *
+ * If the kobject was not able to be created, NULL will be returned.
+ * The kobject structure returned from here must be cleaned up with a
+ * call to kobject_put() and not kfree(), as kobject_init() has
+ * already been called on this structure.
+ */
+static struct kobject *kobject_create(void)
+{
+ struct kobject *kobj;
+
+ kobj = kzalloc(sizeof(*kobj), GFP_KERNEL);
+ if (!kobj)
+ return NULL;
+
+ kobject_init(kobj, &dynamic_kobj_ktype);
+ return kobj;
+}
+
+/**
+ * kobject_create_and_add() - Create a struct kobject dynamically and
+ * register it with sysfs.
+ * @name: the name for the kobject
+ * @parent: the parent kobject of this kobject, if any.
+ *
+ * This function creates a kobject structure dynamically and registers it
+ * with sysfs. When you are finished with this structure, call
+ * kobject_put() and the structure will be dynamically freed when
+ * it is no longer being used.
+ *
+ * If the kobject was not able to be created, NULL will be returned.
+ */
+struct kobject *kobject_create_and_add(const char *name, struct kobject *parent)
+{
+ struct kobject *kobj;
+ int retval;
+
+ kobj = kobject_create();
+ if (!kobj)
+ return NULL;
+
+ retval = kobject_add(kobj, parent, "%s", name);
+ if (retval) {
+ pr_warn("%s: kobject_add error: %d\n", __func__, retval);
+ kobject_put(kobj);
+ kobj = NULL;
+ }
+ return kobj;
+}
+EXPORT_SYMBOL_GPL(kobject_create_and_add);
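+
+/*
+ * Editor's sketch (illustrative only; see samples/kobject/ for a full
+ * version): create a directory under /sys/kernel and drop it again
+ * when done, using the real kernel_kobj as the parent:
+ *
+ *	static struct kobject *example_kobj;
+ *
+ *	example_kobj = kobject_create_and_add("example", kernel_kobj);
+ *	if (!example_kobj)
+ *		return -ENOMEM;
+ *	...
+ *	kobject_put(example_kobj);
+ */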
+
+/**
+ * kset_init() - Initialize a kset for use.
+ * @k: kset
+ */
+void kset_init(struct kset *k)
+{
+ kobject_init_internal(&k->kobj);
+ INIT_LIST_HEAD(&k->list);
+ spin_lock_init(&k->list_lock);
+}
+
+/* default kobject attribute operations */
+static ssize_t kobj_attr_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct kobj_attribute *kattr;
+ ssize_t ret = -EIO;
+
+ kattr = container_of(attr, struct kobj_attribute, attr);
+ if (kattr->show)
+ ret = kattr->show(kobj, kattr, buf);
+ return ret;
+}
+
+static ssize_t kobj_attr_store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
+{
+ struct kobj_attribute *kattr;
+ ssize_t ret = -EIO;
+
+ kattr = container_of(attr, struct kobj_attribute, attr);
+ if (kattr->store)
+ ret = kattr->store(kobj, kattr, buf, count);
+ return ret;
+}
+
+const struct sysfs_ops kobj_sysfs_ops = {
+ .show = kobj_attr_show,
+ .store = kobj_attr_store,
+};
+EXPORT_SYMBOL_GPL(kobj_sysfs_ops);
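+
+/*
+ * Editor's sketch (hypothetical "value" attribute, illustrative only):
+ * kobj_sysfs_ops dispatches through struct kobj_attribute, so a
+ * read-only sysfs file can be declared like this and created with
+ * sysfs_create_file(kobj, &value_attr.attr):
+ *
+ *	static int value;
+ *
+ *	static ssize_t value_show(struct kobject *kobj,
+ *				  struct kobj_attribute *attr, char *buf)
+ *	{
+ *		return sysfs_emit(buf, "%d\n", value);
+ *	}
+ *	static struct kobj_attribute value_attr = __ATTR_RO(value);
+ */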
+
+/**
+ * kset_register() - Initialize and add a kset.
+ * @k: kset.
+ *
+ * NOTE: On error, the kset.kobj.name allocated by kobject_set_name()
+ * is freed and can not be used any more.
+ */
+int kset_register(struct kset *k)
+{
+ int err;
+
+ if (!k)
+ return -EINVAL;
+
+ if (!k->kobj.ktype) {
+ pr_err("must have a ktype to be initialized properly!\n");
+ return -EINVAL;
+ }
+
+ kset_init(k);
+ err = kobject_add_internal(&k->kobj);
+ if (err) {
+ kfree_const(k->kobj.name);
+ /* Set it to NULL to avoid accessing bad pointer in callers. */
+ k->kobj.name = NULL;
+ return err;
+ }
+ kobject_uevent(&k->kobj, KOBJ_ADD);
+ return 0;
+}
+EXPORT_SYMBOL(kset_register);
+
+/**
+ * kset_unregister() - Remove a kset.
+ * @k: kset.
+ */
+void kset_unregister(struct kset *k)
+{
+ if (!k)
+ return;
+ kobject_del(&k->kobj);
+ kobject_put(&k->kobj);
+}
+EXPORT_SYMBOL(kset_unregister);
+
+/**
+ * kset_find_obj() - Search for object in kset.
+ * @kset: kset we're looking in.
+ * @name: object's name.
+ *
+ * Lock kset via @kset->subsys, and iterate over @kset->list,
+ * looking for a matching kobject. If matching object is found
+ * take a reference and return the object.
+ */
+struct kobject *kset_find_obj(struct kset *kset, const char *name)
+{
+ struct kobject *k;
+ struct kobject *ret = NULL;
+
+ spin_lock(&kset->list_lock);
+
+ list_for_each_entry(k, &kset->list, entry) {
+ if (kobject_name(k) && !strcmp(kobject_name(k), name)) {
+ ret = kobject_get_unless_zero(k);
+ break;
+ }
+ }
+
+ spin_unlock(&kset->list_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(kset_find_obj);
+
+static void kset_release(struct kobject *kobj)
+{
+ struct kset *kset = container_of(kobj, struct kset, kobj);
+ pr_debug("'%s' (%p): %s\n",
+ kobject_name(kobj), kobj, __func__);
+ kfree(kset);
+}
+
+static void kset_get_ownership(const struct kobject *kobj, kuid_t *uid, kgid_t *gid)
+{
+ if (kobj->parent)
+ kobject_get_ownership(kobj->parent, uid, gid);
+}
+
+static const struct kobj_type kset_ktype = {
+ .sysfs_ops = &kobj_sysfs_ops,
+ .release = kset_release,
+ .get_ownership = kset_get_ownership,
+};
+
+/**
+ * kset_create() - Create a struct kset dynamically.
+ *
+ * @name: the name for the kset
+ * @uevent_ops: a struct kset_uevent_ops for the kset
+ * @parent_kobj: the parent kobject of this kset, if any.
+ *
+ * This function creates a kset structure dynamically. This structure can
+ * then be registered with the system and show up in sysfs with a call to
+ * kset_register(). When you are finished with this structure, if
+ * kset_register() has been called, call kset_unregister() and the
+ * structure will be dynamically freed when it is no longer being used.
+ *
+ * If the kset was not able to be created, NULL will be returned.
+ */
+static struct kset *kset_create(const char *name,
+ const struct kset_uevent_ops *uevent_ops,
+ struct kobject *parent_kobj)
+{
+ struct kset *kset;
+ int retval;
+
+ kset = kzalloc(sizeof(*kset), GFP_KERNEL);
+ if (!kset)
+ return NULL;
+ retval = kobject_set_name(&kset->kobj, "%s", name);
+ if (retval) {
+ kfree(kset);
+ return NULL;
+ }
+ kset->uevent_ops = uevent_ops;
+ kset->kobj.parent = parent_kobj;
+
+ /*
+ * The kobject of this kset will have a type of kset_ktype and belong to
+ * no kset itself. That way we can properly free it when it is
+ * finished being used.
+ */
+ kset->kobj.ktype = &kset_ktype;
+ kset->kobj.kset = NULL;
+
+ return kset;
+}
+
+/**
+ * kset_create_and_add() - Create a struct kset dynamically and add it to sysfs.
+ *
+ * @name: the name for the kset
+ * @uevent_ops: a struct kset_uevent_ops for the kset
+ * @parent_kobj: the parent kobject of this kset, if any.
+ *
+ * This function creates a kset structure dynamically and registers it
+ * with sysfs. When you are finished with this structure, call
+ * kset_unregister() and the structure will be dynamically freed when it
+ * is no longer being used.
+ *
+ * If the kset was not able to be created, NULL will be returned.
+ */
+struct kset *kset_create_and_add(const char *name,
+ const struct kset_uevent_ops *uevent_ops,
+ struct kobject *parent_kobj)
+{
+ struct kset *kset;
+ int error;
+
+ kset = kset_create(name, uevent_ops, parent_kobj);
+ if (!kset)
+ return NULL;
+ error = kset_register(kset);
+ if (error) {
+ kfree(kset);
+ return NULL;
+ }
+ return kset;
+}
+EXPORT_SYMBOL_GPL(kset_create_and_add);
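+
+/*
+ * Editor's sketch (hypothetical "demo" names, illustrative only): a kset
+ * groups kobjects under one directory and gives them a shared uevent
+ * handler; members join by setting ->kset before they are added:
+ *
+ *	demo_kset = kset_create_and_add("demo", &demo_uevent_ops, NULL);
+ *	...
+ *	foo->kobj.kset = demo_kset;
+ *	retval = kobject_init_and_add(&foo->kobj, &foo_ktype, NULL, "foo");
+ */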
+
+
+static DEFINE_SPINLOCK(kobj_ns_type_lock);
+static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
+
+int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
+{
+ enum kobj_ns_type type = ops->type;
+ int error;
+
+ spin_lock(&kobj_ns_type_lock);
+
+ error = -EINVAL;
+ if (!kobj_ns_type_is_valid(type))
+ goto out;
+
+ error = -EBUSY;
+ if (kobj_ns_ops_tbl[type])
+ goto out;
+
+ error = 0;
+ kobj_ns_ops_tbl[type] = ops;
+
+out:
+ spin_unlock(&kobj_ns_type_lock);
+ return error;
+}
+
+int kobj_ns_type_registered(enum kobj_ns_type type)
+{
+ int registered = 0;
+
+ spin_lock(&kobj_ns_type_lock);
+ if (kobj_ns_type_is_valid(type))
+ registered = kobj_ns_ops_tbl[type] != NULL;
+ spin_unlock(&kobj_ns_type_lock);
+
+ return registered;
+}
+
+const struct kobj_ns_type_operations *kobj_child_ns_ops(const struct kobject *parent)
+{
+ const struct kobj_ns_type_operations *ops = NULL;
+
+ if (parent && parent->ktype->child_ns_type)
+ ops = parent->ktype->child_ns_type(parent);
+
+ return ops;
+}
+
+const struct kobj_ns_type_operations *kobj_ns_ops(const struct kobject *kobj)
+{
+ return kobj_child_ns_ops(kobj->parent);
+}
+
+bool kobj_ns_current_may_mount(enum kobj_ns_type type)
+{
+ bool may_mount = true;
+
+ spin_lock(&kobj_ns_type_lock);
+ if (kobj_ns_type_is_valid(type) && kobj_ns_ops_tbl[type])
+ may_mount = kobj_ns_ops_tbl[type]->current_may_mount();
+ spin_unlock(&kobj_ns_type_lock);
+
+ return may_mount;
+}
+
+void *kobj_ns_grab_current(enum kobj_ns_type type)
+{
+ void *ns = NULL;
+
+ spin_lock(&kobj_ns_type_lock);
+ if (kobj_ns_type_is_valid(type) && kobj_ns_ops_tbl[type])
+ ns = kobj_ns_ops_tbl[type]->grab_current_ns();
+ spin_unlock(&kobj_ns_type_lock);
+
+ return ns;
+}
+EXPORT_SYMBOL_GPL(kobj_ns_grab_current);
+
+const void *kobj_ns_netlink(enum kobj_ns_type type, struct sock *sk)
+{
+ const void *ns = NULL;
+
+ spin_lock(&kobj_ns_type_lock);
+ if (kobj_ns_type_is_valid(type) && kobj_ns_ops_tbl[type])
+ ns = kobj_ns_ops_tbl[type]->netlink_ns(sk);
+ spin_unlock(&kobj_ns_type_lock);
+
+ return ns;
+}
+
+const void *kobj_ns_initial(enum kobj_ns_type type)
+{
+ const void *ns = NULL;
+
+ spin_lock(&kobj_ns_type_lock);
+ if (kobj_ns_type_is_valid(type) && kobj_ns_ops_tbl[type])
+ ns = kobj_ns_ops_tbl[type]->initial_ns();
+ spin_unlock(&kobj_ns_type_lock);
+
+ return ns;
+}
+
+void kobj_ns_drop(enum kobj_ns_type type, void *ns)
+{
+ spin_lock(&kobj_ns_type_lock);
+ if (kobj_ns_type_is_valid(type) &&
+ kobj_ns_ops_tbl[type] && kobj_ns_ops_tbl[type]->drop_ns)
+ kobj_ns_ops_tbl[type]->drop_ns(ns);
+ spin_unlock(&kobj_ns_type_lock);
+}
+EXPORT_SYMBOL_GPL(kobj_ns_drop);
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
new file mode 100644
index 000000000..7c44b7ae4
--- /dev/null
+++ b/lib/kobject_uevent.c
@@ -0,0 +1,815 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * kernel userspace event delivery
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2004 Novell, Inc. All rights reserved.
+ * Copyright (C) 2004 IBM, Inc. All rights reserved.
+ *
+ * Authors:
+ * Robert Love <rml@novell.com>
+ * Kay Sievers <kay.sievers@vrfy.org>
+ * Arjan van de Ven <arjanv@redhat.com>
+ * Greg Kroah-Hartman <greg@kroah.com>
+ */
+
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/kobject.h>
+#include <linux/export.h>
+#include <linux/kmod.h>
+#include <linux/slab.h>
+#include <linux/socket.h>
+#include <linux/skbuff.h>
+#include <linux/netlink.h>
+#include <linux/uidgid.h>
+#include <linux/uuid.h>
+#include <linux/ctype.h>
+#include <net/sock.h>
+#include <net/netlink.h>
+#include <net/net_namespace.h>
+
+
+u64 uevent_seqnum;
+#ifdef CONFIG_UEVENT_HELPER
+char uevent_helper[UEVENT_HELPER_PATH_LEN] = CONFIG_UEVENT_HELPER_PATH;
+#endif
+
+struct uevent_sock {
+ struct list_head list;
+ struct sock *sk;
+};
+
+#ifdef CONFIG_NET
+static LIST_HEAD(uevent_sock_list);
+#endif
+
+/* This lock protects uevent_seqnum and uevent_sock_list */
+static DEFINE_MUTEX(uevent_sock_mutex);
+
+/* the strings here must match the enum in include/linux/kobject.h */
+static const char *kobject_actions[] = {
+ [KOBJ_ADD] = "add",
+ [KOBJ_REMOVE] = "remove",
+ [KOBJ_CHANGE] = "change",
+ [KOBJ_MOVE] = "move",
+ [KOBJ_ONLINE] = "online",
+ [KOBJ_OFFLINE] = "offline",
+ [KOBJ_BIND] = "bind",
+ [KOBJ_UNBIND] = "unbind",
+};
+
+static int kobject_action_type(const char *buf, size_t count,
+ enum kobject_action *type,
+ const char **args)
+{
+ enum kobject_action action;
+ size_t count_first;
+ const char *args_start;
+ int ret = -EINVAL;
+
+ if (count && (buf[count-1] == '\n' || buf[count-1] == '\0'))
+ count--;
+
+ if (!count)
+ goto out;
+
+ args_start = strnchr(buf, count, ' ');
+ if (args_start) {
+ count_first = args_start - buf;
+ args_start = args_start + 1;
+ } else
+ count_first = count;
+
+ for (action = 0; action < ARRAY_SIZE(kobject_actions); action++) {
+ if (strncmp(kobject_actions[action], buf, count_first) != 0)
+ continue;
+ if (kobject_actions[action][count_first] != '\0')
+ continue;
+ if (args)
+ *args = args_start;
+ *type = action;
+ ret = 0;
+ break;
+ }
+out:
+ return ret;
+}
+
+static const char *action_arg_word_end(const char *buf, const char *buf_end,
+ char delim)
+{
+ const char *next = buf;
+
+ while (next <= buf_end && *next != delim)
+ if (!isalnum(*next++))
+ return NULL;
+
+ if (next == buf)
+ return NULL;
+
+ return next;
+}
+
+static int kobject_action_args(const char *buf, size_t count,
+ struct kobj_uevent_env **ret_env)
+{
+ struct kobj_uevent_env *env = NULL;
+ const char *next, *buf_end, *key;
+ int key_len;
+ int r = -EINVAL;
+
+ if (count && (buf[count - 1] == '\n' || buf[count - 1] == '\0'))
+ count--;
+
+ if (!count)
+ return -EINVAL;
+
+ env = kzalloc(sizeof(*env), GFP_KERNEL);
+ if (!env)
+ return -ENOMEM;
+
+ /* first arg is UUID */
+ if (count < UUID_STRING_LEN || !uuid_is_valid(buf) ||
+ add_uevent_var(env, "SYNTH_UUID=%.*s", UUID_STRING_LEN, buf))
+ goto out;
+
+ /*
+ * the rest are custom environment variables in KEY=VALUE
+ * format with ' ' delimiter between each KEY=VALUE pair
+ */
+ next = buf + UUID_STRING_LEN;
+ buf_end = buf + count - 1;
+
+ while (next <= buf_end) {
+ if (*next != ' ')
+ goto out;
+
+ /* skip the ' ', key must follow */
+ key = ++next;
+ if (key > buf_end)
+ goto out;
+
+ buf = next;
+ next = action_arg_word_end(buf, buf_end, '=');
+ if (!next || next > buf_end || *next != '=')
+ goto out;
+ key_len = next - buf;
+
+ /* skip the '=', value must follow */
+ if (++next > buf_end)
+ goto out;
+
+ buf = next;
+ next = action_arg_word_end(buf, buf_end, ' ');
+ if (!next)
+ goto out;
+
+ if (add_uevent_var(env, "SYNTH_ARG_%.*s=%.*s",
+ key_len, key, (int) (next - buf), buf))
+ goto out;
+ }
+
+ r = 0;
+out:
+ if (r)
+ kfree(env);
+ else
+ *ret_env = env;
+ return r;
+}
+
+/**
+ * kobject_synth_uevent - send synthetic uevent with arguments
+ *
+ * @kobj: struct kobject for which synthetic uevent is to be generated
+ * @buf: buffer containing action type and action args, newline is ignored
+ * @count: length of buffer
+ *
+ * Returns 0 if kobject_synth_uevent() is completed with success or the
+ * corresponding error when it fails.
+ */
+int kobject_synth_uevent(struct kobject *kobj, const char *buf, size_t count)
+{
+ char *no_uuid_envp[] = { "SYNTH_UUID=0", NULL };
+ enum kobject_action action;
+ const char *action_args;
+ struct kobj_uevent_env *env;
+ const char *msg = NULL, *devpath;
+ int r;
+
+ r = kobject_action_type(buf, count, &action, &action_args);
+ if (r) {
+ msg = "unknown uevent action string";
+ goto out;
+ }
+
+ if (!action_args) {
+ r = kobject_uevent_env(kobj, action, no_uuid_envp);
+ goto out;
+ }
+
+ r = kobject_action_args(action_args,
+ count - (action_args - buf), &env);
+ if (r == -EINVAL) {
+ msg = "incorrect uevent action arguments";
+ goto out;
+ }
+
+ if (r)
+ goto out;
+
+ r = kobject_uevent_env(kobj, action, env->envp);
+ kfree(env);
+out:
+ if (r) {
+ devpath = kobject_get_path(kobj, GFP_KERNEL);
+ pr_warn("synth uevent: %s: %s\n",
+ devpath ?: "unknown device",
+ msg ?: "failed to send uevent");
+ kfree(devpath);
+ }
+ return r;
+}
+
+#ifdef CONFIG_UEVENT_HELPER
+static int kobj_usermode_filter(struct kobject *kobj)
+{
+ const struct kobj_ns_type_operations *ops;
+
+ ops = kobj_ns_ops(kobj);
+ if (ops) {
+ const void *init_ns, *ns;
+
+ ns = kobj->ktype->namespace(kobj);
+ init_ns = ops->initial_ns();
+ return ns != init_ns;
+ }
+
+ return 0;
+}
+
+static int init_uevent_argv(struct kobj_uevent_env *env, const char *subsystem)
+{
+ int buffer_size = sizeof(env->buf) - env->buflen;
+ int len;
+
+ len = strlcpy(&env->buf[env->buflen], subsystem, buffer_size);
+ if (len >= buffer_size) {
+ pr_warn("init_uevent_argv: buffer size of %d too small, needed %d\n",
+ buffer_size, len);
+ return -ENOMEM;
+ }
+
+ env->argv[0] = uevent_helper;
+ env->argv[1] = &env->buf[env->buflen];
+ env->argv[2] = NULL;
+
+ env->buflen += len + 1;
+ return 0;
+}
+
+static void cleanup_uevent_env(struct subprocess_info *info)
+{
+ kfree(info->data);
+}
+#endif
+
+#ifdef CONFIG_NET
+static struct sk_buff *alloc_uevent_skb(struct kobj_uevent_env *env,
+ const char *action_string,
+ const char *devpath)
+{
+ struct netlink_skb_parms *parms;
+ struct sk_buff *skb = NULL;
+ char *scratch;
+ size_t len;
+
+ /* allocate message with maximum possible size */
+ len = strlen(action_string) + strlen(devpath) + 2;
+ skb = alloc_skb(len + env->buflen, GFP_KERNEL);
+ if (!skb)
+ return NULL;
+
+ /* add header */
+ scratch = skb_put(skb, len);
+ sprintf(scratch, "%s@%s", action_string, devpath);
+
+ skb_put_data(skb, env->buf, env->buflen);
+
+ parms = &NETLINK_CB(skb);
+ parms->creds.uid = GLOBAL_ROOT_UID;
+ parms->creds.gid = GLOBAL_ROOT_GID;
+ parms->dst_group = 1;
+ parms->portid = 0;
+
+ return skb;
+}
+
+static int uevent_net_broadcast_untagged(struct kobj_uevent_env *env,
+ const char *action_string,
+ const char *devpath)
+{
+ struct sk_buff *skb = NULL;
+ struct uevent_sock *ue_sk;
+ int retval = 0;
+
+ /* send netlink message */
+ list_for_each_entry(ue_sk, &uevent_sock_list, list) {
+ struct sock *uevent_sock = ue_sk->sk;
+
+ if (!netlink_has_listeners(uevent_sock, 1))
+ continue;
+
+ if (!skb) {
+ retval = -ENOMEM;
+ skb = alloc_uevent_skb(env, action_string, devpath);
+ if (!skb)
+ continue;
+ }
+
+ retval = netlink_broadcast(uevent_sock, skb_get(skb), 0, 1,
+ GFP_KERNEL);
+ /* ENOBUFS should be handled in userspace */
+ if (retval == -ENOBUFS || retval == -ESRCH)
+ retval = 0;
+ }
+ consume_skb(skb);
+
+ return retval;
+}
+
+static int uevent_net_broadcast_tagged(struct sock *usk,
+ struct kobj_uevent_env *env,
+ const char *action_string,
+ const char *devpath)
+{
+ struct user_namespace *owning_user_ns = sock_net(usk)->user_ns;
+ struct sk_buff *skb = NULL;
+ int ret = 0;
+
+ skb = alloc_uevent_skb(env, action_string, devpath);
+ if (!skb)
+ return -ENOMEM;
+
+ /* fix credentials */
+ if (owning_user_ns != &init_user_ns) {
+ struct netlink_skb_parms *parms = &NETLINK_CB(skb);
+ kuid_t root_uid;
+ kgid_t root_gid;
+
+ /* fix uid */
+ root_uid = make_kuid(owning_user_ns, 0);
+ if (uid_valid(root_uid))
+ parms->creds.uid = root_uid;
+
+ /* fix gid */
+ root_gid = make_kgid(owning_user_ns, 0);
+ if (gid_valid(root_gid))
+ parms->creds.gid = root_gid;
+ }
+
+ ret = netlink_broadcast(usk, skb, 0, 1, GFP_KERNEL);
+ /* ENOBUFS should be handled in userspace */
+ if (ret == -ENOBUFS || ret == -ESRCH)
+ ret = 0;
+
+ return ret;
+}
+#endif
+
+static int kobject_uevent_net_broadcast(struct kobject *kobj,
+ struct kobj_uevent_env *env,
+ const char *action_string,
+ const char *devpath)
+{
+ int ret = 0;
+
+#ifdef CONFIG_NET
+ const struct kobj_ns_type_operations *ops;
+ const struct net *net = NULL;
+
+ ops = kobj_ns_ops(kobj);
+ if (!ops && kobj->kset) {
+ struct kobject *ksobj = &kobj->kset->kobj;
+
+ if (ksobj->parent != NULL)
+ ops = kobj_ns_ops(ksobj->parent);
+ }
+
+ /* kobjects currently only carry network namespace tags, and those
+ * are the only tags relevant here since we want to decide which
+ * network namespaces to broadcast the uevent into.
+ */
+ if (ops && ops->netlink_ns && kobj->ktype->namespace)
+ if (ops->type == KOBJ_NS_TYPE_NET)
+ net = kobj->ktype->namespace(kobj);
+
+ if (!net)
+ ret = uevent_net_broadcast_untagged(env, action_string,
+ devpath);
+ else
+ ret = uevent_net_broadcast_tagged(net->uevent_sock->sk, env,
+ action_string, devpath);
+#endif
+
+ return ret;
+}
+
+static void zap_modalias_env(struct kobj_uevent_env *env)
+{
+ static const char modalias_prefix[] = "MODALIAS=";
+ size_t len;
+ int i, j;
+
+ for (i = 0; i < env->envp_idx;) {
+ if (strncmp(env->envp[i], modalias_prefix,
+ sizeof(modalias_prefix) - 1)) {
+ i++;
+ continue;
+ }
+
+ len = strlen(env->envp[i]) + 1;
+
+ if (i != env->envp_idx - 1) {
+ memmove(env->envp[i], env->envp[i + 1],
+ env->buflen - len);
+
+ for (j = i; j < env->envp_idx - 1; j++)
+ env->envp[j] = env->envp[j + 1] - len;
+ }
+
+ env->envp_idx--;
+ env->buflen -= len;
+ }
+}
+
+/**
+ * kobject_uevent_env - send an uevent with environmental data
+ *
+ * @kobj: struct kobject that the action is happening to
+ * @action: action that is happening
+ * @envp_ext: pointer to environmental data
+ *
+ * Returns 0 if kobject_uevent_env() is completed with success or the
+ * corresponding error when it fails.
+ */
+int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
+ char *envp_ext[])
+{
+ struct kobj_uevent_env *env;
+ const char *action_string = kobject_actions[action];
+ const char *devpath = NULL;
+ const char *subsystem;
+ struct kobject *top_kobj;
+ struct kset *kset;
+ const struct kset_uevent_ops *uevent_ops;
+ int i = 0;
+ int retval = 0;
+
+ /*
+ * Mark "remove" event done regardless of result, for some subsystems
+ * do not want to re-trigger "remove" event via automatic cleanup.
+ */
+ if (action == KOBJ_REMOVE)
+ kobj->state_remove_uevent_sent = 1;
+
+ pr_debug("kobject: '%s' (%p): %s\n",
+ kobject_name(kobj), kobj, __func__);
+
+ /* search the kset we belong to */
+ top_kobj = kobj;
+ while (!top_kobj->kset && top_kobj->parent)
+ top_kobj = top_kobj->parent;
+
+ if (!top_kobj->kset) {
+ pr_debug("kobject: '%s' (%p): %s: attempted to send uevent "
+ "without kset!\n", kobject_name(kobj), kobj,
+ __func__);
+ return -EINVAL;
+ }
+
+ kset = top_kobj->kset;
+ uevent_ops = kset->uevent_ops;
+
+ /* skip the event, if uevent_suppress is set */
+ if (kobj->uevent_suppress) {
+ pr_debug("kobject: '%s' (%p): %s: uevent_suppress "
+ "caused the event to drop!\n",
+ kobject_name(kobj), kobj, __func__);
+ return 0;
+ }
+ /* skip the event, if the filter returns zero. */
+ if (uevent_ops && uevent_ops->filter)
+ if (!uevent_ops->filter(kobj)) {
+ pr_debug("kobject: '%s' (%p): %s: filter function "
+ "caused the event to drop!\n",
+ kobject_name(kobj), kobj, __func__);
+ return 0;
+ }
+
+ /* originating subsystem */
+ if (uevent_ops && uevent_ops->name)
+ subsystem = uevent_ops->name(kobj);
+ else
+ subsystem = kobject_name(&kset->kobj);
+ if (!subsystem) {
+ pr_debug("kobject: '%s' (%p): %s: unset subsystem caused the "
+ "event to drop!\n", kobject_name(kobj), kobj,
+ __func__);
+ return 0;
+ }
+
+ /* environment buffer */
+ env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
+ if (!env)
+ return -ENOMEM;
+
+ /* complete object path */
+ devpath = kobject_get_path(kobj, GFP_KERNEL);
+ if (!devpath) {
+ retval = -ENOENT;
+ goto exit;
+ }
+
+ /* default keys */
+ retval = add_uevent_var(env, "ACTION=%s", action_string);
+ if (retval)
+ goto exit;
+ retval = add_uevent_var(env, "DEVPATH=%s", devpath);
+ if (retval)
+ goto exit;
+ retval = add_uevent_var(env, "SUBSYSTEM=%s", subsystem);
+ if (retval)
+ goto exit;
+
+ /* keys passed in from the caller */
+ if (envp_ext) {
+ for (i = 0; envp_ext[i]; i++) {
+ retval = add_uevent_var(env, "%s", envp_ext[i]);
+ if (retval)
+ goto exit;
+ }
+ }
+
+ /* let the kset specific function add its stuff */
+ if (uevent_ops && uevent_ops->uevent) {
+ retval = uevent_ops->uevent(kobj, env);
+ if (retval) {
+ pr_debug("kobject: '%s' (%p): %s: uevent() returned "
+ "%d\n", kobject_name(kobj), kobj,
+ __func__, retval);
+ goto exit;
+ }
+ }
+
+ switch (action) {
+ case KOBJ_ADD:
+ /*
+ * Mark "add" event so we can make sure we deliver "remove"
+ * event to userspace during automatic cleanup. If
+ * the object did send an "add" event, "remove" will
+ * automatically generated by the core, if not already done
+ * by the caller.
+ */
+ kobj->state_add_uevent_sent = 1;
+ break;
+
+ case KOBJ_UNBIND:
+ zap_modalias_env(env);
+ break;
+
+ default:
+ break;
+ }
+
+ mutex_lock(&uevent_sock_mutex);
+ /* we will send an event, so request a new sequence number */
+ retval = add_uevent_var(env, "SEQNUM=%llu", ++uevent_seqnum);
+ if (retval) {
+ mutex_unlock(&uevent_sock_mutex);
+ goto exit;
+ }
+ retval = kobject_uevent_net_broadcast(kobj, env, action_string,
+ devpath);
+ mutex_unlock(&uevent_sock_mutex);
+
+#ifdef CONFIG_UEVENT_HELPER
+ /* call uevent_helper, usually only enabled during early boot */
+ if (uevent_helper[0] && !kobj_usermode_filter(kobj)) {
+ struct subprocess_info *info;
+
+ retval = add_uevent_var(env, "HOME=/");
+ if (retval)
+ goto exit;
+ retval = add_uevent_var(env,
+ "PATH=/sbin:/bin:/usr/sbin:/usr/bin");
+ if (retval)
+ goto exit;
+ retval = init_uevent_argv(env, subsystem);
+ if (retval)
+ goto exit;
+
+ retval = -ENOMEM;
+ info = call_usermodehelper_setup(env->argv[0], env->argv,
+ env->envp, GFP_KERNEL,
+ NULL, cleanup_uevent_env, env);
+ if (info) {
+ retval = call_usermodehelper_exec(info, UMH_NO_WAIT);
+ env = NULL; /* freed by cleanup_uevent_env */
+ }
+ }
+#endif
+
+exit:
+ kfree(devpath);
+ kfree(env);
+ return retval;
+}
+EXPORT_SYMBOL_GPL(kobject_uevent_env);
+
+/**
+ * kobject_uevent - notify userspace by sending an uevent
+ *
+ * @kobj: struct kobject that the action is happening to
+ * @action: action that is happening
+ *
+ * Returns 0 if kobject_uevent() is completed with success or the
+ * corresponding error when it fails.
+ */
+int kobject_uevent(struct kobject *kobj, enum kobject_action action)
+{
+ return kobject_uevent_env(kobj, action, NULL);
+}
+EXPORT_SYMBOL_GPL(kobject_uevent);
+
+/**
+ * add_uevent_var - add key value string to the environment buffer
+ * @env: environment buffer structure
+ * @format: printf format for the key=value pair
+ *
+ * Returns 0 if environment variable was added successfully or -ENOMEM
+ * if no space was available.
+ */
+int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...)
+{
+ va_list args;
+ int len;
+
+ if (env->envp_idx >= ARRAY_SIZE(env->envp)) {
+ WARN(1, KERN_ERR "add_uevent_var: too many keys\n");
+ return -ENOMEM;
+ }
+
+ va_start(args, format);
+ len = vsnprintf(&env->buf[env->buflen],
+ sizeof(env->buf) - env->buflen,
+ format, args);
+ va_end(args);
+
+ if (len >= (sizeof(env->buf) - env->buflen)) {
+ WARN(1, KERN_ERR "add_uevent_var: buffer size too small\n");
+ return -ENOMEM;
+ }
+
+ env->envp[env->envp_idx++] = &env->buf[env->buflen];
+ env->buflen += len + 1;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(add_uevent_var);
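+
+/*
+ * Editor's sketch (hypothetical callback, illustrative only): a kset's
+ * uevent() hook typically appends extra KEY=VALUE pairs this way:
+ *
+ *	static int demo_uevent(const struct kobject *kobj,
+ *			       struct kobj_uevent_env *env)
+ *	{
+ *		return add_uevent_var(env, "DEMO_STATE=%s", "ready");
+ *	}
+ */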
+
+#if defined(CONFIG_NET)
+static int uevent_net_broadcast(struct sock *usk, struct sk_buff *skb,
+ struct netlink_ext_ack *extack)
+{
+ /* u64 to chars: 2^64 - 1 needs 20 chars; +1 slack = 21 */
+ char buf[sizeof("SEQNUM=") + 21];
+ struct sk_buff *skbc;
+ int ret;
+
+ /* bump and prepare sequence number */
+ ret = snprintf(buf, sizeof(buf), "SEQNUM=%llu", ++uevent_seqnum);
+ if (ret < 0 || (size_t)ret >= sizeof(buf))
+ return -ENOMEM;
+ ret++;
+
+ /* verify message does not overflow */
+ if ((skb->len + ret) > UEVENT_BUFFER_SIZE) {
+ NL_SET_ERR_MSG(extack, "uevent message too big");
+ return -EINVAL;
+ }
+
+ /* copy skb and extend to accommodate sequence number */
+ skbc = skb_copy_expand(skb, 0, ret, GFP_KERNEL);
+ if (!skbc)
+ return -ENOMEM;
+
+ /* append sequence number */
+ skb_put_data(skbc, buf, ret);
+
+ /* remove msg header */
+ skb_pull(skbc, NLMSG_HDRLEN);
+
+ /* set portid 0 to inform userspace message comes from kernel */
+ NETLINK_CB(skbc).portid = 0;
+ NETLINK_CB(skbc).dst_group = 1;
+
+ ret = netlink_broadcast(usk, skbc, 0, 1, GFP_KERNEL);
+ /* ENOBUFS should be handled in userspace */
+ if (ret == -ENOBUFS || ret == -ESRCH)
+ ret = 0;
+
+ return ret;
+}
+
+static int uevent_net_rcv_skb(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
+{
+ struct net *net;
+ int ret;
+
+ if (!nlmsg_data(nlh))
+ return -EINVAL;
+
+ /*
+ * Verify that we are allowed to send messages to the target
+ * network namespace. The caller must have CAP_SYS_ADMIN in the
+ * owning user namespace of the target network namespace.
+ */
+ net = sock_net(NETLINK_CB(skb).sk);
+ if (!netlink_ns_capable(skb, net->user_ns, CAP_SYS_ADMIN)) {
+ NL_SET_ERR_MSG(extack, "missing CAP_SYS_ADMIN capability");
+ return -EPERM;
+ }
+
+ mutex_lock(&uevent_sock_mutex);
+ ret = uevent_net_broadcast(net->uevent_sock->sk, skb, extack);
+ mutex_unlock(&uevent_sock_mutex);
+
+ return ret;
+}
+
+static void uevent_net_rcv(struct sk_buff *skb)
+{
+ netlink_rcv_skb(skb, &uevent_net_rcv_skb);
+}
+
+static int uevent_net_init(struct net *net)
+{
+ struct uevent_sock *ue_sk;
+ struct netlink_kernel_cfg cfg = {
+ .groups = 1,
+ .input = uevent_net_rcv,
+ .flags = NL_CFG_F_NONROOT_RECV
+ };
+
+ ue_sk = kzalloc(sizeof(*ue_sk), GFP_KERNEL);
+ if (!ue_sk)
+ return -ENOMEM;
+
+ ue_sk->sk = netlink_kernel_create(net, NETLINK_KOBJECT_UEVENT, &cfg);
+ if (!ue_sk->sk) {
+ pr_err("kobject_uevent: unable to create netlink socket!\n");
+ kfree(ue_sk);
+ return -ENODEV;
+ }
+
+ net->uevent_sock = ue_sk;
+
+ /* Restrict uevents to initial user namespace. */
+ if (sock_net(ue_sk->sk)->user_ns == &init_user_ns) {
+ mutex_lock(&uevent_sock_mutex);
+ list_add_tail(&ue_sk->list, &uevent_sock_list);
+ mutex_unlock(&uevent_sock_mutex);
+ }
+
+ return 0;
+}
+
+static void uevent_net_exit(struct net *net)
+{
+ struct uevent_sock *ue_sk = net->uevent_sock;
+
+ if (sock_net(ue_sk->sk)->user_ns == &init_user_ns) {
+ mutex_lock(&uevent_sock_mutex);
+ list_del(&ue_sk->list);
+ mutex_unlock(&uevent_sock_mutex);
+ }
+
+ netlink_kernel_release(ue_sk->sk);
+ kfree(ue_sk);
+}
+
+static struct pernet_operations uevent_net_ops = {
+ .init = uevent_net_init,
+ .exit = uevent_net_exit,
+};
+
+static int __init kobject_uevent_init(void)
+{
+ return register_pernet_subsys(&uevent_net_ops);
+}
+
+
+postcore_initcall(kobject_uevent_init);
+#endif
diff --git a/lib/kstrtox.c b/lib/kstrtox.c
new file mode 100644
index 000000000..d586e6af5
--- /dev/null
+++ b/lib/kstrtox.c
@@ -0,0 +1,431 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Convert integer string representation to an integer.
+ * If an integer doesn't fit into the specified type, -E is returned.
+ *
+ * Integer starts with optional sign.
+ * kstrtou*() functions do not accept sign "-".
+ *
+ * Radix 0 means autodetection: leading "0x" implies radix 16,
+ * leading "0" implies radix 8, otherwise radix is 10.
+ * Autodetection hints work after optional sign, but not before.
+ *
+ * If -E is returned, result is not touched.
+ */
+#include <linux/ctype.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/kstrtox.h>
+#include <linux/math64.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+
+#include "kstrtox.h"
+
+noinline
+const char *_parse_integer_fixup_radix(const char *s, unsigned int *base)
+{
+ if (*base == 0) {
+ if (s[0] == '0') {
+ if (_tolower(s[1]) == 'x' && isxdigit(s[2]))
+ *base = 16;
+ else
+ *base = 8;
+ } else
+ *base = 10;
+ }
+ if (*base == 16 && s[0] == '0' && _tolower(s[1]) == 'x')
+ s += 2;
+ return s;
+}
+
+/*
+ * Convert non-negative integer string representation in explicitly given radix
+ * to an integer. A maximum of max_chars characters will be converted.
+ *
+ * Returns the number of characters consumed, possibly OR-ed with the
+ * overflow bit.
+ * If overflow occurs, result integer (incorrect) is still returned.
+ *
+ * Don't you dare use this function.
+ */
+noinline
+unsigned int _parse_integer_limit(const char *s, unsigned int base, unsigned long long *p,
+ size_t max_chars)
+{
+ unsigned long long res;
+ unsigned int rv;
+
+ res = 0;
+ rv = 0;
+ while (max_chars--) {
+ unsigned int c = *s;
+ unsigned int lc = _tolower(c);
+ unsigned int val;
+
+ if ('0' <= c && c <= '9')
+ val = c - '0';
+ else if ('a' <= lc && lc <= 'f')
+ val = lc - 'a' + 10;
+ else
+ break;
+
+ if (val >= base)
+ break;
+ /*
+ * Check for overflow only if we are within range of
+ * it in the max base we support (16)
+ */
+ if (unlikely(res & (~0ull << 60))) {
+ if (res > div_u64(ULLONG_MAX - val, base))
+ rv |= KSTRTOX_OVERFLOW;
+ }
+ res = res * base + val;
+ rv++;
+ s++;
+ }
+ *p = res;
+ return rv;
+}
+
+noinline
+unsigned int _parse_integer(const char *s, unsigned int base, unsigned long long *p)
+{
+ return _parse_integer_limit(s, base, p, INT_MAX);
+}
+
+static int _kstrtoull(const char *s, unsigned int base, unsigned long long *res)
+{
+ unsigned long long _res;
+ unsigned int rv;
+
+ s = _parse_integer_fixup_radix(s, &base);
+ rv = _parse_integer(s, base, &_res);
+ if (rv & KSTRTOX_OVERFLOW)
+ return -ERANGE;
+ if (rv == 0)
+ return -EINVAL;
+ s += rv;
+ if (*s == '\n')
+ s++;
+ if (*s)
+ return -EINVAL;
+ *res = _res;
+ return 0;
+}
+
+/**
+ * kstrtoull - convert a string to an unsigned long long
+ * @s: The start of the string. The string must be null-terminated, and may also
+ * include a single newline before its terminating null. The first character
+ * may also be a plus sign, but not a minus sign.
+ * @base: The number base to use. The maximum supported base is 16. If base is
+ * given as 0, then the base of the string is automatically detected with the
+ * conventional semantics - If it begins with 0x the number will be parsed as a
+ * hexadecimal (case insensitive), if it otherwise begins with 0, it will be
+ * parsed as an octal number. Otherwise it will be parsed as a decimal.
+ * @res: Where to write the result of the conversion on success.
+ *
+ * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
+ * Preferred over simple_strtoull(). Return code must be checked.
+ */
+noinline
+int kstrtoull(const char *s, unsigned int base, unsigned long long *res)
+{
+ if (s[0] == '+')
+ s++;
+ return _kstrtoull(s, base, res);
+}
+EXPORT_SYMBOL(kstrtoull);
+
+/**
+ * kstrtoll - convert a string to a long long
+ * @s: The start of the string. The string must be null-terminated, and may also
+ * include a single newline before its terminating null. The first character
+ * may also be a plus sign or a minus sign.
+ * @base: The number base to use. The maximum supported base is 16. If base is
+ * given as 0, then the base of the string is automatically detected with the
+ * conventional semantics - If it begins with 0x the number will be parsed as a
+ * hexadecimal (case insensitive), if it otherwise begins with 0, it will be
+ * parsed as an octal number. Otherwise it will be parsed as a decimal.
+ * @res: Where to write the result of the conversion on success.
+ *
+ * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
+ * Preferred over simple_strtoll(). Return code must be checked.
+ */
+noinline
+int kstrtoll(const char *s, unsigned int base, long long *res)
+{
+ unsigned long long tmp;
+ int rv;
+
+ if (s[0] == '-') {
+ rv = _kstrtoull(s + 1, base, &tmp);
+ if (rv < 0)
+ return rv;
+ if ((long long)-tmp > 0)
+ return -ERANGE;
+ *res = -tmp;
+ } else {
+ rv = kstrtoull(s, base, &tmp);
+ if (rv < 0)
+ return rv;
+ if ((long long)tmp < 0)
+ return -ERANGE;
+ *res = tmp;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(kstrtoll);
+
+/* Internal, do not use. */
+int _kstrtoul(const char *s, unsigned int base, unsigned long *res)
+{
+ unsigned long long tmp;
+ int rv;
+
+ rv = kstrtoull(s, base, &tmp);
+ if (rv < 0)
+ return rv;
+ if (tmp != (unsigned long)tmp)
+ return -ERANGE;
+ *res = tmp;
+ return 0;
+}
+EXPORT_SYMBOL(_kstrtoul);
+
+/* Internal, do not use. */
+int _kstrtol(const char *s, unsigned int base, long *res)
+{
+ long long tmp;
+ int rv;
+
+ rv = kstrtoll(s, base, &tmp);
+ if (rv < 0)
+ return rv;
+ if (tmp != (long)tmp)
+ return -ERANGE;
+ *res = tmp;
+ return 0;
+}
+EXPORT_SYMBOL(_kstrtol);
+
+/**
+ * kstrtouint - convert a string to an unsigned int
+ * @s: The start of the string. The string must be null-terminated, and may also
+ * include a single newline before its terminating null. The first character
+ * may also be a plus sign, but not a minus sign.
+ * @base: The number base to use. The maximum supported base is 16. If base is
+ * given as 0, then the base of the string is automatically detected with the
+ * conventional semantics - If it begins with 0x the number will be parsed as a
+ * hexadecimal (case insensitive), if it otherwise begins with 0, it will be
+ * parsed as an octal number. Otherwise it will be parsed as a decimal.
+ * @res: Where to write the result of the conversion on success.
+ *
+ * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
+ * Preferred over simple_strtoul(). Return code must be checked.
+ */
+noinline
+int kstrtouint(const char *s, unsigned int base, unsigned int *res)
+{
+ unsigned long long tmp;
+ int rv;
+
+ rv = kstrtoull(s, base, &tmp);
+ if (rv < 0)
+ return rv;
+ if (tmp != (unsigned int)tmp)
+ return -ERANGE;
+ *res = tmp;
+ return 0;
+}
+EXPORT_SYMBOL(kstrtouint);
+
+/**
+ * kstrtoint - convert a string to an int
+ * @s: The start of the string. The string must be null-terminated, and may also
+ * include a single newline before its terminating null. The first character
+ * may also be a plus sign or a minus sign.
+ * @base: The number base to use. The maximum supported base is 16. If base is
+ * given as 0, then the base of the string is automatically detected with the
+ * conventional semantics - If it begins with 0x the number will be parsed as a
+ * hexadecimal (case insensitive), if it otherwise begins with 0, it will be
+ * parsed as an octal number. Otherwise it will be parsed as a decimal.
+ * @res: Where to write the result of the conversion on success.
+ *
+ * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
+ * Preferred over simple_strtol(). Return code must be checked.
+ */
+noinline
+int kstrtoint(const char *s, unsigned int base, int *res)
+{
+ long long tmp;
+ int rv;
+
+ rv = kstrtoll(s, base, &tmp);
+ if (rv < 0)
+ return rv;
+ if (tmp != (int)tmp)
+ return -ERANGE;
+ *res = tmp;
+ return 0;
+}
+EXPORT_SYMBOL(kstrtoint);
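+
+/*
+ * Editor's sketch (hypothetical "value" store handler, illustrative
+ * only): the usual caller pattern, with the return code checked as
+ * required above:
+ *
+ *	static ssize_t value_store(struct kobject *kobj,
+ *				   struct kobj_attribute *attr,
+ *				   const char *buf, size_t count)
+ *	{
+ *		int ret = kstrtoint(buf, 10, &value);
+ *
+ *		if (ret)
+ *			return ret;
+ *		return count;
+ *	}
+ */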
+
+noinline
+int kstrtou16(const char *s, unsigned int base, u16 *res)
+{
+ unsigned long long tmp;
+ int rv;
+
+ rv = kstrtoull(s, base, &tmp);
+ if (rv < 0)
+ return rv;
+ if (tmp != (u16)tmp)
+ return -ERANGE;
+ *res = tmp;
+ return 0;
+}
+EXPORT_SYMBOL(kstrtou16);
+
+noinline
+int kstrtos16(const char *s, unsigned int base, s16 *res)
+{
+ long long tmp;
+ int rv;
+
+ rv = kstrtoll(s, base, &tmp);
+ if (rv < 0)
+ return rv;
+ if (tmp != (s16)tmp)
+ return -ERANGE;
+ *res = tmp;
+ return 0;
+}
+EXPORT_SYMBOL(kstrtos16);
+
+noinline
+int kstrtou8(const char *s, unsigned int base, u8 *res)
+{
+ unsigned long long tmp;
+ int rv;
+
+ rv = kstrtoull(s, base, &tmp);
+ if (rv < 0)
+ return rv;
+ if (tmp != (u8)tmp)
+ return -ERANGE;
+ *res = tmp;
+ return 0;
+}
+EXPORT_SYMBOL(kstrtou8);
+
+noinline
+int kstrtos8(const char *s, unsigned int base, s8 *res)
+{
+ long long tmp;
+ int rv;
+
+ rv = kstrtoll(s, base, &tmp);
+ if (rv < 0)
+ return rv;
+ if (tmp != (s8)tmp)
+ return -ERANGE;
+ *res = tmp;
+ return 0;
+}
+EXPORT_SYMBOL(kstrtos8);
+
+/**
+ * kstrtobool - convert common user inputs into boolean values
+ * @s: input string
+ * @res: result
+ *
+ * This routine returns 0 iff the first character is one of 'YyTt1NnFf0', or
+ * [oO][NnFf] for "on" and "off". Otherwise it will return -EINVAL. Value
+ * pointed to by res is updated upon finding a match.
+ */
+noinline
+int kstrtobool(const char *s, bool *res)
+{
+ if (!s)
+ return -EINVAL;
+
+ switch (s[0]) {
+ case 'y':
+ case 'Y':
+ case 't':
+ case 'T':
+ case '1':
+ *res = true;
+ return 0;
+ case 'n':
+ case 'N':
+ case 'f':
+ case 'F':
+ case '0':
+ *res = false;
+ return 0;
+ case 'o':
+ case 'O':
+ switch (s[1]) {
+ case 'n':
+ case 'N':
+ *res = true;
+ return 0;
+ case 'f':
+ case 'F':
+ *res = false;
+ return 0;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL(kstrtobool);
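+
+/*
+ * Editor's sketch (illustrative only): all of the following parse
+ * successfully; return codes are omitted here but must be checked in
+ * real code:
+ *
+ *	bool enabled;
+ *
+ *	kstrtobool("y", &enabled);	sets enabled = true
+ *	kstrtobool("on", &enabled);	sets enabled = true
+ *	kstrtobool("0", &enabled);	sets enabled = false
+ */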
+
+/*
+ * Since "base" would be a nonsense argument, this open-codes the
+ * _from_user helper instead of using the helper macro below.
+ */
+int kstrtobool_from_user(const char __user *s, size_t count, bool *res)
+{
+ /* Longest string needed to differentiate, newline, terminator */
+ char buf[4];
+
+ count = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, s, count))
+ return -EFAULT;
+ buf[count] = '\0';
+ return kstrtobool(buf, res);
+}
+EXPORT_SYMBOL(kstrtobool_from_user);
+
+#define kstrto_from_user(f, g, type) \
+int f(const char __user *s, size_t count, unsigned int base, type *res) \
+{ \
+ /* sign, base 2 representation, newline, terminator */ \
+ char buf[1 + sizeof(type) * 8 + 1 + 1]; \
+ \
+ count = min(count, sizeof(buf) - 1); \
+ if (copy_from_user(buf, s, count)) \
+ return -EFAULT; \
+ buf[count] = '\0'; \
+ return g(buf, base, res); \
+} \
+EXPORT_SYMBOL(f)
+
+kstrto_from_user(kstrtoull_from_user, kstrtoull, unsigned long long);
+kstrto_from_user(kstrtoll_from_user, kstrtoll, long long);
+kstrto_from_user(kstrtoul_from_user, kstrtoul, unsigned long);
+kstrto_from_user(kstrtol_from_user, kstrtol, long);
+kstrto_from_user(kstrtouint_from_user, kstrtouint, unsigned int);
+kstrto_from_user(kstrtoint_from_user, kstrtoint, int);
+kstrto_from_user(kstrtou16_from_user, kstrtou16, u16);
+kstrto_from_user(kstrtos16_from_user, kstrtos16, s16);
+kstrto_from_user(kstrtou8_from_user, kstrtou8, u8);
+kstrto_from_user(kstrtos8_from_user, kstrtos8, s8);
diff --git a/lib/kstrtox.h b/lib/kstrtox.h
new file mode 100644
index 000000000..158c400ca
--- /dev/null
+++ b/lib/kstrtox.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LIB_KSTRTOX_H
+#define _LIB_KSTRTOX_H
+
+#define KSTRTOX_OVERFLOW (1U << 31)
+const char *_parse_integer_fixup_radix(const char *s, unsigned int *base);
+unsigned int _parse_integer_limit(const char *s, unsigned int base, unsigned long long *res,
+ size_t max_chars);
+unsigned int _parse_integer(const char *s, unsigned int base, unsigned long long *res);
+
+#endif
diff --git a/lib/kunit/.kunitconfig b/lib/kunit/.kunitconfig
new file mode 100644
index 000000000..9235b7d42
--- /dev/null
+++ b/lib/kunit/.kunitconfig
@@ -0,0 +1,3 @@
+CONFIG_KUNIT=y
+CONFIG_KUNIT_TEST=y
+CONFIG_KUNIT_EXAMPLE_TEST=y
diff --git a/lib/kunit/Kconfig b/lib/kunit/Kconfig
new file mode 100644
index 000000000..68a6daec0
--- /dev/null
+++ b/lib/kunit/Kconfig
@@ -0,0 +1,73 @@
+#
+# KUnit base configuration
+#
+
+menuconfig KUNIT
+ tristate "KUnit - Enable support for unit tests"
+ select GLOB
+ help
+ Enables support for kernel unit tests (KUnit), a lightweight unit
+ testing and mocking framework for the Linux kernel. These tests are
+ able to be run locally on a developer's workstation without a VM or
+ special hardware when using UML. Can also be used on most other
+ architectures. For more information, please see
+ Documentation/dev-tools/kunit/.
+
+if KUNIT
+
+config KUNIT_DEBUGFS
+ bool "KUnit - Enable /sys/kernel/debug/kunit debugfs representation" if !KUNIT_ALL_TESTS
+ default KUNIT_ALL_TESTS
+ help
+ Enable debugfs representation for kunit. Currently this consists
+ of /sys/kernel/debug/kunit/<test_suite>/results files for each
+ test suite, which allow users to see results of the last test suite
+ run that occurred.
+
+config KUNIT_TEST
+ tristate "KUnit test for KUnit" if !KUNIT_ALL_TESTS
+ default KUNIT_ALL_TESTS
+ help
+ Enables the unit tests for the KUnit test framework. These tests test
+ the KUnit test framework itself; the tests are both written using
+ KUnit and test KUnit. This option should only be enabled for testing
+ purposes by developers interested in testing that KUnit works as
+ expected.
+
+config KUNIT_EXAMPLE_TEST
+ tristate "Example test for KUnit" if !KUNIT_ALL_TESTS
+ default KUNIT_ALL_TESTS
+ help
+ Enables an example unit test that illustrates some of the basic
+ features of KUnit. This test only exists to help new users understand
+ what KUnit is and how it is used. Please refer to the example test
+ itself, lib/kunit/example-test.c, for more information. This option
+ is intended for curious hackers who would like to understand how to
+ use KUnit for kernel development.
+
+config KUNIT_ALL_TESTS
+ tristate "All KUnit tests with satisfied dependencies"
+ help
+ Enables all KUnit tests, if they can be enabled.
+ KUnit tests run during boot and output the results to the debug log
+ in TAP format (http://testanything.org/). Only useful for kernel devs
+ running the KUnit test harness, and not intended for inclusion into a
+ production build.
+
+ For more information on KUnit and unit tests in general please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
+config KUNIT_DEFAULT_ENABLED
+ bool "Default value of kunit.enable"
+ default y
+ help
+ Sets the default value of kunit.enable. If set to N then KUnit
+ tests will not execute unless kunit.enable=1 is passed to the
+ kernel command line.
+
+ In most cases this should be left as Y. Only if additional opt-in
+ behavior is needed should this be set to N.
+
+endif # KUNIT
diff --git a/lib/kunit/Makefile b/lib/kunit/Makefile
new file mode 100644
index 000000000..46f75f23d
--- /dev/null
+++ b/lib/kunit/Makefile
@@ -0,0 +1,26 @@
+obj-$(CONFIG_KUNIT) += kunit.o
+
+kunit-objs += test.o \
+ resource.o \
+ static_stub.o \
+ string-stream.o \
+ assert.o \
+ try-catch.o \
+ executor.o \
+ attributes.o
+
+ifeq ($(CONFIG_KUNIT_DEBUGFS),y)
+kunit-objs += debugfs.o
+endif
+
+# KUnit 'hooks' are built-in even when KUnit is built as a module.
+obj-y += hooks.o
+
+obj-$(CONFIG_KUNIT_TEST) += kunit-test.o
+
+# string-stream-test compiles built-in only.
+ifeq ($(CONFIG_KUNIT_TEST),y)
+obj-$(CONFIG_KUNIT_TEST) += string-stream-test.o
+endif
+
+obj-$(CONFIG_KUNIT_EXAMPLE_TEST) += kunit-example-test.o
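
Note the split above: the objects composing kunit.o follow CONFIG_KUNIT and
may be built as a module, while hooks.o is always built in so that built-in
kernel code can reference the hook table even when KUnit itself is modular.
Wiring a new test into the build is then a one-liner in the owning
directory's Makefile, e.g. (hypothetical symbol and file name):

    obj-$(CONFIG_MY_DRIVER_KUNIT_TEST) += my_driver_test.o
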
diff --git a/lib/kunit/assert.c b/lib/kunit/assert.c
new file mode 100644
index 000000000..05a09652f
--- /dev/null
+++ b/lib/kunit/assert.c
@@ -0,0 +1,274 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Assertion and expectation serialization API.
+ *
+ * Copyright (C) 2019, Google LLC.
+ * Author: Brendan Higgins <brendanhiggins@google.com>
+ */
+#include <kunit/assert.h>
+#include <kunit/test.h>
+
+#include "string-stream.h"
+
+void kunit_assert_prologue(const struct kunit_loc *loc,
+ enum kunit_assert_type type,
+ struct string_stream *stream)
+{
+ const char *expect_or_assert = NULL;
+
+ switch (type) {
+ case KUNIT_EXPECTATION:
+ expect_or_assert = "EXPECTATION";
+ break;
+ case KUNIT_ASSERTION:
+ expect_or_assert = "ASSERTION";
+ break;
+ }
+
+ string_stream_add(stream, "%s FAILED at %s:%d\n",
+ expect_or_assert, loc->file, loc->line);
+}
+EXPORT_SYMBOL_GPL(kunit_assert_prologue);
+
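+/*
+ * For example, a failed expectation at lib/foo_test.c:42 produces a
+ * prologue of the form (file and line are illustrative):
+ *
+ * EXPECTATION FAILED at lib/foo_test.c:42
+ */
+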
+static void kunit_assert_print_msg(const struct va_format *message,
+ struct string_stream *stream)
+{
+ if (message->fmt)
+ string_stream_add(stream, "\n%pV", message);
+}
+
+void kunit_fail_assert_format(const struct kunit_assert *assert,
+ const struct va_format *message,
+ struct string_stream *stream)
+{
+ string_stream_add(stream, "%pV", message);
+}
+EXPORT_SYMBOL_GPL(kunit_fail_assert_format);
+
+void kunit_unary_assert_format(const struct kunit_assert *assert,
+ const struct va_format *message,
+ struct string_stream *stream)
+{
+ struct kunit_unary_assert *unary_assert;
+
+ unary_assert = container_of(assert, struct kunit_unary_assert, assert);
+
+ if (unary_assert->expected_true)
+ string_stream_add(stream,
+ KUNIT_SUBTEST_INDENT "Expected %s to be true, but is false\n",
+ unary_assert->condition);
+ else
+ string_stream_add(stream,
+ KUNIT_SUBTEST_INDENT "Expected %s to be false, but is true\n",
+ unary_assert->condition);
+ kunit_assert_print_msg(message, stream);
+}
+EXPORT_SYMBOL_GPL(kunit_unary_assert_format);
+
+void kunit_ptr_not_err_assert_format(const struct kunit_assert *assert,
+ const struct va_format *message,
+ struct string_stream *stream)
+{
+ struct kunit_ptr_not_err_assert *ptr_assert;
+
+ ptr_assert = container_of(assert, struct kunit_ptr_not_err_assert,
+ assert);
+
+ if (!ptr_assert->value) {
+ string_stream_add(stream,
+ KUNIT_SUBTEST_INDENT "Expected %s is not null, but is\n",
+ ptr_assert->text);
+ } else if (IS_ERR(ptr_assert->value)) {
+ string_stream_add(stream,
+ KUNIT_SUBTEST_INDENT "Expected %s is not error, but is: %ld\n",
+ ptr_assert->text,
+ PTR_ERR(ptr_assert->value));
+ }
+ kunit_assert_print_msg(message, stream);
+}
+EXPORT_SYMBOL_GPL(kunit_ptr_not_err_assert_format);
+
+/* Checks if `text` is a literal representing `value`, e.g. "5" and 5 */
+static bool is_literal(struct kunit *test, const char *text, long long value,
+ gfp_t gfp)
+{
+ char *buffer;
+ int len;
+ bool ret;
+
+ len = snprintf(NULL, 0, "%lld", value);
+ if (strlen(text) != len)
+ return false;
+
+ buffer = kunit_kmalloc(test, len+1, gfp);
+ if (!buffer)
+ return false;
+
+ snprintf(buffer, len+1, "%lld", value);
+ ret = strncmp(buffer, text, len) == 0;
+
+ kunit_kfree(test, buffer);
+ return ret;
+}
+
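+/*
+ * For example, is_literal(test, "5", 5, gfp) returns true, while
+ * is_literal(test, "len", 5, gfp) returns false. The binary assert
+ * formatter below uses this to skip redundant output such as
+ * "5 == 5 (0x5)" when an argument was already a literal.
+ */
+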
+void kunit_binary_assert_format(const struct kunit_assert *assert,
+ const struct va_format *message,
+ struct string_stream *stream)
+{
+ struct kunit_binary_assert *binary_assert;
+
+ binary_assert = container_of(assert, struct kunit_binary_assert,
+ assert);
+
+ string_stream_add(stream,
+ KUNIT_SUBTEST_INDENT "Expected %s %s %s, but\n",
+ binary_assert->text->left_text,
+ binary_assert->text->operation,
+ binary_assert->text->right_text);
+ if (!is_literal(stream->test, binary_assert->text->left_text,
+ binary_assert->left_value, stream->gfp))
+ string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == %lld (0x%llx)\n",
+ binary_assert->text->left_text,
+ binary_assert->left_value,
+ binary_assert->left_value);
+ if (!is_literal(stream->test, binary_assert->text->right_text,
+ binary_assert->right_value, stream->gfp))
+ string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == %lld (0x%llx)",
+ binary_assert->text->right_text,
+ binary_assert->right_value,
+ binary_assert->right_value);
+ kunit_assert_print_msg(message, stream);
+}
+EXPORT_SYMBOL_GPL(kunit_binary_assert_format);
+
+void kunit_binary_ptr_assert_format(const struct kunit_assert *assert,
+ const struct va_format *message,
+ struct string_stream *stream)
+{
+ struct kunit_binary_ptr_assert *binary_assert;
+
+ binary_assert = container_of(assert, struct kunit_binary_ptr_assert,
+ assert);
+
+ string_stream_add(stream,
+ KUNIT_SUBTEST_INDENT "Expected %s %s %s, but\n",
+ binary_assert->text->left_text,
+ binary_assert->text->operation,
+ binary_assert->text->right_text);
+ string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == %px\n",
+ binary_assert->text->left_text,
+ binary_assert->left_value);
+ string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == %px",
+ binary_assert->text->right_text,
+ binary_assert->right_value);
+ kunit_assert_print_msg(message, stream);
+}
+EXPORT_SYMBOL_GPL(kunit_binary_ptr_assert_format);
+
+/* Checks if KUNIT_EXPECT_STREQ() args were string literals.
+ * Note: `text` will have ""s whereas `value` will not.
+ */
+static bool is_str_literal(const char *text, const char *value)
+{
+ int len;
+
+ len = strlen(text);
+ if (len < 2)
+ return false;
+ if (text[0] != '\"' || text[len - 1] != '\"')
+ return false;
+
+ return strncmp(text + 1, value, len - 2) == 0;
+}
+
+void kunit_binary_str_assert_format(const struct kunit_assert *assert,
+ const struct va_format *message,
+ struct string_stream *stream)
+{
+ struct kunit_binary_str_assert *binary_assert;
+
+ binary_assert = container_of(assert, struct kunit_binary_str_assert,
+ assert);
+
+ string_stream_add(stream,
+ KUNIT_SUBTEST_INDENT "Expected %s %s %s, but\n",
+ binary_assert->text->left_text,
+ binary_assert->text->operation,
+ binary_assert->text->right_text);
+ if (!is_str_literal(binary_assert->text->left_text, binary_assert->left_value))
+ string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == \"%s\"\n",
+ binary_assert->text->left_text,
+ binary_assert->left_value);
+ if (!is_str_literal(binary_assert->text->right_text, binary_assert->right_value))
+ string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == \"%s\"",
+ binary_assert->text->right_text,
+ binary_assert->right_value);
+ kunit_assert_print_msg(message, stream);
+}
+EXPORT_SYMBOL_GPL(kunit_binary_str_assert_format);
+
+/* Adds a hexdump of a buffer to a string_stream comparing it with
+ * a second buffer. The different bytes are marked with <>.
+ */
+static void kunit_assert_hexdump(struct string_stream *stream,
+ const void *buf,
+ const void *compared_buf,
+ const size_t len)
+{
+ size_t i;
+ const u8 *buf1 = buf;
+ const u8 *buf2 = compared_buf;
+
+ string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT);
+
+ for (i = 0; i < len; ++i) {
+ if (!(i % 16) && i)
+ string_stream_add(stream, "\n" KUNIT_SUBSUBTEST_INDENT);
+
+ if (buf1[i] != buf2[i])
+ string_stream_add(stream, "<%02x>", buf1[i]);
+ else
+ string_stream_add(stream, " %02x ", buf1[i]);
+ }
+}
+
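+/*
+ * For example, comparing { 0x00, 0x11, 0x22 } against { 0x00, 0xff, 0x22 }
+ * renders as " 00 <11> 22 ", with only the differing byte bracketed.
+ */
+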
+void kunit_mem_assert_format(const struct kunit_assert *assert,
+ const struct va_format *message,
+ struct string_stream *stream)
+{
+ struct kunit_mem_assert *mem_assert;
+
+ mem_assert = container_of(assert, struct kunit_mem_assert,
+ assert);
+
+ if (!mem_assert->left_value) {
+ string_stream_add(stream,
+ KUNIT_SUBTEST_INDENT "Expected %s is not null, but is\n",
+ mem_assert->text->left_text);
+ } else if (!mem_assert->right_value) {
+ string_stream_add(stream,
+ KUNIT_SUBTEST_INDENT "Expected %s is not null, but is\n",
+ mem_assert->text->right_text);
+ } else {
+ string_stream_add(stream,
+ KUNIT_SUBTEST_INDENT "Expected %s %s %s, but\n",
+ mem_assert->text->left_text,
+ mem_assert->text->operation,
+ mem_assert->text->right_text);
+
+ string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s ==\n",
+ mem_assert->text->left_text);
+ kunit_assert_hexdump(stream, mem_assert->left_value,
+ mem_assert->right_value, mem_assert->size);
+
+ string_stream_add(stream, "\n");
+
+ string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s ==\n",
+ mem_assert->text->right_text);
+ kunit_assert_hexdump(stream, mem_assert->right_value,
+ mem_assert->left_value, mem_assert->size);
+
+ kunit_assert_print_msg(message, stream);
+ }
+}
+EXPORT_SYMBOL_GPL(kunit_mem_assert_format);
diff --git a/lib/kunit/attributes.c b/lib/kunit/attributes.c
new file mode 100644
index 000000000..1b512f7e1
--- /dev/null
+++ b/lib/kunit/attributes.c
@@ -0,0 +1,414 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit API to save and access test attributes
+ *
+ * Copyright (C) 2023, Google LLC.
+ * Author: Rae Moar <rmoar@google.com>
+ */
+
+#include <kunit/test.h>
+#include <kunit/attributes.h>
+
+/* Options for printing attributes:
+ * PRINT_ALWAYS - attribute is printed for every test case and suite if set
+ * PRINT_SUITE - attribute is printed for every suite if set but not for test cases
+ * PRINT_NEVER - attribute is never printed
+ */
+enum print_ops {
+ PRINT_ALWAYS,
+ PRINT_SUITE,
+ PRINT_NEVER,
+};
+
+/**
+ * struct kunit_attr - represents a test attribute and holds flexible
+ * helper functions to interact with the attribute.
+ *
+ * @name: name of the test attribute, e.g. speed
+ * @get_attr: function to return attribute value given a test
+ * @to_string: function to return string representation of given
+ * attribute value
+ * @filter: function to indicate whether a given attribute value passes a
+ * filter
+ * @attr_default: default attribute value used during filtering
+ * @print: value of enum print_ops to indicate when to print attribute
+ */
+struct kunit_attr {
+ const char *name;
+ void *(*get_attr)(void *test_or_suite, bool is_test);
+ const char *(*to_string)(void *attr, bool *to_free);
+ int (*filter)(void *attr, const char *input, int *err);
+ void *attr_default;
+ enum print_ops print;
+};
+
+/* String Lists for enum Attributes */
+
+static const char * const speed_str_list[] = {"unset", "very_slow", "slow", "normal"};
+
+/* To String Methods */
+
+static const char *attr_enum_to_string(void *attr, const char * const str_list[], bool *to_free)
+{
+ long val = (long)attr;
+
+ *to_free = false;
+ if (!val)
+ return NULL;
+ return str_list[val];
+}
+
+static const char *attr_speed_to_string(void *attr, bool *to_free)
+{
+ return attr_enum_to_string(attr, speed_str_list, to_free);
+}
+
+static const char *attr_string_to_string(void *attr, bool *to_free)
+{
+ *to_free = false;
+ return (char *) attr;
+}
+
+/* Filter Methods */
+
+static const char op_list[] = "<>!=";
+
+/*
+ * Returns whether the integer value matches the filter given by the
+ * operation string and input integer.
+ */
+static int int_filter(long val, const char *op, int input, int *err)
+{
+ if (!strncmp(op, "<=", 2))
+ return (val <= input);
+ else if (!strncmp(op, ">=", 2))
+ return (val >= input);
+ else if (!strncmp(op, "!=", 2))
+ return (val != input);
+ else if (!strncmp(op, ">", 1))
+ return (val > input);
+ else if (!strncmp(op, "<", 1))
+ return (val < input);
+ else if (!strncmp(op, "=", 1))
+ return (val == input);
+ *err = -EINVAL;
+ pr_err("kunit executor: invalid filter operation: %s\n", op);
+ return false;
+}
+
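+/*
+ * For example, for the filter "speed>slow", attr_speed_filter() resolves
+ * "slow" to index 2 of speed_str_list, so this reduces to (val > 2) and
+ * only KUNIT_SPEED_NORMAL passes.
+ */
+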
+/*
+ * Returns whether the enum value "attr" matches the filter given by the
+ * input string. Note: str_list maps the enum values to their string
+ * representations.
+ */
+static int attr_enum_filter(void *attr, const char *input, int *err,
+ const char * const str_list[], int max)
+{
+ int i, j, input_int = -1;
+ long test_val = (long)attr;
+ const char *input_val = NULL;
+
+ for (i = 0; input[i]; i++) {
+ if (!strchr(op_list, input[i])) {
+ input_val = input + i;
+ break;
+ }
+ }
+
+ if (!input_val) {
+ *err = -EINVAL;
+ pr_err("kunit executor: filter value not found: %s\n", input);
+ return false;
+ }
+
+ for (j = 0; j <= max; j++) {
+ if (!strcmp(input_val, str_list[j]))
+ input_int = j;
+ }
+
+ if (input_int < 0) {
+ *err = -EINVAL;
+ pr_err("kunit executor: invalid filter input: %s\n", input);
+ return false;
+ }
+
+ return int_filter(test_val, input, input_int, err);
+}
+
+static int attr_speed_filter(void *attr, const char *input, int *err)
+{
+ return attr_enum_filter(attr, input, err, speed_str_list, KUNIT_SPEED_MAX);
+}
+
+/*
+ * Returns whether the string value (attr) matches the filter given
+ * by the input string.
+ */
+static int attr_string_filter(void *attr, const char *input, int *err)
+{
+ char *str = attr;
+
+ if (!strncmp(input, "<", 1)) {
+ *err = -EINVAL;
+ pr_err("kunit executor: invalid filter input: %s\n", input);
+ return false;
+ } else if (!strncmp(input, ">", 1)) {
+ *err = -EINVAL;
+ pr_err("kunit executor: invalid filter input: %s\n", input);
+ return false;
+ } else if (!strncmp(input, "!=", 2)) {
+ return (strcmp(input + 2, str) != 0);
+ } else if (!strncmp(input, "=", 1)) {
+ return (strcmp(input + 1, str) == 0);
+ }
+ *err = -EINVAL;
+ pr_err("kunit executor: invalid filter operation: %s\n", input);
+ return false;
+}
+
+/* Get Attribute Methods */
+
+static void *attr_speed_get(void *test_or_suite, bool is_test)
+{
+ struct kunit_suite *suite = is_test ? NULL : test_or_suite;
+ struct kunit_case *test = is_test ? test_or_suite : NULL;
+
+ if (test)
+ return ((void *) test->attr.speed);
+ else
+ return ((void *) suite->attr.speed);
+}
+
+static void *attr_module_get(void *test_or_suite, bool is_test)
+{
+ struct kunit_suite *suite = is_test ? NULL : test_or_suite;
+ struct kunit_case *test = is_test ? test_or_suite : NULL;
+
+ // Suites get their module attribute from their first test_case
+ if (test)
+ return ((void *) test->module_name);
+ else if (kunit_suite_num_test_cases(suite) > 0)
+ return ((void *) suite->test_cases[0].module_name);
+ else
+ return (void *) "";
+}
+
+/* List of all Test Attributes */
+
+static struct kunit_attr kunit_attr_list[] = {
+ {
+ .name = "speed",
+ .get_attr = attr_speed_get,
+ .to_string = attr_speed_to_string,
+ .filter = attr_speed_filter,
+ .attr_default = (void *)KUNIT_SPEED_NORMAL,
+ .print = PRINT_ALWAYS,
+ },
+ {
+ .name = "module",
+ .get_attr = attr_module_get,
+ .to_string = attr_string_to_string,
+ .filter = attr_string_filter,
+ .attr_default = (void *)"",
+ .print = PRINT_SUITE,
+ }
+};
+
+/* Helper Functions to Access Attributes */
+
+const char *kunit_attr_filter_name(struct kunit_attr_filter filter)
+{
+ return filter.attr->name;
+}
+
+void kunit_print_attr(void *test_or_suite, bool is_test, unsigned int test_level)
+{
+ int i;
+ bool to_free = false;
+ void *attr;
+ const char *attr_name, *attr_str;
+ struct kunit_suite *suite = is_test ? NULL : test_or_suite;
+ struct kunit_case *test = is_test ? test_or_suite : NULL;
+
+ for (i = 0; i < ARRAY_SIZE(kunit_attr_list); i++) {
+ if (kunit_attr_list[i].print == PRINT_NEVER ||
+ (test && kunit_attr_list[i].print == PRINT_SUITE))
+ continue;
+ attr = kunit_attr_list[i].get_attr(test_or_suite, is_test);
+ if (attr) {
+ attr_name = kunit_attr_list[i].name;
+ attr_str = kunit_attr_list[i].to_string(attr, &to_free);
+ if (test) {
+ kunit_log(KERN_INFO, test, "%*s# %s.%s: %s",
+ KUNIT_INDENT_LEN * test_level, "", test->name,
+ attr_name, attr_str);
+ } else {
+ kunit_log(KERN_INFO, suite, "%*s# %s: %s",
+ KUNIT_INDENT_LEN * test_level, "", attr_name, attr_str);
+ }
+
+ /* Free to_string of attribute if needed */
+ if (to_free)
+ kfree(attr_str);
+ }
+ }
+}
+
+/* Helper Functions to Filter Attributes */
+
+int kunit_get_filter_count(char *input)
+{
+ int i, comma_index = 0, count = 0;
+
+ for (i = 0; input[i]; i++) {
+ if (input[i] == ',') {
+ if ((i - comma_index) > 1)
+ count++;
+ comma_index = i;
+ }
+ }
+ if ((i - comma_index) > 0)
+ count++;
+ return count;
+}
+
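+/*
+ * For example, kunit_get_filter_count("speed>slow, module!=example")
+ * returns 2: one filter per comma-separated clause.
+ */
+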
+struct kunit_attr_filter kunit_next_attr_filter(char **filters, int *err)
+{
+ struct kunit_attr_filter filter = {};
+ int i, j, comma_index = 0, new_start_index = 0;
+ int op_index = -1, attr_index = -1;
+ char op;
+ char *input = *filters;
+
+ /* Parse input until operation */
+ for (i = 0; input[i]; i++) {
+ if (op_index < 0 && strchr(op_list, input[i])) {
+ op_index = i;
+ } else if (!comma_index && input[i] == ',') {
+ comma_index = i;
+ } else if (comma_index && input[i] != ' ') {
+ new_start_index = i;
+ break;
+ }
+ }
+
+ if (op_index <= 0) {
+ *err = -EINVAL;
+ pr_err("kunit executor: filter operation not found: %s\n", input);
+ return filter;
+ }
+
+ /* Temporarily set operator to \0 character. */
+ op = input[op_index];
+ input[op_index] = '\0';
+
+ /* Find associated kunit_attr object */
+ for (j = 0; j < ARRAY_SIZE(kunit_attr_list); j++) {
+ if (!strcmp(input, kunit_attr_list[j].name)) {
+ attr_index = j;
+ break;
+ }
+ }
+
+ input[op_index] = op;
+
+ if (attr_index < 0) {
+ *err = -EINVAL;
+ pr_err("kunit executor: attribute not found: %s\n", input);
+ } else {
+ filter.attr = &kunit_attr_list[attr_index];
+ }
+
+ if (comma_index > 0) {
+ input[comma_index] = '\0';
+ filter.input = input + op_index;
+ input = input + new_start_index;
+ } else {
+ filter.input = input + op_index;
+ input = NULL;
+ }
+
+ *filters = input;
+
+ return filter;
+}
+
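+/*
+ * For example, parsing "speed>slow, module!=example" returns a filter with
+ * attr "speed" and input ">slow", and advances *filters to the remaining
+ * "module!=example" clause for the next call.
+ */
+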
+struct kunit_suite *kunit_filter_attr_tests(const struct kunit_suite *const suite,
+ struct kunit_attr_filter filter, char *action, int *err)
+{
+ int n = 0;
+ struct kunit_case *filtered, *test_case;
+ struct kunit_suite *copy;
+ void *suite_val, *test_val;
+ bool suite_result, test_result, default_result, result;
+
+ /* Allocate memory for new copy of suite and list of test cases */
+ copy = kmemdup(suite, sizeof(*copy), GFP_KERNEL);
+ if (!copy)
+ return ERR_PTR(-ENOMEM);
+
+ kunit_suite_for_each_test_case(suite, test_case) { n++; }
+
+ filtered = kcalloc(n + 1, sizeof(*filtered), GFP_KERNEL);
+ if (!filtered) {
+ kfree(copy);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ n = 0;
+
+ /* Save filtering result on default value */
+ default_result = filter.attr->filter(filter.attr->attr_default, filter.input, err);
+ if (*err)
+ goto err;
+
+ /* Save suite attribute value and filtering result on that value */
+ suite_val = filter.attr->get_attr((void *)suite, false);
+ suite_result = filter.attr->filter(suite_val, filter.input, err);
+ if (*err)
+ goto err;
+
+ /* For each test case, save test case if passes filtering. */
+ kunit_suite_for_each_test_case(suite, test_case) {
+ test_val = filter.attr->get_attr((void *) test_case, true);
+ test_result = filter.attr->filter(test_val, filter.input, err);
+ if (*err)
+ goto err;
+
+ /*
+ * If attribute value of test case is set, filter on that value.
+ * If not, filter on suite value if set. If not, filter on
+ * default value.
+ */
+ result = false;
+ if (test_val) {
+ if (test_result)
+ result = true;
+ } else if (suite_val) {
+ if (suite_result)
+ result = true;
+ } else if (default_result) {
+ result = true;
+ }
+
+ if (result) {
+ filtered[n++] = *test_case;
+ } else if (action && strcmp(action, "skip") == 0) {
+ test_case->status = KUNIT_SKIPPED;
+ filtered[n++] = *test_case;
+ }
+ }
+
+err:
+ if (n == 0 || *err) {
+ kfree(copy);
+ kfree(filtered);
+ return NULL;
+ }
+
+ copy->test_cases = filtered;
+
+ return copy;
+}
diff --git a/lib/kunit/debugfs.c b/lib/kunit/debugfs.c
new file mode 100644
index 000000000..35ddb329d
--- /dev/null
+++ b/lib/kunit/debugfs.c
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020, Oracle and/or its affiliates.
+ * Author: Alan Maguire <alan.maguire@oracle.com>
+ */
+
+#include <linux/debugfs.h>
+#include <linux/module.h>
+
+#include <kunit/test.h>
+
+#include "string-stream.h"
+#include "debugfs.h"
+
+#define KUNIT_DEBUGFS_ROOT "kunit"
+#define KUNIT_DEBUGFS_RESULTS "results"
+
+/*
+ * Create a debugfs representation of test suites:
+ *
+ * Path Semantics
+ * /sys/kernel/debug/kunit/<testsuite>/results Show results of last run for
+ * testsuite
+ *
+ */
+
+static struct dentry *debugfs_rootdir;
+
+void kunit_debugfs_cleanup(void)
+{
+ debugfs_remove_recursive(debugfs_rootdir);
+}
+
+void kunit_debugfs_init(void)
+{
+ if (!debugfs_rootdir)
+ debugfs_rootdir = debugfs_create_dir(KUNIT_DEBUGFS_ROOT, NULL);
+}
+
+static void debugfs_print_result(struct seq_file *seq,
+ struct kunit_suite *suite,
+ struct kunit_case *test_case)
+{
+ if (!test_case || !test_case->log)
+ return;
+
+ seq_printf(seq, "%s", test_case->log);
+}
+
+/*
+ * /sys/kernel/debug/kunit/<testsuite>/results shows all results for testsuite.
+ */
+static int debugfs_print_results(struct seq_file *seq, void *v)
+{
+ struct kunit_suite *suite = (struct kunit_suite *)seq->private;
+ enum kunit_status success;
+ struct kunit_case *test_case;
+
+ if (!suite)
+ return 0;
+
+ success = kunit_suite_has_succeeded(suite);
+
+ /* Print KTAP header so the debugfs log can be parsed as valid KTAP. */
+ seq_puts(seq, "KTAP version 1\n");
+ seq_puts(seq, "1..1\n");
+
+ /* Print suite header because it is not stored in the test logs. */
+ seq_puts(seq, KUNIT_SUBTEST_INDENT "KTAP version 1\n");
+ seq_printf(seq, KUNIT_SUBTEST_INDENT "# Subtest: %s\n", suite->name);
+ seq_printf(seq, KUNIT_SUBTEST_INDENT "1..%zd\n", kunit_suite_num_test_cases(suite));
+
+ kunit_suite_for_each_test_case(suite, test_case)
+ debugfs_print_result(seq, suite, test_case);
+
+ if (suite->log)
+ seq_printf(seq, "%s", suite->log);
+
+ seq_printf(seq, "%s %d %s\n",
+ kunit_status_to_ok_not_ok(success), 1, suite->name);
+ return 0;
+}
+
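+/*
+ * For a single passing suite the file reads roughly as follows (suite and
+ * case names are illustrative):
+ *
+ * KTAP version 1
+ * 1..1
+ *     KTAP version 1
+ *     # Subtest: example
+ *     1..1
+ *     ok 1 example_simple_test
+ * ok 1 example
+ */
+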
+static int debugfs_release(struct inode *inode, struct file *file)
+{
+ return single_release(inode, file);
+}
+
+static int debugfs_results_open(struct inode *inode, struct file *file)
+{
+ struct kunit_suite *suite;
+
+ suite = (struct kunit_suite *)inode->i_private;
+
+ return single_open(file, debugfs_print_results, suite);
+}
+
+static const struct file_operations debugfs_results_fops = {
+ .open = debugfs_results_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = debugfs_release,
+};
+
+void kunit_debugfs_create_suite(struct kunit_suite *suite)
+{
+ struct kunit_case *test_case;
+
+ /* Allocate logs before creating debugfs representation. */
+ suite->log = kzalloc(KUNIT_LOG_SIZE, GFP_KERNEL);
+ kunit_suite_for_each_test_case(suite, test_case)
+ test_case->log = kzalloc(KUNIT_LOG_SIZE, GFP_KERNEL);
+
+ suite->debugfs = debugfs_create_dir(suite->name, debugfs_rootdir);
+
+ debugfs_create_file(KUNIT_DEBUGFS_RESULTS, S_IFREG | 0444,
+ suite->debugfs,
+ suite, &debugfs_results_fops);
+}
+
+void kunit_debugfs_destroy_suite(struct kunit_suite *suite)
+{
+ struct kunit_case *test_case;
+
+ debugfs_remove_recursive(suite->debugfs);
+ kfree(suite->log);
+ kunit_suite_for_each_test_case(suite, test_case)
+ kfree(test_case->log);
+}
diff --git a/lib/kunit/debugfs.h b/lib/kunit/debugfs.h
new file mode 100644
index 000000000..dcc7d7556
--- /dev/null
+++ b/lib/kunit/debugfs.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020, Oracle and/or its affiliates.
+ */
+
+#ifndef _KUNIT_DEBUGFS_H
+#define _KUNIT_DEBUGFS_H
+
+#include <kunit/test.h>
+
+#ifdef CONFIG_KUNIT_DEBUGFS
+
+void kunit_debugfs_create_suite(struct kunit_suite *suite);
+void kunit_debugfs_destroy_suite(struct kunit_suite *suite);
+void kunit_debugfs_init(void);
+void kunit_debugfs_cleanup(void);
+
+#else
+
+static inline void kunit_debugfs_create_suite(struct kunit_suite *suite) { }
+
+static inline void kunit_debugfs_destroy_suite(struct kunit_suite *suite) { }
+
+static inline void kunit_debugfs_init(void) { }
+
+static inline void kunit_debugfs_cleanup(void) { }
+
+#endif /* CONFIG_KUNIT_DEBUGFS */
+
+#endif /* _KUNIT_DEBUGFS_H */
diff --git a/lib/kunit/executor.c b/lib/kunit/executor.c
new file mode 100644
index 000000000..1236b3cd2
--- /dev/null
+++ b/lib/kunit/executor.c
@@ -0,0 +1,356 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/reboot.h>
+#include <kunit/test.h>
+#include <kunit/attributes.h>
+#include <linux/glob.h>
+#include <linux/moduleparam.h>
+
+/*
+ * These symbols point to the .kunit_test_suites section and are defined in
+ * include/asm-generic/vmlinux.lds.h, and consequently must be extern.
+ */
+extern struct kunit_suite * const __kunit_suites_start[];
+extern struct kunit_suite * const __kunit_suites_end[];
+
+static char *action_param;
+
+module_param_named(action, action_param, charp, 0400);
+MODULE_PARM_DESC(action,
+ "Changes KUnit executor behavior, valid values are:\n"
+ "<none>: run the tests like normal\n"
+ "'list' to list test names instead of running them.\n"
+ "'list_attr' to list test names and attributes instead of running them.\n");
+
+const char *kunit_action(void)
+{
+ return action_param;
+}
+
+static char *filter_glob_param;
+static char *filter_param;
+static char *filter_action_param;
+
+module_param_named(filter_glob, filter_glob_param, charp, 0400);
+MODULE_PARM_DESC(filter_glob,
+ "Filter which KUnit test suites/tests run at boot-time, e.g. list* or list*.*del_test");
+module_param_named(filter, filter_param, charp, 0400);
+MODULE_PARM_DESC(filter,
+ "Filter which KUnit test suites/tests run at boot-time using attributes, e.g. speed>slow");
+module_param_named(filter_action, filter_action_param, charp, 0400);
+MODULE_PARM_DESC(filter_action,
+ "Changes behavior of filtered tests using attributes, valid values are:\n"
+ "<none>: do not run filtered tests as normal\n"
+ "'skip': skip all filtered tests instead so tests will appear in output\n");
+
+const char *kunit_filter_glob(void)
+{
+ return filter_glob_param;
+}
+
+char *kunit_filter(void)
+{
+ return filter_param;
+}
+
+char *kunit_filter_action(void)
+{
+ return filter_action_param;
+}
+
+/* glob_match() needs NULL terminated strings, so we need a copy of filter_glob_param. */
+struct kunit_glob_filter {
+ char *suite_glob;
+ char *test_glob;
+};
+
+/* Split "suite_glob.test_glob" into two. Assumes filter_glob is not empty. */
+static int kunit_parse_glob_filter(struct kunit_glob_filter *parsed,
+ const char *filter_glob)
+{
+ const int len = strlen(filter_glob);
+ const char *period = strchr(filter_glob, '.');
+
+ if (!period) {
+ parsed->suite_glob = kzalloc(len + 1, GFP_KERNEL);
+ if (!parsed->suite_glob)
+ return -ENOMEM;
+
+ parsed->test_glob = NULL;
+ strcpy(parsed->suite_glob, filter_glob);
+ return 0;
+ }
+
+ parsed->suite_glob = kzalloc(period - filter_glob + 1, GFP_KERNEL);
+ if (!parsed->suite_glob)
+ return -ENOMEM;
+
+ parsed->test_glob = kzalloc(len - (period - filter_glob) + 1, GFP_KERNEL);
+ if (!parsed->test_glob) {
+ kfree(parsed->suite_glob);
+ return -ENOMEM;
+ }
+
+ strncpy(parsed->suite_glob, filter_glob, period - filter_glob);
+ strncpy(parsed->test_glob, period + 1, len - (period - filter_glob));
+
+ return 0;
+}
+
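+/*
+ * For example, "list*.*del_test" splits into suite_glob "list*" and
+ * test_glob "*del_test", while a bare "list*" leaves test_glob NULL,
+ * which matches every test in the matching suites.
+ */
+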
+/* Create a copy of suite with only tests that match test_glob. */
+static struct kunit_suite *
+kunit_filter_glob_tests(const struct kunit_suite *const suite, const char *test_glob)
+{
+ int n = 0;
+ struct kunit_case *filtered, *test_case;
+ struct kunit_suite *copy;
+
+ kunit_suite_for_each_test_case(suite, test_case) {
+ if (!test_glob || glob_match(test_glob, test_case->name))
+ ++n;
+ }
+
+ if (n == 0)
+ return NULL;
+
+ copy = kmemdup(suite, sizeof(*copy), GFP_KERNEL);
+ if (!copy)
+ return ERR_PTR(-ENOMEM);
+
+ filtered = kcalloc(n + 1, sizeof(*filtered), GFP_KERNEL);
+ if (!filtered) {
+ kfree(copy);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ n = 0;
+ kunit_suite_for_each_test_case(suite, test_case) {
+ if (!test_glob || glob_match(test_glob, test_case->name))
+ filtered[n++] = *test_case;
+ }
+
+ copy->test_cases = filtered;
+ return copy;
+}
+
+void kunit_free_suite_set(struct kunit_suite_set suite_set)
+{
+ struct kunit_suite * const *suites;
+
+ for (suites = suite_set.start; suites < suite_set.end; suites++) {
+ kfree((*suites)->test_cases);
+ kfree(*suites);
+ }
+ kfree(suite_set.start);
+}
+
+struct kunit_suite_set
+kunit_filter_suites(const struct kunit_suite_set *suite_set,
+ const char *filter_glob,
+ char *filters,
+ char *filter_action,
+ int *err)
+{
+ int i, j, k;
+ int filter_count = 0;
+ struct kunit_suite **copy, **copy_start, *filtered_suite, *new_filtered_suite;
+ struct kunit_suite_set filtered = {NULL, NULL};
+ struct kunit_glob_filter parsed_glob;
+ struct kunit_attr_filter *parsed_filters = NULL;
+ struct kunit_suite * const *suites;
+
+ const size_t max = suite_set->end - suite_set->start;
+
+ copy = kcalloc(max, sizeof(*filtered.start), GFP_KERNEL);
+ if (!copy) { /* won't be able to run anything, return an empty set */
+ return filtered;
+ }
+ copy_start = copy;
+
+ if (filter_glob) {
+ *err = kunit_parse_glob_filter(&parsed_glob, filter_glob);
+ if (*err)
+ goto free_copy;
+ }
+
+ /* Parse attribute filters */
+ if (filters) {
+ filter_count = kunit_get_filter_count(filters);
+ parsed_filters = kcalloc(filter_count, sizeof(*parsed_filters), GFP_KERNEL);
+ if (!parsed_filters) {
+ *err = -ENOMEM;
+ goto free_parsed_glob;
+ }
+ for (j = 0; j < filter_count; j++)
+ parsed_filters[j] = kunit_next_attr_filter(&filters, err);
+ if (*err)
+ goto free_parsed_filters;
+ }
+
+ for (i = 0; &suite_set->start[i] != suite_set->end; i++) {
+ filtered_suite = suite_set->start[i];
+ if (filter_glob) {
+ if (!glob_match(parsed_glob.suite_glob, filtered_suite->name))
+ continue;
+ filtered_suite = kunit_filter_glob_tests(filtered_suite,
+ parsed_glob.test_glob);
+ if (IS_ERR(filtered_suite)) {
+ *err = PTR_ERR(filtered_suite);
+ goto free_filtered_suite;
+ }
+ }
+ if (filter_count > 0 && parsed_filters != NULL) {
+ for (k = 0; k < filter_count; k++) {
+ new_filtered_suite = kunit_filter_attr_tests(filtered_suite,
+ parsed_filters[k], filter_action, err);
+
+ /* Free previous copy of suite */
+ if (k > 0 || filter_glob) {
+ kfree(filtered_suite->test_cases);
+ kfree(filtered_suite);
+ }
+
+ filtered_suite = new_filtered_suite;
+
+ if (*err)
+ goto free_filtered_suite;
+
+ if (IS_ERR(filtered_suite)) {
+ *err = PTR_ERR(filtered_suite);
+ goto free_filtered_suite;
+ }
+ if (!filtered_suite)
+ break;
+ }
+ }
+
+ if (!filtered_suite)
+ continue;
+
+ *copy++ = filtered_suite;
+ }
+ filtered.start = copy_start;
+ filtered.end = copy;
+
+free_filtered_suite:
+ if (*err) {
+ for (suites = copy_start; suites < copy; suites++) {
+ kfree((*suites)->test_cases);
+ kfree(*suites);
+ }
+ }
+
+free_parsed_filters:
+ if (filter_count)
+ kfree(parsed_filters);
+
+free_parsed_glob:
+ if (filter_glob) {
+ kfree(parsed_glob.suite_glob);
+ kfree(parsed_glob.test_glob);
+ }
+
+free_copy:
+ if (*err)
+ kfree(copy_start);
+
+ return filtered;
+}
+
+void kunit_exec_run_tests(struct kunit_suite_set *suite_set, bool builtin)
+{
+ size_t num_suites = suite_set->end - suite_set->start;
+
+ if (builtin || num_suites) {
+ pr_info("KTAP version 1\n");
+ pr_info("1..%zu\n", num_suites);
+ }
+
+ __kunit_test_suites_init(suite_set->start, num_suites);
+}
+
+void kunit_exec_list_tests(struct kunit_suite_set *suite_set, bool include_attr)
+{
+ struct kunit_suite * const *suites;
+ struct kunit_case *test_case;
+
+ /* Hack: print a KTAP header so kunit.py can find the start of KUnit output. */
+ pr_info("KTAP version 1\n");
+
+ for (suites = suite_set->start; suites < suite_set->end; suites++) {
+ /* Print suite name and suite attributes */
+ pr_info("%s\n", (*suites)->name);
+ if (include_attr)
+ kunit_print_attr((void *)(*suites), false, 0);
+
+ /* Print test case name and attributes in suite */
+ kunit_suite_for_each_test_case((*suites), test_case) {
+ pr_info("%s.%s\n", (*suites)->name, test_case->name);
+ if (include_attr)
+ kunit_print_attr((void *)test_case, true, 0);
+ }
+ }
+}
+
+#if IS_BUILTIN(CONFIG_KUNIT)
+
+static char *kunit_shutdown;
+core_param(kunit_shutdown, kunit_shutdown, charp, 0644);
+
+static void kunit_handle_shutdown(void)
+{
+ if (!kunit_shutdown)
+ return;
+
+ if (!strcmp(kunit_shutdown, "poweroff"))
+ kernel_power_off();
+ else if (!strcmp(kunit_shutdown, "halt"))
+ kernel_halt();
+ else if (!strcmp(kunit_shutdown, "reboot"))
+ kernel_restart(NULL);
+
+}
+
+int kunit_run_all_tests(void)
+{
+ struct kunit_suite_set suite_set = {
+ __kunit_suites_start, __kunit_suites_end,
+ };
+ int err = 0;
+
+ if (!kunit_enabled()) {
+ pr_info("kunit: disabled\n");
+ goto out;
+ }
+
+ if (filter_glob_param || filter_param) {
+ suite_set = kunit_filter_suites(&suite_set, filter_glob_param,
+ filter_param, filter_action_param, &err);
+ if (err) {
+ pr_err("kunit executor: error filtering suites: %d\n", err);
+ goto out;
+ }
+ }
+
+ if (!action_param)
+ kunit_exec_run_tests(&suite_set, true);
+ else if (strcmp(action_param, "list") == 0)
+ kunit_exec_list_tests(&suite_set, false);
+ else if (strcmp(action_param, "list_attr") == 0)
+ kunit_exec_list_tests(&suite_set, true);
+ else
+ pr_err("kunit executor: unknown action '%s'\n", action_param);
+
+ if (filter_glob_param || filter_param) { /* a copy was made of each suite */
+ kunit_free_suite_set(suite_set);
+ }
+
+out:
+ kunit_handle_shutdown();
+ return err;
+}
+
+#if IS_BUILTIN(CONFIG_KUNIT_TEST)
+#include "executor_test.c"
+#endif
+
+#endif /* IS_BUILTIN(CONFIG_KUNIT) */
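
The module parameters above compose on the kernel command line. As a
hypothetical example, a boot that lists (rather than runs) everything in
suites matching example*, keeping only tests faster than "slow":

    kunit.filter_glob=example* kunit.filter=speed>slow kunit.action=list

When running (no kunit.action), kunit.filter_action=skip keeps the
filtered-out tests visible in the output as skips.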
diff --git a/lib/kunit/executor_test.c b/lib/kunit/executor_test.c
new file mode 100644
index 000000000..22d4ee86d
--- /dev/null
+++ b/lib/kunit/executor_test.c
@@ -0,0 +1,293 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit test for the KUnit executor.
+ *
+ * Copyright (C) 2021, Google LLC.
+ * Author: Daniel Latypov <dlatypov@google.com>
+ */
+
+#include <kunit/test.h>
+#include <kunit/attributes.h>
+
+static void free_suite_set_at_end(struct kunit *test, const void *to_free);
+static struct kunit_suite *alloc_fake_suite(struct kunit *test,
+ const char *suite_name,
+ struct kunit_case *test_cases);
+
+static void dummy_test(struct kunit *test) {}
+
+static struct kunit_case dummy_test_cases[] = {
+ /* .run_case is not important, just needs to be non-NULL */
+ { .name = "test1", .run_case = dummy_test },
+ { .name = "test2", .run_case = dummy_test },
+ {},
+};
+
+static void parse_filter_test(struct kunit *test)
+{
+ struct kunit_glob_filter filter = {NULL, NULL};
+
+ kunit_parse_glob_filter(&filter, "suite");
+ KUNIT_EXPECT_STREQ(test, filter.suite_glob, "suite");
+ KUNIT_EXPECT_FALSE(test, filter.test_glob);
+ kfree(filter.suite_glob);
+ kfree(filter.test_glob);
+
+ kunit_parse_glob_filter(&filter, "suite.test");
+ KUNIT_EXPECT_STREQ(test, filter.suite_glob, "suite");
+ KUNIT_EXPECT_STREQ(test, filter.test_glob, "test");
+ kfree(filter.suite_glob);
+ kfree(filter.test_glob);
+}
+
+static void filter_suites_test(struct kunit *test)
+{
+ struct kunit_suite *subsuite[3] = {NULL, NULL};
+ struct kunit_suite_set suite_set = {
+ .start = subsuite, .end = &subsuite[2],
+ };
+ struct kunit_suite_set got;
+ int err = 0;
+
+ subsuite[0] = alloc_fake_suite(test, "suite1", dummy_test_cases);
+ subsuite[1] = alloc_fake_suite(test, "suite2", dummy_test_cases);
+
+ /* Want: suite1, suite2, NULL -> suite2, NULL */
+ got = kunit_filter_suites(&suite_set, "suite2", NULL, NULL, &err);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ free_suite_set_at_end(test, &got);
+
+ /* Validate we just have suite2 */
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start[0]);
+ KUNIT_EXPECT_STREQ(test, (const char *)got.start[0]->name, "suite2");
+
+ /* Contains one element (end points one past the last element) */
+ KUNIT_ASSERT_EQ(test, got.end - got.start, 1);
+}
+
+static void filter_suites_test_glob_test(struct kunit *test)
+{
+ struct kunit_suite *subsuite[3] = {NULL, NULL};
+ struct kunit_suite_set suite_set = {
+ .start = subsuite, .end = &subsuite[2],
+ };
+ struct kunit_suite_set got;
+ int err = 0;
+
+ subsuite[0] = alloc_fake_suite(test, "suite1", dummy_test_cases);
+ subsuite[1] = alloc_fake_suite(test, "suite2", dummy_test_cases);
+
+ /* Want: suite1, suite2, NULL -> suite2 (just test2), NULL */
+ got = kunit_filter_suites(&suite_set, "suite2.test2", NULL, NULL, &err);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ free_suite_set_at_end(test, &got);
+
+ /* Validate we just have suite2 */
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start[0]);
+ KUNIT_EXPECT_STREQ(test, (const char *)got.start[0]->name, "suite2");
+ KUNIT_ASSERT_EQ(test, got.end - got.start, 1);
+
+ /* Now validate we just have test2 */
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start[0]->test_cases);
+ KUNIT_EXPECT_STREQ(test, (const char *)got.start[0]->test_cases[0].name, "test2");
+ KUNIT_EXPECT_FALSE(test, got.start[0]->test_cases[1].name);
+}
+
+static void filter_suites_to_empty_test(struct kunit *test)
+{
+ struct kunit_suite *subsuite[3] = {NULL, NULL};
+ struct kunit_suite_set suite_set = {
+ .start = subsuite, .end = &subsuite[2],
+ };
+ struct kunit_suite_set got;
+ int err = 0;
+
+ subsuite[0] = alloc_fake_suite(test, "suite1", dummy_test_cases);
+ subsuite[1] = alloc_fake_suite(test, "suite2", dummy_test_cases);
+
+ got = kunit_filter_suites(&suite_set, "not_found", NULL, NULL, &err);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ free_suite_set_at_end(test, &got); /* just in case */
+
+ KUNIT_EXPECT_PTR_EQ_MSG(test, got.start, got.end,
+ "should be empty to indicate no match");
+}
+
+static void parse_filter_attr_test(struct kunit *test)
+{
+ int j, filter_count;
+ struct kunit_attr_filter *parsed_filters;
+ char filters[] = "speed>slow, module!=example", *filter = filters;
+ int err = 0;
+
+ filter_count = kunit_get_filter_count(filters);
+ KUNIT_EXPECT_EQ(test, filter_count, 2);
+
+ parsed_filters = kunit_kcalloc(test, filter_count, sizeof(*parsed_filters),
+ GFP_KERNEL);
+ for (j = 0; j < filter_count; j++) {
+ parsed_filters[j] = kunit_next_attr_filter(&filter, &err);
+ KUNIT_ASSERT_EQ_MSG(test, err, 0, "failed to parse filter from '%s'", filters);
+ }
+
+ KUNIT_EXPECT_STREQ(test, kunit_attr_filter_name(parsed_filters[0]), "speed");
+ KUNIT_EXPECT_STREQ(test, parsed_filters[0].input, ">slow");
+
+ KUNIT_EXPECT_STREQ(test, kunit_attr_filter_name(parsed_filters[1]), "module");
+ KUNIT_EXPECT_STREQ(test, parsed_filters[1].input, "!=example");
+}
+
+static struct kunit_case dummy_attr_test_cases[] = {
+ /* .run_case is not important, just needs to be non-NULL */
+ { .name = "slow", .run_case = dummy_test, .module_name = "dummy",
+ .attr.speed = KUNIT_SPEED_SLOW },
+ { .name = "normal", .run_case = dummy_test, .module_name = "dummy" },
+ {},
+};
+
+static void filter_attr_test(struct kunit *test)
+{
+ struct kunit_suite *subsuite[3] = {NULL, NULL};
+ struct kunit_suite_set suite_set = {
+ .start = subsuite, .end = &subsuite[2],
+ };
+ struct kunit_suite_set got;
+ char filter[] = "speed>slow";
+ int err = 0;
+
+ subsuite[0] = alloc_fake_suite(test, "normal_suite", dummy_attr_test_cases);
+ subsuite[1] = alloc_fake_suite(test, "slow_suite", dummy_attr_test_cases);
+ subsuite[1]->attr.speed = KUNIT_SPEED_SLOW; // Set suite attribute
+
+ /*
+ * Want: normal_suite(slow, normal), slow_suite(slow, normal),
+ * NULL -> normal_suite(normal), NULL
+ *
+ * The normal test in slow_suite is filtered out because its own speed
+ * attribute is unset, so filtering falls back to the parent suite's
+ * attribute, which is slow.
+ */
+ got = kunit_filter_suites(&suite_set, NULL, filter, NULL, &err);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ free_suite_set_at_end(test, &got);
+
+ /* Validate we just have normal_suite */
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start[0]);
+ KUNIT_EXPECT_STREQ(test, got.start[0]->name, "normal_suite");
+ KUNIT_ASSERT_EQ(test, got.end - got.start, 1);
+
+ /* Now validate we just have normal test case */
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start[0]->test_cases);
+ KUNIT_EXPECT_STREQ(test, got.start[0]->test_cases[0].name, "normal");
+ KUNIT_EXPECT_FALSE(test, got.start[0]->test_cases[1].name);
+}
+
+static void filter_attr_empty_test(struct kunit *test)
+{
+ struct kunit_suite *subsuite[3] = {NULL, NULL};
+ struct kunit_suite_set suite_set = {
+ .start = subsuite, .end = &subsuite[2],
+ };
+ struct kunit_suite_set got;
+ char filter[] = "module!=dummy";
+ int err = 0;
+
+ subsuite[0] = alloc_fake_suite(test, "suite1", dummy_attr_test_cases);
+ subsuite[1] = alloc_fake_suite(test, "suite2", dummy_attr_test_cases);
+
+ got = kunit_filter_suites(&suite_set, NULL, filter, NULL, &err);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ free_suite_set_at_end(test, &got); /* just in case */
+
+ KUNIT_EXPECT_PTR_EQ_MSG(test, got.start, got.end,
+ "should be empty to indicate no match");
+}
+
+static void filter_attr_skip_test(struct kunit *test)
+{
+ struct kunit_suite *subsuite[2] = {NULL};
+ struct kunit_suite_set suite_set = {
+ .start = subsuite, .end = &subsuite[1],
+ };
+ struct kunit_suite_set got;
+ char filter[] = "speed>slow";
+ int err = 0;
+
+ subsuite[0] = alloc_fake_suite(test, "suite", dummy_attr_test_cases);
+
+ /* Want: suite(slow, normal), NULL -> suite(slow with SKIP, normal), NULL */
+ got = kunit_filter_suites(&suite_set, NULL, filter, "skip", &err);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ free_suite_set_at_end(test, &got);
+
+ /* Validate we have both the slow and normal test */
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start[0]->test_cases);
+ KUNIT_ASSERT_EQ(test, kunit_suite_num_test_cases(got.start[0]), 2);
+ KUNIT_EXPECT_STREQ(test, got.start[0]->test_cases[0].name, "slow");
+ KUNIT_EXPECT_STREQ(test, got.start[0]->test_cases[1].name, "normal");
+
+ /* Now ensure slow is skipped and normal is not */
+ KUNIT_EXPECT_EQ(test, got.start[0]->test_cases[0].status, KUNIT_SKIPPED);
+ KUNIT_EXPECT_FALSE(test, got.start[0]->test_cases[1].status);
+}
+
+static struct kunit_case executor_test_cases[] = {
+ KUNIT_CASE(parse_filter_test),
+ KUNIT_CASE(filter_suites_test),
+ KUNIT_CASE(filter_suites_test_glob_test),
+ KUNIT_CASE(filter_suites_to_empty_test),
+ KUNIT_CASE(parse_filter_attr_test),
+ KUNIT_CASE(filter_attr_test),
+ KUNIT_CASE(filter_attr_empty_test),
+ KUNIT_CASE(filter_attr_skip_test),
+ {}
+};
+
+static struct kunit_suite executor_test_suite = {
+ .name = "kunit_executor_test",
+ .test_cases = executor_test_cases,
+};
+
+kunit_test_suites(&executor_test_suite);
+
+/* Test helpers */
+
+static void free_suite_set(void *suite_set)
+{
+ kunit_free_suite_set(*(struct kunit_suite_set *)suite_set);
+ kfree(suite_set);
+}
+
+/* Use the resource API to register a call to free_suite_set.
+ * Since we never actually use the resource, it's safe to use on const data.
+ */
+static void free_suite_set_at_end(struct kunit *test, const void *to_free)
+{
+ struct kunit_suite_set *free;
+
+ if (!((struct kunit_suite_set *)to_free)->start)
+ return;
+
+ free = kzalloc(sizeof(struct kunit_suite_set), GFP_KERNEL);
+ *free = *(struct kunit_suite_set *)to_free;
+
+ kunit_add_action(test, free_suite_set, (void *)free);
+}
+
+static struct kunit_suite *alloc_fake_suite(struct kunit *test,
+ const char *suite_name,
+ struct kunit_case *test_cases)
+{
+ struct kunit_suite *suite;
+
+ /* We normally never expect to allocate suites, hence the non-const cast. */
+ suite = kunit_kzalloc(test, sizeof(*suite), GFP_KERNEL);
+ strncpy((char *)suite->name, suite_name, sizeof(suite->name) - 1);
+ suite->test_cases = test_cases;
+
+ return suite;
+}
diff --git a/lib/kunit/hooks-impl.h b/lib/kunit/hooks-impl.h
new file mode 100644
index 000000000..4e71b2d01
--- /dev/null
+++ b/lib/kunit/hooks-impl.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Declarations for hook implementations.
+ *
+ * These will be set as the function pointers in struct kunit_hook_table,
+ * found in include/kunit/test-bug.h.
+ *
+ * Copyright (C) 2023, Google LLC.
+ * Author: David Gow <davidgow@google.com>
+ */
+
+#ifndef _KUNIT_HOOKS_IMPL_H
+#define _KUNIT_HOOKS_IMPL_H
+
+#include <kunit/test-bug.h>
+
+/* List of declarations. */
+void __printf(3, 4) __kunit_fail_current_test_impl(const char *file,
+ int line,
+ const char *fmt, ...);
+void *__kunit_get_static_stub_address_impl(struct kunit *test, void *real_fn_addr);
+
+/* Code to set all of the function pointers. */
+static inline void kunit_install_hooks(void)
+{
+ /* Install the KUnit hook functions. */
+ kunit_hooks.fail_current_test = __kunit_fail_current_test_impl;
+ kunit_hooks.get_static_stub_address = __kunit_get_static_stub_address_impl;
+}
+
+#endif /* _KUNIT_HOOKS_IMPL_H */
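
The function-pointer table filled in by kunit_install_hooks() exists so
that always-built-in code can call into KUnit without a hard dependency on
a possibly-modular kunit.ko. A minimal sketch of a call site, guarded by
the static key defined in hooks.c (the wrapper shape and NULL check here
are illustrative, not the exact in-tree macro):

    #include <kunit/test-bug.h>

    static void report_test_failure(void)
    {
            /* Static branch: effectively free when no KUnit test runs. */
            if (static_branch_unlikely(&kunit_running)) {
                    if (kunit_hooks.fail_current_test)
                            kunit_hooks.fail_current_test(__FILE__, __LINE__,
                                                          "unexpected state");
            }
    }
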
diff --git a/lib/kunit/hooks.c b/lib/kunit/hooks.c
new file mode 100644
index 000000000..365d98d49
--- /dev/null
+++ b/lib/kunit/hooks.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit 'Hooks' implementation.
+ *
+ * This file contains code / structures which should be built-in even when
+ * KUnit itself is built as a module.
+ *
+ * Copyright (C) 2022, Google LLC.
+ * Author: David Gow <davidgow@google.com>
+ */
+
+
+#include <kunit/test-bug.h>
+
+DEFINE_STATIC_KEY_FALSE(kunit_running);
+EXPORT_SYMBOL(kunit_running);
+
+/* Function pointers for hooks. */
+struct kunit_hooks_table kunit_hooks;
+EXPORT_SYMBOL(kunit_hooks);
+
diff --git a/lib/kunit/kunit-example-test.c b/lib/kunit/kunit-example-test.c
new file mode 100644
index 000000000..01a769f35
--- /dev/null
+++ b/lib/kunit/kunit-example-test.c
@@ -0,0 +1,289 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Example KUnit test to show how to use KUnit.
+ *
+ * Copyright (C) 2019, Google LLC.
+ * Author: Brendan Higgins <brendanhiggins@google.com>
+ */
+
+#include <kunit/test.h>
+#include <kunit/static_stub.h>
+
+/*
+ * This is the most fundamental element of KUnit, the test case. A test case
+ * makes a set of EXPECTATIONs and ASSERTIONs about the behavior of some code; if
+ * any expectations or assertions are not met, the test fails; otherwise, the
+ * test passes.
+ *
+ * In KUnit, a test case is just a function with the signature
+ * `void (*)(struct kunit *)`. `struct kunit` is a context object that stores
+ * information about the current test.
+ */
+static void example_simple_test(struct kunit *test)
+{
+ /*
+ * This is an EXPECTATION; it is how KUnit tests things. When you want
+ * to test a piece of code, you set some expectations about what the
+ * code should do. KUnit then runs the test and verifies that the code's
+ * behavior matched what was expected.
+ */
+ KUNIT_EXPECT_EQ(test, 1 + 1, 2);
+}
+
+/*
+ * This is run once before each test case, see the comment on
+ * example_test_suite for more information.
+ */
+static int example_test_init(struct kunit *test)
+{
+ kunit_info(test, "initializing\n");
+
+ return 0;
+}
+
+/*
+ * This is run once after each test case, see the comment on
+ * example_test_suite for more information.
+ */
+static void example_test_exit(struct kunit *test)
+{
+ kunit_info(test, "cleaning up\n");
+}
+
+
+/*
+ * This is run once before all test cases in the suite.
+ * See the comment on example_test_suite for more information.
+ */
+static int example_test_init_suite(struct kunit_suite *suite)
+{
+ kunit_info(suite, "initializing suite\n");
+
+ return 0;
+}
+
+/*
+ * This is run once after all test cases in the suite.
+ * See the comment on example_test_suite for more information.
+ */
+static void example_test_exit_suite(struct kunit_suite *suite)
+{
+ kunit_info(suite, "exiting suite\n");
+}
+
+
+/*
+ * This test should always be skipped.
+ */
+static void example_skip_test(struct kunit *test)
+{
+ /* This line should run */
+ kunit_info(test, "You should not see a line below.");
+
+ /* Skip (and abort) the test */
+ kunit_skip(test, "this test should be skipped");
+
+ /* This line should not execute */
+ KUNIT_FAIL(test, "You should not see this line.");
+}
+
+/*
+ * This test should always be marked skipped.
+ */
+static void example_mark_skipped_test(struct kunit *test)
+{
+ /* This line should run */
+ kunit_info(test, "You should see a line below.");
+
+ /* Skip (but do not abort) the test */
+ kunit_mark_skipped(test, "this test should be skipped");
+
+ /* This line should run */
+ kunit_info(test, "You should see this line.");
+}
+
+/*
+ * This test shows off all the types of KUNIT_EXPECT macros.
+ */
+static void example_all_expect_macros_test(struct kunit *test)
+{
+ const u32 array1[] = { 0x0F, 0xFF };
+ const u32 array2[] = { 0x1F, 0xFF };
+
+ /* Boolean assertions */
+ KUNIT_EXPECT_TRUE(test, true);
+ KUNIT_EXPECT_FALSE(test, false);
+
+ /* Integer assertions */
+ KUNIT_EXPECT_EQ(test, 1, 1); /* check == */
+ KUNIT_EXPECT_GE(test, 1, 1); /* check >= */
+ KUNIT_EXPECT_LE(test, 1, 1); /* check <= */
+ KUNIT_EXPECT_NE(test, 1, 0); /* check != */
+ KUNIT_EXPECT_GT(test, 1, 0); /* check > */
+ KUNIT_EXPECT_LT(test, 0, 1); /* check < */
+
+ /* Pointer assertions */
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test, test);
+ KUNIT_EXPECT_PTR_EQ(test, NULL, NULL);
+ KUNIT_EXPECT_PTR_NE(test, test, NULL);
+ KUNIT_EXPECT_NULL(test, NULL);
+ KUNIT_EXPECT_NOT_NULL(test, test);
+
+ /* String assertions */
+ KUNIT_EXPECT_STREQ(test, "hi", "hi");
+ KUNIT_EXPECT_STRNEQ(test, "hi", "bye");
+
+ /* Memory block assertions */
+ KUNIT_EXPECT_MEMEQ(test, array1, array1, sizeof(array1));
+ KUNIT_EXPECT_MEMNEQ(test, array1, array2, sizeof(array1));
+
+ /*
+ * There are also ASSERT variants of all of the above that abort test
+ * execution if they fail. Useful for memory allocations, etc.
+ */
+ KUNIT_ASSERT_GT(test, sizeof(char), 0);
+
+ /*
+ * There are also _MSG variants of all of the above that let you include
+ * additional text on failure.
+ */
+ KUNIT_EXPECT_GT_MSG(test, sizeof(int), 0, "Your ints are 0-bit?!");
+ KUNIT_ASSERT_GT_MSG(test, sizeof(int), 0, "Your ints are 0-bit?!");
+}
+
+/* This is a function we'll replace with static stubs. */
+static int add_one(int i)
+{
+ /* This will trigger the stub if active. */
+ KUNIT_STATIC_STUB_REDIRECT(add_one, i);
+
+ return i + 1;
+}
+
+/* This is used as a replacement for the above function. */
+static int subtract_one(int i)
+{
+ /* We don't need to trigger the stub from the replacement. */
+
+ return i - 1;
+}
+
+/*
+ * This test shows the use of static stubs.
+ */
+static void example_static_stub_test(struct kunit *test)
+{
+ /* By default, function is not stubbed. */
+ KUNIT_EXPECT_EQ(test, add_one(1), 2);
+
+ /* Replace add_one() with subtract_one(). */
+ kunit_activate_static_stub(test, add_one, subtract_one);
+
+ /* add_one() is now replaced. */
+ KUNIT_EXPECT_EQ(test, add_one(1), 0);
+
+ /* Return add_one() to normal. */
+ kunit_deactivate_static_stub(test, add_one);
+ KUNIT_EXPECT_EQ(test, add_one(1), 2);
+}
+
+static const struct example_param {
+ int value;
+} example_params_array[] = {
+ { .value = 2, },
+ { .value = 1, },
+ { .value = 0, },
+};
+
+static void example_param_get_desc(const struct example_param *p, char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "example value %d", p->value);
+}
+
+KUNIT_ARRAY_PARAM(example, example_params_array, example_param_get_desc);
+
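+/*
+ * KUNIT_ARRAY_PARAM() above generates the generator function
+ * example_gen_params(), which KUNIT_CASE_PARAM() consumes below.
+ */
+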
+/*
+ * This test shows the use of params.
+ */
+static void example_params_test(struct kunit *test)
+{
+ const struct example_param *param = test->param_value;
+
+ /* By design, param pointer will not be NULL */
+ KUNIT_ASSERT_NOT_NULL(test, param);
+
+ /* Test can be skipped on unsupported param values */
+ if (!param->value)
+ kunit_skip(test, "unsupported param value");
+
+ /* You can use param values for parameterized testing */
+ KUNIT_EXPECT_EQ(test, param->value % param->value, 0);
+}
+
+/*
+ * This test should always pass. Can be used to practice filtering attributes.
+ */
+static void example_slow_test(struct kunit *test)
+{
+ KUNIT_EXPECT_EQ(test, 1 + 1, 2);
+}
+
+/*
+ * Here we make a list of all the test cases we want to add to the test suite
+ * below.
+ */
+static struct kunit_case example_test_cases[] = {
+ /*
+ * This is a helper to create a test case object from a test case
+ * function; its exact function is not important to understand how to
+ * use KUnit, just know that this is how you associate test cases with a
+ * test suite.
+ */
+ KUNIT_CASE(example_simple_test),
+ KUNIT_CASE(example_skip_test),
+ KUNIT_CASE(example_mark_skipped_test),
+ KUNIT_CASE(example_all_expect_macros_test),
+ KUNIT_CASE(example_static_stub_test),
+ KUNIT_CASE_PARAM(example_params_test, example_gen_params),
+ KUNIT_CASE_SLOW(example_slow_test),
+ {}
+};
+
+/*
+ * This defines a suite or grouping of tests.
+ *
+ * Test cases are defined as belonging to the suite by adding them to
+ * `kunit_cases`.
+ *
+ * Often it is desirable to run some function which will set up things which
+ * will be used by every test; this is accomplished with an `init` function
+ * which runs before each test case is invoked. Similarly, an `exit` function
+ * may be specified which runs after every test case and can be used for
+ * cleanup. For clarity, running tests in a test suite would behave as follows:
+ *
+ * suite.suite_init(suite);
+ * suite.init(test);
+ * suite.test_case[0](test);
+ * suite.exit(test);
+ * suite.init(test);
+ * suite.test_case[1](test);
+ * suite.exit(test);
+ * suite.suite_exit(suite);
+ * ...;
+ */
+static struct kunit_suite example_test_suite = {
+ .name = "example",
+ .init = example_test_init,
+ .exit = example_test_exit,
+ .suite_init = example_test_init_suite,
+ .suite_exit = example_test_exit_suite,
+ .test_cases = example_test_cases,
+};
+
+/*
+ * This registers the above test suite telling KUnit that this is a suite of
+ * tests that need to be run.
+ */
+kunit_test_suites(&example_test_suite);
+
+MODULE_LICENSE("GPL v2");
diff --git a/lib/kunit/kunit-test.c b/lib/kunit/kunit-test.c
new file mode 100644
index 000000000..83d8e90ca
--- /dev/null
+++ b/lib/kunit/kunit-test.c
@@ -0,0 +1,664 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit test for core test infrastructure.
+ *
+ * Copyright (C) 2019, Google LLC.
+ * Author: Brendan Higgins <brendanhiggins@google.com>
+ */
+#include <kunit/test.h>
+#include <kunit/test-bug.h>
+
+#include "try-catch-impl.h"
+
+struct kunit_try_catch_test_context {
+ struct kunit_try_catch *try_catch;
+ bool function_called;
+};
+
+static void kunit_test_successful_try(void *data)
+{
+ struct kunit *test = data;
+ struct kunit_try_catch_test_context *ctx = test->priv;
+
+ ctx->function_called = true;
+}
+
+static void kunit_test_no_catch(void *data)
+{
+ struct kunit *test = data;
+
+ KUNIT_FAIL(test, "Catch should not be called\n");
+}
+
+static void kunit_test_try_catch_successful_try_no_catch(struct kunit *test)
+{
+ struct kunit_try_catch_test_context *ctx = test->priv;
+ struct kunit_try_catch *try_catch = ctx->try_catch;
+
+ kunit_try_catch_init(try_catch,
+ test,
+ kunit_test_successful_try,
+ kunit_test_no_catch);
+ kunit_try_catch_run(try_catch, test);
+
+ KUNIT_EXPECT_TRUE(test, ctx->function_called);
+}
+
+static void kunit_test_unsuccessful_try(void *data)
+{
+ struct kunit *test = data;
+ struct kunit_try_catch_test_context *ctx = test->priv;
+ struct kunit_try_catch *try_catch = ctx->try_catch;
+
+ kunit_try_catch_throw(try_catch);
+ KUNIT_FAIL(test, "This line should never be reached\n");
+}
+
+static void kunit_test_catch(void *data)
+{
+ struct kunit *test = data;
+ struct kunit_try_catch_test_context *ctx = test->priv;
+
+ ctx->function_called = true;
+}
+
+static void kunit_test_try_catch_unsuccessful_try_does_catch(struct kunit *test)
+{
+ struct kunit_try_catch_test_context *ctx = test->priv;
+ struct kunit_try_catch *try_catch = ctx->try_catch;
+
+ kunit_try_catch_init(try_catch,
+ test,
+ kunit_test_unsuccessful_try,
+ kunit_test_catch);
+ kunit_try_catch_run(try_catch, test);
+
+ KUNIT_EXPECT_TRUE(test, ctx->function_called);
+}
+
+static int kunit_try_catch_test_init(struct kunit *test)
+{
+ struct kunit_try_catch_test_context *ctx;
+
+ ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ test->priv = ctx;
+
+ ctx->try_catch = kunit_kmalloc(test,
+ sizeof(*ctx->try_catch),
+ GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->try_catch);
+
+ return 0;
+}
+
+static struct kunit_case kunit_try_catch_test_cases[] = {
+ KUNIT_CASE(kunit_test_try_catch_successful_try_no_catch),
+ KUNIT_CASE(kunit_test_try_catch_unsuccessful_try_does_catch),
+ {}
+};
+
+static struct kunit_suite kunit_try_catch_test_suite = {
+ .name = "kunit-try-catch-test",
+ .init = kunit_try_catch_test_init,
+ .test_cases = kunit_try_catch_test_cases,
+};
+
+/*
+ * Context for testing test managed resources
+ * is_resource_initialized is used to test arbitrary resources
+ */
+struct kunit_test_resource_context {
+ struct kunit test;
+ bool is_resource_initialized;
+ int allocate_order[2];
+ int free_order[4];
+};
+
+static int fake_resource_init(struct kunit_resource *res, void *context)
+{
+ struct kunit_test_resource_context *ctx = context;
+
+ res->data = &ctx->is_resource_initialized;
+ ctx->is_resource_initialized = true;
+ return 0;
+}
+
+static void fake_resource_free(struct kunit_resource *res)
+{
+ bool *is_resource_initialized = res->data;
+
+ *is_resource_initialized = false;
+}
+
+static void kunit_resource_test_init_resources(struct kunit *test)
+{
+ struct kunit_test_resource_context *ctx = test->priv;
+
+ kunit_init_test(&ctx->test, "testing_test_init_test", NULL);
+
+ KUNIT_EXPECT_TRUE(test, list_empty(&ctx->test.resources));
+}
+
+static void kunit_resource_test_alloc_resource(struct kunit *test)
+{
+ struct kunit_test_resource_context *ctx = test->priv;
+ struct kunit_resource *res;
+ kunit_resource_free_t free = fake_resource_free;
+
+ res = kunit_alloc_and_get_resource(&ctx->test,
+ fake_resource_init,
+ fake_resource_free,
+ GFP_KERNEL,
+ ctx);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, res);
+ KUNIT_EXPECT_PTR_EQ(test,
+ &ctx->is_resource_initialized,
+ (bool *)res->data);
+ KUNIT_EXPECT_TRUE(test, list_is_last(&res->node, &ctx->test.resources));
+ KUNIT_EXPECT_PTR_EQ(test, free, res->free);
+
+ kunit_put_resource(res);
+}
+
+static inline bool kunit_resource_instance_match(struct kunit *test,
+ struct kunit_resource *res,
+ void *match_data)
+{
+ return res->data == match_data;
+}
+
+/*
+ * Note: tests below use kunit_alloc_and_get_resource(), so as a consequence
+ * they have a reference to the associated resource that they must release
+ * via kunit_put_resource(). In normal operation, users will only
+ * have to do this for cases where they use kunit_find_resource(), and the
+ * kunit_alloc_resource() function will be used (which does not take a
+ * resource reference).
+ */
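+/*
+ * For illustration, a minimal sketch of the no-reference pattern mentioned
+ * above (reusing the fake resource callbacks defined earlier in this file):
+ *
+ *	data = kunit_alloc_resource(test, fake_resource_init,
+ *				    fake_resource_free, GFP_KERNEL, ctx);
+ *
+ * kunit_alloc_resource() hands back only res->data, so no matching
+ * kunit_put_resource() is needed; the tests below hold a full
+ * struct kunit_resource pointer and therefore must put it.
+ */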
+static void kunit_resource_test_destroy_resource(struct kunit *test)
+{
+ struct kunit_test_resource_context *ctx = test->priv;
+ struct kunit_resource *res = kunit_alloc_and_get_resource(
+ &ctx->test,
+ fake_resource_init,
+ fake_resource_free,
+ GFP_KERNEL,
+ ctx);
+
+ kunit_put_resource(res);
+
+ KUNIT_ASSERT_FALSE(test,
+ kunit_destroy_resource(&ctx->test,
+ kunit_resource_instance_match,
+ res->data));
+
+ KUNIT_EXPECT_FALSE(test, ctx->is_resource_initialized);
+ KUNIT_EXPECT_TRUE(test, list_empty(&ctx->test.resources));
+}
+
+static void kunit_resource_test_remove_resource(struct kunit *test)
+{
+ struct kunit_test_resource_context *ctx = test->priv;
+ struct kunit_resource *res = kunit_alloc_and_get_resource(
+ &ctx->test,
+ fake_resource_init,
+ fake_resource_free,
+ GFP_KERNEL,
+ ctx);
+
+ /* The resource is in the list */
+ KUNIT_EXPECT_FALSE(test, list_empty(&ctx->test.resources));
+
+ /* Remove the resource. The pointer is still valid, but it can't be
+ * found.
+ */
+ kunit_remove_resource(test, res);
+ KUNIT_EXPECT_TRUE(test, list_empty(&ctx->test.resources));
+ /* We haven't been freed yet. */
+ KUNIT_EXPECT_TRUE(test, ctx->is_resource_initialized);
+
+ /* Removing the resource multiple times is valid. */
+ kunit_remove_resource(test, res);
+ KUNIT_EXPECT_TRUE(test, list_empty(&ctx->test.resources));
+ /* Despite having been removed twice (from only one reference), the
+ * resource still has not been freed.
+ */
+ KUNIT_EXPECT_TRUE(test, ctx->is_resource_initialized);
+
+ /* Free the resource. */
+ kunit_put_resource(res);
+ KUNIT_EXPECT_FALSE(test, ctx->is_resource_initialized);
+}
+
+static void kunit_resource_test_cleanup_resources(struct kunit *test)
+{
+ int i;
+ struct kunit_test_resource_context *ctx = test->priv;
+ struct kunit_resource *resources[5];
+
+ for (i = 0; i < ARRAY_SIZE(resources); i++) {
+ resources[i] = kunit_alloc_and_get_resource(&ctx->test,
+ fake_resource_init,
+ fake_resource_free,
+ GFP_KERNEL,
+ ctx);
+ kunit_put_resource(resources[i]);
+ }
+
+ kunit_cleanup(&ctx->test);
+
+ KUNIT_EXPECT_TRUE(test, list_empty(&ctx->test.resources));
+}
+
+static void kunit_resource_test_mark_order(int order_array[],
+ size_t order_size,
+ int key)
+{
+ int i;
+
+ for (i = 0; i < order_size && order_array[i]; i++)
+ ;
+
+ order_array[i] = key;
+}
+
+#define KUNIT_RESOURCE_TEST_MARK_ORDER(ctx, order_field, key) \
+ kunit_resource_test_mark_order(ctx->order_field, \
+ ARRAY_SIZE(ctx->order_field), \
+ key)
+
+static int fake_resource_2_init(struct kunit_resource *res, void *context)
+{
+ struct kunit_test_resource_context *ctx = context;
+
+ KUNIT_RESOURCE_TEST_MARK_ORDER(ctx, allocate_order, 2);
+
+ res->data = ctx;
+
+ return 0;
+}
+
+static void fake_resource_2_free(struct kunit_resource *res)
+{
+ struct kunit_test_resource_context *ctx = res->data;
+
+ KUNIT_RESOURCE_TEST_MARK_ORDER(ctx, free_order, 2);
+}
+
+static int fake_resource_1_init(struct kunit_resource *res, void *context)
+{
+ struct kunit_test_resource_context *ctx = context;
+ struct kunit_resource *res2;
+
+ res2 = kunit_alloc_and_get_resource(&ctx->test,
+ fake_resource_2_init,
+ fake_resource_2_free,
+ GFP_KERNEL,
+ ctx);
+
+ KUNIT_RESOURCE_TEST_MARK_ORDER(ctx, allocate_order, 1);
+
+ res->data = ctx;
+
+ kunit_put_resource(res2);
+
+ return 0;
+}
+
+static void fake_resource_1_free(struct kunit_resource *res)
+{
+ struct kunit_test_resource_context *ctx = res->data;
+
+ KUNIT_RESOURCE_TEST_MARK_ORDER(ctx, free_order, 1);
+}
+
+/*
+ * TODO(brendanhiggins@google.com): replace the arrays that keep track of the
+ * order of allocation and freeing with strict mocks using the IN_SEQUENCE macro
+ * to assert allocation and freeing order when the feature becomes available.
+ */
+static void kunit_resource_test_proper_free_ordering(struct kunit *test)
+{
+ struct kunit_test_resource_context *ctx = test->priv;
+ struct kunit_resource *res;
+
+ /* fake_resource_1 allocates a fake_resource_2 in its init. */
+ res = kunit_alloc_and_get_resource(&ctx->test,
+ fake_resource_1_init,
+ fake_resource_1_free,
+ GFP_KERNEL,
+ ctx);
+
+ /*
+ * Since fake_resource_2_init calls KUNIT_RESOURCE_TEST_MARK_ORDER
+ * before returning to fake_resource_1_init, it should be the first to
+ * put its key in the allocate_order array.
+ */
+ KUNIT_EXPECT_EQ(test, ctx->allocate_order[0], 2);
+ KUNIT_EXPECT_EQ(test, ctx->allocate_order[1], 1);
+
+ kunit_put_resource(res);
+
+ kunit_cleanup(&ctx->test);
+
+ /*
+ * Because fake_resource_2 finishes allocation before fake_resource_1,
+ * fake_resource_1 should be freed first since it could depend on
+ * fake_resource_2.
+ */
+ KUNIT_EXPECT_EQ(test, ctx->free_order[0], 1);
+ KUNIT_EXPECT_EQ(test, ctx->free_order[1], 2);
+}
+
+static void kunit_resource_test_static(struct kunit *test)
+{
+ struct kunit_test_resource_context ctx;
+ struct kunit_resource res;
+
+ KUNIT_EXPECT_EQ(test, kunit_add_resource(test, NULL, NULL, &res, &ctx),
+ 0);
+
+ KUNIT_EXPECT_PTR_EQ(test, res.data, (void *)&ctx);
+
+ kunit_cleanup(test);
+
+ KUNIT_EXPECT_TRUE(test, list_empty(&test->resources));
+}
+
+static void kunit_resource_test_named(struct kunit *test)
+{
+ struct kunit_resource res1, res2, *found = NULL;
+ struct kunit_test_resource_context ctx;
+
+ KUNIT_EXPECT_EQ(test,
+ kunit_add_named_resource(test, NULL, NULL, &res1,
+ "resource_1", &ctx),
+ 0);
+ KUNIT_EXPECT_PTR_EQ(test, res1.data, (void *)&ctx);
+
+ KUNIT_EXPECT_EQ(test,
+ kunit_add_named_resource(test, NULL, NULL, &res1,
+ "resource_1", &ctx),
+ -EEXIST);
+
+ KUNIT_EXPECT_EQ(test,
+ kunit_add_named_resource(test, NULL, NULL, &res2,
+ "resource_2", &ctx),
+ 0);
+
+ found = kunit_find_named_resource(test, "resource_1");
+
+ KUNIT_EXPECT_PTR_EQ(test, found, &res1);
+
+ if (found)
+ kunit_put_resource(&res1);
+
+ KUNIT_EXPECT_EQ(test, kunit_destroy_named_resource(test, "resource_2"),
+ 0);
+
+ kunit_cleanup(test);
+
+ KUNIT_EXPECT_TRUE(test, list_empty(&test->resources));
+}
+
+static void increment_int(void *ctx)
+{
+ int *i = (int *)ctx;
+ (*i)++;
+}
+
+static void kunit_resource_test_action(struct kunit *test)
+{
+ int num_actions = 0;
+
+ kunit_add_action(test, increment_int, &num_actions);
+ KUNIT_EXPECT_EQ(test, num_actions, 0);
+ kunit_cleanup(test);
+ KUNIT_EXPECT_EQ(test, num_actions, 1);
+
+ /* Once we've cleaned up, the action queue is empty. */
+ kunit_cleanup(test);
+ KUNIT_EXPECT_EQ(test, num_actions, 1);
+
+ /* Check the same function can be deferred multiple times. */
+ kunit_add_action(test, increment_int, &num_actions);
+ kunit_add_action(test, increment_int, &num_actions);
+ kunit_cleanup(test);
+ KUNIT_EXPECT_EQ(test, num_actions, 3);
+}
+
+static void kunit_resource_test_remove_action(struct kunit *test)
+{
+ int num_actions = 0;
+
+ kunit_add_action(test, increment_int, &num_actions);
+ KUNIT_EXPECT_EQ(test, num_actions, 0);
+
+ kunit_remove_action(test, increment_int, &num_actions);
+ kunit_cleanup(test);
+ KUNIT_EXPECT_EQ(test, num_actions, 0);
+}
+
+static void kunit_resource_test_release_action(struct kunit *test)
+{
+ int num_actions = 0;
+
+ kunit_add_action(test, increment_int, &num_actions);
+ KUNIT_EXPECT_EQ(test, num_actions, 0);
+ /* Runs immediately on trigger. */
+ kunit_release_action(test, increment_int, &num_actions);
+ KUNIT_EXPECT_EQ(test, num_actions, 1);
+
+ /* Doesn't run again on test exit. */
+ kunit_cleanup(test);
+ KUNIT_EXPECT_EQ(test, num_actions, 1);
+}
+
+static void action_order_1(void *ctx)
+{
+ struct kunit_test_resource_context *res_ctx = (struct kunit_test_resource_context *)ctx;
+
+ KUNIT_RESOURCE_TEST_MARK_ORDER(res_ctx, free_order, 1);
+ kunit_log(KERN_INFO, current->kunit_test, "action_order_1");
+}
+
+static void action_order_2(void *ctx)
+{
+ struct kunit_test_resource_context *res_ctx = (struct kunit_test_resource_context *)ctx;
+
+ KUNIT_RESOURCE_TEST_MARK_ORDER(res_ctx, free_order, 2);
+ kunit_log(KERN_INFO, current->kunit_test, "action_order_2");
+}
+
+static void kunit_resource_test_action_ordering(struct kunit *test)
+{
+ struct kunit_test_resource_context *ctx = test->priv;
+
+ kunit_add_action(test, action_order_1, ctx);
+ kunit_add_action(test, action_order_2, ctx);
+ kunit_add_action(test, action_order_1, ctx);
+ kunit_add_action(test, action_order_2, ctx);
+ kunit_remove_action(test, action_order_1, ctx);
+ kunit_release_action(test, action_order_2, ctx);
+ kunit_cleanup(test);
+
+	/*
+	 * Expected: the released 2 runs immediately; cleanup then runs the
+	 * remaining deferred 2 and 1 in reverse order of addition; the
+	 * removed 1 never runs.
+	 */
+ KUNIT_EXPECT_EQ(test, ctx->free_order[0], 2);
+ KUNIT_EXPECT_EQ(test, ctx->free_order[1], 2);
+ KUNIT_EXPECT_EQ(test, ctx->free_order[2], 1);
+}
+
+static int kunit_resource_test_init(struct kunit *test)
+{
+ struct kunit_test_resource_context *ctx =
+ kzalloc(sizeof(*ctx), GFP_KERNEL);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
+ test->priv = ctx;
+
+ kunit_init_test(&ctx->test, "test_test_context", NULL);
+
+ return 0;
+}
+
+static void kunit_resource_test_exit(struct kunit *test)
+{
+ struct kunit_test_resource_context *ctx = test->priv;
+
+ kunit_cleanup(&ctx->test);
+ kfree(ctx);
+}
+
+static struct kunit_case kunit_resource_test_cases[] = {
+ KUNIT_CASE(kunit_resource_test_init_resources),
+ KUNIT_CASE(kunit_resource_test_alloc_resource),
+ KUNIT_CASE(kunit_resource_test_destroy_resource),
+ KUNIT_CASE(kunit_resource_test_remove_resource),
+ KUNIT_CASE(kunit_resource_test_cleanup_resources),
+ KUNIT_CASE(kunit_resource_test_proper_free_ordering),
+ KUNIT_CASE(kunit_resource_test_static),
+ KUNIT_CASE(kunit_resource_test_named),
+ KUNIT_CASE(kunit_resource_test_action),
+ KUNIT_CASE(kunit_resource_test_remove_action),
+ KUNIT_CASE(kunit_resource_test_release_action),
+ KUNIT_CASE(kunit_resource_test_action_ordering),
+ {}
+};
+
+static struct kunit_suite kunit_resource_test_suite = {
+ .name = "kunit-resource-test",
+ .init = kunit_resource_test_init,
+ .exit = kunit_resource_test_exit,
+ .test_cases = kunit_resource_test_cases,
+};
+
+static void kunit_log_test(struct kunit *test)
+{
+ struct kunit_suite suite;
+
+ suite.log = kunit_kzalloc(test, KUNIT_LOG_SIZE, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, suite.log);
+
+ kunit_log(KERN_INFO, test, "put this in log.");
+ kunit_log(KERN_INFO, test, "this too.");
+ kunit_log(KERN_INFO, &suite, "add to suite log.");
+ kunit_log(KERN_INFO, &suite, "along with this.");
+
+#ifdef CONFIG_KUNIT_DEBUGFS
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test,
+ strstr(test->log, "put this in log."));
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test,
+ strstr(test->log, "this too."));
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test,
+ strstr(suite.log, "add to suite log."));
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test,
+ strstr(suite.log, "along with this."));
+#else
+ KUNIT_EXPECT_NULL(test, test->log);
+#endif
+}
+
+static void kunit_log_newline_test(struct kunit *test)
+{
+ kunit_info(test, "Add newline\n");
+ if (test->log) {
+ KUNIT_ASSERT_NOT_NULL_MSG(test, strstr(test->log, "Add newline\n"),
+ "Missing log line, full log:\n%s", test->log);
+ KUNIT_EXPECT_NULL(test, strstr(test->log, "Add newline\n\n"));
+ } else {
+ kunit_skip(test, "only useful when debugfs is enabled");
+ }
+}
+
+static struct kunit_case kunit_log_test_cases[] = {
+ KUNIT_CASE(kunit_log_test),
+ KUNIT_CASE(kunit_log_newline_test),
+ {}
+};
+
+static struct kunit_suite kunit_log_test_suite = {
+ .name = "kunit-log-test",
+ .test_cases = kunit_log_test_cases,
+};
+
+static void kunit_status_set_failure_test(struct kunit *test)
+{
+ struct kunit fake;
+
+ kunit_init_test(&fake, "fake test", NULL);
+
+ KUNIT_EXPECT_EQ(test, fake.status, (enum kunit_status)KUNIT_SUCCESS);
+ kunit_set_failure(&fake);
+ KUNIT_EXPECT_EQ(test, fake.status, (enum kunit_status)KUNIT_FAILURE);
+}
+
+static void kunit_status_mark_skipped_test(struct kunit *test)
+{
+ struct kunit fake;
+
+ kunit_init_test(&fake, "fake test", NULL);
+
+ /* Before: Should be SUCCESS with no comment. */
+ KUNIT_EXPECT_EQ(test, fake.status, KUNIT_SUCCESS);
+ KUNIT_EXPECT_STREQ(test, fake.status_comment, "");
+
+ /* Mark the test as skipped. */
+ kunit_mark_skipped(&fake, "Accepts format string: %s", "YES");
+
+ /* After: Should be SKIPPED with our comment. */
+ KUNIT_EXPECT_EQ(test, fake.status, (enum kunit_status)KUNIT_SKIPPED);
+ KUNIT_EXPECT_STREQ(test, fake.status_comment, "Accepts format string: YES");
+}
+
+static struct kunit_case kunit_status_test_cases[] = {
+ KUNIT_CASE(kunit_status_set_failure_test),
+ KUNIT_CASE(kunit_status_mark_skipped_test),
+ {}
+};
+
+static struct kunit_suite kunit_status_test_suite = {
+ .name = "kunit_status",
+ .test_cases = kunit_status_test_cases,
+};
+
+static void kunit_current_test(struct kunit *test)
+{
+	/* Check that both current->kunit_test and kunit_get_current_test()
+	 * point to the currently running test.
+	 */
+ KUNIT_EXPECT_PTR_EQ(test, test, current->kunit_test);
+ KUNIT_EXPECT_PTR_EQ(test, test, kunit_get_current_test());
+}
+
+static void kunit_current_fail_test(struct kunit *test)
+{
+ struct kunit fake;
+
+ kunit_init_test(&fake, "fake test", NULL);
+ KUNIT_EXPECT_EQ(test, fake.status, KUNIT_SUCCESS);
+
+ /* Set current->kunit_test to fake test. */
+ current->kunit_test = &fake;
+
+ kunit_fail_current_test("This should make `fake` test fail.");
+ KUNIT_EXPECT_EQ(test, fake.status, (enum kunit_status)KUNIT_FAILURE);
+ kunit_cleanup(&fake);
+
+ /* Reset current->kunit_test to current test. */
+ current->kunit_test = test;
+}
+
+static struct kunit_case kunit_current_test_cases[] = {
+ KUNIT_CASE(kunit_current_test),
+ KUNIT_CASE(kunit_current_fail_test),
+ {}
+};
+
+static struct kunit_suite kunit_current_test_suite = {
+ .name = "kunit_current",
+ .test_cases = kunit_current_test_cases,
+};
+
+kunit_test_suites(&kunit_try_catch_test_suite, &kunit_resource_test_suite,
+ &kunit_log_test_suite, &kunit_status_test_suite,
+ &kunit_current_test_suite);
+
+MODULE_LICENSE("GPL v2");
diff --git a/lib/kunit/resource.c b/lib/kunit/resource.c
new file mode 100644
index 000000000..f0209252b
--- /dev/null
+++ b/lib/kunit/resource.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit resource API for test-managed resources (allocations, etc.).
+ *
+ * Copyright (C) 2022, Google LLC.
+ * Author: Daniel Latypov <dlatypov@google.com>
+ */
+
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <linux/kref.h>
+
+/*
+ * Used for static resources and when a kunit_resource * has been created by
+ * kunit_alloc_resource(). When an init function is supplied, @data is passed
+ * into the init function; otherwise, we simply set the resource data field to
+ * the data value passed in. Doesn't initialize res->should_kfree.
+ */
+int __kunit_add_resource(struct kunit *test,
+ kunit_resource_init_t init,
+ kunit_resource_free_t free,
+ struct kunit_resource *res,
+ void *data)
+{
+ int ret = 0;
+ unsigned long flags;
+
+ res->free = free;
+ kref_init(&res->refcount);
+
+ if (init) {
+ ret = init(res, data);
+ if (ret)
+ return ret;
+ } else {
+ res->data = data;
+ }
+
+ spin_lock_irqsave(&test->lock, flags);
+ list_add_tail(&res->node, &test->resources);
+ /* refcount for list is established by kref_init() */
+ spin_unlock_irqrestore(&test->lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(__kunit_add_resource);
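+
+/*
+ * A minimal usage sketch (the variable names are illustrative only): a
+ * static, caller-owned resource needs neither an init nor a free callback:
+ *
+ *	struct kunit_resource res;
+ *	static int my_data;
+ *
+ *	kunit_add_resource(test, NULL, NULL, &res, &my_data);
+ *
+ * res.data then points at my_data; the kunit_add_resource() wrapper clears
+ * res->should_kfree, so kunit_cleanup() merely unlinks the node and calls
+ * no free callback.
+ */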
+
+void kunit_remove_resource(struct kunit *test, struct kunit_resource *res)
+{
+ unsigned long flags;
+ bool was_linked;
+
+ spin_lock_irqsave(&test->lock, flags);
+ was_linked = !list_empty(&res->node);
+ list_del_init(&res->node);
+ spin_unlock_irqrestore(&test->lock, flags);
+
+ if (was_linked)
+ kunit_put_resource(res);
+}
+EXPORT_SYMBOL_GPL(kunit_remove_resource);
+
+int kunit_destroy_resource(struct kunit *test, kunit_resource_match_t match,
+ void *match_data)
+{
+ struct kunit_resource *res = kunit_find_resource(test, match,
+ match_data);
+
+ if (!res)
+ return -ENOENT;
+
+ kunit_remove_resource(test, res);
+
+ /* We have a reference also via _find(); drop it. */
+ kunit_put_resource(res);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kunit_destroy_resource);
+
+struct kunit_action_ctx {
+ struct kunit_resource res;
+ kunit_action_t *func;
+ void *ctx;
+};
+
+static void __kunit_action_free(struct kunit_resource *res)
+{
+ struct kunit_action_ctx *action_ctx = container_of(res, struct kunit_action_ctx, res);
+
+ action_ctx->func(action_ctx->ctx);
+}
+
+int kunit_add_action(struct kunit *test, void (*action)(void *), void *ctx)
+{
+ struct kunit_action_ctx *action_ctx;
+
+	KUNIT_ASSERT_NOT_NULL_MSG(test, action, "Tried to register a NULL action function!");
+
+ action_ctx = kzalloc(sizeof(*action_ctx), GFP_KERNEL);
+ if (!action_ctx)
+ return -ENOMEM;
+
+ action_ctx->func = action;
+ action_ctx->ctx = ctx;
+
+ action_ctx->res.should_kfree = true;
+ /* As init is NULL, this cannot fail. */
+ __kunit_add_resource(test, NULL, __kunit_action_free, &action_ctx->res, action_ctx);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kunit_add_action);
+
+int kunit_add_action_or_reset(struct kunit *test, void (*action)(void *),
+ void *ctx)
+{
+ int res = kunit_add_action(test, action, ctx);
+
+ if (res)
+ action(ctx);
+ return res;
+}
+EXPORT_SYMBOL_GPL(kunit_add_action_or_reset);
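+
+/*
+ * kunit_add_action_or_reset() mirrors devm_add_action_or_reset(): on
+ * failure the action runs immediately, so the caller can simply propagate
+ * the error. A minimal sketch, using kfree() as the action:
+ *
+ *	buf = kmalloc(16, GFP_KERNEL);
+ *	if (!buf)
+ *		return -ENOMEM;
+ *	ret = kunit_add_action_or_reset(test, (kunit_action_t *)kfree, buf);
+ *	if (ret)
+ *		return ret;
+ *
+ * where buf has already been freed by the time the error is returned.
+ */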
+
+static bool __kunit_action_match(struct kunit *test,
+ struct kunit_resource *res, void *match_data)
+{
+ struct kunit_action_ctx *match_ctx = (struct kunit_action_ctx *)match_data;
+ struct kunit_action_ctx *res_ctx = container_of(res, struct kunit_action_ctx, res);
+
+ /* Make sure this is a free function. */
+ if (res->free != __kunit_action_free)
+ return false;
+
+ /* Both the function and context data should match. */
+ return (match_ctx->func == res_ctx->func) && (match_ctx->ctx == res_ctx->ctx);
+}
+
+void kunit_remove_action(struct kunit *test,
+ kunit_action_t *action,
+ void *ctx)
+{
+ struct kunit_action_ctx match_ctx;
+ struct kunit_resource *res;
+
+ match_ctx.func = action;
+ match_ctx.ctx = ctx;
+
+ res = kunit_find_resource(test, __kunit_action_match, &match_ctx);
+ if (res) {
+ /* Remove the free function so we don't run the action. */
+ res->free = NULL;
+ kunit_remove_resource(test, res);
+ kunit_put_resource(res);
+ }
+}
+EXPORT_SYMBOL_GPL(kunit_remove_action);
+
+void kunit_release_action(struct kunit *test,
+ kunit_action_t *action,
+ void *ctx)
+{
+ struct kunit_action_ctx match_ctx;
+ struct kunit_resource *res;
+
+ match_ctx.func = action;
+ match_ctx.ctx = ctx;
+
+ res = kunit_find_resource(test, __kunit_action_match, &match_ctx);
+ if (res) {
+ kunit_remove_resource(test, res);
+ /* We have to put() this here, else free won't be called. */
+ kunit_put_resource(res);
+ }
+}
+EXPORT_SYMBOL_GPL(kunit_release_action);
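+
+/*
+ * To contrast the two cancellation paths (a sketch; increment_int stands
+ * in for any deferred action and n for its context):
+ *
+ *	kunit_add_action(test, increment_int, &n);
+ *	kunit_remove_action(test, increment_int, &n);	(n is unchanged)
+ *
+ *	kunit_add_action(test, increment_int, &n);
+ *	kunit_release_action(test, increment_int, &n);	(n incremented now)
+ *
+ * Neither variant leaves anything behind for kunit_cleanup() to run.
+ */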
diff --git a/lib/kunit/static_stub.c b/lib/kunit/static_stub.c
new file mode 100644
index 000000000..92b2cccd5
--- /dev/null
+++ b/lib/kunit/static_stub.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit function redirection (static stubbing) API.
+ *
+ * Copyright (C) 2022, Google LLC.
+ * Author: David Gow <davidgow@google.com>
+ */
+
+#include <kunit/test.h>
+#include <kunit/static_stub.h>
+#include "hooks-impl.h"
+
+/* Context for a static stub. This is stored in the resource data. */
+struct kunit_static_stub_ctx {
+ void *real_fn_addr;
+ void *replacement_addr;
+};
+
+static void __kunit_static_stub_resource_free(struct kunit_resource *res)
+{
+ kfree(res->data);
+}
+
+/* Matching function for kunit_find_resource(). match_data is real_fn_addr. */
+static bool __kunit_static_stub_resource_match(struct kunit *test,
+ struct kunit_resource *res,
+ void *match_real_fn_addr)
+{
+ /* This pointer is only valid if res is a static stub resource. */
+ struct kunit_static_stub_ctx *ctx = res->data;
+
+ /* Make sure the resource is a static stub resource. */
+ if (res->free != &__kunit_static_stub_resource_free)
+ return false;
+
+ return ctx->real_fn_addr == match_real_fn_addr;
+}
+
+/* Hook to return the address of the replacement function. */
+void *__kunit_get_static_stub_address_impl(struct kunit *test, void *real_fn_addr)
+{
+ struct kunit_resource *res;
+ struct kunit_static_stub_ctx *ctx;
+ void *replacement_addr;
+
+ res = kunit_find_resource(test,
+ __kunit_static_stub_resource_match,
+ real_fn_addr);
+
+ if (!res)
+ return NULL;
+
+ ctx = res->data;
+ replacement_addr = ctx->replacement_addr;
+ kunit_put_resource(res);
+ return replacement_addr;
+}
+
+void kunit_deactivate_static_stub(struct kunit *test, void *real_fn_addr)
+{
+ struct kunit_resource *res;
+
+ KUNIT_ASSERT_PTR_NE_MSG(test, real_fn_addr, NULL,
+ "Tried to deactivate a NULL stub.");
+
+ /* Look up the existing stub for this function. */
+ res = kunit_find_resource(test,
+ __kunit_static_stub_resource_match,
+ real_fn_addr);
+
+ /* Error out if the stub doesn't exist. */
+ KUNIT_ASSERT_PTR_NE_MSG(test, res, NULL,
+ "Tried to deactivate a nonexistent stub.");
+
+	/* Free the stub. We 'put' twice: kunit_remove_resource() drops the
+	 * list's reference, and then we drop the reference we got from
+	 * kunit_find_resource().
+	 */
+ kunit_remove_resource(test, res);
+ kunit_put_resource(res);
+}
+EXPORT_SYMBOL_GPL(kunit_deactivate_static_stub);
+
+/* Helper function for kunit_activate_static_stub(). The macro does
+ * typechecking, so use it instead.
+ */
+void __kunit_activate_static_stub(struct kunit *test,
+ void *real_fn_addr,
+ void *replacement_addr)
+{
+ struct kunit_static_stub_ctx *ctx;
+ struct kunit_resource *res;
+
+ KUNIT_ASSERT_PTR_NE_MSG(test, real_fn_addr, NULL,
+ "Tried to activate a stub for function NULL");
+
+ /* If the replacement address is NULL, deactivate the stub. */
+ if (!replacement_addr) {
+		kunit_deactivate_static_stub(test, real_fn_addr);
+ return;
+ }
+
+ /* Look up any existing stubs for this function, and replace them. */
+ res = kunit_find_resource(test,
+ __kunit_static_stub_resource_match,
+ real_fn_addr);
+ if (res) {
+ ctx = res->data;
+ ctx->replacement_addr = replacement_addr;
+
+ /* We got an extra reference from find_resource(), so put it. */
+ kunit_put_resource(res);
+ } else {
+ ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+ ctx->real_fn_addr = real_fn_addr;
+ ctx->replacement_addr = replacement_addr;
+ res = kunit_alloc_resource(test, NULL,
+ &__kunit_static_stub_resource_free,
+ GFP_KERNEL, ctx);
+ }
+}
+EXPORT_SYMBOL_GPL(__kunit_activate_static_stub);
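+
+/*
+ * For reference, the intended call pattern, sketched with hypothetical
+ * function names (the redirect macro lives in <kunit/static_stub.h>):
+ *
+ *	void real_func(int n)
+ *	{
+ *		KUNIT_STATIC_STUB_REDIRECT(real_func, n);
+ *		... real implementation ...
+ *	}
+ *
+ * and, in the test itself:
+ *
+ *	kunit_activate_static_stub(test, real_func, replacement_func);
+ */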
diff --git a/lib/kunit/string-stream-test.c b/lib/kunit/string-stream-test.c
new file mode 100644
index 000000000..110f3a993
--- /dev/null
+++ b/lib/kunit/string-stream-test.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit test for struct string_stream.
+ *
+ * Copyright (C) 2019, Google LLC.
+ * Author: Brendan Higgins <brendanhiggins@google.com>
+ */
+
+#include <kunit/test.h>
+#include <linux/slab.h>
+
+#include "string-stream.h"
+
+static void string_stream_test_empty_on_creation(struct kunit *test)
+{
+ struct string_stream *stream = alloc_string_stream(test, GFP_KERNEL);
+
+ KUNIT_EXPECT_TRUE(test, string_stream_is_empty(stream));
+}
+
+static void string_stream_test_not_empty_after_add(struct kunit *test)
+{
+ struct string_stream *stream = alloc_string_stream(test, GFP_KERNEL);
+
+ string_stream_add(stream, "Foo");
+
+ KUNIT_EXPECT_FALSE(test, string_stream_is_empty(stream));
+}
+
+static void string_stream_test_get_string(struct kunit *test)
+{
+ struct string_stream *stream = alloc_string_stream(test, GFP_KERNEL);
+ char *output;
+
+ string_stream_add(stream, "Foo");
+ string_stream_add(stream, " %s", "bar");
+
+ output = string_stream_get_string(stream);
+ KUNIT_ASSERT_STREQ(test, output, "Foo bar");
+}
+
+static struct kunit_case string_stream_test_cases[] = {
+ KUNIT_CASE(string_stream_test_empty_on_creation),
+ KUNIT_CASE(string_stream_test_not_empty_after_add),
+ KUNIT_CASE(string_stream_test_get_string),
+ {}
+};
+
+static struct kunit_suite string_stream_test_suite = {
+ .name = "string-stream-test",
+ .test_cases = string_stream_test_cases
+};
+kunit_test_suites(&string_stream_test_suite);
diff --git a/lib/kunit/string-stream.c b/lib/kunit/string-stream.c
new file mode 100644
index 000000000..cc32743c1
--- /dev/null
+++ b/lib/kunit/string-stream.c
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * C++ stream style string builder used in KUnit for building messages.
+ *
+ * Copyright (C) 2019, Google LLC.
+ * Author: Brendan Higgins <brendanhiggins@google.com>
+ */
+
+#include <kunit/test.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+
+#include "string-stream.h"
+
+static struct string_stream_fragment *alloc_string_stream_fragment(
+ struct kunit *test, int len, gfp_t gfp)
+{
+ struct string_stream_fragment *frag;
+
+ frag = kunit_kzalloc(test, sizeof(*frag), gfp);
+ if (!frag)
+ return ERR_PTR(-ENOMEM);
+
+ frag->fragment = kunit_kmalloc(test, len, gfp);
+ if (!frag->fragment) {
+ kunit_kfree(test, frag);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return frag;
+}
+
+static void string_stream_fragment_destroy(struct kunit *test,
+ struct string_stream_fragment *frag)
+{
+ list_del(&frag->node);
+ kunit_kfree(test, frag->fragment);
+ kunit_kfree(test, frag);
+}
+
+int string_stream_vadd(struct string_stream *stream,
+ const char *fmt,
+ va_list args)
+{
+ struct string_stream_fragment *frag_container;
+ int len;
+ va_list args_for_counting;
+
+	/* Make a copy because vsnprintf() consumes the va_list */
+ va_copy(args_for_counting, args);
+
+ /* Need space for null byte. */
+ len = vsnprintf(NULL, 0, fmt, args_for_counting) + 1;
+
+ va_end(args_for_counting);
+
+ frag_container = alloc_string_stream_fragment(stream->test,
+ len,
+ stream->gfp);
+ if (IS_ERR(frag_container))
+ return PTR_ERR(frag_container);
+
+ len = vsnprintf(frag_container->fragment, len, fmt, args);
+ spin_lock(&stream->lock);
+ stream->length += len;
+ list_add_tail(&frag_container->node, &stream->fragments);
+ spin_unlock(&stream->lock);
+
+ return 0;
+}
+
+int string_stream_add(struct string_stream *stream, const char *fmt, ...)
+{
+ va_list args;
+ int result;
+
+ va_start(args, fmt);
+ result = string_stream_vadd(stream, fmt, args);
+ va_end(args);
+
+ return result;
+}
+
+static void string_stream_clear(struct string_stream *stream)
+{
+ struct string_stream_fragment *frag_container, *frag_container_safe;
+
+ spin_lock(&stream->lock);
+ list_for_each_entry_safe(frag_container,
+ frag_container_safe,
+ &stream->fragments,
+ node) {
+ string_stream_fragment_destroy(stream->test, frag_container);
+ }
+ stream->length = 0;
+ spin_unlock(&stream->lock);
+}
+
+char *string_stream_get_string(struct string_stream *stream)
+{
+ struct string_stream_fragment *frag_container;
+ size_t buf_len = stream->length + 1; /* +1 for null byte. */
+ char *buf;
+
+ buf = kunit_kzalloc(stream->test, buf_len, stream->gfp);
+ if (!buf)
+ return NULL;
+
+ spin_lock(&stream->lock);
+ list_for_each_entry(frag_container, &stream->fragments, node)
+ strlcat(buf, frag_container->fragment, buf_len);
+ spin_unlock(&stream->lock);
+
+ return buf;
+}
+
+int string_stream_append(struct string_stream *stream,
+ struct string_stream *other)
+{
+ const char *other_content;
+
+ other_content = string_stream_get_string(other);
+
+ if (!other_content)
+ return -ENOMEM;
+
+ return string_stream_add(stream, other_content);
+}
+
+bool string_stream_is_empty(struct string_stream *stream)
+{
+ return list_empty(&stream->fragments);
+}
+
+struct string_stream *alloc_string_stream(struct kunit *test, gfp_t gfp)
+{
+ struct string_stream *stream;
+
+ stream = kunit_kzalloc(test, sizeof(*stream), gfp);
+ if (!stream)
+ return ERR_PTR(-ENOMEM);
+
+ stream->gfp = gfp;
+ stream->test = test;
+ INIT_LIST_HEAD(&stream->fragments);
+ spin_lock_init(&stream->lock);
+
+ return stream;
+}
+
+void string_stream_destroy(struct string_stream *stream)
+{
+ string_stream_clear(stream);
+}
diff --git a/lib/kunit/string-stream.h b/lib/kunit/string-stream.h
new file mode 100644
index 000000000..b669f9a75
--- /dev/null
+++ b/lib/kunit/string-stream.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * C++ stream style string builder used in KUnit for building messages.
+ *
+ * Copyright (C) 2019, Google LLC.
+ * Author: Brendan Higgins <brendanhiggins@google.com>
+ */
+
+#ifndef _KUNIT_STRING_STREAM_H
+#define _KUNIT_STRING_STREAM_H
+
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/stdarg.h>
+
+struct string_stream_fragment {
+ struct list_head node;
+ char *fragment;
+};
+
+struct string_stream {
+ size_t length;
+ struct list_head fragments;
+ /* length and fragments are protected by this lock */
+ spinlock_t lock;
+ struct kunit *test;
+ gfp_t gfp;
+};
+
+struct kunit;
+
+struct string_stream *alloc_string_stream(struct kunit *test, gfp_t gfp);
+
+int __printf(2, 3) string_stream_add(struct string_stream *stream,
+ const char *fmt, ...);
+
+int __printf(2, 0) string_stream_vadd(struct string_stream *stream,
+ const char *fmt,
+ va_list args);
+
+char *string_stream_get_string(struct string_stream *stream);
+
+int string_stream_append(struct string_stream *stream,
+ struct string_stream *other);
+
+bool string_stream_is_empty(struct string_stream *stream);
+
+void string_stream_destroy(struct string_stream *stream);
+
+#endif /* _KUNIT_STRING_STREAM_H */
diff --git a/lib/kunit/test.c b/lib/kunit/test.c
new file mode 100644
index 000000000..7452d1a2a
--- /dev/null
+++ b/lib/kunit/test.c
@@ -0,0 +1,929 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Base unit test (KUnit) API.
+ *
+ * Copyright (C) 2019, Google LLC.
+ * Author: Brendan Higgins <brendanhiggins@google.com>
+ */
+
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <kunit/test-bug.h>
+#include <kunit/attributes.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/panic.h>
+#include <linux/sched/debug.h>
+#include <linux/sched.h>
+
+#include "debugfs.h"
+#include "hooks-impl.h"
+#include "string-stream.h"
+#include "try-catch-impl.h"
+
+/*
+ * Hook to fail the current test and print an error message to the log.
+ */
+void __printf(3, 4) __kunit_fail_current_test_impl(const char *file, int line, const char *fmt, ...)
+{
+ va_list args;
+ int len;
+ char *buffer;
+
+ if (!current->kunit_test)
+ return;
+
+ kunit_set_failure(current->kunit_test);
+
+ /* kunit_err() only accepts literals, so evaluate the args first. */
+ va_start(args, fmt);
+ len = vsnprintf(NULL, 0, fmt, args) + 1;
+ va_end(args);
+
+ buffer = kunit_kmalloc(current->kunit_test, len, GFP_KERNEL);
+ if (!buffer)
+ return;
+
+ va_start(args, fmt);
+ vsnprintf(buffer, len, fmt, args);
+ va_end(args);
+
+ kunit_err(current->kunit_test, "%s:%d: %s", file, line, buffer);
+ kunit_kfree(current->kunit_test, buffer);
+}
+
+/*
+ * Enable KUnit tests to run.
+ */
+#ifdef CONFIG_KUNIT_DEFAULT_ENABLED
+static bool enable_param = true;
+#else
+static bool enable_param;
+#endif
+module_param_named(enable, enable_param, bool, 0);
+MODULE_PARM_DESC(enable, "Enable KUnit tests");
+
+/*
+ * KUnit statistics mode:
+ * 0 - disabled
+ * 1 - only when there is more than one subtest
+ * 2 - enabled
+ */
+static int kunit_stats_enabled = 1;
+module_param_named(stats_enabled, kunit_stats_enabled, int, 0644);
+MODULE_PARM_DESC(stats_enabled,
+ "Print test stats: never (0), only for multiple subtests (1), or always (2)");
+
+struct kunit_result_stats {
+ unsigned long passed;
+ unsigned long skipped;
+ unsigned long failed;
+ unsigned long total;
+};
+
+static bool kunit_should_print_stats(struct kunit_result_stats stats)
+{
+ if (kunit_stats_enabled == 0)
+ return false;
+
+ if (kunit_stats_enabled == 2)
+ return true;
+
+ return (stats.total > 1);
+}
+
+static void kunit_print_test_stats(struct kunit *test,
+ struct kunit_result_stats stats)
+{
+ if (!kunit_should_print_stats(stats))
+ return;
+
+ kunit_log(KERN_INFO, test,
+ KUNIT_SUBTEST_INDENT
+ "# %s: pass:%lu fail:%lu skip:%lu total:%lu",
+ test->name,
+ stats.passed,
+ stats.failed,
+ stats.skipped,
+ stats.total);
+}
+
+/**
+ * kunit_log_newline() - Add newline to the end of log if one is not
+ * already present.
+ * @log: The log to add the newline to.
+ */
+static void kunit_log_newline(char *log)
+{
+ int log_len, len_left;
+
+ log_len = strlen(log);
+ len_left = KUNIT_LOG_SIZE - log_len - 1;
+
+ if (log_len > 0 && log[log_len - 1] != '\n')
+ strncat(log, "\n", len_left);
+}
+
+/*
+ * Append formatted message to log, size of which is limited to
+ * KUNIT_LOG_SIZE bytes (including null terminating byte).
+ */
+void kunit_log_append(char *log, const char *fmt, ...)
+{
+ va_list args;
+ int len, log_len, len_left;
+
+ if (!log)
+ return;
+
+ log_len = strlen(log);
+ len_left = KUNIT_LOG_SIZE - log_len - 1;
+ if (len_left <= 0)
+ return;
+
+ /* Evaluate length of line to add to log */
+ va_start(args, fmt);
+ len = vsnprintf(NULL, 0, fmt, args) + 1;
+ va_end(args);
+
+ /* Print formatted line to the log */
+ va_start(args, fmt);
+ vsnprintf(log + log_len, min(len, len_left), fmt, args);
+ va_end(args);
+
+ /* Add newline to end of log if not already present. */
+ kunit_log_newline(log);
+}
+EXPORT_SYMBOL_GPL(kunit_log_append);
+
+size_t kunit_suite_num_test_cases(struct kunit_suite *suite)
+{
+ struct kunit_case *test_case;
+ size_t len = 0;
+
+ kunit_suite_for_each_test_case(suite, test_case)
+ len++;
+
+ return len;
+}
+EXPORT_SYMBOL_GPL(kunit_suite_num_test_cases);
+
+/* Currently supported test levels */
+enum {
+ KUNIT_LEVEL_SUITE = 0,
+ KUNIT_LEVEL_CASE,
+ KUNIT_LEVEL_CASE_PARAM,
+};
+
+static void kunit_print_suite_start(struct kunit_suite *suite)
+{
+ /*
+ * We do not log the test suite header as doing so would
+ * mean debugfs display would consist of the test suite
+ * header prior to individual test results.
+ * Hence directly printk the suite status, and we will
+ * separately seq_printf() the suite header for the debugfs
+ * representation.
+ */
+ pr_info(KUNIT_SUBTEST_INDENT "KTAP version 1\n");
+ pr_info(KUNIT_SUBTEST_INDENT "# Subtest: %s\n",
+ suite->name);
+ kunit_print_attr((void *)suite, false, KUNIT_LEVEL_CASE);
+ pr_info(KUNIT_SUBTEST_INDENT "1..%zd\n",
+ kunit_suite_num_test_cases(suite));
+}
+
+static void kunit_print_ok_not_ok(struct kunit *test,
+ unsigned int test_level,
+ enum kunit_status status,
+ size_t test_number,
+ const char *description,
+ const char *directive)
+{
+ const char *directive_header = (status == KUNIT_SKIPPED) ? " # SKIP " : "";
+ const char *directive_body = (status == KUNIT_SKIPPED) ? directive : "";
+
+	/*
+	 * When test is NULL, assume the results are from the suite and,
+	 * for now, expect suite results at level 0 only.
+	 */
+ WARN(!test && test_level, "suite test level can't be %u!\n", test_level);
+
+ /*
+ * We do not log the test suite results as doing so would
+ * mean debugfs display would consist of an incorrect test
+ * number. Hence directly printk the suite result, and we will
+ * separately seq_printf() the suite results for the debugfs
+ * representation.
+ */
+ if (!test)
+ pr_info("%s %zd %s%s%s\n",
+ kunit_status_to_ok_not_ok(status),
+ test_number, description, directive_header,
+ directive_body);
+ else
+ kunit_log(KERN_INFO, test,
+ "%*s%s %zd %s%s%s",
+ KUNIT_INDENT_LEN * test_level, "",
+ kunit_status_to_ok_not_ok(status),
+ test_number, description, directive_header,
+ directive_body);
+}
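+
+/*
+ * For example, a passing case and a skipped case at case level render as
+ * (a KTAP sketch; indentation grows with test_level):
+ *
+ *	    ok 1 example_case
+ *	    ok 2 example_skipped_case # SKIP not implemented
+ */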
+
+enum kunit_status kunit_suite_has_succeeded(struct kunit_suite *suite)
+{
+ const struct kunit_case *test_case;
+ enum kunit_status status = KUNIT_SKIPPED;
+
+ if (suite->suite_init_err)
+ return KUNIT_FAILURE;
+
+ kunit_suite_for_each_test_case(suite, test_case) {
+ if (test_case->status == KUNIT_FAILURE)
+ return KUNIT_FAILURE;
+ else if (test_case->status == KUNIT_SUCCESS)
+ status = KUNIT_SUCCESS;
+ }
+
+ return status;
+}
+EXPORT_SYMBOL_GPL(kunit_suite_has_succeeded);
+
+static size_t kunit_suite_counter = 1;
+
+static void kunit_print_suite_end(struct kunit_suite *suite)
+{
+ kunit_print_ok_not_ok(NULL, KUNIT_LEVEL_SUITE,
+ kunit_suite_has_succeeded(suite),
+ kunit_suite_counter++,
+ suite->name,
+ suite->status_comment);
+}
+
+unsigned int kunit_test_case_num(struct kunit_suite *suite,
+ struct kunit_case *test_case)
+{
+ struct kunit_case *tc;
+ unsigned int i = 1;
+
+ kunit_suite_for_each_test_case(suite, tc) {
+ if (tc == test_case)
+ return i;
+ i++;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kunit_test_case_num);
+
+static void kunit_print_string_stream(struct kunit *test,
+ struct string_stream *stream)
+{
+ struct string_stream_fragment *fragment;
+ char *buf;
+
+ if (string_stream_is_empty(stream))
+ return;
+
+ buf = string_stream_get_string(stream);
+ if (!buf) {
+ kunit_err(test,
+ "Could not allocate buffer, dumping stream:\n");
+ list_for_each_entry(fragment, &stream->fragments, node) {
+ kunit_err(test, "%s", fragment->fragment);
+ }
+ kunit_err(test, "\n");
+ } else {
+ kunit_err(test, "%s", buf);
+ kunit_kfree(test, buf);
+ }
+}
+
+static void kunit_fail(struct kunit *test, const struct kunit_loc *loc,
+ enum kunit_assert_type type, const struct kunit_assert *assert,
+ assert_format_t assert_format, const struct va_format *message)
+{
+ struct string_stream *stream;
+
+ kunit_set_failure(test);
+
+ stream = alloc_string_stream(test, GFP_KERNEL);
+ if (IS_ERR(stream)) {
+ WARN(true,
+ "Could not allocate stream to print failed assertion in %s:%d\n",
+ loc->file,
+ loc->line);
+ return;
+ }
+
+ kunit_assert_prologue(loc, type, stream);
+ assert_format(assert, message, stream);
+
+ kunit_print_string_stream(test, stream);
+
+ string_stream_destroy(stream);
+}
+
+void __noreturn __kunit_abort(struct kunit *test)
+{
+ kunit_try_catch_throw(&test->try_catch); /* Does not return. */
+
+ /*
+ * Throw could not abort from test.
+ *
+	 * XXX: we should never reach this line, as kunit_try_catch_throw
+	 * is marked __noreturn.
+ */
+ WARN_ONCE(true, "Throw could not abort from test!\n");
+}
+EXPORT_SYMBOL_GPL(__kunit_abort);
+
+void __kunit_do_failed_assertion(struct kunit *test,
+ const struct kunit_loc *loc,
+ enum kunit_assert_type type,
+ const struct kunit_assert *assert,
+ assert_format_t assert_format,
+ const char *fmt, ...)
+{
+ va_list args;
+ struct va_format message;
+ va_start(args, fmt);
+
+ message.fmt = fmt;
+ message.va = &args;
+
+ kunit_fail(test, loc, type, assert, assert_format, &message);
+
+ va_end(args);
+}
+EXPORT_SYMBOL_GPL(__kunit_do_failed_assertion);
+
+void kunit_init_test(struct kunit *test, const char *name, char *log)
+{
+ spin_lock_init(&test->lock);
+ INIT_LIST_HEAD(&test->resources);
+ test->name = name;
+ test->log = log;
+ if (test->log)
+ test->log[0] = '\0';
+ test->status = KUNIT_SUCCESS;
+ test->status_comment[0] = '\0';
+}
+EXPORT_SYMBOL_GPL(kunit_init_test);
+
+/* Only warn when a test takes more than twice the threshold */
+#define KUNIT_SPEED_WARNING_MULTIPLIER 2
+
+/* Slow tests are defined as taking more than 1s */
+#define KUNIT_SPEED_SLOW_THRESHOLD_S 1
+
+#define KUNIT_SPEED_SLOW_WARNING_THRESHOLD_S \
+ (KUNIT_SPEED_WARNING_MULTIPLIER * KUNIT_SPEED_SLOW_THRESHOLD_S)
+
+#define s_to_timespec64(s) ns_to_timespec64((s) * NSEC_PER_SEC)
+
+static void kunit_run_case_check_speed(struct kunit *test,
+ struct kunit_case *test_case,
+ struct timespec64 duration)
+{
+ struct timespec64 slow_thr =
+ s_to_timespec64(KUNIT_SPEED_SLOW_WARNING_THRESHOLD_S);
+ enum kunit_speed speed = test_case->attr.speed;
+
+ if (timespec64_compare(&duration, &slow_thr) < 0)
+ return;
+
+ if (speed == KUNIT_SPEED_VERY_SLOW || speed == KUNIT_SPEED_SLOW)
+ return;
+
+ kunit_warn(test,
+ "Test should be marked slow (runtime: %lld.%09lds)",
+ duration.tv_sec, duration.tv_nsec);
+}
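+
+/*
+ * Tests expected to exceed the threshold should be declared with a slow
+ * attribute so this warning is suppressed, e.g. using the KUNIT_CASE_SLOW()
+ * macro in the suite's test_cases array (a sketch):
+ *
+ *	KUNIT_CASE_SLOW(my_long_running_test),
+ */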
+
+/*
+ * Initializes and runs a test case. Does not clean up or do post validations.
+ */
+static void kunit_run_case_internal(struct kunit *test,
+ struct kunit_suite *suite,
+ struct kunit_case *test_case)
+{
+ struct timespec64 start, end;
+
+ if (suite->init) {
+ int ret;
+
+ ret = suite->init(test);
+ if (ret) {
+ kunit_err(test, "failed to initialize: %d\n", ret);
+ kunit_set_failure(test);
+ return;
+ }
+ }
+
+ ktime_get_ts64(&start);
+
+ test_case->run_case(test);
+
+ ktime_get_ts64(&end);
+
+ kunit_run_case_check_speed(test, test_case, timespec64_sub(end, start));
+}
+
+static void kunit_case_internal_cleanup(struct kunit *test)
+{
+ kunit_cleanup(test);
+}
+
+/*
+ * Performs post validations and cleanup after a test case was run.
+ * XXX: Should ONLY BE CALLED AFTER kunit_run_case_internal!
+ */
+static void kunit_run_case_cleanup(struct kunit *test,
+ struct kunit_suite *suite)
+{
+ if (suite->exit)
+ suite->exit(test);
+
+ kunit_case_internal_cleanup(test);
+}
+
+struct kunit_try_catch_context {
+ struct kunit *test;
+ struct kunit_suite *suite;
+ struct kunit_case *test_case;
+};
+
+static void kunit_try_run_case(void *data)
+{
+ struct kunit_try_catch_context *ctx = data;
+ struct kunit *test = ctx->test;
+ struct kunit_suite *suite = ctx->suite;
+ struct kunit_case *test_case = ctx->test_case;
+
+ current->kunit_test = test;
+
+ /*
+ * kunit_run_case_internal may encounter a fatal error; if it does,
+ * abort will be called, this thread will exit, and finally the parent
+ * thread will resume control and handle any necessary clean up.
+ */
+ kunit_run_case_internal(test, suite, test_case);
+}
+
+static void kunit_try_run_case_cleanup(void *data)
+{
+ struct kunit_try_catch_context *ctx = data;
+ struct kunit *test = ctx->test;
+ struct kunit_suite *suite = ctx->suite;
+
+ current->kunit_test = test;
+
+ kunit_run_case_cleanup(test, suite);
+}
+
+static void kunit_catch_run_case_cleanup(void *data)
+{
+ struct kunit_try_catch_context *ctx = data;
+ struct kunit *test = ctx->test;
+ int try_exit_code = kunit_try_catch_get_result(&test->try_catch);
+
+ /* It is always a failure if cleanup aborts. */
+ kunit_set_failure(test);
+
+ if (try_exit_code) {
+		/*
+		 * Cleanup could not finish; we have no idea what state the
+		 * test is in, so don't attempt any further cleanup.
+		 */
+		if (try_exit_code == -ETIMEDOUT) {
+			kunit_err(test, "test case cleanup timed out\n");
+		} else {
+			/*
+			 * An unknown internal error occurred during test case
+			 * cleanup; all we can do is report it.
+			 */
+			kunit_err(test, "internal error occurred during test case cleanup: %d\n",
+				  try_exit_code);
+		}
+ return;
+ }
+
+ kunit_err(test, "test aborted during cleanup. continuing without cleaning up\n");
+}
+
+static void kunit_catch_run_case(void *data)
+{
+ struct kunit_try_catch_context *ctx = data;
+ struct kunit *test = ctx->test;
+ int try_exit_code = kunit_try_catch_get_result(&test->try_catch);
+
+ if (try_exit_code) {
+ kunit_set_failure(test);
+ /*
+ * Test case could not finish, we have no idea what state it is
+ * in, so don't do clean up.
+ */
+		if (try_exit_code == -ETIMEDOUT) {
+			kunit_err(test, "test case timed out\n");
+		} else {
+			/*
+			 * Unknown internal error occurred preventing test case
+			 * from running, so there is nothing to clean up.
+			 */
+			kunit_err(test, "internal error occurred preventing test case from running: %d\n",
+				  try_exit_code);
+		}
+ return;
+ }
+}
+
+/*
+ * Performs all logic to run a test case. It also catches most errors that
+ * occur in a test case and reports them as failures.
+ */
+static void kunit_run_case_catch_errors(struct kunit_suite *suite,
+ struct kunit_case *test_case,
+ struct kunit *test)
+{
+ struct kunit_try_catch_context context;
+ struct kunit_try_catch *try_catch;
+
+ try_catch = &test->try_catch;
+
+ kunit_try_catch_init(try_catch,
+ test,
+ kunit_try_run_case,
+ kunit_catch_run_case);
+ context.test = test;
+ context.suite = suite;
+ context.test_case = test_case;
+ kunit_try_catch_run(try_catch, &context);
+
+ /* Now run the cleanup */
+ kunit_try_catch_init(try_catch,
+ test,
+ kunit_try_run_case_cleanup,
+ kunit_catch_run_case_cleanup);
+ kunit_try_catch_run(try_catch, &context);
+
+ /* Propagate the parameter result to the test case. */
+ if (test->status == KUNIT_FAILURE)
+ test_case->status = KUNIT_FAILURE;
+ else if (test_case->status != KUNIT_FAILURE && test->status == KUNIT_SUCCESS)
+ test_case->status = KUNIT_SUCCESS;
+}
+
+static void kunit_print_suite_stats(struct kunit_suite *suite,
+ struct kunit_result_stats suite_stats,
+ struct kunit_result_stats param_stats)
+{
+ if (kunit_should_print_stats(suite_stats)) {
+ kunit_log(KERN_INFO, suite,
+ "# %s: pass:%lu fail:%lu skip:%lu total:%lu",
+ suite->name,
+ suite_stats.passed,
+ suite_stats.failed,
+ suite_stats.skipped,
+ suite_stats.total);
+ }
+
+ if (kunit_should_print_stats(param_stats)) {
+ kunit_log(KERN_INFO, suite,
+ "# Totals: pass:%lu fail:%lu skip:%lu total:%lu",
+ param_stats.passed,
+ param_stats.failed,
+ param_stats.skipped,
+ param_stats.total);
+ }
+}
+
+static void kunit_update_stats(struct kunit_result_stats *stats,
+ enum kunit_status status)
+{
+ switch (status) {
+ case KUNIT_SUCCESS:
+ stats->passed++;
+ break;
+ case KUNIT_SKIPPED:
+ stats->skipped++;
+ break;
+ case KUNIT_FAILURE:
+ stats->failed++;
+ break;
+ }
+
+ stats->total++;
+}
+
+static void kunit_accumulate_stats(struct kunit_result_stats *total,
+ struct kunit_result_stats add)
+{
+ total->passed += add.passed;
+ total->skipped += add.skipped;
+ total->failed += add.failed;
+ total->total += add.total;
+}
+
+int kunit_run_tests(struct kunit_suite *suite)
+{
+ char param_desc[KUNIT_PARAM_DESC_SIZE];
+ struct kunit_case *test_case;
+ struct kunit_result_stats suite_stats = { 0 };
+ struct kunit_result_stats total_stats = { 0 };
+
+ /* Taint the kernel so we know we've run tests. */
+ add_taint(TAINT_TEST, LOCKDEP_STILL_OK);
+
+ if (suite->suite_init) {
+ suite->suite_init_err = suite->suite_init(suite);
+ if (suite->suite_init_err) {
+ kunit_err(suite, KUNIT_SUBTEST_INDENT
+ "# failed to initialize (%d)", suite->suite_init_err);
+ goto suite_end;
+ }
+ }
+
+ kunit_print_suite_start(suite);
+
+ kunit_suite_for_each_test_case(suite, test_case) {
+ struct kunit test = { .param_value = NULL, .param_index = 0 };
+ struct kunit_result_stats param_stats = { 0 };
+
+ kunit_init_test(&test, test_case->name, test_case->log);
+ if (test_case->status == KUNIT_SKIPPED) {
+ /* Test marked as skip */
+ test.status = KUNIT_SKIPPED;
+ kunit_update_stats(&param_stats, test.status);
+ } else if (!test_case->generate_params) {
+ /* Non-parameterised test. */
+ test_case->status = KUNIT_SKIPPED;
+ kunit_run_case_catch_errors(suite, test_case, &test);
+ kunit_update_stats(&param_stats, test.status);
+ } else {
+ /* Get initial param. */
+ param_desc[0] = '\0';
+ test.param_value = test_case->generate_params(NULL, param_desc);
+ test_case->status = KUNIT_SKIPPED;
+ kunit_log(KERN_INFO, &test, KUNIT_SUBTEST_INDENT KUNIT_SUBTEST_INDENT
+ "KTAP version 1\n");
+ kunit_log(KERN_INFO, &test, KUNIT_SUBTEST_INDENT KUNIT_SUBTEST_INDENT
+ "# Subtest: %s", test_case->name);
+
+ while (test.param_value) {
+ kunit_run_case_catch_errors(suite, test_case, &test);
+
+ if (param_desc[0] == '\0') {
+ snprintf(param_desc, sizeof(param_desc),
+ "param-%d", test.param_index);
+ }
+
+ kunit_print_ok_not_ok(&test, KUNIT_LEVEL_CASE_PARAM,
+ test.status,
+ test.param_index + 1,
+ param_desc,
+ test.status_comment);
+
+ /* Get next param. */
+ param_desc[0] = '\0';
+ test.param_value = test_case->generate_params(test.param_value, param_desc);
+ test.param_index++;
+
+ kunit_update_stats(&param_stats, test.status);
+ }
+ }
+
+ kunit_print_attr((void *)test_case, true, KUNIT_LEVEL_CASE);
+
+ kunit_print_test_stats(&test, param_stats);
+
+ kunit_print_ok_not_ok(&test, KUNIT_LEVEL_CASE, test_case->status,
+ kunit_test_case_num(suite, test_case),
+ test_case->name,
+ test.status_comment);
+
+ kunit_update_stats(&suite_stats, test_case->status);
+ kunit_accumulate_stats(&total_stats, param_stats);
+ }
+
+ if (suite->suite_exit)
+ suite->suite_exit(suite);
+
+ kunit_print_suite_stats(suite, suite_stats, total_stats);
+suite_end:
+ kunit_print_suite_end(suite);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kunit_run_tests);
+
+static void kunit_init_suite(struct kunit_suite *suite)
+{
+ kunit_debugfs_create_suite(suite);
+ suite->status_comment[0] = '\0';
+ suite->suite_init_err = 0;
+}
+
+bool kunit_enabled(void)
+{
+ return enable_param;
+}
+
+int __kunit_test_suites_init(struct kunit_suite * const * const suites, int num_suites)
+{
+ unsigned int i;
+
+ if (!kunit_enabled() && num_suites > 0) {
+ pr_info("kunit: disabled\n");
+ return 0;
+ }
+
+ kunit_suite_counter = 1;
+
+ static_branch_inc(&kunit_running);
+
+ for (i = 0; i < num_suites; i++) {
+ kunit_init_suite(suites[i]);
+ kunit_run_tests(suites[i]);
+ }
+
+ static_branch_dec(&kunit_running);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__kunit_test_suites_init);
+
+static void kunit_exit_suite(struct kunit_suite *suite)
+{
+ kunit_debugfs_destroy_suite(suite);
+}
+
+void __kunit_test_suites_exit(struct kunit_suite **suites, int num_suites)
+{
+ unsigned int i;
+
+ if (!kunit_enabled())
+ return;
+
+ for (i = 0; i < num_suites; i++)
+ kunit_exit_suite(suites[i]);
+}
+EXPORT_SYMBOL_GPL(__kunit_test_suites_exit);
+
+#ifdef CONFIG_MODULES
+static void kunit_module_init(struct module *mod)
+{
+ struct kunit_suite_set suite_set = {
+ mod->kunit_suites, mod->kunit_suites + mod->num_kunit_suites,
+ };
+ const char *action = kunit_action();
+ int err = 0;
+
+ suite_set = kunit_filter_suites(&suite_set,
+ kunit_filter_glob() ?: "*.*",
+ kunit_filter(), kunit_filter_action(),
+ &err);
+ if (err)
+ pr_err("kunit module: error filtering suites: %d\n", err);
+
+ mod->kunit_suites = (struct kunit_suite **)suite_set.start;
+ mod->num_kunit_suites = suite_set.end - suite_set.start;
+
+ if (!action)
+ kunit_exec_run_tests(&suite_set, false);
+ else if (!strcmp(action, "list"))
+ kunit_exec_list_tests(&suite_set, false);
+ else if (!strcmp(action, "list_attr"))
+ kunit_exec_list_tests(&suite_set, true);
+ else
+ pr_err("kunit: unknown action '%s'\n", action);
+}
+
+static void kunit_module_exit(struct module *mod)
+{
+ struct kunit_suite_set suite_set = {
+ mod->kunit_suites, mod->kunit_suites + mod->num_kunit_suites,
+ };
+ const char *action = kunit_action();
+
+ if (!action)
+ __kunit_test_suites_exit(mod->kunit_suites,
+ mod->num_kunit_suites);
+
+ if (suite_set.start)
+ kunit_free_suite_set(suite_set);
+}
+
+static int kunit_module_notify(struct notifier_block *nb, unsigned long val,
+ void *data)
+{
+ struct module *mod = data;
+
+ switch (val) {
+ case MODULE_STATE_LIVE:
+ break;
+ case MODULE_STATE_GOING:
+ kunit_module_exit(mod);
+ break;
+ case MODULE_STATE_COMING:
+ kunit_module_init(mod);
+ break;
+ case MODULE_STATE_UNFORMED:
+ break;
+ }
+
+ return 0;
+}
+
+static struct notifier_block kunit_mod_nb = {
+ .notifier_call = kunit_module_notify,
+ .priority = 0,
+};
+#endif
+
+void *kunit_kmalloc_array(struct kunit *test, size_t n, size_t size, gfp_t gfp)
+{
+ void *data;
+
+ data = kmalloc_array(n, size, gfp);
+
+ if (!data)
+ return NULL;
+
+ if (kunit_add_action_or_reset(test, (kunit_action_t *)kfree, data) != 0)
+ return NULL;
+
+ return data;
+}
+EXPORT_SYMBOL_GPL(kunit_kmalloc_array);
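+
+/*
+ * Test-managed allocation in practice (a sketch; struct foo is
+ * hypothetical): the returned memory needs no explicit kfree(), as the
+ * deferred action registered above releases it during kunit_cleanup():
+ *
+ *	struct foo *f = kunit_kzalloc(test, sizeof(*f), GFP_KERNEL);
+ *
+ *	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, f);
+ */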
+
+void kunit_kfree(struct kunit *test, const void *ptr)
+{
+ if (!ptr)
+ return;
+
+ kunit_release_action(test, (kunit_action_t *)kfree, (void *)ptr);
+}
+EXPORT_SYMBOL_GPL(kunit_kfree);
+
+void kunit_cleanup(struct kunit *test)
+{
+ struct kunit_resource *res;
+ unsigned long flags;
+
+ /*
+ * test->resources is a stack - each allocation must be freed in the
+ * reverse order from which it was added since one resource may depend
+ * on another for its entire lifetime.
+ * Also, we cannot use the normal list_for_each constructs, even the
+ * safe ones because *arbitrary* nodes may be deleted when
+ * kunit_resource_free is called; the list_for_each_safe variants only
+ * protect against the current node being deleted, not the next.
+ */
+ while (true) {
+ spin_lock_irqsave(&test->lock, flags);
+ if (list_empty(&test->resources)) {
+ spin_unlock_irqrestore(&test->lock, flags);
+ break;
+ }
+ res = list_last_entry(&test->resources,
+ struct kunit_resource,
+ node);
+ /*
+ * Need to unlock here as a resource may remove another
+ * resource, and this can't happen if the test->lock
+ * is held.
+ */
+ spin_unlock_irqrestore(&test->lock, flags);
+ kunit_remove_resource(test, res);
+ }
+ current->kunit_test = NULL;
+}
+EXPORT_SYMBOL_GPL(kunit_cleanup);
+
+static int __init kunit_init(void)
+{
+ /* Install the KUnit hook functions. */
+ kunit_install_hooks();
+
+ kunit_debugfs_init();
+#ifdef CONFIG_MODULES
+ return register_module_notifier(&kunit_mod_nb);
+#else
+ return 0;
+#endif
+}
+late_initcall(kunit_init);
+
+static void __exit kunit_exit(void)
+{
+ memset(&kunit_hooks, 0, sizeof(kunit_hooks));
+#ifdef CONFIG_MODULES
+ unregister_module_notifier(&kunit_mod_nb);
+#endif
+ kunit_debugfs_cleanup();
+}
+module_exit(kunit_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/lib/kunit/try-catch-impl.h b/lib/kunit/try-catch-impl.h
new file mode 100644
index 000000000..203ba6a5e
--- /dev/null
+++ b/lib/kunit/try-catch-impl.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Internal kunit try catch implementation to be shared with tests.
+ *
+ * Copyright (C) 2019, Google LLC.
+ * Author: Brendan Higgins <brendanhiggins@google.com>
+ */
+
+#ifndef _KUNIT_TRY_CATCH_IMPL_H
+#define _KUNIT_TRY_CATCH_IMPL_H
+
+#include <kunit/try-catch.h>
+#include <linux/types.h>
+
+struct kunit;
+
+static inline void kunit_try_catch_init(struct kunit_try_catch *try_catch,
+ struct kunit *test,
+ kunit_try_catch_func_t try,
+ kunit_try_catch_func_t catch)
+{
+ try_catch->test = test;
+ try_catch->try = try;
+ try_catch->catch = catch;
+}
+
+#endif /* _KUNIT_TRY_CATCH_IMPL_H */
diff --git a/lib/kunit/try-catch.c b/lib/kunit/try-catch.c
new file mode 100644
index 000000000..f7825991d
--- /dev/null
+++ b/lib/kunit/try-catch.c
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * An API for running a function that may fail, and for recovering from
+ * that failure in a controlled manner.
+ *
+ * Copyright (C) 2019, Google LLC.
+ * Author: Brendan Higgins <brendanhiggins@google.com>
+ */
+
+#include <kunit/test.h>
+#include <linux/completion.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+
+#include "try-catch-impl.h"
+
+void __noreturn kunit_try_catch_throw(struct kunit_try_catch *try_catch)
+{
+ try_catch->try_result = -EFAULT;
+ kthread_complete_and_exit(try_catch->try_completion, -EFAULT);
+}
+EXPORT_SYMBOL_GPL(kunit_try_catch_throw);
+
+static int kunit_generic_run_threadfn_adapter(void *data)
+{
+ struct kunit_try_catch *try_catch = data;
+
+ try_catch->try(try_catch->context);
+
+ kthread_complete_and_exit(try_catch->try_completion, 0);
+}
+
+static unsigned long kunit_test_timeout(void)
+{
+ /*
+ * TODO(brendanhiggins@google.com): We should probably have some type of
+ * variable timeout here. The only question is what that timeout value
+ * should be.
+ *
+ * The intention has always been, at some point, to be able to label
+ * tests with some type of size bucket (unit/small, integration/medium,
+ * large/system/end-to-end, etc), where each size bucket would get a
+ * default timeout value kind of like what Bazel does:
+ * https://docs.bazel.build/versions/master/be/common-definitions.html#test.size
+ * There is still some debate to be had on exactly how we do this. (For
+ * one, we probably want to have some sort of test runner level
+ * timeout.)
+ *
+ * For more background on this topic, see:
+ * https://mike-bland.com/2011/11/01/small-medium-large.html
+ *
+ * If tests timeout due to exceeding sysctl_hung_task_timeout_secs,
+ * the task will be killed and an oops generated.
+ */
+ return 300 * msecs_to_jiffies(MSEC_PER_SEC); /* 5 min */
+}
+
+void kunit_try_catch_run(struct kunit_try_catch *try_catch, void *context)
+{
+ DECLARE_COMPLETION_ONSTACK(try_completion);
+ struct kunit *test = try_catch->test;
+ struct task_struct *task_struct;
+ int exit_code, time_remaining;
+
+ try_catch->context = context;
+ try_catch->try_completion = &try_completion;
+ try_catch->try_result = 0;
+ task_struct = kthread_run(kunit_generic_run_threadfn_adapter,
+ try_catch,
+ "kunit_try_catch_thread");
+ if (IS_ERR(task_struct)) {
+ try_catch->catch(try_catch->context);
+ return;
+ }
+
+ time_remaining = wait_for_completion_timeout(&try_completion,
+ kunit_test_timeout());
+ if (time_remaining == 0) {
+ kunit_err(test, "try timed out\n");
+ try_catch->try_result = -ETIMEDOUT;
+ kthread_stop(task_struct);
+ }
+
+ exit_code = try_catch->try_result;
+
+ if (!exit_code)
+ return;
+
+ if (exit_code == -EFAULT)
+ try_catch->try_result = 0;
+ else if (exit_code == -EINTR)
+ kunit_err(test, "wake_up_process() was never called\n");
+ else if (exit_code)
+ kunit_err(test, "Unknown error: %d\n", exit_code);
+
+ try_catch->catch(try_catch->context);
+}
+EXPORT_SYMBOL_GPL(kunit_try_catch_run);
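+
+/*
+ * A minimal usage sketch (illustrative only; example_try() and
+ * example_catch() are hypothetical names, not part of this patch).
+ * The try function runs in its own kthread; if it aborts (e.g. via a
+ * failed KUNIT_ASSERT_*) or times out, the catch function is invoked
+ * on the caller's thread; on success the catch function is skipped:
+ *
+ *   static void example_try(void *context)
+ *   {
+ *           struct kunit *test = context;
+ *
+ *           KUNIT_ASSERT_EQ(test, 1 + 1, 2);
+ *   }
+ *
+ *   static void example_catch(void *context)
+ *   {
+ *           struct kunit *test = context;
+ *
+ *           kunit_err(test, "try aborted or timed out\n");
+ *   }
+ *
+ *   struct kunit_try_catch try_catch;
+ *
+ *   kunit_try_catch_init(&try_catch, test, example_try, example_catch);
+ *   kunit_try_catch_run(&try_catch, test);
+ */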
diff --git a/lib/kunit_iov_iter.c b/lib/kunit_iov_iter.c
new file mode 100644
index 000000000..859b67c4d
--- /dev/null
+++ b/lib/kunit_iov_iter.c
@@ -0,0 +1,777 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* I/O iterator tests. This can only test kernel-backed iterator types.
+ *
+ * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <linux/uio.h>
+#include <linux/bvec.h>
+#include <kunit/test.h>
+
+MODULE_DESCRIPTION("iov_iter testing");
+MODULE_AUTHOR("David Howells <dhowells@redhat.com>");
+MODULE_LICENSE("GPL");
+
+struct kvec_test_range {
+ int from, to;
+};
+
+static const struct kvec_test_range kvec_test_ranges[] = {
+ { 0x00002, 0x00002 },
+ { 0x00027, 0x03000 },
+ { 0x05193, 0x18794 },
+ { 0x20000, 0x20000 },
+ { 0x20000, 0x24000 },
+ { 0x24000, 0x27001 },
+ { 0x29000, 0xffffb },
+ { 0xffffd, 0xffffe },
+ { -1 }
+};
+
+static inline u8 pattern(unsigned long x)
+{
+ return x & 0xff;
+}
+
+static void iov_kunit_unmap(void *data)
+{
+ vunmap(data);
+}
+
+static void *__init iov_kunit_create_buffer(struct kunit *test,
+ struct page ***ppages,
+ size_t npages)
+{
+ struct page **pages;
+ unsigned long got;
+ void *buffer;
+
+ pages = kunit_kcalloc(test, npages, sizeof(struct page *), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pages);
+ *ppages = pages;
+
+ got = alloc_pages_bulk_array(GFP_KERNEL, npages, pages);
+ if (got != npages) {
+ release_pages(pages, got);
+ KUNIT_ASSERT_EQ(test, got, npages);
+ }
+
+ buffer = vmap(pages, npages, VM_MAP | VM_MAP_PUT_PAGES, PAGE_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buffer);
+
+ kunit_add_action_or_reset(test, iov_kunit_unmap, buffer);
+ return buffer;
+}
+
+static void __init iov_kunit_load_kvec(struct kunit *test,
+ struct iov_iter *iter, int dir,
+ struct kvec *kvec, unsigned int kvmax,
+ void *buffer, size_t bufsize,
+ const struct kvec_test_range *pr)
+{
+ size_t size = 0;
+ int i;
+
+ for (i = 0; i < kvmax; i++, pr++) {
+ if (pr->from < 0)
+ break;
+ KUNIT_ASSERT_GE(test, pr->to, pr->from);
+ KUNIT_ASSERT_LE(test, pr->to, bufsize);
+ kvec[i].iov_base = buffer + pr->from;
+ kvec[i].iov_len = pr->to - pr->from;
+ size += pr->to - pr->from;
+ }
+ KUNIT_ASSERT_LE(test, size, bufsize);
+
+ iov_iter_kvec(iter, dir, kvec, i, size);
+}
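+
+/*
+ * For illustration: the second entry of kvec_test_ranges,
+ * { 0x00027, 0x03000 }, becomes a segment with
+ * iov_base = buffer + 0x27 and iov_len = 0x3000 - 0x27 = 0x2fd9 bytes;
+ * degenerate entries such as { 0x00002, 0x00002 } contribute a
+ * zero-length segment.
+ */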
+
+/*
+ * Test copying to an ITER_KVEC-type iterator.
+ */
+static void __init iov_kunit_copy_to_kvec(struct kunit *test)
+{
+ const struct kvec_test_range *pr;
+ struct iov_iter iter;
+ struct page **spages, **bpages;
+ struct kvec kvec[8];
+ u8 *scratch, *buffer;
+ size_t bufsize, npages, size, copied;
+ int i, patt;
+
+ bufsize = 0x100000;
+ npages = bufsize / PAGE_SIZE;
+
+ scratch = iov_kunit_create_buffer(test, &spages, npages);
+ for (i = 0; i < bufsize; i++)
+ scratch[i] = pattern(i);
+
+ buffer = iov_kunit_create_buffer(test, &bpages, npages);
+ memset(buffer, 0, bufsize);
+
+ iov_kunit_load_kvec(test, &iter, READ, kvec, ARRAY_SIZE(kvec),
+ buffer, bufsize, kvec_test_ranges);
+ size = iter.count;
+
+ copied = copy_to_iter(scratch, size, &iter);
+
+ KUNIT_EXPECT_EQ(test, copied, size);
+ KUNIT_EXPECT_EQ(test, iter.count, 0);
+ KUNIT_EXPECT_EQ(test, iter.nr_segs, 0);
+
+ /* Build the expected image in the scratch buffer. */
+ patt = 0;
+ memset(scratch, 0, bufsize);
+ for (pr = kvec_test_ranges; pr->from >= 0; pr++)
+ for (i = pr->from; i < pr->to; i++)
+ scratch[i] = pattern(patt++);
+
+ /* Compare the images */
+ for (i = 0; i < bufsize; i++) {
+ KUNIT_EXPECT_EQ_MSG(test, buffer[i], scratch[i], "at i=%x", i);
+ if (buffer[i] != scratch[i])
+ return;
+ }
+
+ KUNIT_SUCCEED();
+}
+
+/*
+ * Test copying from an ITER_KVEC-type iterator.
+ */
+static void __init iov_kunit_copy_from_kvec(struct kunit *test)
+{
+ const struct kvec_test_range *pr;
+ struct iov_iter iter;
+ struct page **spages, **bpages;
+ struct kvec kvec[8];
+ u8 *scratch, *buffer;
+ size_t bufsize, npages, size, copied;
+ int i, j;
+
+ bufsize = 0x100000;
+ npages = bufsize / PAGE_SIZE;
+
+ buffer = iov_kunit_create_buffer(test, &bpages, npages);
+ for (i = 0; i < bufsize; i++)
+ buffer[i] = pattern(i);
+
+ scratch = iov_kunit_create_buffer(test, &spages, npages);
+ memset(scratch, 0, bufsize);
+
+ iov_kunit_load_kvec(test, &iter, WRITE, kvec, ARRAY_SIZE(kvec),
+ buffer, bufsize, kvec_test_ranges);
+ size = min(iter.count, bufsize);
+
+ copied = copy_from_iter(scratch, size, &iter);
+
+ KUNIT_EXPECT_EQ(test, copied, size);
+ KUNIT_EXPECT_EQ(test, iter.count, 0);
+ KUNIT_EXPECT_EQ(test, iter.nr_segs, 0);
+
+ /* Build the expected image in the main buffer. */
+ i = 0;
+ memset(buffer, 0, bufsize);
+ for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
+ for (j = pr->from; j < pr->to; j++) {
+ buffer[i++] = pattern(j);
+ if (i >= bufsize)
+ goto stop;
+ }
+ }
+stop:
+
+ /* Compare the images */
+ for (i = 0; i < bufsize; i++) {
+ KUNIT_EXPECT_EQ_MSG(test, scratch[i], buffer[i], "at i=%x", i);
+ if (scratch[i] != buffer[i])
+ return;
+ }
+
+ KUNIT_SUCCEED();
+}
+
+struct bvec_test_range {
+ int page, from, to;
+};
+
+static const struct bvec_test_range bvec_test_ranges[] = {
+ { 0, 0x0002, 0x0002 },
+ { 1, 0x0027, 0x0893 },
+ { 2, 0x0193, 0x0794 },
+ { 3, 0x0000, 0x1000 },
+ { 4, 0x0000, 0x1000 },
+ { 5, 0x0000, 0x1000 },
+ { 6, 0x0000, 0x0ffb },
+ { 6, 0x0ffd, 0x0ffe },
+ { -1, -1, -1 }
+};
+
+static void __init iov_kunit_load_bvec(struct kunit *test,
+ struct iov_iter *iter, int dir,
+ struct bio_vec *bvec, unsigned int bvmax,
+ struct page **pages, size_t npages,
+ size_t bufsize,
+ const struct bvec_test_range *pr)
+{
+ struct page *can_merge = NULL, *page;
+ size_t size = 0;
+ int i;
+
+ for (i = 0; i < bvmax; i++, pr++) {
+ if (pr->from < 0)
+ break;
+ KUNIT_ASSERT_LT(test, pr->page, npages);
+ KUNIT_ASSERT_LT(test, pr->page * PAGE_SIZE, bufsize);
+ KUNIT_ASSERT_GE(test, pr->from, 0);
+ KUNIT_ASSERT_GE(test, pr->to, pr->from);
+ KUNIT_ASSERT_LE(test, pr->to, PAGE_SIZE);
+
+ page = pages[pr->page];
+ if (pr->from == 0 && pr->from != pr->to && page == can_merge) {
+ i--;
+ bvec[i].bv_len += pr->to;
+ } else {
+ bvec_set_page(&bvec[i], page, pr->to - pr->from, pr->from);
+ }
+
+ size += pr->to - pr->from;
+ if ((pr->to & ~PAGE_MASK) == 0)
+ can_merge = page + pr->to / PAGE_SIZE;
+ else
+ can_merge = NULL;
+ }
+
+ iov_iter_bvec(iter, dir, bvec, i, size);
+}
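+
+/*
+ * For illustration: in bvec_test_ranges, entries 3..5 each cover a
+ * whole page starting at offset 0. When such an entry lands on the page
+ * immediately following the previous entry's page (i.e. the bulk
+ * allocation happened to return contiguous pages), it is merged into
+ * the preceding bio_vec instead of consuming a new slot.
+ */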
+
+/*
+ * Test copying to an ITER_BVEC-type iterator.
+ */
+static void __init iov_kunit_copy_to_bvec(struct kunit *test)
+{
+ const struct bvec_test_range *pr;
+ struct iov_iter iter;
+ struct bio_vec bvec[8];
+ struct page **spages, **bpages;
+ u8 *scratch, *buffer;
+ size_t bufsize, npages, size, copied;
+ int i, b, patt;
+
+ bufsize = 0x100000;
+ npages = bufsize / PAGE_SIZE;
+
+ scratch = iov_kunit_create_buffer(test, &spages, npages);
+ for (i = 0; i < bufsize; i++)
+ scratch[i] = pattern(i);
+
+ buffer = iov_kunit_create_buffer(test, &bpages, npages);
+ memset(buffer, 0, bufsize);
+
+ iov_kunit_load_bvec(test, &iter, READ, bvec, ARRAY_SIZE(bvec),
+ bpages, npages, bufsize, bvec_test_ranges);
+ size = iter.count;
+
+ copied = copy_to_iter(scratch, size, &iter);
+
+ KUNIT_EXPECT_EQ(test, copied, size);
+ KUNIT_EXPECT_EQ(test, iter.count, 0);
+ KUNIT_EXPECT_EQ(test, iter.nr_segs, 0);
+
+ /* Build the expected image in the scratch buffer. */
+ b = 0;
+ patt = 0;
+ memset(scratch, 0, bufsize);
+ for (pr = bvec_test_ranges; pr->from >= 0; pr++, b++) {
+ u8 *p = scratch + pr->page * PAGE_SIZE;
+
+ for (i = pr->from; i < pr->to; i++)
+ p[i] = pattern(patt++);
+ }
+
+ /* Compare the images */
+ for (i = 0; i < bufsize; i++) {
+ KUNIT_EXPECT_EQ_MSG(test, buffer[i], scratch[i], "at i=%x", i);
+ if (buffer[i] != scratch[i])
+ return;
+ }
+
+ KUNIT_SUCCEED();
+}
+
+/*
+ * Test copying from an ITER_BVEC-type iterator.
+ */
+static void __init iov_kunit_copy_from_bvec(struct kunit *test)
+{
+ const struct bvec_test_range *pr;
+ struct iov_iter iter;
+ struct bio_vec bvec[8];
+ struct page **spages, **bpages;
+ u8 *scratch, *buffer;
+ size_t bufsize, npages, size, copied;
+ int i, j;
+
+ bufsize = 0x100000;
+ npages = bufsize / PAGE_SIZE;
+
+ buffer = iov_kunit_create_buffer(test, &bpages, npages);
+ for (i = 0; i < bufsize; i++)
+ buffer[i] = pattern(i);
+
+ scratch = iov_kunit_create_buffer(test, &spages, npages);
+ memset(scratch, 0, bufsize);
+
+ iov_kunit_load_bvec(test, &iter, WRITE, bvec, ARRAY_SIZE(bvec),
+ bpages, npages, bufsize, bvec_test_ranges);
+ size = iter.count;
+
+ copied = copy_from_iter(scratch, size, &iter);
+
+ KUNIT_EXPECT_EQ(test, copied, size);
+ KUNIT_EXPECT_EQ(test, iter.count, 0);
+ KUNIT_EXPECT_EQ(test, iter.nr_segs, 0);
+
+ /* Build the expected image in the main buffer. */
+ i = 0;
+ memset(buffer, 0, bufsize);
+ for (pr = bvec_test_ranges; pr->from >= 0; pr++) {
+ size_t patt = pr->page * PAGE_SIZE;
+
+ for (j = pr->from; j < pr->to; j++) {
+ buffer[i++] = pattern(patt + j);
+ if (i >= bufsize)
+ goto stop;
+ }
+ }
+stop:
+
+ /* Compare the images */
+ for (i = 0; i < bufsize; i++) {
+ KUNIT_EXPECT_EQ_MSG(test, scratch[i], buffer[i], "at i=%x", i);
+ if (scratch[i] != buffer[i])
+ return;
+ }
+
+ KUNIT_SUCCEED();
+}
+
+static void iov_kunit_destroy_xarray(void *data)
+{
+ struct xarray *xarray = data;
+
+ xa_destroy(xarray);
+ kfree(xarray);
+}
+
+static void __init iov_kunit_load_xarray(struct kunit *test,
+ struct iov_iter *iter, int dir,
+ struct xarray *xarray,
+ struct page **pages, size_t npages)
+{
+ size_t size = 0;
+ int i;
+
+ for (i = 0; i < npages; i++) {
+ void *x = xa_store(xarray, i, pages[i], GFP_KERNEL);
+
+ KUNIT_ASSERT_FALSE(test, xa_is_err(x));
+ size += PAGE_SIZE;
+ }
+ iov_iter_xarray(iter, dir, xarray, 0, size);
+}
+
+static struct xarray *iov_kunit_create_xarray(struct kunit *test)
+{
+ struct xarray *xarray;
+
+ xarray = kzalloc(sizeof(struct xarray), GFP_KERNEL);
+ /* Assert before xa_init() dereferences a potentially NULL pointer. */
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xarray);
+ xa_init(xarray);
+ kunit_add_action_or_reset(test, iov_kunit_destroy_xarray, xarray);
+ return xarray;
+}
+
+/*
+ * Test copying to an ITER_XARRAY-type iterator.
+ */
+static void __init iov_kunit_copy_to_xarray(struct kunit *test)
+{
+ const struct kvec_test_range *pr;
+ struct iov_iter iter;
+ struct xarray *xarray;
+ struct page **spages, **bpages;
+ u8 *scratch, *buffer;
+ size_t bufsize, npages, size, copied;
+ int i, patt;
+
+ bufsize = 0x100000;
+ npages = bufsize / PAGE_SIZE;
+
+ xarray = iov_kunit_create_xarray(test);
+
+ scratch = iov_kunit_create_buffer(test, &spages, npages);
+ for (i = 0; i < bufsize; i++)
+ scratch[i] = pattern(i);
+
+ buffer = iov_kunit_create_buffer(test, &bpages, npages);
+ memset(buffer, 0, bufsize);
+
+ iov_kunit_load_xarray(test, &iter, READ, xarray, bpages, npages);
+
+ i = 0;
+ for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
+ size = pr->to - pr->from;
+ KUNIT_ASSERT_LE(test, pr->to, bufsize);
+
+ iov_iter_xarray(&iter, READ, xarray, pr->from, size);
+ copied = copy_to_iter(scratch + i, size, &iter);
+
+ KUNIT_EXPECT_EQ(test, copied, size);
+ KUNIT_EXPECT_EQ(test, iter.count, 0);
+ KUNIT_EXPECT_EQ(test, iter.iov_offset, size);
+ i += size;
+ }
+
+ /* Build the expected image in the scratch buffer. */
+ patt = 0;
+ memset(scratch, 0, bufsize);
+ for (pr = kvec_test_ranges; pr->from >= 0; pr++)
+ for (i = pr->from; i < pr->to; i++)
+ scratch[i] = pattern(patt++);
+
+ /* Compare the images */
+ for (i = 0; i < bufsize; i++) {
+ KUNIT_EXPECT_EQ_MSG(test, buffer[i], scratch[i], "at i=%x", i);
+ if (buffer[i] != scratch[i])
+ return;
+ }
+
+ KUNIT_SUCCEED();
+}
+
+/*
+ * Test copying from an ITER_XARRAY-type iterator.
+ */
+static void __init iov_kunit_copy_from_xarray(struct kunit *test)
+{
+ const struct kvec_test_range *pr;
+ struct iov_iter iter;
+ struct xarray *xarray;
+ struct page **spages, **bpages;
+ u8 *scratch, *buffer;
+ size_t bufsize, npages, size, copied;
+ int i, j;
+
+ bufsize = 0x100000;
+ npages = bufsize / PAGE_SIZE;
+
+ xarray = iov_kunit_create_xarray(test);
+
+ buffer = iov_kunit_create_buffer(test, &bpages, npages);
+ for (i = 0; i < bufsize; i++)
+ buffer[i] = pattern(i);
+
+ scratch = iov_kunit_create_buffer(test, &spages, npages);
+ memset(scratch, 0, bufsize);
+
+ iov_kunit_load_xarray(test, &iter, READ, xarray, bpages, npages);
+
+ i = 0;
+ for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
+ size = pr->to - pr->from;
+ KUNIT_ASSERT_LE(test, pr->to, bufsize);
+
+ iov_iter_xarray(&iter, WRITE, xarray, pr->from, size);
+ copied = copy_from_iter(scratch + i, size, &iter);
+
+ KUNIT_EXPECT_EQ(test, copied, size);
+ KUNIT_EXPECT_EQ(test, iter.count, 0);
+ KUNIT_EXPECT_EQ(test, iter.iov_offset, size);
+ i += size;
+ }
+
+ /* Build the expected image in the main buffer. */
+ i = 0;
+ memset(buffer, 0, bufsize);
+ for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
+ for (j = pr->from; j < pr->to; j++) {
+ buffer[i++] = pattern(j);
+ if (i >= bufsize)
+ goto stop;
+ }
+ }
+stop:
+
+ /* Compare the images */
+ for (i = 0; i < bufsize; i++) {
+ KUNIT_EXPECT_EQ_MSG(test, scratch[i], buffer[i], "at i=%x", i);
+ if (scratch[i] != buffer[i])
+ return;
+ }
+
+ KUNIT_SUCCEED();
+}
+
+/*
+ * Test the extraction of ITER_KVEC-type iterators.
+ */
+static void __init iov_kunit_extract_pages_kvec(struct kunit *test)
+{
+ const struct kvec_test_range *pr;
+ struct iov_iter iter;
+ struct page **bpages, *pagelist[8], **pages = pagelist;
+ struct kvec kvec[8];
+ u8 *buffer;
+ ssize_t len;
+ size_t bufsize, size = 0, npages;
+ int i, from;
+
+ bufsize = 0x100000;
+ npages = bufsize / PAGE_SIZE;
+
+ buffer = iov_kunit_create_buffer(test, &bpages, npages);
+
+ iov_kunit_load_kvec(test, &iter, READ, kvec, ARRAY_SIZE(kvec),
+ buffer, bufsize, kvec_test_ranges);
+ size = iter.count;
+
+ pr = kvec_test_ranges;
+ from = pr->from;
+ do {
+ size_t offset0 = LONG_MAX;
+
+ for (i = 0; i < ARRAY_SIZE(pagelist); i++)
+ pagelist[i] = (void *)(unsigned long)0xaa55aa55aa55aa55ULL;
+
+ len = iov_iter_extract_pages(&iter, &pages, 100 * 1024,
+ ARRAY_SIZE(pagelist), 0, &offset0);
+ KUNIT_EXPECT_GE(test, len, 0);
+ if (len < 0)
+ break;
+ KUNIT_EXPECT_GE(test, (ssize_t)offset0, 0);
+ KUNIT_EXPECT_LT(test, offset0, PAGE_SIZE);
+ KUNIT_EXPECT_LE(test, len, size);
+ KUNIT_EXPECT_EQ(test, iter.count, size - len);
+ size -= len;
+
+ if (len == 0)
+ break;
+
+ for (i = 0; i < ARRAY_SIZE(pagelist); i++) {
+ struct page *p;
+ ssize_t part = min_t(ssize_t, len, PAGE_SIZE - offset0);
+ int ix;
+
+ KUNIT_ASSERT_GE(test, part, 0);
+ while (from == pr->to) {
+ pr++;
+ from = pr->from;
+ if (from < 0)
+ goto stop;
+ }
+ ix = from / PAGE_SIZE;
+ KUNIT_ASSERT_LT(test, ix, npages);
+ p = bpages[ix];
+ KUNIT_EXPECT_PTR_EQ(test, pagelist[i], p);
+ KUNIT_EXPECT_EQ(test, offset0, from % PAGE_SIZE);
+ from += part;
+ len -= part;
+ KUNIT_ASSERT_GE(test, len, 0);
+ if (len == 0)
+ break;
+ offset0 = 0;
+ }
+
+ if (test->status == KUNIT_FAILURE)
+ break;
+ } while (iov_iter_count(&iter) > 0);
+
+stop:
+ KUNIT_EXPECT_EQ(test, size, 0);
+ KUNIT_EXPECT_EQ(test, iter.count, 0);
+ KUNIT_SUCCEED();
+}
+
+/*
+ * Test the extraction of ITER_BVEC-type iterators.
+ */
+static void __init iov_kunit_extract_pages_bvec(struct kunit *test)
+{
+ const struct bvec_test_range *pr;
+ struct iov_iter iter;
+ struct page **bpages, *pagelist[8], **pages = pagelist;
+ struct bio_vec bvec[8];
+ ssize_t len;
+ size_t bufsize, size = 0, npages;
+ int i, from;
+
+ bufsize = 0x100000;
+ npages = bufsize / PAGE_SIZE;
+
+ iov_kunit_create_buffer(test, &bpages, npages);
+ iov_kunit_load_bvec(test, &iter, READ, bvec, ARRAY_SIZE(bvec),
+ bpages, npages, bufsize, bvec_test_ranges);
+ size = iter.count;
+
+ pr = bvec_test_ranges;
+ from = pr->from;
+ do {
+ size_t offset0 = LONG_MAX;
+
+ for (i = 0; i < ARRAY_SIZE(pagelist); i++)
+ pagelist[i] = (void *)(unsigned long)0xaa55aa55aa55aa55ULL;
+
+ len = iov_iter_extract_pages(&iter, &pages, 100 * 1024,
+ ARRAY_SIZE(pagelist), 0, &offset0);
+ KUNIT_EXPECT_GE(test, len, 0);
+ if (len < 0)
+ break;
+ KUNIT_EXPECT_GE(test, (ssize_t)offset0, 0);
+ KUNIT_EXPECT_LT(test, offset0, PAGE_SIZE);
+ KUNIT_EXPECT_LE(test, len, size);
+ KUNIT_EXPECT_EQ(test, iter.count, size - len);
+ size -= len;
+
+ if (len == 0)
+ break;
+
+ for (i = 0; i < ARRAY_SIZE(pagelist); i++) {
+ struct page *p;
+ ssize_t part = min_t(ssize_t, len, PAGE_SIZE - offset0);
+ int ix;
+
+ KUNIT_ASSERT_GE(test, part, 0);
+ while (from == pr->to) {
+ pr++;
+ from = pr->from;
+ if (from < 0)
+ goto stop;
+ }
+ ix = pr->page + from / PAGE_SIZE;
+ KUNIT_ASSERT_LT(test, ix, npages);
+ p = bpages[ix];
+ KUNIT_EXPECT_PTR_EQ(test, pagelist[i], p);
+ KUNIT_EXPECT_EQ(test, offset0, from % PAGE_SIZE);
+ from += part;
+ len -= part;
+ KUNIT_ASSERT_GE(test, len, 0);
+ if (len == 0)
+ break;
+ offset0 = 0;
+ }
+
+ if (test->status == KUNIT_FAILURE)
+ break;
+ } while (iov_iter_count(&iter) > 0);
+
+stop:
+ KUNIT_EXPECT_EQ(test, size, 0);
+ KUNIT_EXPECT_EQ(test, iter.count, 0);
+ KUNIT_SUCCEED();
+}
+
+/*
+ * Test the extraction of ITER_XARRAY-type iterators.
+ */
+static void __init iov_kunit_extract_pages_xarray(struct kunit *test)
+{
+ const struct kvec_test_range *pr;
+ struct iov_iter iter;
+ struct xarray *xarray;
+ struct page **bpages, *pagelist[8], **pages = pagelist;
+ ssize_t len;
+ size_t bufsize, size = 0, npages;
+ int i, from;
+
+ bufsize = 0x100000;
+ npages = bufsize / PAGE_SIZE;
+
+ xarray = iov_kunit_create_xarray(test);
+
+ iov_kunit_create_buffer(test, &bpages, npages);
+ iov_kunit_load_xarray(test, &iter, READ, xarray, bpages, npages);
+
+ for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
+ from = pr->from;
+ size = pr->to - from;
+ KUNIT_ASSERT_LE(test, pr->to, bufsize);
+
+ iov_iter_xarray(&iter, WRITE, xarray, from, size);
+
+ do {
+ size_t offset0 = LONG_MAX;
+
+ for (i = 0; i < ARRAY_SIZE(pagelist); i++)
+ pagelist[i] = (void *)(unsigned long)0xaa55aa55aa55aa55ULL;
+
+ len = iov_iter_extract_pages(&iter, &pages, 100 * 1024,
+ ARRAY_SIZE(pagelist), 0, &offset0);
+ KUNIT_EXPECT_GE(test, len, 0);
+ if (len < 0)
+ break;
+ KUNIT_EXPECT_LE(test, len, size);
+ KUNIT_EXPECT_EQ(test, iter.count, size - len);
+ if (len == 0)
+ break;
+ size -= len;
+ KUNIT_EXPECT_GE(test, (ssize_t)offset0, 0);
+ KUNIT_EXPECT_LT(test, offset0, PAGE_SIZE);
+
+ for (i = 0; i < ARRAY_SIZE(pagelist); i++) {
+ struct page *p;
+ ssize_t part = min_t(ssize_t, len, PAGE_SIZE - offset0);
+ int ix;
+
+ KUNIT_ASSERT_GE(test, part, 0);
+ ix = from / PAGE_SIZE;
+ KUNIT_ASSERT_LT(test, ix, npages);
+ p = bpages[ix];
+ KUNIT_EXPECT_PTR_EQ(test, pagelist[i], p);
+ KUNIT_EXPECT_EQ(test, offset0, from % PAGE_SIZE);
+ from += part;
+ len -= part;
+ KUNIT_ASSERT_GE(test, len, 0);
+ if (len == 0)
+ break;
+ offset0 = 0;
+ }
+
+ if (test->status == KUNIT_FAILURE)
+ goto stop;
+ } while (iov_iter_count(&iter) > 0);
+
+ KUNIT_EXPECT_EQ(test, size, 0);
+ KUNIT_EXPECT_EQ(test, iter.count, 0);
+ KUNIT_EXPECT_EQ(test, iter.iov_offset, pr->to - pr->from);
+ }
+
+stop:
+ KUNIT_SUCCEED();
+}
+
+static struct kunit_case __refdata iov_kunit_cases[] = {
+ KUNIT_CASE(iov_kunit_copy_to_kvec),
+ KUNIT_CASE(iov_kunit_copy_from_kvec),
+ KUNIT_CASE(iov_kunit_copy_to_bvec),
+ KUNIT_CASE(iov_kunit_copy_from_bvec),
+ KUNIT_CASE(iov_kunit_copy_to_xarray),
+ KUNIT_CASE(iov_kunit_copy_from_xarray),
+ KUNIT_CASE(iov_kunit_extract_pages_kvec),
+ KUNIT_CASE(iov_kunit_extract_pages_bvec),
+ KUNIT_CASE(iov_kunit_extract_pages_xarray),
+ {}
+};
+
+static struct kunit_suite iov_kunit_suite = {
+ .name = "iov_iter",
+ .test_cases = iov_kunit_cases,
+};
+
+kunit_test_suites(&iov_kunit_suite);
diff --git a/lib/libcrc32c.c b/lib/libcrc32c.c
new file mode 100644
index 000000000..649e68741
--- /dev/null
+++ b/lib/libcrc32c.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * CRC32C
+ *
+ * @Article{castagnoli-crc,
+ *   author = {Guy Castagnoli and Stefan Braeuer and Martin Herrman},
+ *   title = {{Optimization of Cyclic Redundancy-Check Codes with 24
+ *            and 32 Parity Bits}},
+ *   journal = {IEEE Transactions on Communications},
+ *   year = {1993},
+ *   volume = {41},
+ *   number = {6},
+ *   pages = {},
+ *   month = {June},
+ * }
+ * Used by the iSCSI driver, possibly others, and derived from
+ * the iscsi-crc.c module of the linux-iscsi driver at
+ * http://linux-iscsi.sourceforge.net.
+ *
+ * Following the example of lib/crc32, this function is intended to be
+ * flexible and useful for all users. Modules that currently have their
+ * own crc32c, but which may be able to switch to this one, are:
+ *   net/sctp (please add your documentation here if you switch to
+ *   using this one!)
+ * <endoflist>
+ *
+ * Copyright (c) 2004 Cisco Systems, Inc.
+ */
+
+#include <crypto/hash.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/crc32c.h>
+
+static struct crypto_shash *tfm;
+
+u32 crc32c(u32 crc, const void *address, unsigned int length)
+{
+ SHASH_DESC_ON_STACK(shash, tfm);
+ u32 ret, *ctx = (u32 *)shash_desc_ctx(shash);
+ int err;
+
+ shash->tfm = tfm;
+ *ctx = crc;
+
+ err = crypto_shash_update(shash, address, length);
+ BUG_ON(err);
+
+ ret = *ctx;
+ barrier_data(ctx);
+ return ret;
+}
+EXPORT_SYMBOL(crc32c);
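+
+/*
+ * A minimal usage sketch (illustrative only; "buf" and "len" are
+ * placeholder names). Many users seed the CRC with all-ones and
+ * bit-invert the final result, but conventions vary per protocol:
+ *
+ *   u32 crc = crc32c(~0, buf, len);
+ */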
+
+static int __init libcrc32c_mod_init(void)
+{
+ tfm = crypto_alloc_shash("crc32c", 0, 0);
+ return PTR_ERR_OR_ZERO(tfm);
+}
+
+static void __exit libcrc32c_mod_fini(void)
+{
+ crypto_free_shash(tfm);
+}
+
+module_init(libcrc32c_mod_init);
+module_exit(libcrc32c_mod_fini);
+
+MODULE_AUTHOR("Clay Haapala <chaapala@cisco.com>");
+MODULE_DESCRIPTION("CRC32c (Castagnoli) calculations");
+MODULE_LICENSE("GPL");
+MODULE_SOFTDEP("pre: crc32c");
diff --git a/lib/linear_ranges.c b/lib/linear_ranges.c
new file mode 100644
index 000000000..a1a7dfa88
--- /dev/null
+++ b/lib/linear_ranges.c
@@ -0,0 +1,276 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Helpers to map values in a linear range to a range index (selector)
+ *
+ * Original idea borrowed from the regulator framework
+ *
+ * It might be useful if we could also support inversely proportional
+ * ranges.
+ * Copyright 2020 ROHM Semiconductors
+ */
+
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/linear_range.h>
+#include <linux/module.h>
+
+/**
+ * linear_range_values_in_range - return the number of values in a range
+ * @r: pointer to the linear range whose values are counted
+ *
+ * Compute the number of values in the range pointed to by @r. Note that
+ * the values may all be equal: a range with selectors 0,...,2 and step 0
+ * still contains 3 values even though they are all equal.
+ *
+ * Return: the number of values in the range pointed to by @r
+ */
+unsigned int linear_range_values_in_range(const struct linear_range *r)
+{
+ if (!r)
+ return 0;
+ return r->max_sel - r->min_sel + 1;
+}
+EXPORT_SYMBOL_GPL(linear_range_values_in_range);
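+
+/*
+ * Worked example (illustrative; the numbers are invented for this
+ * sketch): a range with min_sel = 2 and max_sel = 5 contains
+ * 5 - 2 + 1 = 4 values, regardless of ->min and ->step.
+ */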
+
+/**
+ * linear_range_values_in_range_array - return the number of values in ranges
+ * @r: pointer to the array of linear ranges whose values are counted
+ * @ranges: number of ranges included in the computation
+ *
+ * Compute the number of values in the ranges pointed to by @r. Note that
+ * the values may all be equal: a range with selectors 0,...,2 and step 0
+ * still contains 3 values even though they are all equal.
+ *
+ * Return: the number of values in the first @ranges ranges pointed to by @r
+ */
+unsigned int linear_range_values_in_range_array(const struct linear_range *r,
+ int ranges)
+{
+ int i, values_in_range = 0;
+
+ for (i = 0; i < ranges; i++) {
+ int values;
+
+ values = linear_range_values_in_range(&r[i]);
+ if (!values)
+ return values;
+
+ values_in_range += values;
+ }
+ return values_in_range;
+}
+EXPORT_SYMBOL_GPL(linear_range_values_in_range_array);
+
+/**
+ * linear_range_get_max_value - return the largest value in a range
+ * @r: pointer to the linear range from which the value is looked up
+ *
+ * Return: the largest value in the given range
+ */
+unsigned int linear_range_get_max_value(const struct linear_range *r)
+{
+ return r->min + (r->max_sel - r->min_sel) * r->step;
+}
+EXPORT_SYMBOL_GPL(linear_range_get_max_value);
+
+/**
+ * linear_range_get_value - fetch a value from a given range
+ * @r: pointer to the linear range from which the value is looked up
+ * @selector: selector for which the value is searched
+ * @val: address where the found value is stored
+ *
+ * Search the given range for the value matching the given selector.
+ *
+ * Return: 0 on success, -EINVAL if the given selector is not found in
+ * the range.
+ */
+int linear_range_get_value(const struct linear_range *r, unsigned int selector,
+ unsigned int *val)
+{
+ if (r->min_sel > selector || r->max_sel < selector)
+ return -EINVAL;
+
+ *val = r->min + (selector - r->min_sel) * r->step;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(linear_range_get_value);
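+
+/*
+ * Worked example (illustrative; the range below is invented for this
+ * sketch, not taken from any driver):
+ *
+ *   static const struct linear_range r = {
+ *           .min = 10, .min_sel = 0, .max_sel = 15, .step = 50,
+ *   };
+ *   unsigned int val;
+ *
+ *   linear_range_get_value(&r, 3, &val);
+ *
+ * stores val = 10 + (3 - 0) * 50 = 160 and returns 0; a selector of 16
+ * falls outside min_sel..max_sel and would return -EINVAL.
+ */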
+
+/**
+ * linear_range_get_value_array - fetch a value from an array of ranges
+ * @r: pointer to the array of linear ranges from which the value is
+ *	looked up
+ * @ranges: number of ranges in the array
+ * @selector: selector for which the value is searched
+ * @val: address where the found value is stored
+ *
+ * Search through the array of ranges for the value matching the given
+ * selector.
+ *
+ * Return: 0 on success, -EINVAL if the given selector is not found in
+ * any of the ranges.
+ */
+int linear_range_get_value_array(const struct linear_range *r, int ranges,
+ unsigned int selector, unsigned int *val)
+{
+ int i;
+
+ for (i = 0; i < ranges; i++)
+ if (r[i].min_sel <= selector && r[i].max_sel >= selector)
+ return linear_range_get_value(&r[i], selector, val);
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(linear_range_get_value_array);
+
+/**
+ * linear_range_get_selector_low - return linear range selector for value
+ * @r: pointer to the linear range from which the selector is looked up
+ * @val: value for which the selector is searched
+ * @selector: address where the found selector value is stored
+ * @found: flag indicating whether the given value was within the range
+ *
+ * Find the selector whose range value is the closest match at or below
+ * the given input value. If the given value lies within the range,
+ * @found is set true.
+ *
+ * Return: 0 on success, -EINVAL if the range is invalid or does not
+ * contain a value smaller than or equal to the given value
+ */
+int linear_range_get_selector_low(const struct linear_range *r,
+ unsigned int val, unsigned int *selector,
+ bool *found)
+{
+ *found = false;
+
+ if (r->min > val)
+ return -EINVAL;
+
+ if (linear_range_get_max_value(r) < val) {
+ *selector = r->max_sel;
+ return 0;
+ }
+
+ *found = true;
+
+ if (r->step == 0)
+ *selector = r->min_sel;
+ else
+ *selector = (val - r->min) / r->step + r->min_sel;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(linear_range_get_selector_low);
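+
+/*
+ * Worked example (illustrative; same invented range as above, with
+ * min = 10, min_sel = 0, step = 50): for val = 175 the selector is
+ * (175 - 10) / 50 + 0 = 3, i.e. the range value 160, the closest value
+ * at or below 175, and *found is set true.
+ */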
+
+/**
+ * linear_range_get_selector_low_array - return linear range selector for value
+ * @r: pointer to the array of linear ranges from which the selector is
+ *	looked up
+ * @ranges: number of ranges to scan from the array
+ * @val: value for which the selector is searched
+ * @selector: address where the found selector value is stored
+ * @found: flag indicating whether the given value was within a range
+ *
+ * Scan the array of ranges for a selector whose range value matches the
+ * given input value, where a value matches if it is equal to or smaller
+ * than the given value. If the given value is found to lie within a
+ * range, scanning stops and @found is set true. If a range whose values
+ * are all smaller than the given value is found, that range's largest
+ * selector is stored in @selector, but scanning continues and @found
+ * remains false.
+ *
+ * Return: 0 on success, -EINVAL if the range array is invalid or does
+ * not contain a range with a value smaller than or equal to the given
+ * value
+ */
+int linear_range_get_selector_low_array(const struct linear_range *r,
+ int ranges, unsigned int val,
+ unsigned int *selector, bool *found)
+{
+ int i;
+ int ret = -EINVAL;
+
+ for (i = 0; i < ranges; i++) {
+ int tmpret;
+
+ tmpret = linear_range_get_selector_low(&r[i], val, selector,
+ found);
+ if (!tmpret)
+ ret = 0;
+
+ if (*found)
+ break;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(linear_range_get_selector_low_array);
+
+/**
+ * linear_range_get_selector_high - return linear range selector for value
+ * @r: pointer to the linear range from which the selector is looked up
+ * @val: value for which the selector is searched
+ * @selector: address where the found selector value is stored
+ * @found: flag indicating whether the given value was within the range
+ *
+ * Find the selector whose range value is the closest match at or above
+ * the given input value. If the given value lies within the range,
+ * @found is set true.
+ *
+ * Return: 0 on success, -EINVAL if the range is invalid or does not
+ * contain a value greater than or equal to the given value
+ */
+int linear_range_get_selector_high(const struct linear_range *r,
+ unsigned int val, unsigned int *selector,
+ bool *found)
+{
+ *found = false;
+
+ if (linear_range_get_max_value(r) < val)
+ return -EINVAL;
+
+ if (r->min > val) {
+ *selector = r->min_sel;
+ return 0;
+ }
+
+ *found = true;
+
+ if (r->step == 0)
+ *selector = r->max_sel;
+ else
+ *selector = DIV_ROUND_UP(val - r->min, r->step) + r->min_sel;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(linear_range_get_selector_high);
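+
+/*
+ * Worked example, contrasting with linear_range_get_selector_low()
+ * (illustrative; same invented range, min = 10, min_sel = 0, step = 50):
+ * for val = 175 the selector is DIV_ROUND_UP(175 - 10, 50) + 0 = 4,
+ * i.e. the range value 210, the closest value at or above 175.
+ */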
+
+/**
+ * linear_range_get_selector_within - return linear range selector for value
+ * @r: pointer to the linear range from which the selector is looked up
+ * @val: value for which the selector is searched
+ * @selector: address where the found selector value is stored
+ *
+ * Find the selector whose range value is the closest match at or below
+ * the given input value. If the given value is higher than the range's
+ * maximum value, the maximum selector is returned.
+ */
+void linear_range_get_selector_within(const struct linear_range *r,
+ unsigned int val, unsigned int *selector)
+{
+ if (r->min > val) {
+ *selector = r->min_sel;
+ return;
+ }
+
+ if (linear_range_get_max_value(r) < val) {
+ *selector = r->max_sel;
+ return;
+ }
+
+ if (r->step == 0)
+ *selector = r->min_sel;
+ else
+ *selector = (val - r->min) / r->step + r->min_sel;
+}
+EXPORT_SYMBOL_GPL(linear_range_get_selector_within);
+
+MODULE_DESCRIPTION("linear-ranges helper");
+MODULE_LICENSE("GPL");
diff --git a/lib/list-test.c b/lib/list-test.c
new file mode 100644
index 000000000..0cc27de9c
--- /dev/null
+++ b/lib/list-test.c
@@ -0,0 +1,1502 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit test for the kernel linked-list structures.
+ *
+ * Copyright (C) 2019, Google LLC.
+ * Author: David Gow <davidgow@google.com>
+ */
+#include <kunit/test.h>
+
+#include <linux/list.h>
+#include <linux/klist.h>
+
+struct list_test_struct {
+ int data;
+ struct list_head list;
+};
+
+static void list_test_list_init(struct kunit *test)
+{
+ /* Test the different ways of initialising a list. */
+ struct list_head list1 = LIST_HEAD_INIT(list1);
+ struct list_head list2;
+ LIST_HEAD(list3);
+ struct list_head *list4;
+ struct list_head *list5;
+
+ INIT_LIST_HEAD(&list2);
+
+ list4 = kzalloc(sizeof(*list4), GFP_KERNEL | __GFP_NOFAIL);
+ INIT_LIST_HEAD(list4);
+
+ list5 = kmalloc(sizeof(*list5), GFP_KERNEL | __GFP_NOFAIL);
+ memset(list5, 0xFF, sizeof(*list5));
+ INIT_LIST_HEAD(list5);
+
+ /* list_empty_careful() checks both next and prev. */
+ KUNIT_EXPECT_TRUE(test, list_empty_careful(&list1));
+ KUNIT_EXPECT_TRUE(test, list_empty_careful(&list2));
+ KUNIT_EXPECT_TRUE(test, list_empty_careful(&list3));
+ KUNIT_EXPECT_TRUE(test, list_empty_careful(list4));
+ KUNIT_EXPECT_TRUE(test, list_empty_careful(list5));
+
+ kfree(list4);
+ kfree(list5);
+}
+
+static void list_test_list_add(struct kunit *test)
+{
+ struct list_head a, b;
+ LIST_HEAD(list);
+
+ list_add(&a, &list);
+ list_add(&b, &list);
+
+ /* should be [list] -> b -> a */
+ KUNIT_EXPECT_PTR_EQ(test, list.next, &b);
+ KUNIT_EXPECT_PTR_EQ(test, b.prev, &list);
+ KUNIT_EXPECT_PTR_EQ(test, b.next, &a);
+}
+
+static void list_test_list_add_tail(struct kunit *test)
+{
+ struct list_head a, b;
+ LIST_HEAD(list);
+
+ list_add_tail(&a, &list);
+ list_add_tail(&b, &list);
+
+ /* should be [list] -> a -> b */
+ KUNIT_EXPECT_PTR_EQ(test, list.next, &a);
+ KUNIT_EXPECT_PTR_EQ(test, a.prev, &list);
+ KUNIT_EXPECT_PTR_EQ(test, a.next, &b);
+}
+
+static void list_test_list_del(struct kunit *test)
+{
+ struct list_head a, b;
+ LIST_HEAD(list);
+
+ list_add_tail(&a, &list);
+ list_add_tail(&b, &list);
+
+ /* before: [list] -> a -> b */
+ list_del(&a);
+
+ /* now: [list] -> b */
+ KUNIT_EXPECT_PTR_EQ(test, list.next, &b);
+ KUNIT_EXPECT_PTR_EQ(test, b.prev, &list);
+}
+
+static void list_test_list_replace(struct kunit *test)
+{
+ struct list_head a_old, a_new, b;
+ LIST_HEAD(list);
+
+ list_add_tail(&a_old, &list);
+ list_add_tail(&b, &list);
+
+ /* before: [list] -> a_old -> b */
+ list_replace(&a_old, &a_new);
+
+ /* now: [list] -> a_new -> b */
+ KUNIT_EXPECT_PTR_EQ(test, list.next, &a_new);
+ KUNIT_EXPECT_PTR_EQ(test, b.prev, &a_new);
+}
+
+static void list_test_list_replace_init(struct kunit *test)
+{
+ struct list_head a_old, a_new, b;
+ LIST_HEAD(list);
+
+ list_add_tail(&a_old, &list);
+ list_add_tail(&b, &list);
+
+ /* before: [list] -> a_old -> b */
+ list_replace_init(&a_old, &a_new);
+
+ /* now: [list] -> a_new -> b */
+ KUNIT_EXPECT_PTR_EQ(test, list.next, &a_new);
+ KUNIT_EXPECT_PTR_EQ(test, b.prev, &a_new);
+
+ /* check a_old is empty (initialized) */
+ KUNIT_EXPECT_TRUE(test, list_empty_careful(&a_old));
+}
+
+static void list_test_list_swap(struct kunit *test)
+{
+ struct list_head a, b;
+ LIST_HEAD(list);
+
+ list_add_tail(&a, &list);
+ list_add_tail(&b, &list);
+
+ /* before: [list] -> a -> b */
+ list_swap(&a, &b);
+
+ /* after: [list] -> b -> a */
+ KUNIT_EXPECT_PTR_EQ(test, &b, list.next);
+ KUNIT_EXPECT_PTR_EQ(test, &a, list.prev);
+
+ KUNIT_EXPECT_PTR_EQ(test, &a, b.next);
+ KUNIT_EXPECT_PTR_EQ(test, &list, b.prev);
+
+ KUNIT_EXPECT_PTR_EQ(test, &list, a.next);
+ KUNIT_EXPECT_PTR_EQ(test, &b, a.prev);
+}
+
+static void list_test_list_del_init(struct kunit *test)
+{
+ struct list_head a, b;
+ LIST_HEAD(list);
+
+ list_add_tail(&a, &list);
+ list_add_tail(&b, &list);
+
+ /* before: [list] -> a -> b */
+ list_del_init(&a);
+ /* after: [list] -> b, a initialised */
+
+ KUNIT_EXPECT_PTR_EQ(test, list.next, &b);
+ KUNIT_EXPECT_PTR_EQ(test, b.prev, &list);
+ KUNIT_EXPECT_TRUE(test, list_empty_careful(&a));
+}
+
+static void list_test_list_del_init_careful(struct kunit *test)
+{
+ /* NOTE: This test only checks the behaviour of this function in
+ * isolation. It does not verify memory model guarantees.
+ */
+ struct list_head a, b;
+ LIST_HEAD(list);
+
+ list_add_tail(&a, &list);
+ list_add_tail(&b, &list);
+
+ /* before: [list] -> a -> b */
+ list_del_init_careful(&a);
+ /* after: [list] -> b, a initialised */
+
+ KUNIT_EXPECT_PTR_EQ(test, list.next, &b);
+ KUNIT_EXPECT_PTR_EQ(test, b.prev, &list);
+ KUNIT_EXPECT_TRUE(test, list_empty_careful(&a));
+}
+
+static void list_test_list_move(struct kunit *test)
+{
+ struct list_head a, b;
+ LIST_HEAD(list1);
+ LIST_HEAD(list2);
+
+ list_add_tail(&a, &list1);
+ list_add_tail(&b, &list2);
+
+ /* before: [list1] -> a, [list2] -> b */
+ list_move(&a, &list2);
+ /* after: [list1] empty, [list2] -> a -> b */
+
+ KUNIT_EXPECT_TRUE(test, list_empty(&list1));
+
+ KUNIT_EXPECT_PTR_EQ(test, &a, list2.next);
+ KUNIT_EXPECT_PTR_EQ(test, &b, a.next);
+}
+
+static void list_test_list_move_tail(struct kunit *test)
+{
+ struct list_head a, b;
+ LIST_HEAD(list1);
+ LIST_HEAD(list2);
+
+ list_add_tail(&a, &list1);
+ list_add_tail(&b, &list2);
+
+ /* before: [list1] -> a, [list2] -> b */
+ list_move_tail(&a, &list2);
+ /* after: [list1] empty, [list2] -> b -> a */
+
+ KUNIT_EXPECT_TRUE(test, list_empty(&list1));
+
+ KUNIT_EXPECT_PTR_EQ(test, &b, list2.next);
+ KUNIT_EXPECT_PTR_EQ(test, &a, b.next);
+}
+
+static void list_test_list_bulk_move_tail(struct kunit *test)
+{
+ struct list_head a, b, c, d, x, y;
+ struct list_head *list1_values[] = { &x, &b, &c, &y };
+ struct list_head *list2_values[] = { &a, &d };
+ struct list_head *ptr;
+ LIST_HEAD(list1);
+ LIST_HEAD(list2);
+ int i = 0;
+
+ list_add_tail(&x, &list1);
+ list_add_tail(&y, &list1);
+
+ list_add_tail(&a, &list2);
+ list_add_tail(&b, &list2);
+ list_add_tail(&c, &list2);
+ list_add_tail(&d, &list2);
+
+ /* before: [list1] -> x -> y, [list2] -> a -> b -> c -> d */
+ list_bulk_move_tail(&y, &b, &c);
+ /* after: [list1] -> x -> b -> c -> y, [list2] -> a -> d */
+
+ list_for_each(ptr, &list1) {
+ KUNIT_EXPECT_PTR_EQ(test, ptr, list1_values[i]);
+ i++;
+ }
+ KUNIT_EXPECT_EQ(test, i, 4);
+ i = 0;
+ list_for_each(ptr, &list2) {
+ KUNIT_EXPECT_PTR_EQ(test, ptr, list2_values[i]);
+ i++;
+ }
+ KUNIT_EXPECT_EQ(test, i, 2);
+}
+
+static void list_test_list_is_head(struct kunit *test)
+{
+ struct list_head a, b, c;
+
+ /* Two lists: [a] -> b, [c] */
+ INIT_LIST_HEAD(&a);
+ INIT_LIST_HEAD(&c);
+ list_add_tail(&b, &a);
+
+ KUNIT_EXPECT_TRUE_MSG(test, list_is_head(&a, &a),
+ "Head element of same list");
+ KUNIT_EXPECT_FALSE_MSG(test, list_is_head(&a, &b),
+ "Non-head element of same list");
+ KUNIT_EXPECT_FALSE_MSG(test, list_is_head(&a, &c),
+ "Head element of different list");
+}
+
+
+static void list_test_list_is_first(struct kunit *test)
+{
+ struct list_head a, b;
+ LIST_HEAD(list);
+
+ list_add_tail(&a, &list);
+ list_add_tail(&b, &list);
+
+ KUNIT_EXPECT_TRUE(test, list_is_first(&a, &list));
+ KUNIT_EXPECT_FALSE(test, list_is_first(&b, &list));
+}
+
+static void list_test_list_is_last(struct kunit *test)
+{
+ struct list_head a, b;
+ LIST_HEAD(list);
+
+ list_add_tail(&a, &list);
+ list_add_tail(&b, &list);
+
+ KUNIT_EXPECT_FALSE(test, list_is_last(&a, &list));
+ KUNIT_EXPECT_TRUE(test, list_is_last(&b, &list));
+}
+
+static void list_test_list_empty(struct kunit *test)
+{
+ struct list_head a;
+ LIST_HEAD(list1);
+ LIST_HEAD(list2);
+
+ list_add_tail(&a, &list1);
+
+ KUNIT_EXPECT_FALSE(test, list_empty(&list1));
+ KUNIT_EXPECT_TRUE(test, list_empty(&list2));
+}
+
+static void list_test_list_empty_careful(struct kunit *test)
+{
+ /* This test doesn't check correctness under concurrent access */
+ struct list_head a;
+ LIST_HEAD(list1);
+ LIST_HEAD(list2);
+
+ list_add_tail(&a, &list1);
+
+ KUNIT_EXPECT_FALSE(test, list_empty_careful(&list1));
+ KUNIT_EXPECT_TRUE(test, list_empty_careful(&list2));
+}
+
+static void list_test_list_rotate_left(struct kunit *test)
+{
+ struct list_head a, b;
+ LIST_HEAD(list);
+
+ list_add_tail(&a, &list);
+ list_add_tail(&b, &list);
+
+ /* before: [list] -> a -> b */
+ list_rotate_left(&list);
+ /* after: [list] -> b -> a */
+
+ KUNIT_EXPECT_PTR_EQ(test, list.next, &b);
+ KUNIT_EXPECT_PTR_EQ(test, b.prev, &list);
+ KUNIT_EXPECT_PTR_EQ(test, b.next, &a);
+}
+
+static void list_test_list_rotate_to_front(struct kunit *test)
+{
+ struct list_head a, b, c, d;
+ struct list_head *list_values[] = { &c, &d, &a, &b };
+ struct list_head *ptr;
+ LIST_HEAD(list);
+ int i = 0;
+
+ list_add_tail(&a, &list);
+ list_add_tail(&b, &list);
+ list_add_tail(&c, &list);
+ list_add_tail(&d, &list);
+
+ /* before: [list] -> a -> b -> c -> d */
+ list_rotate_to_front(&c, &list);
+ /* after: [list] -> c -> d -> a -> b */
+
+ list_for_each(ptr, &list) {
+ KUNIT_EXPECT_PTR_EQ(test, ptr, list_values[i]);
+ i++;
+ }
+ KUNIT_EXPECT_EQ(test, i, 4);
+}
+
+static void list_test_list_is_singular(struct kunit *test)
+{
+ struct list_head a, b;
+ LIST_HEAD(list);
+
+ /* [list] empty */
+ KUNIT_EXPECT_FALSE(test, list_is_singular(&list));
+
+ list_add_tail(&a, &list);
+
+ /* [list] -> a */
+ KUNIT_EXPECT_TRUE(test, list_is_singular(&list));
+
+ list_add_tail(&b, &list);
+
+ /* [list] -> a -> b */
+ KUNIT_EXPECT_FALSE(test, list_is_singular(&list));
+}
+
+static void list_test_list_cut_position(struct kunit *test)
+{
+ struct list_head entries[3], *cur;
+ LIST_HEAD(list1);
+ LIST_HEAD(list2);
+ int i = 0;
+
+ list_add_tail(&entries[0], &list1);
+ list_add_tail(&entries[1], &list1);
+ list_add_tail(&entries[2], &list1);
+
+ /* before: [list1] -> entries[0] -> entries[1] -> entries[2] */
+ list_cut_position(&list2, &list1, &entries[1]);
+ /* after: [list2] -> entries[0] -> entries[1], [list1] -> entries[2] */
+
+ list_for_each(cur, &list2) {
+ KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 2);
+
+ list_for_each(cur, &list1) {
+ KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
+ i++;
+ }
+}
+
+static void list_test_list_cut_before(struct kunit *test)
+{
+ struct list_head entries[3], *cur;
+ LIST_HEAD(list1);
+ LIST_HEAD(list2);
+ int i = 0;
+
+ list_add_tail(&entries[0], &list1);
+ list_add_tail(&entries[1], &list1);
+ list_add_tail(&entries[2], &list1);
+
+ /* before: [list1] -> entries[0] -> entries[1] -> entries[2] */
+ list_cut_before(&list2, &list1, &entries[1]);
+ /* after: [list2] -> entries[0], [list1] -> entries[1] -> entries[2] */
+
+ list_for_each(cur, &list2) {
+ KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 1);
+
+ list_for_each(cur, &list1) {
+ KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
+ i++;
+ }
+}
+
+static void list_test_list_splice(struct kunit *test)
+{
+ struct list_head entries[5], *cur;
+ LIST_HEAD(list1);
+ LIST_HEAD(list2);
+ int i = 0;
+
+ list_add_tail(&entries[0], &list1);
+ list_add_tail(&entries[1], &list1);
+ list_add_tail(&entries[2], &list2);
+ list_add_tail(&entries[3], &list2);
+ list_add_tail(&entries[4], &list1);
+
+ /* before: [list1]->e[0]->e[1]->e[4], [list2]->e[2]->e[3] */
+ list_splice(&list2, &entries[1]);
+ /* after: [list1]->e[0]->e[1]->e[2]->e[3]->e[4], [list2] uninit */
+
+ list_for_each(cur, &list1) {
+ KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 5);
+}
+
+static void list_test_list_splice_tail(struct kunit *test)
+{
+ struct list_head entries[5], *cur;
+ LIST_HEAD(list1);
+ LIST_HEAD(list2);
+ int i = 0;
+
+ list_add_tail(&entries[0], &list1);
+ list_add_tail(&entries[1], &list1);
+ list_add_tail(&entries[2], &list2);
+ list_add_tail(&entries[3], &list2);
+ list_add_tail(&entries[4], &list1);
+
+ /* before: [list1]->e[0]->e[1]->e[4], [list2]->e[2]->e[3] */
+ list_splice_tail(&list2, &entries[4]);
+ /* after: [list1]->e[0]->e[1]->e[2]->e[3]->e[4], [list2] uninit */
+
+ list_for_each(cur, &list1) {
+ KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 5);
+}
+
+static void list_test_list_splice_init(struct kunit *test)
+{
+ struct list_head entries[5], *cur;
+ LIST_HEAD(list1);
+ LIST_HEAD(list2);
+ int i = 0;
+
+ list_add_tail(&entries[0], &list1);
+ list_add_tail(&entries[1], &list1);
+ list_add_tail(&entries[2], &list2);
+ list_add_tail(&entries[3], &list2);
+ list_add_tail(&entries[4], &list1);
+
+ /* before: [list1]->e[0]->e[1]->e[4], [list2]->e[2]->e[3] */
+ list_splice_init(&list2, &entries[1]);
+ /* after: [list1]->e[0]->e[1]->e[2]->e[3]->e[4], [list2] empty */
+
+ list_for_each(cur, &list1) {
+ KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 5);
+
+ KUNIT_EXPECT_TRUE(test, list_empty_careful(&list2));
+}
+
+static void list_test_list_splice_tail_init(struct kunit *test)
+{
+ struct list_head entries[5], *cur;
+ LIST_HEAD(list1);
+ LIST_HEAD(list2);
+ int i = 0;
+
+ list_add_tail(&entries[0], &list1);
+ list_add_tail(&entries[1], &list1);
+ list_add_tail(&entries[2], &list2);
+ list_add_tail(&entries[3], &list2);
+ list_add_tail(&entries[4], &list1);
+
+ /* before: [list1]->e[0]->e[1]->e[4], [list2]->e[2]->e[3] */
+ list_splice_tail_init(&list2, &entries[4]);
+ /* after: [list1]->e[0]->e[1]->e[2]->e[3]->e[4], [list2] empty */
+
+ list_for_each(cur, &list1) {
+ KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 5);
+
+ KUNIT_EXPECT_TRUE(test, list_empty_careful(&list2));
+}
+
+static void list_test_list_entry(struct kunit *test)
+{
+ struct list_test_struct test_struct;
+
+ KUNIT_EXPECT_PTR_EQ(test, &test_struct, list_entry(&(test_struct.list),
+ struct list_test_struct, list));
+}
+
+static void list_test_list_entry_is_head(struct kunit *test)
+{
+ struct list_test_struct test_struct1, test_struct2, test_struct3;
+
+ INIT_LIST_HEAD(&test_struct1.list);
+ INIT_LIST_HEAD(&test_struct3.list);
+
+ list_add_tail(&test_struct2.list, &test_struct1.list);
+
+ KUNIT_EXPECT_TRUE_MSG(test,
+ list_entry_is_head((&test_struct1), &test_struct1.list, list),
+ "Head element of same list");
+ KUNIT_EXPECT_FALSE_MSG(test,
+ list_entry_is_head((&test_struct2), &test_struct1.list, list),
+ "Non-head element of same list");
+ KUNIT_EXPECT_FALSE_MSG(test,
+ list_entry_is_head((&test_struct3), &test_struct1.list, list),
+ "Head element of different list");
+}
+
+static void list_test_list_first_entry(struct kunit *test)
+{
+ struct list_test_struct test_struct1, test_struct2;
+ LIST_HEAD(list);
+
+ list_add_tail(&test_struct1.list, &list);
+ list_add_tail(&test_struct2.list, &list);
+
+
+ KUNIT_EXPECT_PTR_EQ(test, &test_struct1, list_first_entry(&list,
+ struct list_test_struct, list));
+}
+
+static void list_test_list_last_entry(struct kunit *test)
+{
+ struct list_test_struct test_struct1, test_struct2;
+ LIST_HEAD(list);
+
+ list_add_tail(&test_struct1.list, &list);
+ list_add_tail(&test_struct2.list, &list);
+
+
+ KUNIT_EXPECT_PTR_EQ(test, &test_struct2, list_last_entry(&list,
+ struct list_test_struct, list));
+}
+
+static void list_test_list_first_entry_or_null(struct kunit *test)
+{
+ struct list_test_struct test_struct1, test_struct2;
+ LIST_HEAD(list);
+
+ KUNIT_EXPECT_FALSE(test, list_first_entry_or_null(&list,
+ struct list_test_struct, list));
+
+ list_add_tail(&test_struct1.list, &list);
+ list_add_tail(&test_struct2.list, &list);
+
+ KUNIT_EXPECT_PTR_EQ(test, &test_struct1,
+ list_first_entry_or_null(&list,
+ struct list_test_struct, list));
+}
+
+static void list_test_list_next_entry(struct kunit *test)
+{
+ struct list_test_struct test_struct1, test_struct2;
+ LIST_HEAD(list);
+
+ list_add_tail(&test_struct1.list, &list);
+ list_add_tail(&test_struct2.list, &list);
+
+
+ KUNIT_EXPECT_PTR_EQ(test, &test_struct2, list_next_entry(&test_struct1,
+ list));
+}
+
+static void list_test_list_prev_entry(struct kunit *test)
+{
+ struct list_test_struct test_struct1, test_struct2;
+ LIST_HEAD(list);
+
+ list_add_tail(&test_struct1.list, &list);
+ list_add_tail(&test_struct2.list, &list);
+
+
+ KUNIT_EXPECT_PTR_EQ(test, &test_struct1, list_prev_entry(&test_struct2,
+ list));
+}
+
+static void list_test_list_for_each(struct kunit *test)
+{
+ struct list_head entries[3], *cur;
+ LIST_HEAD(list);
+ int i = 0;
+
+ list_add_tail(&entries[0], &list);
+ list_add_tail(&entries[1], &list);
+ list_add_tail(&entries[2], &list);
+
+ list_for_each(cur, &list) {
+ KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 3);
+}
+
+static void list_test_list_for_each_prev(struct kunit *test)
+{
+ struct list_head entries[3], *cur;
+ LIST_HEAD(list);
+ int i = 2;
+
+ list_add_tail(&entries[0], &list);
+ list_add_tail(&entries[1], &list);
+ list_add_tail(&entries[2], &list);
+
+ list_for_each_prev(cur, &list) {
+ KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
+ i--;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, -1);
+}
+
+static void list_test_list_for_each_safe(struct kunit *test)
+{
+ struct list_head entries[3], *cur, *n;
+ LIST_HEAD(list);
+ int i = 0;
+
+
+ list_add_tail(&entries[0], &list);
+ list_add_tail(&entries[1], &list);
+ list_add_tail(&entries[2], &list);
+
+ list_for_each_safe(cur, n, &list) {
+ KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
+ list_del(&entries[i]);
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 3);
+ KUNIT_EXPECT_TRUE(test, list_empty(&list));
+}
+
+static void list_test_list_for_each_prev_safe(struct kunit *test)
+{
+ struct list_head entries[3], *cur, *n;
+ LIST_HEAD(list);
+ int i = 2;
+
+ list_add_tail(&entries[0], &list);
+ list_add_tail(&entries[1], &list);
+ list_add_tail(&entries[2], &list);
+
+ list_for_each_prev_safe(cur, n, &list) {
+ KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
+ list_del(&entries[i]);
+ i--;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, -1);
+ KUNIT_EXPECT_TRUE(test, list_empty(&list));
+}
+
+static void list_test_list_for_each_entry(struct kunit *test)
+{
+ struct list_test_struct entries[5], *cur;
+ LIST_HEAD(list);
+ int i = 0;
+
+ for (i = 0; i < 5; ++i) {
+ entries[i].data = i;
+ list_add_tail(&entries[i].list, &list);
+ }
+
+ i = 0;
+
+ list_for_each_entry(cur, &list, list) {
+ KUNIT_EXPECT_EQ(test, cur->data, i);
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 5);
+}
+
+static void list_test_list_for_each_entry_reverse(struct kunit *test)
+{
+ struct list_test_struct entries[5], *cur;
+ LIST_HEAD(list);
+ int i = 0;
+
+ for (i = 0; i < 5; ++i) {
+ entries[i].data = i;
+ list_add_tail(&entries[i].list, &list);
+ }
+
+ i = 4;
+
+ list_for_each_entry_reverse(cur, &list, list) {
+ KUNIT_EXPECT_EQ(test, cur->data, i);
+ i--;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, -1);
+}
+
+static struct kunit_case list_test_cases[] = {
+ KUNIT_CASE(list_test_list_init),
+ KUNIT_CASE(list_test_list_add),
+ KUNIT_CASE(list_test_list_add_tail),
+ KUNIT_CASE(list_test_list_del),
+ KUNIT_CASE(list_test_list_replace),
+ KUNIT_CASE(list_test_list_replace_init),
+ KUNIT_CASE(list_test_list_swap),
+ KUNIT_CASE(list_test_list_del_init),
+ KUNIT_CASE(list_test_list_del_init_careful),
+ KUNIT_CASE(list_test_list_move),
+ KUNIT_CASE(list_test_list_move_tail),
+ KUNIT_CASE(list_test_list_bulk_move_tail),
+ KUNIT_CASE(list_test_list_is_head),
+ KUNIT_CASE(list_test_list_is_first),
+ KUNIT_CASE(list_test_list_is_last),
+ KUNIT_CASE(list_test_list_empty),
+ KUNIT_CASE(list_test_list_empty_careful),
+ KUNIT_CASE(list_test_list_rotate_left),
+ KUNIT_CASE(list_test_list_rotate_to_front),
+ KUNIT_CASE(list_test_list_is_singular),
+ KUNIT_CASE(list_test_list_cut_position),
+ KUNIT_CASE(list_test_list_cut_before),
+ KUNIT_CASE(list_test_list_splice),
+ KUNIT_CASE(list_test_list_splice_tail),
+ KUNIT_CASE(list_test_list_splice_init),
+ KUNIT_CASE(list_test_list_splice_tail_init),
+ KUNIT_CASE(list_test_list_entry),
+ KUNIT_CASE(list_test_list_entry_is_head),
+ KUNIT_CASE(list_test_list_first_entry),
+ KUNIT_CASE(list_test_list_last_entry),
+ KUNIT_CASE(list_test_list_first_entry_or_null),
+ KUNIT_CASE(list_test_list_next_entry),
+ KUNIT_CASE(list_test_list_prev_entry),
+ KUNIT_CASE(list_test_list_for_each),
+ KUNIT_CASE(list_test_list_for_each_prev),
+ KUNIT_CASE(list_test_list_for_each_safe),
+ KUNIT_CASE(list_test_list_for_each_prev_safe),
+ KUNIT_CASE(list_test_list_for_each_entry),
+ KUNIT_CASE(list_test_list_for_each_entry_reverse),
+ {},
+};
+
+static struct kunit_suite list_test_module = {
+ .name = "list-kunit-test",
+ .test_cases = list_test_cases,
+};
+
+struct hlist_test_struct {
+ int data;
+ struct hlist_node list;
+};
+
+static void hlist_test_init(struct kunit *test)
+{
+ /* Test the different ways of initialising a list. */
+ struct hlist_head list1 = HLIST_HEAD_INIT;
+ struct hlist_head list2;
+ HLIST_HEAD(list3);
+ struct hlist_head *list4;
+ struct hlist_head *list5;
+
+ INIT_HLIST_HEAD(&list2);
+
+ list4 = kzalloc(sizeof(*list4), GFP_KERNEL | __GFP_NOFAIL);
+ INIT_HLIST_HEAD(list4);
+
+ list5 = kmalloc(sizeof(*list5), GFP_KERNEL | __GFP_NOFAIL);
+ memset(list5, 0xFF, sizeof(*list5));
+ INIT_HLIST_HEAD(list5);
+
+ KUNIT_EXPECT_TRUE(test, hlist_empty(&list1));
+ KUNIT_EXPECT_TRUE(test, hlist_empty(&list2));
+ KUNIT_EXPECT_TRUE(test, hlist_empty(&list3));
+ KUNIT_EXPECT_TRUE(test, hlist_empty(list4));
+ KUNIT_EXPECT_TRUE(test, hlist_empty(list5));
+
+ kfree(list4);
+ kfree(list5);
+}
+
+static void hlist_test_unhashed(struct kunit *test)
+{
+ struct hlist_node a;
+ HLIST_HEAD(list);
+
+ INIT_HLIST_NODE(&a);
+
+ /* is unhashed by default */
+ KUNIT_EXPECT_TRUE(test, hlist_unhashed(&a));
+
+ hlist_add_head(&a, &list);
+
+ /* is hashed once added to list */
+ KUNIT_EXPECT_FALSE(test, hlist_unhashed(&a));
+
+ hlist_del_init(&a);
+
+ /* is again unhashed after del_init */
+ KUNIT_EXPECT_TRUE(test, hlist_unhashed(&a));
+}
+
+/* Doesn't test concurrency guarantees */
+static void hlist_test_unhashed_lockless(struct kunit *test)
+{
+ struct hlist_node a;
+ HLIST_HEAD(list);
+
+ INIT_HLIST_NODE(&a);
+
+ /* is unhashed by default */
+ KUNIT_EXPECT_TRUE(test, hlist_unhashed_lockless(&a));
+
+ hlist_add_head(&a, &list);
+
+ /* is hashed once added to list */
+ KUNIT_EXPECT_FALSE(test, hlist_unhashed_lockless(&a));
+
+ hlist_del_init(&a);
+
+ /* is again unhashed after del_init */
+ KUNIT_EXPECT_TRUE(test, hlist_unhashed_lockless(&a));
+}
+
+static void hlist_test_del(struct kunit *test)
+{
+ struct hlist_node a, b;
+ HLIST_HEAD(list);
+
+ hlist_add_head(&a, &list);
+ hlist_add_behind(&b, &a);
+
+ /* before: [list] -> a -> b */
+ hlist_del(&a);
+
+ /* now: [list] -> b */
+ KUNIT_EXPECT_PTR_EQ(test, list.first, &b);
+ KUNIT_EXPECT_PTR_EQ(test, b.pprev, &list.first);
+}
+
+static void hlist_test_del_init(struct kunit *test)
+{
+ struct hlist_node a, b;
+ HLIST_HEAD(list);
+
+ hlist_add_head(&a, &list);
+ hlist_add_behind(&b, &a);
+
+ /* before: [list] -> a -> b */
+ hlist_del_init(&a);
+
+ /* now: [list] -> b */
+ KUNIT_EXPECT_PTR_EQ(test, list.first, &b);
+ KUNIT_EXPECT_PTR_EQ(test, b.pprev, &list.first);
+
+ /* a is now initialised */
+ KUNIT_EXPECT_PTR_EQ(test, a.next, NULL);
+ KUNIT_EXPECT_PTR_EQ(test, a.pprev, NULL);
+}
+
+/* Tests all three hlist_add_* functions */
+static void hlist_test_add(struct kunit *test)
+{
+ struct hlist_node a, b, c, d;
+ HLIST_HEAD(list);
+
+ hlist_add_head(&a, &list);
+ hlist_add_head(&b, &list);
+ hlist_add_before(&c, &a);
+ hlist_add_behind(&d, &a);
+
+ /* should be [list] -> b -> c -> a -> d */
+ KUNIT_EXPECT_PTR_EQ(test, list.first, &b);
+
+ KUNIT_EXPECT_PTR_EQ(test, c.pprev, &(b.next));
+ KUNIT_EXPECT_PTR_EQ(test, b.next, &c);
+
+ KUNIT_EXPECT_PTR_EQ(test, a.pprev, &(c.next));
+ KUNIT_EXPECT_PTR_EQ(test, c.next, &a);
+
+ KUNIT_EXPECT_PTR_EQ(test, d.pprev, &(a.next));
+ KUNIT_EXPECT_PTR_EQ(test, a.next, &d);
+}
+
+/* Tests both hlist_fake() and hlist_add_fake() */
+static void hlist_test_fake(struct kunit *test)
+{
+ struct hlist_node a;
+
+ INIT_HLIST_NODE(&a);
+
+ /* not fake after init */
+ KUNIT_EXPECT_FALSE(test, hlist_fake(&a));
+
+ hlist_add_fake(&a);
+
+ /* is now fake */
+ KUNIT_EXPECT_TRUE(test, hlist_fake(&a));
+}
+
+static void hlist_test_is_singular_node(struct kunit *test)
+{
+ struct hlist_node a, b;
+ HLIST_HEAD(list);
+
+ INIT_HLIST_NODE(&a);
+ KUNIT_EXPECT_FALSE(test, hlist_is_singular_node(&a, &list));
+
+ hlist_add_head(&a, &list);
+ KUNIT_EXPECT_TRUE(test, hlist_is_singular_node(&a, &list));
+
+ hlist_add_head(&b, &list);
+ KUNIT_EXPECT_FALSE(test, hlist_is_singular_node(&a, &list));
+ KUNIT_EXPECT_FALSE(test, hlist_is_singular_node(&b, &list));
+}
+
+static void hlist_test_empty(struct kunit *test)
+{
+ struct hlist_node a;
+ HLIST_HEAD(list);
+
+ /* list starts off empty */
+ KUNIT_EXPECT_TRUE(test, hlist_empty(&list));
+
+ hlist_add_head(&a, &list);
+
+ /* list is no longer empty */
+ KUNIT_EXPECT_FALSE(test, hlist_empty(&list));
+}
+
+static void hlist_test_move_list(struct kunit *test)
+{
+ struct hlist_node a;
+ HLIST_HEAD(list1);
+ HLIST_HEAD(list2);
+
+ hlist_add_head(&a, &list1);
+
+ KUNIT_EXPECT_FALSE(test, hlist_empty(&list1));
+ KUNIT_EXPECT_TRUE(test, hlist_empty(&list2));
+ hlist_move_list(&list1, &list2);
+ KUNIT_EXPECT_TRUE(test, hlist_empty(&list1));
+ KUNIT_EXPECT_FALSE(test, hlist_empty(&list2));
+
+}
+
+static void hlist_test_entry(struct kunit *test)
+{
+ struct hlist_test_struct test_struct;
+
+ KUNIT_EXPECT_PTR_EQ(test, &test_struct,
+ hlist_entry(&(test_struct.list),
+ struct hlist_test_struct, list));
+}
+
+static void hlist_test_entry_safe(struct kunit *test)
+{
+ struct hlist_test_struct test_struct;
+
+ KUNIT_EXPECT_PTR_EQ(test, &test_struct,
+ hlist_entry_safe(&(test_struct.list),
+ struct hlist_test_struct, list));
+
+ KUNIT_EXPECT_PTR_EQ(test, NULL,
+ hlist_entry_safe((struct hlist_node *)NULL,
+ struct hlist_test_struct, list));
+}
+
+static void hlist_test_for_each(struct kunit *test)
+{
+ struct hlist_node entries[3], *cur;
+ HLIST_HEAD(list);
+ int i = 0;
+
+ hlist_add_head(&entries[0], &list);
+ hlist_add_behind(&entries[1], &entries[0]);
+ hlist_add_behind(&entries[2], &entries[1]);
+
+ hlist_for_each(cur, &list) {
+ KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 3);
+}
+
+static void hlist_test_for_each_safe(struct kunit *test)
+{
+ struct hlist_node entries[3], *cur, *n;
+ HLIST_HEAD(list);
+ int i = 0;
+
+ hlist_add_head(&entries[0], &list);
+ hlist_add_behind(&entries[1], &entries[0]);
+ hlist_add_behind(&entries[2], &entries[1]);
+
+ hlist_for_each_safe(cur, n, &list) {
+ KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
+ hlist_del(&entries[i]);
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 3);
+ KUNIT_EXPECT_TRUE(test, hlist_empty(&list));
+}
+
+static void hlist_test_for_each_entry(struct kunit *test)
+{
+ struct hlist_test_struct entries[5], *cur;
+ HLIST_HEAD(list);
+ int i = 0;
+
+ entries[0].data = 0;
+ hlist_add_head(&entries[0].list, &list);
+ for (i = 1; i < 5; ++i) {
+ entries[i].data = i;
+ hlist_add_behind(&entries[i].list, &entries[i-1].list);
+ }
+
+ i = 0;
+
+ hlist_for_each_entry(cur, &list, list) {
+ KUNIT_EXPECT_EQ(test, cur->data, i);
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 5);
+}
+
+static void hlist_test_for_each_entry_continue(struct kunit *test)
+{
+ struct hlist_test_struct entries[5], *cur;
+ HLIST_HEAD(list);
+ int i = 0;
+
+ entries[0].data = 0;
+ hlist_add_head(&entries[0].list, &list);
+ for (i = 1; i < 5; ++i) {
+ entries[i].data = i;
+ hlist_add_behind(&entries[i].list, &entries[i-1].list);
+ }
+
+ /* We skip the first (zero-th) entry. */
+ i = 1;
+
+ cur = &entries[0];
+ hlist_for_each_entry_continue(cur, list) {
+ KUNIT_EXPECT_EQ(test, cur->data, i);
+ /* Stamp over the entry. */
+ cur->data = 42;
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 5);
+ /* The first entry was not visited. */
+ KUNIT_EXPECT_EQ(test, entries[0].data, 0);
+ /* The second (and presumably the others) were. */
+ KUNIT_EXPECT_EQ(test, entries[1].data, 42);
+}
+
+static void hlist_test_for_each_entry_from(struct kunit *test)
+{
+ struct hlist_test_struct entries[5], *cur;
+ HLIST_HEAD(list);
+ int i = 0;
+
+ entries[0].data = 0;
+ hlist_add_head(&entries[0].list, &list);
+ for (i = 1; i < 5; ++i) {
+ entries[i].data = i;
+ hlist_add_behind(&entries[i].list, &entries[i-1].list);
+ }
+
+ i = 0;
+
+ cur = &entries[0];
+ hlist_for_each_entry_from(cur, list) {
+ KUNIT_EXPECT_EQ(test, cur->data, i);
+ /* Stamp over the entry. */
+ cur->data = 42;
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 5);
+ /* The first entry was visited. */
+ KUNIT_EXPECT_EQ(test, entries[0].data, 42);
+}
+
+static void hlist_test_for_each_entry_safe(struct kunit *test)
+{
+ struct hlist_test_struct entries[5], *cur;
+ struct hlist_node *tmp_node;
+ HLIST_HEAD(list);
+ int i = 0;
+
+ entries[0].data = 0;
+ hlist_add_head(&entries[0].list, &list);
+ for (i = 1; i < 5; ++i) {
+ entries[i].data = i;
+ hlist_add_behind(&entries[i].list, &entries[i-1].list);
+ }
+
+ i = 0;
+
+ hlist_for_each_entry_safe(cur, tmp_node, &list, list) {
+ KUNIT_EXPECT_EQ(test, cur->data, i);
+ hlist_del(&cur->list);
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, 5);
+ KUNIT_EXPECT_TRUE(test, hlist_empty(&list));
+}
+
+static struct kunit_case hlist_test_cases[] = {
+ KUNIT_CASE(hlist_test_init),
+ KUNIT_CASE(hlist_test_unhashed),
+ KUNIT_CASE(hlist_test_unhashed_lockless),
+ KUNIT_CASE(hlist_test_del),
+ KUNIT_CASE(hlist_test_del_init),
+ KUNIT_CASE(hlist_test_add),
+ KUNIT_CASE(hlist_test_fake),
+ KUNIT_CASE(hlist_test_is_singular_node),
+ KUNIT_CASE(hlist_test_empty),
+ KUNIT_CASE(hlist_test_move_list),
+ KUNIT_CASE(hlist_test_entry),
+ KUNIT_CASE(hlist_test_entry_safe),
+ KUNIT_CASE(hlist_test_for_each),
+ KUNIT_CASE(hlist_test_for_each_safe),
+ KUNIT_CASE(hlist_test_for_each_entry),
+ KUNIT_CASE(hlist_test_for_each_entry_continue),
+ KUNIT_CASE(hlist_test_for_each_entry_from),
+ KUNIT_CASE(hlist_test_for_each_entry_safe),
+ {},
+};
+
+static struct kunit_suite hlist_test_module = {
+ .name = "hlist",
+ .test_cases = hlist_test_cases,
+};
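For reference, the pattern these hlist cases exercise looks roughly like the sketch below in client code, where the head is typically a hash-table bucket; struct item, its fields, and demo() are illustrative names, not kernel API.

struct item {
	int key;
	struct hlist_node node;
};

static HLIST_HEAD(bucket);	/* e.g. one bucket of a hash table */

static void demo(void)
{
	struct item a = { .key = 1 };
	struct item *cur;

	hlist_add_head(&a.node, &bucket);	/* push onto the bucket */

	hlist_for_each_entry(cur, &bucket, node)
		pr_info("key=%d\n", cur->key);

	hlist_del_init(&a.node);	/* remove; node is reusable afterwards */
}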
+
+struct klist_test_struct {
+ int data;
+ struct klist klist;
+ struct klist_node klist_node;
+};
+
+static int node_count;
+static struct klist_node *last_node;
+
+static void check_node(struct klist_node *node_ptr)
+{
+ node_count++;
+ last_node = node_ptr;
+}
+
+static void check_delete_node(struct klist_node *node_ptr)
+{
+ node_count--;
+ last_node = node_ptr;
+}
+
+static void klist_test_add_tail(struct kunit *test)
+{
+ struct klist_node a, b;
+ struct klist mylist;
+ struct klist_iter i;
+
+ node_count = 0;
+ klist_init(&mylist, &check_node, NULL);
+
+ klist_add_tail(&a, &mylist);
+ KUNIT_EXPECT_EQ(test, node_count, 1);
+ KUNIT_EXPECT_PTR_EQ(test, last_node, &a);
+
+ klist_add_tail(&b, &mylist);
+ KUNIT_EXPECT_EQ(test, node_count, 2);
+ KUNIT_EXPECT_PTR_EQ(test, last_node, &b);
+
+ /* should be [list] -> a -> b */
+ klist_iter_init(&mylist, &i);
+
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &a);
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &b);
+ KUNIT_EXPECT_NULL(test, klist_next(&i));
+
+ klist_iter_exit(&i);
+}
+
+static void klist_test_add_head(struct kunit *test)
+{
+ struct klist_node a, b;
+ struct klist mylist;
+ struct klist_iter i;
+
+ node_count = 0;
+ klist_init(&mylist, &check_node, NULL);
+
+ klist_add_head(&a, &mylist);
+ KUNIT_EXPECT_EQ(test, node_count, 1);
+ KUNIT_EXPECT_PTR_EQ(test, last_node, &a);
+
+ klist_add_head(&b, &mylist);
+ KUNIT_EXPECT_EQ(test, node_count, 2);
+ KUNIT_EXPECT_PTR_EQ(test, last_node, &b);
+
+ /* should be [list] -> b -> a */
+ klist_iter_init(&mylist, &i);
+
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &b);
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &a);
+ KUNIT_EXPECT_NULL(test, klist_next(&i));
+
+ klist_iter_exit(&i);
+}
+
+static void klist_test_add_behind(struct kunit *test)
+{
+ struct klist_node a, b, c, d;
+ struct klist mylist;
+ struct klist_iter i;
+
+ node_count = 0;
+ klist_init(&mylist, &check_node, NULL);
+
+ klist_add_head(&a, &mylist);
+ klist_add_head(&b, &mylist);
+
+ klist_add_behind(&c, &a);
+ KUNIT_EXPECT_EQ(test, node_count, 3);
+ KUNIT_EXPECT_PTR_EQ(test, last_node, &c);
+
+ klist_add_behind(&d, &b);
+ KUNIT_EXPECT_EQ(test, node_count, 4);
+ KUNIT_EXPECT_PTR_EQ(test, last_node, &d);
+
+ klist_iter_init(&mylist, &i);
+
+ /* should be [list] -> b -> d -> a -> c */
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &b);
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &d);
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &a);
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &c);
+ KUNIT_EXPECT_NULL(test, klist_next(&i));
+
+ klist_iter_exit(&i);
+}
+
+static void klist_test_add_before(struct kunit *test)
+{
+ struct klist_node a, b, c, d;
+ struct klist mylist;
+ struct klist_iter i;
+
+ node_count = 0;
+ klist_init(&mylist, &check_node, NULL);
+
+ klist_add_head(&a, &mylist);
+ klist_add_head(&b, &mylist);
+ klist_add_before(&c, &a);
+ KUNIT_EXPECT_EQ(test, node_count, 3);
+ KUNIT_EXPECT_PTR_EQ(test, last_node, &c);
+
+ klist_add_before(&d, &b);
+ KUNIT_EXPECT_EQ(test, node_count, 4);
+ KUNIT_EXPECT_PTR_EQ(test, last_node, &d);
+
+ klist_iter_init(&mylist, &i);
+
+ /* should be [list] -> d -> b -> c -> a */
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &d);
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &b);
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &c);
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &a);
+ KUNIT_EXPECT_NULL(test, klist_next(&i));
+
+ klist_iter_exit(&i);
+}
+
+/*
+ * Verify that klist_del() delays the deletion of a node until there
+ * are no other references to it
+ */
+static void klist_test_del_refcount_greater_than_zero(struct kunit *test)
+{
+ struct klist_node a, b, c, d;
+ struct klist mylist;
+ struct klist_iter i;
+
+ node_count = 0;
+ klist_init(&mylist, &check_node, &check_delete_node);
+
+ /* Add nodes a, b, c, d to the list */
+ klist_add_tail(&a, &mylist);
+ klist_add_tail(&b, &mylist);
+ klist_add_tail(&c, &mylist);
+ klist_add_tail(&d, &mylist);
+
+ klist_iter_init(&mylist, &i);
+
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &a);
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &b);
+ /* Advance the iterator to point to node c */
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &c);
+
+ /* Try to delete node c while there is a reference to it */
+ klist_del(&c);
+
+ /*
+ * Verify that node c is still attached to the list even after being
+ * deleted. Since the iterator still points to c, the reference count is not
+ * decreased to 0
+ */
+ KUNIT_EXPECT_TRUE(test, klist_node_attached(&c));
+
+ /* Check that node c has not been removed yet */
+ KUNIT_EXPECT_EQ(test, node_count, 4);
+ KUNIT_EXPECT_PTR_EQ(test, last_node, &d);
+
+ klist_iter_exit(&i);
+
+ /*
+ * Since the iterator is no longer pointing to node c, node c is removed
+ * from the list
+ */
+ KUNIT_EXPECT_EQ(test, node_count, 3);
+ KUNIT_EXPECT_PTR_EQ(test, last_node, &c);
+}
+
+/*
+ * Verify that klist_del() deletes a node immediately when there are no
+ * other references to it.
+ */
+static void klist_test_del_refcount_zero(struct kunit *test)
+{
+ struct klist_node a, b, c, d;
+ struct klist mylist;
+ struct klist_iter i;
+
+ node_count = 0;
+ klist_init(&mylist, &check_node, &check_delete_node);
+
+ /* Add nodes a, b, c, d to the list */
+ klist_add_tail(&a, &mylist);
+ klist_add_tail(&b, &mylist);
+ klist_add_tail(&c, &mylist);
+ klist_add_tail(&d, &mylist);
+ /* Delete node c */
+ klist_del(&c);
+
+ /* Check that node c is deleted from the list */
+ KUNIT_EXPECT_EQ(test, node_count, 3);
+ KUNIT_EXPECT_PTR_EQ(test, last_node, &c);
+
+ /* Should be [list] -> a -> b -> d */
+ klist_iter_init(&mylist, &i);
+
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &a);
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &b);
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &d);
+ KUNIT_EXPECT_NULL(test, klist_next(&i));
+
+ klist_iter_exit(&i);
+}
+
+static void klist_test_remove(struct kunit *test)
+{
+ /* This test doesn't check correctness under concurrent access */
+ struct klist_node a, b, c, d;
+ struct klist mylist;
+ struct klist_iter i;
+
+ node_count = 0;
+ klist_init(&mylist, &check_node, &check_delete_node);
+
+ /* Add nodes a, b, c, d to the list */
+ klist_add_tail(&a, &mylist);
+ klist_add_tail(&b, &mylist);
+ klist_add_tail(&c, &mylist);
+ klist_add_tail(&d, &mylist);
+ /* Delete node c */
+ klist_remove(&c);
+
+ /* Check the nodes in the list */
+ KUNIT_EXPECT_EQ(test, node_count, 3);
+ KUNIT_EXPECT_PTR_EQ(test, last_node, &c);
+
+ /* should be [list] -> a -> b -> d */
+ klist_iter_init(&mylist, &i);
+
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &a);
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &b);
+ KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &d);
+ KUNIT_EXPECT_NULL(test, klist_next(&i));
+
+ klist_iter_exit(&i);
+}
+
+static void klist_test_node_attached(struct kunit *test)
+{
+ struct klist_node a = {};
+ struct klist mylist;
+
+ klist_init(&mylist, NULL, NULL);
+
+ KUNIT_EXPECT_FALSE(test, klist_node_attached(&a));
+ klist_add_head(&a, &mylist);
+ KUNIT_EXPECT_TRUE(test, klist_node_attached(&a));
+ klist_del(&a);
+ KUNIT_EXPECT_FALSE(test, klist_node_attached(&a));
+}
+
+static struct kunit_case klist_test_cases[] = {
+ KUNIT_CASE(klist_test_add_tail),
+ KUNIT_CASE(klist_test_add_head),
+ KUNIT_CASE(klist_test_add_behind),
+ KUNIT_CASE(klist_test_add_before),
+ KUNIT_CASE(klist_test_del_refcount_greater_than_zero),
+ KUNIT_CASE(klist_test_del_refcount_zero),
+ KUNIT_CASE(klist_test_remove),
+ KUNIT_CASE(klist_test_node_attached),
+ {},
+};
+
+static struct kunit_suite klist_test_module = {
+ .name = "klist",
+ .test_cases = klist_test_cases,
+};
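The get/put hooks that check_node() and check_delete_node() stand in for above are the heart of klist's reference counting; a hedged sketch of typical client code follows (the demo_* names are illustrative):

static void demo_get(struct klist_node *n)
{
	/* invoked when a node is added to the klist */
}

static void demo_put(struct klist_node *n)
{
	/* invoked when the node's last reference is dropped */
}

static void klist_demo(void)
{
	struct klist k;
	struct klist_node n;
	struct klist_iter iter;

	klist_init(&k, demo_get, demo_put);
	klist_add_tail(&n, &k);

	klist_iter_init(&k, &iter);
	while (klist_next(&iter))
		;	/* iteration holds a reference to each node */
	klist_iter_exit(&iter);

	klist_del(&n);	/* removal is deferred until the refcount drops */
}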
+
+kunit_test_suites(&list_test_module, &hlist_test_module, &klist_test_module);
+
+MODULE_LICENSE("GPL v2");
diff --git a/lib/list_debug.c b/lib/list_debug.c
new file mode 100644
index 000000000..db602417f
--- /dev/null
+++ b/lib/list_debug.c
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2006, Red Hat, Inc., Dave Jones
+ * Released under the General Public License (GPL).
+ *
+ * This file contains the linked list validation and error reporting for
+ * LIST_HARDENED and DEBUG_LIST.
+ */
+
+#include <linux/export.h>
+#include <linux/list.h>
+#include <linux/bug.h>
+#include <linux/kernel.h>
+#include <linux/rculist.h>
+
+/*
+ * Check that the data structures for the list manipulations are reasonably
+ * valid. Failures here indicate memory corruption (and possibly an exploit
+ * attempt).
+ */
+
+__list_valid_slowpath
+bool __list_add_valid_or_report(struct list_head *new, struct list_head *prev,
+ struct list_head *next)
+{
+ if (CHECK_DATA_CORRUPTION(prev == NULL,
+ "list_add corruption. prev is NULL.\n") ||
+ CHECK_DATA_CORRUPTION(next == NULL,
+ "list_add corruption. next is NULL.\n") ||
+ CHECK_DATA_CORRUPTION(next->prev != prev,
+ "list_add corruption. next->prev should be prev (%px), but was %px. (next=%px).\n",
+ prev, next->prev, next) ||
+ CHECK_DATA_CORRUPTION(prev->next != next,
+ "list_add corruption. prev->next should be next (%px), but was %px. (prev=%px).\n",
+ next, prev->next, prev) ||
+ CHECK_DATA_CORRUPTION(new == prev || new == next,
+ "list_add double add: new=%px, prev=%px, next=%px.\n",
+ new, prev, next))
+ return false;
+
+ return true;
+}
+EXPORT_SYMBOL(__list_add_valid_or_report);
+
+__list_valid_slowpath
+bool __list_del_entry_valid_or_report(struct list_head *entry)
+{
+ struct list_head *prev, *next;
+
+ prev = entry->prev;
+ next = entry->next;
+
+ if (CHECK_DATA_CORRUPTION(next == NULL,
+ "list_del corruption, %px->next is NULL\n", entry) ||
+ CHECK_DATA_CORRUPTION(prev == NULL,
+ "list_del corruption, %px->prev is NULL\n", entry) ||
+ CHECK_DATA_CORRUPTION(next == LIST_POISON1,
+ "list_del corruption, %px->next is LIST_POISON1 (%px)\n",
+ entry, LIST_POISON1) ||
+ CHECK_DATA_CORRUPTION(prev == LIST_POISON2,
+ "list_del corruption, %px->prev is LIST_POISON2 (%px)\n",
+ entry, LIST_POISON2) ||
+ CHECK_DATA_CORRUPTION(prev->next != entry,
+ "list_del corruption. prev->next should be %px, but was %px. (prev=%px)\n",
+ entry, prev->next, prev) ||
+ CHECK_DATA_CORRUPTION(next->prev != entry,
+ "list_del corruption. next->prev should be %px, but was %px. (next=%px)\n",
+ entry, next->prev, next))
+ return false;
+
+ return true;
+}
+EXPORT_SYMBOL(__list_del_entry_valid_or_report);
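These slowpaths are the reporting half of the hardened list checks; the inline fastpath in include/linux/list.h calls them before touching any pointers. A sketch of the shape of such a caller (checked_list_add() is illustrative, not the kernel's exact inline):

static inline void checked_list_add(struct list_head *new,
				    struct list_head *prev,
				    struct list_head *next)
{
	if (!__list_add_valid_or_report(new, prev, next))
		return;	/* corruption was reported; refuse to link */

	next->prev = new;
	new->next = next;
	new->prev = prev;
	WRITE_ONCE(prev->next, new);
}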
diff --git a/lib/list_sort.c b/lib/list_sort.c
new file mode 100644
index 000000000..0fb59e92c
--- /dev/null
+++ b/lib/list_sort.c
@@ -0,0 +1,253 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/kernel.h>
+#include <linux/bug.h>
+#include <linux/compiler.h>
+#include <linux/export.h>
+#include <linux/string.h>
+#include <linux/list_sort.h>
+#include <linux/list.h>
+
+/*
+ * Returns a list organized in an intermediate format suited
+ * to chaining of merge() calls: null-terminated, no reserved or
+ * sentinel head node, "prev" links not maintained.
+ */
+__attribute__((nonnull(2,3,4)))
+static struct list_head *merge(void *priv, list_cmp_func_t cmp,
+ struct list_head *a, struct list_head *b)
+{
+ struct list_head *head, **tail = &head;
+
+ for (;;) {
+ /* if equal, take 'a' -- important for sort stability */
+ if (cmp(priv, a, b) <= 0) {
+ *tail = a;
+ tail = &a->next;
+ a = a->next;
+ if (!a) {
+ *tail = b;
+ break;
+ }
+ } else {
+ *tail = b;
+ tail = &b->next;
+ b = b->next;
+ if (!b) {
+ *tail = a;
+ break;
+ }
+ }
+ }
+ return head;
+}
+
+/*
+ * Combine final list merge with restoration of standard doubly-linked
+ * list structure. This approach duplicates code from merge(), but
+ * runs faster than the tidier alternatives of either a separate final
+ * prev-link restoration pass, or maintaining the prev links
+ * throughout.
+ */
+__attribute__((nonnull(2,3,4,5)))
+static void merge_final(void *priv, list_cmp_func_t cmp, struct list_head *head,
+ struct list_head *a, struct list_head *b)
+{
+ struct list_head *tail = head;
+ u8 count = 0;
+
+ for (;;) {
+ /* if equal, take 'a' -- important for sort stability */
+ if (cmp(priv, a, b) <= 0) {
+ tail->next = a;
+ a->prev = tail;
+ tail = a;
+ a = a->next;
+ if (!a)
+ break;
+ } else {
+ tail->next = b;
+ b->prev = tail;
+ tail = b;
+ b = b->next;
+ if (!b) {
+ b = a;
+ break;
+ }
+ }
+ }
+
+ /* Finish linking remainder of list b on to tail */
+ tail->next = b;
+ do {
+ /*
+ * If the merge is highly unbalanced (e.g. the input is
+ * already sorted), this loop may run many iterations.
+ * Continue callbacks to the client even though no
+ * element comparison is needed, so the client's cmp()
+ * routine can invoke cond_resched() periodically.
+ */
+ if (unlikely(!++count))
+ cmp(priv, b, b);
+ b->prev = tail;
+ tail = b;
+ b = b->next;
+ } while (b);
+
+ /* And the final links to make a circular doubly-linked list */
+ tail->next = head;
+ head->prev = tail;
+}
+
+/**
+ * list_sort - sort a list
+ * @priv: private data, opaque to list_sort(), passed to @cmp
+ * @head: the list to sort
+ * @cmp: the elements comparison function
+ *
+ * The comparison function @cmp must return > 0 if @a should sort after
+ * @b ("@a > @b" if you want an ascending sort), and <= 0 if @a should
+ * sort before @b *or* their original order should be preserved. It is
+ * always called with the element that came first in the input in @a,
+ * and list_sort is a stable sort, so it is not necessary to distinguish
+ * the @a < @b and @a == @b cases.
+ *
+ * This is compatible with two styles of @cmp function:
+ * - The traditional style which returns <0 / =0 / >0, or
+ * - Returning a boolean 0/1.
+ * The latter offers a chance to save a few cycles in the comparison
+ * (which is used by e.g. plug_ctx_cmp() in block/blk-mq.c).
+ *
+ * A good way to write a multi-word comparison is::
+ *
+ * if (a->high != b->high)
+ * return a->high > b->high;
+ * if (a->middle != b->middle)
+ * return a->middle > b->middle;
+ * return a->low > b->low;
+ *
+ *
+ * This mergesort is as eager as possible while always performing at least
+ * 2:1 balanced merges. Given two pending sublists of size 2^k, they are
+ * merged to a size-2^(k+1) list as soon as we have 2^k following elements.
+ *
+ * Thus, it will avoid cache thrashing as long as 3*2^k elements can
+ * fit into the cache. Not quite as good as a fully-eager bottom-up
+ * mergesort, but it does use 0.2*n fewer comparisons, so is faster in
+ * the common case that everything fits into L1.
+ *
+ *
+ * The merging is controlled by "count", the number of elements in the
+ * pending lists. This is beautifully simple code, but rather subtle.
+ *
+ * Each time we increment "count", we set one bit (bit k) and clear
+ * bits k-1 .. 0. Each time this happens (except the very first time
+ * for each bit, when count increments to 2^k), we merge two lists of
+ * size 2^k into one list of size 2^(k+1).
+ *
+ * This merge happens exactly when the count reaches an odd multiple of
+ * 2^k, which is when we have 2^k elements pending in smaller lists,
+ * so it's safe to merge away two lists of size 2^k.
+ *
+ * After this happens twice, we have created two lists of size 2^(k+1),
+ * which will be merged into a list of size 2^(k+2) before we create
+ * a third list of size 2^(k+1), so there are never more than two pending.
+ *
+ * The number of pending lists of size 2^k is determined by the
+ * state of bit k of "count" plus two extra pieces of information:
+ *
+ * - The state of bit k-1 (when k == 0, consider bit -1 always set), and
+ * - Whether the higher-order bits are zero or non-zero (i.e.
+ * is count >= 2^(k+1)).
+ *
+ * There are six states we distinguish. "x" represents some arbitrary
+ * bits, and "y" represents some arbitrary non-zero bits:
+ * 0: 00x: 0 pending of size 2^k; x pending of sizes < 2^k
+ * 1: 01x: 0 pending of size 2^k; 2^(k-1) + x pending of sizes < 2^k
+ * 2: x10x: 0 pending of size 2^k; 2^k + x pending of sizes < 2^k
+ * 3: x11x: 1 pending of size 2^k; 2^(k-1) + x pending of sizes < 2^k
+ * 4: y00x: 1 pending of size 2^k; 2^k + x pending of sizes < 2^k
+ * 5: y01x: 2 pending of size 2^k; 2^(k-1) + x pending of sizes < 2^k
+ * (merge and loop back to state 2)
+ *
+ * We gain lists of size 2^k in the 2->3 and 4->5 transitions (because
+ * bit k-1 is set while the more significant bits are non-zero) and
+ * merge them away in the 5->2 transition. Note in particular that just
+ * before the 5->2 transition, all lower-order bits are 11 (state 3),
+ * so there is one list of each smaller size.
+ *
+ * When we reach the end of the input, we merge all the pending
+ * lists, from smallest to largest. If you work through cases 2 to
+ * 5 above, you can see that the number of elements we merge with a list
+ * of size 2^k varies from 2^(k-1) (cases 3 and 5 when x == 0) to
+ * 2^(k+1) - 1 (second merge of case 5 when x == 2^(k-1) - 1).
+ */
+__attribute__((nonnull(2,3)))
+void list_sort(void *priv, struct list_head *head, list_cmp_func_t cmp)
+{
+ struct list_head *list = head->next, *pending = NULL;
+ size_t count = 0; /* Count of pending */
+
+ if (list == head->prev) /* Zero or one elements */
+ return;
+
+ /* Convert to a null-terminated singly-linked list. */
+ head->prev->next = NULL;
+
+ /*
+ * Data structure invariants:
+ * - All lists are singly linked and null-terminated; prev
+ * pointers are not maintained.
+ * - pending is a prev-linked "list of lists" of sorted
+ * sublists awaiting further merging.
+ * - Each of the sorted sublists is power-of-two in size.
+ * - Sublists are sorted by size and age, smallest & newest at front.
+ * - There are zero to two sublists of each size.
+ * - A pair of pending sublists are merged as soon as the number
+ * of following pending elements equals their size (i.e.
+ * each time count reaches an odd multiple of that size).
+ * That ensures each later final merge will be at worst 2:1.
+ * - Each round consists of:
+ * - Merging the two sublists selected by the highest bit
+ * which flips when count is incremented, and
+ * - Adding an element from the input as a size-1 sublist.
+ */
+ do {
+ size_t bits;
+ struct list_head **tail = &pending;
+
+ /* Find the least-significant clear bit in count */
+ for (bits = count; bits & 1; bits >>= 1)
+ tail = &(*tail)->prev;
+ /* Do the indicated merge */
+ if (likely(bits)) {
+ struct list_head *a = *tail, *b = a->prev;
+
+ a = merge(priv, cmp, b, a);
+ /* Install the merged result in place of the inputs */
+ a->prev = b->prev;
+ *tail = a;
+ }
+
+ /* Move one element from input list to pending */
+ list->prev = pending;
+ pending = list;
+ list = list->next;
+ pending->next = NULL;
+ count++;
+ } while (list);
+
+ /* End of input; merge together all the pending lists. */
+ list = pending;
+ pending = pending->prev;
+ for (;;) {
+ struct list_head *next = pending->prev;
+
+ if (!next)
+ break;
+ list = merge(priv, cmp, pending, list);
+ pending = next;
+ }
+ /* The final merge, rebuilding prev links */
+ merge_final(priv, cmp, head, pending, list);
+}
+EXPORT_SYMBOL(list_sort);
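A usage sketch matching the doc comment above; struct entry and entry_cmp() are illustrative. The comparison only has to report whether a sorts after b, which is all a stable sort needs:

struct entry {
	struct list_head list;
	int high, low;
};

static int entry_cmp(void *priv, const struct list_head *a,
		     const struct list_head *b)
{
	const struct entry *ea = list_entry(a, struct entry, list);
	const struct entry *eb = list_entry(b, struct entry, list);

	/* boolean style: non-zero iff ea should sort after eb */
	if (ea->high != eb->high)
		return ea->high > eb->high;
	return ea->low > eb->low;
}

/* Given a populated LIST_HEAD(entries): list_sort(NULL, &entries, entry_cmp); */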
diff --git a/lib/livepatch/Makefile b/lib/livepatch/Makefile
new file mode 100644
index 000000000..dcc912b34
--- /dev/null
+++ b/lib/livepatch/Makefile
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for livepatch test code.
+
+obj-$(CONFIG_TEST_LIVEPATCH) += test_klp_atomic_replace.o \
+ test_klp_callbacks_demo.o \
+ test_klp_callbacks_demo2.o \
+ test_klp_callbacks_busy.o \
+ test_klp_callbacks_mod.o \
+ test_klp_livepatch.o \
+ test_klp_shadow_vars.o \
+ test_klp_state.o \
+ test_klp_state2.o \
+ test_klp_state3.o
diff --git a/lib/livepatch/test_klp_atomic_replace.c b/lib/livepatch/test_klp_atomic_replace.c
new file mode 100644
index 000000000..5af7093ca
--- /dev/null
+++ b/lib/livepatch/test_klp_atomic_replace.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Joe Lawrence <joe.lawrence@redhat.com>
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/livepatch.h>
+
+static int replace;
+module_param(replace, int, 0644);
+MODULE_PARM_DESC(replace, "replace (default=0)");
+
+#include <linux/seq_file.h>
+static int livepatch_meminfo_proc_show(struct seq_file *m, void *v)
+{
+ seq_printf(m, "%s: %s\n", THIS_MODULE->name,
+ "this has been live patched");
+ return 0;
+}
+
+static struct klp_func funcs[] = {
+ {
+ .old_name = "meminfo_proc_show",
+ .new_func = livepatch_meminfo_proc_show,
+ }, {}
+};
+
+static struct klp_object objs[] = {
+ {
+ /* name being NULL means vmlinux */
+ .funcs = funcs,
+ }, {}
+};
+
+static struct klp_patch patch = {
+ .mod = THIS_MODULE,
+ .objs = objs,
+ /* set .replace in the init function below for demo purposes */
+};
+
+static int test_klp_atomic_replace_init(void)
+{
+ patch.replace = replace;
+ return klp_enable_patch(&patch);
+}
+
+static void test_klp_atomic_replace_exit(void)
+{
+}
+
+module_init(test_klp_atomic_replace_init);
+module_exit(test_klp_atomic_replace_exit);
+MODULE_LICENSE("GPL");
+MODULE_INFO(livepatch, "Y");
+MODULE_AUTHOR("Joe Lawrence <joe.lawrence@redhat.com>");
+MODULE_DESCRIPTION("Livepatch test: atomic replace");
diff --git a/lib/livepatch/test_klp_callbacks_busy.c b/lib/livepatch/test_klp_callbacks_busy.c
new file mode 100644
index 000000000..133929e0c
--- /dev/null
+++ b/lib/livepatch/test_klp_callbacks_busy.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Joe Lawrence <joe.lawrence@redhat.com>
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+
+/* load/run-time control from sysfs writer */
+static bool block_transition;
+module_param(block_transition, bool, 0644);
+MODULE_PARM_DESC(block_transition, "block_transition (default=false)");
+
+static void busymod_work_func(struct work_struct *work);
+static DECLARE_WORK(work, busymod_work_func);
+static DECLARE_COMPLETION(busymod_work_started);
+
+static void busymod_work_func(struct work_struct *work)
+{
+ pr_info("%s enter\n", __func__);
+ complete(&busymod_work_started);
+
+ while (READ_ONCE(block_transition)) {
+ /*
+ * Busy-wait until the sysfs writer has acknowledged a
+ * blocked transition and clears the flag.
+ */
+ msleep(20);
+ }
+
+ pr_info("%s exit\n", __func__);
+}
+
+static int test_klp_callbacks_busy_init(void)
+{
+ pr_info("%s\n", __func__);
+ schedule_work(&work);
+
+ /*
+ * To synchronize kernel messages, keep the init function from
+ * exiting until the work function's entry message has printed.
+ */
+ wait_for_completion(&busymod_work_started);
+
+ if (!block_transition) {
+ /*
+ * Serialize output: print all messages from the work
+ * function before returning from init().
+ */
+ flush_work(&work);
+ }
+
+ return 0;
+}
+
+static void test_klp_callbacks_busy_exit(void)
+{
+ WRITE_ONCE(block_transition, false);
+ flush_work(&work);
+ pr_info("%s\n", __func__);
+}
+
+module_init(test_klp_callbacks_busy_init);
+module_exit(test_klp_callbacks_busy_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Joe Lawrence <joe.lawrence@redhat.com>");
+MODULE_DESCRIPTION("Livepatch test: busy target module");
diff --git a/lib/livepatch/test_klp_callbacks_demo.c b/lib/livepatch/test_klp_callbacks_demo.c
new file mode 100644
index 000000000..3fd8fe1cd
--- /dev/null
+++ b/lib/livepatch/test_klp_callbacks_demo.c
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Joe Lawrence <joe.lawrence@redhat.com>
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/livepatch.h>
+
+static int pre_patch_ret;
+module_param(pre_patch_ret, int, 0644);
+MODULE_PARM_DESC(pre_patch_ret, "pre_patch_ret (default=0)");
+
+static const char *const module_state[] = {
+ [MODULE_STATE_LIVE] = "[MODULE_STATE_LIVE] Normal state",
+ [MODULE_STATE_COMING] = "[MODULE_STATE_COMING] Fully formed, running module_init",
+ [MODULE_STATE_GOING] = "[MODULE_STATE_GOING] Going away",
+ [MODULE_STATE_UNFORMED] = "[MODULE_STATE_UNFORMED] Still setting it up",
+};
+
+static void callback_info(const char *callback, struct klp_object *obj)
+{
+ if (obj->mod)
+ pr_info("%s: %s -> %s\n", callback, obj->mod->name,
+ module_state[obj->mod->state]);
+ else
+ pr_info("%s: vmlinux\n", callback);
+}
+
+/* Executed on object patching (ie, patch enablement) */
+static int pre_patch_callback(struct klp_object *obj)
+{
+ callback_info(__func__, obj);
+ return pre_patch_ret;
+}
+
+/* Executed on object patching (ie, patch enablement) */
+static void post_patch_callback(struct klp_object *obj)
+{
+ callback_info(__func__, obj);
+}
+
+/* Executed on object unpatching (ie, patch disablement) */
+static void pre_unpatch_callback(struct klp_object *obj)
+{
+ callback_info(__func__, obj);
+}
+
+/* Executed on object unpatching (ie, patch disablement) */
+static void post_unpatch_callback(struct klp_object *obj)
+{
+ callback_info(__func__, obj);
+}
+
+static void patched_work_func(struct work_struct *work)
+{
+ pr_info("%s\n", __func__);
+}
+
+static struct klp_func no_funcs[] = {
+ {}
+};
+
+static struct klp_func busymod_funcs[] = {
+ {
+ .old_name = "busymod_work_func",
+ .new_func = patched_work_func,
+ }, {}
+};
+
+static struct klp_object objs[] = {
+ {
+ .name = NULL, /* vmlinux */
+ .funcs = no_funcs,
+ .callbacks = {
+ .pre_patch = pre_patch_callback,
+ .post_patch = post_patch_callback,
+ .pre_unpatch = pre_unpatch_callback,
+ .post_unpatch = post_unpatch_callback,
+ },
+ }, {
+ .name = "test_klp_callbacks_mod",
+ .funcs = no_funcs,
+ .callbacks = {
+ .pre_patch = pre_patch_callback,
+ .post_patch = post_patch_callback,
+ .pre_unpatch = pre_unpatch_callback,
+ .post_unpatch = post_unpatch_callback,
+ },
+ }, {
+ .name = "test_klp_callbacks_busy",
+ .funcs = busymod_funcs,
+ .callbacks = {
+ .pre_patch = pre_patch_callback,
+ .post_patch = post_patch_callback,
+ .pre_unpatch = pre_unpatch_callback,
+ .post_unpatch = post_unpatch_callback,
+ },
+ }, { }
+};
+
+static struct klp_patch patch = {
+ .mod = THIS_MODULE,
+ .objs = objs,
+};
+
+static int test_klp_callbacks_demo_init(void)
+{
+ return klp_enable_patch(&patch);
+}
+
+static void test_klp_callbacks_demo_exit(void)
+{
+}
+
+module_init(test_klp_callbacks_demo_init);
+module_exit(test_klp_callbacks_demo_exit);
+MODULE_LICENSE("GPL");
+MODULE_INFO(livepatch, "Y");
+MODULE_AUTHOR("Joe Lawrence <joe.lawrence@redhat.com>");
+MODULE_DESCRIPTION("Livepatch test: livepatch demo");
diff --git a/lib/livepatch/test_klp_callbacks_demo2.c b/lib/livepatch/test_klp_callbacks_demo2.c
new file mode 100644
index 000000000..5417573e8
--- /dev/null
+++ b/lib/livepatch/test_klp_callbacks_demo2.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Joe Lawrence <joe.lawrence@redhat.com>
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/livepatch.h>
+
+static int replace;
+module_param(replace, int, 0644);
+MODULE_PARM_DESC(replace, "replace (default=0)");
+
+static const char *const module_state[] = {
+ [MODULE_STATE_LIVE] = "[MODULE_STATE_LIVE] Normal state",
+ [MODULE_STATE_COMING] = "[MODULE_STATE_COMING] Fully formed, running module_init",
+ [MODULE_STATE_GOING] = "[MODULE_STATE_GOING] Going away",
+ [MODULE_STATE_UNFORMED] = "[MODULE_STATE_UNFORMED] Still setting it up",
+};
+
+static void callback_info(const char *callback, struct klp_object *obj)
+{
+ if (obj->mod)
+ pr_info("%s: %s -> %s\n", callback, obj->mod->name,
+ module_state[obj->mod->state]);
+ else
+ pr_info("%s: vmlinux\n", callback);
+}
+
+/* Executed on object patching (ie, patch enablement) */
+static int pre_patch_callback(struct klp_object *obj)
+{
+ callback_info(__func__, obj);
+ return 0;
+}
+
+/* Executed on object patching (ie, patch enablement) */
+static void post_patch_callback(struct klp_object *obj)
+{
+ callback_info(__func__, obj);
+}
+
+/* Executed on object unpatching (ie, patch disablement) */
+static void pre_unpatch_callback(struct klp_object *obj)
+{
+ callback_info(__func__, obj);
+}
+
+/* Executed on object unpatching (ie, patch disablement) */
+static void post_unpatch_callback(struct klp_object *obj)
+{
+ callback_info(__func__, obj);
+}
+
+static struct klp_func no_funcs[] = {
+ { }
+};
+
+static struct klp_object objs[] = {
+ {
+ .name = NULL, /* vmlinux */
+ .funcs = no_funcs,
+ .callbacks = {
+ .pre_patch = pre_patch_callback,
+ .post_patch = post_patch_callback,
+ .pre_unpatch = pre_unpatch_callback,
+ .post_unpatch = post_unpatch_callback,
+ },
+ }, { }
+};
+
+static struct klp_patch patch = {
+ .mod = THIS_MODULE,
+ .objs = objs,
+ /* set .replace in the init function below for demo purposes */
+};
+
+static int test_klp_callbacks_demo2_init(void)
+{
+ patch.replace = replace;
+ return klp_enable_patch(&patch);
+}
+
+static void test_klp_callbacks_demo2_exit(void)
+{
+}
+
+module_init(test_klp_callbacks_demo2_init);
+module_exit(test_klp_callbacks_demo2_exit);
+MODULE_LICENSE("GPL");
+MODULE_INFO(livepatch, "Y");
+MODULE_AUTHOR("Joe Lawrence <joe.lawrence@redhat.com>");
+MODULE_DESCRIPTION("Livepatch test: livepatch demo2");
diff --git a/lib/livepatch/test_klp_callbacks_mod.c b/lib/livepatch/test_klp_callbacks_mod.c
new file mode 100644
index 000000000..8fbe645b1
--- /dev/null
+++ b/lib/livepatch/test_klp_callbacks_mod.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Joe Lawrence <joe.lawrence@redhat.com>
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+static int test_klp_callbacks_mod_init(void)
+{
+ pr_info("%s\n", __func__);
+ return 0;
+}
+
+static void test_klp_callbacks_mod_exit(void)
+{
+ pr_info("%s\n", __func__);
+}
+
+module_init(test_klp_callbacks_mod_init);
+module_exit(test_klp_callbacks_mod_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Joe Lawrence <joe.lawrence@redhat.com>");
+MODULE_DESCRIPTION("Livepatch test: target module");
diff --git a/lib/livepatch/test_klp_livepatch.c b/lib/livepatch/test_klp_livepatch.c
new file mode 100644
index 000000000..aff08199d
--- /dev/null
+++ b/lib/livepatch/test_klp_livepatch.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/livepatch.h>
+
+#include <linux/seq_file.h>
+static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
+{
+ seq_printf(m, "%s: %s\n", THIS_MODULE->name,
+ "this has been live patched");
+ return 0;
+}
+
+static struct klp_func funcs[] = {
+ {
+ .old_name = "cmdline_proc_show",
+ .new_func = livepatch_cmdline_proc_show,
+ }, { }
+};
+
+static struct klp_object objs[] = {
+ {
+ /* name being NULL means vmlinux */
+ .funcs = funcs,
+ }, { }
+};
+
+static struct klp_patch patch = {
+ .mod = THIS_MODULE,
+ .objs = objs,
+};
+
+static int test_klp_livepatch_init(void)
+{
+ return klp_enable_patch(&patch);
+}
+
+static void test_klp_livepatch_exit(void)
+{
+}
+
+module_init(test_klp_livepatch_init);
+module_exit(test_klp_livepatch_exit);
+MODULE_LICENSE("GPL");
+MODULE_INFO(livepatch, "Y");
+MODULE_AUTHOR("Seth Jennings <sjenning@redhat.com>");
+MODULE_DESCRIPTION("Livepatch test: livepatch module");
diff --git a/lib/livepatch/test_klp_shadow_vars.c b/lib/livepatch/test_klp_shadow_vars.c
new file mode 100644
index 000000000..b99116490
--- /dev/null
+++ b/lib/livepatch/test_klp_shadow_vars.c
@@ -0,0 +1,301 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Joe Lawrence <joe.lawrence@redhat.com>
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/livepatch.h>
+#include <linux/slab.h>
+
+/*
+ * Keep a small list of pointers so that we can print address-agnostic
+ * pointer values. Use a rolling integer count to differentiate the values.
+ * Ironically we could have used the shadow variable API to do this, but
+ * let's not lean too heavily on the very code we're testing.
+ */
+static LIST_HEAD(ptr_list);
+struct shadow_ptr {
+ void *ptr;
+ int id;
+ struct list_head list;
+};
+
+static void free_ptr_list(void)
+{
+ struct shadow_ptr *sp, *tmp_sp;
+
+ list_for_each_entry_safe(sp, tmp_sp, &ptr_list, list) {
+ list_del(&sp->list);
+ kfree(sp);
+ }
+}
+
+static int ptr_id(void *ptr)
+{
+ struct shadow_ptr *sp;
+ static int count;
+
+ list_for_each_entry(sp, &ptr_list, list) {
+ if (sp->ptr == ptr)
+ return sp->id;
+ }
+
+ sp = kmalloc(sizeof(*sp), GFP_ATOMIC);
+ if (!sp)
+ return -ENOMEM;
+ sp->ptr = ptr;
+ sp->id = count++;
+
+ list_add(&sp->list, &ptr_list);
+
+ return sp->id;
+}
+
+/*
+ * Shadow variable wrapper functions that echo the function and arguments
+ * to the kernel log for testing verification. Don't display raw pointers,
+ * but use the ptr_id() value instead.
+ */
+static void *shadow_get(void *obj, unsigned long id)
+{
+ int **sv;
+
+ sv = klp_shadow_get(obj, id);
+ pr_info("klp_%s(obj=PTR%d, id=0x%lx) = PTR%d\n",
+ __func__, ptr_id(obj), id, ptr_id(sv));
+
+ return sv;
+}
+
+static void *shadow_alloc(void *obj, unsigned long id, size_t size,
+ gfp_t gfp_flags, klp_shadow_ctor_t ctor,
+ void *ctor_data)
+{
+ int **var = ctor_data;
+ int **sv;
+
+ sv = klp_shadow_alloc(obj, id, size, gfp_flags, ctor, var);
+ pr_info("klp_%s(obj=PTR%d, id=0x%lx, size=%zx, gfp_flags=%pGg), ctor=PTR%d, ctor_data=PTR%d = PTR%d\n",
+ __func__, ptr_id(obj), id, size, &gfp_flags, ptr_id(ctor),
+ ptr_id(*var), ptr_id(sv));
+
+ return sv;
+}
+
+static void *shadow_get_or_alloc(void *obj, unsigned long id, size_t size,
+ gfp_t gfp_flags, klp_shadow_ctor_t ctor,
+ void *ctor_data)
+{
+ int **var = ctor_data;
+ int **sv;
+
+ sv = klp_shadow_get_or_alloc(obj, id, size, gfp_flags, ctor, var);
+ pr_info("klp_%s(obj=PTR%d, id=0x%lx, size=%zx, gfp_flags=%pGg), ctor=PTR%d, ctor_data=PTR%d = PTR%d\n",
+ __func__, ptr_id(obj), id, size, &gfp_flags, ptr_id(ctor),
+ ptr_id(*var), ptr_id(sv));
+
+ return sv;
+}
+
+static void shadow_free(void *obj, unsigned long id, klp_shadow_dtor_t dtor)
+{
+ klp_shadow_free(obj, id, dtor);
+ pr_info("klp_%s(obj=PTR%d, id=0x%lx, dtor=PTR%d)\n",
+ __func__, ptr_id(obj), id, ptr_id(dtor));
+}
+
+static void shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor)
+{
+ klp_shadow_free_all(id, dtor);
+ pr_info("klp_%s(id=0x%lx, dtor=PTR%d)\n", __func__, id, ptr_id(dtor));
+}
+
+/* Shadow variable constructor - remember simple pointer data */
+static int shadow_ctor(void *obj, void *shadow_data, void *ctor_data)
+{
+ int **sv = shadow_data;
+ int **var = ctor_data;
+
+ if (!var)
+ return -EINVAL;
+
+ *sv = *var;
+ pr_info("%s: PTR%d -> PTR%d\n", __func__, ptr_id(sv), ptr_id(*var));
+
+ return 0;
+}
+
+/*
+ * With more than one item to free in the list, the free order is not
+ * deterministic, so shadow_dtor is not passed to shadow_free_all();
+ * the unordered log output would otherwise make the test fail.
+ * (see pass 6)
+ */
+static void shadow_dtor(void *obj, void *shadow_data)
+{
+ int **sv = shadow_data;
+
+ pr_info("%s(obj=PTR%d, shadow_data=PTR%d)\n",
+ __func__, ptr_id(obj), ptr_id(sv));
+}
+
+/* number of objects we simulate that need shadow vars */
+#define NUM_OBJS 3
+
+/* dynamically created obj fields have the following shadow var id values */
+#define SV_ID1 0x1234
+#define SV_ID2 0x1235
+
+/*
+ * The main test case adds new fields (shadow vars) to, and removes them
+ * from, each of these test structure instances. The commented-out fields
+ * at the end of the struct represent the idea that shadow variables may
+ * be added to and removed from the struct during execution.
+ */
+struct test_object {
+ /* Add anything below here to avoid defining an empty struct. */
+ struct shadow_ptr sp;
+
+ /* these represent shadow vars added and removed with SV_ID{1,2} */
+ /* char nfield1; */
+ /* int nfield2; */
+};
+
+static int test_klp_shadow_vars_init(void)
+{
+ struct test_object objs[NUM_OBJS];
+ char nfields1[NUM_OBJS], *pnfields1[NUM_OBJS], **sv1[NUM_OBJS];
+ char *pndup[NUM_OBJS];
+ int nfields2[NUM_OBJS], *pnfields2[NUM_OBJS], **sv2[NUM_OBJS];
+ void **sv;
+ int ret;
+ int i;
+
+ ptr_id(NULL);
+
+ /*
+ * With an empty shadow variable hash table, expect not to find
+ * any matches.
+ */
+ sv = shadow_get(&objs[0], SV_ID1);
+ if (!sv)
+ pr_info(" got expected NULL result\n");
+
+ /* pass 1: init & alloc a char+int pair of svars for each obj */
+ for (i = 0; i < NUM_OBJS; i++) {
+ pnfields1[i] = &nfields1[i];
+ ptr_id(pnfields1[i]);
+
+ if (i % 2) {
+ sv1[i] = shadow_alloc(&objs[i], SV_ID1,
+ sizeof(pnfields1[i]), GFP_KERNEL,
+ shadow_ctor, &pnfields1[i]);
+ } else {
+ sv1[i] = shadow_get_or_alloc(&objs[i], SV_ID1,
+ sizeof(pnfields1[i]), GFP_KERNEL,
+ shadow_ctor, &pnfields1[i]);
+ }
+ if (!sv1[i]) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ pnfields2[i] = &nfields2[i];
+ ptr_id(pnfields2[i]);
+ sv2[i] = shadow_alloc(&objs[i], SV_ID2, sizeof(pnfields2[i]),
+ GFP_KERNEL, shadow_ctor, &pnfields2[i]);
+ if (!sv2[i]) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ }
+
+ /* pass 2: verify we find allocated svars and where they point to */
+ for (i = 0; i < NUM_OBJS; i++) {
+ /* check the "char" svar for all objects */
+ sv = shadow_get(&objs[i], SV_ID1);
+ if (!sv) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if ((char **)sv == sv1[i] && *sv1[i] == pnfields1[i])
+ pr_info(" got expected PTR%d -> PTR%d result\n",
+ ptr_id(sv1[i]), ptr_id(*sv1[i]));
+
+ /* check the "int" svar for all objects */
+ sv = shadow_get(&objs[i], SV_ID2);
+ if (!sv) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if ((int **)sv == sv2[i] && *sv2[i] == pnfields2[i])
+ pr_info(" got expected PTR%d -> PTR%d result\n",
+ ptr_id(sv2[i]), ptr_id(*sv2[i]));
+ }
+
+ /* pass 3: verify that 'get_or_alloc' returns already allocated svars */
+ for (i = 0; i < NUM_OBJS; i++) {
+ pndup[i] = &nfields1[i];
+ ptr_id(pndup[i]);
+
+ sv = shadow_get_or_alloc(&objs[i], SV_ID1, sizeof(pndup[i]),
+ GFP_KERNEL, shadow_ctor, &pndup[i]);
+ if (!sv) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if ((char **)sv == sv1[i] && *sv1[i] == pnfields1[i])
+ pr_info(" got expected PTR%d -> PTR%d result\n",
+ ptr_id(sv1[i]), ptr_id(*sv1[i]));
+ }
+
+ /* pass 4: free <objs[*], SV_ID1> pairs of svars, verify removal */
+ for (i = 0; i < NUM_OBJS; i++) {
+ shadow_free(&objs[i], SV_ID1, shadow_dtor); /* 'char' pairs */
+ sv = shadow_get(&objs[i], SV_ID1);
+ if (!sv)
+ pr_info(" got expected NULL result\n");
+ }
+
+ /* pass 5: check we still find <objs[*], SV_ID2> svar pairs */
+ for (i = 0; i < NUM_OBJS; i++) {
+ sv = shadow_get(&objs[i], SV_ID2); /* 'int' pairs */
+ if (!sv) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if ((int **)sv == sv2[i] && *sv2[i] == pnfields2[i])
+ pr_info(" got expected PTR%d -> PTR%d result\n",
+ ptr_id(sv2[i]), ptr_id(*sv2[i]));
+ }
+
+ /* pass 6: free all the <objs[*], SV_ID2> svar pairs too. */
+ shadow_free_all(SV_ID2, NULL); /* 'int' pairs */
+ for (i = 0; i < NUM_OBJS; i++) {
+ sv = shadow_get(&objs[i], SV_ID2);
+ if (!sv)
+ pr_info(" got expected NULL result\n");
+ }
+
+ free_ptr_list();
+
+ return 0;
+out:
+ shadow_free_all(SV_ID1, NULL); /* 'char' pairs */
+ shadow_free_all(SV_ID2, NULL); /* 'int' pairs */
+ free_ptr_list();
+
+ return ret;
+}
+
+static void test_klp_shadow_vars_exit(void)
+{
+}
+
+module_init(test_klp_shadow_vars_init);
+module_exit(test_klp_shadow_vars_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Joe Lawrence <joe.lawrence@redhat.com>");
+MODULE_DESCRIPTION("Livepatch test: shadow variables");
diff --git a/lib/livepatch/test_klp_state.c b/lib/livepatch/test_klp_state.c
new file mode 100644
index 000000000..57a4253ac
--- /dev/null
+++ b/lib/livepatch/test_klp_state.c
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2019 SUSE
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/printk.h>
+#include <linux/livepatch.h>
+
+#define CONSOLE_LOGLEVEL_STATE 1
+/* Version 1 does not support migration. */
+#define CONSOLE_LOGLEVEL_STATE_VERSION 1
+
+static const char *const module_state[] = {
+ [MODULE_STATE_LIVE] = "[MODULE_STATE_LIVE] Normal state",
+ [MODULE_STATE_COMING] = "[MODULE_STATE_COMING] Fully formed, running module_init",
+ [MODULE_STATE_GOING] = "[MODULE_STATE_GOING] Going away",
+ [MODULE_STATE_UNFORMED] = "[MODULE_STATE_UNFORMED] Still setting it up",
+};
+
+static void callback_info(const char *callback, struct klp_object *obj)
+{
+ if (obj->mod)
+ pr_info("%s: %s -> %s\n", callback, obj->mod->name,
+ module_state[obj->mod->state]);
+ else
+ pr_info("%s: vmlinux\n", callback);
+}
+
+static struct klp_patch patch;
+
+static int allocate_loglevel_state(void)
+{
+ struct klp_state *loglevel_state;
+
+ loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE);
+ if (!loglevel_state)
+ return -EINVAL;
+
+ loglevel_state->data = kzalloc(sizeof(console_loglevel), GFP_KERNEL);
+ if (!loglevel_state->data)
+ return -ENOMEM;
+
+ pr_info("%s: allocating space to store console_loglevel\n",
+ __func__);
+ return 0;
+}
+
+static void fix_console_loglevel(void)
+{
+ struct klp_state *loglevel_state;
+
+ loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE);
+ if (!loglevel_state)
+ return;
+
+ pr_info("%s: fixing console_loglevel\n", __func__);
+ *(int *)loglevel_state->data = console_loglevel;
+ console_loglevel = CONSOLE_LOGLEVEL_MOTORMOUTH;
+}
+
+static void restore_console_loglevel(void)
+{
+ struct klp_state *loglevel_state;
+
+ loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE);
+ if (!loglevel_state)
+ return;
+
+ pr_info("%s: restoring console_loglevel\n", __func__);
+ console_loglevel = *(int *)loglevel_state->data;
+}
+
+static void free_loglevel_state(void)
+{
+ struct klp_state *loglevel_state;
+
+ loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE);
+ if (!loglevel_state)
+ return;
+
+ pr_info("%s: freeing space for the stored console_loglevel\n",
+ __func__);
+ kfree(loglevel_state->data);
+}
+
+/* Executed on object patching (ie, patch enablement) */
+static int pre_patch_callback(struct klp_object *obj)
+{
+ callback_info(__func__, obj);
+ return allocate_loglevel_state();
+}
+
+/* Executed on object patching (ie, patch enablement) */
+static void post_patch_callback(struct klp_object *obj)
+{
+ callback_info(__func__, obj);
+ fix_console_loglevel();
+}
+
+/* Executed on object unpatching (ie, patch disablement) */
+static void pre_unpatch_callback(struct klp_object *obj)
+{
+ callback_info(__func__, obj);
+ restore_console_loglevel();
+}
+
+/* Executed on object unpatching (ie, patch disablement) */
+static void post_unpatch_callback(struct klp_object *obj)
+{
+ callback_info(__func__, obj);
+ free_loglevel_state();
+}
+
+static struct klp_func no_funcs[] = {
+ {}
+};
+
+static struct klp_object objs[] = {
+ {
+ .name = NULL, /* vmlinux */
+ .funcs = no_funcs,
+ .callbacks = {
+ .pre_patch = pre_patch_callback,
+ .post_patch = post_patch_callback,
+ .pre_unpatch = pre_unpatch_callback,
+ .post_unpatch = post_unpatch_callback,
+ },
+ }, { }
+};
+
+static struct klp_state states[] = {
+ {
+ .id = CONSOLE_LOGLEVEL_STATE,
+ .version = CONSOLE_LOGLEVEL_STATE_VERSION,
+ }, { }
+};
+
+static struct klp_patch patch = {
+ .mod = THIS_MODULE,
+ .objs = objs,
+ .states = states,
+ .replace = true,
+};
+
+static int test_klp_state_init(void)
+{
+ return klp_enable_patch(&patch);
+}
+
+static void test_klp_state_exit(void)
+{
+}
+
+module_init(test_klp_state_init);
+module_exit(test_klp_state_exit);
+MODULE_LICENSE("GPL");
+MODULE_INFO(livepatch, "Y");
+MODULE_AUTHOR("Petr Mladek <pmladek@suse.com>");
+MODULE_DESCRIPTION("Livepatch test: system state modification");
diff --git a/lib/livepatch/test_klp_state2.c b/lib/livepatch/test_klp_state2.c
new file mode 100644
index 000000000..c978ea4d5
--- /dev/null
+++ b/lib/livepatch/test_klp_state2.c
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2019 SUSE
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/printk.h>
+#include <linux/livepatch.h>
+
+#define CONSOLE_LOGLEVEL_STATE 1
+/* Version 2 supports migration. */
+#define CONSOLE_LOGLEVEL_STATE_VERSION 2
+
+static const char *const module_state[] = {
+ [MODULE_STATE_LIVE] = "[MODULE_STATE_LIVE] Normal state",
+ [MODULE_STATE_COMING] = "[MODULE_STATE_COMING] Fully formed, running module_init",
+ [MODULE_STATE_GOING] = "[MODULE_STATE_GOING] Going away",
+ [MODULE_STATE_UNFORMED] = "[MODULE_STATE_UNFORMED] Still setting it up",
+};
+
+static void callback_info(const char *callback, struct klp_object *obj)
+{
+ if (obj->mod)
+ pr_info("%s: %s -> %s\n", callback, obj->mod->name,
+ module_state[obj->mod->state]);
+ else
+ pr_info("%s: vmlinux\n", callback);
+}
+
+static struct klp_patch patch;
+
+static int allocate_loglevel_state(void)
+{
+ struct klp_state *loglevel_state, *prev_loglevel_state;
+
+ prev_loglevel_state = klp_get_prev_state(CONSOLE_LOGLEVEL_STATE);
+ if (prev_loglevel_state) {
+ pr_info("%s: space to store console_loglevel already allocated\n",
+ __func__);
+ return 0;
+ }
+
+ loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE);
+ if (!loglevel_state)
+ return -EINVAL;
+
+ loglevel_state->data = kzalloc(sizeof(console_loglevel), GFP_KERNEL);
+ if (!loglevel_state->data)
+ return -ENOMEM;
+
+ pr_info("%s: allocating space to store console_loglevel\n",
+ __func__);
+ return 0;
+}
+
+static void fix_console_loglevel(void)
+{
+ struct klp_state *loglevel_state, *prev_loglevel_state;
+
+ loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE);
+ if (!loglevel_state)
+ return;
+
+ prev_loglevel_state = klp_get_prev_state(CONSOLE_LOGLEVEL_STATE);
+ if (prev_loglevel_state) {
+ pr_info("%s: taking over the console_loglevel change\n",
+ __func__);
+ loglevel_state->data = prev_loglevel_state->data;
+ return;
+ }
+
+ pr_info("%s: fixing console_loglevel\n", __func__);
+ *(int *)loglevel_state->data = console_loglevel;
+ console_loglevel = CONSOLE_LOGLEVEL_MOTORMOUTH;
+}
+
+static void restore_console_loglevel(void)
+{
+ struct klp_state *loglevel_state, *prev_loglevel_state;
+
+ prev_loglevel_state = klp_get_prev_state(CONSOLE_LOGLEVEL_STATE);
+ if (prev_loglevel_state) {
+ pr_info("%s: passing the console_loglevel change back to the old livepatch\n",
+ __func__);
+ return;
+ }
+
+ loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE);
+ if (!loglevel_state)
+ return;
+
+ pr_info("%s: restoring console_loglevel\n", __func__);
+ console_loglevel = *(int *)loglevel_state->data;
+}
+
+static void free_loglevel_state(void)
+{
+ struct klp_state *loglevel_state, *prev_loglevel_state;
+
+ prev_loglevel_state = klp_get_prev_state(CONSOLE_LOGLEVEL_STATE);
+ if (prev_loglevel_state) {
+ pr_info("%s: keeping space to store console_loglevel\n",
+ __func__);
+ return;
+ }
+
+ loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE);
+ if (!loglevel_state)
+ return;
+
+ pr_info("%s: freeing space for the stored console_loglevel\n",
+ __func__);
+ kfree(loglevel_state->data);
+}
+
+/* Executed on object patching (ie, patch enablement) */
+static int pre_patch_callback(struct klp_object *obj)
+{
+ callback_info(__func__, obj);
+ return allocate_loglevel_state();
+}
+
+/* Executed on object patching (ie, patch enablement) */
+static void post_patch_callback(struct klp_object *obj)
+{
+ callback_info(__func__, obj);
+ fix_console_loglevel();
+}
+
+/* Executed on object unpatching (ie, patch disablement) */
+static void pre_unpatch_callback(struct klp_object *obj)
+{
+ callback_info(__func__, obj);
+ restore_console_loglevel();
+}
+
+/* Executed on object unpatching (ie, patch disablement) */
+static void post_unpatch_callback(struct klp_object *obj)
+{
+ callback_info(__func__, obj);
+ free_loglevel_state();
+}
+
+static struct klp_func no_funcs[] = {
+ {}
+};
+
+static struct klp_object objs[] = {
+ {
+ .name = NULL, /* vmlinux */
+ .funcs = no_funcs,
+ .callbacks = {
+ .pre_patch = pre_patch_callback,
+ .post_patch = post_patch_callback,
+ .pre_unpatch = pre_unpatch_callback,
+ .post_unpatch = post_unpatch_callback,
+ },
+ }, { }
+};
+
+static struct klp_state states[] = {
+ {
+ .id = CONSOLE_LOGLEVEL_STATE,
+ .version = CONSOLE_LOGLEVEL_STATE_VERSION,
+ }, { }
+};
+
+static struct klp_patch patch = {
+ .mod = THIS_MODULE,
+ .objs = objs,
+ .states = states,
+ .replace = true,
+};
+
+static int test_klp_state2_init(void)
+{
+ return klp_enable_patch(&patch);
+}
+
+static void test_klp_state2_exit(void)
+{
+}
+
+module_init(test_klp_state2_init);
+module_exit(test_klp_state2_exit);
+MODULE_LICENSE("GPL");
+MODULE_INFO(livepatch, "Y");
+MODULE_AUTHOR("Petr Mladek <pmladek@suse.com>");
+MODULE_DESCRIPTION("Livepatch test: system state modification");
diff --git a/lib/livepatch/test_klp_state3.c b/lib/livepatch/test_klp_state3.c
new file mode 100644
index 000000000..9226579d1
--- /dev/null
+++ b/lib/livepatch/test_klp_state3.c
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2019 SUSE
+
+/* The console loglevel fix is the same in the next cumulative patch. */
+#include "test_klp_state2.c"
diff --git a/lib/llist.c b/lib/llist.c
new file mode 100644
index 000000000..6e668fa5a
--- /dev/null
+++ b/lib/llist.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Lock-less NULL-terminated singly linked list
+ *
+ * The basic atomic operation of this list is cmpxchg on a long. On
+ * architectures that don't have an NMI-safe cmpxchg implementation, the
+ * list can NOT be used in NMI handlers. Code that uses the list in an
+ * NMI handler should therefore depend on CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
+ *
+ * Copyright 2010,2011 Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ */
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/llist.h>
+
+/**
+ * llist_add_batch - add several linked entries in batch
+ * @new_first: first entry in batch to be added
+ * @new_last: last entry in batch to be added
+ * @head: the head for your lock-less list
+ *
+ * Return whether the list was empty before adding.
+ */
+bool llist_add_batch(struct llist_node *new_first, struct llist_node *new_last,
+ struct llist_head *head)
+{
+ struct llist_node *first = READ_ONCE(head->first);
+
+ do {
+ new_last->next = first;
+ } while (!try_cmpxchg(&head->first, &first, new_first));
+
+ return !first;
+}
+EXPORT_SYMBOL_GPL(llist_add_batch);
+
+/**
+ * llist_del_first - delete the first entry of lock-less list
+ * @head: the head for your lock-less list
+ *
+ * If the list is empty, return NULL; otherwise return the first entry
+ * deleted, which is the most recently added one.
+ *
+ * Only one llist_del_first consumer can run concurrently with multiple
+ * llist_add producers without a lock, because otherwise a
+ * llist_del_first, llist_add, llist_add (or llist_del_all, llist_add,
+ * llist_add) sequence in another consumer may change @head->first->next
+ * while leaving @head->first unchanged. If multiple consumers are
+ * needed, use llist_del_all or a lock between the consumers.
+ */
+struct llist_node *llist_del_first(struct llist_head *head)
+{
+ struct llist_node *entry, *next;
+
+ entry = smp_load_acquire(&head->first);
+ do {
+ if (entry == NULL)
+ return NULL;
+ next = READ_ONCE(entry->next);
+ } while (!try_cmpxchg(&head->first, &entry, next));
+
+ return entry;
+}
+EXPORT_SYMBOL_GPL(llist_del_first);
+
+/**
+ * llist_reverse_order - reverse order of a llist chain
+ * @head: first item of the list to be reversed
+ *
+ * Reverse the order of a chain of llist entries and return the
+ * new first entry.
+ */
+struct llist_node *llist_reverse_order(struct llist_node *head)
+{
+ struct llist_node *new_head = NULL;
+
+ while (head) {
+ struct llist_node *tmp = head;
+ head = head->next;
+ tmp->next = new_head;
+ new_head = tmp;
+ }
+
+ return new_head;
+}
+EXPORT_SYMBOL_GPL(llist_reverse_order);
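A typical single-consumer arrangement built from these primitives; struct job and the function names are illustrative. Producers may push concurrently, and the single consumer detaches the whole chain at once and restores FIFO order:

struct job {
	struct llist_node node;
	int payload;
};

static LLIST_HEAD(jobs);

static void submit(struct job *j)	/* any number of producers */
{
	llist_add(&j->node, &jobs);
}

static void drain(void)			/* single consumer */
{
	struct llist_node *batch = llist_del_all(&jobs);
	struct job *j, *tmp;

	batch = llist_reverse_order(batch);	/* LIFO -> FIFO */
	llist_for_each_entry_safe(j, tmp, batch, node)
		pr_info("payload=%d\n", j->payload);
}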
diff --git a/lib/locking-selftest-hardirq.h b/lib/locking-selftest-hardirq.h
new file mode 100644
index 000000000..0d144a6d6
--- /dev/null
+++ b/lib/locking-selftest-hardirq.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef IRQ_DISABLE
+#undef IRQ_ENABLE
+#undef IRQ_ENTER
+#undef IRQ_EXIT
+
+#define IRQ_ENABLE HARDIRQ_ENABLE
+#define IRQ_DISABLE HARDIRQ_DISABLE
+#define IRQ_ENTER HARDIRQ_ENTER
+#define IRQ_EXIT HARDIRQ_EXIT
diff --git a/lib/locking-selftest-mutex.h b/lib/locking-selftest-mutex.h
new file mode 100644
index 000000000..7526c7746
--- /dev/null
+++ b/lib/locking-selftest-mutex.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef LOCK
+#define LOCK ML
+
+#undef UNLOCK
+#define UNLOCK MU
+
+#undef RLOCK
+#undef WLOCK
+
+#undef INIT
+#define INIT MI
diff --git a/lib/locking-selftest-rlock-hardirq.h b/lib/locking-selftest-rlock-hardirq.h
new file mode 100644
index 000000000..9f517ebcb
--- /dev/null
+++ b/lib/locking-selftest-rlock-hardirq.h
@@ -0,0 +1,2 @@
+#include "locking-selftest-rlock.h"
+#include "locking-selftest-hardirq.h"
diff --git a/lib/locking-selftest-rlock-softirq.h b/lib/locking-selftest-rlock-softirq.h
new file mode 100644
index 000000000..981455db7
--- /dev/null
+++ b/lib/locking-selftest-rlock-softirq.h
@@ -0,0 +1,2 @@
+#include "locking-selftest-rlock.h"
+#include "locking-selftest-softirq.h"
diff --git a/lib/locking-selftest-rlock.h b/lib/locking-selftest-rlock.h
new file mode 100644
index 000000000..eccab18f5
--- /dev/null
+++ b/lib/locking-selftest-rlock.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef LOCK
+#define LOCK RL
+
+#undef UNLOCK
+#define UNLOCK RU
+
+#undef RLOCK
+#define RLOCK RL
+
+#undef WLOCK
+#define WLOCK WL
+
+#undef INIT
+#define INIT RWI
diff --git a/lib/locking-selftest-rsem.h b/lib/locking-selftest-rsem.h
new file mode 100644
index 000000000..4544858f9
--- /dev/null
+++ b/lib/locking-selftest-rsem.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef LOCK
+#define LOCK RSL
+
+#undef UNLOCK
+#define UNLOCK RSU
+
+#undef RLOCK
+#define RLOCK RSL
+
+#undef WLOCK
+#define WLOCK WSL
+
+#undef INIT
+#define INIT RWSI
diff --git a/lib/locking-selftest-rtmutex.h b/lib/locking-selftest-rtmutex.h
new file mode 100644
index 000000000..fce8714c4
--- /dev/null
+++ b/lib/locking-selftest-rtmutex.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef LOCK
+#define LOCK RTL
+
+#undef UNLOCK
+#define UNLOCK RTU
+
+#undef RLOCK
+#undef WLOCK
+
+#undef INIT
+#define INIT RTI
diff --git a/lib/locking-selftest-softirq.h b/lib/locking-selftest-softirq.h
new file mode 100644
index 000000000..6adde4867
--- /dev/null
+++ b/lib/locking-selftest-softirq.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef IRQ_DISABLE
+#undef IRQ_ENABLE
+#undef IRQ_ENTER
+#undef IRQ_EXIT
+
+#define IRQ_DISABLE SOFTIRQ_DISABLE
+#define IRQ_ENABLE SOFTIRQ_ENABLE
+#define IRQ_ENTER SOFTIRQ_ENTER
+#define IRQ_EXIT SOFTIRQ_EXIT
diff --git a/lib/locking-selftest-spin-hardirq.h b/lib/locking-selftest-spin-hardirq.h
new file mode 100644
index 000000000..693198dce
--- /dev/null
+++ b/lib/locking-selftest-spin-hardirq.h
@@ -0,0 +1,2 @@
+#include "locking-selftest-spin.h"
+#include "locking-selftest-hardirq.h"
diff --git a/lib/locking-selftest-spin-softirq.h b/lib/locking-selftest-spin-softirq.h
new file mode 100644
index 000000000..c472e2a87
--- /dev/null
+++ b/lib/locking-selftest-spin-softirq.h
@@ -0,0 +1,2 @@
+#include "locking-selftest-spin.h"
+#include "locking-selftest-softirq.h"
diff --git a/lib/locking-selftest-spin.h b/lib/locking-selftest-spin.h
new file mode 100644
index 000000000..6b24d699e
--- /dev/null
+++ b/lib/locking-selftest-spin.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef LOCK
+#define LOCK L
+
+#undef UNLOCK
+#define UNLOCK U
+
+#undef RLOCK
+#undef WLOCK
+
+#undef INIT
+#define INIT SI
diff --git a/lib/locking-selftest-wlock-hardirq.h b/lib/locking-selftest-wlock-hardirq.h
new file mode 100644
index 000000000..2dd2e5122
--- /dev/null
+++ b/lib/locking-selftest-wlock-hardirq.h
@@ -0,0 +1,2 @@
+#include "locking-selftest-wlock.h"
+#include "locking-selftest-hardirq.h"
diff --git a/lib/locking-selftest-wlock-softirq.h b/lib/locking-selftest-wlock-softirq.h
new file mode 100644
index 000000000..cb80d1cb9
--- /dev/null
+++ b/lib/locking-selftest-wlock-softirq.h
@@ -0,0 +1,2 @@
+#include "locking-selftest-wlock.h"
+#include "locking-selftest-softirq.h"
diff --git a/lib/locking-selftest-wlock.h b/lib/locking-selftest-wlock.h
new file mode 100644
index 000000000..0bc51c8cf
--- /dev/null
+++ b/lib/locking-selftest-wlock.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef LOCK
+#define LOCK WL
+
+#undef UNLOCK
+#define UNLOCK WU
+
+#undef RLOCK
+#define RLOCK RL
+
+#undef WLOCK
+#define WLOCK WL
+
+#undef INIT
+#define INIT RWI
diff --git a/lib/locking-selftest-wsem.h b/lib/locking-selftest-wsem.h
new file mode 100644
index 000000000..5ef18f931
--- /dev/null
+++ b/lib/locking-selftest-wsem.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef LOCK
+#define LOCK WSL
+
+#undef UNLOCK
+#define UNLOCK WSU
+
+#undef RLOCK
+#define RLOCK RSL
+
+#undef WLOCK
+#define WLOCK WSL
+
+#undef INIT
+#define INIT RWSI
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
new file mode 100644
index 000000000..6f6a5fc85
--- /dev/null
+++ b/lib/locking-selftest.c
@@ -0,0 +1,2950 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * lib/locking-selftest.c
+ *
+ * Testsuite for various locking APIs: spinlocks, rwlocks,
+ * mutexes and rw-semaphores.
+ *
+ * It checks for both false positives and false negatives.
+ *
+ * Started by Ingo Molnar:
+ *
+ * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ */
+#include <linux/rwsem.h>
+#include <linux/mutex.h>
+#include <linux/ww_mutex.h>
+#include <linux/sched.h>
+#include <linux/sched/mm.h>
+#include <linux/delay.h>
+#include <linux/lockdep.h>
+#include <linux/spinlock.h>
+#include <linux/kallsyms.h>
+#include <linux/interrupt.h>
+#include <linux/debug_locks.h>
+#include <linux/irqflags.h>
+#include <linux/rtmutex.h>
+#include <linux/local_lock.h>
+
+#ifdef CONFIG_PREEMPT_RT
+# define NON_RT(...)
+#else
+# define NON_RT(...) __VA_ARGS__
+#endif
+
+/*
+ * Change this to 1 if you want to see the failure printouts:
+ */
+static unsigned int debug_locks_verbose;
+unsigned int force_read_lock_recursive;
+
+static DEFINE_WD_CLASS(ww_lockdep);
+
+static int __init setup_debug_locks_verbose(char *str)
+{
+ get_option(&str, &debug_locks_verbose);
+
+ return 1;
+}
+
+__setup("debug_locks_verbose=", setup_debug_locks_verbose);
+
+#define FAILURE 0
+#define SUCCESS 1
+
+#define LOCKTYPE_SPIN 0x1
+#define LOCKTYPE_RWLOCK 0x2
+#define LOCKTYPE_MUTEX 0x4
+#define LOCKTYPE_RWSEM 0x8
+#define LOCKTYPE_WW 0x10
+#define LOCKTYPE_RTMUTEX 0x20
+#define LOCKTYPE_LL 0x40
+#define LOCKTYPE_SPECIAL 0x80
+
+static struct ww_acquire_ctx t, t2;
+static struct ww_mutex o, o2, o3;
+
+/*
+ * Normal standalone locks, for the circular and irq-context
+ * dependency tests:
+ */
+static DEFINE_SPINLOCK(lock_A);
+static DEFINE_SPINLOCK(lock_B);
+static DEFINE_SPINLOCK(lock_C);
+static DEFINE_SPINLOCK(lock_D);
+
+static DEFINE_RAW_SPINLOCK(raw_lock_A);
+static DEFINE_RAW_SPINLOCK(raw_lock_B);
+
+static DEFINE_RWLOCK(rwlock_A);
+static DEFINE_RWLOCK(rwlock_B);
+static DEFINE_RWLOCK(rwlock_C);
+static DEFINE_RWLOCK(rwlock_D);
+
+static DEFINE_MUTEX(mutex_A);
+static DEFINE_MUTEX(mutex_B);
+static DEFINE_MUTEX(mutex_C);
+static DEFINE_MUTEX(mutex_D);
+
+static DECLARE_RWSEM(rwsem_A);
+static DECLARE_RWSEM(rwsem_B);
+static DECLARE_RWSEM(rwsem_C);
+static DECLARE_RWSEM(rwsem_D);
+
+#ifdef CONFIG_RT_MUTEXES
+
+static DEFINE_RT_MUTEX(rtmutex_A);
+static DEFINE_RT_MUTEX(rtmutex_B);
+static DEFINE_RT_MUTEX(rtmutex_C);
+static DEFINE_RT_MUTEX(rtmutex_D);
+
+#endif
+
+/*
+ * Locks that we also initialize dynamically, so that e.g. X1 and X2
+ * become two instances of the same class while X* and Y* are different
+ * classes. We do this so that we do not trigger a real lockup:
+ */
+static DEFINE_SPINLOCK(lock_X1);
+static DEFINE_SPINLOCK(lock_X2);
+static DEFINE_SPINLOCK(lock_Y1);
+static DEFINE_SPINLOCK(lock_Y2);
+static DEFINE_SPINLOCK(lock_Z1);
+static DEFINE_SPINLOCK(lock_Z2);
+
+static DEFINE_RWLOCK(rwlock_X1);
+static DEFINE_RWLOCK(rwlock_X2);
+static DEFINE_RWLOCK(rwlock_Y1);
+static DEFINE_RWLOCK(rwlock_Y2);
+static DEFINE_RWLOCK(rwlock_Z1);
+static DEFINE_RWLOCK(rwlock_Z2);
+
+static DEFINE_MUTEX(mutex_X1);
+static DEFINE_MUTEX(mutex_X2);
+static DEFINE_MUTEX(mutex_Y1);
+static DEFINE_MUTEX(mutex_Y2);
+static DEFINE_MUTEX(mutex_Z1);
+static DEFINE_MUTEX(mutex_Z2);
+
+static DECLARE_RWSEM(rwsem_X1);
+static DECLARE_RWSEM(rwsem_X2);
+static DECLARE_RWSEM(rwsem_Y1);
+static DECLARE_RWSEM(rwsem_Y2);
+static DECLARE_RWSEM(rwsem_Z1);
+static DECLARE_RWSEM(rwsem_Z2);
+
+#ifdef CONFIG_RT_MUTEXES
+
+static DEFINE_RT_MUTEX(rtmutex_X1);
+static DEFINE_RT_MUTEX(rtmutex_X2);
+static DEFINE_RT_MUTEX(rtmutex_Y1);
+static DEFINE_RT_MUTEX(rtmutex_Y2);
+static DEFINE_RT_MUTEX(rtmutex_Z1);
+static DEFINE_RT_MUTEX(rtmutex_Z2);
+
+#endif
+
+static DEFINE_PER_CPU(local_lock_t, local_A);
+
+/*
+ * Non-inlined runtime initializers, to let separate locks share the
+ * same lock-class: the *_init() helpers embed a static lock_class_key
+ * per call site, so routing several locks through one out-of-line
+ * function puts them all in one class:
+ */
+#define INIT_CLASS_FUNC(class) \
+static noinline void \
+init_class_##class(spinlock_t *lock, rwlock_t *rwlock, \
+ struct mutex *mutex, struct rw_semaphore *rwsem)\
+{ \
+ spin_lock_init(lock); \
+ rwlock_init(rwlock); \
+ mutex_init(mutex); \
+ init_rwsem(rwsem); \
+}
+
+INIT_CLASS_FUNC(X)
+INIT_CLASS_FUNC(Y)
+INIT_CLASS_FUNC(Z)
+
+static void init_shared_classes(void)
+{
+#ifdef CONFIG_RT_MUTEXES
+ static struct lock_class_key rt_X, rt_Y, rt_Z;
+
+ __rt_mutex_init(&rtmutex_X1, __func__, &rt_X);
+ __rt_mutex_init(&rtmutex_X2, __func__, &rt_X);
+ __rt_mutex_init(&rtmutex_Y1, __func__, &rt_Y);
+ __rt_mutex_init(&rtmutex_Y2, __func__, &rt_Y);
+ __rt_mutex_init(&rtmutex_Z1, __func__, &rt_Z);
+ __rt_mutex_init(&rtmutex_Z2, __func__, &rt_Z);
+#endif
+
+ init_class_X(&lock_X1, &rwlock_X1, &mutex_X1, &rwsem_X1);
+ init_class_X(&lock_X2, &rwlock_X2, &mutex_X2, &rwsem_X2);
+
+ init_class_Y(&lock_Y1, &rwlock_Y1, &mutex_Y1, &rwsem_Y1);
+ init_class_Y(&lock_Y2, &rwlock_Y2, &mutex_Y2, &rwsem_Y2);
+
+ init_class_Z(&lock_Z1, &rwlock_Z1, &mutex_Z1, &rwsem_Z1);
+ init_class_Z(&lock_Z2, &rwlock_Z2, &mutex_Z2, &rwsem_Z2);
+}
+
+/*
+ * For spinlocks and rwlocks we also do hardirq-safe / softirq-safe tests.
+ * The following functions use a lock from a simulated hardirq/softirq
+ * context, causing the locks to be marked as hardirq-safe/softirq-safe:
+ */
+
+#define HARDIRQ_DISABLE local_irq_disable
+#define HARDIRQ_ENABLE local_irq_enable
+
+#define HARDIRQ_ENTER() \
+ local_irq_disable(); \
+ __irq_enter(); \
+ lockdep_hardirq_threaded(); \
+ WARN_ON(!in_irq());
+
+#define HARDIRQ_EXIT() \
+ __irq_exit(); \
+ local_irq_enable();
+
+#define SOFTIRQ_DISABLE local_bh_disable
+#define SOFTIRQ_ENABLE local_bh_enable
+
+#define SOFTIRQ_ENTER() \
+ local_bh_disable(); \
+ local_irq_disable(); \
+ lockdep_softirq_enter(); \
+ WARN_ON(!in_softirq());
+
+#define SOFTIRQ_EXIT() \
+ lockdep_softirq_exit(); \
+ local_irq_enable(); \
+ local_bh_enable();
+
+/*
+ * Shortcuts for lock/unlock API variants, to keep
+ * the testcases compact:
+ */
+#define L(x) spin_lock(&lock_##x)
+#define U(x) spin_unlock(&lock_##x)
+#define LU(x) L(x); U(x)
+#define SI(x) spin_lock_init(&lock_##x)
+
+#define WL(x) write_lock(&rwlock_##x)
+#define WU(x) write_unlock(&rwlock_##x)
+#define WLU(x) WL(x); WU(x)
+
+#define RL(x) read_lock(&rwlock_##x)
+#define RU(x) read_unlock(&rwlock_##x)
+#define RLU(x) RL(x); RU(x)
+#define RWI(x) rwlock_init(&rwlock_##x)
+
+#define ML(x) mutex_lock(&mutex_##x)
+#define MU(x) mutex_unlock(&mutex_##x)
+#define MI(x) mutex_init(&mutex_##x)
+
+#define RTL(x) rt_mutex_lock(&rtmutex_##x)
+#define RTU(x) rt_mutex_unlock(&rtmutex_##x)
+#define RTI(x) rt_mutex_init(&rtmutex_##x)
+
+#define WSL(x) down_write(&rwsem_##x)
+#define WSU(x) up_write(&rwsem_##x)
+
+#define RSL(x) down_read(&rwsem_##x)
+#define RSU(x) up_read(&rwsem_##x)
+#define RWSI(x) init_rwsem(&rwsem_##x)
+
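+/*
+ * CONFIG_DEBUG_WW_MUTEX_SLOWPATH makes ww_mutex_lock() inject spurious
+ * -EDEADLK returns once deadlock_inject_countdown runs out; push the
+ * countdown out of reach so the selftests behave deterministically:
+ */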
+#ifndef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
+#define WWAI(x) ww_acquire_init(x, &ww_lockdep)
+#else
+#define WWAI(x) do { ww_acquire_init(x, &ww_lockdep); (x)->deadlock_inject_countdown = ~0U; } while (0)
+#endif
+#define WWAD(x) ww_acquire_done(x)
+#define WWAF(x) ww_acquire_fini(x)
+
+#define WWL(x, c) ww_mutex_lock(x, c)
+#define WWT(x) ww_mutex_trylock(x, NULL)
+#define WWL1(x) ww_mutex_lock(x, NULL)
+#define WWU(x) ww_mutex_unlock(x)
+
+
+#define LOCK_UNLOCK_2(x,y) LOCK(x); LOCK(y); UNLOCK(y); UNLOCK(x)
+
+/*
+ * Generate different permutations of the same testcase, using
+ * the same basic lock-dependency/state events:
+ */
+
+#define GENERATE_TESTCASE(name) \
+ \
+static void name(void) { E(); }
+
+#define GENERATE_PERMUTATIONS_2_EVENTS(name) \
+ \
+static void name##_12(void) { E1(); E2(); } \
+static void name##_21(void) { E2(); E1(); }
+
+#define GENERATE_PERMUTATIONS_3_EVENTS(name) \
+ \
+static void name##_123(void) { E1(); E2(); E3(); } \
+static void name##_132(void) { E1(); E3(); E2(); } \
+static void name##_213(void) { E2(); E1(); E3(); } \
+static void name##_231(void) { E2(); E3(); E1(); } \
+static void name##_312(void) { E3(); E1(); E2(); } \
+static void name##_321(void) { E3(); E2(); E1(); }
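+
+/*
+ * For example (hypothetical expansion), after #include
+ * "locking-selftest-spin.h" has bound LOCK to the spinlock shortcuts,
+ *
+ *	#define E()	LOCK(X1); LOCK(X2);
+ *	GENERATE_TESTCASE(AA_spin)
+ *
+ * expands to
+ *
+ *	static void AA_spin(void)
+ *	{
+ *		spin_lock(&lock_X1);
+ *		spin_lock(&lock_X2);
+ *	}
+ *
+ * Re-including a different locking-selftest-*.h header re-binds the
+ * shortcuts, so each testcase body is stamped out once per lock type.
+ */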
+
+/*
+ * AA deadlock:
+ */
+
+#define E() \
+ \
+ LOCK(X1); \
+ LOCK(X2); /* this one should fail */
+
+/*
+ * 6 testcases:
+ */
+#include "locking-selftest-spin.h"
+GENERATE_TESTCASE(AA_spin)
+#include "locking-selftest-wlock.h"
+GENERATE_TESTCASE(AA_wlock)
+#include "locking-selftest-rlock.h"
+GENERATE_TESTCASE(AA_rlock)
+#include "locking-selftest-mutex.h"
+GENERATE_TESTCASE(AA_mutex)
+#include "locking-selftest-wsem.h"
+GENERATE_TESTCASE(AA_wsem)
+#include "locking-selftest-rsem.h"
+GENERATE_TESTCASE(AA_rsem)
+
+#ifdef CONFIG_RT_MUTEXES
+#include "locking-selftest-rtmutex.h"
+GENERATE_TESTCASE(AA_rtmutex);
+#endif
+
+#undef E
+
+/*
+ * Special case for read-locking: rwlock readers are
+ * allowed to recurse on the same lock class:
+ */
+static void rlock_AA1(void)
+{
+ RL(X1);
+ RL(X1); // this one should NOT fail
+}
+
+static void rlock_AA1B(void)
+{
+ RL(X1);
+ RL(X2); // this one should NOT fail
+}
+
+static void rsem_AA1(void)
+{
+ RSL(X1);
+ RSL(X1); // this one should fail
+}
+
+static void rsem_AA1B(void)
+{
+ RSL(X1);
+ RSL(X2); // this one should fail
+}
+/*
+ * The mixing of read and write locks is not allowed:
+ */
+static void rlock_AA2(void)
+{
+ RL(X1);
+ WL(X2); // this one should fail
+}
+
+static void rsem_AA2(void)
+{
+ RSL(X1);
+ WSL(X2); // this one should fail
+}
+
+static void rlock_AA3(void)
+{
+ WL(X1);
+ RL(X2); // this one should fail
+}
+
+static void rsem_AA3(void)
+{
+ WSL(X1);
+ RSL(X2); // this one should fail
+}
+
+/*
+ * read_lock(A)
+ * spin_lock(B)
+ *		spin_lock(B)
+ *		write_lock(A)
+ */
+static void rlock_ABBA1(void)
+{
+ RL(X1);
+ L(Y1);
+ U(Y1);
+ RU(X1);
+
+ L(Y1);
+ WL(X1);
+ WU(X1);
+ U(Y1); // should fail
+}
+
+static void rwsem_ABBA1(void)
+{
+ RSL(X1);
+ ML(Y1);
+ MU(Y1);
+ RSU(X1);
+
+ ML(Y1);
+ WSL(X1);
+ WSU(X1);
+ MU(Y1); // should fail
+}
+
+/*
+ * read_lock(A)
+ * spin_lock(B)
+ *		spin_lock(B)
+ *		write_lock(A)
+ *
+ * This test case checks whether the chain cache prevents us from
+ * detecting a read-lock/write-lock deadlock: if the chain cache doesn't
+ * distinguish read locks from write locks, the following case may happen:
+ *
+ * { read_lock(A)->lock(B) dependency exists }
+ *
+ * P0:
+ * lock(B);
+ * read_lock(A);
+ *
+ * { Not a deadlock, B -> A is added in the chain cache }
+ *
+ * P1:
+ * lock(B);
+ * write_lock(A);
+ *
+ * { B->A found in chain cache, not reported as a deadlock }
+ *
+ */
+static void rlock_chaincache_ABBA1(void)
+{
+ RL(X1);
+ L(Y1);
+ U(Y1);
+ RU(X1);
+
+ L(Y1);
+ RL(X1);
+ RU(X1);
+ U(Y1);
+
+ L(Y1);
+ WL(X1);
+ WU(X1);
+ U(Y1); // should fail
+}
+
+/*
+ * read_lock(A)
+ * spin_lock(B)
+ *		spin_lock(B)
+ *		read_lock(A)
+ */
+static void rlock_ABBA2(void)
+{
+ RL(X1);
+ L(Y1);
+ U(Y1);
+ RU(X1);
+
+ L(Y1);
+ RL(X1);
+ RU(X1);
+ U(Y1); // should NOT fail
+}
+
+static void rwsem_ABBA2(void)
+{
+ RSL(X1);
+ ML(Y1);
+ MU(Y1);
+ RSU(X1);
+
+ ML(Y1);
+ RSL(X1);
+ RSU(X1);
+ MU(Y1); // should fail
+}
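+
+/*
+ * Unlike rlock_ABBA2 above: rwsem readers are non-recursive (a reader
+ * can block behind a queued writer), so the read-side acquisition takes
+ * part in the ABBA cycle and must be reported.
+ */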
+
+
+/*
+ * write_lock(A)
+ * spin_lock(B)
+ *		spin_lock(B)
+ *		write_lock(A)
+ */
+static void rlock_ABBA3(void)
+{
+ WL(X1);
+ L(Y1);
+ U(Y1);
+ WU(X1);
+
+ L(Y1);
+ WL(X1);
+ WU(X1);
+ U(Y1); // should fail
+}
+
+static void rwsem_ABBA3(void)
+{
+ WSL(X1);
+ ML(Y1);
+ MU(Y1);
+ WSU(X1);
+
+ ML(Y1);
+ WSL(X1);
+ WSU(X1);
+ MU(Y1); // should fail
+}
+
+/*
+ * ABBA deadlock:
+ */
+
+#define E() \
+ \
+ LOCK_UNLOCK_2(A, B); \
+ LOCK_UNLOCK_2(B, A); /* fail */
+
+/*
+ * 6 testcases:
+ */
+#include "locking-selftest-spin.h"
+GENERATE_TESTCASE(ABBA_spin)
+#include "locking-selftest-wlock.h"
+GENERATE_TESTCASE(ABBA_wlock)
+#include "locking-selftest-rlock.h"
+GENERATE_TESTCASE(ABBA_rlock)
+#include "locking-selftest-mutex.h"
+GENERATE_TESTCASE(ABBA_mutex)
+#include "locking-selftest-wsem.h"
+GENERATE_TESTCASE(ABBA_wsem)
+#include "locking-selftest-rsem.h"
+GENERATE_TESTCASE(ABBA_rsem)
+
+#ifdef CONFIG_RT_MUTEXES
+#include "locking-selftest-rtmutex.h"
+GENERATE_TESTCASE(ABBA_rtmutex);
+#endif
+
+#undef E
+
+/*
+ * AB BC CA deadlock:
+ */
+
+#define E() \
+ \
+ LOCK_UNLOCK_2(A, B); \
+ LOCK_UNLOCK_2(B, C); \
+ LOCK_UNLOCK_2(C, A); /* fail */
+
+/*
+ * 6 testcases:
+ */
+#include "locking-selftest-spin.h"
+GENERATE_TESTCASE(ABBCCA_spin)
+#include "locking-selftest-wlock.h"
+GENERATE_TESTCASE(ABBCCA_wlock)
+#include "locking-selftest-rlock.h"
+GENERATE_TESTCASE(ABBCCA_rlock)
+#include "locking-selftest-mutex.h"
+GENERATE_TESTCASE(ABBCCA_mutex)
+#include "locking-selftest-wsem.h"
+GENERATE_TESTCASE(ABBCCA_wsem)
+#include "locking-selftest-rsem.h"
+GENERATE_TESTCASE(ABBCCA_rsem)
+
+#ifdef CONFIG_RT_MUTEXES
+#include "locking-selftest-rtmutex.h"
+GENERATE_TESTCASE(ABBCCA_rtmutex);
+#endif
+
+#undef E
+
+/*
+ * AB CA BC deadlock:
+ */
+
+#define E() \
+ \
+ LOCK_UNLOCK_2(A, B); \
+ LOCK_UNLOCK_2(C, A); \
+ LOCK_UNLOCK_2(B, C); /* fail */
+
+/*
+ * 6 testcases:
+ */
+#include "locking-selftest-spin.h"
+GENERATE_TESTCASE(ABCABC_spin)
+#include "locking-selftest-wlock.h"
+GENERATE_TESTCASE(ABCABC_wlock)
+#include "locking-selftest-rlock.h"
+GENERATE_TESTCASE(ABCABC_rlock)
+#include "locking-selftest-mutex.h"
+GENERATE_TESTCASE(ABCABC_mutex)
+#include "locking-selftest-wsem.h"
+GENERATE_TESTCASE(ABCABC_wsem)
+#include "locking-selftest-rsem.h"
+GENERATE_TESTCASE(ABCABC_rsem)
+
+#ifdef CONFIG_RT_MUTEXES
+#include "locking-selftest-rtmutex.h"
+GENERATE_TESTCASE(ABCABC_rtmutex);
+#endif
+
+#undef E
+
+/*
+ * AB BC CD DA deadlock:
+ */
+
+#define E() \
+ \
+ LOCK_UNLOCK_2(A, B); \
+ LOCK_UNLOCK_2(B, C); \
+ LOCK_UNLOCK_2(C, D); \
+ LOCK_UNLOCK_2(D, A); /* fail */
+
+/*
+ * 6 testcases:
+ */
+#include "locking-selftest-spin.h"
+GENERATE_TESTCASE(ABBCCDDA_spin)
+#include "locking-selftest-wlock.h"
+GENERATE_TESTCASE(ABBCCDDA_wlock)
+#include "locking-selftest-rlock.h"
+GENERATE_TESTCASE(ABBCCDDA_rlock)
+#include "locking-selftest-mutex.h"
+GENERATE_TESTCASE(ABBCCDDA_mutex)
+#include "locking-selftest-wsem.h"
+GENERATE_TESTCASE(ABBCCDDA_wsem)
+#include "locking-selftest-rsem.h"
+GENERATE_TESTCASE(ABBCCDDA_rsem)
+
+#ifdef CONFIG_RT_MUTEXES
+#include "locking-selftest-rtmutex.h"
+GENERATE_TESTCASE(ABBCCDDA_rtmutex);
+#endif
+
+#undef E
+
+/*
+ * AB CD BD DA deadlock:
+ */
+#define E() \
+ \
+ LOCK_UNLOCK_2(A, B); \
+ LOCK_UNLOCK_2(C, D); \
+ LOCK_UNLOCK_2(B, D); \
+ LOCK_UNLOCK_2(D, A); /* fail */
+
+/*
+ * 6 testcases:
+ */
+#include "locking-selftest-spin.h"
+GENERATE_TESTCASE(ABCDBDDA_spin)
+#include "locking-selftest-wlock.h"
+GENERATE_TESTCASE(ABCDBDDA_wlock)
+#include "locking-selftest-rlock.h"
+GENERATE_TESTCASE(ABCDBDDA_rlock)
+#include "locking-selftest-mutex.h"
+GENERATE_TESTCASE(ABCDBDDA_mutex)
+#include "locking-selftest-wsem.h"
+GENERATE_TESTCASE(ABCDBDDA_wsem)
+#include "locking-selftest-rsem.h"
+GENERATE_TESTCASE(ABCDBDDA_rsem)
+
+#ifdef CONFIG_RT_MUTEXES
+#include "locking-selftest-rtmutex.h"
+GENERATE_TESTCASE(ABCDBDDA_rtmutex);
+#endif
+
+#undef E
+
+/*
+ * AB CD BC DA deadlock:
+ */
+#define E() \
+ \
+ LOCK_UNLOCK_2(A, B); \
+ LOCK_UNLOCK_2(C, D); \
+ LOCK_UNLOCK_2(B, C); \
+ LOCK_UNLOCK_2(D, A); /* fail */
+
+/*
+ * 6 testcases:
+ */
+#include "locking-selftest-spin.h"
+GENERATE_TESTCASE(ABCDBCDA_spin)
+#include "locking-selftest-wlock.h"
+GENERATE_TESTCASE(ABCDBCDA_wlock)
+#include "locking-selftest-rlock.h"
+GENERATE_TESTCASE(ABCDBCDA_rlock)
+#include "locking-selftest-mutex.h"
+GENERATE_TESTCASE(ABCDBCDA_mutex)
+#include "locking-selftest-wsem.h"
+GENERATE_TESTCASE(ABCDBCDA_wsem)
+#include "locking-selftest-rsem.h"
+GENERATE_TESTCASE(ABCDBCDA_rsem)
+
+#ifdef CONFIG_RT_MUTEXES
+#include "locking-selftest-rtmutex.h"
+GENERATE_TESTCASE(ABCDBCDA_rtmutex);
+#endif
+
+#undef E
+
+#ifdef CONFIG_PREEMPT_RT
+# define RT_PREPARE_DBL_UNLOCK() { migrate_disable(); rcu_read_lock(); }
+#else
+# define RT_PREPARE_DBL_UNLOCK()
+#endif
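+
+/*
+ * On PREEMPT_RT a spin_unlock() also does migrate_enable() and
+ * rcu_read_unlock(); the deliberate double unlock below would underflow
+ * those counts, so RT_PREPARE_DBL_UNLOCK() takes one extra reference of
+ * each up front.
+ */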
+/*
+ * Double unlock:
+ */
+#define E() \
+ \
+ LOCK(A); \
+ RT_PREPARE_DBL_UNLOCK(); \
+ UNLOCK(A); \
+ UNLOCK(A); /* fail */
+
+/*
+ * 6 testcases:
+ */
+#include "locking-selftest-spin.h"
+GENERATE_TESTCASE(double_unlock_spin)
+#include "locking-selftest-wlock.h"
+GENERATE_TESTCASE(double_unlock_wlock)
+#include "locking-selftest-rlock.h"
+GENERATE_TESTCASE(double_unlock_rlock)
+#include "locking-selftest-mutex.h"
+GENERATE_TESTCASE(double_unlock_mutex)
+#include "locking-selftest-wsem.h"
+GENERATE_TESTCASE(double_unlock_wsem)
+#include "locking-selftest-rsem.h"
+GENERATE_TESTCASE(double_unlock_rsem)
+
+#ifdef CONFIG_RT_MUTEXES
+#include "locking-selftest-rtmutex.h"
+GENERATE_TESTCASE(double_unlock_rtmutex);
+#endif
+
+#undef E
+
+/*
+ * initializing a held lock:
+ */
+#define E() \
+ \
+ LOCK(A); \
+ INIT(A); /* fail */
+
+/*
+ * 6 testcases:
+ */
+#include "locking-selftest-spin.h"
+GENERATE_TESTCASE(init_held_spin)
+#include "locking-selftest-wlock.h"
+GENERATE_TESTCASE(init_held_wlock)
+#include "locking-selftest-rlock.h"
+GENERATE_TESTCASE(init_held_rlock)
+#include "locking-selftest-mutex.h"
+GENERATE_TESTCASE(init_held_mutex)
+#include "locking-selftest-wsem.h"
+GENERATE_TESTCASE(init_held_wsem)
+#include "locking-selftest-rsem.h"
+GENERATE_TESTCASE(init_held_rsem)
+
+#ifdef CONFIG_RT_MUTEXES
+#include "locking-selftest-rtmutex.h"
+GENERATE_TESTCASE(init_held_rtmutex);
+#endif
+
+#undef E
+
+/*
+ * locking an irq-safe lock with irqs enabled:
+ */
+#define E1() \
+ \
+ IRQ_ENTER(); \
+ LOCK(A); \
+ UNLOCK(A); \
+ IRQ_EXIT();
+
+#define E2() \
+ \
+ LOCK(A); \
+ UNLOCK(A);
+
+/*
+ * Generate 24 testcases:
+ */
+#include "locking-selftest-spin-hardirq.h"
+GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_spin)
+
+#include "locking-selftest-rlock-hardirq.h"
+GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_rlock)
+
+#include "locking-selftest-wlock-hardirq.h"
+GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_wlock)
+
+#ifndef CONFIG_PREEMPT_RT
+#include "locking-selftest-spin-softirq.h"
+GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_spin)
+
+#include "locking-selftest-rlock-softirq.h"
+GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_rlock)
+
+#include "locking-selftest-wlock-softirq.h"
+GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_wlock)
+#endif
+
+#undef E1
+#undef E2
+
+#ifndef CONFIG_PREEMPT_RT
+/*
+ * Enabling hardirqs with a softirq-safe lock held:
+ */
+#define E1() \
+ \
+ SOFTIRQ_ENTER(); \
+ LOCK(A); \
+ UNLOCK(A); \
+ SOFTIRQ_EXIT();
+
+#define E2() \
+ \
+ HARDIRQ_DISABLE(); \
+ LOCK(A); \
+ HARDIRQ_ENABLE(); \
+ UNLOCK(A);
+
+/*
+ * Generate 12 testcases:
+ */
+#include "locking-selftest-spin.h"
+GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_spin)
+
+#include "locking-selftest-wlock.h"
+GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_wlock)
+
+#include "locking-selftest-rlock.h"
+GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_rlock)
+
+#undef E1
+#undef E2
+
+#endif
+
+/*
+ * Enabling irqs with an irq-safe lock held:
+ */
+#define E1() \
+ \
+ IRQ_ENTER(); \
+ LOCK(A); \
+ UNLOCK(A); \
+ IRQ_EXIT();
+
+#define E2() \
+ \
+ IRQ_DISABLE(); \
+ LOCK(A); \
+ IRQ_ENABLE(); \
+ UNLOCK(A);
+
+/*
+ * Generate 24 testcases:
+ */
+#include "locking-selftest-spin-hardirq.h"
+GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_spin)
+
+#include "locking-selftest-rlock-hardirq.h"
+GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_rlock)
+
+#include "locking-selftest-wlock-hardirq.h"
+GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_wlock)
+
+#ifndef CONFIG_PREEMPT_RT
+#include "locking-selftest-spin-softirq.h"
+GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_spin)
+
+#include "locking-selftest-rlock-softirq.h"
+GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_rlock)
+
+#include "locking-selftest-wlock-softirq.h"
+GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_wlock)
+#endif
+
+#undef E1
+#undef E2
+
+/*
+ * Acquiring an irq-unsafe lock while holding an irq-safe lock:
+ */
+#define E1() \
+ \
+ LOCK(A); \
+ LOCK(B); \
+ UNLOCK(B); \
+ UNLOCK(A); \
+
+#define E2() \
+ \
+ LOCK(B); \
+ UNLOCK(B);
+
+#define E3() \
+ \
+ IRQ_ENTER(); \
+ LOCK(A); \
+ UNLOCK(A); \
+ IRQ_EXIT();
+
+/*
+ * Generate 36 testcases:
+ */
+#include "locking-selftest-spin-hardirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_spin)
+
+#include "locking-selftest-rlock-hardirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_rlock)
+
+#include "locking-selftest-wlock-hardirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_wlock)
+
+#ifndef CONFIG_PREEMPT_RT
+#include "locking-selftest-spin-softirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_spin)
+
+#include "locking-selftest-rlock-softirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_rlock)
+
+#include "locking-selftest-wlock-softirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_wlock)
+#endif
+
+#undef E1
+#undef E2
+#undef E3
+
+/*
+ * If a lock becomes softirq-safe, but earlier it took
+ * a softirq-unsafe lock:
+ */
+
+#define E1() \
+ IRQ_DISABLE(); \
+ LOCK(A); \
+ LOCK(B); \
+ UNLOCK(B); \
+ UNLOCK(A); \
+ IRQ_ENABLE();
+
+#define E2() \
+ LOCK(B); \
+ UNLOCK(B);
+
+#define E3() \
+ IRQ_ENTER(); \
+ LOCK(A); \
+ UNLOCK(A); \
+ IRQ_EXIT();
+
+/*
+ * Generate 36 testcases:
+ */
+#include "locking-selftest-spin-hardirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_spin)
+
+#include "locking-selftest-rlock-hardirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_rlock)
+
+#include "locking-selftest-wlock-hardirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_wlock)
+
+#ifndef CONFIG_PREEMPT_RT
+#include "locking-selftest-spin-softirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_spin)
+
+#include "locking-selftest-rlock-softirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_rlock)
+
+#include "locking-selftest-wlock-softirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_wlock)
+#endif
+
+#undef E1
+#undef E2
+#undef E3
+
+/*
+ * read-lock / write-lock irq inversion.
+ *
+ * Deadlock scenario:
+ *
+ * CPU#1 is at #1, i.e. it has write-locked A, but has not
+ * taken B yet.
+ *
+ * CPU#2 is at #2, i.e. it has locked B.
+ *
+ * Hardirq hits CPU#2 at point #2 and is trying to read-lock A.
+ *
+ * The deadlock occurs because CPU#1 will spin on B, and CPU#2
+ * will spin on A.
+ */
+
+#define E1() \
+ \
+ IRQ_DISABLE(); \
+ WL(A); \
+ LOCK(B); \
+ UNLOCK(B); \
+ WU(A); \
+ IRQ_ENABLE();
+
+#define E2() \
+ \
+ LOCK(B); \
+ UNLOCK(B);
+
+#define E3() \
+ \
+ IRQ_ENTER(); \
+ RL(A); \
+ RU(A); \
+ IRQ_EXIT();
+
+/*
+ * Generate 36 testcases:
+ */
+#include "locking-selftest-spin-hardirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_hard_spin)
+
+#include "locking-selftest-rlock-hardirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_hard_rlock)
+
+#include "locking-selftest-wlock-hardirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_hard_wlock)
+
+#ifndef CONFIG_PREEMPT_RT
+#include "locking-selftest-spin-softirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_spin)
+
+#include "locking-selftest-rlock-softirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_rlock)
+
+#include "locking-selftest-wlock-softirq.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_wlock)
+#endif
+
+#undef E1
+#undef E2
+#undef E3
+
+/*
+ * write-read / write-read / write-read deadlock even if read is recursive
+ */
+
+#define E1() \
+ \
+ WL(X1); \
+ RL(Y1); \
+ RU(Y1); \
+ WU(X1);
+
+#define E2() \
+ \
+ WL(Y1); \
+ RL(Z1); \
+ RU(Z1); \
+ WU(Y1);
+
+#define E3() \
+ \
+ WL(Z1); \
+ RL(X1); \
+ RU(X1); \
+ WU(Z1);
+
+#include "locking-selftest-rlock.h"
+GENERATE_PERMUTATIONS_3_EVENTS(W1R2_W2R3_W3R1)
+
+#undef E1
+#undef E2
+#undef E3
+
+/*
+ * write-write / read-read / write-read deadlock even if read is recursive
+ */
+
+#define E1() \
+ \
+ WL(X1); \
+ WL(Y1); \
+ WU(Y1); \
+ WU(X1);
+
+#define E2() \
+ \
+ RL(Y1); \
+ RL(Z1); \
+ RU(Z1); \
+ RU(Y1);
+
+#define E3() \
+ \
+ WL(Z1); \
+ RL(X1); \
+ RU(X1); \
+ WU(Z1);
+
+#include "locking-selftest-rlock.h"
+GENERATE_PERMUTATIONS_3_EVENTS(W1W2_R2R3_W3R1)
+
+#undef E1
+#undef E2
+#undef E3
+
+/*
+ * write-write / read-read / read-write is not a deadlock when read is recursive
+ */
+
+#define E1() \
+ \
+ WL(X1); \
+ WL(Y1); \
+ WU(Y1); \
+ WU(X1);
+
+#define E2() \
+ \
+ RL(Y1); \
+ RL(Z1); \
+ RU(Z1); \
+ RU(Y1);
+
+#define E3() \
+ \
+ RL(Z1); \
+ WL(X1); \
+ WU(X1); \
+ RU(Z1);
+
+#include "locking-selftest-rlock.h"
+GENERATE_PERMUTATIONS_3_EVENTS(W1R2_R2R3_W3W1)
+
+#undef E1
+#undef E2
+#undef E3
+
+/*
+ * write-read / read-read / write-write is not a deadlock when read is recursive
+ */
+
+#define E1() \
+ \
+ WL(X1); \
+ RL(Y1); \
+ RU(Y1); \
+ WU(X1);
+
+#define E2() \
+ \
+ RL(Y1); \
+ RL(Z1); \
+ RU(Z1); \
+ RU(Y1);
+
+#define E3() \
+ \
+ WL(Z1); \
+ WL(X1); \
+ WU(X1); \
+ WU(Z1);
+
+#include "locking-selftest-rlock.h"
+GENERATE_PERMUTATIONS_3_EVENTS(W1W2_R2R3_R3W1)
+
+#undef E1
+#undef E2
+#undef E3
+/*
+ * read-lock / write-lock recursion that is actually safe.
+ */
+
+#define E1() \
+ \
+ IRQ_DISABLE(); \
+ WL(A); \
+ WU(A); \
+ IRQ_ENABLE();
+
+#define E2() \
+ \
+ RL(A); \
+ RU(A); \
+
+#define E3() \
+ \
+ IRQ_ENTER(); \
+ LOCK(A); \
+ L(B); \
+ U(B); \
+ UNLOCK(A); \
+ IRQ_EXIT();
+
+/*
+ * Generate 24 testcases:
+ */
+#include "locking-selftest-hardirq.h"
+#include "locking-selftest-rlock.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_hard_rlock)
+
+#include "locking-selftest-wlock.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_hard_wlock)
+
+#ifndef CONFIG_PREEMPT_RT
+#include "locking-selftest-softirq.h"
+#include "locking-selftest-rlock.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft_rlock)
+
+#include "locking-selftest-wlock.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft_wlock)
+#endif
+
+#undef E1
+#undef E2
+#undef E3
+
+/*
+ * read-lock / write-lock recursion that is unsafe.
+ */
+
+#define E1() \
+ \
+ IRQ_DISABLE(); \
+ L(B); \
+ LOCK(A); \
+ UNLOCK(A); \
+ U(B); \
+ IRQ_ENABLE();
+
+#define E2() \
+ \
+ RL(A); \
+ RU(A); \
+
+#define E3() \
+ \
+ IRQ_ENTER(); \
+ L(B); \
+ U(B); \
+ IRQ_EXIT();
+
+/*
+ * Generate 24 testcases:
+ */
+#include "locking-selftest-hardirq.h"
+#include "locking-selftest-rlock.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_hard_rlock)
+
+#include "locking-selftest-wlock.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_hard_wlock)
+
+#ifndef CONFIG_PREEMPT_RT
+#include "locking-selftest-softirq.h"
+#include "locking-selftest-rlock.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_soft_rlock)
+
+#include "locking-selftest-wlock.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_soft_wlock)
+#endif
+
+#undef E1
+#undef E2
+#undef E3
+/*
+ * read-lock / write-lock recursion that is unsafe.
+ *
+ * A is an ENABLED_*_READ lock
+ * B is a USED_IN_*_READ lock
+ *
+ * read_lock(A);
+ * write_lock(B);
+ * <interrupt>
+ * read_lock(B);
+ * write_lock(A); // if this one is read_lock(), no deadlock
+ */
+
+#define E1() \
+ \
+ IRQ_DISABLE(); \
+ WL(B); \
+ LOCK(A); \
+ UNLOCK(A); \
+ WU(B); \
+ IRQ_ENABLE();
+
+#define E2() \
+ \
+ RL(A); \
+ RU(A); \
+
+#define E3() \
+ \
+ IRQ_ENTER(); \
+ RL(B); \
+ RU(B); \
+ IRQ_EXIT();
+
+/*
+ * Generate 24 testcases:
+ */
+#include "locking-selftest-hardirq.h"
+#include "locking-selftest-rlock.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion3_hard_rlock)
+
+#include "locking-selftest-wlock.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion3_hard_wlock)
+
+#ifndef CONFIG_PREEMPT_RT
+#include "locking-selftest-softirq.h"
+#include "locking-selftest-rlock.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion3_soft_rlock)
+
+#include "locking-selftest-wlock.h"
+GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion3_soft_wlock)
+#endif
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define I_SPINLOCK(x) lockdep_reset_lock(&lock_##x.dep_map)
+# define I_RAW_SPINLOCK(x) lockdep_reset_lock(&raw_lock_##x.dep_map)
+# define I_RWLOCK(x) lockdep_reset_lock(&rwlock_##x.dep_map)
+# define I_MUTEX(x) lockdep_reset_lock(&mutex_##x.dep_map)
+# define I_RWSEM(x) lockdep_reset_lock(&rwsem_##x.dep_map)
+# define I_WW(x) lockdep_reset_lock(&x.dep_map)
+# define I_LOCAL_LOCK(x) lockdep_reset_lock(this_cpu_ptr(&local_##x.dep_map))
+#ifdef CONFIG_RT_MUTEXES
+# define I_RTMUTEX(x) lockdep_reset_lock(&rtmutex_##x.dep_map)
+#endif
+#else
+# define I_SPINLOCK(x)
+# define I_RAW_SPINLOCK(x)
+# define I_RWLOCK(x)
+# define I_MUTEX(x)
+# define I_RWSEM(x)
+# define I_WW(x)
+# define I_LOCAL_LOCK(x)
+#endif
+
+#ifndef I_RTMUTEX
+# define I_RTMUTEX(x)
+#endif
+
+#ifdef CONFIG_RT_MUTEXES
+#define I2_RTMUTEX(x) rt_mutex_init(&rtmutex_##x)
+#else
+#define I2_RTMUTEX(x)
+#endif
+
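+/*
+ * I1() unregisters each lock's lockdep classes, I2() re-initializes the
+ * lock objects themselves; reset_locks() below uses both around a full
+ * lockdep_reset() so that every testcase starts from a clean slate:
+ */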
+#define I1(x) \
+ do { \
+ I_SPINLOCK(x); \
+ I_RWLOCK(x); \
+ I_MUTEX(x); \
+ I_RWSEM(x); \
+ I_RTMUTEX(x); \
+ } while (0)
+
+#define I2(x) \
+ do { \
+ spin_lock_init(&lock_##x); \
+ rwlock_init(&rwlock_##x); \
+ mutex_init(&mutex_##x); \
+ init_rwsem(&rwsem_##x); \
+ I2_RTMUTEX(x); \
+ } while (0)
+
+static void reset_locks(void)
+{
+ local_irq_disable();
+ lockdep_free_key_range(&ww_lockdep.acquire_key, 1);
+ lockdep_free_key_range(&ww_lockdep.mutex_key, 1);
+
+ I1(A); I1(B); I1(C); I1(D);
+ I1(X1); I1(X2); I1(Y1); I1(Y2); I1(Z1); I1(Z2);
+ I_WW(t); I_WW(t2); I_WW(o.base); I_WW(o2.base); I_WW(o3.base);
+ I_RAW_SPINLOCK(A); I_RAW_SPINLOCK(B);
+ I_LOCAL_LOCK(A);
+
+ lockdep_reset();
+
+ I2(A); I2(B); I2(C); I2(D);
+ init_shared_classes();
+ raw_spin_lock_init(&raw_lock_A);
+ raw_spin_lock_init(&raw_lock_B);
+ local_lock_init(this_cpu_ptr(&local_A));
+
+ ww_mutex_init(&o, &ww_lockdep); ww_mutex_init(&o2, &ww_lockdep); ww_mutex_init(&o3, &ww_lockdep);
+ memset(&t, 0, sizeof(t)); memset(&t2, 0, sizeof(t2));
+ memset(&ww_lockdep.acquire_key, 0, sizeof(ww_lockdep.acquire_key));
+ memset(&ww_lockdep.mutex_key, 0, sizeof(ww_lockdep.mutex_key));
+ local_irq_enable();
+}
+
+#undef I
+
+static int testcase_total;
+static int testcase_successes;
+static int expected_testcase_failures;
+static int unexpected_testcase_failures;
+
+static void dotest(void (*testcase_fn)(void), int expected, int lockclass_mask)
+{
+ int saved_preempt_count = preempt_count();
+#ifdef CONFIG_PREEMPT_RT
+#ifdef CONFIG_SMP
+ int saved_mgd_count = current->migration_disabled;
+#endif
+ int saved_rcu_count = current->rcu_read_lock_nesting;
+#endif
+
+ WARN_ON(irqs_disabled());
+
+ debug_locks_silent = !(debug_locks_verbose & lockclass_mask);
+
+ testcase_fn();
+ /*
+ * A testcase "fails" by making lockdep emit a report, which turns
+ * debug_locks off. Filter out failures that are expected when the
+ * full validator (CONFIG_PROVE_LOCKING) is not configured in:
+ */
+#ifndef CONFIG_PROVE_LOCKING
+ if (expected == FAILURE && debug_locks) {
+ expected_testcase_failures++;
+ pr_cont("failed|");
+ }
+ else
+#endif
+ if (debug_locks != expected) {
+ unexpected_testcase_failures++;
+ pr_cont("FAILED|");
+ } else {
+ testcase_successes++;
+ pr_cont(" ok |");
+ }
+ testcase_total++;
+
+ if (debug_locks_verbose & lockclass_mask)
+ pr_cont(" lockclass mask: %x, debug_locks: %d, expected: %d\n",
+ lockclass_mask, debug_locks, expected);
+ /*
+ * Some tests (e.g. double-unlock) might corrupt the preemption
+ * count, so restore it:
+ */
+ preempt_count_set(saved_preempt_count);
+
+#ifdef CONFIG_PREEMPT_RT
+#ifdef CONFIG_SMP
+ while (current->migration_disabled > saved_mgd_count)
+ migrate_enable();
+#endif
+
+ while (current->rcu_read_lock_nesting > saved_rcu_count)
+ rcu_read_unlock();
+ WARN_ON_ONCE(current->rcu_read_lock_nesting < saved_rcu_count);
+#endif
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+ if (softirq_count())
+ current->softirqs_enabled = 0;
+ else
+ current->softirqs_enabled = 1;
+#endif
+
+ reset_locks();
+}
+
+#ifdef CONFIG_RT_MUTEXES
+#define dotest_rt(fn, e, m) dotest((fn), (e), (m))
+#else
+#define dotest_rt(fn, e, m)
+#endif
+
+static inline void print_testname(const char *testname)
+{
+ printk("%33s:", testname);
+}
+
+#define DO_TESTCASE_1(desc, name, nr) \
+ print_testname(desc"/"#nr); \
+ dotest(name##_##nr, SUCCESS, LOCKTYPE_RWLOCK); \
+ pr_cont("\n");
+
+#define DO_TESTCASE_1B(desc, name, nr) \
+ print_testname(desc"/"#nr); \
+ dotest(name##_##nr, FAILURE, LOCKTYPE_RWLOCK); \
+ pr_cont("\n");
+
+#define DO_TESTCASE_1RR(desc, name, nr) \
+ print_testname(desc"/"#nr); \
+ pr_cont(" |"); \
+ dotest(name##_##nr, SUCCESS, LOCKTYPE_RWLOCK); \
+ pr_cont("\n");
+
+#define DO_TESTCASE_1RRB(desc, name, nr) \
+ print_testname(desc"/"#nr); \
+ pr_cont(" |"); \
+ dotest(name##_##nr, FAILURE, LOCKTYPE_RWLOCK); \
+ pr_cont("\n");
+
+
+#define DO_TESTCASE_3(desc, name, nr) \
+ print_testname(desc"/"#nr); \
+ dotest(name##_spin_##nr, FAILURE, LOCKTYPE_SPIN); \
+ dotest(name##_wlock_##nr, FAILURE, LOCKTYPE_RWLOCK); \
+ dotest(name##_rlock_##nr, SUCCESS, LOCKTYPE_RWLOCK); \
+ pr_cont("\n");
+
+#define DO_TESTCASE_3RW(desc, name, nr) \
+ print_testname(desc"/"#nr); \
+ dotest(name##_spin_##nr, FAILURE, LOCKTYPE_SPIN|LOCKTYPE_RWLOCK);\
+ dotest(name##_wlock_##nr, FAILURE, LOCKTYPE_RWLOCK); \
+ dotest(name##_rlock_##nr, SUCCESS, LOCKTYPE_RWLOCK); \
+ pr_cont("\n");
+
+#define DO_TESTCASE_2RW(desc, name, nr) \
+ print_testname(desc"/"#nr); \
+ pr_cont(" |"); \
+ dotest(name##_wlock_##nr, FAILURE, LOCKTYPE_RWLOCK); \
+ dotest(name##_rlock_##nr, SUCCESS, LOCKTYPE_RWLOCK); \
+ pr_cont("\n");
+
+#define DO_TESTCASE_2x2RW(desc, name, nr) \
+ DO_TESTCASE_2RW("hard-"desc, name##_hard, nr) \
+ NON_RT(DO_TESTCASE_2RW("soft-"desc, name##_soft, nr)) \
+
+#define DO_TESTCASE_6x2x2RW(desc, name) \
+ DO_TESTCASE_2x2RW(desc, name, 123); \
+ DO_TESTCASE_2x2RW(desc, name, 132); \
+ DO_TESTCASE_2x2RW(desc, name, 213); \
+ DO_TESTCASE_2x2RW(desc, name, 231); \
+ DO_TESTCASE_2x2RW(desc, name, 312); \
+ DO_TESTCASE_2x2RW(desc, name, 321);
+
+#define DO_TESTCASE_6(desc, name) \
+ print_testname(desc); \
+ dotest(name##_spin, FAILURE, LOCKTYPE_SPIN); \
+ dotest(name##_wlock, FAILURE, LOCKTYPE_RWLOCK); \
+ dotest(name##_rlock, FAILURE, LOCKTYPE_RWLOCK); \
+ dotest(name##_mutex, FAILURE, LOCKTYPE_MUTEX); \
+ dotest(name##_wsem, FAILURE, LOCKTYPE_RWSEM); \
+ dotest(name##_rsem, FAILURE, LOCKTYPE_RWSEM); \
+ dotest_rt(name##_rtmutex, FAILURE, LOCKTYPE_RTMUTEX); \
+ pr_cont("\n");
+
+#define DO_TESTCASE_6_SUCCESS(desc, name) \
+ print_testname(desc); \
+ dotest(name##_spin, SUCCESS, LOCKTYPE_SPIN); \
+ dotest(name##_wlock, SUCCESS, LOCKTYPE_RWLOCK); \
+ dotest(name##_rlock, SUCCESS, LOCKTYPE_RWLOCK); \
+ dotest(name##_mutex, SUCCESS, LOCKTYPE_MUTEX); \
+ dotest(name##_wsem, SUCCESS, LOCKTYPE_RWSEM); \
+ dotest(name##_rsem, SUCCESS, LOCKTYPE_RWSEM); \
+ dotest_rt(name##_rtmutex, SUCCESS, LOCKTYPE_RTMUTEX); \
+ pr_cont("\n");
+
+/*
+ * 'read' variant: rlocks must not trigger.
+ */
+#define DO_TESTCASE_6R(desc, name) \
+ print_testname(desc); \
+ dotest(name##_spin, FAILURE, LOCKTYPE_SPIN); \
+ dotest(name##_wlock, FAILURE, LOCKTYPE_RWLOCK); \
+ dotest(name##_rlock, SUCCESS, LOCKTYPE_RWLOCK); \
+ dotest(name##_mutex, FAILURE, LOCKTYPE_MUTEX); \
+ dotest(name##_wsem, FAILURE, LOCKTYPE_RWSEM); \
+ dotest(name##_rsem, FAILURE, LOCKTYPE_RWSEM); \
+ dotest_rt(name##_rtmutex, FAILURE, LOCKTYPE_RTMUTEX); \
+ pr_cont("\n");
+
+#define DO_TESTCASE_2I(desc, name, nr) \
+ DO_TESTCASE_1("hard-"desc, name##_hard, nr); \
+ NON_RT(DO_TESTCASE_1("soft-"desc, name##_soft, nr));
+
+#define DO_TESTCASE_2IB(desc, name, nr) \
+ DO_TESTCASE_1B("hard-"desc, name##_hard, nr); \
+ NON_RT(DO_TESTCASE_1B("soft-"desc, name##_soft, nr));
+
+#define DO_TESTCASE_6I(desc, name, nr) \
+ DO_TESTCASE_3("hard-"desc, name##_hard, nr); \
+ NON_RT(DO_TESTCASE_3("soft-"desc, name##_soft, nr));
+
+#define DO_TESTCASE_6IRW(desc, name, nr) \
+ DO_TESTCASE_3RW("hard-"desc, name##_hard, nr); \
+ NON_RT(DO_TESTCASE_3RW("soft-"desc, name##_soft, nr));
+
+#define DO_TESTCASE_2x3(desc, name) \
+ DO_TESTCASE_3(desc, name, 12); \
+ DO_TESTCASE_3(desc, name, 21);
+
+#define DO_TESTCASE_2x6(desc, name) \
+ DO_TESTCASE_6I(desc, name, 12); \
+ DO_TESTCASE_6I(desc, name, 21);
+
+#define DO_TESTCASE_6x2(desc, name) \
+ DO_TESTCASE_2I(desc, name, 123); \
+ DO_TESTCASE_2I(desc, name, 132); \
+ DO_TESTCASE_2I(desc, name, 213); \
+ DO_TESTCASE_2I(desc, name, 231); \
+ DO_TESTCASE_2I(desc, name, 312); \
+ DO_TESTCASE_2I(desc, name, 321);
+
+#define DO_TESTCASE_6x2B(desc, name) \
+ DO_TESTCASE_2IB(desc, name, 123); \
+ DO_TESTCASE_2IB(desc, name, 132); \
+ DO_TESTCASE_2IB(desc, name, 213); \
+ DO_TESTCASE_2IB(desc, name, 231); \
+ DO_TESTCASE_2IB(desc, name, 312); \
+ DO_TESTCASE_2IB(desc, name, 321);
+
+#define DO_TESTCASE_6x1RR(desc, name) \
+ DO_TESTCASE_1RR(desc, name, 123); \
+ DO_TESTCASE_1RR(desc, name, 132); \
+ DO_TESTCASE_1RR(desc, name, 213); \
+ DO_TESTCASE_1RR(desc, name, 231); \
+ DO_TESTCASE_1RR(desc, name, 312); \
+ DO_TESTCASE_1RR(desc, name, 321);
+
+#define DO_TESTCASE_6x1RRB(desc, name) \
+ DO_TESTCASE_1RRB(desc, name, 123); \
+ DO_TESTCASE_1RRB(desc, name, 132); \
+ DO_TESTCASE_1RRB(desc, name, 213); \
+ DO_TESTCASE_1RRB(desc, name, 231); \
+ DO_TESTCASE_1RRB(desc, name, 312); \
+ DO_TESTCASE_1RRB(desc, name, 321);
+
+#define DO_TESTCASE_6x6(desc, name) \
+ DO_TESTCASE_6I(desc, name, 123); \
+ DO_TESTCASE_6I(desc, name, 132); \
+ DO_TESTCASE_6I(desc, name, 213); \
+ DO_TESTCASE_6I(desc, name, 231); \
+ DO_TESTCASE_6I(desc, name, 312); \
+ DO_TESTCASE_6I(desc, name, 321);
+
+#define DO_TESTCASE_6x6RW(desc, name) \
+ DO_TESTCASE_6IRW(desc, name, 123); \
+ DO_TESTCASE_6IRW(desc, name, 132); \
+ DO_TESTCASE_6IRW(desc, name, 213); \
+ DO_TESTCASE_6IRW(desc, name, 231); \
+ DO_TESTCASE_6IRW(desc, name, 312); \
+ DO_TESTCASE_6IRW(desc, name, 321);
+
+static void ww_test_fail_acquire(void)
+{
+ int ret;
+
+ WWAI(&t);
+ t.stamp++;
+
+ ret = WWL(&o, &t);
+
+ if (WARN_ON(!o.ctx) ||
+ WARN_ON(ret))
+ return;
+
+ /* No lockdep test, pure API */
+ ret = WWL(&o, &t);
+ WARN_ON(ret != -EALREADY);
+
+ ret = WWT(&o);
+ WARN_ON(ret);
+
+ t2 = t;
+ t2.stamp++;
+ ret = WWL(&o, &t2);
+ WARN_ON(ret != -EDEADLK);
+ WWU(&o);
+
+ if (WWT(&o))
+ WWU(&o);
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ else
+ DEBUG_LOCKS_WARN_ON(1);
+#endif
+}
+
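+/*
+ * On PREEMPT_RT, ww_mutex is built on top of rt_mutex rather than
+ * 'struct mutex', so map the base-lock operations used by the tests
+ * below onto the rt_mutex API there:
+ */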
+#ifdef CONFIG_PREEMPT_RT
+#define ww_mutex_base_lock(b) rt_mutex_lock(b)
+#define ww_mutex_base_trylock(b) rt_mutex_trylock(b)
+#define ww_mutex_base_lock_nest_lock(b, b2) rt_mutex_lock_nest_lock(b, b2)
+#define ww_mutex_base_lock_interruptible(b) rt_mutex_lock_interruptible(b)
+#define ww_mutex_base_lock_killable(b) rt_mutex_lock_killable(b)
+#define ww_mutex_base_unlock(b) rt_mutex_unlock(b)
+#else
+#define ww_mutex_base_lock(b) mutex_lock(b)
+#define ww_mutex_base_trylock(b) mutex_trylock(b)
+#define ww_mutex_base_lock_nest_lock(b, b2) mutex_lock_nest_lock(b, b2)
+#define ww_mutex_base_lock_interruptible(b) mutex_lock_interruptible(b)
+#define ww_mutex_base_lock_killable(b) mutex_lock_killable(b)
+#define ww_mutex_base_unlock(b) mutex_unlock(b)
+#endif
+
+static void ww_test_normal(void)
+{
+ int ret;
+
+ WWAI(&t);
+
+ /*
+ * None of the ww_mutex codepaths should be taken in the 'normal'
+ * mutex calls. The easiest way to verify this is to use the normal
+ * mutex calls and make sure o.ctx stays unmodified.
+ */
+
+ /* mutex_lock (and indirectly, mutex_lock_nested) */
+ o.ctx = (void *)~0UL;
+ ww_mutex_base_lock(&o.base);
+ ww_mutex_base_unlock(&o.base);
+ WARN_ON(o.ctx != (void *)~0UL);
+
+ /* mutex_lock_interruptible (and *_nested) */
+ o.ctx = (void *)~0UL;
+ ret = ww_mutex_base_lock_interruptible(&o.base);
+ if (!ret)
+ ww_mutex_base_unlock(&o.base);
+ else
+ WARN_ON(1);
+ WARN_ON(o.ctx != (void *)~0UL);
+
+ /* mutex_lock_killable (and *_nested) */
+ o.ctx = (void *)~0UL;
+ ret = ww_mutex_base_lock_killable(&o.base);
+ if (!ret)
+ ww_mutex_base_unlock(&o.base);
+ else
+ WARN_ON(1);
+ WARN_ON(o.ctx != (void *)~0UL);
+
+ /* trylock, succeeding */
+ o.ctx = (void *)~0UL;
+ ret = ww_mutex_base_trylock(&o.base);
+ WARN_ON(!ret);
+ if (ret)
+ ww_mutex_base_unlock(&o.base);
+ else
+ WARN_ON(1);
+ WARN_ON(o.ctx != (void *)~0UL);
+
+ /* trylock, failing */
+ o.ctx = (void *)~0UL;
+ ww_mutex_base_lock(&o.base);
+ ret = ww_mutex_base_trylock(&o.base);
+ WARN_ON(ret);
+ ww_mutex_base_unlock(&o.base);
+ WARN_ON(o.ctx != (void *)~0UL);
+
+ /* nest_lock */
+ o.ctx = (void *)~0UL;
+ ww_mutex_base_lock_nest_lock(&o.base, &t);
+ ww_mutex_base_unlock(&o.base);
+ WARN_ON(o.ctx != (void *)~0UL);
+}
+
+static void ww_test_two_contexts(void)
+{
+ WWAI(&t);
+ WWAI(&t2);
+}
+
+static void ww_test_diff_class(void)
+{
+ WWAI(&t);
+#ifdef DEBUG_WW_MUTEXES
+ t.ww_class = NULL;
+#endif
+ WWL(&o, &t);
+}
+
+static void ww_test_context_done_twice(void)
+{
+ WWAI(&t);
+ WWAD(&t);
+ WWAD(&t);
+ WWAF(&t);
+}
+
+static void ww_test_context_unlock_twice(void)
+{
+ WWAI(&t);
+ WWAD(&t);
+ WWAF(&t);
+ WWAF(&t);
+}
+
+static void ww_test_context_fini_early(void)
+{
+ WWAI(&t);
+ WWL(&o, &t);
+ WWAD(&t);
+ WWAF(&t);
+}
+
+static void ww_test_context_lock_after_done(void)
+{
+ WWAI(&t);
+ WWAD(&t);
+ WWL(&o, &t);
+}
+
+static void ww_test_object_unlock_twice(void)
+{
+ WWL1(&o);
+ WWU(&o);
+ WWU(&o);
+}
+
+static void ww_test_object_lock_unbalanced(void)
+{
+ WWAI(&t);
+ WWL(&o, &t);
+ t.acquired = 0;
+ WWU(&o);
+ WWAF(&t);
+}
+
+static void ww_test_object_lock_stale_context(void)
+{
+ WWAI(&t);
+ o.ctx = &t2;
+ WWL(&o, &t);
+}
+
+static void ww_test_edeadlk_normal(void)
+{
+ int ret;
+
+ ww_mutex_base_lock(&o2.base);
+ o2.ctx = &t2;
+ mutex_release(&o2.base.dep_map, _THIS_IP_);
+
+ WWAI(&t);
+ t2 = t;
+ t2.stamp--;
+
+ ret = WWL(&o, &t);
+ WARN_ON(ret);
+
+ ret = WWL(&o2, &t);
+ WARN_ON(ret != -EDEADLK);
+
+ o2.ctx = NULL;
+ mutex_acquire(&o2.base.dep_map, 0, 1, _THIS_IP_);
+ ww_mutex_base_unlock(&o2.base);
+ WWU(&o);
+
+ WWL(&o2, &t);
+}
+
+static void ww_test_edeadlk_normal_slow(void)
+{
+ int ret;
+
+ ww_mutex_base_lock(&o2.base);
+ mutex_release(&o2.base.dep_map, _THIS_IP_);
+ o2.ctx = &t2;
+
+ WWAI(&t);
+ t2 = t;
+ t2.stamp--;
+
+ ret = WWL(&o, &t);
+ WARN_ON(ret);
+
+ ret = WWL(&o2, &t);
+ WARN_ON(ret != -EDEADLK);
+
+ o2.ctx = NULL;
+ mutex_acquire(&o2.base.dep_map, 0, 1, _THIS_IP_);
+ ww_mutex_base_unlock(&o2.base);
+ WWU(&o);
+
+ ww_mutex_lock_slow(&o2, &t);
+}
+
+static void ww_test_edeadlk_no_unlock(void)
+{
+ int ret;
+
+ ww_mutex_base_lock(&o2.base);
+ o2.ctx = &t2;
+ mutex_release(&o2.base.dep_map, _THIS_IP_);
+
+ WWAI(&t);
+ t2 = t;
+ t2.stamp--;
+
+ ret = WWL(&o, &t);
+ WARN_ON(ret);
+
+ ret = WWL(&o2, &t);
+ WARN_ON(ret != -EDEADLK);
+
+ o2.ctx = NULL;
+ mutex_acquire(&o2.base.dep_map, 0, 1, _THIS_IP_);
+ ww_mutex_base_unlock(&o2.base);
+
+ WWL(&o2, &t);
+}
+
+static void ww_test_edeadlk_no_unlock_slow(void)
+{
+ int ret;
+
+ ww_mutex_base_lock(&o2.base);
+ mutex_release(&o2.base.dep_map, _THIS_IP_);
+ o2.ctx = &t2;
+
+ WWAI(&t);
+ t2 = t;
+ t2.stamp--;
+
+ ret = WWL(&o, &t);
+ WARN_ON(ret);
+
+ ret = WWL(&o2, &t);
+ WARN_ON(ret != -EDEADLK);
+
+ o2.ctx = NULL;
+ mutex_acquire(&o2.base.dep_map, 0, 1, _THIS_IP_);
+ ww_mutex_base_unlock(&o2.base);
+
+ ww_mutex_lock_slow(&o2, &t);
+}
+
+static void ww_test_edeadlk_acquire_more(void)
+{
+ int ret;
+
+ ww_mutex_base_lock(&o2.base);
+ mutex_release(&o2.base.dep_map, _THIS_IP_);
+ o2.ctx = &t2;
+
+ WWAI(&t);
+ t2 = t;
+ t2.stamp--;
+
+ ret = WWL(&o, &t);
+ WARN_ON(ret);
+
+ ret = WWL(&o2, &t);
+ WARN_ON(ret != -EDEADLK);
+
+ ret = WWL(&o3, &t);
+}
+
+static void ww_test_edeadlk_acquire_more_slow(void)
+{
+ int ret;
+
+ ww_mutex_base_lock(&o2.base);
+ mutex_release(&o2.base.dep_map, _THIS_IP_);
+ o2.ctx = &t2;
+
+ WWAI(&t);
+ t2 = t;
+ t2.stamp--;
+
+ ret = WWL(&o, &t);
+ WARN_ON(ret);
+
+ ret = WWL(&o2, &t);
+ WARN_ON(ret != -EDEADLK);
+
+ ww_mutex_lock_slow(&o3, &t);
+}
+
+static void ww_test_edeadlk_acquire_more_edeadlk(void)
+{
+ int ret;
+
+ ww_mutex_base_lock(&o2.base);
+ mutex_release(&o2.base.dep_map, _THIS_IP_);
+ o2.ctx = &t2;
+
+ ww_mutex_base_lock(&o3.base);
+ mutex_release(&o3.base.dep_map, _THIS_IP_);
+ o3.ctx = &t2;
+
+ WWAI(&t);
+ t2 = t;
+ t2.stamp--;
+
+ ret = WWL(&o, &t);
+ WARN_ON(ret);
+
+ ret = WWL(&o2, &t);
+ WARN_ON(ret != -EDEADLK);
+
+ ret = WWL(&o3, &t);
+ WARN_ON(ret != -EDEADLK);
+}
+
+static void ww_test_edeadlk_acquire_more_edeadlk_slow(void)
+{
+ int ret;
+
+ ww_mutex_base_lock(&o2.base);
+ mutex_release(&o2.base.dep_map, _THIS_IP_);
+ o2.ctx = &t2;
+
+ ww_mutex_base_lock(&o3.base);
+ mutex_release(&o3.base.dep_map, _THIS_IP_);
+ o3.ctx = &t2;
+
+ WWAI(&t);
+ t2 = t;
+ t2.stamp--;
+
+ ret = WWL(&o, &t);
+ WARN_ON(ret);
+
+ ret = WWL(&o2, &t);
+ WARN_ON(ret != -EDEADLK);
+
+ ww_mutex_lock_slow(&o3, &t);
+}
+
+static void ww_test_edeadlk_acquire_wrong(void)
+{
+ int ret;
+
+ ww_mutex_base_lock(&o2.base);
+ mutex_release(&o2.base.dep_map, _THIS_IP_);
+ o2.ctx = &t2;
+
+ WWAI(&t);
+ t2 = t;
+ t2.stamp--;
+
+ ret = WWL(&o, &t);
+ WARN_ON(ret);
+
+ ret = WWL(&o2, &t);
+ WARN_ON(ret != -EDEADLK);
+ if (!ret)
+ WWU(&o2);
+
+ WWU(&o);
+
+ ret = WWL(&o3, &t);
+}
+
+static void ww_test_edeadlk_acquire_wrong_slow(void)
+{
+ int ret;
+
+ ww_mutex_base_lock(&o2.base);
+ mutex_release(&o2.base.dep_map, _THIS_IP_);
+ o2.ctx = &t2;
+
+ WWAI(&t);
+ t2 = t;
+ t2.stamp--;
+
+ ret = WWL(&o, &t);
+ WARN_ON(ret);
+
+ ret = WWL(&o2, &t);
+ WARN_ON(ret != -EDEADLK);
+ if (!ret)
+ WWU(&o2);
+
+ WWU(&o);
+
+ ww_mutex_lock_slow(&o3, &t);
+}
+
+static void ww_test_spin_nest_unlocked(void)
+{
+ spin_lock_nest_lock(&lock_A, &o.base);
+ U(A);
+}
+
+/* This is not a deadlock, because we have X1 to serialize Y1 and Y2 */
+static void ww_test_spin_nest_lock(void)
+{
+ spin_lock(&lock_X1);
+ spin_lock_nest_lock(&lock_Y1, &lock_X1);
+ spin_lock(&lock_A);
+ spin_lock_nest_lock(&lock_Y2, &lock_X1);
+ spin_unlock(&lock_A);
+ spin_unlock(&lock_Y2);
+ spin_unlock(&lock_Y1);
+ spin_unlock(&lock_X1);
+}
+
+static void ww_test_unneeded_slow(void)
+{
+ WWAI(&t);
+
+ ww_mutex_lock_slow(&o, &t);
+}
+
+static void ww_test_context_block(void)
+{
+ int ret;
+
+ WWAI(&t);
+
+ ret = WWL(&o, &t);
+ WARN_ON(ret);
+ WWL1(&o2);
+}
+
+static void ww_test_context_try(void)
+{
+ int ret;
+
+ WWAI(&t);
+
+ ret = WWL(&o, &t);
+ WARN_ON(ret);
+
+ ret = WWT(&o2);
+ WARN_ON(!ret);
+ WWU(&o2);
+ WWU(&o);
+}
+
+static void ww_test_context_context(void)
+{
+ int ret;
+
+ WWAI(&t);
+
+ ret = WWL(&o, &t);
+ WARN_ON(ret);
+
+ ret = WWL(&o2, &t);
+ WARN_ON(ret);
+
+ WWU(&o2);
+ WWU(&o);
+}
+
+static void ww_test_try_block(void)
+{
+ bool ret;
+
+ ret = WWT(&o);
+ WARN_ON(!ret);
+
+ WWL1(&o2);
+ WWU(&o2);
+ WWU(&o);
+}
+
+static void ww_test_try_try(void)
+{
+ bool ret;
+
+ ret = WWT(&o);
+ WARN_ON(!ret);
+ ret = WWT(&o2);
+ WARN_ON(!ret);
+ WWU(&o2);
+ WWU(&o);
+}
+
+static void ww_test_try_context(void)
+{
+ int ret;
+
+ ret = WWT(&o);
+ WARN_ON(!ret);
+
+ WWAI(&t);
+
+ ret = WWL(&o2, &t);
+ WARN_ON(ret);
+}
+
+static void ww_test_block_block(void)
+{
+ WWL1(&o);
+ WWL1(&o2);
+}
+
+static void ww_test_block_try(void)
+{
+ bool ret;
+
+ WWL1(&o);
+ ret = WWT(&o2);
+ WARN_ON(!ret);
+}
+
+static void ww_test_block_context(void)
+{
+ int ret;
+
+ WWL1(&o);
+ WWAI(&t);
+
+ ret = WWL(&o2, &t);
+ WARN_ON(ret);
+}
+
+static void ww_test_spin_block(void)
+{
+ L(A);
+ U(A);
+
+ WWL1(&o);
+ L(A);
+ U(A);
+ WWU(&o);
+
+ L(A);
+ WWL1(&o);
+ WWU(&o);
+ U(A);
+}
+
+static void ww_test_spin_try(void)
+{
+ bool ret;
+
+ L(A);
+ U(A);
+
+ ret = WWT(&o);
+ WARN_ON(!ret);
+ L(A);
+ U(A);
+ WWU(&o);
+
+ L(A);
+ ret = WWT(&o);
+ WARN_ON(!ret);
+ WWU(&o);
+ U(A);
+}
+
+static void ww_test_spin_context(void)
+{
+ int ret;
+
+ L(A);
+ U(A);
+
+ WWAI(&t);
+
+ ret = WWL(&o, &t);
+ WARN_ON(ret);
+ L(A);
+ U(A);
+ WWU(&o);
+
+ L(A);
+ ret = WWL(&o, &t);
+ WARN_ON(ret);
+ WWU(&o);
+ U(A);
+}
+
+static void ww_tests(void)
+{
+ printk(" --------------------------------------------------------------------------\n");
+ printk(" | Wound/wait tests |\n");
+ printk(" ---------------------\n");
+
+ print_testname("ww api failures");
+ dotest(ww_test_fail_acquire, SUCCESS, LOCKTYPE_WW);
+ dotest(ww_test_normal, SUCCESS, LOCKTYPE_WW);
+ dotest(ww_test_unneeded_slow, FAILURE, LOCKTYPE_WW);
+ pr_cont("\n");
+
+ print_testname("ww contexts mixing");
+ dotest(ww_test_two_contexts, FAILURE, LOCKTYPE_WW);
+ dotest(ww_test_diff_class, FAILURE, LOCKTYPE_WW);
+ pr_cont("\n");
+
+ print_testname("finishing ww context");
+ dotest(ww_test_context_done_twice, FAILURE, LOCKTYPE_WW);
+ dotest(ww_test_context_unlock_twice, FAILURE, LOCKTYPE_WW);
+ dotest(ww_test_context_fini_early, FAILURE, LOCKTYPE_WW);
+ dotest(ww_test_context_lock_after_done, FAILURE, LOCKTYPE_WW);
+ pr_cont("\n");
+
+ print_testname("locking mismatches");
+ dotest(ww_test_object_unlock_twice, FAILURE, LOCKTYPE_WW);
+ dotest(ww_test_object_lock_unbalanced, FAILURE, LOCKTYPE_WW);
+ dotest(ww_test_object_lock_stale_context, FAILURE, LOCKTYPE_WW);
+ pr_cont("\n");
+
+ print_testname("EDEADLK handling");
+ dotest(ww_test_edeadlk_normal, SUCCESS, LOCKTYPE_WW);
+ dotest(ww_test_edeadlk_normal_slow, SUCCESS, LOCKTYPE_WW);
+ dotest(ww_test_edeadlk_no_unlock, FAILURE, LOCKTYPE_WW);
+ dotest(ww_test_edeadlk_no_unlock_slow, FAILURE, LOCKTYPE_WW);
+ dotest(ww_test_edeadlk_acquire_more, FAILURE, LOCKTYPE_WW);
+ dotest(ww_test_edeadlk_acquire_more_slow, FAILURE, LOCKTYPE_WW);
+ dotest(ww_test_edeadlk_acquire_more_edeadlk, FAILURE, LOCKTYPE_WW);
+ dotest(ww_test_edeadlk_acquire_more_edeadlk_slow, FAILURE, LOCKTYPE_WW);
+ dotest(ww_test_edeadlk_acquire_wrong, FAILURE, LOCKTYPE_WW);
+ dotest(ww_test_edeadlk_acquire_wrong_slow, FAILURE, LOCKTYPE_WW);
+ pr_cont("\n");
+
+ print_testname("spinlock nest unlocked");
+ dotest(ww_test_spin_nest_unlocked, FAILURE, LOCKTYPE_WW);
+ pr_cont("\n");
+
+ print_testname("spinlock nest test");
+ dotest(ww_test_spin_nest_lock, SUCCESS, LOCKTYPE_WW);
+ pr_cont("\n");
+
+ printk(" -----------------------------------------------------\n");
+ printk(" |block | try |context|\n");
+ printk(" -----------------------------------------------------\n");
+
+ print_testname("context");
+ dotest(ww_test_context_block, FAILURE, LOCKTYPE_WW);
+ dotest(ww_test_context_try, SUCCESS, LOCKTYPE_WW);
+ dotest(ww_test_context_context, SUCCESS, LOCKTYPE_WW);
+ pr_cont("\n");
+
+ print_testname("try");
+ dotest(ww_test_try_block, FAILURE, LOCKTYPE_WW);
+ dotest(ww_test_try_try, SUCCESS, LOCKTYPE_WW);
+ dotest(ww_test_try_context, FAILURE, LOCKTYPE_WW);
+ pr_cont("\n");
+
+ print_testname("block");
+ dotest(ww_test_block_block, FAILURE, LOCKTYPE_WW);
+ dotest(ww_test_block_try, SUCCESS, LOCKTYPE_WW);
+ dotest(ww_test_block_context, FAILURE, LOCKTYPE_WW);
+ pr_cont("\n");
+
+ print_testname("spinlock");
+ dotest(ww_test_spin_block, FAILURE, LOCKTYPE_WW);
+ dotest(ww_test_spin_try, SUCCESS, LOCKTYPE_WW);
+ dotest(ww_test_spin_context, FAILURE, LOCKTYPE_WW);
+ pr_cont("\n");
+}
+
+
+/*
+ * <in hardirq handler>
+ * read_lock(&A);
+ *			<hardirq disable>
+ *			spin_lock(&B);
+ * spin_lock(&B);
+ *			read_lock(&A);
+ *
+ * is a deadlock.
+ */
+static void queued_read_lock_hardirq_RE_Er(void)
+{
+ HARDIRQ_ENTER();
+ read_lock(&rwlock_A);
+ LOCK(B);
+ UNLOCK(B);
+ read_unlock(&rwlock_A);
+ HARDIRQ_EXIT();
+
+ HARDIRQ_DISABLE();
+ LOCK(B);
+ read_lock(&rwlock_A);
+ read_unlock(&rwlock_A);
+ UNLOCK(B);
+ HARDIRQ_ENABLE();
+}
+
+/*
+ * <in hardirq handler>
+ * spin_lock(&B);
+ *			<hardirq disable>
+ *			read_lock(&A);
+ * read_lock(&A);
+ *			spin_lock(&B);
+ *
+ * is not a deadlock.
+ */
+static void queued_read_lock_hardirq_ER_rE(void)
+{
+ HARDIRQ_ENTER();
+ LOCK(B);
+ read_lock(&rwlock_A);
+ read_unlock(&rwlock_A);
+ UNLOCK(B);
+ HARDIRQ_EXIT();
+
+ HARDIRQ_DISABLE();
+ read_lock(&rwlock_A);
+ LOCK(B);
+ UNLOCK(B);
+ read_unlock(&rwlock_A);
+ HARDIRQ_ENABLE();
+}
+
+/*
+ * <hardirq disable>
+ * spin_lock(&B);
+ * read_lock(&A);
+ *			<in hardirq handler>
+ *			spin_lock(&B);
+ *			read_lock(&A);
+ *
+ * is a deadlock, because the two read_lock()s are both non-recursive readers.
+ */
+static void queued_read_lock_hardirq_inversion(void)
+{
+
+ HARDIRQ_ENTER();
+ LOCK(B);
+ UNLOCK(B);
+ HARDIRQ_EXIT();
+
+ HARDIRQ_DISABLE();
+ LOCK(B);
+ read_lock(&rwlock_A);
+ read_unlock(&rwlock_A);
+ UNLOCK(B);
+ HARDIRQ_ENABLE();
+
+ read_lock(&rwlock_A);
+ read_unlock(&rwlock_A);
+}
+
+static void queued_read_lock_tests(void)
+{
+ printk(" --------------------------------------------------------------------------\n");
+ printk(" | queued read lock tests |\n");
+ printk(" ---------------------------\n");
+ print_testname("hardirq read-lock/lock-read");
+ dotest(queued_read_lock_hardirq_RE_Er, FAILURE, LOCKTYPE_RWLOCK);
+ pr_cont("\n");
+
+ print_testname("hardirq lock-read/read-lock");
+ dotest(queued_read_lock_hardirq_ER_rE, SUCCESS, LOCKTYPE_RWLOCK);
+ pr_cont("\n");
+
+ print_testname("hardirq inversion");
+ dotest(queued_read_lock_hardirq_inversion, FAILURE, LOCKTYPE_RWLOCK);
+ pr_cont("\n");
+}
+
+static void fs_reclaim_correct_nesting(void)
+{
+ fs_reclaim_acquire(GFP_KERNEL);
+ might_alloc(GFP_NOFS);
+ fs_reclaim_release(GFP_KERNEL);
+}
+
+static void fs_reclaim_wrong_nesting(void)
+{
+ fs_reclaim_acquire(GFP_KERNEL);
+ might_alloc(GFP_KERNEL);
+ fs_reclaim_release(GFP_KERNEL);
+}
+
+static void fs_reclaim_protected_nesting(void)
+{
+ unsigned int flags;
+
+ fs_reclaim_acquire(GFP_KERNEL);
+ flags = memalloc_nofs_save();
+ might_alloc(GFP_KERNEL);
+ memalloc_nofs_restore(flags);
+ fs_reclaim_release(GFP_KERNEL);
+}
+
+static void fs_reclaim_tests(void)
+{
+ printk(" --------------------\n");
+ printk(" | fs_reclaim tests |\n");
+ printk(" --------------------\n");
+
+ print_testname("correct nesting");
+ dotest(fs_reclaim_correct_nesting, SUCCESS, 0);
+ pr_cont("\n");
+
+ print_testname("wrong nesting");
+ dotest(fs_reclaim_wrong_nesting, FAILURE, 0);
+ pr_cont("\n");
+
+ print_testname("protected nesting");
+ dotest(fs_reclaim_protected_nesting, SUCCESS, 0);
+ pr_cont("\n");
+}
+
+/* Defines guard classes to create contexts */
+DEFINE_LOCK_GUARD_0(HARDIRQ, HARDIRQ_ENTER(), HARDIRQ_EXIT())
+DEFINE_LOCK_GUARD_0(NOTTHREADED_HARDIRQ,
+ do {
+ local_irq_disable();
+ __irq_enter();
+ WARN_ON(!in_irq());
+ } while(0), HARDIRQ_EXIT())
+DEFINE_LOCK_GUARD_0(SOFTIRQ, SOFTIRQ_ENTER(), SOFTIRQ_EXIT())
+
+/* Define RCU guards, should go away when RCU has its own guard definitions */
+DEFINE_LOCK_GUARD_0(RCU, rcu_read_lock(), rcu_read_unlock())
+DEFINE_LOCK_GUARD_0(RCU_BH, rcu_read_lock_bh(), rcu_read_unlock_bh())
+DEFINE_LOCK_GUARD_0(RCU_SCHED, rcu_read_lock_sched(), rcu_read_unlock_sched())
+
+
+#define GENERATE_2_CONTEXT_TESTCASE(outer, outer_lock, inner, inner_lock) \
+ \
+static void __maybe_unused inner##_in_##outer(void) \
+{ \
+ /* Relies on the reversed clean-up ordering: inner first */ \
+ guard(outer)(outer_lock); \
+ guard(inner)(inner_lock); \
+}
+
+/*
+ * wait contexts (considering PREEMPT_RT)
+ *
+ * o: inner is allowed in outer
+ * x: inner is disallowed in outer
+ *
+ * \ inner | RCU | RAW_SPIN | SPIN | MUTEX
+ * outer \ | | | |
+ * ---------------+-------+----------+------+-------
+ * HARDIRQ | o | o | o | x
+ * ---------------+-------+----------+------+-------
+ * NOTTHREADED_IRQ| o | o | x | x
+ * ---------------+-------+----------+------+-------
+ * SOFTIRQ | o | o | o | x
+ * ---------------+-------+----------+------+-------
+ * RCU | o | o | o | x
+ * ---------------+-------+----------+------+-------
+ * RCU_BH | o | o | o | x
+ * ---------------+-------+----------+------+-------
+ * RCU_SCHED | o | o | x | x
+ * ---------------+-------+----------+------+-------
+ * RAW_SPIN | o | o | x | x
+ * ---------------+-------+----------+------+-------
+ * SPIN | o | o | o | x
+ * ---------------+-------+----------+------+-------
+ * MUTEX | o | o | o | o
+ * ---------------+-------+----------+------+-------
+ */
+
+#define GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(inner, inner_lock) \
+GENERATE_2_CONTEXT_TESTCASE(HARDIRQ, , inner, inner_lock) \
+GENERATE_2_CONTEXT_TESTCASE(NOTTHREADED_HARDIRQ, , inner, inner_lock) \
+GENERATE_2_CONTEXT_TESTCASE(SOFTIRQ, , inner, inner_lock) \
+GENERATE_2_CONTEXT_TESTCASE(RCU, , inner, inner_lock) \
+GENERATE_2_CONTEXT_TESTCASE(RCU_BH, , inner, inner_lock) \
+GENERATE_2_CONTEXT_TESTCASE(RCU_SCHED, , inner, inner_lock) \
+GENERATE_2_CONTEXT_TESTCASE(raw_spinlock, &raw_lock_A, inner, inner_lock) \
+GENERATE_2_CONTEXT_TESTCASE(spinlock, &lock_A, inner, inner_lock) \
+GENERATE_2_CONTEXT_TESTCASE(mutex, &mutex_A, inner, inner_lock)
+
+GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(RCU, )
+GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(raw_spinlock, &raw_lock_B)
+GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(spinlock, &lock_B)
+GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(mutex, &mutex_B)
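+
+/*
+ * Illustrative expansion (a sketch, not part of the generated code): the
+ * instantiations above produce testcases such as mutex_in_RCU_SCHED(),
+ * which per the table above must fail, since a mutex may sleep but
+ * RCU_SCHED disables preemption:
+ *
+ *        static void mutex_in_RCU_SCHED(void)
+ *        {
+ *                guard(RCU_SCHED)();
+ *                guard(mutex)(&mutex_B);
+ *        }
+ */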
+
+/* the outer context allows all kinds of preemption */
+#define DO_CONTEXT_TESTCASE_OUTER_PREEMPTIBLE(outer) \
+ dotest(RCU_in_##outer, SUCCESS, LOCKTYPE_RWLOCK); \
+ dotest(raw_spinlock_in_##outer, SUCCESS, LOCKTYPE_SPIN); \
+ dotest(spinlock_in_##outer, SUCCESS, LOCKTYPE_SPIN); \
+ dotest(mutex_in_##outer, SUCCESS, LOCKTYPE_MUTEX); \
+
+/*
+ * the outer context only allows the preemption introduced by spinlock_t (which
+ * is a sleepable lock for PREEMPT_RT)
+ */
+#define DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(outer) \
+ dotest(RCU_in_##outer, SUCCESS, LOCKTYPE_RWLOCK); \
+ dotest(raw_spinlock_in_##outer, SUCCESS, LOCKTYPE_SPIN); \
+ dotest(spinlock_in_##outer, SUCCESS, LOCKTYPE_SPIN); \
+ dotest(mutex_in_##outer, FAILURE, LOCKTYPE_MUTEX); \
+
+/* the outer context doesn't allow any kind of preemption */
+#define DO_CONTEXT_TESTCASE_OUTER_NOT_PREEMPTIBLE(outer) \
+ dotest(RCU_in_##outer, SUCCESS, LOCKTYPE_RWLOCK); \
+ dotest(raw_spinlock_in_##outer, SUCCESS, LOCKTYPE_SPIN); \
+ dotest(spinlock_in_##outer, FAILURE, LOCKTYPE_SPIN); \
+ dotest(mutex_in_##outer, FAILURE, LOCKTYPE_MUTEX); \
+
+static void wait_context_tests(void)
+{
+ printk(" --------------------------------------------------------------------------\n");
+ printk(" | wait context tests |\n");
+ printk(" --------------------------------------------------------------------------\n");
+ printk(" | rcu | raw | spin |mutex |\n");
+ printk(" --------------------------------------------------------------------------\n");
+ print_testname("in hardirq context");
+ DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(HARDIRQ);
+ pr_cont("\n");
+
+ print_testname("in hardirq context (not threaded)");
+ DO_CONTEXT_TESTCASE_OUTER_NOT_PREEMPTIBLE(NOTTHREADED_HARDIRQ);
+ pr_cont("\n");
+
+ print_testname("in softirq context");
+ DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(SOFTIRQ);
+ pr_cont("\n");
+
+ print_testname("in RCU context");
+ DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(RCU);
+ pr_cont("\n");
+
+ print_testname("in RCU-bh context");
+ DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(RCU_BH);
+ pr_cont("\n");
+
+ print_testname("in RCU-sched context");
+ DO_CONTEXT_TESTCASE_OUTER_NOT_PREEMPTIBLE(RCU_SCHED);
+ pr_cont("\n");
+
+ print_testname("in RAW_SPINLOCK context");
+ DO_CONTEXT_TESTCASE_OUTER_NOT_PREEMPTIBLE(raw_spinlock);
+ pr_cont("\n");
+
+ print_testname("in SPINLOCK context");
+ DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(spinlock);
+ pr_cont("\n");
+
+ print_testname("in MUTEX context");
+ DO_CONTEXT_TESTCASE_OUTER_PREEMPTIBLE(mutex);
+ pr_cont("\n");
+}
+
+static void local_lock_2(void)
+{
+ local_lock(&local_A); /* IRQ-ON */
+ local_unlock(&local_A);
+
+ HARDIRQ_ENTER();
+ spin_lock(&lock_A); /* IN-IRQ */
+ spin_unlock(&lock_A);
+ HARDIRQ_EXIT()
+
+ HARDIRQ_DISABLE();
+ spin_lock(&lock_A);
+ local_lock(&local_A); /* IN-IRQ <-> IRQ-ON cycle, false */
+ local_unlock(&local_A);
+ spin_unlock(&lock_A);
+ HARDIRQ_ENABLE();
+}
+
+static void local_lock_3A(void)
+{
+ local_lock(&local_A); /* IRQ-ON */
+ spin_lock(&lock_B); /* IRQ-ON */
+ spin_unlock(&lock_B);
+ local_unlock(&local_A);
+
+ HARDIRQ_ENTER();
+ spin_lock(&lock_A); /* IN-IRQ */
+ spin_unlock(&lock_A);
+ HARDIRQ_EXIT()
+
+ HARDIRQ_DISABLE();
+ spin_lock(&lock_A);
+ local_lock(&local_A); /* IN-IRQ <-> IRQ-ON cycle only if we count local_lock(), false */
+ local_unlock(&local_A);
+ spin_unlock(&lock_A);
+ HARDIRQ_ENABLE();
+}
+
+static void local_lock_3B(void)
+{
+ local_lock(&local_A); /* IRQ-ON */
+ spin_lock(&lock_B); /* IRQ-ON */
+ spin_unlock(&lock_B);
+ local_unlock(&local_A);
+
+ HARDIRQ_ENTER();
+ spin_lock(&lock_A); /* IN-IRQ */
+ spin_unlock(&lock_A);
+ HARDIRQ_EXIT()
+
+ HARDIRQ_DISABLE();
+ spin_lock(&lock_A);
+ local_lock(&local_A); /* IN-IRQ <-> IRQ-ON cycle only if we count local_lock(), false */
+ local_unlock(&local_A);
+ spin_unlock(&lock_A);
+ HARDIRQ_ENABLE();
+
+ HARDIRQ_DISABLE();
+ spin_lock(&lock_A);
+ spin_lock(&lock_B); /* IN-IRQ <-> IRQ-ON cycle, true */
+ spin_unlock(&lock_B);
+ spin_unlock(&lock_A);
+ HARDIRQ_ENABLE();
+
+}
+
+static void local_lock_tests(void)
+{
+ printk(" --------------------------------------------------------------------------\n");
+ printk(" | local_lock tests |\n");
+ printk(" ---------------------\n");
+
+ print_testname("local_lock inversion 2");
+ dotest(local_lock_2, SUCCESS, LOCKTYPE_LL);
+ pr_cont("\n");
+
+ print_testname("local_lock inversion 3A");
+ dotest(local_lock_3A, SUCCESS, LOCKTYPE_LL);
+ pr_cont("\n");
+
+ print_testname("local_lock inversion 3B");
+ dotest(local_lock_3B, FAILURE, LOCKTYPE_LL);
+ pr_cont("\n");
+}
+
+static void hardirq_deadlock_softirq_not_deadlock(void)
+{
+ /* mutex_A is hardirq-unsafe and softirq-unsafe */
+ /* mutex_A -> lock_C */
+ mutex_lock(&mutex_A);
+ HARDIRQ_DISABLE();
+ spin_lock(&lock_C);
+ spin_unlock(&lock_C);
+ HARDIRQ_ENABLE();
+ mutex_unlock(&mutex_A);
+
+ /* lock_A is hardirq-safe */
+ HARDIRQ_ENTER();
+ spin_lock(&lock_A);
+ spin_unlock(&lock_A);
+ HARDIRQ_EXIT();
+
+ /* lock_A -> lock_B */
+ HARDIRQ_DISABLE();
+ spin_lock(&lock_A);
+ spin_lock(&lock_B);
+ spin_unlock(&lock_B);
+ spin_unlock(&lock_A);
+ HARDIRQ_ENABLE();
+
+ /* lock_B -> lock_C */
+ HARDIRQ_DISABLE();
+ spin_lock(&lock_B);
+ spin_lock(&lock_C);
+ spin_unlock(&lock_C);
+ spin_unlock(&lock_B);
+ HARDIRQ_ENABLE();
+
+ /* lock_D is softirq-safe */
+ SOFTIRQ_ENTER();
+ spin_lock(&lock_D);
+ spin_unlock(&lock_D);
+ SOFTIRQ_EXIT();
+
+ /* And lock_D is hardirq-unsafe */
+ SOFTIRQ_DISABLE();
+ spin_lock(&lock_D);
+ spin_unlock(&lock_D);
+ SOFTIRQ_ENABLE();
+
+ /*
+ * mutex_A -> lock_C -> lock_D is softirq-unsafe -> softirq-safe, not
+ * deadlock.
+ *
+ * lock_A -> lock_B -> lock_C -> lock_D is hardirq-safe ->
+ * hardirq-unsafe, deadlock.
+ */
+ HARDIRQ_DISABLE();
+ spin_lock(&lock_C);
+ spin_lock(&lock_D);
+ spin_unlock(&lock_D);
+ spin_unlock(&lock_C);
+ HARDIRQ_ENABLE();
+}
+
+void locking_selftest(void)
+{
+ /*
+ * Got a locking failure before the selftest ran?
+ */
+ if (!debug_locks) {
+ printk("----------------------------------\n");
+ printk("| Locking API testsuite disabled |\n");
+ printk("----------------------------------\n");
+ return;
+ }
+
+ /*
+ * treats read_lock() as a recursive read lock for testing purposes
+ */
+ force_read_lock_recursive = 1;
+
+ /*
+ * Run the testsuite:
+ */
+ printk("------------------------\n");
+ printk("| Locking API testsuite:\n");
+ printk("----------------------------------------------------------------------------\n");
+ printk(" | spin |wlock |rlock |mutex | wsem | rsem |rtmutex\n");
+ printk(" --------------------------------------------------------------------------\n");
+
+ init_shared_classes();
+ lockdep_set_selftest_task(current);
+
+ DO_TESTCASE_6R("A-A deadlock", AA);
+ DO_TESTCASE_6R("A-B-B-A deadlock", ABBA);
+ DO_TESTCASE_6R("A-B-B-C-C-A deadlock", ABBCCA);
+ DO_TESTCASE_6R("A-B-C-A-B-C deadlock", ABCABC);
+ DO_TESTCASE_6R("A-B-B-C-C-D-D-A deadlock", ABBCCDDA);
+ DO_TESTCASE_6R("A-B-C-D-B-D-D-A deadlock", ABCDBDDA);
+ DO_TESTCASE_6R("A-B-C-D-B-C-D-A deadlock", ABCDBCDA);
+ DO_TESTCASE_6("double unlock", double_unlock);
+ DO_TESTCASE_6("initialize held", init_held);
+
+ printk(" --------------------------------------------------------------------------\n");
+ print_testname("recursive read-lock");
+ pr_cont(" |");
+ dotest(rlock_AA1, SUCCESS, LOCKTYPE_RWLOCK);
+ pr_cont(" |");
+ dotest(rsem_AA1, FAILURE, LOCKTYPE_RWSEM);
+ pr_cont("\n");
+
+ print_testname("recursive read-lock #2");
+ pr_cont(" |");
+ dotest(rlock_AA1B, SUCCESS, LOCKTYPE_RWLOCK);
+ pr_cont(" |");
+ dotest(rsem_AA1B, FAILURE, LOCKTYPE_RWSEM);
+ pr_cont("\n");
+
+ print_testname("mixed read-write-lock");
+ pr_cont(" |");
+ dotest(rlock_AA2, FAILURE, LOCKTYPE_RWLOCK);
+ pr_cont(" |");
+ dotest(rsem_AA2, FAILURE, LOCKTYPE_RWSEM);
+ pr_cont("\n");
+
+ print_testname("mixed write-read-lock");
+ pr_cont(" |");
+ dotest(rlock_AA3, FAILURE, LOCKTYPE_RWLOCK);
+ pr_cont(" |");
+ dotest(rsem_AA3, FAILURE, LOCKTYPE_RWSEM);
+ pr_cont("\n");
+
+ print_testname("mixed read-lock/lock-write ABBA");
+ pr_cont(" |");
+ dotest(rlock_ABBA1, FAILURE, LOCKTYPE_RWLOCK);
+ pr_cont(" |");
+ dotest(rwsem_ABBA1, FAILURE, LOCKTYPE_RWSEM);
+
+ print_testname("mixed read-lock/lock-read ABBA");
+ pr_cont(" |");
+ dotest(rlock_ABBA2, SUCCESS, LOCKTYPE_RWLOCK);
+ pr_cont(" |");
+ dotest(rwsem_ABBA2, FAILURE, LOCKTYPE_RWSEM);
+
+ print_testname("mixed write-lock/lock-write ABBA");
+ pr_cont(" |");
+ dotest(rlock_ABBA3, FAILURE, LOCKTYPE_RWLOCK);
+ pr_cont(" |");
+ dotest(rwsem_ABBA3, FAILURE, LOCKTYPE_RWSEM);
+
+ print_testname("chain cached mixed R-L/L-W ABBA");
+ pr_cont(" |");
+ dotest(rlock_chaincache_ABBA1, FAILURE, LOCKTYPE_RWLOCK);
+
+ DO_TESTCASE_6x1RRB("rlock W1R2/W2R3/W3R1", W1R2_W2R3_W3R1);
+ DO_TESTCASE_6x1RRB("rlock W1W2/R2R3/W3R1", W1W2_R2R3_W3R1);
+ DO_TESTCASE_6x1RR("rlock W1W2/R2R3/R3W1", W1W2_R2R3_R3W1);
+ DO_TESTCASE_6x1RR("rlock W1R2/R2R3/W3W1", W1R2_R2R3_W3W1);
+
+ printk(" --------------------------------------------------------------------------\n");
+ /*
+ * irq-context testcases:
+ */
+ DO_TESTCASE_2x6("irqs-on + irq-safe-A", irqsafe1);
+ NON_RT(DO_TESTCASE_2x3("sirq-safe-A => hirqs-on", irqsafe2A));
+ DO_TESTCASE_2x6("safe-A + irqs-on", irqsafe2B);
+ DO_TESTCASE_6x6("safe-A + unsafe-B #1", irqsafe3);
+ DO_TESTCASE_6x6("safe-A + unsafe-B #2", irqsafe4);
+ DO_TESTCASE_6x6RW("irq lock-inversion", irq_inversion);
+
+ DO_TESTCASE_6x2x2RW("irq read-recursion", irq_read_recursion);
+ DO_TESTCASE_6x2x2RW("irq read-recursion #2", irq_read_recursion2);
+ DO_TESTCASE_6x2x2RW("irq read-recursion #3", irq_read_recursion3);
+
+ ww_tests();
+
+ force_read_lock_recursive = 0;
+ /*
+ * queued_read_lock() specific test cases can be put here
+ */
+ if (IS_ENABLED(CONFIG_QUEUED_RWLOCKS))
+ queued_read_lock_tests();
+
+ fs_reclaim_tests();
+
+ /* Wait context test cases that are specific for RAW_LOCK_NESTING */
+ if (IS_ENABLED(CONFIG_PROVE_RAW_LOCK_NESTING))
+ wait_context_tests();
+
+ local_lock_tests();
+
+ print_testname("hardirq_unsafe_softirq_safe");
+ dotest(hardirq_deadlock_softirq_not_deadlock, FAILURE, LOCKTYPE_SPECIAL);
+ pr_cont("\n");
+
+ if (unexpected_testcase_failures) {
+ printk("-----------------------------------------------------------------\n");
+ debug_locks = 0;
+ printk("BUG: %3d unexpected failures (out of %3d) - debugging disabled! |\n",
+ unexpected_testcase_failures, testcase_total);
+ printk("-----------------------------------------------------------------\n");
+ } else if (expected_testcase_failures && testcase_successes) {
+ printk("--------------------------------------------------------\n");
+ printk("%3d out of %3d testcases failed, as expected. |\n",
+ expected_testcase_failures, testcase_total);
+ printk("----------------------------------------------------\n");
+ debug_locks = 1;
+ } else if (expected_testcase_failures && !testcase_successes) {
+ printk("--------------------------------------------------------\n");
+ printk("All %3d testcases failed, as expected. |\n",
+ expected_testcase_failures);
+ printk("----------------------------------------\n");
+ debug_locks = 1;
+ } else {
+ printk("-------------------------------------------------------\n");
+ printk("Good, all %3d testcases passed! |\n",
+ testcase_successes);
+ printk("---------------------------------\n");
+ debug_locks = 1;
+ }
+ lockdep_set_selftest_task(NULL);
+ debug_locks_silent = 0;
+}
diff --git a/lib/lockref.c b/lib/lockref.c
new file mode 100644
index 000000000..2afe4c5d8
--- /dev/null
+++ b/lib/lockref.c
@@ -0,0 +1,193 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/export.h>
+#include <linux/lockref.h>
+
+#if USE_CMPXCHG_LOCKREF
+
+/*
+ * Note that the "cmpxchg()" reloads the "old" value for the
+ * failure case.
+ */
+#define CMPXCHG_LOOP(CODE, SUCCESS) do { \
+ int retry = 100; \
+ struct lockref old; \
+ BUILD_BUG_ON(sizeof(old) != 8); \
+ old.lock_count = READ_ONCE(lockref->lock_count); \
+ while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) { \
+ struct lockref new = old; \
+ CODE \
+ if (likely(try_cmpxchg64_relaxed(&lockref->lock_count, \
+ &old.lock_count, \
+ new.lock_count))) { \
+ SUCCESS; \
+ } \
+ if (!--retry) \
+ break; \
+ } \
+} while (0)
+
+#else
+
+#define CMPXCHG_LOOP(CODE, SUCCESS) do { } while (0)
+
+#endif
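+
+/*
+ * Illustrative expansion (a sketch, not compiled): for lockref_get()
+ * below, CODE is "new.count++;" and SUCCESS is "return;", so the fast
+ * path becomes roughly:
+ *
+ *        int retry = 100;
+ *        struct lockref old;
+ *
+ *        old.lock_count = READ_ONCE(lockref->lock_count);
+ *        while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {
+ *                struct lockref new = old;
+ *
+ *                new.count++;
+ *                if (likely(try_cmpxchg64_relaxed(&lockref->lock_count,
+ *                                                 &old.lock_count,
+ *                                                 new.lock_count)))
+ *                        return;
+ *                if (!--retry)
+ *                        break;        // give up, fall back to the spinlock
+ *        }
+ *
+ * Because the spinlock and the count share a single 64-bit word, one
+ * try_cmpxchg64() both verifies the lock is free and bumps the count.
+ */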
+
+/**
+ * lockref_get - Increments reference count unconditionally
+ * @lockref: pointer to lockref structure
+ *
+ * This operation is only valid if you already hold a reference
+ * to the object, so you know the count cannot be zero.
+ */
+void lockref_get(struct lockref *lockref)
+{
+ CMPXCHG_LOOP(
+ new.count++;
+ ,
+ return;
+ );
+
+ spin_lock(&lockref->lock);
+ lockref->count++;
+ spin_unlock(&lockref->lock);
+}
+EXPORT_SYMBOL(lockref_get);
+
+/**
+ * lockref_get_not_zero - Increments count unless the count is 0 or dead
+ * @lockref: pointer to lockref structure
+ * Return: 1 if count updated successfully or 0 if count was zero
+ */
+int lockref_get_not_zero(struct lockref *lockref)
+{
+ int retval;
+
+ CMPXCHG_LOOP(
+ new.count++;
+ if (old.count <= 0)
+ return 0;
+ ,
+ return 1;
+ );
+
+ spin_lock(&lockref->lock);
+ retval = 0;
+ if (lockref->count > 0) {
+ lockref->count++;
+ retval = 1;
+ }
+ spin_unlock(&lockref->lock);
+ return retval;
+}
+EXPORT_SYMBOL(lockref_get_not_zero);
+
+/**
+ * lockref_put_not_zero - Decrements count unless count <= 1 before decrement
+ * @lockref: pointer to lockref structure
+ * Return: 1 if count updated successfully or 0 if count would become zero
+ */
+int lockref_put_not_zero(struct lockref *lockref)
+{
+ int retval;
+
+ CMPXCHG_LOOP(
+ new.count--;
+ if (old.count <= 1)
+ return 0;
+ ,
+ return 1;
+ );
+
+ spin_lock(&lockref->lock);
+ retval = 0;
+ if (lockref->count > 1) {
+ lockref->count--;
+ retval = 1;
+ }
+ spin_unlock(&lockref->lock);
+ return retval;
+}
+EXPORT_SYMBOL(lockref_put_not_zero);
+
+/**
+ * lockref_put_return - Decrement reference count if possible
+ * @lockref: pointer to lockref structure
+ *
+ * Decrement the reference count and return the new value.
+ * If the lockref was dead or locked, return an error.
+ */
+int lockref_put_return(struct lockref *lockref)
+{
+ CMPXCHG_LOOP(
+ new.count--;
+ if (old.count <= 0)
+ return -1;
+ ,
+ return new.count;
+ );
+ return -1;
+}
+EXPORT_SYMBOL(lockref_put_return);
+
+/**
+ * lockref_put_or_lock - decrements count unless count <= 1 before decrement
+ * @lockref: pointer to lockref structure
+ * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
+ */
+int lockref_put_or_lock(struct lockref *lockref)
+{
+ CMPXCHG_LOOP(
+ new.count--;
+ if (old.count <= 1)
+ break;
+ ,
+ return 1;
+ );
+
+ spin_lock(&lockref->lock);
+ if (lockref->count <= 1)
+ return 0;
+ lockref->count--;
+ spin_unlock(&lockref->lock);
+ return 1;
+}
+EXPORT_SYMBOL(lockref_put_or_lock);
+
+/**
+ * lockref_mark_dead - mark lockref dead
+ * @lockref: pointer to lockref structure
+ */
+void lockref_mark_dead(struct lockref *lockref)
+{
+ assert_spin_locked(&lockref->lock);
+ lockref->count = -128;
+}
+EXPORT_SYMBOL(lockref_mark_dead);
+
+/**
+ * lockref_get_not_dead - Increments count unless the ref is dead
+ * @lockref: pointer to lockref structure
+ * Return: 1 if count updated successfully or 0 if lockref was dead
+ */
+int lockref_get_not_dead(struct lockref *lockref)
+{
+ int retval;
+
+ CMPXCHG_LOOP(
+ new.count++;
+ if (old.count < 0)
+ return 0;
+ ,
+ return 1;
+ );
+
+ spin_lock(&lockref->lock);
+ retval = 0;
+ if (lockref->count >= 0) {
+ lockref->count++;
+ retval = 1;
+ }
+ spin_unlock(&lockref->lock);
+ return retval;
+}
+EXPORT_SYMBOL(lockref_get_not_dead);
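+
+/*
+ * Usage sketch (illustrative only; "struct foo" and foo_put() are
+ * hypothetical, not part of this file). A typical user embeds the
+ * lockref in the refcounted object and only falls back to the spinlock
+ * when the count may drop to zero:
+ *
+ *        struct foo {
+ *                struct lockref ref;
+ *                // ... payload ...
+ *        };
+ *
+ *        static void foo_put(struct foo *foo)
+ *        {
+ *                if (lockref_put_or_lock(&foo->ref))
+ *                        return;        // fast path: count was > 1
+ *                // slow path: ref.lock is now held and count is <= 1
+ *                lockref_mark_dead(&foo->ref);
+ *                spin_unlock(&foo->ref.lock);
+ *                kfree(foo);
+ *        }
+ */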
diff --git a/lib/logic_iomem.c b/lib/logic_iomem.c
new file mode 100644
index 000000000..b247d412d
--- /dev/null
+++ b/lib/logic_iomem.c
@@ -0,0 +1,321 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Intel Corporation
+ * Author: Johannes Berg <johannes@sipsolutions.net>
+ */
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/logic_iomem.h>
+#include <asm/io.h>
+
+struct logic_iomem_region {
+ const struct resource *res;
+ const struct logic_iomem_region_ops *ops;
+ struct list_head list;
+};
+
+struct logic_iomem_area {
+ const struct logic_iomem_ops *ops;
+ void *priv;
+};
+
+#define AREA_SHIFT 24
+#define MAX_AREA_SIZE (1 << AREA_SHIFT)
+#define MAX_AREAS ((1U << 31) / MAX_AREA_SIZE)
+#define AREA_BITS ((MAX_AREAS - 1) << AREA_SHIFT)
+#define AREA_MASK (MAX_AREA_SIZE - 1)
+#ifdef CONFIG_64BIT
+#define IOREMAP_BIAS 0xDEAD000000000000UL
+#define IOREMAP_MASK 0xFFFFFFFF00000000UL
+#else
+#define IOREMAP_BIAS 0x80000000UL
+#define IOREMAP_MASK 0x80000000UL
+#endif
+
+static DEFINE_MUTEX(regions_mtx);
+static LIST_HEAD(regions_list);
+static struct logic_iomem_area mapped_areas[MAX_AREAS];
+
+int logic_iomem_add_region(struct resource *resource,
+ const struct logic_iomem_region_ops *ops)
+{
+ struct logic_iomem_region *rreg;
+ int err;
+
+ if (WARN_ON(!resource || !ops))
+ return -EINVAL;
+
+ if (WARN_ON((resource->flags & IORESOURCE_TYPE_BITS) != IORESOURCE_MEM))
+ return -EINVAL;
+
+ rreg = kzalloc(sizeof(*rreg), GFP_KERNEL);
+ if (!rreg)
+ return -ENOMEM;
+
+ err = request_resource(&iomem_resource, resource);
+ if (err) {
+ kfree(rreg);
+ return -ENOMEM;
+ }
+
+ mutex_lock(&regions_mtx);
+ rreg->res = resource;
+ rreg->ops = ops;
+ list_add_tail(&rreg->list, &regions_list);
+ mutex_unlock(&regions_mtx);
+
+ return 0;
+}
+EXPORT_SYMBOL(logic_iomem_add_region);
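+
+/*
+ * Usage sketch (illustrative only; all "my_*" names are hypothetical).
+ * The region ops' ->map() hands back per-area accessor ops when
+ * ioremap() later hits the registered range:
+ *
+ *        static long my_map(unsigned long offset, size_t size,
+ *                           const struct logic_iomem_ops **ops, void **priv)
+ *        {
+ *                *ops = &my_area_ops;        // provides ->read()/->write()
+ *                *priv = my_state;
+ *                return 0;                   // offset within the new area
+ *        }
+ *
+ *        static const struct logic_iomem_region_ops my_region_ops = {
+ *                .map = my_map,
+ *        };
+ *
+ *        err = logic_iomem_add_region(&my_resource, &my_region_ops);
+ */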
+
+#ifndef CONFIG_INDIRECT_IOMEM_FALLBACK
+static void __iomem *real_ioremap(phys_addr_t offset, size_t size)
+{
+ WARN(1, "invalid ioremap(0x%llx, 0x%zx)\n",
+ (unsigned long long)offset, size);
+ return NULL;
+}
+
+static void real_iounmap(volatile void __iomem *addr)
+{
+ WARN(1, "invalid iounmap for addr 0x%llx\n",
+ (unsigned long long)(uintptr_t __force)addr);
+}
+#endif /* CONFIG_INDIRECT_IOMEM_FALLBACK */
+
+void __iomem *ioremap(phys_addr_t offset, size_t size)
+{
+ void __iomem *ret = NULL;
+ struct logic_iomem_region *rreg, *found = NULL;
+ int i;
+
+ mutex_lock(&regions_mtx);
+ list_for_each_entry(rreg, &regions_list, list) {
+ if (rreg->res->start > offset)
+ continue;
+ if (rreg->res->end < offset + size - 1)
+ continue;
+ found = rreg;
+ break;
+ }
+
+ if (!found)
+ goto out;
+
+ for (i = 0; i < MAX_AREAS; i++) {
+ long offs;
+
+ if (mapped_areas[i].ops)
+ continue;
+
+ offs = rreg->ops->map(offset - found->res->start,
+ size, &mapped_areas[i].ops,
+ &mapped_areas[i].priv);
+ if (offs < 0) {
+ mapped_areas[i].ops = NULL;
+ break;
+ }
+
+ if (WARN_ON(!mapped_areas[i].ops)) {
+ mapped_areas[i].ops = NULL;
+ break;
+ }
+
+ ret = (void __iomem *)(IOREMAP_BIAS + (i << AREA_SHIFT) + offs);
+ break;
+ }
+out:
+ mutex_unlock(&regions_mtx);
+ if (ret)
+ return ret;
+ return real_ioremap(offset, size);
+}
+EXPORT_SYMBOL(ioremap);
+
+static inline struct logic_iomem_area *
+get_area(const volatile void __iomem *addr)
+{
+ unsigned long a = (unsigned long)addr;
+ unsigned int idx;
+
+ if (WARN_ON((a & IOREMAP_MASK) != IOREMAP_BIAS))
+ return NULL;
+
+ idx = (a & AREA_BITS) >> AREA_SHIFT;
+
+ if (mapped_areas[idx].ops)
+ return &mapped_areas[idx];
+
+ return NULL;
+}
+
+void iounmap(volatile void __iomem *addr)
+{
+ struct logic_iomem_area *area = get_area(addr);
+
+ if (!area) {
+ real_iounmap(addr);
+ return;
+ }
+
+ if (area->ops->unmap)
+ area->ops->unmap(area->priv);
+
+ mutex_lock(&regions_mtx);
+ area->ops = NULL;
+ area->priv = NULL;
+ mutex_unlock(&regions_mtx);
+}
+EXPORT_SYMBOL(iounmap);
+
+#ifndef CONFIG_INDIRECT_IOMEM_FALLBACK
+#define MAKE_FALLBACK(op, sz) \
+static u##sz real_raw_read ## op(const volatile void __iomem *addr) \
+{ \
+ WARN(1, "Invalid read" #op " at address %llx\n", \
+ (unsigned long long)(uintptr_t __force)addr); \
+ return (u ## sz)~0ULL; \
+} \
+ \
+static void real_raw_write ## op(u ## sz val, \
+ volatile void __iomem *addr) \
+{ \
+ WARN(1, "Invalid writeq" #op " of 0x%llx at address %llx\n", \
+ (unsigned long long)val, \
+ (unsigned long long)(uintptr_t __force)addr);\
+} \
+
+MAKE_FALLBACK(b, 8);
+MAKE_FALLBACK(w, 16);
+MAKE_FALLBACK(l, 32);
+#ifdef CONFIG_64BIT
+MAKE_FALLBACK(q, 64);
+#endif
+
+static void real_memset_io(volatile void __iomem *addr, int value, size_t size)
+{
+ WARN(1, "Invalid memset_io at address 0x%llx\n",
+ (unsigned long long)(uintptr_t __force)addr);
+}
+
+static void real_memcpy_fromio(void *buffer, const volatile void __iomem *addr,
+ size_t size)
+{
+ WARN(1, "Invalid memcpy_fromio at address 0x%llx\n",
+ (unsigned long long)(uintptr_t __force)addr);
+
+ memset(buffer, 0xff, size);
+}
+
+static void real_memcpy_toio(volatile void __iomem *addr, const void *buffer,
+ size_t size)
+{
+ WARN(1, "Invalid memcpy_toio at address 0x%llx\n",
+ (unsigned long long)(uintptr_t __force)addr);
+}
+#endif /* CONFIG_INDIRECT_IOMEM_FALLBACK */
+
+#define MAKE_OP(op, sz) \
+u##sz __raw_read ## op(const volatile void __iomem *addr) \
+{ \
+ struct logic_iomem_area *area = get_area(addr); \
+ \
+ if (!area) \
+ return real_raw_read ## op(addr); \
+ \
+ return (u ## sz) area->ops->read(area->priv, \
+ (unsigned long)addr & AREA_MASK,\
+ sz / 8); \
+} \
+EXPORT_SYMBOL(__raw_read ## op); \
+ \
+void __raw_write ## op(u ## sz val, volatile void __iomem *addr) \
+{ \
+ struct logic_iomem_area *area = get_area(addr); \
+ \
+ if (!area) { \
+ real_raw_write ## op(val, addr); \
+ return; \
+ } \
+ \
+ area->ops->write(area->priv, \
+ (unsigned long)addr & AREA_MASK, \
+ sz / 8, val); \
+} \
+EXPORT_SYMBOL(__raw_write ## op)
+
+MAKE_OP(b, 8);
+MAKE_OP(w, 16);
+MAKE_OP(l, 32);
+#ifdef CONFIG_64BIT
+MAKE_OP(q, 64);
+#endif
+
+void memset_io(volatile void __iomem *addr, int value, size_t size)
+{
+ struct logic_iomem_area *area = get_area(addr);
+ unsigned long offs, start;
+
+ if (!area) {
+ real_memset_io(addr, value, size);
+ return;
+ }
+
+ start = (unsigned long)addr & AREA_MASK;
+
+ if (area->ops->set) {
+ area->ops->set(area->priv, start, value, size);
+ return;
+ }
+
+ for (offs = 0; offs < size; offs++)
+ area->ops->write(area->priv, start + offs, 1, value);
+}
+EXPORT_SYMBOL(memset_io);
+
+void memcpy_fromio(void *buffer, const volatile void __iomem *addr,
+ size_t size)
+{
+ struct logic_iomem_area *area = get_area(addr);
+ u8 *buf = buffer;
+ unsigned long offs, start;
+
+ if (!area) {
+ real_memcpy_fromio(buffer, addr, size);
+ return;
+ }
+
+ start = (unsigned long)addr & AREA_MASK;
+
+ if (area->ops->copy_from) {
+ area->ops->copy_from(area->priv, buffer, start, size);
+ return;
+ }
+
+ for (offs = 0; offs < size; offs++)
+ buf[offs] = area->ops->read(area->priv, start + offs, 1);
+}
+EXPORT_SYMBOL(memcpy_fromio);
+
+void memcpy_toio(volatile void __iomem *addr, const void *buffer, size_t size)
+{
+ struct logic_iomem_area *area = get_area(addr);
+ const u8 *buf = buffer;
+ unsigned long offs, start;
+
+ if (!area) {
+ real_memcpy_toio(addr, buffer, size);
+ return;
+ }
+
+ start = (unsigned long)addr & AREA_MASK;
+
+ if (area->ops->copy_to) {
+ area->ops->copy_to(area->priv, start, buffer, size);
+ return;
+ }
+
+ for (offs = 0; offs < size; offs++)
+ area->ops->write(area->priv, start + offs, 1, buf[offs]);
+}
+EXPORT_SYMBOL(memcpy_toio);
diff --git a/lib/logic_pio.c b/lib/logic_pio.c
new file mode 100644
index 000000000..2ea564a40
--- /dev/null
+++ b/lib/logic_pio.c
@@ -0,0 +1,317 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2017 HiSilicon Limited, All Rights Reserved.
+ * Author: Gabriele Paoloni <gabriele.paoloni@huawei.com>
+ * Author: Zhichang Yuan <yuanzhichang@hisilicon.com>
+ * Author: John Garry <john.garry@huawei.com>
+ */
+
+#define pr_fmt(fmt) "LOGIC PIO: " fmt
+
+#include <linux/of.h>
+#include <linux/io.h>
+#include <linux/logic_pio.h>
+#include <linux/mm.h>
+#include <linux/rculist.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+
+/* The unique hardware address list */
+static LIST_HEAD(io_range_list);
+static DEFINE_MUTEX(io_range_mutex);
+
+/**
+ * logic_pio_register_range - register logical PIO range for a host
+ * @new_range: pointer to the IO range to be registered.
+ *
+ * Returns 0 on success, or the error code in case of failure.
+ * If the range already exists, -EEXIST will be returned, which should be
+ * considered a success.
+ *
+ * Register a new IO range node in the IO range list.
+ */
+int logic_pio_register_range(struct logic_pio_hwaddr *new_range)
+{
+ struct logic_pio_hwaddr *range;
+ resource_size_t start;
+ resource_size_t end;
+ resource_size_t mmio_end = 0;
+ resource_size_t iio_sz = MMIO_UPPER_LIMIT;
+ int ret = 0;
+
+ if (!new_range || !new_range->fwnode || !new_range->size ||
+ (new_range->flags == LOGIC_PIO_INDIRECT && !new_range->ops))
+ return -EINVAL;
+
+ start = new_range->hw_start;
+ end = new_range->hw_start + new_range->size;
+
+ mutex_lock(&io_range_mutex);
+ list_for_each_entry(range, &io_range_list, list) {
+ if (range->fwnode == new_range->fwnode) {
+ /* range already there */
+ ret = -EEXIST;
+ goto end_register;
+ }
+ if (range->flags == LOGIC_PIO_CPU_MMIO &&
+ new_range->flags == LOGIC_PIO_CPU_MMIO) {
+ /* for MMIO ranges we need to check for overlap */
+ if (start >= range->hw_start + range->size ||
+ end < range->hw_start) {
+ mmio_end = range->io_start + range->size;
+ } else {
+ ret = -EFAULT;
+ goto end_register;
+ }
+ } else if (range->flags == LOGIC_PIO_INDIRECT &&
+ new_range->flags == LOGIC_PIO_INDIRECT) {
+ iio_sz += range->size;
+ }
+ }
+
+ /* range not registered yet, check for available space */
+ if (new_range->flags == LOGIC_PIO_CPU_MMIO) {
+ if (mmio_end + new_range->size - 1 > MMIO_UPPER_LIMIT) {
+ /* if it's too big check if 64K space can be reserved */
+ if (mmio_end + SZ_64K - 1 > MMIO_UPPER_LIMIT) {
+ ret = -E2BIG;
+ goto end_register;
+ }
+ new_range->size = SZ_64K;
+ pr_warn("Requested IO range too big, new size set to 64K\n");
+ }
+ new_range->io_start = mmio_end;
+ } else if (new_range->flags == LOGIC_PIO_INDIRECT) {
+ if (iio_sz + new_range->size - 1 > IO_SPACE_LIMIT) {
+ ret = -E2BIG;
+ goto end_register;
+ }
+ new_range->io_start = iio_sz;
+ } else {
+ /* invalid flag */
+ ret = -EINVAL;
+ goto end_register;
+ }
+
+ list_add_tail_rcu(&new_range->list, &io_range_list);
+
+end_register:
+ mutex_unlock(&io_range_mutex);
+ return ret;
+}
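+
+/*
+ * Usage sketch (illustrative only; the "my_*" names are hypothetical).
+ * An indirect-IO host fills in a logic_pio_hwaddr and registers it; on
+ * success (or -EEXIST) the framework has assigned a logical window at
+ * my_range.io_start:
+ *
+ *        static struct logic_pio_hwaddr my_range = {
+ *                .fwnode   = my_fwnode,
+ *                .hw_start = 0,
+ *                .size     = 0x400,
+ *                .flags    = LOGIC_PIO_INDIRECT,
+ *                .hostdata = my_host,
+ *                .ops      = &my_host_ops, // ->in()/->out()/->ins()/->outs()
+ *        };
+ *
+ *        ret = logic_pio_register_range(&my_range);
+ */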
+
+/**
+ * logic_pio_unregister_range - unregister a logical PIO range for a host
+ * @range: pointer to the IO range which has been already registered.
+ *
+ * Unregister a previously-registered IO range node.
+ */
+void logic_pio_unregister_range(struct logic_pio_hwaddr *range)
+{
+ mutex_lock(&io_range_mutex);
+ list_del_rcu(&range->list);
+ mutex_unlock(&io_range_mutex);
+ synchronize_rcu();
+}
+
+/**
+ * find_io_range_by_fwnode - find logical PIO range for given FW node
+ * @fwnode: FW node handle associated with logical PIO range
+ *
+ * Returns pointer to node on success, NULL otherwise.
+ *
+ * Traverse the io_range_list to find the registered node for @fwnode.
+ */
+struct logic_pio_hwaddr *find_io_range_by_fwnode(struct fwnode_handle *fwnode)
+{
+ struct logic_pio_hwaddr *range, *found_range = NULL;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(range, &io_range_list, list) {
+ if (range->fwnode == fwnode) {
+ found_range = range;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return found_range;
+}
+
+/* Return a registered range given an input PIO token */
+static struct logic_pio_hwaddr *find_io_range(unsigned long pio)
+{
+ struct logic_pio_hwaddr *range, *found_range = NULL;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(range, &io_range_list, list) {
+ if (in_range(pio, range->io_start, range->size)) {
+ found_range = range;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ if (!found_range)
+ pr_err("PIO entry token 0x%lx invalid\n", pio);
+
+ return found_range;
+}
+
+/**
+ * logic_pio_to_hwaddr - translate logical PIO to HW address
+ * @pio: logical PIO value
+ *
+ * Returns HW address if valid, ~0 otherwise.
+ *
+ * Translate the input logical PIO to the corresponding hardware address.
+ * The input PIO should be unique in the whole logical PIO space.
+ */
+resource_size_t logic_pio_to_hwaddr(unsigned long pio)
+{
+ struct logic_pio_hwaddr *range;
+
+ range = find_io_range(pio);
+ if (range)
+ return range->hw_start + pio - range->io_start;
+
+ return (resource_size_t)~0;
+}
+
+/**
+ * logic_pio_trans_hwaddr - translate HW address to logical PIO
+ * @fwnode: FW node reference for the host
+ * @addr: Host-relative HW address
+ * @size: size to translate
+ *
+ * Returns Logical PIO value if successful, ~0UL otherwise
+ */
+unsigned long logic_pio_trans_hwaddr(struct fwnode_handle *fwnode,
+ resource_size_t addr, resource_size_t size)
+{
+ struct logic_pio_hwaddr *range;
+
+ range = find_io_range_by_fwnode(fwnode);
+ if (!range || range->flags == LOGIC_PIO_CPU_MMIO) {
+ pr_err("IO range not found or invalid\n");
+ return ~0UL;
+ }
+ if (range->size < size) {
+ pr_err("resource size %pa cannot fit in IO range size %pa\n",
+ &size, &range->size);
+ return ~0UL;
+ }
+ return addr - range->hw_start + range->io_start;
+}
+
+unsigned long logic_pio_trans_cpuaddr(resource_size_t addr)
+{
+ struct logic_pio_hwaddr *range;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(range, &io_range_list, list) {
+ if (range->flags != LOGIC_PIO_CPU_MMIO)
+ continue;
+ if (in_range(addr, range->hw_start, range->size)) {
+ unsigned long cpuaddr;
+
+ cpuaddr = addr - range->hw_start + range->io_start;
+
+ rcu_read_unlock();
+ return cpuaddr;
+ }
+ }
+ rcu_read_unlock();
+
+ pr_err("addr %pa not registered in io_range_list\n", &addr);
+
+ return ~0UL;
+}
+
+#if defined(CONFIG_INDIRECT_PIO) && defined(PCI_IOBASE)
+#define BUILD_LOGIC_IO(bwl, type) \
+type logic_in##bwl(unsigned long addr) \
+{ \
+ type ret = (type)~0; \
+ \
+ if (addr < MMIO_UPPER_LIMIT) { \
+ ret = _in##bwl(addr); \
+ } else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) { \
+ struct logic_pio_hwaddr *entry = find_io_range(addr); \
+ \
+ if (entry) \
+ ret = entry->ops->in(entry->hostdata, \
+ addr, sizeof(type)); \
+ else \
+ WARN_ON_ONCE(1); \
+ } \
+ return ret; \
+} \
+ \
+void logic_out##bwl(type value, unsigned long addr) \
+{ \
+ if (addr < MMIO_UPPER_LIMIT) { \
+ _out##bwl(value, addr); \
+ } else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) { \
+ struct logic_pio_hwaddr *entry = find_io_range(addr); \
+ \
+ if (entry) \
+ entry->ops->out(entry->hostdata, \
+ addr, value, sizeof(type)); \
+ else \
+ WARN_ON_ONCE(1); \
+ } \
+} \
+ \
+void logic_ins##bwl(unsigned long addr, void *buffer, \
+ unsigned int count) \
+{ \
+ if (addr < MMIO_UPPER_LIMIT) { \
+ reads##bwl(PCI_IOBASE + addr, buffer, count); \
+ } else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) { \
+ struct logic_pio_hwaddr *entry = find_io_range(addr); \
+ \
+ if (entry) \
+ entry->ops->ins(entry->hostdata, \
+ addr, buffer, sizeof(type), count); \
+ else \
+ WARN_ON_ONCE(1); \
+ } \
+ \
+} \
+ \
+void logic_outs##bwl(unsigned long addr, const void *buffer, \
+ unsigned int count) \
+{ \
+ if (addr < MMIO_UPPER_LIMIT) { \
+ writes##bwl(PCI_IOBASE + addr, buffer, count); \
+ } else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) { \
+ struct logic_pio_hwaddr *entry = find_io_range(addr); \
+ \
+ if (entry) \
+ entry->ops->outs(entry->hostdata, \
+ addr, buffer, sizeof(type), count); \
+ else \
+ WARN_ON_ONCE(1); \
+ } \
+}
+
+BUILD_LOGIC_IO(b, u8)
+EXPORT_SYMBOL(logic_inb);
+EXPORT_SYMBOL(logic_insb);
+EXPORT_SYMBOL(logic_outb);
+EXPORT_SYMBOL(logic_outsb);
+
+BUILD_LOGIC_IO(w, u16)
+EXPORT_SYMBOL(logic_inw);
+EXPORT_SYMBOL(logic_insw);
+EXPORT_SYMBOL(logic_outw);
+EXPORT_SYMBOL(logic_outsw);
+
+BUILD_LOGIC_IO(l, u32)
+EXPORT_SYMBOL(logic_inl);
+EXPORT_SYMBOL(logic_insl);
+EXPORT_SYMBOL(logic_outl);
+EXPORT_SYMBOL(logic_outsl);
+
+#endif /* CONFIG_INDIRECT_PIO && PCI_IOBASE */
diff --git a/lib/lru_cache.c b/lib/lru_cache.c
new file mode 100644
index 000000000..b3d918761
--- /dev/null
+++ b/lib/lru_cache.c
@@ -0,0 +1,622 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ lru_cache.c
+
+ This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
+
+ Copyright (C) 2003-2008, LINBIT Information Technologies GmbH.
+ Copyright (C) 2003-2008, Philipp Reisner <philipp.reisner@linbit.com>.
+ Copyright (C) 2003-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
+
+
+ */
+
+#include <linux/module.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/string.h> /* for memset */
+#include <linux/seq_file.h> /* for seq_printf */
+#include <linux/lru_cache.h>
+
+MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
+ "Lars Ellenberg <lars@linbit.com>");
+MODULE_DESCRIPTION("lru_cache - Track sets of hot objects");
+MODULE_LICENSE("GPL");
+
+/* This is a developer's aid only.
+ * It catches concurrent access (lack of locking on the user's part). */
+#define PARANOIA_ENTRY() do { \
+ BUG_ON(!lc); \
+ BUG_ON(!lc->nr_elements); \
+ BUG_ON(test_and_set_bit(__LC_PARANOIA, &lc->flags)); \
+} while (0)
+
+#define RETURN(x...) do { \
+ clear_bit_unlock(__LC_PARANOIA, &lc->flags); \
+ return x ; } while (0)
+
+/* BUG() if e is not one of the elements tracked by lc */
+#define PARANOIA_LC_ELEMENT(lc, e) do { \
+ struct lru_cache *lc_ = (lc); \
+ struct lc_element *e_ = (e); \
+ unsigned i = e_->lc_index; \
+ BUG_ON(i >= lc_->nr_elements); \
+ BUG_ON(lc_->lc_element[i] != e_); } while (0)
+
+
+/* We need to atomically
+ * - try to grab the lock (set LC_LOCKED)
+ * - only if there is no pending transaction
+ * (neither LC_DIRTY nor LC_STARVING is set)
+ * Because of PARANOIA_ENTRY() above abusing lc->flags as well,
+ * it is not sufficient to just say
+ * return 0 == cmpxchg(&lc->flags, 0, LC_LOCKED);
+ */
+int lc_try_lock(struct lru_cache *lc)
+{
+ unsigned long val;
+ do {
+ val = cmpxchg(&lc->flags, 0, LC_LOCKED);
+ } while (unlikely (val == LC_PARANOIA));
+ /* Spin until no-one is inside a PARANOIA_ENTRY()/RETURN() section. */
+ return 0 == val;
+}
+
+/**
+ * lc_create - prepares to track objects in an active set
+ * @name: descriptive name only used in lc_seq_printf_stats and lc_seq_dump_details
+ * @cache: cache root pointer
+ * @max_pending_changes: maximum changes to accumulate until a transaction is required
+ * @e_count: number of elements allowed to be active simultaneously
+ * @e_size: size of the tracked objects
+ * @e_off: offset to the &struct lc_element member in a tracked object
+ *
+ * Returns a pointer to a newly initialized struct lru_cache on success,
+ * or NULL on (allocation) failure.
+ */
+struct lru_cache *lc_create(const char *name, struct kmem_cache *cache,
+ unsigned max_pending_changes,
+ unsigned e_count, size_t e_size, size_t e_off)
+{
+ struct hlist_head *slot = NULL;
+ struct lc_element **element = NULL;
+ struct lru_cache *lc;
+ struct lc_element *e;
+ unsigned cache_obj_size = kmem_cache_size(cache);
+ unsigned i;
+
+ WARN_ON(cache_obj_size < e_size);
+ if (cache_obj_size < e_size)
+ return NULL;
+
+ /* e_count too big; would probably fail the allocation below anyway.
+ * For typical use cases, e_count should be a few thousand at most. */
+ if (e_count > LC_MAX_ACTIVE)
+ return NULL;
+
+ slot = kcalloc(e_count, sizeof(struct hlist_head), GFP_KERNEL);
+ if (!slot)
+ goto out_fail;
+ element = kcalloc(e_count, sizeof(struct lc_element *), GFP_KERNEL);
+ if (!element)
+ goto out_fail;
+
+ lc = kzalloc(sizeof(*lc), GFP_KERNEL);
+ if (!lc)
+ goto out_fail;
+
+ INIT_LIST_HEAD(&lc->in_use);
+ INIT_LIST_HEAD(&lc->lru);
+ INIT_LIST_HEAD(&lc->free);
+ INIT_LIST_HEAD(&lc->to_be_changed);
+
+ lc->name = name;
+ lc->element_size = e_size;
+ lc->element_off = e_off;
+ lc->nr_elements = e_count;
+ lc->max_pending_changes = max_pending_changes;
+ lc->lc_cache = cache;
+ lc->lc_element = element;
+ lc->lc_slot = slot;
+
+ /* preallocate all objects */
+ for (i = 0; i < e_count; i++) {
+ void *p = kmem_cache_alloc(cache, GFP_KERNEL);
+ if (!p)
+ break;
+ memset(p, 0, lc->element_size);
+ e = p + e_off;
+ e->lc_index = i;
+ e->lc_number = LC_FREE;
+ e->lc_new_number = LC_FREE;
+ list_add(&e->list, &lc->free);
+ element[i] = e;
+ }
+ if (i == e_count)
+ return lc;
+
+ /* else: could not allocate all elements, give up */
+ while (i) {
+ void *p = element[--i];
+ kmem_cache_free(cache, p - e_off);
+ }
+ kfree(lc);
+out_fail:
+ kfree(element);
+ kfree(slot);
+ return NULL;
+}
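+
+/*
+ * Usage sketch (illustrative only; "struct my_extent" and the names below
+ * are hypothetical). The tracked object embeds a struct lc_element, and
+ * e_off tells lc_create() where to find it:
+ *
+ *        struct my_extent {
+ *                struct lc_element lce;
+ *                // ... per-extent state ...
+ *        };
+ *
+ *        cache = kmem_cache_create("my_extent", sizeof(struct my_extent),
+ *                                  0, 0, NULL);
+ *        lc = lc_create("my-lru", cache,
+ *                       16,        // max pending changes per transaction
+ *                       64,        // simultaneously active elements
+ *                       sizeof(struct my_extent),
+ *                       offsetof(struct my_extent, lce));
+ */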
+
+static void lc_free_by_index(struct lru_cache *lc, unsigned i)
+{
+ void *p = lc->lc_element[i];
+ WARN_ON(!p);
+ if (p) {
+ p -= lc->element_off;
+ kmem_cache_free(lc->lc_cache, p);
+ }
+}
+
+/**
+ * lc_destroy - frees memory allocated by lc_create()
+ * @lc: the lru cache to destroy
+ */
+void lc_destroy(struct lru_cache *lc)
+{
+ unsigned i;
+ if (!lc)
+ return;
+ for (i = 0; i < lc->nr_elements; i++)
+ lc_free_by_index(lc, i);
+ kfree(lc->lc_element);
+ kfree(lc->lc_slot);
+ kfree(lc);
+}
+
+/**
+ * lc_reset - does a full reset for @lc and the hash table slots.
+ * @lc: the lru cache to operate on
+ *
+ * It is roughly the equivalent of re-allocating a fresh lru_cache object,
+ * basically a shortcut to lc_destroy(lc); lc = lc_create(...);
+ */
+void lc_reset(struct lru_cache *lc)
+{
+ unsigned i;
+
+ INIT_LIST_HEAD(&lc->in_use);
+ INIT_LIST_HEAD(&lc->lru);
+ INIT_LIST_HEAD(&lc->free);
+ INIT_LIST_HEAD(&lc->to_be_changed);
+ lc->used = 0;
+ lc->hits = 0;
+ lc->misses = 0;
+ lc->starving = 0;
+ lc->locked = 0;
+ lc->changed = 0;
+ lc->pending_changes = 0;
+ lc->flags = 0;
+ memset(lc->lc_slot, 0, sizeof(struct hlist_head) * lc->nr_elements);
+
+ for (i = 0; i < lc->nr_elements; i++) {
+ struct lc_element *e = lc->lc_element[i];
+ void *p = e;
+ p -= lc->element_off;
+ memset(p, 0, lc->element_size);
+ /* re-init it */
+ e->lc_index = i;
+ e->lc_number = LC_FREE;
+ e->lc_new_number = LC_FREE;
+ list_add(&e->list, &lc->free);
+ }
+}
+
+/**
+ * lc_seq_printf_stats - print stats about @lc into @seq
+ * @seq: the seq_file to print into
+ * @lc: the lru cache to print statistics of
+ */
+void lc_seq_printf_stats(struct seq_file *seq, struct lru_cache *lc)
+{
+ /* NOTE:
+ * total calls to lc_get are
+ * (starving + hits + misses)
+ * misses include "locked" count (update from an other thread in
+ * progress) and "changed", when this in fact lead to an successful
+ * update of the cache.
+ */
+ seq_printf(seq, "\t%s: used:%u/%u hits:%lu misses:%lu starving:%lu locked:%lu changed:%lu\n",
+ lc->name, lc->used, lc->nr_elements,
+ lc->hits, lc->misses, lc->starving, lc->locked, lc->changed);
+}
+
+static struct hlist_head *lc_hash_slot(struct lru_cache *lc, unsigned int enr)
+{
+ return lc->lc_slot + (enr % lc->nr_elements);
+}
+
+
+static struct lc_element *__lc_find(struct lru_cache *lc, unsigned int enr,
+ bool include_changing)
+{
+ struct lc_element *e;
+
+ BUG_ON(!lc);
+ BUG_ON(!lc->nr_elements);
+ hlist_for_each_entry(e, lc_hash_slot(lc, enr), colision) {
+ /* "about to be changed" elements, pending transaction commit,
+ * are hashed by their "new number". "Normal" elements have
+ * lc_number == lc_new_number. */
+ if (e->lc_new_number != enr)
+ continue;
+ if (e->lc_new_number == e->lc_number || include_changing)
+ return e;
+ break;
+ }
+ return NULL;
+}
+
+/**
+ * lc_find - find element by label, if present in the hash table
+ * @lc: The lru_cache object
+ * @enr: element number
+ *
+ * Returns the pointer to an element, if the element with the requested
+ * "label" or element number is present in the hash table,
+ * or NULL if not found. Does not change the refcnt.
+ * Ignores elements that are "about to be used", i.e. not yet in the active
+ * set, but still pending transaction commit.
+ */
+struct lc_element *lc_find(struct lru_cache *lc, unsigned int enr)
+{
+ return __lc_find(lc, enr, 0);
+}
+
+/**
+ * lc_is_used - find element by label
+ * @lc: The lru_cache object
+ * @enr: element number
+ *
+ * Returns true, if the element with the requested "label" or element number is
+ * present in the hash table, and is used (refcnt > 0).
+ * Also finds elements that are not _currently_ used but only "about to be
+ * used", i.e. on the "to_be_changed" list, pending transaction commit.
+ */
+bool lc_is_used(struct lru_cache *lc, unsigned int enr)
+{
+ struct lc_element *e = __lc_find(lc, enr, 1);
+ return e && e->refcnt;
+}
+
+/**
+ * lc_del - removes an element from the cache
+ * @lc: The lru_cache object
+ * @e: The element to remove
+ *
+ * @e must be unused (refcnt == 0). Moves @e from "lru" to "free" list,
+ * sets @e->enr to %LC_FREE.
+ */
+void lc_del(struct lru_cache *lc, struct lc_element *e)
+{
+ PARANOIA_ENTRY();
+ PARANOIA_LC_ELEMENT(lc, e);
+ BUG_ON(e->refcnt);
+
+ e->lc_number = e->lc_new_number = LC_FREE;
+ hlist_del_init(&e->colision);
+ list_move(&e->list, &lc->free);
+ RETURN();
+}
+
+static struct lc_element *lc_prepare_for_change(struct lru_cache *lc, unsigned new_number)
+{
+ struct list_head *n;
+ struct lc_element *e;
+
+ if (!list_empty(&lc->free))
+ n = lc->free.next;
+ else if (!list_empty(&lc->lru))
+ n = lc->lru.prev;
+ else
+ return NULL;
+
+ e = list_entry(n, struct lc_element, list);
+ PARANOIA_LC_ELEMENT(lc, e);
+
+ e->lc_new_number = new_number;
+ if (!hlist_unhashed(&e->colision))
+ __hlist_del(&e->colision);
+ hlist_add_head(&e->colision, lc_hash_slot(lc, new_number));
+ list_move(&e->list, &lc->to_be_changed);
+
+ return e;
+}
+
+static int lc_unused_element_available(struct lru_cache *lc)
+{
+ if (!list_empty(&lc->free))
+ return 1; /* something on the free list */
+ if (!list_empty(&lc->lru))
+ return 1; /* something to evict */
+
+ return 0;
+}
+
+/* used as internal flags to __lc_get */
+enum {
+ LC_GET_MAY_CHANGE = 1,
+ LC_GET_MAY_USE_UNCOMMITTED = 2,
+};
+
+static struct lc_element *__lc_get(struct lru_cache *lc, unsigned int enr, unsigned int flags)
+{
+ struct lc_element *e;
+
+ PARANOIA_ENTRY();
+ if (test_bit(__LC_STARVING, &lc->flags)) {
+ ++lc->starving;
+ RETURN(NULL);
+ }
+
+ e = __lc_find(lc, enr, 1);
+ /* if lc_new_number != lc_number,
+ * this enr is currently being pulled in already,
+ * and will be available once the pending transaction
+ * has been committed. */
+ if (e) {
+ if (e->lc_new_number != e->lc_number) {
+ /* It has been found above, but on the "to_be_changed"
+ * list, not yet committed. Don't pull it in twice,
+ * wait for the transaction, then try again...
+ */
+ if (!(flags & LC_GET_MAY_USE_UNCOMMITTED))
+ RETURN(NULL);
+ /* ... unless the caller is aware of the implications,
+ * probably preparing a cumulative transaction. */
+ ++e->refcnt;
+ ++lc->hits;
+ RETURN(e);
+ }
+ /* else: lc_new_number == lc_number; a real hit. */
+ ++lc->hits;
+ if (e->refcnt++ == 0)
+ lc->used++;
+ list_move(&e->list, &lc->in_use); /* Not evictable... */
+ RETURN(e);
+ }
+ /* e == NULL */
+
+ ++lc->misses;
+ if (!(flags & LC_GET_MAY_CHANGE))
+ RETURN(NULL);
+
+ /* To avoid races with lc_try_lock(), first, mark us dirty
+ * (using test_and_set_bit, as it implies memory barriers), ... */
+ test_and_set_bit(__LC_DIRTY, &lc->flags);
+
+ /* ... only then check if it is locked anyways. If lc_unlock clears
+ * the dirty bit again, that's not a problem, we will come here again.
+ */
+ if (test_bit(__LC_LOCKED, &lc->flags)) {
+ ++lc->locked;
+ RETURN(NULL);
+ }
+
+ /* In case there is nothing available and we can not kick out
+ * the LRU element, we have to wait ...
+ */
+ if (!lc_unused_element_available(lc)) {
+ set_bit(__LC_STARVING, &lc->flags);
+ RETURN(NULL);
+ }
+
+ /* It was not present in the active set. We are going to recycle an
+ * unused (or even "free") element, but we won't accumulate more than
+ * max_pending_changes changes. */
+ if (lc->pending_changes >= lc->max_pending_changes)
+ RETURN(NULL);
+
+ e = lc_prepare_for_change(lc, enr);
+ BUG_ON(!e);
+
+ clear_bit(__LC_STARVING, &lc->flags);
+ BUG_ON(++e->refcnt != 1);
+ lc->used++;
+ lc->pending_changes++;
+
+ RETURN(e);
+}
+
+/**
+ * lc_get - get element by label, maybe change the active set
+ * @lc: the lru cache to operate on
+ * @enr: the label to look up
+ *
+ * Finds an element in the cache, increases its usage count,
+ * "touches" and returns it.
+ *
+ * In case the requested number is not present, it needs to be added to the
+ * cache. Therefore it is possible that another element is evicted from
+ * the cache. In either case, the user is notified so he is able to e.g. keep
+ * a persistent log of the cache changes, and therefore the objects in use.
+ *
+ * Return values:
+ * NULL
+ * The cache was marked %LC_STARVING,
+ * or the requested label was not in the active set
+ * and a changing transaction is still pending (@lc was marked %LC_DIRTY).
+ * Or no unused or free element could be recycled (@lc will be marked as
+ * %LC_STARVING, blocking further lc_get() operations).
+ *
+ * pointer to the element with the REQUESTED element number.
+ * In this case, it can be used right away
+ *
+ * pointer to an UNUSED element with some different element number,
+ * where that different number may also be %LC_FREE.
+ *
+ * In this case, the cache is marked %LC_DIRTY,
+ * so lc_try_lock() will no longer succeed.
+ * The returned element pointer is moved to the "to_be_changed" list,
+ * and registered with the new element number on the hash collision chains,
+ * so it is possible to pick it up from lc_is_used().
+ * Up to "max_pending_changes" (see lc_create()) can be accumulated.
+ * The user now should do whatever housekeeping is necessary,
+ * typically serialize on lc_try_lock_for_transaction(), then call
+ * lc_committed(lc) and lc_unlock(), to finish the change.
+ *
+ * NOTE: The user needs to check the lc_number on EACH use, so he recognizes
+ * any cache set change.
+ */
+struct lc_element *lc_get(struct lru_cache *lc, unsigned int enr)
+{
+ return __lc_get(lc, enr, LC_GET_MAY_CHANGE);
+}
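+
+/*
+ * Sketch of the change protocol described above (illustrative only;
+ * write_log_entry() stands in for whatever housekeeping the user does):
+ *
+ *        e = lc_get(lc, enr);
+ *        if (e && e->lc_number != enr) {
+ *                // active set change pending: record it, then commit
+ *                if (lc_try_lock_for_transaction(lc)) {
+ *                        write_log_entry(e);
+ *                        lc_committed(lc);
+ *                        lc_unlock(lc);
+ *                }
+ *        }
+ */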
+
+/**
+ * lc_get_cumulative - like lc_get; also finds to-be-changed elements
+ * @lc: the lru cache to operate on
+ * @enr: the label to look up
+ *
+ * Unlike lc_get() this also returns the element for @enr, if it belongs to
+ * a pending transaction, so the return values are like for lc_get(),
+ * plus:
+ *
+ * pointer to an element already on the "to_be_changed" list.
+ * In this case, the cache was already marked %LC_DIRTY.
+ *
+ * Caller needs to make sure that the pending transaction is completed,
+ * before proceeding to actually use this element.
+ */
+struct lc_element *lc_get_cumulative(struct lru_cache *lc, unsigned int enr)
+{
+ return __lc_get(lc, enr, LC_GET_MAY_CHANGE|LC_GET_MAY_USE_UNCOMMITTED);
+}
+
+/**
+ * lc_try_get - get element by label, if present; do not change the active set
+ * @lc: the lru cache to operate on
+ * @enr: the label to look up
+ *
+ * Finds an element in the cache, increases its usage count,
+ * "touches" and returns it.
+ *
+ * Return values:
+ * NULL
+ * The cache was marked %LC_STARVING,
+ * or the requested label was not in the active set
+ *
+ * pointer to the element with the REQUESTED element number.
+ * In this case, it can be used right away
+ */
+struct lc_element *lc_try_get(struct lru_cache *lc, unsigned int enr)
+{
+ return __lc_get(lc, enr, 0);
+}
+
+/**
+ * lc_committed - tell @lc that pending changes have been recorded
+ * @lc: the lru cache to operate on
+ *
+ * User is expected to serialize on explicit lc_try_lock_for_transaction()
+ * before the transaction is started, and later needs to lc_unlock() explicitly
+ * as well.
+ */
+void lc_committed(struct lru_cache *lc)
+{
+ struct lc_element *e, *tmp;
+
+ PARANOIA_ENTRY();
+ list_for_each_entry_safe(e, tmp, &lc->to_be_changed, list) {
+ /* count number of changes, not number of transactions */
+ ++lc->changed;
+ e->lc_number = e->lc_new_number;
+ list_move(&e->list, &lc->in_use);
+ }
+ lc->pending_changes = 0;
+ RETURN();
+}
+
+
+/**
+ * lc_put - give up refcnt of @e
+ * @lc: the lru cache to operate on
+ * @e: the element to put
+ *
+ * If refcnt reaches zero, the element is moved to the lru list,
+ * and a %LC_STARVING (if set) is cleared.
+ * Returns the new (post-decrement) refcnt.
+ */
+unsigned int lc_put(struct lru_cache *lc, struct lc_element *e)
+{
+ PARANOIA_ENTRY();
+ PARANOIA_LC_ELEMENT(lc, e);
+ BUG_ON(e->refcnt == 0);
+ BUG_ON(e->lc_number != e->lc_new_number);
+ if (--e->refcnt == 0) {
+ /* move it to the front of LRU. */
+ list_move(&e->list, &lc->lru);
+ lc->used--;
+ clear_bit_unlock(__LC_STARVING, &lc->flags);
+ }
+ RETURN(e->refcnt);
+}
+
+/**
+ * lc_element_by_index
+ * @lc: the lru cache to operate on
+ * @i: the index of the element to return
+ */
+struct lc_element *lc_element_by_index(struct lru_cache *lc, unsigned i)
+{
+ BUG_ON(i >= lc->nr_elements);
+ BUG_ON(lc->lc_element[i] == NULL);
+ BUG_ON(lc->lc_element[i]->lc_index != i);
+ return lc->lc_element[i];
+}
+
+/**
+ * lc_seq_dump_details - Dump a complete LRU cache to seq in textual form.
+ * @lc: the lru cache to operate on
+ * @seq: the &struct seq_file pointer to seq_printf into
+ * @utext: user supplied additional "heading" or other info
+ * @detail: function pointer the user may provide to dump further details
+ * of the object the lc_element is embedded in. May be NULL.
+ * Note: a leading space ' ' and trailing newline '\n' are implied.
+ */
+void lc_seq_dump_details(struct seq_file *seq, struct lru_cache *lc, char *utext,
+ void (*detail) (struct seq_file *, struct lc_element *))
+{
+ unsigned int nr_elements = lc->nr_elements;
+ struct lc_element *e;
+ int i;
+
+ seq_printf(seq, "\tnn: lc_number (new nr) refcnt %s\n ", utext);
+ for (i = 0; i < nr_elements; i++) {
+ e = lc_element_by_index(lc, i);
+ if (e->lc_number != e->lc_new_number)
+ seq_printf(seq, "\t%5d: %6d %8d %6d ",
+ i, e->lc_number, e->lc_new_number, e->refcnt);
+ else
+ seq_printf(seq, "\t%5d: %6d %-8s %6d ",
+ i, e->lc_number, "-\"-", e->refcnt);
+ if (detail)
+ detail(seq, e);
+ seq_putc(seq, '\n');
+ }
+}
+
+EXPORT_SYMBOL(lc_create);
+EXPORT_SYMBOL(lc_reset);
+EXPORT_SYMBOL(lc_destroy);
+EXPORT_SYMBOL(lc_del);
+EXPORT_SYMBOL(lc_try_get);
+EXPORT_SYMBOL(lc_find);
+EXPORT_SYMBOL(lc_get);
+EXPORT_SYMBOL(lc_put);
+EXPORT_SYMBOL(lc_committed);
+EXPORT_SYMBOL(lc_element_by_index);
+EXPORT_SYMBOL(lc_seq_printf_stats);
+EXPORT_SYMBOL(lc_seq_dump_details);
+EXPORT_SYMBOL(lc_try_lock);
+EXPORT_SYMBOL(lc_is_used);
+EXPORT_SYMBOL(lc_get_cumulative);
diff --git a/lib/lshrdi3.c b/lib/lshrdi3.c
new file mode 100644
index 000000000..312838ce2
--- /dev/null
+++ b/lib/lshrdi3.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * lib/lshrdi3.c
+ */
+
+#include <linux/module.h>
+#include <linux/libgcc.h>
+
+long long notrace __lshrdi3(long long u, word_type b)
+{
+ DWunion uu, w;
+ word_type bm;
+
+ if (b == 0)
+ return u;
+
+ uu.ll = u;
+ bm = 32 - b;
+
+ if (bm <= 0) {
+ w.s.high = 0;
+ w.s.low = (unsigned int) uu.s.high >> -bm;
+ } else {
+ const unsigned int carries = (unsigned int) uu.s.high << bm;
+
+ w.s.high = (unsigned int) uu.s.high >> b;
+ w.s.low = ((unsigned int) uu.s.low >> b) | carries;
+ }
+
+ return w.ll;
+}
+EXPORT_SYMBOL(__lshrdi3);
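+
+/*
+ * Worked examples of the two branches above:
+ *
+ *   u = 0x0000001000000000, b = 36: bm = -4, so the high word shifts
+ *   entirely into the low word: low = 0x10 >> 4 = 0x1, high = 0,
+ *   giving 0x0000000000000001.
+ *
+ *   u = 0x0000000100000001, b = 4: bm = 28, carries = 0x1 << 28 =
+ *   0x10000000, so low = (0x1 >> 4) | 0x10000000 = 0x10000000 and
+ *   high = 0x1 >> 4 = 0, giving 0x0000000010000000.
+ */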
diff --git a/lib/lz4/Makefile b/lib/lz4/Makefile
new file mode 100644
index 000000000..5b42242af
--- /dev/null
+++ b/lib/lz4/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+ccflags-y += -O3
+
+obj-$(CONFIG_LZ4_COMPRESS) += lz4_compress.o
+obj-$(CONFIG_LZ4HC_COMPRESS) += lz4hc_compress.o
+obj-$(CONFIG_LZ4_DECOMPRESS) += lz4_decompress.o
diff --git a/lib/lz4/lz4_compress.c b/lib/lz4/lz4_compress.c
new file mode 100644
index 000000000..90bb67994
--- /dev/null
+++ b/lib/lz4/lz4_compress.c
@@ -0,0 +1,940 @@
+/*
+ * LZ4 - Fast LZ compression algorithm
+ * Copyright (C) 2011 - 2016, Yann Collet.
+ * BSD 2 - Clause License (http://www.opensource.org/licenses/bsd - license.php)
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * You can contact the author at :
+ * - LZ4 homepage : http://www.lz4.org
+ * - LZ4 source repository : https://github.com/lz4/lz4
+ *
+ * Changed for kernel usage by:
+ * Sven Schmidt <4sschmid@informatik.uni-hamburg.de>
+ */
+
+/*-************************************
+ * Dependencies
+ **************************************/
+#include <linux/lz4.h>
+#include "lz4defs.h"
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <asm/unaligned.h>
+
+static const int LZ4_minLength = (MFLIMIT + 1);
+static const int LZ4_64Klimit = ((64 * KB) + (MFLIMIT - 1));
+
+/*-******************************
+ * Compression functions
+ ********************************/
+static FORCE_INLINE U32 LZ4_hash4(
+ U32 sequence,
+ tableType_t const tableType)
+{
+ if (tableType == byU16)
+ return ((sequence * 2654435761U)
+ >> ((MINMATCH * 8) - (LZ4_HASHLOG + 1)));
+ else
+ return ((sequence * 2654435761U)
+ >> ((MINMATCH * 8) - LZ4_HASHLOG));
+}
+
+static FORCE_INLINE U32 LZ4_hash5(
+ U64 sequence,
+ tableType_t const tableType)
+{
+ const U32 hashLog = (tableType == byU16)
+ ? LZ4_HASHLOG + 1
+ : LZ4_HASHLOG;
+
+#if LZ4_LITTLE_ENDIAN
+ static const U64 prime5bytes = 889523592379ULL;
+
+ return (U32)(((sequence << 24) * prime5bytes) >> (64 - hashLog));
+#else
+ static const U64 prime8bytes = 11400714785074694791ULL;
+
+ return (U32)(((sequence >> 24) * prime8bytes) >> (64 - hashLog));
+#endif
+}
+
+static FORCE_INLINE U32 LZ4_hashPosition(
+ const void *p,
+ tableType_t const tableType)
+{
+#if LZ4_ARCH64
+ if (tableType == byU32)
+ return LZ4_hash5(LZ4_read_ARCH(p), tableType);
+#endif
+
+ return LZ4_hash4(LZ4_read32(p), tableType);
+}
+
+static void LZ4_putPositionOnHash(
+ const BYTE *p,
+ U32 h,
+ void *tableBase,
+ tableType_t const tableType,
+ const BYTE *srcBase)
+{
+ switch (tableType) {
+ case byPtr:
+ {
+ const BYTE **hashTable = (const BYTE **)tableBase;
+
+ hashTable[h] = p;
+ return;
+ }
+ case byU32:
+ {
+ U32 *hashTable = (U32 *) tableBase;
+
+ hashTable[h] = (U32)(p - srcBase);
+ return;
+ }
+ case byU16:
+ {
+ U16 *hashTable = (U16 *) tableBase;
+
+ hashTable[h] = (U16)(p - srcBase);
+ return;
+ }
+ }
+}
+
+static FORCE_INLINE void LZ4_putPosition(
+ const BYTE *p,
+ void *tableBase,
+ tableType_t tableType,
+ const BYTE *srcBase)
+{
+ U32 const h = LZ4_hashPosition(p, tableType);
+
+ LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase);
+}
+
+static const BYTE *LZ4_getPositionOnHash(
+ U32 h,
+ void *tableBase,
+ tableType_t tableType,
+ const BYTE *srcBase)
+{
+ if (tableType == byPtr) {
+ const BYTE **hashTable = (const BYTE **) tableBase;
+
+ return hashTable[h];
+ }
+
+ if (tableType == byU32) {
+ const U32 * const hashTable = (U32 *) tableBase;
+
+ return hashTable[h] + srcBase;
+ }
+
+ {
+ /* default, to ensure a return */
+ const U16 * const hashTable = (U16 *) tableBase;
+
+ return hashTable[h] + srcBase;
+ }
+}
+
+static FORCE_INLINE const BYTE *LZ4_getPosition(
+ const BYTE *p,
+ void *tableBase,
+ tableType_t tableType,
+ const BYTE *srcBase)
+{
+ U32 const h = LZ4_hashPosition(p, tableType);
+
+ return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase);
+}
+
+
+/*
+ * LZ4_compress_generic() :
+ * inlined so that the directive arguments are compile-time constants
+ * and the branches on them are resolved during compilation
+ */
+static FORCE_INLINE int LZ4_compress_generic(
+ LZ4_stream_t_internal * const dictPtr,
+ const char * const source,
+ char * const dest,
+ const int inputSize,
+ const int maxOutputSize,
+ const limitedOutput_directive outputLimited,
+ const tableType_t tableType,
+ const dict_directive dict,
+ const dictIssue_directive dictIssue,
+ const U32 acceleration)
+{
+ const BYTE *ip = (const BYTE *) source;
+ const BYTE *base;
+ const BYTE *lowLimit;
+ const BYTE * const lowRefLimit = ip - dictPtr->dictSize;
+ const BYTE * const dictionary = dictPtr->dictionary;
+ const BYTE * const dictEnd = dictionary + dictPtr->dictSize;
+ const size_t dictDelta = dictEnd - (const BYTE *)source;
+ const BYTE *anchor = (const BYTE *) source;
+ const BYTE * const iend = ip + inputSize;
+ const BYTE * const mflimit = iend - MFLIMIT;
+ const BYTE * const matchlimit = iend - LASTLITERALS;
+
+ BYTE *op = (BYTE *) dest;
+ BYTE * const olimit = op + maxOutputSize;
+
+ U32 forwardH;
+ size_t refDelta = 0;
+
+ /* Init conditions */
+ if ((U32)inputSize > (U32)LZ4_MAX_INPUT_SIZE) {
+ /* Unsupported inputSize, too large (or negative) */
+ return 0;
+ }
+
+ switch (dict) {
+ case noDict:
+ default:
+ base = (const BYTE *)source;
+ lowLimit = (const BYTE *)source;
+ break;
+ case withPrefix64k:
+ base = (const BYTE *)source - dictPtr->currentOffset;
+ lowLimit = (const BYTE *)source - dictPtr->dictSize;
+ break;
+ case usingExtDict:
+ base = (const BYTE *)source - dictPtr->currentOffset;
+ lowLimit = (const BYTE *)source;
+ break;
+ }
+
+ if ((tableType == byU16)
+ && (inputSize >= LZ4_64Klimit)) {
+ /* Size too large (not within 64K limit) */
+ return 0;
+ }
+
+ if (inputSize < LZ4_minLength) {
+ /* Input too small, no compression (all literals) */
+ goto _last_literals;
+ }
+
+ /* First Byte */
+ LZ4_putPosition(ip, dictPtr->hashTable, tableType, base);
+ ip++;
+ forwardH = LZ4_hashPosition(ip, tableType);
+
+ /* Main Loop */
+ for ( ; ; ) {
+ const BYTE *match;
+ BYTE *token;
+
+ /* Find a match */
+ {
+ const BYTE *forwardIp = ip;
+ unsigned int step = 1;
+ unsigned int searchMatchNb = acceleration << LZ4_SKIPTRIGGER;
+
+ do {
+ U32 const h = forwardH;
+
+ ip = forwardIp;
+ forwardIp += step;
+ step = (searchMatchNb++ >> LZ4_SKIPTRIGGER);
+
+ if (unlikely(forwardIp > mflimit))
+ goto _last_literals;
+
+ match = LZ4_getPositionOnHash(h,
+ dictPtr->hashTable,
+ tableType, base);
+
+ if (dict == usingExtDict) {
+ if (match < (const BYTE *)source) {
+ refDelta = dictDelta;
+ lowLimit = dictionary;
+ } else {
+ refDelta = 0;
+ lowLimit = (const BYTE *)source;
+					}
+				}
+
+ forwardH = LZ4_hashPosition(forwardIp,
+ tableType);
+
+ LZ4_putPositionOnHash(ip, h, dictPtr->hashTable,
+ tableType, base);
+ } while (((dictIssue == dictSmall)
+ ? (match < lowRefLimit)
+ : 0)
+ || ((tableType == byU16)
+ ? 0
+ : (match + MAX_DISTANCE < ip))
+ || (LZ4_read32(match + refDelta)
+ != LZ4_read32(ip)));
+ }
+
+ /* Catch up */
+ while (((ip > anchor) & (match + refDelta > lowLimit))
+ && (unlikely(ip[-1] == match[refDelta - 1]))) {
+ ip--;
+ match--;
+ }
+
+ /* Encode Literals */
+ {
+ unsigned const int litLength = (unsigned int)(ip - anchor);
+
+ token = op++;
+
+ if ((outputLimited) &&
+ /* Check output buffer overflow */
+ (unlikely(op + litLength +
+ (2 + 1 + LASTLITERALS) +
+ (litLength / 255) > olimit)))
+ return 0;
+
+ if (litLength >= RUN_MASK) {
+ int len = (int)litLength - RUN_MASK;
+
+ *token = (RUN_MASK << ML_BITS);
+
+ for (; len >= 255; len -= 255)
+ *op++ = 255;
+ *op++ = (BYTE)len;
+ } else
+ *token = (BYTE)(litLength << ML_BITS);
+
+ /* Copy Literals */
+ LZ4_wildCopy(op, anchor, op + litLength);
+ op += litLength;
+ }
+
+_next_match:
+ /* Encode Offset */
+ LZ4_writeLE16(op, (U16)(ip - match));
+ op += 2;
+
+ /* Encode MatchLength */
+ {
+ unsigned int matchCode;
+
+ if ((dict == usingExtDict)
+ && (lowLimit == dictionary)) {
+ const BYTE *limit;
+
+ match += refDelta;
+ limit = ip + (dictEnd - match);
+
+ if (limit > matchlimit)
+ limit = matchlimit;
+
+ matchCode = LZ4_count(ip + MINMATCH,
+ match + MINMATCH, limit);
+
+ ip += MINMATCH + matchCode;
+
+ if (ip == limit) {
+ unsigned const int more = LZ4_count(ip,
+ (const BYTE *)source,
+ matchlimit);
+
+ matchCode += more;
+ ip += more;
+ }
+ } else {
+ matchCode = LZ4_count(ip + MINMATCH,
+ match + MINMATCH, matchlimit);
+ ip += MINMATCH + matchCode;
+ }
+
+ if (outputLimited &&
+ /* Check output buffer overflow */
+ (unlikely(op +
+ (1 + LASTLITERALS) +
+ (matchCode >> 8) > olimit)))
+ return 0;
+
+ if (matchCode >= ML_MASK) {
+ *token += ML_MASK;
+ matchCode -= ML_MASK;
+ LZ4_write32(op, 0xFFFFFFFF);
+
+ while (matchCode >= 4 * 255) {
+ op += 4;
+ LZ4_write32(op, 0xFFFFFFFF);
+ matchCode -= 4 * 255;
+ }
+
+ op += matchCode / 255;
+ *op++ = (BYTE)(matchCode % 255);
+ } else
+ *token += (BYTE)(matchCode);
+ }
+
+ anchor = ip;
+
+ /* Test end of chunk */
+ if (ip > mflimit)
+ break;
+
+ /* Fill table */
+ LZ4_putPosition(ip - 2, dictPtr->hashTable, tableType, base);
+
+ /* Test next position */
+ match = LZ4_getPosition(ip, dictPtr->hashTable,
+ tableType, base);
+
+ if (dict == usingExtDict) {
+ if (match < (const BYTE *)source) {
+ refDelta = dictDelta;
+ lowLimit = dictionary;
+ } else {
+ refDelta = 0;
+ lowLimit = (const BYTE *)source;
+ }
+ }
+
+ LZ4_putPosition(ip, dictPtr->hashTable, tableType, base);
+
+ if (((dictIssue == dictSmall) ? (match >= lowRefLimit) : 1)
+ && (match + MAX_DISTANCE >= ip)
+ && (LZ4_read32(match + refDelta) == LZ4_read32(ip))) {
+ token = op++;
+ *token = 0;
+ goto _next_match;
+ }
+
+ /* Prepare next loop */
+ forwardH = LZ4_hashPosition(++ip, tableType);
+ }
+
+_last_literals:
+ /* Encode Last Literals */
+ {
+ size_t const lastRun = (size_t)(iend - anchor);
+
+ if ((outputLimited) &&
+ /* Check output buffer overflow */
+ ((op - (BYTE *)dest) + lastRun + 1 +
+ ((lastRun + 255 - RUN_MASK) / 255) > (U32)maxOutputSize))
+ return 0;
+
+ if (lastRun >= RUN_MASK) {
+ size_t accumulator = lastRun - RUN_MASK;
+ *op++ = RUN_MASK << ML_BITS;
+ for (; accumulator >= 255; accumulator -= 255)
+ *op++ = 255;
+ *op++ = (BYTE) accumulator;
+ } else {
+ *op++ = (BYTE)(lastRun << ML_BITS);
+ }
+
+ LZ4_memcpy(op, anchor, lastRun);
+
+ op += lastRun;
+ }
+
+ /* End */
+ return (int) (((char *)op) - dest);
+}
+
+static int LZ4_compress_fast_extState(
+ void *state,
+ const char *source,
+ char *dest,
+ int inputSize,
+ int maxOutputSize,
+ int acceleration)
+{
+ LZ4_stream_t_internal *ctx = &((LZ4_stream_t *)state)->internal_donotuse;
+#if LZ4_ARCH64
+ const tableType_t tableType = byU32;
+#else
+ const tableType_t tableType = byPtr;
+#endif
+
+ LZ4_resetStream((LZ4_stream_t *)state);
+
+ if (acceleration < 1)
+ acceleration = LZ4_ACCELERATION_DEFAULT;
+
+ if (maxOutputSize >= LZ4_COMPRESSBOUND(inputSize)) {
+ if (inputSize < LZ4_64Klimit)
+ return LZ4_compress_generic(ctx, source,
+ dest, inputSize, 0,
+ noLimit, byU16, noDict,
+ noDictIssue, acceleration);
+ else
+ return LZ4_compress_generic(ctx, source,
+ dest, inputSize, 0,
+ noLimit, tableType, noDict,
+ noDictIssue, acceleration);
+ } else {
+ if (inputSize < LZ4_64Klimit)
+ return LZ4_compress_generic(ctx, source,
+ dest, inputSize,
+ maxOutputSize, limitedOutput, byU16, noDict,
+ noDictIssue, acceleration);
+ else
+ return LZ4_compress_generic(ctx, source,
+ dest, inputSize,
+ maxOutputSize, limitedOutput, tableType, noDict,
+ noDictIssue, acceleration);
+ }
+}
+
+int LZ4_compress_fast(const char *source, char *dest, int inputSize,
+ int maxOutputSize, int acceleration, void *wrkmem)
+{
+ return LZ4_compress_fast_extState(wrkmem, source, dest, inputSize,
+ maxOutputSize, acceleration);
+}
+EXPORT_SYMBOL(LZ4_compress_fast);
+
+int LZ4_compress_default(const char *source, char *dest, int inputSize,
+ int maxOutputSize, void *wrkmem)
+{
+ return LZ4_compress_fast(source, dest, inputSize,
+ maxOutputSize, LZ4_ACCELERATION_DEFAULT, wrkmem);
+}
+EXPORT_SYMBOL(LZ4_compress_default);
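+
+/*
+ * Usage sketch for the one-shot API above (illustrative; the buffers
+ * are assumptions, while LZ4_MEM_COMPRESS and LZ4_compressBound() are
+ * provided by <linux/lz4.h>):
+ *
+ *	void *wrkmem = kvmalloc(LZ4_MEM_COMPRESS, GFP_KERNEL);
+ *	char *dst = kvmalloc(LZ4_compressBound(src_len), GFP_KERNEL);
+ *	int clen = LZ4_compress_default(src, dst, src_len,
+ *					LZ4_compressBound(src_len), wrkmem);
+ *
+ * A return value of 0 means the output buffer was too small.
+ */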
+
+/*-******************************
+ * *_destSize() variant
+ ********************************/
+static int LZ4_compress_destSize_generic(
+ LZ4_stream_t_internal * const ctx,
+ const char * const src,
+ char * const dst,
+ int * const srcSizePtr,
+ const int targetDstSize,
+ const tableType_t tableType)
+{
+ const BYTE *ip = (const BYTE *) src;
+ const BYTE *base = (const BYTE *) src;
+ const BYTE *lowLimit = (const BYTE *) src;
+ const BYTE *anchor = ip;
+ const BYTE * const iend = ip + *srcSizePtr;
+ const BYTE * const mflimit = iend - MFLIMIT;
+ const BYTE * const matchlimit = iend - LASTLITERALS;
+
+ BYTE *op = (BYTE *) dst;
+ BYTE * const oend = op + targetDstSize;
+ BYTE * const oMaxLit = op + targetDstSize - 2 /* offset */
+ - 8 /* because 8 + MINMATCH == MFLIMIT */ - 1 /* token */;
+ BYTE * const oMaxMatch = op + targetDstSize
+ - (LASTLITERALS + 1 /* token */);
+ BYTE * const oMaxSeq = oMaxLit - 1 /* token */;
+
+ U32 forwardH;
+
+ /* Init conditions */
+ /* Impossible to store anything */
+ if (targetDstSize < 1)
+ return 0;
+ /* Unsupported input size, too large (or negative) */
+ if ((U32)*srcSizePtr > (U32)LZ4_MAX_INPUT_SIZE)
+ return 0;
+ /* Size too large (not within 64K limit) */
+ if ((tableType == byU16) && (*srcSizePtr >= LZ4_64Klimit))
+ return 0;
+ /* Input too small, no compression (all literals) */
+ if (*srcSizePtr < LZ4_minLength)
+ goto _last_literals;
+
+ /* First Byte */
+ *srcSizePtr = 0;
+ LZ4_putPosition(ip, ctx->hashTable, tableType, base);
+	ip++;
+	forwardH = LZ4_hashPosition(ip, tableType);
+
+ /* Main Loop */
+ for ( ; ; ) {
+ const BYTE *match;
+ BYTE *token;
+
+ /* Find a match */
+ {
+ const BYTE *forwardIp = ip;
+ unsigned int step = 1;
+ unsigned int searchMatchNb = 1 << LZ4_SKIPTRIGGER;
+
+ do {
+ U32 h = forwardH;
+
+ ip = forwardIp;
+ forwardIp += step;
+ step = (searchMatchNb++ >> LZ4_SKIPTRIGGER);
+
+ if (unlikely(forwardIp > mflimit))
+ goto _last_literals;
+
+ match = LZ4_getPositionOnHash(h, ctx->hashTable,
+ tableType, base);
+ forwardH = LZ4_hashPosition(forwardIp,
+ tableType);
+ LZ4_putPositionOnHash(ip, h,
+ ctx->hashTable, tableType,
+ base);
+
+ } while (((tableType == byU16)
+ ? 0
+ : (match + MAX_DISTANCE < ip))
+ || (LZ4_read32(match) != LZ4_read32(ip)));
+ }
+
+ /* Catch up */
+ while ((ip > anchor)
+ && (match > lowLimit)
+ && (unlikely(ip[-1] == match[-1]))) {
+ ip--;
+ match--;
+ }
+
+ /* Encode Literal length */
+ {
+ unsigned int litLength = (unsigned int)(ip - anchor);
+
+ token = op++;
+ if (op + ((litLength + 240) / 255)
+ + litLength > oMaxLit) {
+ /* Not enough space for a last match */
+ op--;
+ goto _last_literals;
+ }
+ if (litLength >= RUN_MASK) {
+ unsigned int len = litLength - RUN_MASK;
+ *token = (RUN_MASK<<ML_BITS);
+ for (; len >= 255; len -= 255)
+ *op++ = 255;
+ *op++ = (BYTE)len;
+ } else
+ *token = (BYTE)(litLength << ML_BITS);
+
+ /* Copy Literals */
+ LZ4_wildCopy(op, anchor, op + litLength);
+ op += litLength;
+ }
+
+_next_match:
+ /* Encode Offset */
+		LZ4_writeLE16(op, (U16)(ip - match));
+		op += 2;
+
+ /* Encode MatchLength */
+ {
+ size_t matchLength = LZ4_count(ip + MINMATCH,
+ match + MINMATCH, matchlimit);
+
+ if (op + ((matchLength + 240)/255) > oMaxMatch) {
+ /* Match description too long : reduce it */
+ matchLength = (15 - 1) + (oMaxMatch - op) * 255;
+ }
+ ip += MINMATCH + matchLength;
+
+ if (matchLength >= ML_MASK) {
+ *token += ML_MASK;
+ matchLength -= ML_MASK;
+ while (matchLength >= 255) {
+ matchLength -= 255;
+ *op++ = 255;
+ }
+ *op++ = (BYTE)matchLength;
+ } else
+ *token += (BYTE)(matchLength);
+ }
+
+ anchor = ip;
+
+ /* Test end of block */
+ if (ip > mflimit)
+ break;
+ if (op > oMaxSeq)
+ break;
+
+ /* Fill table */
+ LZ4_putPosition(ip - 2, ctx->hashTable, tableType, base);
+
+ /* Test next position */
+ match = LZ4_getPosition(ip, ctx->hashTable, tableType, base);
+ LZ4_putPosition(ip, ctx->hashTable, tableType, base);
+
+ if ((match + MAX_DISTANCE >= ip)
+ && (LZ4_read32(match) == LZ4_read32(ip))) {
+			token = op++;
+			*token = 0;
+ goto _next_match;
+ }
+
+ /* Prepare next loop */
+ forwardH = LZ4_hashPosition(++ip, tableType);
+ }
+
+_last_literals:
+ /* Encode Last Literals */
+ {
+ size_t lastRunSize = (size_t)(iend - anchor);
+
+ if (op + 1 /* token */
+ + ((lastRunSize + 240) / 255) /* litLength */
+ + lastRunSize /* literals */ > oend) {
+ /* adapt lastRunSize to fill 'dst' */
+ lastRunSize = (oend - op) - 1;
+ lastRunSize -= (lastRunSize + 240) / 255;
+ }
+ ip = anchor + lastRunSize;
+
+ if (lastRunSize >= RUN_MASK) {
+ size_t accumulator = lastRunSize - RUN_MASK;
+
+ *op++ = RUN_MASK << ML_BITS;
+ for (; accumulator >= 255; accumulator -= 255)
+ *op++ = 255;
+ *op++ = (BYTE) accumulator;
+ } else {
+ *op++ = (BYTE)(lastRunSize<<ML_BITS);
+ }
+ LZ4_memcpy(op, anchor, lastRunSize);
+ op += lastRunSize;
+ }
+
+ /* End */
+ *srcSizePtr = (int) (((const char *)ip) - src);
+ return (int) (((char *)op) - dst);
+}
+
+static int LZ4_compress_destSize_extState(
+ LZ4_stream_t *state,
+ const char *src,
+ char *dst,
+ int *srcSizePtr,
+ int targetDstSize)
+{
+#if LZ4_ARCH64
+ const tableType_t tableType = byU32;
+#else
+ const tableType_t tableType = byPtr;
+#endif
+
+ LZ4_resetStream(state);
+
+ if (targetDstSize >= LZ4_COMPRESSBOUND(*srcSizePtr)) {
+ /* compression success is guaranteed */
+ return LZ4_compress_fast_extState(
+ state, src, dst, *srcSizePtr,
+ targetDstSize, 1);
+ } else {
+ if (*srcSizePtr < LZ4_64Klimit)
+ return LZ4_compress_destSize_generic(
+ &state->internal_donotuse,
+ src, dst, srcSizePtr,
+ targetDstSize, byU16);
+ else
+ return LZ4_compress_destSize_generic(
+ &state->internal_donotuse,
+ src, dst, srcSizePtr,
+ targetDstSize, tableType);
+ }
+}
+
+
+int LZ4_compress_destSize(
+ const char *src,
+ char *dst,
+ int *srcSizePtr,
+ int targetDstSize,
+ void *wrkmem)
+{
+ return LZ4_compress_destSize_extState(wrkmem, src, dst, srcSizePtr,
+ targetDstSize);
+}
+EXPORT_SYMBOL(LZ4_compress_destSize);
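+
+/*
+ * The *_destSize() variant above inverts the usual contract: rather
+ * than failing when the output buffer is too small, it compresses as
+ * many input bytes as fit into exactly targetDstSize output bytes and
+ * reports the number of consumed input bytes through *srcSizePtr.
+ * Hedged sketch of a caller (variable names are assumptions):
+ *
+ *	int src_len = len;	// in: bytes available, out: bytes consumed
+ *	int clen = LZ4_compress_destSize(src, dst, &src_len,
+ *					 PAGE_SIZE, wrkmem);
+ */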
+
+/*-******************************
+ * Streaming functions
+ ********************************/
+void LZ4_resetStream(LZ4_stream_t *LZ4_stream)
+{
+ memset(LZ4_stream, 0, sizeof(LZ4_stream_t));
+}
+
+int LZ4_loadDict(LZ4_stream_t *LZ4_dict,
+ const char *dictionary, int dictSize)
+{
+ LZ4_stream_t_internal *dict = &LZ4_dict->internal_donotuse;
+ const BYTE *p = (const BYTE *)dictionary;
+ const BYTE * const dictEnd = p + dictSize;
+ const BYTE *base;
+
+ if ((dict->initCheck)
+ || (dict->currentOffset > 1 * GB)) {
+ /* Uninitialized structure, or reuse overflow */
+ LZ4_resetStream(LZ4_dict);
+ }
+
+ if (dictSize < (int)HASH_UNIT) {
+ dict->dictionary = NULL;
+ dict->dictSize = 0;
+ return 0;
+ }
+
+ if ((dictEnd - p) > 64 * KB)
+ p = dictEnd - 64 * KB;
+ dict->currentOffset += 64 * KB;
+ base = p - dict->currentOffset;
+ dict->dictionary = p;
+ dict->dictSize = (U32)(dictEnd - p);
+ dict->currentOffset += dict->dictSize;
+
+ while (p <= dictEnd - HASH_UNIT) {
+ LZ4_putPosition(p, dict->hashTable, byU32, base);
+ p += 3;
+ }
+
+ return dict->dictSize;
+}
+EXPORT_SYMBOL(LZ4_loadDict);
+
+static void LZ4_renormDictT(LZ4_stream_t_internal *LZ4_dict,
+ const BYTE *src)
+{
+ if ((LZ4_dict->currentOffset > 0x80000000) ||
+ ((uptrval)LZ4_dict->currentOffset > (uptrval)src)) {
+ /* address space overflow */
+ /* rescale hash table */
+ U32 const delta = LZ4_dict->currentOffset - 64 * KB;
+ const BYTE *dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize;
+ int i;
+
+ for (i = 0; i < LZ4_HASH_SIZE_U32; i++) {
+ if (LZ4_dict->hashTable[i] < delta)
+ LZ4_dict->hashTable[i] = 0;
+ else
+ LZ4_dict->hashTable[i] -= delta;
+ }
+ LZ4_dict->currentOffset = 64 * KB;
+ if (LZ4_dict->dictSize > 64 * KB)
+ LZ4_dict->dictSize = 64 * KB;
+ LZ4_dict->dictionary = dictEnd - LZ4_dict->dictSize;
+ }
+}
+
+int LZ4_saveDict(LZ4_stream_t *LZ4_dict, char *safeBuffer, int dictSize)
+{
+ LZ4_stream_t_internal * const dict = &LZ4_dict->internal_donotuse;
+ const BYTE * const previousDictEnd = dict->dictionary + dict->dictSize;
+
+ if ((U32)dictSize > 64 * KB) {
+ /* useless to define a dictionary > 64 * KB */
+ dictSize = 64 * KB;
+ }
+ if ((U32)dictSize > dict->dictSize)
+ dictSize = dict->dictSize;
+
+ memmove(safeBuffer, previousDictEnd - dictSize, dictSize);
+
+ dict->dictionary = (const BYTE *)safeBuffer;
+ dict->dictSize = (U32)dictSize;
+
+ return dictSize;
+}
+EXPORT_SYMBOL(LZ4_saveDict);
+
+int LZ4_compress_fast_continue(LZ4_stream_t *LZ4_stream, const char *source,
+ char *dest, int inputSize, int maxOutputSize, int acceleration)
+{
+ LZ4_stream_t_internal *streamPtr = &LZ4_stream->internal_donotuse;
+ const BYTE * const dictEnd = streamPtr->dictionary
+ + streamPtr->dictSize;
+
+ const BYTE *smallest = (const BYTE *) source;
+
+ if (streamPtr->initCheck) {
+ /* Uninitialized structure detected */
+ return 0;
+ }
+
+ if ((streamPtr->dictSize > 0) && (smallest > dictEnd))
+ smallest = dictEnd;
+
+ LZ4_renormDictT(streamPtr, smallest);
+
+ if (acceleration < 1)
+ acceleration = LZ4_ACCELERATION_DEFAULT;
+
+ /* Check overlapping input/dictionary space */
+ {
+ const BYTE *sourceEnd = (const BYTE *) source + inputSize;
+
+ if ((sourceEnd > streamPtr->dictionary)
+ && (sourceEnd < dictEnd)) {
+ streamPtr->dictSize = (U32)(dictEnd - sourceEnd);
+ if (streamPtr->dictSize > 64 * KB)
+ streamPtr->dictSize = 64 * KB;
+ if (streamPtr->dictSize < 4)
+ streamPtr->dictSize = 0;
+ streamPtr->dictionary = dictEnd - streamPtr->dictSize;
+ }
+ }
+
+ /* prefix mode : source data follows dictionary */
+ if (dictEnd == (const BYTE *)source) {
+ int result;
+
+ if ((streamPtr->dictSize < 64 * KB) &&
+ (streamPtr->dictSize < streamPtr->currentOffset)) {
+ result = LZ4_compress_generic(
+ streamPtr, source, dest, inputSize,
+ maxOutputSize, limitedOutput, byU32,
+ withPrefix64k, dictSmall, acceleration);
+ } else {
+ result = LZ4_compress_generic(
+ streamPtr, source, dest, inputSize,
+ maxOutputSize, limitedOutput, byU32,
+ withPrefix64k, noDictIssue, acceleration);
+ }
+ streamPtr->dictSize += (U32)inputSize;
+ streamPtr->currentOffset += (U32)inputSize;
+ return result;
+ }
+
+ /* external dictionary mode */
+ {
+ int result;
+
+ if ((streamPtr->dictSize < 64 * KB) &&
+ (streamPtr->dictSize < streamPtr->currentOffset)) {
+ result = LZ4_compress_generic(
+ streamPtr, source, dest, inputSize,
+ maxOutputSize, limitedOutput, byU32,
+ usingExtDict, dictSmall, acceleration);
+ } else {
+ result = LZ4_compress_generic(
+ streamPtr, source, dest, inputSize,
+ maxOutputSize, limitedOutput, byU32,
+ usingExtDict, noDictIssue, acceleration);
+ }
+ streamPtr->dictionary = (const BYTE *)source;
+ streamPtr->dictSize = (U32)inputSize;
+ streamPtr->currentOffset += (U32)inputSize;
+ return result;
+ }
+}
+EXPORT_SYMBOL(LZ4_compress_fast_continue);
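+
+/*
+ * Streaming usage sketch (illustrative; it assumes blocks are emitted
+ * back-to-back so earlier output can serve as the dictionary, which is
+ * the pattern the prefix/extDict modes above exist for):
+ *
+ *	LZ4_stream_t stream;
+ *
+ *	LZ4_resetStream(&stream);
+ *	while (more_blocks)
+ *		n = LZ4_compress_fast_continue(&stream, in, out, in_len,
+ *					       out_max, 1);
+ */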
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("LZ4 compressor");
diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
new file mode 100644
index 000000000..59fe69a63
--- /dev/null
+++ b/lib/lz4/lz4_decompress.c
@@ -0,0 +1,720 @@
+/*
+ * LZ4 - Fast LZ compression algorithm
+ * Copyright (C) 2011 - 2016, Yann Collet.
+ * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * You can contact the author at :
+ * - LZ4 homepage : http://www.lz4.org
+ * - LZ4 source repository : https://github.com/lz4/lz4
+ *
+ * Changed for kernel usage by:
+ * Sven Schmidt <4sschmid@informatik.uni-hamburg.de>
+ */
+
+/*-************************************
+ * Dependencies
+ **************************************/
+#include <linux/lz4.h>
+#include "lz4defs.h"
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <asm/unaligned.h>
+
+/*-*****************************
+ * Decompression functions
+ *******************************/
+
+#define DEBUGLOG(l, ...) {} /* disabled */
+
+#ifndef assert
+#define assert(condition) ((void)0)
+#endif
+
+/*
+ * LZ4_decompress_generic() :
+ * This generic decompression function covers all use cases.
+ * It shall be instantiated several times, using different sets of directives.
+ * Note that it is important for performance that this function really
+ * gets inlined, in order to remove useless branches during compilation
+ * optimization.
+ */
+static FORCE_INLINE int LZ4_decompress_generic(
+ const char * const src,
+ char * const dst,
+ int srcSize,
+ /*
+ * If endOnInput == endOnInputSize,
+ * this value is `dstCapacity`
+ */
+ int outputSize,
+ /* endOnOutputSize, endOnInputSize */
+ endCondition_directive endOnInput,
+ /* full, partial */
+ earlyEnd_directive partialDecoding,
+ /* noDict, withPrefix64k, usingExtDict */
+ dict_directive dict,
+ /* always <= dst, == dst when no prefix */
+ const BYTE * const lowPrefix,
+ /* only if dict == usingExtDict */
+ const BYTE * const dictStart,
+ /* note : = 0 if noDict */
+ const size_t dictSize
+ )
+{
+ const BYTE *ip = (const BYTE *) src;
+ const BYTE * const iend = ip + srcSize;
+
+ BYTE *op = (BYTE *) dst;
+ BYTE * const oend = op + outputSize;
+ BYTE *cpy;
+
+ const BYTE * const dictEnd = (const BYTE *)dictStart + dictSize;
+ static const unsigned int inc32table[8] = {0, 1, 2, 1, 0, 4, 4, 4};
+ static const int dec64table[8] = {0, 0, 0, -1, -4, 1, 2, 3};
+
+ const int safeDecode = (endOnInput == endOnInputSize);
+ const int checkOffset = ((safeDecode) && (dictSize < (int)(64 * KB)));
+
+ /* Set up the "end" pointers for the shortcut. */
+ const BYTE *const shortiend = iend -
+ (endOnInput ? 14 : 8) /*maxLL*/ - 2 /*offset*/;
+ const BYTE *const shortoend = oend -
+ (endOnInput ? 14 : 8) /*maxLL*/ - 18 /*maxML*/;
+
+ DEBUGLOG(5, "%s (srcSize:%i, dstSize:%i)", __func__,
+ srcSize, outputSize);
+
+ /* Special cases */
+ assert(lowPrefix <= op);
+ assert(src != NULL);
+
+ /* Empty output buffer */
+ if ((endOnInput) && (unlikely(outputSize == 0)))
+ return ((srcSize == 1) && (*ip == 0)) ? 0 : -1;
+
+ if ((!endOnInput) && (unlikely(outputSize == 0)))
+ return (*ip == 0 ? 1 : -1);
+
+ if ((endOnInput) && unlikely(srcSize == 0))
+ return -1;
+
+ /* Main Loop : decode sequences */
+ while (1) {
+ size_t length;
+ const BYTE *match;
+ size_t offset;
+
+ /* get literal length */
+ unsigned int const token = *ip++;
+ length = token>>ML_BITS;
+
+ /* ip < iend before the increment */
+ assert(!endOnInput || ip <= iend);
+
+ /*
+ * A two-stage shortcut for the most common case:
+ * 1) If the literal length is 0..14, and there is enough
+ * space, enter the shortcut and copy 16 bytes on behalf
+ * of the literals (in the fast mode, only 8 bytes can be
+ * safely copied this way).
+ * 2) Further if the match length is 4..18, copy 18 bytes
+ * in a similar manner; but we ensure that there's enough
+ * space in the output for those 18 bytes earlier, upon
+ * entering the shortcut (in other words, there is a
+ * combined check for both stages).
+ *
+ * The & in the likely() below is intentionally not && so that
+ * some compilers can produce better parallelized runtime code
+ */
+ if ((endOnInput ? length != RUN_MASK : length <= 8)
+ /*
+ * strictly "less than" on input, to re-enter
+ * the loop with at least one byte
+ */
+ && likely((endOnInput ? ip < shortiend : 1) &
+ (op <= shortoend))) {
+ /* Copy the literals */
+ LZ4_memcpy(op, ip, endOnInput ? 16 : 8);
+			op += length;
+			ip += length;
+
+ /*
+ * The second stage:
+ * prepare for match copying, decode full info.
+ * If it doesn't work out, the info won't be wasted.
+ */
+ length = token & ML_MASK; /* match length */
+ offset = LZ4_readLE16(ip);
+ ip += 2;
+ match = op - offset;
+ assert(match <= op); /* check overflow */
+
+ /* Do not deal with overlapping matches. */
+ if ((length != ML_MASK) &&
+ (offset >= 8) &&
+ (dict == withPrefix64k || match >= lowPrefix)) {
+ /* Copy the match. */
+ LZ4_memcpy(op + 0, match + 0, 8);
+ LZ4_memcpy(op + 8, match + 8, 8);
+ LZ4_memcpy(op + 16, match + 16, 2);
+ op += length + MINMATCH;
+ /* Both stages worked, load the next token. */
+ continue;
+ }
+
+ /*
+ * The second stage didn't work out, but the info
+ * is ready. Propel it right to the point of match
+ * copying.
+ */
+ goto _copy_match;
+ }
+
+ /* decode literal length */
+ if (length == RUN_MASK) {
+ unsigned int s;
+
+ if (unlikely(endOnInput ? ip >= iend - RUN_MASK : 0)) {
+ /* overflow detection */
+ goto _output_error;
+ }
+ do {
+ s = *ip++;
+ length += s;
+ } while (likely(endOnInput
+ ? ip < iend - RUN_MASK
+ : 1) & (s == 255));
+
+ if ((safeDecode)
+ && unlikely((uptrval)(op) +
+ length < (uptrval)(op))) {
+ /* overflow detection */
+ goto _output_error;
+ }
+ if ((safeDecode)
+ && unlikely((uptrval)(ip) +
+ length < (uptrval)(ip))) {
+ /* overflow detection */
+ goto _output_error;
+ }
+ }
+
+ /* copy literals */
+ cpy = op + length;
+ LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH);
+
+ if (((endOnInput) && ((cpy > oend - MFLIMIT)
+ || (ip + length > iend - (2 + 1 + LASTLITERALS))))
+ || ((!endOnInput) && (cpy > oend - WILDCOPYLENGTH))) {
+ if (partialDecoding) {
+ if (cpy > oend) {
+ /*
+ * Partial decoding :
+ * stop in the middle of literal segment
+ */
+ cpy = oend;
+ length = oend - op;
+ }
+ if ((endOnInput)
+ && (ip + length > iend)) {
+ /*
+ * Error :
+ * read attempt beyond
+ * end of input buffer
+ */
+ goto _output_error;
+ }
+ } else {
+ if ((!endOnInput)
+ && (cpy != oend)) {
+ /*
+ * Error :
+ * block decoding must
+ * stop exactly there
+ */
+ goto _output_error;
+ }
+ if ((endOnInput)
+ && ((ip + length != iend)
+ || (cpy > oend))) {
+ /*
+ * Error :
+ * input must be consumed
+ */
+ goto _output_error;
+ }
+ }
+
+ /*
+ * supports overlapping memory regions; only matters
+ * for in-place decompression scenarios
+ */
+ LZ4_memmove(op, ip, length);
+ ip += length;
+ op += length;
+
+ /* Necessarily EOF when !partialDecoding.
+ * When partialDecoding, it is EOF if we've either
+ * filled the output buffer or
+ * can't proceed with reading an offset for following match.
+ */
+ if (!partialDecoding || (cpy == oend) || (ip >= (iend - 2)))
+ break;
+ } else {
+ /* may overwrite up to WILDCOPYLENGTH beyond cpy */
+ LZ4_wildCopy(op, ip, cpy);
+ ip += length;
+ op = cpy;
+ }
+
+ /* get offset */
+ offset = LZ4_readLE16(ip);
+ ip += 2;
+ match = op - offset;
+
+ /* get matchlength */
+ length = token & ML_MASK;
+
+_copy_match:
+ if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) {
+ /* Error : offset outside buffers */
+ goto _output_error;
+ }
+
+	/* costs ~1%; silence an MSan warning when offset == 0 */
+ /*
+ * note : when partialDecoding, there is no guarantee that
+ * at least 4 bytes remain available in output buffer
+ */
+ if (!partialDecoding) {
+ assert(oend > op);
+ assert(oend - op >= 4);
+
+ LZ4_write32(op, (U32)offset);
+ }
+
+ if (length == ML_MASK) {
+ unsigned int s;
+
+ do {
+ s = *ip++;
+
+ if ((endOnInput) && (ip > iend - LASTLITERALS))
+ goto _output_error;
+
+ length += s;
+ } while (s == 255);
+
+ if ((safeDecode)
+ && unlikely(
+ (uptrval)(op) + length < (uptrval)op)) {
+ /* overflow detection */
+ goto _output_error;
+ }
+ }
+
+ length += MINMATCH;
+
+ /* match starting within external dictionary */
+ if ((dict == usingExtDict) && (match < lowPrefix)) {
+ if (unlikely(op + length > oend - LASTLITERALS)) {
+ /* doesn't respect parsing restriction */
+ if (!partialDecoding)
+ goto _output_error;
+ length = min(length, (size_t)(oend - op));
+ }
+
+ if (length <= (size_t)(lowPrefix - match)) {
+ /*
+ * match fits entirely within external
+ * dictionary : just copy
+ */
+ memmove(op, dictEnd - (lowPrefix - match),
+ length);
+ op += length;
+ } else {
+ /*
+ * match stretches into both external
+ * dictionary and current block
+ */
+ size_t const copySize = (size_t)(lowPrefix - match);
+ size_t const restSize = length - copySize;
+
+ LZ4_memcpy(op, dictEnd - copySize, copySize);
+ op += copySize;
+ if (restSize > (size_t)(op - lowPrefix)) {
+ /* overlap copy */
+ BYTE * const endOfMatch = op + restSize;
+ const BYTE *copyFrom = lowPrefix;
+
+ while (op < endOfMatch)
+ *op++ = *copyFrom++;
+ } else {
+ LZ4_memcpy(op, lowPrefix, restSize);
+ op += restSize;
+ }
+ }
+ continue;
+ }
+
+ /* copy match within block */
+ cpy = op + length;
+
+ /*
+ * partialDecoding :
+ * may not respect endBlock parsing restrictions
+ */
+ assert(op <= oend);
+ if (partialDecoding &&
+ (cpy > oend - MATCH_SAFEGUARD_DISTANCE)) {
+ size_t const mlen = min(length, (size_t)(oend - op));
+ const BYTE * const matchEnd = match + mlen;
+ BYTE * const copyEnd = op + mlen;
+
+ if (matchEnd > op) {
+ /* overlap copy */
+ while (op < copyEnd)
+ *op++ = *match++;
+ } else {
+ LZ4_memcpy(op, match, mlen);
+ }
+ op = copyEnd;
+ if (op == oend)
+ break;
+ continue;
+ }
+
+ if (unlikely(offset < 8)) {
+ op[0] = match[0];
+ op[1] = match[1];
+ op[2] = match[2];
+ op[3] = match[3];
+ match += inc32table[offset];
+ LZ4_memcpy(op + 4, match, 4);
+ match -= dec64table[offset];
+ } else {
+ LZ4_copy8(op, match);
+ match += 8;
+ }
+
+ op += 8;
+
+ if (unlikely(cpy > oend - MATCH_SAFEGUARD_DISTANCE)) {
+ BYTE * const oCopyLimit = oend - (WILDCOPYLENGTH - 1);
+
+ if (cpy > oend - LASTLITERALS) {
+ /*
+ * Error : last LASTLITERALS bytes
+ * must be literals (uncompressed)
+ */
+ goto _output_error;
+ }
+
+ if (op < oCopyLimit) {
+ LZ4_wildCopy(op, match, oCopyLimit);
+ match += oCopyLimit - op;
+ op = oCopyLimit;
+ }
+ while (op < cpy)
+ *op++ = *match++;
+ } else {
+ LZ4_copy8(op, match);
+ if (length > 16)
+ LZ4_wildCopy(op + 8, match + 8, cpy);
+ }
+ op = cpy; /* wildcopy correction */
+ }
+
+ /* end of decoding */
+ if (endOnInput) {
+ /* Nb of output bytes decoded */
+ return (int) (((char *)op) - dst);
+ } else {
+ /* Nb of input bytes read */
+ return (int) (((const char *)ip) - src);
+ }
+
+ /* Overflow error detected */
+_output_error:
+ return (int) (-(((const char *)ip) - src)) - 1;
+}
+
+int LZ4_decompress_safe(const char *source, char *dest,
+ int compressedSize, int maxDecompressedSize)
+{
+ return LZ4_decompress_generic(source, dest,
+ compressedSize, maxDecompressedSize,
+ endOnInputSize, decode_full_block,
+ noDict, (BYTE *)dest, NULL, 0);
+}
+
+int LZ4_decompress_safe_partial(const char *src, char *dst,
+ int compressedSize, int targetOutputSize, int dstCapacity)
+{
+ dstCapacity = min(targetOutputSize, dstCapacity);
+ return LZ4_decompress_generic(src, dst, compressedSize, dstCapacity,
+ endOnInputSize, partial_decode,
+ noDict, (BYTE *)dst, NULL, 0);
+}
+
+int LZ4_decompress_fast(const char *source, char *dest, int originalSize)
+{
+ return LZ4_decompress_generic(source, dest, 0, originalSize,
+ endOnOutputSize, decode_full_block,
+ withPrefix64k,
+ (BYTE *)dest - 64 * KB, NULL, 0);
+}
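+
+/*
+ * Of the entry points above, only LZ4_decompress_safe() bounds both
+ * reads and writes; LZ4_decompress_fast() trusts the compressed stream
+ * and must only be fed trusted data. Hedged sketch of the common safe
+ * path (buffer names are assumptions):
+ *
+ *	int n = LZ4_decompress_safe(cbuf, dbuf, clen, dbuf_size);
+ *
+ *	if (n < 0)
+ *		return -EINVAL;	// malformed or truncated input
+ */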
+
+/* ===== Instantiate a few more decoding cases, used more than once. ===== */
+
+static int LZ4_decompress_safe_withPrefix64k(const char *source, char *dest,
+ int compressedSize, int maxOutputSize)
+{
+ return LZ4_decompress_generic(source, dest,
+ compressedSize, maxOutputSize,
+ endOnInputSize, decode_full_block,
+ withPrefix64k,
+ (BYTE *)dest - 64 * KB, NULL, 0);
+}
+
+static int LZ4_decompress_safe_withSmallPrefix(const char *source, char *dest,
+ int compressedSize,
+ int maxOutputSize,
+ size_t prefixSize)
+{
+ return LZ4_decompress_generic(source, dest,
+ compressedSize, maxOutputSize,
+ endOnInputSize, decode_full_block,
+ noDict,
+ (BYTE *)dest - prefixSize, NULL, 0);
+}
+
+static int LZ4_decompress_safe_forceExtDict(const char *source, char *dest,
+ int compressedSize, int maxOutputSize,
+ const void *dictStart, size_t dictSize)
+{
+ return LZ4_decompress_generic(source, dest,
+ compressedSize, maxOutputSize,
+ endOnInputSize, decode_full_block,
+ usingExtDict, (BYTE *)dest,
+ (const BYTE *)dictStart, dictSize);
+}
+
+static int LZ4_decompress_fast_extDict(const char *source, char *dest,
+ int originalSize,
+ const void *dictStart, size_t dictSize)
+{
+ return LZ4_decompress_generic(source, dest,
+ 0, originalSize,
+ endOnOutputSize, decode_full_block,
+ usingExtDict, (BYTE *)dest,
+ (const BYTE *)dictStart, dictSize);
+}
+
+/*
+ * The "double dictionary" mode, for use with e.g. ring buffers: the first part
+ * of the dictionary is passed as prefix, and the second via dictStart + dictSize.
+ * These routines are used only once, in LZ4_decompress_*_continue().
+ */
+static FORCE_INLINE
+int LZ4_decompress_safe_doubleDict(const char *source, char *dest,
+ int compressedSize, int maxOutputSize,
+ size_t prefixSize,
+ const void *dictStart, size_t dictSize)
+{
+ return LZ4_decompress_generic(source, dest,
+ compressedSize, maxOutputSize,
+ endOnInputSize, decode_full_block,
+ usingExtDict, (BYTE *)dest - prefixSize,
+ (const BYTE *)dictStart, dictSize);
+}
+
+static FORCE_INLINE
+int LZ4_decompress_fast_doubleDict(const char *source, char *dest,
+ int originalSize, size_t prefixSize,
+ const void *dictStart, size_t dictSize)
+{
+ return LZ4_decompress_generic(source, dest,
+ 0, originalSize,
+ endOnOutputSize, decode_full_block,
+ usingExtDict, (BYTE *)dest - prefixSize,
+ (const BYTE *)dictStart, dictSize);
+}
+
+/* ===== streaming decompression functions ===== */
+
+int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode,
+ const char *dictionary, int dictSize)
+{
+ LZ4_streamDecode_t_internal *lz4sd =
+ &LZ4_streamDecode->internal_donotuse;
+
+ lz4sd->prefixSize = (size_t) dictSize;
+ lz4sd->prefixEnd = (const BYTE *) dictionary + dictSize;
+ lz4sd->externalDict = NULL;
+ lz4sd->extDictSize = 0;
+ return 1;
+}
+
+/*
+ * *_continue() :
+ * These decoding functions allow decompression of multiple blocks
+ * in "streaming" mode.
+ * Previously decoded blocks must still be available at the memory
+ * position where they were decoded.
+ * If it's not possible, save the relevant part of
+ * decoded data into a safe buffer,
+ * and indicate where it stands using LZ4_setStreamDecode()
+ */
+int LZ4_decompress_safe_continue(LZ4_streamDecode_t *LZ4_streamDecode,
+ const char *source, char *dest, int compressedSize, int maxOutputSize)
+{
+ LZ4_streamDecode_t_internal *lz4sd =
+ &LZ4_streamDecode->internal_donotuse;
+ int result;
+
+ if (lz4sd->prefixSize == 0) {
+ /* The first call, no dictionary yet. */
+ assert(lz4sd->extDictSize == 0);
+ result = LZ4_decompress_safe(source, dest,
+ compressedSize, maxOutputSize);
+ if (result <= 0)
+ return result;
+ lz4sd->prefixSize = result;
+ lz4sd->prefixEnd = (BYTE *)dest + result;
+ } else if (lz4sd->prefixEnd == (BYTE *)dest) {
+ /* They're rolling the current segment. */
+ if (lz4sd->prefixSize >= 64 * KB - 1)
+ result = LZ4_decompress_safe_withPrefix64k(source, dest,
+ compressedSize, maxOutputSize);
+ else if (lz4sd->extDictSize == 0)
+ result = LZ4_decompress_safe_withSmallPrefix(source,
+ dest, compressedSize, maxOutputSize,
+ lz4sd->prefixSize);
+ else
+ result = LZ4_decompress_safe_doubleDict(source, dest,
+ compressedSize, maxOutputSize,
+ lz4sd->prefixSize,
+ lz4sd->externalDict, lz4sd->extDictSize);
+ if (result <= 0)
+ return result;
+ lz4sd->prefixSize += result;
+ lz4sd->prefixEnd += result;
+ } else {
+ /*
+ * The buffer wraps around, or they're
+ * switching to another buffer.
+ */
+ lz4sd->extDictSize = lz4sd->prefixSize;
+ lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
+ result = LZ4_decompress_safe_forceExtDict(source, dest,
+ compressedSize, maxOutputSize,
+ lz4sd->externalDict, lz4sd->extDictSize);
+ if (result <= 0)
+ return result;
+ lz4sd->prefixSize = result;
+ lz4sd->prefixEnd = (BYTE *)dest + result;
+ }
+
+ return result;
+}
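+
+/*
+ * Decoder-side streaming sketch, mirroring the encoder loop in
+ * lz4_compress.c (illustrative; it relies on previously decoded blocks
+ * staying in place, per the contract above):
+ *
+ *	LZ4_streamDecode_t sd;
+ *
+ *	LZ4_setStreamDecode(&sd, NULL, 0);
+ *	while (more_blocks)
+ *		n = LZ4_decompress_safe_continue(&sd, in, out, clen, omax);
+ */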
+
+int LZ4_decompress_fast_continue(LZ4_streamDecode_t *LZ4_streamDecode,
+ const char *source, char *dest, int originalSize)
+{
+ LZ4_streamDecode_t_internal *lz4sd = &LZ4_streamDecode->internal_donotuse;
+ int result;
+
+ if (lz4sd->prefixSize == 0) {
+ assert(lz4sd->extDictSize == 0);
+ result = LZ4_decompress_fast(source, dest, originalSize);
+ if (result <= 0)
+ return result;
+ lz4sd->prefixSize = originalSize;
+ lz4sd->prefixEnd = (BYTE *)dest + originalSize;
+ } else if (lz4sd->prefixEnd == (BYTE *)dest) {
+ if (lz4sd->prefixSize >= 64 * KB - 1 ||
+ lz4sd->extDictSize == 0)
+ result = LZ4_decompress_fast(source, dest,
+ originalSize);
+ else
+ result = LZ4_decompress_fast_doubleDict(source, dest,
+ originalSize, lz4sd->prefixSize,
+ lz4sd->externalDict, lz4sd->extDictSize);
+ if (result <= 0)
+ return result;
+ lz4sd->prefixSize += originalSize;
+ lz4sd->prefixEnd += originalSize;
+ } else {
+ lz4sd->extDictSize = lz4sd->prefixSize;
+ lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
+ result = LZ4_decompress_fast_extDict(source, dest,
+ originalSize, lz4sd->externalDict, lz4sd->extDictSize);
+ if (result <= 0)
+ return result;
+ lz4sd->prefixSize = originalSize;
+ lz4sd->prefixEnd = (BYTE *)dest + originalSize;
+ }
+ return result;
+}
+
+int LZ4_decompress_safe_usingDict(const char *source, char *dest,
+ int compressedSize, int maxOutputSize,
+ const char *dictStart, int dictSize)
+{
+ if (dictSize == 0)
+ return LZ4_decompress_safe(source, dest,
+ compressedSize, maxOutputSize);
+ if (dictStart+dictSize == dest) {
+ if (dictSize >= 64 * KB - 1)
+ return LZ4_decompress_safe_withPrefix64k(source, dest,
+ compressedSize, maxOutputSize);
+ return LZ4_decompress_safe_withSmallPrefix(source, dest,
+ compressedSize, maxOutputSize, dictSize);
+ }
+ return LZ4_decompress_safe_forceExtDict(source, dest,
+ compressedSize, maxOutputSize, dictStart, dictSize);
+}
+
+int LZ4_decompress_fast_usingDict(const char *source, char *dest,
+ int originalSize,
+ const char *dictStart, int dictSize)
+{
+ if (dictSize == 0 || dictStart + dictSize == dest)
+ return LZ4_decompress_fast(source, dest, originalSize);
+
+ return LZ4_decompress_fast_extDict(source, dest, originalSize,
+ dictStart, dictSize);
+}
+
+#ifndef STATIC
+EXPORT_SYMBOL(LZ4_decompress_safe);
+EXPORT_SYMBOL(LZ4_decompress_safe_partial);
+EXPORT_SYMBOL(LZ4_decompress_fast);
+EXPORT_SYMBOL(LZ4_setStreamDecode);
+EXPORT_SYMBOL(LZ4_decompress_safe_continue);
+EXPORT_SYMBOL(LZ4_decompress_fast_continue);
+EXPORT_SYMBOL(LZ4_decompress_safe_usingDict);
+EXPORT_SYMBOL(LZ4_decompress_fast_usingDict);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("LZ4 decompressor");
+#endif
diff --git a/lib/lz4/lz4defs.h b/lib/lz4/lz4defs.h
new file mode 100644
index 000000000..330aa539b
--- /dev/null
+++ b/lib/lz4/lz4defs.h
@@ -0,0 +1,247 @@
+#ifndef __LZ4DEFS_H__
+#define __LZ4DEFS_H__
+
+/*
+ * lz4defs.h -- common and architecture-specific defines for kernel usage
+ *
+ * LZ4 - Fast LZ compression algorithm
+ * Copyright (C) 2011-2016, Yann Collet.
+ * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * You can contact the author at :
+ * - LZ4 homepage : http://www.lz4.org
+ * - LZ4 source repository : https://github.com/lz4/lz4
+ *
+ * Changed for kernel usage by:
+ * Sven Schmidt <4sschmid@informatik.uni-hamburg.de>
+ */
+
+#include <asm/unaligned.h>
+
+#include <linux/bitops.h>
+#include <linux/string.h> /* memset, memcpy */
+
+#define FORCE_INLINE __always_inline
+
+/*-************************************
+ * Basic Types
+ **************************************/
+#include <linux/types.h>
+
+typedef uint8_t BYTE;
+typedef uint16_t U16;
+typedef uint32_t U32;
+typedef int32_t S32;
+typedef uint64_t U64;
+typedef uintptr_t uptrval;
+
+/*-************************************
+ * Architecture specifics
+ **************************************/
+#if defined(CONFIG_64BIT)
+#define LZ4_ARCH64 1
+#else
+#define LZ4_ARCH64 0
+#endif
+
+#if defined(__LITTLE_ENDIAN)
+#define LZ4_LITTLE_ENDIAN 1
+#else
+#define LZ4_LITTLE_ENDIAN 0
+#endif
+
+/*-************************************
+ * Constants
+ **************************************/
+#define MINMATCH 4
+
+#define WILDCOPYLENGTH 8
+#define LASTLITERALS 5
+#define MFLIMIT (WILDCOPYLENGTH + MINMATCH)
+/*
+ * ensure it's possible to write 2 x wildcopyLength
+ * without overflowing output buffer
+ */
+#define MATCH_SAFEGUARD_DISTANCE ((2 * WILDCOPYLENGTH) - MINMATCH)
+
+/* Increase this value ==> compression runs slower on incompressible data */
+#define LZ4_SKIPTRIGGER 6
+
+#define HASH_UNIT sizeof(size_t)
+
+#define KB (1 << 10)
+#define MB (1 << 20)
+#define GB (1U << 30)
+
+#define MAXD_LOG 16
+#define MAX_DISTANCE ((1 << MAXD_LOG) - 1)
+#define STEPSIZE sizeof(size_t)
+
+#define ML_BITS 4
+#define ML_MASK ((1U << ML_BITS) - 1)
+#define RUN_BITS (8 - ML_BITS)
+#define RUN_MASK ((1U << RUN_BITS) - 1)
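+
+/*
+ * Token layout implied by the masks above: the high RUN_BITS of each
+ * sequence token hold the literal-run length, the low ML_BITS hold the
+ * match length minus MINMATCH, and either field saturates into extra
+ * length bytes. Worked example: 5 literals followed by a 9-byte match
+ * encode as (5 << ML_BITS) | (9 - MINMATCH) = 0x55.
+ */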
+
+/*-************************************
+ * Reading and writing into memory
+ **************************************/
+static FORCE_INLINE U16 LZ4_read16(const void *ptr)
+{
+ return get_unaligned((const U16 *)ptr);
+}
+
+static FORCE_INLINE U32 LZ4_read32(const void *ptr)
+{
+ return get_unaligned((const U32 *)ptr);
+}
+
+static FORCE_INLINE size_t LZ4_read_ARCH(const void *ptr)
+{
+ return get_unaligned((const size_t *)ptr);
+}
+
+static FORCE_INLINE void LZ4_write16(void *memPtr, U16 value)
+{
+ put_unaligned(value, (U16 *)memPtr);
+}
+
+static FORCE_INLINE void LZ4_write32(void *memPtr, U32 value)
+{
+ put_unaligned(value, (U32 *)memPtr);
+}
+
+static FORCE_INLINE U16 LZ4_readLE16(const void *memPtr)
+{
+ return get_unaligned_le16(memPtr);
+}
+
+static FORCE_INLINE void LZ4_writeLE16(void *memPtr, U16 value)
+{
+ return put_unaligned_le16(value, memPtr);
+}
+
+/*
+ * LZ4 relies on memcpy with a constant size being inlined. In freestanding
+ * environments, the compiler can't assume the implementation of memcpy() is
+ * standard compliant, so it cannot apply its specialized memcpy()
+ * inlining logic. When
+ * possible, use __builtin_memcpy() to tell the compiler to analyze memcpy()
+ * as-if it were standard compliant, so it can inline it in freestanding
+ * environments. This is needed when decompressing the Linux Kernel, for example.
+ */
+#define LZ4_memcpy(dst, src, size) __builtin_memcpy(dst, src, size)
+#define LZ4_memmove(dst, src, size) __builtin_memmove(dst, src, size)
+
+static FORCE_INLINE void LZ4_copy8(void *dst, const void *src)
+{
+#if LZ4_ARCH64
+ U64 a = get_unaligned((const U64 *)src);
+
+ put_unaligned(a, (U64 *)dst);
+#else
+ U32 a = get_unaligned((const U32 *)src);
+ U32 b = get_unaligned((const U32 *)src + 1);
+
+ put_unaligned(a, (U32 *)dst);
+ put_unaligned(b, (U32 *)dst + 1);
+#endif
+}
+
+/*
+ * customized variant of memcpy,
+ * which can overwrite up to 7 bytes beyond dstEnd
+ */
+static FORCE_INLINE void LZ4_wildCopy(void *dstPtr,
+ const void *srcPtr, void *dstEnd)
+{
+ BYTE *d = (BYTE *)dstPtr;
+ const BYTE *s = (const BYTE *)srcPtr;
+ BYTE *const e = (BYTE *)dstEnd;
+
+ do {
+ LZ4_copy8(d, s);
+ d += 8;
+ s += 8;
+ } while (d < e);
+}
+
+static FORCE_INLINE unsigned int LZ4_NbCommonBytes(size_t val)
+{
+#if LZ4_LITTLE_ENDIAN
+ return __ffs(val) >> 3;
+#else
+ return (BITS_PER_LONG - 1 - __fls(val)) >> 3;
+#endif
+}
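+
+/*
+ * On little-endian, the lowest non-zero byte of the XOR is the first
+ * differing byte, so the count of equal leading bytes is __ffs(val) / 8;
+ * e.g. val == 0x00FF0000 gives __ffs == 16, i.e. 2 common bytes.
+ * Big-endian counts from the most significant end instead.
+ */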
+
+static FORCE_INLINE unsigned int LZ4_count(
+ const BYTE *pIn,
+ const BYTE *pMatch,
+ const BYTE *pInLimit)
+{
+ const BYTE *const pStart = pIn;
+
+ while (likely(pIn < pInLimit - (STEPSIZE - 1))) {
+ size_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
+
+ if (!diff) {
+ pIn += STEPSIZE;
+ pMatch += STEPSIZE;
+ continue;
+ }
+
+ pIn += LZ4_NbCommonBytes(diff);
+
+ return (unsigned int)(pIn - pStart);
+ }
+
+#if LZ4_ARCH64
+ if ((pIn < (pInLimit - 3))
+ && (LZ4_read32(pMatch) == LZ4_read32(pIn))) {
+ pIn += 4;
+ pMatch += 4;
+ }
+#endif
+
+ if ((pIn < (pInLimit - 1))
+ && (LZ4_read16(pMatch) == LZ4_read16(pIn))) {
+ pIn += 2;
+ pMatch += 2;
+ }
+
+ if ((pIn < pInLimit) && (*pMatch == *pIn))
+ pIn++;
+
+ return (unsigned int)(pIn - pStart);
+}
+
+typedef enum { noLimit = 0, limitedOutput = 1 } limitedOutput_directive;
+typedef enum { byPtr, byU32, byU16 } tableType_t;
+
+typedef enum { noDict = 0, withPrefix64k, usingExtDict } dict_directive;
+typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;
+
+typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive;
+typedef enum { decode_full_block = 0, partial_decode = 1 } earlyEnd_directive;
+
+#define LZ4_STATIC_ASSERT(c) BUILD_BUG_ON(!(c))
+
+#endif
diff --git a/lib/lz4/lz4hc_compress.c b/lib/lz4/lz4hc_compress.c
new file mode 100644
index 000000000..e7ac8694b
--- /dev/null
+++ b/lib/lz4/lz4hc_compress.c
@@ -0,0 +1,768 @@
+/*
+ * LZ4 HC - High Compression Mode of LZ4
+ * Copyright (C) 2011-2015, Yann Collet.
+ *
+ * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * You can contact the author at :
+ * - LZ4 homepage : http://www.lz4.org
+ * - LZ4 source repository : https://github.com/lz4/lz4
+ *
+ * Changed for kernel usage by:
+ * Sven Schmidt <4sschmid@informatik.uni-hamburg.de>
+ */
+
+/*-************************************
+ * Dependencies
+ **************************************/
+#include <linux/lz4.h>
+#include "lz4defs.h"
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h> /* memset */
+
+/* *************************************
+ * Local Constants and types
+ ***************************************/
+
+#define OPTIMAL_ML (int)((ML_MASK - 1) + MINMATCH)
+
+#define HASH_FUNCTION(i) (((i) * 2654435761U) \
+ >> ((MINMATCH*8) - LZ4HC_HASH_LOG))
+#define DELTANEXTU16(p) chainTable[(U16)(p)] /* faster */
+
+static U32 LZ4HC_hashPtr(const void *ptr)
+{
+ return HASH_FUNCTION(LZ4_read32(ptr));
+}
+
+/**************************************
+ * HC Compression
+ **************************************/
+static void LZ4HC_init(LZ4HC_CCtx_internal *hc4, const BYTE *start)
+{
+ memset((void *)hc4->hashTable, 0, sizeof(hc4->hashTable));
+ memset(hc4->chainTable, 0xFF, sizeof(hc4->chainTable));
+ hc4->nextToUpdate = 64 * KB;
+ hc4->base = start - 64 * KB;
+ hc4->end = start;
+ hc4->dictBase = start - 64 * KB;
+ hc4->dictLimit = 64 * KB;
+ hc4->lowLimit = 64 * KB;
+}
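+
+/*
+ * Placing base 64 KB before the real input means every real position
+ * gets an index >= 64 * KB, so the U16 chain deltas and the lowLimit
+ * arithmetic in the match finders below never underflow within the
+ * first window.
+ */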
+
+/* Update chains up to ip (excluded) */
+static FORCE_INLINE void LZ4HC_Insert(LZ4HC_CCtx_internal *hc4,
+ const BYTE *ip)
+{
+ U16 * const chainTable = hc4->chainTable;
+ U32 * const hashTable = hc4->hashTable;
+ const BYTE * const base = hc4->base;
+ U32 const target = (U32)(ip - base);
+ U32 idx = hc4->nextToUpdate;
+
+ while (idx < target) {
+ U32 const h = LZ4HC_hashPtr(base + idx);
+ size_t delta = idx - hashTable[h];
+
+ if (delta > MAX_DISTANCE)
+ delta = MAX_DISTANCE;
+
+ DELTANEXTU16(idx) = (U16)delta;
+
+ hashTable[h] = idx;
+ idx++;
+ }
+
+ hc4->nextToUpdate = target;
+}
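+
+/*
+ * The chain table built above stores, per position, the backward
+ * distance (capped at MAX_DISTANCE) to the previous position with the
+ * same hash; the match finders walk it newest-to-oldest via
+ * matchIndex -= DELTANEXTU16(matchIndex).
+ */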
+
+static FORCE_INLINE int LZ4HC_InsertAndFindBestMatch(
+ LZ4HC_CCtx_internal *hc4, /* Index table will be updated */
+ const BYTE *ip,
+ const BYTE * const iLimit,
+ const BYTE **matchpos,
+ const int maxNbAttempts)
+{
+ U16 * const chainTable = hc4->chainTable;
+ U32 * const HashTable = hc4->hashTable;
+ const BYTE * const base = hc4->base;
+ const BYTE * const dictBase = hc4->dictBase;
+ const U32 dictLimit = hc4->dictLimit;
+ const U32 lowLimit = (hc4->lowLimit + 64 * KB > (U32)(ip - base))
+ ? hc4->lowLimit
+ : (U32)(ip - base) - (64 * KB - 1);
+ U32 matchIndex;
+ int nbAttempts = maxNbAttempts;
+ size_t ml = 0;
+
+ /* HC4 match finder */
+ LZ4HC_Insert(hc4, ip);
+ matchIndex = HashTable[LZ4HC_hashPtr(ip)];
+
+ while ((matchIndex >= lowLimit)
+ && (nbAttempts)) {
+ nbAttempts--;
+ if (matchIndex >= dictLimit) {
+ const BYTE * const match = base + matchIndex;
+
+ if (*(match + ml) == *(ip + ml)
+ && (LZ4_read32(match) == LZ4_read32(ip))) {
+ size_t const mlt = LZ4_count(ip + MINMATCH,
+ match + MINMATCH, iLimit) + MINMATCH;
+
+ if (mlt > ml) {
+ ml = mlt;
+ *matchpos = match;
+ }
+ }
+ } else {
+ const BYTE * const match = dictBase + matchIndex;
+
+ if (LZ4_read32(match) == LZ4_read32(ip)) {
+ size_t mlt;
+ const BYTE *vLimit = ip
+ + (dictLimit - matchIndex);
+
+ if (vLimit > iLimit)
+ vLimit = iLimit;
+ mlt = LZ4_count(ip + MINMATCH,
+ match + MINMATCH, vLimit) + MINMATCH;
+ if ((ip + mlt == vLimit)
+ && (vLimit < iLimit))
+ mlt += LZ4_count(ip + mlt,
+ base + dictLimit,
+ iLimit);
+ if (mlt > ml) {
+ /* virtual matchpos */
+ ml = mlt;
+ *matchpos = base + matchIndex;
+ }
+ }
+ }
+ matchIndex -= DELTANEXTU16(matchIndex);
+ }
+
+ return (int)ml;
+}
+
+static FORCE_INLINE int LZ4HC_InsertAndGetWiderMatch(
+ LZ4HC_CCtx_internal *hc4,
+ const BYTE * const ip,
+ const BYTE * const iLowLimit,
+ const BYTE * const iHighLimit,
+ int longest,
+ const BYTE **matchpos,
+ const BYTE **startpos,
+ const int maxNbAttempts)
+{
+ U16 * const chainTable = hc4->chainTable;
+ U32 * const HashTable = hc4->hashTable;
+ const BYTE * const base = hc4->base;
+ const U32 dictLimit = hc4->dictLimit;
+ const BYTE * const lowPrefixPtr = base + dictLimit;
+ const U32 lowLimit = (hc4->lowLimit + 64 * KB > (U32)(ip - base))
+ ? hc4->lowLimit
+ : (U32)(ip - base) - (64 * KB - 1);
+ const BYTE * const dictBase = hc4->dictBase;
+ U32 matchIndex;
+ int nbAttempts = maxNbAttempts;
+ int delta = (int)(ip - iLowLimit);
+
+ /* First Match */
+ LZ4HC_Insert(hc4, ip);
+ matchIndex = HashTable[LZ4HC_hashPtr(ip)];
+
+ while ((matchIndex >= lowLimit)
+ && (nbAttempts)) {
+ nbAttempts--;
+ if (matchIndex >= dictLimit) {
+ const BYTE *matchPtr = base + matchIndex;
+
+ if (*(iLowLimit + longest)
+ == *(matchPtr - delta + longest)) {
+ if (LZ4_read32(matchPtr) == LZ4_read32(ip)) {
+ int mlt = MINMATCH + LZ4_count(
+ ip + MINMATCH,
+ matchPtr + MINMATCH,
+ iHighLimit);
+ int back = 0;
+
+ while ((ip + back > iLowLimit)
+ && (matchPtr + back > lowPrefixPtr)
+ && (ip[back - 1] == matchPtr[back - 1]))
+ back--;
+
+ mlt -= back;
+
+ if (mlt > longest) {
+ longest = (int)mlt;
+ *matchpos = matchPtr + back;
+ *startpos = ip + back;
+ }
+ }
+ }
+ } else {
+ const BYTE * const matchPtr = dictBase + matchIndex;
+
+ if (LZ4_read32(matchPtr) == LZ4_read32(ip)) {
+ size_t mlt;
+ int back = 0;
+ const BYTE *vLimit = ip + (dictLimit - matchIndex);
+
+ if (vLimit > iHighLimit)
+ vLimit = iHighLimit;
+
+ mlt = LZ4_count(ip + MINMATCH,
+ matchPtr + MINMATCH, vLimit) + MINMATCH;
+
+ if ((ip + mlt == vLimit) && (vLimit < iHighLimit))
+ mlt += LZ4_count(ip + mlt, base + dictLimit,
+ iHighLimit);
+ while ((ip + back > iLowLimit)
+ && (matchIndex + back > lowLimit)
+ && (ip[back - 1] == matchPtr[back - 1]))
+ back--;
+
+ mlt -= back;
+
+ if ((int)mlt > longest) {
+ longest = (int)mlt;
+ *matchpos = base + matchIndex + back;
+ *startpos = ip + back;
+ }
+ }
+ }
+
+ matchIndex -= DELTANEXTU16(matchIndex);
+ }
+
+ return longest;
+}
+
+static FORCE_INLINE int LZ4HC_encodeSequence(
+ const BYTE **ip,
+ BYTE **op,
+ const BYTE **anchor,
+ int matchLength,
+ const BYTE * const match,
+ limitedOutput_directive limitedOutputBuffer,
+ BYTE *oend)
+{
+ int length;
+ BYTE *token;
+
+ /* Encode Literal length */
+ length = (int)(*ip - *anchor);
+ token = (*op)++;
+
+ if ((limitedOutputBuffer)
+ && ((*op + (length>>8)
+ + length + (2 + 1 + LASTLITERALS)) > oend)) {
+ /* Check output limit */
+ return 1;
+ }
+ if (length >= (int)RUN_MASK) {
+ int len;
+
+ *token = (RUN_MASK<<ML_BITS);
+ len = length - RUN_MASK;
+ for (; len > 254 ; len -= 255)
+ *(*op)++ = 255;
+ *(*op)++ = (BYTE)len;
+ } else
+ *token = (BYTE)(length<<ML_BITS);
+
+ /* Copy Literals */
+ LZ4_wildCopy(*op, *anchor, (*op) + length);
+ *op += length;
+
+ /* Encode Offset */
+ LZ4_writeLE16(*op, (U16)(*ip - match));
+ *op += 2;
+
+ /* Encode MatchLength */
+ length = (int)(matchLength - MINMATCH);
+
+ if ((limitedOutputBuffer)
+ && (*op + (length>>8)
+ + (1 + LASTLITERALS) > oend)) {
+ /* Check output limit */
+ return 1;
+ }
+
+ if (length >= (int)ML_MASK) {
+ *token += ML_MASK;
+ length -= ML_MASK;
+
+ for (; length > 509 ; length -= 510) {
+ *(*op)++ = 255;
+ *(*op)++ = 255;
+ }
+
+ if (length > 254) {
+ length -= 255;
+ *(*op)++ = 255;
+ }
+
+ *(*op)++ = (BYTE)length;
+ } else
+ *token += (BYTE)(length);
+
+ /* Prepare next loop */
+ *ip += matchLength;
+ *anchor = *ip;
+
+ return 0;
+}
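+
+/*
+ * For illustration (not part of the original source): each sequence is
+ * laid out as token | literal-length extension | literals | 2-byte LE
+ * offset | match-length extension. E.g. 20 literals followed by a
+ * 25-byte match at distance 100 produce token 0xff (both fields
+ * saturated at 15), extension byte 5 (20 - 15), the 20 literal bytes,
+ * offset bytes 0x64 0x00, then extension byte 6 (25 - MINMATCH - 15).
+ */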
+
+static int LZ4HC_compress_generic(
+ LZ4HC_CCtx_internal *const ctx,
+ const char * const source,
+ char * const dest,
+ int const inputSize,
+ int const maxOutputSize,
+ int compressionLevel,
+ limitedOutput_directive limit
+ )
+{
+ const BYTE *ip = (const BYTE *) source;
+ const BYTE *anchor = ip;
+ const BYTE * const iend = ip + inputSize;
+ const BYTE * const mflimit = iend - MFLIMIT;
+ const BYTE * const matchlimit = (iend - LASTLITERALS);
+
+ BYTE *op = (BYTE *) dest;
+ BYTE * const oend = op + maxOutputSize;
+
+ unsigned int maxNbAttempts;
+ int ml, ml2, ml3, ml0;
+ const BYTE *ref = NULL;
+ const BYTE *start2 = NULL;
+ const BYTE *ref2 = NULL;
+ const BYTE *start3 = NULL;
+ const BYTE *ref3 = NULL;
+ const BYTE *start0;
+ const BYTE *ref0;
+
+ /* init */
+ if (compressionLevel > LZ4HC_MAX_CLEVEL)
+ compressionLevel = LZ4HC_MAX_CLEVEL;
+ if (compressionLevel < 1)
+ compressionLevel = LZ4HC_DEFAULT_CLEVEL;
+ maxNbAttempts = 1 << (compressionLevel - 1);
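+	/* e.g. compressionLevel 9 gives 1 << 8 = 256 chain attempts */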
+ ctx->end += inputSize;
+
+ ip++;
+
+ /* Main Loop */
+ while (ip < mflimit) {
+ ml = LZ4HC_InsertAndFindBestMatch(ctx, ip,
+ matchlimit, (&ref), maxNbAttempts);
+ if (!ml) {
+ ip++;
+ continue;
+ }
+
+ /* saved, in case we would skip too much */
+ start0 = ip;
+ ref0 = ref;
+ ml0 = ml;
+
+_Search2:
+ if (ip + ml < mflimit)
+ ml2 = LZ4HC_InsertAndGetWiderMatch(ctx,
+ ip + ml - 2, ip + 0,
+ matchlimit, ml, &ref2,
+ &start2, maxNbAttempts);
+ else
+ ml2 = ml;
+
+ if (ml2 == ml) {
+ /* No better match */
+ if (LZ4HC_encodeSequence(&ip, &op,
+ &anchor, ml, ref, limit, oend))
+ return 0;
+ continue;
+ }
+
+ if (start0 < ip) {
+ if (start2 < ip + ml0) {
+ /* empirical */
+ ip = start0;
+ ref = ref0;
+ ml = ml0;
+ }
+ }
+
+ /* Here, start0 == ip */
+ if ((start2 - ip) < 3) {
+ /* First Match too small : removed */
+ ml = ml2;
+ ip = start2;
+ ref = ref2;
+ goto _Search2;
+ }
+
+_Search3:
+ /*
+ * Currently we have :
+ * ml2 > ml1, and
+ * ip1 + 3 <= ip2 (usually < ip1 + ml1)
+ */
+ if ((start2 - ip) < OPTIMAL_ML) {
+ int correction;
+ int new_ml = ml;
+
+ if (new_ml > OPTIMAL_ML)
+ new_ml = OPTIMAL_ML;
+ if (ip + new_ml > start2 + ml2 - MINMATCH)
+ new_ml = (int)(start2 - ip) + ml2 - MINMATCH;
+
+ correction = new_ml - (int)(start2 - ip);
+
+ if (correction > 0) {
+ start2 += correction;
+ ref2 += correction;
+ ml2 -= correction;
+ }
+ }
+ /*
+ * Now, we have start2 = ip + new_ml,
+ * with new_ml = min(ml, OPTIMAL_ML = 18)
+ */
+
+ if (start2 + ml2 < mflimit)
+ ml3 = LZ4HC_InsertAndGetWiderMatch(ctx,
+ start2 + ml2 - 3, start2,
+ matchlimit, ml2, &ref3, &start3,
+ maxNbAttempts);
+ else
+ ml3 = ml2;
+
+ if (ml3 == ml2) {
+ /* No better match : 2 sequences to encode */
+ /* ip & ref are known; Now for ml */
+ if (start2 < ip + ml)
+ ml = (int)(start2 - ip);
+ /* Now, encode 2 sequences */
+ if (LZ4HC_encodeSequence(&ip, &op, &anchor,
+ ml, ref, limit, oend))
+ return 0;
+ ip = start2;
+ if (LZ4HC_encodeSequence(&ip, &op, &anchor,
+ ml2, ref2, limit, oend))
+ return 0;
+ continue;
+ }
+
+ if (start3 < ip + ml + 3) {
+ /* Not enough space for match 2 : remove it */
+ if (start3 >= (ip + ml)) {
+ /* can write Seq1 immediately
+ * ==> Seq2 is removed,
+ * so Seq3 becomes Seq1
+ */
+ if (start2 < ip + ml) {
+ int correction = (int)(ip + ml - start2);
+
+ start2 += correction;
+ ref2 += correction;
+ ml2 -= correction;
+ if (ml2 < MINMATCH) {
+ start2 = start3;
+ ref2 = ref3;
+ ml2 = ml3;
+ }
+ }
+
+ if (LZ4HC_encodeSequence(&ip, &op, &anchor,
+ ml, ref, limit, oend))
+ return 0;
+ ip = start3;
+ ref = ref3;
+ ml = ml3;
+
+ start0 = start2;
+ ref0 = ref2;
+ ml0 = ml2;
+ goto _Search2;
+ }
+
+ start2 = start3;
+ ref2 = ref3;
+ ml2 = ml3;
+ goto _Search3;
+ }
+
+ /*
+ * OK, now we have 3 ascending matches;
+ * let's write at least the first one
+ * ip & ref are known; Now for ml
+ */
+ if (start2 < ip + ml) {
+ if ((start2 - ip) < (int)ML_MASK) {
+ int correction;
+
+ if (ml > OPTIMAL_ML)
+ ml = OPTIMAL_ML;
+ if (ip + ml > start2 + ml2 - MINMATCH)
+ ml = (int)(start2 - ip) + ml2 - MINMATCH;
+ correction = ml - (int)(start2 - ip);
+ if (correction > 0) {
+ start2 += correction;
+ ref2 += correction;
+ ml2 -= correction;
+ }
+ } else
+ ml = (int)(start2 - ip);
+ }
+ if (LZ4HC_encodeSequence(&ip, &op, &anchor, ml,
+ ref, limit, oend))
+ return 0;
+
+ ip = start2;
+ ref = ref2;
+ ml = ml2;
+
+ start2 = start3;
+ ref2 = ref3;
+ ml2 = ml3;
+
+ goto _Search3;
+ }
+
+ /* Encode Last Literals */
+ {
+ int lastRun = (int)(iend - anchor);
+
+ if ((limit)
+ && (((char *)op - dest) + lastRun + 1
+ + ((lastRun + 255 - RUN_MASK)/255)
+ > (U32)maxOutputSize)) {
+ /* Check output limit */
+ return 0;
+ }
+ if (lastRun >= (int)RUN_MASK) {
+ *op++ = (RUN_MASK<<ML_BITS);
+ lastRun -= RUN_MASK;
+ for (; lastRun > 254 ; lastRun -= 255)
+ *op++ = 255;
+ *op++ = (BYTE) lastRun;
+ } else
+ *op++ = (BYTE)(lastRun<<ML_BITS);
+ LZ4_memcpy(op, anchor, iend - anchor);
+ op += iend - anchor;
+ }
+
+ /* End */
+ return (int) (((char *)op) - dest);
+}
+
+static int LZ4_compress_HC_extStateHC(
+ void *state,
+ const char *src,
+ char *dst,
+ int srcSize,
+ int maxDstSize,
+ int compressionLevel)
+{
+ LZ4HC_CCtx_internal *ctx = &((LZ4_streamHC_t *)state)->internal_donotuse;
+
+ if (((size_t)(state)&(sizeof(void *) - 1)) != 0) {
+ /* Error : state is not aligned
+ * for pointers (32 or 64 bits)
+ */
+ return 0;
+ }
+
+ LZ4HC_init(ctx, (const BYTE *)src);
+
+ if (maxDstSize < LZ4_compressBound(srcSize))
+ return LZ4HC_compress_generic(ctx, src, dst,
+ srcSize, maxDstSize, compressionLevel, limitedOutput);
+ else
+ return LZ4HC_compress_generic(ctx, src, dst,
+ srcSize, maxDstSize, compressionLevel, noLimit);
+}
+
+int LZ4_compress_HC(const char *src, char *dst, int srcSize,
+ int maxDstSize, int compressionLevel, void *wrkmem)
+{
+ return LZ4_compress_HC_extStateHC(wrkmem, src, dst,
+ srcSize, maxDstSize, compressionLevel);
+}
+EXPORT_SYMBOL(LZ4_compress_HC);
+
+/**************************************
+ * Streaming Functions
+ **************************************/
+void LZ4_resetStreamHC(LZ4_streamHC_t *LZ4_streamHCPtr, int compressionLevel)
+{
+ LZ4_streamHCPtr->internal_donotuse.base = NULL;
+ LZ4_streamHCPtr->internal_donotuse.compressionLevel = (unsigned int)compressionLevel;
+}
+
+int LZ4_loadDictHC(LZ4_streamHC_t *LZ4_streamHCPtr,
+ const char *dictionary,
+ int dictSize)
+{
+ LZ4HC_CCtx_internal *ctxPtr = &LZ4_streamHCPtr->internal_donotuse;
+
+ if (dictSize > 64 * KB) {
+ dictionary += dictSize - 64 * KB;
+ dictSize = 64 * KB;
+ }
+ LZ4HC_init(ctxPtr, (const BYTE *)dictionary);
+ if (dictSize >= 4)
+ LZ4HC_Insert(ctxPtr, (const BYTE *)dictionary + (dictSize - 3));
+ ctxPtr->end = (const BYTE *)dictionary + dictSize;
+ return dictSize;
+}
+EXPORT_SYMBOL(LZ4_loadDictHC);
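+
+/*
+ * Typical streaming usage (a sketch; 'stream', 'dict', 'blk', 'dst' and
+ * 'dictBuf' are hypothetical caller-side names):
+ *
+ *	LZ4_resetStreamHC(&stream, LZ4HC_DEFAULT_CLEVEL);
+ *	LZ4_loadDictHC(&stream, dict, dictSize);
+ *	while (more blocks)
+ *		LZ4_compress_HC_continue(&stream, blk, dst, blkSize, dstSize);
+ *	LZ4_saveDictHC(&stream, dictBuf, 64 * KB);
+ */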
+
+/* compression */
+
+static void LZ4HC_setExternalDict(
+ LZ4HC_CCtx_internal *ctxPtr,
+ const BYTE *newBlock)
+{
+ if (ctxPtr->end >= ctxPtr->base + 4) {
+ /* Referencing remaining dictionary content */
+ LZ4HC_Insert(ctxPtr, ctxPtr->end - 3);
+ }
+
+ /*
+ * Only one memory segment for extDict,
+ * so any previous extDict is lost at this stage
+ */
+ ctxPtr->lowLimit = ctxPtr->dictLimit;
+ ctxPtr->dictLimit = (U32)(ctxPtr->end - ctxPtr->base);
+ ctxPtr->dictBase = ctxPtr->base;
+ ctxPtr->base = newBlock - ctxPtr->dictLimit;
+ ctxPtr->end = newBlock;
+ /* match referencing will resume from there */
+ ctxPtr->nextToUpdate = ctxPtr->dictLimit;
+}
+
+static int LZ4_compressHC_continue_generic(
+ LZ4_streamHC_t *LZ4_streamHCPtr,
+ const char *source,
+ char *dest,
+ int inputSize,
+ int maxOutputSize,
+ limitedOutput_directive limit)
+{
+ LZ4HC_CCtx_internal *ctxPtr = &LZ4_streamHCPtr->internal_donotuse;
+
+	/* auto-init if forgotten */
+ if (ctxPtr->base == NULL)
+ LZ4HC_init(ctxPtr, (const BYTE *) source);
+
+ /* Check overflow */
+ if ((size_t)(ctxPtr->end - ctxPtr->base) > 2 * GB) {
+ size_t dictSize = (size_t)(ctxPtr->end - ctxPtr->base)
+ - ctxPtr->dictLimit;
+ if (dictSize > 64 * KB)
+ dictSize = 64 * KB;
+ LZ4_loadDictHC(LZ4_streamHCPtr,
+ (const char *)(ctxPtr->end) - dictSize, (int)dictSize);
+ }
+
+ /* Check if blocks follow each other */
+ if ((const BYTE *)source != ctxPtr->end)
+ LZ4HC_setExternalDict(ctxPtr, (const BYTE *)source);
+
+ /* Check overlapping input/dictionary space */
+ {
+ const BYTE *sourceEnd = (const BYTE *) source + inputSize;
+ const BYTE * const dictBegin = ctxPtr->dictBase + ctxPtr->lowLimit;
+ const BYTE * const dictEnd = ctxPtr->dictBase + ctxPtr->dictLimit;
+
+ if ((sourceEnd > dictBegin)
+ && ((const BYTE *)source < dictEnd)) {
+ if (sourceEnd > dictEnd)
+ sourceEnd = dictEnd;
+ ctxPtr->lowLimit = (U32)(sourceEnd - ctxPtr->dictBase);
+
+ if (ctxPtr->dictLimit - ctxPtr->lowLimit < 4)
+ ctxPtr->lowLimit = ctxPtr->dictLimit;
+ }
+ }
+
+ return LZ4HC_compress_generic(ctxPtr, source, dest,
+ inputSize, maxOutputSize, ctxPtr->compressionLevel, limit);
+}
+
+int LZ4_compress_HC_continue(
+ LZ4_streamHC_t *LZ4_streamHCPtr,
+ const char *source,
+ char *dest,
+ int inputSize,
+ int maxOutputSize)
+{
+ if (maxOutputSize < LZ4_compressBound(inputSize))
+ return LZ4_compressHC_continue_generic(LZ4_streamHCPtr,
+ source, dest, inputSize, maxOutputSize, limitedOutput);
+ else
+ return LZ4_compressHC_continue_generic(LZ4_streamHCPtr,
+ source, dest, inputSize, maxOutputSize, noLimit);
+}
+EXPORT_SYMBOL(LZ4_compress_HC_continue);
+
+/* dictionary saving */
+
+int LZ4_saveDictHC(
+ LZ4_streamHC_t *LZ4_streamHCPtr,
+ char *safeBuffer,
+ int dictSize)
+{
+ LZ4HC_CCtx_internal *const streamPtr = &LZ4_streamHCPtr->internal_donotuse;
+ int const prefixSize = (int)(streamPtr->end
+ - (streamPtr->base + streamPtr->dictLimit));
+
+ if (dictSize > 64 * KB)
+ dictSize = 64 * KB;
+ if (dictSize < 4)
+ dictSize = 0;
+ if (dictSize > prefixSize)
+ dictSize = prefixSize;
+
+ memmove(safeBuffer, streamPtr->end - dictSize, dictSize);
+
+ {
+ U32 const endIndex = (U32)(streamPtr->end - streamPtr->base);
+
+ streamPtr->end = (const BYTE *)safeBuffer + dictSize;
+ streamPtr->base = streamPtr->end - endIndex;
+ streamPtr->dictLimit = endIndex - dictSize;
+ streamPtr->lowLimit = endIndex - dictSize;
+
+ if (streamPtr->nextToUpdate < streamPtr->dictLimit)
+ streamPtr->nextToUpdate = streamPtr->dictLimit;
+ }
+ return dictSize;
+}
+EXPORT_SYMBOL(LZ4_saveDictHC);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("LZ4 HC compressor");
diff --git a/lib/lzo/Makefile b/lib/lzo/Makefile
new file mode 100644
index 000000000..2f58fafbb
--- /dev/null
+++ b/lib/lzo/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+lzo_compress-objs := lzo1x_compress.o
+lzo_decompress-objs := lzo1x_decompress_safe.o
+
+obj-$(CONFIG_LZO_COMPRESS) += lzo_compress.o
+obj-$(CONFIG_LZO_DECOMPRESS) += lzo_decompress.o
diff --git a/lib/lzo/lzo1x_compress.c b/lib/lzo/lzo1x_compress.c
new file mode 100644
index 000000000..9d31e7126
--- /dev/null
+++ b/lib/lzo/lzo1x_compress.c
@@ -0,0 +1,398 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * LZO1X Compressor from LZO
+ *
+ * Copyright (C) 1996-2012 Markus F.X.J. Oberhumer <markus@oberhumer.com>
+ *
+ * The full LZO package can be found at:
+ * http://www.oberhumer.com/opensource/lzo/
+ *
+ * Changed for Linux kernel use by:
+ * Nitin Gupta <nitingupta910@gmail.com>
+ * Richard Purdie <rpurdie@openedhand.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <asm/unaligned.h>
+#include <linux/lzo.h>
+#include "lzodefs.h"
+
+static noinline size_t
+lzo1x_1_do_compress(const unsigned char *in, size_t in_len,
+ unsigned char *out, size_t *out_len,
+ size_t ti, void *wrkmem, signed char *state_offset,
+ const unsigned char bitstream_version)
+{
+ const unsigned char *ip;
+ unsigned char *op;
+ const unsigned char * const in_end = in + in_len;
+ const unsigned char * const ip_end = in + in_len - 20;
+ const unsigned char *ii;
+ lzo_dict_t * const dict = (lzo_dict_t *) wrkmem;
+
+ op = out;
+ ip = in;
+ ii = ip;
+ ip += ti < 4 ? 4 - ti : 0;
+
+ for (;;) {
+ const unsigned char *m_pos = NULL;
+ size_t t, m_len, m_off;
+ u32 dv;
+ u32 run_length = 0;
+literal:
+ ip += 1 + ((ip - ii) >> 5);
+next:
+ if (unlikely(ip >= ip_end))
+ break;
+ dv = get_unaligned_le32(ip);
+
+ if (dv == 0 && bitstream_version) {
+ const unsigned char *ir = ip + 4;
+ const unsigned char *limit = min(ip_end, ip + MAX_ZERO_RUN_LENGTH + 1);
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && \
+ defined(LZO_FAST_64BIT_MEMORY_ACCESS)
+ u64 dv64;
+
+ for (; (ir + 32) <= limit; ir += 32) {
+ dv64 = get_unaligned((u64 *)ir);
+ dv64 |= get_unaligned((u64 *)ir + 1);
+ dv64 |= get_unaligned((u64 *)ir + 2);
+ dv64 |= get_unaligned((u64 *)ir + 3);
+ if (dv64)
+ break;
+ }
+ for (; (ir + 8) <= limit; ir += 8) {
+ dv64 = get_unaligned((u64 *)ir);
+ if (dv64) {
+# if defined(__LITTLE_ENDIAN)
+ ir += __builtin_ctzll(dv64) >> 3;
+# elif defined(__BIG_ENDIAN)
+ ir += __builtin_clzll(dv64) >> 3;
+# else
+# error "missing endian definition"
+# endif
+ break;
+ }
+ }
+#else
+ while ((ir < (const unsigned char *)
+ ALIGN((uintptr_t)ir, 4)) &&
+ (ir < limit) && (*ir == 0))
+ ir++;
+ if (IS_ALIGNED((uintptr_t)ir, 4)) {
+ for (; (ir + 4) <= limit; ir += 4) {
+ dv = *((u32 *)ir);
+ if (dv) {
+# if defined(__LITTLE_ENDIAN)
+ ir += __builtin_ctz(dv) >> 3;
+# elif defined(__BIG_ENDIAN)
+ ir += __builtin_clz(dv) >> 3;
+# else
+# error "missing endian definition"
+# endif
+ break;
+ }
+ }
+ }
+#endif
+ while (likely(ir < limit) && unlikely(*ir == 0))
+ ir++;
+ run_length = ir - ip;
+ if (run_length > MAX_ZERO_RUN_LENGTH)
+ run_length = MAX_ZERO_RUN_LENGTH;
+ } else {
+ t = ((dv * 0x1824429d) >> (32 - D_BITS)) & D_MASK;
+ m_pos = in + dict[t];
+ dict[t] = (lzo_dict_t) (ip - in);
+ if (unlikely(dv != get_unaligned_le32(m_pos)))
+ goto literal;
+ }
+
+ ii -= ti;
+ ti = 0;
+ t = ip - ii;
+ if (t != 0) {
+ if (t <= 3) {
+ op[*state_offset] |= t;
+ COPY4(op, ii);
+ op += t;
+ } else if (t <= 16) {
+ *op++ = (t - 3);
+ COPY8(op, ii);
+ COPY8(op + 8, ii + 8);
+ op += t;
+ } else {
+ if (t <= 18) {
+ *op++ = (t - 3);
+ } else {
+ size_t tt = t - 18;
+ *op++ = 0;
+ while (unlikely(tt > 255)) {
+ tt -= 255;
+ *op++ = 0;
+ }
+ *op++ = tt;
+ }
+ do {
+ COPY8(op, ii);
+ COPY8(op + 8, ii + 8);
+ op += 16;
+ ii += 16;
+ t -= 16;
+ } while (t >= 16);
+ if (t > 0) do {
+ *op++ = *ii++;
+ } while (--t > 0);
+ }
+ }
+
+ if (unlikely(run_length)) {
+ ip += run_length;
+ run_length -= MIN_ZERO_RUN_LENGTH;
+ put_unaligned_le32((run_length << 21) | 0xfffc18
+ | (run_length & 0x7), op);
+ op += 4;
+ run_length = 0;
+ *state_offset = -3;
+ goto finished_writing_instruction;
+ }
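+		/*
+		 * Illustration of the zero-run instruction above: a run of
+		 * 500 zero bytes stores run_length = 500 - 4 = 496 and
+		 * emits the bytes 18 fc ff 3e; the decompressor recombines
+		 * bits 0-2 of byte 0 with byte 3 and adds
+		 * MIN_ZERO_RUN_LENGTH back.
+		 */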
+
+ m_len = 4;
+ {
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && defined(LZO_USE_CTZ64)
+ u64 v;
+ v = get_unaligned((const u64 *) (ip + m_len)) ^
+ get_unaligned((const u64 *) (m_pos + m_len));
+ if (unlikely(v == 0)) {
+ do {
+ m_len += 8;
+ v = get_unaligned((const u64 *) (ip + m_len)) ^
+ get_unaligned((const u64 *) (m_pos + m_len));
+ if (unlikely(ip + m_len >= ip_end))
+ goto m_len_done;
+ } while (v == 0);
+ }
+# if defined(__LITTLE_ENDIAN)
+ m_len += (unsigned) __builtin_ctzll(v) / 8;
+# elif defined(__BIG_ENDIAN)
+ m_len += (unsigned) __builtin_clzll(v) / 8;
+# else
+# error "missing endian definition"
+# endif
+#elif defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && defined(LZO_USE_CTZ32)
+ u32 v;
+ v = get_unaligned((const u32 *) (ip + m_len)) ^
+ get_unaligned((const u32 *) (m_pos + m_len));
+ if (unlikely(v == 0)) {
+ do {
+ m_len += 4;
+ v = get_unaligned((const u32 *) (ip + m_len)) ^
+ get_unaligned((const u32 *) (m_pos + m_len));
+ if (v != 0)
+ break;
+ m_len += 4;
+ v = get_unaligned((const u32 *) (ip + m_len)) ^
+ get_unaligned((const u32 *) (m_pos + m_len));
+ if (unlikely(ip + m_len >= ip_end))
+ goto m_len_done;
+ } while (v == 0);
+ }
+# if defined(__LITTLE_ENDIAN)
+ m_len += (unsigned) __builtin_ctz(v) / 8;
+# elif defined(__BIG_ENDIAN)
+ m_len += (unsigned) __builtin_clz(v) / 8;
+# else
+# error "missing endian definition"
+# endif
+#else
+ if (unlikely(ip[m_len] == m_pos[m_len])) {
+ do {
+ m_len += 1;
+ if (ip[m_len] != m_pos[m_len])
+ break;
+ m_len += 1;
+ if (ip[m_len] != m_pos[m_len])
+ break;
+ m_len += 1;
+ if (ip[m_len] != m_pos[m_len])
+ break;
+ m_len += 1;
+ if (ip[m_len] != m_pos[m_len])
+ break;
+ m_len += 1;
+ if (ip[m_len] != m_pos[m_len])
+ break;
+ m_len += 1;
+ if (ip[m_len] != m_pos[m_len])
+ break;
+ m_len += 1;
+ if (ip[m_len] != m_pos[m_len])
+ break;
+ m_len += 1;
+ if (unlikely(ip + m_len >= ip_end))
+ goto m_len_done;
+ } while (ip[m_len] == m_pos[m_len]);
+ }
+#endif
+ }
+m_len_done:
+
+ m_off = ip - m_pos;
+ ip += m_len;
+ if (m_len <= M2_MAX_LEN && m_off <= M2_MAX_OFFSET) {
+ m_off -= 1;
+ *op++ = (((m_len - 1) << 5) | ((m_off & 7) << 2));
+ *op++ = (m_off >> 3);
+ } else if (m_off <= M3_MAX_OFFSET) {
+ m_off -= 1;
+ if (m_len <= M3_MAX_LEN)
+ *op++ = (M3_MARKER | (m_len - 2));
+ else {
+ m_len -= M3_MAX_LEN;
+ *op++ = M3_MARKER | 0;
+ while (unlikely(m_len > 255)) {
+ m_len -= 255;
+ *op++ = 0;
+ }
+ *op++ = (m_len);
+ }
+ *op++ = (m_off << 2);
+ *op++ = (m_off >> 6);
+ } else {
+ m_off -= 0x4000;
+ if (m_len <= M4_MAX_LEN)
+ *op++ = (M4_MARKER | ((m_off >> 11) & 8)
+ | (m_len - 2));
+ else {
+ if (unlikely(((m_off & 0x403f) == 0x403f)
+ && (m_len >= 261)
+ && (m_len <= 264))
+ && likely(bitstream_version)) {
+ // Under lzo-rle, block copies
+ // for 261 <= length <= 264 and
+ // (distance & 0x80f3) == 0x80f3
+ // can result in ambiguous
+ // output. Adjust length
+ // to 260 to prevent ambiguity.
+ ip -= m_len - 260;
+ m_len = 260;
+ }
+ m_len -= M4_MAX_LEN;
+ *op++ = (M4_MARKER | ((m_off >> 11) & 8));
+ while (unlikely(m_len > 255)) {
+ m_len -= 255;
+ *op++ = 0;
+ }
+ *op++ = (m_len);
+ }
+ *op++ = (m_off << 2);
+ *op++ = (m_off >> 6);
+ }
+ *state_offset = -2;
+finished_writing_instruction:
+ ii = ip;
+ goto next;
+ }
+ *out_len = op - out;
+ return in_end - (ii - ti);
+}
+
+static int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len,
+ unsigned char *out, size_t *out_len,
+ void *wrkmem, const unsigned char bitstream_version)
+{
+ const unsigned char *ip = in;
+ unsigned char *op = out;
+ unsigned char *data_start;
+ size_t l = in_len;
+ size_t t = 0;
+ signed char state_offset = -2;
+ unsigned int m4_max_offset;
+
+ // LZO v0 will never write 17 as first byte (except for zero-length
+ // input), so this is used to version the bitstream
+ if (bitstream_version > 0) {
+ *op++ = 17;
+ *op++ = bitstream_version;
+ m4_max_offset = M4_MAX_OFFSET_V1;
+ } else {
+ m4_max_offset = M4_MAX_OFFSET_V0;
+ }
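+	/*
+	 * Note (for illustration): a versioned lzo-rle stream therefore
+	 * begins with the two bytes {17, bitstream_version}, while a plain
+	 * LZO1X stream begins directly with its first instruction byte.
+	 */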
+
+ data_start = op;
+
+ while (l > 20) {
+ size_t ll = min_t(size_t, l, m4_max_offset + 1);
+ uintptr_t ll_end = (uintptr_t) ip + ll;
+ if ((ll_end + ((t + ll) >> 5)) <= ll_end)
+ break;
+ BUILD_BUG_ON(D_SIZE * sizeof(lzo_dict_t) > LZO1X_1_MEM_COMPRESS);
+ memset(wrkmem, 0, D_SIZE * sizeof(lzo_dict_t));
+ t = lzo1x_1_do_compress(ip, ll, op, out_len, t, wrkmem,
+ &state_offset, bitstream_version);
+ ip += ll;
+ op += *out_len;
+ l -= ll;
+ }
+ t += l;
+
+ if (t > 0) {
+ const unsigned char *ii = in + in_len - t;
+
+ if (op == data_start && t <= 238) {
+ *op++ = (17 + t);
+ } else if (t <= 3) {
+ op[state_offset] |= t;
+ } else if (t <= 18) {
+ *op++ = (t - 3);
+ } else {
+ size_t tt = t - 18;
+ *op++ = 0;
+ while (tt > 255) {
+ tt -= 255;
+ *op++ = 0;
+ }
+ *op++ = tt;
+ }
+ if (t >= 16) do {
+ COPY8(op, ii);
+ COPY8(op + 8, ii + 8);
+ op += 16;
+ ii += 16;
+ t -= 16;
+ } while (t >= 16);
+ if (t > 0) do {
+ *op++ = *ii++;
+ } while (--t > 0);
+ }
+
+ *op++ = M4_MARKER | 1;
+ *op++ = 0;
+ *op++ = 0;
+
+ *out_len = op - out;
+ return LZO_E_OK;
+}
+
+int lzo1x_1_compress(const unsigned char *in, size_t in_len,
+ unsigned char *out, size_t *out_len,
+ void *wrkmem)
+{
+ return lzogeneric1x_1_compress(in, in_len, out, out_len, wrkmem, 0);
+}
+
+int lzorle1x_1_compress(const unsigned char *in, size_t in_len,
+ unsigned char *out, size_t *out_len,
+ void *wrkmem)
+{
+ return lzogeneric1x_1_compress(in, in_len, out, out_len,
+ wrkmem, LZO_VERSION);
+}
+
+EXPORT_SYMBOL_GPL(lzo1x_1_compress);
+EXPORT_SYMBOL_GPL(lzorle1x_1_compress);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("LZO1X-1 Compressor");
diff --git a/lib/lzo/lzo1x_decompress_safe.c b/lib/lzo/lzo1x_decompress_safe.c
new file mode 100644
index 000000000..7892a40cf
--- /dev/null
+++ b/lib/lzo/lzo1x_decompress_safe.c
@@ -0,0 +1,296 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * LZO1X Decompressor from LZO
+ *
+ * Copyright (C) 1996-2012 Markus F.X.J. Oberhumer <markus@oberhumer.com>
+ *
+ * The full LZO package can be found at:
+ * http://www.oberhumer.com/opensource/lzo/
+ *
+ * Changed for Linux kernel use by:
+ * Nitin Gupta <nitingupta910@gmail.com>
+ * Richard Purdie <rpurdie@openedhand.com>
+ */
+
+#ifndef STATIC
+#include <linux/module.h>
+#include <linux/kernel.h>
+#endif
+#include <asm/unaligned.h>
+#include <linux/lzo.h>
+#include "lzodefs.h"
+
+#define HAVE_IP(x) ((size_t)(ip_end - ip) >= (size_t)(x))
+#define HAVE_OP(x) ((size_t)(op_end - op) >= (size_t)(x))
+#define NEED_IP(x) if (!HAVE_IP(x)) goto input_overrun
+#define NEED_OP(x) if (!HAVE_OP(x)) goto output_overrun
+#define TEST_LB(m_pos) if ((m_pos) < out) goto lookbehind_overrun
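+
+/*
+ * These checks are what makes this decompressor "safe": every input read
+ * is bounded by ip_end, every output write by op_end, and every
+ * back-reference must stay within the output produced so far.
+ */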
+
+/* This MAX_255_COUNT is the maximum number of times we can add 255 to a base
+ * count without overflowing an integer. The multiply will overflow when
+ * multiplying 255 by more than MAXINT/255. The sum will overflow earlier
+ * depending on the base count. Since the base count is taken from a u8
+ * and a few bits, it is safe to assume that it will always be lower than
+ * or equal to 2*255, thus we can always prevent any overflow by accepting
+ * two fewer 255 steps. See Documentation/staging/lzo.rst for more information.
+ */
+#define MAX_255_COUNT ((((size_t)~0) / 255) - 2)
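+
+/*
+ * Worked example (for illustration): three zero bytes in a length field
+ * give offset = 3, so (3 << 8) - 3 = 3 * 255 = 765 is added to the
+ * count, together with the run-type constant (15, 31 or 7) and the
+ * final non-zero byte.
+ */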
+
+int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
+ unsigned char *out, size_t *out_len)
+{
+ unsigned char *op;
+ const unsigned char *ip;
+ size_t t, next;
+ size_t state = 0;
+ const unsigned char *m_pos;
+ const unsigned char * const ip_end = in + in_len;
+ unsigned char * const op_end = out + *out_len;
+
+ unsigned char bitstream_version;
+
+ op = out;
+ ip = in;
+
+ if (unlikely(in_len < 3))
+ goto input_overrun;
+
+ if (likely(in_len >= 5) && likely(*ip == 17)) {
+ bitstream_version = ip[1];
+ ip += 2;
+ } else {
+ bitstream_version = 0;
+ }
+
+ if (*ip > 17) {
+ t = *ip++ - 17;
+ if (t < 4) {
+ next = t;
+ goto match_next;
+ }
+ goto copy_literal_run;
+ }
+
+ for (;;) {
+ t = *ip++;
+ if (t < 16) {
+ if (likely(state == 0)) {
+ if (unlikely(t == 0)) {
+ size_t offset;
+ const unsigned char *ip_last = ip;
+
+ while (unlikely(*ip == 0)) {
+ ip++;
+ NEED_IP(1);
+ }
+ offset = ip - ip_last;
+ if (unlikely(offset > MAX_255_COUNT))
+ return LZO_E_ERROR;
+
+ offset = (offset << 8) - offset;
+ t += offset + 15 + *ip++;
+ }
+ t += 3;
+copy_literal_run:
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ if (likely(HAVE_IP(t + 15) && HAVE_OP(t + 15))) {
+ const unsigned char *ie = ip + t;
+ unsigned char *oe = op + t;
+ do {
+ COPY8(op, ip);
+ op += 8;
+ ip += 8;
+ COPY8(op, ip);
+ op += 8;
+ ip += 8;
+ } while (ip < ie);
+ ip = ie;
+ op = oe;
+ } else
+#endif
+ {
+ NEED_OP(t);
+ NEED_IP(t + 3);
+ do {
+ *op++ = *ip++;
+ } while (--t > 0);
+ }
+ state = 4;
+ continue;
+ } else if (state != 4) {
+ next = t & 3;
+ m_pos = op - 1;
+ m_pos -= t >> 2;
+ m_pos -= *ip++ << 2;
+ TEST_LB(m_pos);
+ NEED_OP(2);
+ op[0] = m_pos[0];
+ op[1] = m_pos[1];
+ op += 2;
+ goto match_next;
+ } else {
+ next = t & 3;
+ m_pos = op - (1 + M2_MAX_OFFSET);
+ m_pos -= t >> 2;
+ m_pos -= *ip++ << 2;
+ t = 3;
+ }
+ } else if (t >= 64) {
+ next = t & 3;
+ m_pos = op - 1;
+ m_pos -= (t >> 2) & 7;
+ m_pos -= *ip++ << 3;
+ t = (t >> 5) - 1 + (3 - 1);
+ } else if (t >= 32) {
+ t = (t & 31) + (3 - 1);
+ if (unlikely(t == 2)) {
+ size_t offset;
+ const unsigned char *ip_last = ip;
+
+ while (unlikely(*ip == 0)) {
+ ip++;
+ NEED_IP(1);
+ }
+ offset = ip - ip_last;
+ if (unlikely(offset > MAX_255_COUNT))
+ return LZO_E_ERROR;
+
+ offset = (offset << 8) - offset;
+ t += offset + 31 + *ip++;
+ NEED_IP(2);
+ }
+ m_pos = op - 1;
+ next = get_unaligned_le16(ip);
+ ip += 2;
+ m_pos -= next >> 2;
+ next &= 3;
+ } else {
+ NEED_IP(2);
+ next = get_unaligned_le16(ip);
+ if (((next & 0xfffc) == 0xfffc) &&
+ ((t & 0xf8) == 0x18) &&
+ likely(bitstream_version)) {
+ NEED_IP(3);
+ t &= 7;
+ t |= ip[2] << 3;
+ t += MIN_ZERO_RUN_LENGTH;
+ NEED_OP(t);
+ memset(op, 0, t);
+ op += t;
+ next &= 3;
+ ip += 3;
+ goto match_next;
+ } else {
+ m_pos = op;
+ m_pos -= (t & 8) << 11;
+ t = (t & 7) + (3 - 1);
+ if (unlikely(t == 2)) {
+ size_t offset;
+ const unsigned char *ip_last = ip;
+
+ while (unlikely(*ip == 0)) {
+ ip++;
+ NEED_IP(1);
+ }
+ offset = ip - ip_last;
+ if (unlikely(offset > MAX_255_COUNT))
+ return LZO_E_ERROR;
+
+ offset = (offset << 8) - offset;
+ t += offset + 7 + *ip++;
+ NEED_IP(2);
+ next = get_unaligned_le16(ip);
+ }
+ ip += 2;
+ m_pos -= next >> 2;
+ next &= 3;
+ if (m_pos == op)
+ goto eof_found;
+ m_pos -= 0x4000;
+ }
+ }
+ TEST_LB(m_pos);
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ if (op - m_pos >= 8) {
+ unsigned char *oe = op + t;
+ if (likely(HAVE_OP(t + 15))) {
+ do {
+ COPY8(op, m_pos);
+ op += 8;
+ m_pos += 8;
+ COPY8(op, m_pos);
+ op += 8;
+ m_pos += 8;
+ } while (op < oe);
+ op = oe;
+ if (HAVE_IP(6)) {
+ state = next;
+ COPY4(op, ip);
+ op += next;
+ ip += next;
+ continue;
+ }
+ } else {
+ NEED_OP(t);
+ do {
+ *op++ = *m_pos++;
+ } while (op < oe);
+ }
+ } else
+#endif
+ {
+ unsigned char *oe = op + t;
+ NEED_OP(t);
+ op[0] = m_pos[0];
+ op[1] = m_pos[1];
+ op += 2;
+ m_pos += 2;
+ do {
+ *op++ = *m_pos++;
+ } while (op < oe);
+ }
+match_next:
+ state = next;
+ t = next;
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ if (likely(HAVE_IP(6) && HAVE_OP(4))) {
+ COPY4(op, ip);
+ op += t;
+ ip += t;
+ } else
+#endif
+ {
+ NEED_IP(t + 3);
+ NEED_OP(t);
+ while (t > 0) {
+ *op++ = *ip++;
+ t--;
+ }
+ }
+ }
+
+eof_found:
+ *out_len = op - out;
+ return (t != 3 ? LZO_E_ERROR :
+ ip == ip_end ? LZO_E_OK :
+ ip < ip_end ? LZO_E_INPUT_NOT_CONSUMED : LZO_E_INPUT_OVERRUN);
+
+input_overrun:
+ *out_len = op - out;
+ return LZO_E_INPUT_OVERRUN;
+
+output_overrun:
+ *out_len = op - out;
+ return LZO_E_OUTPUT_OVERRUN;
+
+lookbehind_overrun:
+ *out_len = op - out;
+ return LZO_E_LOOKBEHIND_OVERRUN;
+}
+#ifndef STATIC
+EXPORT_SYMBOL_GPL(lzo1x_decompress_safe);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("LZO1X Decompressor");
+
+#endif
diff --git a/lib/lzo/lzodefs.h b/lib/lzo/lzodefs.h
new file mode 100644
index 000000000..b60851fcf
--- /dev/null
+++ b/lib/lzo/lzodefs.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * lzodefs.h -- architecture, OS and compiler specific defines
+ *
+ * Copyright (C) 1996-2012 Markus F.X.J. Oberhumer <markus@oberhumer.com>
+ *
+ * The full LZO package can be found at:
+ * http://www.oberhumer.com/opensource/lzo/
+ *
+ * Changed for Linux kernel use by:
+ * Nitin Gupta <nitingupta910@gmail.com>
+ * Richard Purdie <rpurdie@openedhand.com>
+ */
+
+/* Version
+ * 0: original lzo version
+ * 1: lzo with support for RLE
+ */
+#define LZO_VERSION 1
+
+#define COPY4(dst, src) \
+ put_unaligned(get_unaligned((const u32 *)(src)), (u32 *)(dst))
+#if defined(CONFIG_X86_64) || defined(CONFIG_ARM64)
+#define COPY8(dst, src) \
+ put_unaligned(get_unaligned((const u64 *)(src)), (u64 *)(dst))
+#else
+#define COPY8(dst, src) \
+ COPY4(dst, src); COPY4((dst) + 4, (src) + 4)
+#endif
+
+#if defined(__BIG_ENDIAN) && defined(__LITTLE_ENDIAN)
+#error "conflicting endian definitions"
+#elif defined(CONFIG_X86_64) || defined(CONFIG_ARM64)
+#define LZO_USE_CTZ64 1
+#define LZO_USE_CTZ32 1
+#define LZO_FAST_64BIT_MEMORY_ACCESS
+#elif defined(CONFIG_X86) || defined(CONFIG_PPC)
+#define LZO_USE_CTZ32 1
+#elif defined(CONFIG_ARM) && (__LINUX_ARM_ARCH__ >= 5)
+#define LZO_USE_CTZ32 1
+#endif
+
+#define M1_MAX_OFFSET 0x0400
+#define M2_MAX_OFFSET 0x0800
+#define M3_MAX_OFFSET 0x4000
+#define M4_MAX_OFFSET_V0 0xbfff
+#define M4_MAX_OFFSET_V1 0xbffe
+
+#define M1_MIN_LEN 2
+#define M1_MAX_LEN 2
+#define M2_MIN_LEN 3
+#define M2_MAX_LEN 8
+#define M3_MIN_LEN 3
+#define M3_MAX_LEN 33
+#define M4_MIN_LEN 3
+#define M4_MAX_LEN 9
+
+#define M1_MARKER 0
+#define M2_MARKER 64
+#define M3_MARKER 32
+#define M4_MARKER 16
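+
+/*
+ * Worked example (for illustration): an M2 match of length 5 at distance
+ * 33 is emitted as ((5 - 1) << 5) | ((32 & 7) << 2) = 0x80 followed by
+ * 32 >> 3 = 0x04; the decompressor sees t = 0x80 >= M2_MARKER and
+ * recovers length (t >> 5) - 1 + 2 = 5 and distance 1 + 0 + (0x04 << 3)
+ * = 33.
+ */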
+
+#define MIN_ZERO_RUN_LENGTH 4
+#define MAX_ZERO_RUN_LENGTH (2047 + MIN_ZERO_RUN_LENGTH)
+
+#define lzo_dict_t unsigned short
+#define D_BITS 13
+#define D_SIZE (1u << D_BITS)
+#define D_MASK (D_SIZE - 1)
+#define D_HIGH ((D_MASK >> 1) + 1)
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
new file mode 100644
index 000000000..684689457
--- /dev/null
+++ b/lib/maple_tree.c
@@ -0,0 +1,7268 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Maple Tree implementation
+ * Copyright (c) 2018-2022 Oracle Corporation
+ * Authors: Liam R. Howlett <Liam.Howlett@oracle.com>
+ * Matthew Wilcox <willy@infradead.org>
+ */
+
+/*
+ * DOC: Interesting implementation details of the Maple Tree
+ *
+ * Each node type has a number of slots for entries and a number of slots for
+ * pivots. In the case of dense nodes, the pivots are implied by the position
+ * and are simply the slot index + the minimum of the node.
+ *
+ * In regular B-Tree terms, pivots are called keys. The term pivot is used to
+ * indicate that the tree is specifying ranges. Pivots may appear in the
+ * subtree with an entry attached to the value, whereas keys are unique to a
+ * specific position of a B-tree. Pivot values are inclusive of the slot with
+ * the same index.
+ *
+ *
+ * The following illustrates the layout of a range64 node's slots and pivots.
+ *
+ *
+ * Slots -> | 0 | 1 | 2 | ... | 12 | 13 | 14 | 15 |
+ * ┬ ┬ ┬ ┬ ┬ ┬ ┬ ┬ ┬
+ * │ │ │ │ │ │ │ │ └─ Implied maximum
+ * │ │ │ │ │ │ │ └─ Pivot 14
+ * │ │ │ │ │ │ └─ Pivot 13
+ * │ │ │ │ │ └─ Pivot 12
+ * │ │ │ │ └─ Pivot 11
+ * │ │ │ └─ Pivot 2
+ * │ │ └─ Pivot 1
+ * │ └─ Pivot 0
+ * └─ Implied minimum
+ *
+ * Slot contents:
+ * Internal (non-leaf) nodes contain pointers to other nodes.
+ * Leaf nodes contain entries.
+ *
+ * The location of interest is often referred to as an offset. All offsets have
+ * a slot, but the last offset has an implied pivot from the node above (or
+ * ULONG_MAX for the root node).
+ *
+ * Ranges complicate certain write activities. When modifying any of
+ * the B-tree variants, it is known that one entry will either be added or
+ * deleted. When modifying the Maple Tree, one store operation may overwrite
+ * the entire data set, or one half of the tree, or the middle half of the tree.
+ *
+ */
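+
+/*
+ * A small worked example (not part of the original DOC text): in a leaf
+ * with min = 1 and pivots {10, 20}, slot 0 covers the range [1, 10],
+ * slot 1 covers [11, 20] and slot 2 covers [21, implied maximum].
+ */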
+
+#include <linux/maple_tree.h>
+#include <linux/xarray.h>
+#include <linux/types.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/limits.h>
+#include <asm/barrier.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/maple_tree.h>
+
+#define MA_ROOT_PARENT 1
+
+/*
+ * Maple state flags
+ * * MA_STATE_BULK - Bulk insert mode
+ * * MA_STATE_REBALANCE - Indicate a rebalance during bulk insert
+ * * MA_STATE_PREALLOC - Preallocated nodes, WARN_ON allocation
+ */
+#define MA_STATE_BULK 1
+#define MA_STATE_REBALANCE 2
+#define MA_STATE_PREALLOC 4
+
+#define ma_parent_ptr(x) ((struct maple_pnode *)(x))
+#define mas_tree_parent(x) ((unsigned long)(x->tree) | MA_ROOT_PARENT)
+#define ma_mnode_ptr(x) ((struct maple_node *)(x))
+#define ma_enode_ptr(x) ((struct maple_enode *)(x))
+static struct kmem_cache *maple_node_cache;
+
+#ifdef CONFIG_DEBUG_MAPLE_TREE
+static const unsigned long mt_max[] = {
+ [maple_dense] = MAPLE_NODE_SLOTS,
+ [maple_leaf_64] = ULONG_MAX,
+ [maple_range_64] = ULONG_MAX,
+ [maple_arange_64] = ULONG_MAX,
+};
+#define mt_node_max(x) mt_max[mte_node_type(x)]
+#endif
+
+static const unsigned char mt_slots[] = {
+ [maple_dense] = MAPLE_NODE_SLOTS,
+ [maple_leaf_64] = MAPLE_RANGE64_SLOTS,
+ [maple_range_64] = MAPLE_RANGE64_SLOTS,
+ [maple_arange_64] = MAPLE_ARANGE64_SLOTS,
+};
+#define mt_slot_count(x) mt_slots[mte_node_type(x)]
+
+static const unsigned char mt_pivots[] = {
+ [maple_dense] = 0,
+ [maple_leaf_64] = MAPLE_RANGE64_SLOTS - 1,
+ [maple_range_64] = MAPLE_RANGE64_SLOTS - 1,
+ [maple_arange_64] = MAPLE_ARANGE64_SLOTS - 1,
+};
+#define mt_pivot_count(x) mt_pivots[mte_node_type(x)]
+
+static const unsigned char mt_min_slots[] = {
+ [maple_dense] = MAPLE_NODE_SLOTS / 2,
+ [maple_leaf_64] = (MAPLE_RANGE64_SLOTS / 2) - 2,
+ [maple_range_64] = (MAPLE_RANGE64_SLOTS / 2) - 2,
+ [maple_arange_64] = (MAPLE_ARANGE64_SLOTS / 2) - 1,
+};
+#define mt_min_slot_count(x) mt_min_slots[mte_node_type(x)]
+
+#define MAPLE_BIG_NODE_SLOTS (MAPLE_RANGE64_SLOTS * 2 + 2)
+#define MAPLE_BIG_NODE_GAPS (MAPLE_ARANGE64_SLOTS * 2 + 1)
+
+struct maple_big_node {
+ struct maple_pnode *parent;
+ unsigned long pivot[MAPLE_BIG_NODE_SLOTS - 1];
+ union {
+ struct maple_enode *slot[MAPLE_BIG_NODE_SLOTS];
+ struct {
+ unsigned long padding[MAPLE_BIG_NODE_GAPS];
+ unsigned long gap[MAPLE_BIG_NODE_GAPS];
+ };
+ };
+ unsigned char b_end;
+ enum maple_type type;
+};
+
+/*
+ * The maple_subtree_state is used to build a tree to replace a segment of an
+ * existing tree in a more atomic way. Any walkers of the older tree will hit a
+ * dead node and restart on updates.
+ */
+struct maple_subtree_state {
+ struct ma_state *orig_l; /* Original left side of subtree */
+ struct ma_state *orig_r; /* Original right side of subtree */
+ struct ma_state *l; /* New left side of subtree */
+ struct ma_state *m; /* New middle of subtree (rare) */
+ struct ma_state *r; /* New right side of subtree */
+ struct ma_topiary *free; /* nodes to be freed */
+ struct ma_topiary *destroy; /* Nodes to be destroyed (walked and freed) */
+ struct maple_big_node *bn;
+};
+
+#ifdef CONFIG_KASAN_STACK
+/* Prevent mas_wr_bnode() from exceeding the stack frame limit */
+#define noinline_for_kasan noinline_for_stack
+#else
+#define noinline_for_kasan inline
+#endif
+
+/* Functions */
+static inline struct maple_node *mt_alloc_one(gfp_t gfp)
+{
+ return kmem_cache_alloc(maple_node_cache, gfp);
+}
+
+static inline int mt_alloc_bulk(gfp_t gfp, size_t size, void **nodes)
+{
+ return kmem_cache_alloc_bulk(maple_node_cache, gfp, size, nodes);
+}
+
+static inline void mt_free_bulk(size_t size, void __rcu **nodes)
+{
+ kmem_cache_free_bulk(maple_node_cache, size, (void **)nodes);
+}
+
+static void mt_free_rcu(struct rcu_head *head)
+{
+ struct maple_node *node = container_of(head, struct maple_node, rcu);
+
+ kmem_cache_free(maple_node_cache, node);
+}
+
+/*
+ * ma_free_rcu() - Use rcu callback to free a maple node
+ * @node: The node to free
+ *
+ * The maple tree uses the parent pointer to indicate this node is no longer in
+ * use and will be freed.
+ */
+static void ma_free_rcu(struct maple_node *node)
+{
+ WARN_ON(node->parent != ma_parent_ptr(node));
+ call_rcu(&node->rcu, mt_free_rcu);
+}
+
+static void mas_set_height(struct ma_state *mas)
+{
+ unsigned int new_flags = mas->tree->ma_flags;
+
+ new_flags &= ~MT_FLAGS_HEIGHT_MASK;
+ MAS_BUG_ON(mas, mas->depth > MAPLE_HEIGHT_MAX);
+ new_flags |= mas->depth << MT_FLAGS_HEIGHT_OFFSET;
+ mas->tree->ma_flags = new_flags;
+}
+
+static unsigned int mas_mt_height(struct ma_state *mas)
+{
+ return mt_height(mas->tree);
+}
+
+static inline enum maple_type mte_node_type(const struct maple_enode *entry)
+{
+ return ((unsigned long)entry >> MAPLE_NODE_TYPE_SHIFT) &
+ MAPLE_NODE_TYPE_MASK;
+}
+
+static inline bool ma_is_dense(const enum maple_type type)
+{
+ return type < maple_leaf_64;
+}
+
+static inline bool ma_is_leaf(const enum maple_type type)
+{
+ return type < maple_range_64;
+}
+
+static inline bool mte_is_leaf(const struct maple_enode *entry)
+{
+ return ma_is_leaf(mte_node_type(entry));
+}
+
+/*
+ * We also reserve values with the bottom two bits set to '10' which are
+ * below 4096
+ */
+static inline bool mt_is_reserved(const void *entry)
+{
+ return ((unsigned long)entry < MAPLE_RESERVED_RANGE) &&
+ xa_is_internal(entry);
+}
+
+static inline void mas_set_err(struct ma_state *mas, long err)
+{
+ mas->node = MA_ERROR(err);
+}
+
+static inline bool mas_is_ptr(const struct ma_state *mas)
+{
+ return mas->node == MAS_ROOT;
+}
+
+static inline bool mas_is_start(const struct ma_state *mas)
+{
+ return mas->node == MAS_START;
+}
+
+bool mas_is_err(struct ma_state *mas)
+{
+ return xa_is_err(mas->node);
+}
+
+static __always_inline bool mas_is_overflow(struct ma_state *mas)
+{
+ if (unlikely(mas->node == MAS_OVERFLOW))
+ return true;
+
+ return false;
+}
+
+static __always_inline bool mas_is_underflow(struct ma_state *mas)
+{
+ if (unlikely(mas->node == MAS_UNDERFLOW))
+ return true;
+
+ return false;
+}
+
+static inline bool mas_searchable(struct ma_state *mas)
+{
+ if (mas_is_none(mas))
+ return false;
+
+ if (mas_is_ptr(mas))
+ return false;
+
+ return true;
+}
+
+static inline struct maple_node *mte_to_node(const struct maple_enode *entry)
+{
+ return (struct maple_node *)((unsigned long)entry & ~MAPLE_NODE_MASK);
+}
+
+/*
+ * mte_to_mat() - Convert a maple encoded node to a maple topiary node.
+ * @entry: The maple encoded node
+ *
+ * Return: a maple topiary pointer
+ */
+static inline struct maple_topiary *mte_to_mat(const struct maple_enode *entry)
+{
+ return (struct maple_topiary *)
+ ((unsigned long)entry & ~MAPLE_NODE_MASK);
+}
+
+/*
+ * mas_mn() - Get the maple state node.
+ * @mas: The maple state
+ *
+ * Return: the maple node (not encoded - bare pointer).
+ */
+static inline struct maple_node *mas_mn(const struct ma_state *mas)
+{
+ return mte_to_node(mas->node);
+}
+
+/*
+ * mte_set_node_dead() - Set a maple encoded node as dead.
+ * @mn: The maple encoded node.
+ */
+static inline void mte_set_node_dead(struct maple_enode *mn)
+{
+ mte_to_node(mn)->parent = ma_parent_ptr(mte_to_node(mn));
+ smp_wmb(); /* Needed for RCU */
+}
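+
+/*
+ * Note: the write barrier above pairs with the smp_rmb() in
+ * ma_dead_node() and mte_dead_node(), which detect a dead node by
+ * checking whether its parent pointer points back at the node itself.
+ */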
+
+/* Bit 1 indicates the root is a node */
+#define MAPLE_ROOT_NODE 0x02
+/* maple_type stored bit 3-6 */
+#define MAPLE_ENODE_TYPE_SHIFT 0x03
+/* Bit 2 means a NULL somewhere below */
+#define MAPLE_ENODE_NULL 0x04
+
+static inline struct maple_enode *mt_mk_node(const struct maple_node *node,
+ enum maple_type type)
+{
+ return (void *)((unsigned long)node |
+ (type << MAPLE_ENODE_TYPE_SHIFT) | MAPLE_ENODE_NULL);
+}
+
+static inline void *mte_mk_root(const struct maple_enode *node)
+{
+ return (void *)((unsigned long)node | MAPLE_ROOT_NODE);
+}
+
+static inline void *mte_safe_root(const struct maple_enode *node)
+{
+ return (void *)((unsigned long)node & ~MAPLE_ROOT_NODE);
+}
+
+static inline void *mte_set_full(const struct maple_enode *node)
+{
+ return (void *)((unsigned long)node & ~MAPLE_ENODE_NULL);
+}
+
+static inline void *mte_clear_full(const struct maple_enode *node)
+{
+ return (void *)((unsigned long)node | MAPLE_ENODE_NULL);
+}
+
+static inline bool mte_has_null(const struct maple_enode *node)
+{
+ return (unsigned long)node & MAPLE_ENODE_NULL;
+}
+
+static inline bool ma_is_root(struct maple_node *node)
+{
+ return ((unsigned long)node->parent & MA_ROOT_PARENT);
+}
+
+static inline bool mte_is_root(const struct maple_enode *node)
+{
+ return ma_is_root(mte_to_node(node));
+}
+
+static inline bool mas_is_root_limits(const struct ma_state *mas)
+{
+ return !mas->min && mas->max == ULONG_MAX;
+}
+
+static inline bool mt_is_alloc(struct maple_tree *mt)
+{
+ return (mt->ma_flags & MT_FLAGS_ALLOC_RANGE);
+}
+
+/*
+ * The Parent Pointer
+ * Excluding root, the parent pointer is 256B aligned like all other tree nodes.
+ * When storing 32 or 64 bit values, the offset can fit into 5 bits. The 16
+ * bit values need an extra bit to store the offset. This extra bit comes from
+ * a reuse of the last bit in the node type. This is possible by using bit 1 to
+ * indicate if bit 2 is part of the type or the slot.
+ *
+ * Node types:
+ * 0x??1 = Root
+ * 0x?00 = 16 bit nodes
+ * 0x010 = 32 bit nodes
+ * 0x110 = 64 bit nodes
+ *
+ * Slot size and alignment
+ * 0b??1 : Root
+ * 0b?00 : 16 bit values, type in 0-1, slot in 2-7
+ * 0b010 : 32 bit values, type in 0-2, slot in 3-7
+ * 0b110 : 64 bit values, type in 0-2, slot in 3-7
+ */
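+
+/*
+ * Encoding example (for illustration): a child in slot 5 of a range64
+ * parent p stores p | (5 << MAPLE_PARENT_SLOT_SHIFT) |
+ * MAPLE_PARENT_RANGE64, i.e. p | 0x2e; mte_parent_slot() recovers
+ * (0x2e & 0xfc) >> 3 == 5 and mte_parent() masks the metadata back off.
+ */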
+
+#define MAPLE_PARENT_ROOT 0x01
+
+#define MAPLE_PARENT_SLOT_SHIFT 0x03
+#define MAPLE_PARENT_SLOT_MASK 0xF8
+
+#define MAPLE_PARENT_16B_SLOT_SHIFT 0x02
+#define MAPLE_PARENT_16B_SLOT_MASK 0xFC
+
+#define MAPLE_PARENT_RANGE64 0x06
+#define MAPLE_PARENT_RANGE32 0x04
+#define MAPLE_PARENT_NOT_RANGE16 0x02
+
+/*
+ * mte_parent_shift() - Get the parent shift for the slot storage.
+ * @parent: The parent pointer cast as an unsigned long
+ * Return: The shift into that pointer to the start of the slot
+ */
+static inline unsigned long mte_parent_shift(unsigned long parent)
+{
+ /* Note bit 1 == 0 means 16B */
+ if (likely(parent & MAPLE_PARENT_NOT_RANGE16))
+ return MAPLE_PARENT_SLOT_SHIFT;
+
+ return MAPLE_PARENT_16B_SLOT_SHIFT;
+}
+
+/*
+ * mte_parent_slot_mask() - Get the slot mask for the parent.
+ * @parent: The parent pointer cast as an unsigned long.
+ * Return: The slot mask for that parent.
+ */
+static inline unsigned long mte_parent_slot_mask(unsigned long parent)
+{
+ /* Note bit 1 == 0 means 16B */
+ if (likely(parent & MAPLE_PARENT_NOT_RANGE16))
+ return MAPLE_PARENT_SLOT_MASK;
+
+ return MAPLE_PARENT_16B_SLOT_MASK;
+}
+
+/*
+ * mas_parent_type() - Return the maple_type of the parent from the stored
+ * parent type.
+ * @mas: The maple state
+ * @enode: The maple_enode to extract the parent's enum
+ * Return: The node->parent maple_type
+ */
+static inline
+enum maple_type mas_parent_type(struct ma_state *mas, struct maple_enode *enode)
+{
+ unsigned long p_type;
+
+ p_type = (unsigned long)mte_to_node(enode)->parent;
+ if (WARN_ON(p_type & MAPLE_PARENT_ROOT))
+ return 0;
+
+ p_type &= MAPLE_NODE_MASK;
+ p_type &= ~mte_parent_slot_mask(p_type);
+ switch (p_type) {
+ case MAPLE_PARENT_RANGE64: /* or MAPLE_PARENT_ARANGE64 */
+ if (mt_is_alloc(mas->tree))
+ return maple_arange_64;
+ return maple_range_64;
+ }
+
+ return 0;
+}
+
+/*
+ * mas_set_parent() - Set the parent node and encode the slot
+ * @enode: The encoded maple node.
+ * @parent: The encoded maple node that is the parent of @enode.
+ * @slot: The slot that @enode resides in @parent.
+ *
+ * Slot number is encoded in the enode->parent bit 3-6 or 2-6, depending on the
+ * parent type.
+ */
+static inline
+void mas_set_parent(struct ma_state *mas, struct maple_enode *enode,
+ const struct maple_enode *parent, unsigned char slot)
+{
+ unsigned long val = (unsigned long)parent;
+ unsigned long shift;
+ unsigned long type;
+ enum maple_type p_type = mte_node_type(parent);
+
+ MAS_BUG_ON(mas, p_type == maple_dense);
+ MAS_BUG_ON(mas, p_type == maple_leaf_64);
+
+ switch (p_type) {
+ case maple_range_64:
+ case maple_arange_64:
+ shift = MAPLE_PARENT_SLOT_SHIFT;
+ type = MAPLE_PARENT_RANGE64;
+ break;
+ default:
+ case maple_dense:
+ case maple_leaf_64:
+ shift = type = 0;
+ break;
+ }
+
+ val &= ~MAPLE_NODE_MASK; /* Clear all node metadata in parent */
+ val |= (slot << shift) | type;
+ mte_to_node(enode)->parent = ma_parent_ptr(val);
+}
+
+/*
+ * mte_parent_slot() - get the parent slot of @enode.
+ * @enode: The encoded maple node.
+ *
+ * Return: The slot in the parent node where @enode resides.
+ */
+static inline unsigned int mte_parent_slot(const struct maple_enode *enode)
+{
+ unsigned long val = (unsigned long)mte_to_node(enode)->parent;
+
+ if (val & MA_ROOT_PARENT)
+ return 0;
+
+ /*
+ * Okay to use MAPLE_PARENT_16B_SLOT_MASK as the last bit will be lost
+ * by shift if the parent shift is MAPLE_PARENT_SLOT_SHIFT
+ */
+ return (val & MAPLE_PARENT_16B_SLOT_MASK) >> mte_parent_shift(val);
+}
+
+/*
+ * mte_parent() - Get the parent of @node.
+ * @node: The encoded maple node.
+ *
+ * Return: The parent maple node.
+ */
+static inline struct maple_node *mte_parent(const struct maple_enode *enode)
+{
+ return (void *)((unsigned long)
+ (mte_to_node(enode)->parent) & ~MAPLE_NODE_MASK);
+}
+
+/*
+ * ma_dead_node() - check if the @node is dead.
+ * @node: The maple node
+ *
+ * Return: true if dead, false otherwise.
+ */
+static inline bool ma_dead_node(const struct maple_node *node)
+{
+ struct maple_node *parent;
+
+ /* Do not reorder reads from the node prior to the parent check */
+ smp_rmb();
+ parent = (void *)((unsigned long) node->parent & ~MAPLE_NODE_MASK);
+ return (parent == node);
+}
+
+/*
+ * mte_dead_node() - check if the @enode is dead.
+ * @enode: The encoded maple node
+ *
+ * Return: true if dead, false otherwise.
+ */
+static inline bool mte_dead_node(const struct maple_enode *enode)
+{
+ struct maple_node *parent, *node;
+
+ node = mte_to_node(enode);
+ /* Do not reorder reads from the node prior to the parent check */
+ smp_rmb();
+ parent = mte_parent(enode);
+ return (parent == node);
+}
+
+/*
+ * mas_allocated() - Get the number of nodes allocated in a maple state.
+ * @mas: The maple state
+ *
+ * The ma_state alloc member is overloaded to hold a pointer to the first
+ * allocated node or to the number of requested nodes to allocate. If bit 0 is
+ * set, then the alloc contains the number of requested nodes. If there is an
+ * allocated node, then the total allocated nodes is in that node.
+ *
+ * Return: The total number of nodes allocated
+ */
+static inline unsigned long mas_allocated(const struct ma_state *mas)
+{
+ if (!mas->alloc || ((unsigned long)mas->alloc & 0x1))
+ return 0;
+
+ return mas->alloc->total;
+}
+
+/*
+ * mas_set_alloc_req() - Set the requested number of allocations.
+ * @mas: the maple state
+ * @count: the number of allocations.
+ *
+ * The requested number of allocations is either in the first allocated node,
+ * located in @mas->alloc->request_count, or directly in @mas->alloc if there is
+ * no allocated node. Set the request either in the node or do the necessary
+ * encoding to store in @mas->alloc directly.
+ */
+static inline void mas_set_alloc_req(struct ma_state *mas, unsigned long count)
+{
+ if (!mas->alloc || ((unsigned long)mas->alloc & 0x1)) {
+ if (!count)
+ mas->alloc = NULL;
+ else
+ mas->alloc = (struct maple_alloc *)(((count) << 1U) | 1U);
+ return;
+ }
+
+ mas->alloc->request_count = count;
+}
+
+/*
+ * mas_alloc_req() - get the requested number of allocations.
+ * @mas: The maple state
+ *
+ * The alloc count is either stored directly in @mas, or in
+ * @mas->alloc->request_count if there is at least one node allocated. Decode
+ * the request count if it's stored directly in @mas->alloc.
+ *
+ * Return: The allocation request count.
+ */
+static inline unsigned int mas_alloc_req(const struct ma_state *mas)
+{
+ if ((unsigned long)mas->alloc & 0x1)
+ return (unsigned long)(mas->alloc) >> 1;
+ else if (mas->alloc)
+ return mas->alloc->request_count;
+ return 0;
+}
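+
+/*
+ * For example: after requesting three nodes with nothing yet allocated,
+ * mas->alloc holds ((3 << 1) | 1) == 0x7; once a node is allocated,
+ * bit 0 is clear and the counts live in mas->alloc->total and
+ * mas->alloc->request_count.
+ */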
+
+/*
+ * ma_pivots() - Get a pointer to the maple node pivots.
+ * @node: the maple node
+ * @type: the node type
+ *
+ * In the event of a dead node, this array may be %NULL
+ *
+ * Return: A pointer to the maple node pivots
+ */
+static inline unsigned long *ma_pivots(struct maple_node *node,
+ enum maple_type type)
+{
+ switch (type) {
+ case maple_arange_64:
+ return node->ma64.pivot;
+ case maple_range_64:
+ case maple_leaf_64:
+ return node->mr64.pivot;
+ case maple_dense:
+ return NULL;
+ }
+ return NULL;
+}
+
+/*
+ * ma_gaps() - Get a pointer to the maple node gaps.
+ * @node: the maple node
+ * @type: the node type
+ *
+ * Return: A pointer to the maple node gaps
+ */
+static inline unsigned long *ma_gaps(struct maple_node *node,
+ enum maple_type type)
+{
+ switch (type) {
+ case maple_arange_64:
+ return node->ma64.gap;
+ case maple_range_64:
+ case maple_leaf_64:
+ case maple_dense:
+ return NULL;
+ }
+ return NULL;
+}
+
+/*
+ * mas_pivot() - Get the pivot at @piv of the maple encoded node.
+ * @mas: The maple state.
+ * @piv: The pivot.
+ *
+ * Return: the pivot at @piv of @mn.
+ */
+static inline unsigned long mas_pivot(struct ma_state *mas, unsigned char piv)
+{
+ struct maple_node *node = mas_mn(mas);
+ enum maple_type type = mte_node_type(mas->node);
+
+ if (MAS_WARN_ON(mas, piv >= mt_pivots[type])) {
+ mas_set_err(mas, -EIO);
+ return 0;
+ }
+
+ switch (type) {
+ case maple_arange_64:
+ return node->ma64.pivot[piv];
+ case maple_range_64:
+ case maple_leaf_64:
+ return node->mr64.pivot[piv];
+ case maple_dense:
+ return 0;
+ }
+ return 0;
+}
+
+/*
+ * mas_safe_pivot() - get the pivot at @piv or mas->max.
+ * @mas: The maple state
+ * @pivots: The pointer to the maple node pivots
+ * @piv: The pivot to fetch
+ * @type: The maple node type
+ *
+ * Return: The pivot at @piv within the limit of the @pivots array, @mas->max
+ * otherwise.
+ */
+static inline unsigned long
+mas_safe_pivot(const struct ma_state *mas, unsigned long *pivots,
+ unsigned char piv, enum maple_type type)
+{
+ if (piv >= mt_pivots[type])
+ return mas->max;
+
+ return pivots[piv];
+}
+
+/*
+ * mas_safe_min() - Return the minimum for a given offset.
+ * @mas: The maple state
+ * @pivots: The pointer to the maple node pivots
+ * @offset: The offset into the pivot array
+ *
+ * Return: The minimum range value that is contained in @offset.
+ */
+static inline unsigned long
+mas_safe_min(struct ma_state *mas, unsigned long *pivots, unsigned char offset)
+{
+ if (likely(offset))
+ return pivots[offset - 1] + 1;
+
+ return mas->min;
+}
+
+/*
+ * mte_set_pivot() - Set a pivot to a value in an encoded maple node.
+ * @mn: The encoded maple node
+ * @piv: The pivot offset
+ * @val: The value of the pivot
+ */
+static inline void mte_set_pivot(struct maple_enode *mn, unsigned char piv,
+ unsigned long val)
+{
+ struct maple_node *node = mte_to_node(mn);
+ enum maple_type type = mte_node_type(mn);
+
+ BUG_ON(piv >= mt_pivots[type]);
+ switch (type) {
+ default:
+ case maple_range_64:
+ case maple_leaf_64:
+ node->mr64.pivot[piv] = val;
+ break;
+ case maple_arange_64:
+ node->ma64.pivot[piv] = val;
+ break;
+ case maple_dense:
+ break;
+ }
+}
+
+/*
+ * ma_slots() - Get a pointer to the maple node slots.
+ * @mn: The maple node
+ * @mt: The maple node type
+ *
+ * Return: A pointer to the maple node slots
+ */
+static inline void __rcu **ma_slots(struct maple_node *mn, enum maple_type mt)
+{
+ switch (mt) {
+ default:
+ case maple_arange_64:
+ return mn->ma64.slot;
+ case maple_range_64:
+ case maple_leaf_64:
+ return mn->mr64.slot;
+ case maple_dense:
+ return mn->slot;
+ }
+}
+
+static inline bool mt_write_locked(const struct maple_tree *mt)
+{
+ return mt_external_lock(mt) ? mt_write_lock_is_held(mt) :
+ lockdep_is_held(&mt->ma_lock);
+}
+
+static inline bool mt_locked(const struct maple_tree *mt)
+{
+ return mt_external_lock(mt) ? mt_lock_is_held(mt) :
+ lockdep_is_held(&mt->ma_lock);
+}
+
+static inline void *mt_slot(const struct maple_tree *mt,
+ void __rcu **slots, unsigned char offset)
+{
+ return rcu_dereference_check(slots[offset], mt_locked(mt));
+}
+
+static inline void *mt_slot_locked(struct maple_tree *mt, void __rcu **slots,
+ unsigned char offset)
+{
+ return rcu_dereference_protected(slots[offset], mt_write_locked(mt));
+}
+
+/*
+ * mas_slot_locked() - Get the slot value when holding the maple tree lock.
+ * @mas: The maple state
+ * @slots: The pointer to the slots
+ * @offset: The offset into the slots array to fetch
+ *
+ * Return: The entry stored in @slots at the @offset.
+ */
+static inline void *mas_slot_locked(struct ma_state *mas, void __rcu **slots,
+ unsigned char offset)
+{
+ return mt_slot_locked(mas->tree, slots, offset);
+}
+
+/*
+ * mas_slot() - Get the slot value when not holding the maple tree lock.
+ * @mas: The maple state
+ * @slots: The pointer to the slots
+ * @offset: The offset into the slots array to fetch
+ *
+ * Return: The entry stored in @slots at the @offset
+ */
+static inline void *mas_slot(struct ma_state *mas, void __rcu **slots,
+ unsigned char offset)
+{
+ return mt_slot(mas->tree, slots, offset);
+}
+
+/*
+ * mas_root() - Get the maple tree root.
+ * @mas: The maple state.
+ *
+ * Return: The pointer to the root of the tree
+ */
+static inline void *mas_root(struct ma_state *mas)
+{
+ return rcu_dereference_check(mas->tree->ma_root, mt_locked(mas->tree));
+}
+
+static inline void *mt_root_locked(struct maple_tree *mt)
+{
+ return rcu_dereference_protected(mt->ma_root, mt_write_locked(mt));
+}
+
+/*
+ * mas_root_locked() - Get the maple tree root when holding the maple tree lock.
+ * @mas: The maple state.
+ *
+ * Return: The pointer to the root of the tree
+ */
+static inline void *mas_root_locked(struct ma_state *mas)
+{
+ return mt_root_locked(mas->tree);
+}
+
+static inline struct maple_metadata *ma_meta(struct maple_node *mn,
+ enum maple_type mt)
+{
+ switch (mt) {
+ case maple_arange_64:
+ return &mn->ma64.meta;
+ default:
+ return &mn->mr64.meta;
+ }
+}
+
+/*
+ * ma_set_meta() - Set the metadata information of a node.
+ * @mn: The maple node
+ * @mt: The maple node type
+ * @offset: The offset of the highest sub-gap in this node.
+ * @end: The end of the data in this node.
+ */
+static inline void ma_set_meta(struct maple_node *mn, enum maple_type mt,
+ unsigned char offset, unsigned char end)
+{
+ struct maple_metadata *meta = ma_meta(mn, mt);
+
+ meta->gap = offset;
+ meta->end = end;
+}
+
+/*
+ * mt_clear_meta() - clear the metadata information of a node, if it exists
+ * @mt: The maple tree
+ * @mn: The maple node
+ * @type: The maple node type
+ */
+static inline void mt_clear_meta(struct maple_tree *mt, struct maple_node *mn,
+ enum maple_type type)
+{
+ struct maple_metadata *meta;
+ unsigned long *pivots;
+ void __rcu **slots;
+ void *next;
+
+ switch (type) {
+ case maple_range_64:
+ pivots = mn->mr64.pivot;
+ if (unlikely(pivots[MAPLE_RANGE64_SLOTS - 2])) {
+ slots = mn->mr64.slot;
+ next = mt_slot_locked(mt, slots,
+ MAPLE_RANGE64_SLOTS - 1);
+ if (unlikely((mte_to_node(next) &&
+ mte_node_type(next))))
+ return; /* no metadata, could be node */
+ }
+ fallthrough;
+ case maple_arange_64:
+ meta = ma_meta(mn, type);
+ break;
+ default:
+ return;
+ }
+
+ meta->gap = 0;
+ meta->end = 0;
+}
+
+/*
+ * ma_meta_end() - Get the data end of a node from the metadata
+ * @mn: The maple node
+ * @mt: The maple node type
+ */
+static inline unsigned char ma_meta_end(struct maple_node *mn,
+ enum maple_type mt)
+{
+ struct maple_metadata *meta = ma_meta(mn, mt);
+
+ return meta->end;
+}
+
+/*
+ * ma_meta_gap() - Get the largest gap location of a node from the metadata
+ * @mn: The maple node
+ * @mt: The maple node type
+ */
+static inline unsigned char ma_meta_gap(struct maple_node *mn,
+ enum maple_type mt)
+{
+ return mn->ma64.meta.gap;
+}
+
+/*
+ * ma_set_meta_gap() - Set the largest gap location in a node's metadata
+ * @mn: The maple node
+ * @mt: The maple node type
+ * @offset: The location of the largest gap.
+ */
+static inline void ma_set_meta_gap(struct maple_node *mn, enum maple_type mt,
+ unsigned char offset)
+{
+ struct maple_metadata *meta = ma_meta(mn, mt);
+
+ meta->gap = offset;
+}
+
+/*
+ * mat_add() - Add a @dead_enode to the ma_topiary list of dead nodes.
+ * @mat: the ma_topiary, a linked list of dead nodes.
+ * @dead_enode: the node to be marked as dead and added to the tail of the list
+ *
+ * Add the @dead_enode to the linked list in @mat.
+ */
+static inline void mat_add(struct ma_topiary *mat,
+ struct maple_enode *dead_enode)
+{
+ mte_set_node_dead(dead_enode);
+ mte_to_mat(dead_enode)->next = NULL;
+ if (!mat->tail) {
+ mat->tail = mat->head = dead_enode;
+ return;
+ }
+
+ mte_to_mat(mat->tail)->next = dead_enode;
+ mat->tail = dead_enode;
+}
+
+static void mt_free_walk(struct rcu_head *head);
+static void mt_destroy_walk(struct maple_enode *enode, struct maple_tree *mt,
+ bool free);
+/*
+ * mas_mat_destroy() - Free all nodes and subtrees in a dead list.
+ * @mas: the maple state
+ * @mat: the ma_topiary linked list of dead nodes to free.
+ *
+ * Destroy walk a dead list.
+ */
+static void mas_mat_destroy(struct ma_state *mas, struct ma_topiary *mat)
+{
+ struct maple_enode *next;
+ struct maple_node *node;
+ bool in_rcu = mt_in_rcu(mas->tree);
+
+ while (mat->head) {
+ next = mte_to_mat(mat->head)->next;
+ node = mte_to_node(mat->head);
+ mt_destroy_walk(mat->head, mas->tree, !in_rcu);
+ if (in_rcu)
+ call_rcu(&node->rcu, mt_free_walk);
+ mat->head = next;
+ }
+}
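+
+/*
+ * An illustrative sketch, not part of the algorithm: a typical topiary
+ * sequence queues dead nodes and then destroys them in one pass, where
+ * dead_enode is a hypothetical dead subtree root:
+ *
+ *	MA_TOPIARY(mat, mas->tree);
+ *	mat_add(&mat, dead_enode);
+ *	mas_mat_destroy(mas, &mat);
+ */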
+/*
+ * mas_descend() - Descend into the slot stored in the ma_state.
+ * @mas: the maple state.
+ *
+ * Note: Not RCU safe, only use in write side or debug code.
+ */
+static inline void mas_descend(struct ma_state *mas)
+{
+ enum maple_type type;
+ unsigned long *pivots;
+ struct maple_node *node;
+ void __rcu **slots;
+
+ node = mas_mn(mas);
+ type = mte_node_type(mas->node);
+ pivots = ma_pivots(node, type);
+ slots = ma_slots(node, type);
+
+ if (mas->offset)
+ mas->min = pivots[mas->offset - 1] + 1;
+ mas->max = mas_safe_pivot(mas, pivots, mas->offset, type);
+ mas->node = mas_slot(mas, slots, mas->offset);
+}
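+
+/*
+ * Worked example (hypothetical node with pivots {10, 20, 30}): descending
+ * into offset 1 sets mas->min = 11 and mas->max = 20 before mas->node is
+ * replaced by the child stored in slot 1.
+ */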
+
+/*
+ * mte_set_gap() - Set a maple node gap.
+ * @mn: The encoded maple node
+ * @gap: The offset of the gap to set
+ * @val: The gap value
+ */
+static inline void mte_set_gap(const struct maple_enode *mn,
+ unsigned char gap, unsigned long val)
+{
+ switch (mte_node_type(mn)) {
+ default:
+ break;
+ case maple_arange_64:
+ mte_to_node(mn)->ma64.gap[gap] = val;
+ break;
+ }
+}
+
+/*
+ * mas_ascend() - Walk up a level of the tree.
+ * @mas: The maple state
+ *
+ * Sets the @mas->max and @mas->min to the correct values when walking up. This
+ * may cause several levels of walking up to find the correct min and max.
+ * May find a dead node which will cause a premature return.
+ * Return: 1 on dead node, 0 otherwise
+ */
+static int mas_ascend(struct ma_state *mas)
+{
+ struct maple_enode *p_enode; /* parent enode. */
+ struct maple_enode *a_enode; /* ancestor enode. */
+ struct maple_node *a_node; /* ancestor node. */
+ struct maple_node *p_node; /* parent node. */
+ unsigned char a_slot;
+ enum maple_type a_type;
+ unsigned long min, max;
+ unsigned long *pivots;
+ bool set_max = false, set_min = false;
+
+ a_node = mas_mn(mas);
+ if (ma_is_root(a_node)) {
+ mas->offset = 0;
+ return 0;
+ }
+
+ p_node = mte_parent(mas->node);
+ if (unlikely(a_node == p_node))
+ return 1;
+
+ a_type = mas_parent_type(mas, mas->node);
+ mas->offset = mte_parent_slot(mas->node);
+ a_enode = mt_mk_node(p_node, a_type);
+
+ /* Check to make sure all parent information is still accurate */
+ if (p_node != mte_parent(mas->node))
+ return 1;
+
+ mas->node = a_enode;
+
+ if (mte_is_root(a_enode)) {
+ mas->max = ULONG_MAX;
+ mas->min = 0;
+ return 0;
+ }
+
+ if (!mas->min)
+ set_min = true;
+
+ if (mas->max == ULONG_MAX)
+ set_max = true;
+
+ min = 0;
+ max = ULONG_MAX;
+ do {
+ p_enode = a_enode;
+ a_type = mas_parent_type(mas, p_enode);
+ a_node = mte_parent(p_enode);
+ a_slot = mte_parent_slot(p_enode);
+ a_enode = mt_mk_node(a_node, a_type);
+ pivots = ma_pivots(a_node, a_type);
+
+ if (unlikely(ma_dead_node(a_node)))
+ return 1;
+
+ if (!set_min && a_slot) {
+ set_min = true;
+ min = pivots[a_slot - 1] + 1;
+ }
+
+ if (!set_max && a_slot < mt_pivots[a_type]) {
+ set_max = true;
+ max = pivots[a_slot];
+ }
+
+ if (unlikely(ma_dead_node(a_node)))
+ return 1;
+
+ if (unlikely(ma_is_root(a_node)))
+ break;
+
+ } while (!set_min || !set_max);
+
+ mas->max = max;
+ mas->min = min;
+ return 0;
+}
+
+/*
+ * mas_pop_node() - Get a previously allocated maple node from the maple state.
+ * @mas: The maple state
+ *
+ * Return: A pointer to a maple node.
+ */
+static inline struct maple_node *mas_pop_node(struct ma_state *mas)
+{
+ struct maple_alloc *ret, *node = mas->alloc;
+ unsigned long total = mas_allocated(mas);
+ unsigned int req = mas_alloc_req(mas);
+
+ /* nothing or a request pending. */
+ if (WARN_ON(!total))
+ return NULL;
+
+ if (total == 1) {
+ /* single allocation in this ma_state */
+ mas->alloc = NULL;
+ ret = node;
+ goto single_node;
+ }
+
+ if (node->node_count == 1) {
+ /* Single allocation in this node. */
+ mas->alloc = node->slot[0];
+ mas->alloc->total = node->total - 1;
+ ret = node;
+ goto new_head;
+ }
+ node->total--;
+ ret = node->slot[--node->node_count];
+ node->slot[node->node_count] = NULL;
+
+single_node:
+new_head:
+ if (req) {
+ req++;
+ mas_set_alloc_req(mas, req);
+ }
+
+ memset(ret, 0, sizeof(*ret));
+ return (struct maple_node *)ret;
+}
+
+/*
+ * mas_push_node() - Push a node back on the maple state allocation.
+ * @mas: The maple state
+ * @used: The used maple node
+ *
+ * Stores the maple node back into @mas->alloc for reuse. Updates allocated and
+ * requested node count as necessary.
+ */
+static inline void mas_push_node(struct ma_state *mas, struct maple_node *used)
+{
+ struct maple_alloc *reuse = (struct maple_alloc *)used;
+ struct maple_alloc *head = mas->alloc;
+ unsigned long count;
+ unsigned int requested = mas_alloc_req(mas);
+
+ count = mas_allocated(mas);
+
+ reuse->request_count = 0;
+ reuse->node_count = 0;
+ if (count && (head->node_count < MAPLE_ALLOC_SLOTS)) {
+ head->slot[head->node_count++] = reuse;
+ head->total++;
+ goto done;
+ }
+
+ reuse->total = 1;
+ if ((head) && !((unsigned long)head & 0x1)) {
+ reuse->slot[0] = head;
+ reuse->node_count = 1;
+ reuse->total += head->total;
+ }
+
+ mas->alloc = reuse;
+done:
+ if (requested > 1)
+ mas_set_alloc_req(mas, requested - 1);
+}
+
+/*
+ * mas_alloc_nodes() - Allocate nodes into a maple state
+ * @mas: The maple state
+ * @gfp: The GFP Flags
+ */
+static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
+{
+ struct maple_alloc *node;
+ unsigned long allocated = mas_allocated(mas);
+ unsigned int requested = mas_alloc_req(mas);
+ unsigned int count;
+ void **slots = NULL;
+ unsigned int max_req = 0;
+
+ if (!requested)
+ return;
+
+ mas_set_alloc_req(mas, 0);
+ if (mas->mas_flags & MA_STATE_PREALLOC) {
+ if (allocated)
+ return;
+ WARN_ON(!allocated);
+ }
+
+ if (!allocated || mas->alloc->node_count == MAPLE_ALLOC_SLOTS) {
+ node = (struct maple_alloc *)mt_alloc_one(gfp);
+ if (!node)
+ goto nomem_one;
+
+ if (allocated) {
+ node->slot[0] = mas->alloc;
+ node->node_count = 1;
+ } else {
+ node->node_count = 0;
+ }
+
+ mas->alloc = node;
+ node->total = ++allocated;
+ requested--;
+ }
+
+ node = mas->alloc;
+ node->request_count = 0;
+ while (requested) {
+ max_req = MAPLE_ALLOC_SLOTS - node->node_count;
+ slots = (void **)&node->slot[node->node_count];
+ max_req = min(requested, max_req);
+ count = mt_alloc_bulk(gfp, max_req, slots);
+ if (!count)
+ goto nomem_bulk;
+
+ if (node->node_count == 0) {
+ node->slot[0]->node_count = 0;
+ node->slot[0]->request_count = 0;
+ }
+
+ node->node_count += count;
+ allocated += count;
+ node = node->slot[0];
+ requested -= count;
+ }
+ mas->alloc->total = allocated;
+ return;
+
+nomem_bulk:
+ /* Clean up potential freed allocations on bulk failure */
+ memset(slots, 0, max_req * sizeof(unsigned long));
+nomem_one:
+ mas_set_alloc_req(mas, requested);
+ if (mas->alloc && !(((unsigned long)mas->alloc & 0x1)))
+ mas->alloc->total = allocated;
+ mas_set_err(mas, -ENOMEM);
+}
+
+/*
+ * mas_free() - Free an encoded maple node
+ * @mas: The maple state
+ * @used: The encoded maple node to free.
+ *
+ * Uses rcu free if necessary, pushes @used back on the maple state allocations
+ * otherwise.
+ */
+static inline void mas_free(struct ma_state *mas, struct maple_enode *used)
+{
+ struct maple_node *tmp = mte_to_node(used);
+
+ if (mt_in_rcu(mas->tree))
+ ma_free_rcu(tmp);
+ else
+ mas_push_node(mas, tmp);
+}
+
+/*
+ * mas_node_count_gfp() - Check if enough nodes are allocated and request more
+ * if there are not enough nodes.
+ * @mas: The maple state
+ * @count: The number of nodes needed
+ * @gfp: the gfp flags
+ */
+static void mas_node_count_gfp(struct ma_state *mas, int count, gfp_t gfp)
+{
+ unsigned long allocated = mas_allocated(mas);
+
+ if (allocated < count) {
+ mas_set_alloc_req(mas, count - allocated);
+ mas_alloc_nodes(mas, gfp);
+ }
+}
+
+/*
+ * mas_node_count() - Check if enough nodes are allocated and request more if
+ * there are not enough nodes.
+ * @mas: The maple state
+ * @count: The number of nodes needed
+ *
+ * Note: Uses GFP_NOWAIT | __GFP_NOWARN for gfp flags.
+ */
+static void mas_node_count(struct ma_state *mas, int count)
+{
+ return mas_node_count_gfp(mas, count, GFP_NOWAIT | __GFP_NOWARN);
+}
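+
+/*
+ * A usage sketch of the allocation interfaces above; see
+ * mas_destroy_rebalance() below for a real caller:
+ *
+ *	mas_node_count(mas, 3);		request three nodes
+ *	if (mas_is_err(mas))
+ *		return;			request failed, -ENOMEM is set
+ *	node = mas_pop_node(mas);	take one pre-allocated node
+ *	...
+ *	mas_push_node(mas, unused);	return an unused node for reuse
+ */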
+
+/*
+ * mas_start() - Sets up maple state for operations.
+ * @mas: The maple state.
+ *
+ * If mas->node == MAS_START, then set the min, max and depth to
+ * defaults.
+ *
+ * Return:
+ * - If mas->node is an error or not MAS_START, return NULL.
+ * - If it's an empty tree: NULL & mas->node == MAS_NONE
+ * - If it's a single entry: The entry & mas->node == MAS_ROOT
+ * - If it's a tree: NULL & mas->node == safe root node.
+ */
+static inline struct maple_enode *mas_start(struct ma_state *mas)
+{
+ if (likely(mas_is_start(mas))) {
+ struct maple_enode *root;
+
+ mas->min = 0;
+ mas->max = ULONG_MAX;
+
+retry:
+ mas->depth = 0;
+ root = mas_root(mas);
+ /* Tree with nodes */
+ if (likely(xa_is_node(root))) {
+ mas->depth = 1;
+ mas->node = mte_safe_root(root);
+ mas->offset = 0;
+ if (mte_dead_node(mas->node))
+ goto retry;
+
+ return NULL;
+ }
+
+ /* empty tree */
+ if (unlikely(!root)) {
+ mas->node = MAS_NONE;
+ mas->offset = MAPLE_NODE_SLOTS;
+ return NULL;
+ }
+
+ /* Single entry tree */
+ mas->node = MAS_ROOT;
+ mas->offset = MAPLE_NODE_SLOTS;
+
+ /* A single entry is only valid at index 0 */
+ if (mas->index > 0)
+ return NULL;
+
+ return root;
+ }
+
+ return NULL;
+}
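+
+/*
+ * Reading the mas_start() return contract, for illustration:
+ *
+ *	entry = mas_start(mas);
+ *	if (mas->node == MAS_NONE)	empty tree; entry is NULL
+ *	else if (mas->node == MAS_ROOT)	single entry tree; entry is the
+ *					value only when mas->index == 0
+ *	else				mas->node is the safe root; descend
+ */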
+
+/*
+ * ma_data_end() - Find the end of the data in a node.
+ * @node: The maple node
+ * @type: The maple node type
+ * @pivots: The array of pivots in the node
+ * @max: The maximum value in the node
+ *
+ * Uses metadata to find the end of the data when possible.
+ * Return: The zero indexed last slot with data (may be null).
+ */
+static inline unsigned char ma_data_end(struct maple_node *node,
+ enum maple_type type,
+ unsigned long *pivots,
+ unsigned long max)
+{
+ unsigned char offset;
+
+ if (!pivots)
+ return 0;
+
+ if (type == maple_arange_64)
+ return ma_meta_end(node, type);
+
+ offset = mt_pivots[type] - 1;
+ if (likely(!pivots[offset]))
+ return ma_meta_end(node, type);
+
+ if (likely(pivots[offset] == max))
+ return offset;
+
+ return mt_pivots[type];
+}
+
+/*
+ * mas_data_end() - Find the end of the data (slot).
+ * @mas: the maple state
+ *
+ * This method is optimized to check the metadata of a node if the node type
+ * supports data end metadata.
+ *
+ * Return: The zero indexed last slot with data (may be null).
+ */
+static inline unsigned char mas_data_end(struct ma_state *mas)
+{
+ enum maple_type type;
+ struct maple_node *node;
+ unsigned char offset;
+ unsigned long *pivots;
+
+ type = mte_node_type(mas->node);
+ node = mas_mn(mas);
+ if (type == maple_arange_64)
+ return ma_meta_end(node, type);
+
+ pivots = ma_pivots(node, type);
+ if (unlikely(ma_dead_node(node)))
+ return 0;
+
+ offset = mt_pivots[type] - 1;
+ if (likely(!pivots[offset]))
+ return ma_meta_end(node, type);
+
+ if (likely(pivots[offset] == mas->max))
+ return offset;
+
+ return mt_pivots[type];
+}
+
+/*
+ * mas_leaf_max_gap() - Returns the largest gap in a leaf node
+ * @mas: the maple state
+ *
+ * Return: The maximum gap in the leaf.
+ */
+static unsigned long mas_leaf_max_gap(struct ma_state *mas)
+{
+ enum maple_type mt;
+ unsigned long pstart, gap, max_gap;
+ struct maple_node *mn;
+ unsigned long *pivots;
+ void __rcu **slots;
+ unsigned char i;
+ unsigned char max_piv;
+
+ mt = mte_node_type(mas->node);
+ mn = mas_mn(mas);
+ slots = ma_slots(mn, mt);
+ max_gap = 0;
+ if (unlikely(ma_is_dense(mt))) {
+ gap = 0;
+ for (i = 0; i < mt_slots[mt]; i++) {
+ if (slots[i]) {
+ if (gap > max_gap)
+ max_gap = gap;
+ gap = 0;
+ } else {
+ gap++;
+ }
+ }
+ if (gap > max_gap)
+ max_gap = gap;
+ return max_gap;
+ }
+
+ /*
+ * Checking the first implied pivot optimizes the loop below, and slot 1
+ * may be skipped if there is a gap in slot 0.
+ */
+ pivots = ma_pivots(mn, mt);
+ if (likely(!slots[0])) {
+ max_gap = pivots[0] - mas->min + 1;
+ i = 2;
+ } else {
+ i = 1;
+ }
+
+ /* reduce max_piv as the special case is checked before the loop */
+ max_piv = ma_data_end(mn, mt, pivots, mas->max) - 1;
+ /*
+ * Check end implied pivot which can only be a gap on the right most
+ * node.
+ */
+ if (unlikely(mas->max == ULONG_MAX) && !slots[max_piv + 1]) {
+ gap = ULONG_MAX - pivots[max_piv];
+ if (gap > max_gap)
+ max_gap = gap;
+ }
+
+ for (; i <= max_piv; i++) {
+ /* data == no gap. */
+ if (likely(slots[i]))
+ continue;
+
+ pstart = pivots[i - 1];
+ gap = pivots[i] - pstart;
+ if (gap > max_gap)
+ max_gap = gap;
+
+ /* There cannot be two gaps in a row. */
+ i++;
+ }
+ return max_gap;
+}
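+
+/*
+ * Worked example (hypothetical leaf with mas->min = 0 and mas->max = 100):
+ * given pivots = {10, 20, 35, 100} and slots = {A, NULL, B, C}, slot 1 is
+ * the only gap and covers indices 11-20, so mas_leaf_max_gap() returns
+ * pivots[1] - pivots[0] = 10.
+ */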
+
+/*
+ * ma_max_gap() - Get the maximum gap in a maple node (non-leaf)
+ * @node: The maple node
+ * @gaps: The pointer to the gaps
+ * @mt: The maple node type
+ * @off: Pointer to store the offset location of the gap.
+ *
+ * Uses the data end from the metadata to scan backwards across set gaps.
+ *
+ * Return: The maximum gap value
+ */
+static inline unsigned long
+ma_max_gap(struct maple_node *node, unsigned long *gaps, enum maple_type mt,
+ unsigned char *off)
+{
+ unsigned char offset, i;
+ unsigned long max_gap = 0;
+
+ i = offset = ma_meta_end(node, mt);
+ do {
+ if (gaps[i] > max_gap) {
+ max_gap = gaps[i];
+ offset = i;
+ }
+ } while (i--);
+
+ *off = offset;
+ return max_gap;
+}
+
+/*
+ * mas_max_gap() - find the largest gap in a non-leaf node and set the slot.
+ * @mas: The maple state.
+ *
+ * Return: The gap value.
+ */
+static inline unsigned long mas_max_gap(struct ma_state *mas)
+{
+ unsigned long *gaps;
+ unsigned char offset;
+ enum maple_type mt;
+ struct maple_node *node;
+
+ mt = mte_node_type(mas->node);
+ if (ma_is_leaf(mt))
+ return mas_leaf_max_gap(mas);
+
+ node = mas_mn(mas);
+ MAS_BUG_ON(mas, mt != maple_arange_64);
+ offset = ma_meta_gap(node, mt);
+ gaps = ma_gaps(node, mt);
+ return gaps[offset];
+}
+
+/*
+ * mas_parent_gap() - Set the parent gap and any gaps above, as needed
+ * @mas: The maple state
+ * @offset: The gap offset in the parent to set
+ * @new: The new gap value.
+ *
+ * Set the parent gap then continue to set the gap upwards, using the metadata
+ * of the parent to see if it is necessary to check the node above.
+ */
+static inline void mas_parent_gap(struct ma_state *mas, unsigned char offset,
+ unsigned long new)
+{
+ unsigned long meta_gap = 0;
+ struct maple_node *pnode;
+ struct maple_enode *penode;
+ unsigned long *pgaps;
+ unsigned char meta_offset;
+ enum maple_type pmt;
+
+ pnode = mte_parent(mas->node);
+ pmt = mas_parent_type(mas, mas->node);
+ penode = mt_mk_node(pnode, pmt);
+ pgaps = ma_gaps(pnode, pmt);
+
+ascend:
+ MAS_BUG_ON(mas, pmt != maple_arange_64);
+ meta_offset = ma_meta_gap(pnode, pmt);
+ meta_gap = pgaps[meta_offset];
+
+ pgaps[offset] = new;
+
+ if (meta_gap == new)
+ return;
+
+ if (offset != meta_offset) {
+ if (meta_gap > new)
+ return;
+
+ ma_set_meta_gap(pnode, pmt, offset);
+ } else if (new < meta_gap) {
+ new = ma_max_gap(pnode, pgaps, pmt, &meta_offset);
+ ma_set_meta_gap(pnode, pmt, meta_offset);
+ }
+
+ if (ma_is_root(pnode))
+ return;
+
+ /* Go to the parent node. */
+ pnode = mte_parent(penode);
+ pmt = mas_parent_type(mas, penode);
+ pgaps = ma_gaps(pnode, pmt);
+ offset = mte_parent_slot(penode);
+ penode = mt_mk_node(pnode, pmt);
+ goto ascend;
+}
+
+/*
+ * mas_update_gap() - Update a node's gaps and propagate up if necessary.
+ * @mas: the maple state.
+ */
+static inline void mas_update_gap(struct ma_state *mas)
+{
+ unsigned char pslot;
+ unsigned long p_gap;
+ unsigned long max_gap;
+
+ if (!mt_is_alloc(mas->tree))
+ return;
+
+ if (mte_is_root(mas->node))
+ return;
+
+ max_gap = mas_max_gap(mas);
+
+ pslot = mte_parent_slot(mas->node);
+ p_gap = ma_gaps(mte_parent(mas->node),
+ mas_parent_type(mas, mas->node))[pslot];
+
+ if (p_gap != max_gap)
+ mas_parent_gap(mas, pslot, max_gap);
+}
+
+/*
+ * mas_adopt_children() - Set the parent pointer of all nodes in @parent to
+ * @parent with the slot encoded.
+ * @mas: the maple state (for the tree)
+ * @parent: the maple encoded node containing the children.
+ */
+static inline void mas_adopt_children(struct ma_state *mas,
+ struct maple_enode *parent)
+{
+ enum maple_type type = mte_node_type(parent);
+ struct maple_node *node = mte_to_node(parent);
+ void __rcu **slots = ma_slots(node, type);
+ unsigned long *pivots = ma_pivots(node, type);
+ struct maple_enode *child;
+ unsigned char offset;
+
+ offset = ma_data_end(node, type, pivots, mas->max);
+ do {
+ child = mas_slot_locked(mas, slots, offset);
+ mas_set_parent(mas, child, parent, offset);
+ } while (offset--);
+}
+
+/*
+ * mas_put_in_tree() - Put a new node in the tree, smp_wmb(), and mark the old
+ * node as dead.
+ * @mas: the maple state with the new node
+ * @old_enode: The old maple encoded node to replace.
+ */
+static inline void mas_put_in_tree(struct ma_state *mas,
+ struct maple_enode *old_enode)
+ __must_hold(mas->tree->ma_lock)
+{
+ unsigned char offset;
+ void __rcu **slots;
+
+ if (mte_is_root(mas->node)) {
+ mas_mn(mas)->parent = ma_parent_ptr(mas_tree_parent(mas));
+ rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
+ mas_set_height(mas);
+ } else {
+ offset = mte_parent_slot(mas->node);
+ slots = ma_slots(mte_parent(mas->node),
+ mas_parent_type(mas, mas->node));
+ rcu_assign_pointer(slots[offset], mas->node);
+ }
+
+ mte_set_node_dead(old_enode);
+}
+
+/*
+ * mas_replace_node() - Replace a node by putting it in the tree, marking it
+ * dead, and freeing it.
+ * @mas: the ma_state with @mas->node pointing to the new node.
+ * @old_enode: The old maple encoded node.
+ */
+static inline void mas_replace_node(struct ma_state *mas,
+ struct maple_enode *old_enode)
+ __must_hold(mas->tree->ma_lock)
+{
+ mas_put_in_tree(mas, old_enode);
+ mas_free(mas, old_enode);
+}
+
+/*
+ * mas_find_child() - Find a child whose parent is @mas->node.
+ * @mas: the maple state with the parent.
+ * @child: the maple state to store the child.
+ *
+ * Return: true if a child was found, false otherwise.
+ */
+static inline bool mas_find_child(struct ma_state *mas, struct ma_state *child)
+ __must_hold(mas->tree->ma_lock)
+{
+ enum maple_type mt;
+ unsigned char offset;
+ unsigned char end;
+ unsigned long *pivots;
+ struct maple_enode *entry;
+ struct maple_node *node;
+ void __rcu **slots;
+
+ mt = mte_node_type(mas->node);
+ node = mas_mn(mas);
+ slots = ma_slots(node, mt);
+ pivots = ma_pivots(node, mt);
+ end = ma_data_end(node, mt, pivots, mas->max);
+ for (offset = mas->offset; offset <= end; offset++) {
+ entry = mas_slot_locked(mas, slots, offset);
+ if (mte_parent(entry) == node) {
+ *child = *mas;
+ mas->offset = offset + 1;
+ child->offset = offset;
+ mas_descend(child);
+ child->offset = 0;
+ return true;
+ }
+ }
+ return false;
+}
+
+/*
+ * mab_shift_right() - Shift the data in mab right. Note, does not clean out the
+ * old data or set b_node->b_end.
+ * @b_node: the maple_big_node
+ * @shift: the shift count
+ */
+static inline void mab_shift_right(struct maple_big_node *b_node,
+ unsigned char shift)
+{
+ unsigned long size = b_node->b_end * sizeof(unsigned long);
+
+ memmove(b_node->pivot + shift, b_node->pivot, size);
+ memmove(b_node->slot + shift, b_node->slot, size);
+ if (b_node->type == maple_arange_64)
+ memmove(b_node->gap + shift, b_node->gap, size);
+}
+
+/*
+ * mab_middle_node() - Check if a middle node is needed (unlikely)
+ * @b_node: the maple_big_node that contains the data.
+ * @split: the potential split location
+ * @slot_count: the size that can be stored in a single node being considered.
+ *
+ * Return: true if a middle node is required.
+ */
+static inline bool mab_middle_node(struct maple_big_node *b_node, int split,
+ unsigned char slot_count)
+{
+ unsigned char size = b_node->b_end;
+
+ if (size >= 2 * slot_count)
+ return true;
+
+ if (!b_node->slot[split] && (size >= 2 * slot_count - 1))
+ return true;
+
+ return false;
+}
+
+/*
+ * mab_no_null_split() - ensure the split doesn't fall on a NULL
+ * @b_node: the maple_big_node with the data
+ * @split: the suggested split location
+ * @slot_count: the number of slots in the node being considered.
+ *
+ * Return: the split location.
+ */
+static inline int mab_no_null_split(struct maple_big_node *b_node,
+ unsigned char split, unsigned char slot_count)
+{
+ if (!b_node->slot[split]) {
+ /*
+ * If the split is less than the max slot && the right side will
+ * still be sufficient, then increment the split on NULL.
+ */
+ if ((split < slot_count - 1) &&
+ (b_node->b_end - split) > (mt_min_slots[b_node->type]))
+ split++;
+ else
+ split--;
+ }
+ return split;
+}
+
+/*
+ * mab_calc_split() - Calculate the split location and whether a second split
+ * is needed.
+ * @mas: The maple state
+ * @bn: The maple_big_node with the data
+ * @mid_split: The second split, if required. 0 otherwise.
+ * @min: The minimum of the range being stored
+ *
+ * Return: The first split location. The middle split is set in @mid_split.
+ */
+static inline int mab_calc_split(struct ma_state *mas,
+ struct maple_big_node *bn, unsigned char *mid_split, unsigned long min)
+{
+ unsigned char b_end = bn->b_end;
+ int split = b_end / 2; /* Assume equal split. */
+ unsigned char slot_min, slot_count = mt_slots[bn->type];
+
+ /*
+ * To support gap tracking, all NULL entries are kept together and a node
+ * cannot end on a NULL entry, with the exception of the left-most leaf.
+ * This limitation means that a split must be checked for this condition
+ * and must be able to put more data in one direction or the other.
+ */
+ if (unlikely((mas->mas_flags & MA_STATE_BULK))) {
+ *mid_split = 0;
+ split = b_end - mt_min_slots[bn->type];
+
+ if (!ma_is_leaf(bn->type))
+ return split;
+
+ mas->mas_flags |= MA_STATE_REBALANCE;
+ if (!bn->slot[split])
+ split--;
+ return split;
+ }
+
+ /*
+ * Although extremely rare, it is possible to enter what is known as the 3-way
+ * split scenario. The 3-way split comes about by means of a store of a range
+ * that overwrites the end and beginning of two full nodes. The result is a set
+ * of entries that cannot be stored in 2 nodes. Sometimes, these two nodes can
+ * also be located in different parent nodes which are also full. This can
+ * carry upwards all the way to the root in the worst case.
+ */
+ if (unlikely(mab_middle_node(bn, split, slot_count))) {
+ split = b_end / 3;
+ *mid_split = split * 2;
+ } else {
+ slot_min = mt_min_slots[bn->type];
+
+ *mid_split = 0;
+ /*
+ * Avoid having a range less than the slot count unless it
+ * causes one node to be deficient.
+ * NOTE: mt_min_slots is 1 based, b_end and split are zero based.
+ */
+ while ((split < slot_count - 1) &&
+ ((bn->pivot[split] - min) < slot_count - 1) &&
+ (b_end - split > slot_min))
+ split++;
+ }
+
+ /* Avoid ending a node on a NULL entry */
+ split = mab_no_null_split(bn, split, slot_count);
+
+ if (unlikely(*mid_split))
+ *mid_split = mab_no_null_split(bn, *mid_split, slot_count);
+
+ return split;
+}
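+
+/*
+ * Worked example (hypothetical, non-bulk store): with b_end = 10 and
+ * slot_count = 16, mab_middle_node() is false, so the split starts at
+ * b_end / 2 = 5 and is then nudged forward by the range-size check and by
+ * mab_no_null_split(). A 3-way split only occurs once b_end reaches
+ * 2 * slot_count, or 2 * slot_count - 1 with a NULL at the split.
+ */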
+
+/*
+ * mas_mab_cp() - Copy data from a maple state inclusively to a maple_big_node
+ * and set @b_node->b_end to the next free slot.
+ * @mas: The maple state
+ * @mas_start: The starting slot to copy
+ * @mas_end: The end slot to copy (inclusively)
+ * @b_node: The maple_big_node to place the data
+ * @mab_start: The starting location in maple_big_node to store the data.
+ */
+static inline void mas_mab_cp(struct ma_state *mas, unsigned char mas_start,
+ unsigned char mas_end, struct maple_big_node *b_node,
+ unsigned char mab_start)
+{
+ enum maple_type mt;
+ struct maple_node *node;
+ void __rcu **slots;
+ unsigned long *pivots, *gaps;
+ int i = mas_start, j = mab_start;
+ unsigned char piv_end;
+
+ node = mas_mn(mas);
+ mt = mte_node_type(mas->node);
+ pivots = ma_pivots(node, mt);
+ if (!i) {
+ b_node->pivot[j] = pivots[i++];
+ if (unlikely(i > mas_end))
+ goto complete;
+ j++;
+ }
+
+ piv_end = min(mas_end, mt_pivots[mt]);
+ for (; i < piv_end; i++, j++) {
+ b_node->pivot[j] = pivots[i];
+ if (unlikely(!b_node->pivot[j]))
+ break;
+
+ if (unlikely(mas->max == b_node->pivot[j]))
+ goto complete;
+ }
+
+ if (likely(i <= mas_end))
+ b_node->pivot[j] = mas_safe_pivot(mas, pivots, i, mt);
+
+complete:
+ b_node->b_end = ++j;
+ j -= mab_start;
+ slots = ma_slots(node, mt);
+ memcpy(b_node->slot + mab_start, slots + mas_start, sizeof(void *) * j);
+ if (!ma_is_leaf(mt) && mt_is_alloc(mas->tree)) {
+ gaps = ma_gaps(node, mt);
+ memcpy(b_node->gap + mab_start, gaps + mas_start,
+ sizeof(unsigned long) * j);
+ }
+}
+
+/*
+ * mas_leaf_set_meta() - Set the metadata of a leaf if possible.
+ * @mas: The maple state
+ * @node: The maple node
+ * @pivots: pointer to the maple node pivots
+ * @mt: The maple type
+ * @end: The assumed end
+ *
+ * Note, end may be incremented within this function but not modified at the
+ * source. This is fine since the metadata is the last thing to be stored in a
+ * node during a write.
+ */
+static inline void mas_leaf_set_meta(struct ma_state *mas,
+ struct maple_node *node, unsigned long *pivots,
+ enum maple_type mt, unsigned char end)
+{
+ /* There is no room for metadata already */
+ if (mt_pivots[mt] <= end)
+ return;
+
+ if (pivots[end] && pivots[end] < mas->max)
+ end++;
+
+ if (end < mt_slots[mt] - 1)
+ ma_set_meta(node, mt, 0, end);
+}
+
+/*
+ * mab_mas_cp() - Copy data from maple_big_node to a maple encoded node.
+ * @b_node: the maple_big_node that has the data
+ * @mab_start: the start location in @b_node.
+ * @mab_end: The end location in @b_node (inclusively)
+ * @mas: The maple state with the maple encoded node.
+ * @new_max: Update @mas->max to the last pivot copied when true
+ */
+static inline void mab_mas_cp(struct maple_big_node *b_node,
+ unsigned char mab_start, unsigned char mab_end,
+ struct ma_state *mas, bool new_max)
+{
+ int i, j = 0;
+ enum maple_type mt = mte_node_type(mas->node);
+ struct maple_node *node = mte_to_node(mas->node);
+ void __rcu **slots = ma_slots(node, mt);
+ unsigned long *pivots = ma_pivots(node, mt);
+ unsigned long *gaps = NULL;
+ unsigned char end;
+
+ if (mab_end - mab_start > mt_pivots[mt])
+ mab_end--;
+
+ if (!pivots[mt_pivots[mt] - 1])
+ slots[mt_pivots[mt]] = NULL;
+
+ i = mab_start;
+ do {
+ pivots[j++] = b_node->pivot[i++];
+ } while (i <= mab_end && likely(b_node->pivot[i]));
+
+ memcpy(slots, b_node->slot + mab_start,
+ sizeof(void *) * (i - mab_start));
+
+ if (new_max)
+ mas->max = b_node->pivot[i - 1];
+
+ end = j - 1;
+ if (likely(!ma_is_leaf(mt) && mt_is_alloc(mas->tree))) {
+ unsigned long max_gap = 0;
+ unsigned char offset = 0;
+
+ gaps = ma_gaps(node, mt);
+ do {
+ gaps[--j] = b_node->gap[--i];
+ if (gaps[j] > max_gap) {
+ offset = j;
+ max_gap = gaps[j];
+ }
+ } while (j);
+
+ ma_set_meta(node, mt, offset, end);
+ } else {
+ mas_leaf_set_meta(mas, node, pivots, mt, end);
+ }
+}
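+
+/*
+ * The two copy helpers above form a round trip that the splitting and
+ * rebalancing code below relies on, roughly:
+ *
+ *	mas_mab_cp(mas, 0, end, b_node, 0);		node to big node
+ *	... modify the entries in b_node ...
+ *	mab_mas_cp(b_node, 0, b_node->b_end, mas, true);	and back
+ */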
+
+/*
+ * mas_bulk_rebalance() - Rebalance the end of a tree after a bulk insert.
+ * @mas: The maple state
+ * @end: The maple node end
+ * @mt: The maple node type
+ */
+static inline void mas_bulk_rebalance(struct ma_state *mas, unsigned char end,
+ enum maple_type mt)
+{
+ if (!(mas->mas_flags & MA_STATE_BULK))
+ return;
+
+ if (mte_is_root(mas->node))
+ return;
+
+ if (end > mt_min_slots[mt]) {
+ mas->mas_flags &= ~MA_STATE_REBALANCE;
+ return;
+ }
+}
+
+/*
+ * mas_store_b_node() - Store an @entry into the b_node while also copying the
+ * data from a maple encoded node.
+ * @wr_mas: the maple write state
+ * @b_node: the maple_big_node to fill with data
+ * @offset_end: the offset to end copying
+ *
+ * Return: The actual end of the data stored in @b_node
+ */
+static noinline_for_kasan void mas_store_b_node(struct ma_wr_state *wr_mas,
+ struct maple_big_node *b_node, unsigned char offset_end)
+{
+ unsigned char slot;
+ unsigned char b_end;
+ /* Possible underflow of piv will wrap back to 0 before use. */
+ unsigned long piv;
+ struct ma_state *mas = wr_mas->mas;
+
+ b_node->type = wr_mas->type;
+ b_end = 0;
+ slot = mas->offset;
+ if (slot) {
+ /* Copy start data up to insert. */
+ mas_mab_cp(mas, 0, slot - 1, b_node, 0);
+ b_end = b_node->b_end;
+ piv = b_node->pivot[b_end - 1];
+ } else
+ piv = mas->min - 1;
+
+ if (piv + 1 < mas->index) {
+ /* Handle range starting after old range */
+ b_node->slot[b_end] = wr_mas->content;
+ if (!wr_mas->content)
+ b_node->gap[b_end] = mas->index - 1 - piv;
+ b_node->pivot[b_end++] = mas->index - 1;
+ }
+
+ /* Store the new entry. */
+ mas->offset = b_end;
+ b_node->slot[b_end] = wr_mas->entry;
+ b_node->pivot[b_end] = mas->last;
+
+ /* Appended. */
+ if (mas->last >= mas->max)
+ goto b_end;
+
+ /* Handle new range ending before old range ends */
+ piv = mas_safe_pivot(mas, wr_mas->pivots, offset_end, wr_mas->type);
+ if (piv > mas->last) {
+ if (piv == ULONG_MAX)
+ mas_bulk_rebalance(mas, b_node->b_end, wr_mas->type);
+
+ if (offset_end != slot)
+ wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
+ offset_end);
+
+ b_node->slot[++b_end] = wr_mas->content;
+ if (!wr_mas->content)
+ b_node->gap[b_end] = piv - mas->last + 1;
+ b_node->pivot[b_end] = piv;
+ }
+
+ slot = offset_end + 1;
+ if (slot > wr_mas->node_end)
+ goto b_end;
+
+ /* Copy end data to the end of the node. */
+ mas_mab_cp(mas, slot, wr_mas->node_end + 1, b_node, ++b_end);
+ b_node->b_end--;
+ return;
+
+b_end:
+ b_node->b_end = b_end;
+}
+
+/*
+ * mas_prev_sibling() - Find the previous node with the same parent.
+ * @mas: the maple state
+ *
+ * Return: True if there is a previous sibling, false otherwise.
+ */
+static inline bool mas_prev_sibling(struct ma_state *mas)
+{
+ unsigned int p_slot = mte_parent_slot(mas->node);
+
+ if (mte_is_root(mas->node))
+ return false;
+
+ if (!p_slot)
+ return false;
+
+ mas_ascend(mas);
+ mas->offset = p_slot - 1;
+ mas_descend(mas);
+ return true;
+}
+
+/*
+ * mas_next_sibling() - Find the next node with the same parent.
+ * @mas: the maple state
+ *
+ * Return: true if there is a next sibling, false otherwise.
+ */
+static inline bool mas_next_sibling(struct ma_state *mas)
+{
+ MA_STATE(parent, mas->tree, mas->index, mas->last);
+
+ if (mte_is_root(mas->node))
+ return false;
+
+ parent = *mas;
+ mas_ascend(&parent);
+ parent.offset = mte_parent_slot(mas->node) + 1;
+ if (parent.offset > mas_data_end(&parent))
+ return false;
+
+ *mas = parent;
+ mas_descend(mas);
+ return true;
+}
+
+/*
+ * mte_node_or_none() - Return the encoded node or MAS_NONE.
+ * @enode: The encoded maple node.
+ *
+ * Shorthand to avoid setting %NULLs in the tree or maple_subtree_state.
+ *
+ * Return: @enode or MAS_NONE
+ */
+static inline struct maple_enode *mte_node_or_none(struct maple_enode *enode)
+{
+ if (enode)
+ return enode;
+
+ return ma_enode_ptr(MAS_NONE);
+}
+
+/*
+ * mas_wr_node_walk() - Find the correct offset for the index in the @mas.
+ * @wr_mas: The maple write state
+ *
+ * Uses mas_slot_locked() and does not need to worry about dead nodes.
+ */
+static inline void mas_wr_node_walk(struct ma_wr_state *wr_mas)
+{
+ struct ma_state *mas = wr_mas->mas;
+ unsigned char count, offset;
+
+ if (unlikely(ma_is_dense(wr_mas->type))) {
+ wr_mas->r_max = wr_mas->r_min = mas->index;
+ mas->offset = mas->index = mas->min;
+ return;
+ }
+
+ wr_mas->node = mas_mn(wr_mas->mas);
+ wr_mas->pivots = ma_pivots(wr_mas->node, wr_mas->type);
+ count = wr_mas->node_end = ma_data_end(wr_mas->node, wr_mas->type,
+ wr_mas->pivots, mas->max);
+ offset = mas->offset;
+
+ while (offset < count && mas->index > wr_mas->pivots[offset])
+ offset++;
+
+ wr_mas->r_max = offset < count ? wr_mas->pivots[offset] : mas->max;
+ wr_mas->r_min = mas_safe_min(mas, wr_mas->pivots, offset);
+ wr_mas->offset_end = mas->offset = offset;
+}
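+
+/*
+ * Worked example (hypothetical leaf with pivots {10, 20, 30},
+ * mas->offset = 0, and mas->index = 15): the walk stops at offset 1, so
+ * wr_mas->r_min = 11 and wr_mas->r_max = 20.
+ */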
+
+/*
+ * mast_rebalance_next() - Rebalance against the next node
+ * @mast: The maple subtree state
+ */
+static inline void mast_rebalance_next(struct maple_subtree_state *mast)
+{
+ unsigned char b_end = mast->bn->b_end;
+
+ mas_mab_cp(mast->orig_r, 0, mt_slot_count(mast->orig_r->node),
+ mast->bn, b_end);
+ mast->orig_r->last = mast->orig_r->max;
+}
+
+/*
+ * mast_rebalance_prev() - Rebalance against the previous node
+ * @mast: The maple subtree state
+ */
+static inline void mast_rebalance_prev(struct maple_subtree_state *mast)
+{
+ unsigned char end = mas_data_end(mast->orig_l) + 1;
+ unsigned char b_end = mast->bn->b_end;
+
+ mab_shift_right(mast->bn, end);
+ mas_mab_cp(mast->orig_l, 0, end - 1, mast->bn, 0);
+ mast->l->min = mast->orig_l->min;
+ mast->orig_l->index = mast->orig_l->min;
+ mast->bn->b_end = end + b_end;
+ mast->l->offset += end;
+}
+
+/*
+ * mast_spanning_rebalance() - Rebalance against the nearest neighbour,
+ * favouring the node to the right. The node to the right is checked, then the
+ * node to the left, at each level upwards until the root is reached. Data is
+ * copied into @mast->bn.
+ * @mast: The maple_subtree_state.
+ *
+ * Return: true if data was rebalanced into @mast->bn, false otherwise.
+ */
+static inline
+bool mast_spanning_rebalance(struct maple_subtree_state *mast)
+{
+ struct ma_state r_tmp = *mast->orig_r;
+ struct ma_state l_tmp = *mast->orig_l;
+ unsigned char depth = 0;
+
+ do {
+ mas_ascend(mast->orig_r);
+ mas_ascend(mast->orig_l);
+ depth++;
+ if (mast->orig_r->offset < mas_data_end(mast->orig_r)) {
+ mast->orig_r->offset++;
+ do {
+ mas_descend(mast->orig_r);
+ mast->orig_r->offset = 0;
+ } while (--depth);
+
+ mast_rebalance_next(mast);
+ *mast->orig_l = l_tmp;
+ return true;
+ } else if (mast->orig_l->offset != 0) {
+ mast->orig_l->offset--;
+ do {
+ mas_descend(mast->orig_l);
+ mast->orig_l->offset =
+ mas_data_end(mast->orig_l);
+ } while (--depth);
+
+ mast_rebalance_prev(mast);
+ *mast->orig_r = r_tmp;
+ return true;
+ }
+ } while (!mte_is_root(mast->orig_r->node));
+
+ *mast->orig_r = r_tmp;
+ *mast->orig_l = l_tmp;
+ return false;
+}
+
+/*
+ * mast_ascend() - Ascend the original left and right maple states.
+ * @mast: the maple subtree state.
+ *
+ * Ascend the original left and right sides. Set the offsets to point to the
+ * data already in the new tree (@mast->l and @mast->r).
+ */
+static inline void mast_ascend(struct maple_subtree_state *mast)
+{
+ MA_WR_STATE(wr_mas, mast->orig_r, NULL);
+ mas_ascend(mast->orig_l);
+ mas_ascend(mast->orig_r);
+
+ mast->orig_r->offset = 0;
+ mast->orig_r->index = mast->r->max;
+ /* last should be larger than or equal to index */
+ if (mast->orig_r->last < mast->orig_r->index)
+ mast->orig_r->last = mast->orig_r->index;
+
+ wr_mas.type = mte_node_type(mast->orig_r->node);
+ mas_wr_node_walk(&wr_mas);
+ /* Set up the left side of things */
+ mast->orig_l->offset = 0;
+ mast->orig_l->index = mast->l->min;
+ wr_mas.mas = mast->orig_l;
+ wr_mas.type = mte_node_type(mast->orig_l->node);
+ mas_wr_node_walk(&wr_mas);
+
+ mast->bn->type = wr_mas.type;
+}
+
+/*
+ * mas_new_ma_node() - Create and return a new maple node. Helper function.
+ * @mas: the maple state with the allocations.
+ * @b_node: the maple_big_node with the type encoding.
+ *
+ * Use the node type from the maple_big_node to allocate a new node from the
+ * ma_state. This function exists mainly for code readability.
+ *
+ * Return: A new maple encoded node
+ */
+static inline struct maple_enode
+*mas_new_ma_node(struct ma_state *mas, struct maple_big_node *b_node)
+{
+ return mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)), b_node->type);
+}
+
+/*
+ * mas_mab_to_node() - Set up right and middle nodes
+ *
+ * @mas: the maple state that contains the allocations.
+ * @b_node: the node which contains the data.
+ * @left: The pointer which will have the left node
+ * @right: The pointer which may have the right node
+ * @middle: the pointer which may have the middle node (rare)
+ * @mid_split: the split location for the middle node
+ * @min: the minimum of the range being stored, passed to mab_calc_split()
+ *
+ * Return: the split of left.
+ */
+static inline unsigned char mas_mab_to_node(struct ma_state *mas,
+ struct maple_big_node *b_node, struct maple_enode **left,
+ struct maple_enode **right, struct maple_enode **middle,
+ unsigned char *mid_split, unsigned long min)
+{
+ unsigned char split = 0;
+ unsigned char slot_count = mt_slots[b_node->type];
+
+ *left = mas_new_ma_node(mas, b_node);
+ *right = NULL;
+ *middle = NULL;
+ *mid_split = 0;
+
+ if (b_node->b_end < slot_count) {
+ split = b_node->b_end;
+ } else {
+ split = mab_calc_split(mas, b_node, mid_split, min);
+ *right = mas_new_ma_node(mas, b_node);
+ }
+
+ if (*mid_split)
+ *middle = mas_new_ma_node(mas, b_node);
+
+ return split;
+}
+
+/*
+ * mab_set_b_end() - Add entry to b_node at b_node->b_end and increment the end
+ * pointer.
+ * @b_node: the big node to add the entry
+ * @mas: the maple state to get the pivot (mas->max)
+ * @entry: the entry to add, if NULL nothing happens.
+ */
+static inline void mab_set_b_end(struct maple_big_node *b_node,
+ struct ma_state *mas,
+ void *entry)
+{
+ if (!entry)
+ return;
+
+ b_node->slot[b_node->b_end] = entry;
+ if (mt_is_alloc(mas->tree))
+ b_node->gap[b_node->b_end] = mas_max_gap(mas);
+ b_node->pivot[b_node->b_end++] = mas->max;
+}
+
+/*
+ * mas_set_split_parent() - combine_then_separate helper function. Sets the parent
+ * of @mas->node to either @left or @right, depending on @slot and @split
+ *
+ * @mas: the maple state with the node that needs a parent
+ * @left: possible parent 1
+ * @right: possible parent 2
+ * @slot: the slot the mas->node was placed
+ * @split: the split location between @left and @right
+ */
+static inline void mas_set_split_parent(struct ma_state *mas,
+ struct maple_enode *left,
+ struct maple_enode *right,
+ unsigned char *slot, unsigned char split)
+{
+ if (mas_is_none(mas))
+ return;
+
+ if ((*slot) <= split)
+ mas_set_parent(mas, mas->node, left, *slot);
+ else if (right)
+ mas_set_parent(mas, mas->node, right, (*slot) - split - 1);
+
+ (*slot)++;
+}
+
+/*
+ * mte_mid_split_check() - Check if the next node passes the mid-split
+ * @l: Pointer to the left encoded maple node.
+ * @r: Pointer to the right encoded maple node.
+ * @right: The encoded maple node to the right of the split.
+ * @slot: The offset
+ * @split: Pointer to the split location.
+ * @mid_split: The middle split.
+ */
+static inline void mte_mid_split_check(struct maple_enode **l,
+ struct maple_enode **r,
+ struct maple_enode *right,
+ unsigned char slot,
+ unsigned char *split,
+ unsigned char mid_split)
+{
+ if (*r == right)
+ return;
+
+ if (slot < mid_split)
+ return;
+
+ *l = *r;
+ *r = right;
+ *split = mid_split;
+}
+
+/*
+ * mast_set_split_parents() - Helper function to set three nodes' parents. Slot
+ * is taken from @mast->l.
+ * @mast: the maple subtree state
+ * @left: the left node
+ * @middle: the middle node
+ * @right: the right node
+ * @split: the split location.
+ * @mid_split: the middle split location.
+ */
+static inline void mast_set_split_parents(struct maple_subtree_state *mast,
+ struct maple_enode *left,
+ struct maple_enode *middle,
+ struct maple_enode *right,
+ unsigned char split,
+ unsigned char mid_split)
+{
+ unsigned char slot;
+ struct maple_enode *l = left;
+ struct maple_enode *r = right;
+
+ if (mas_is_none(mast->l))
+ return;
+
+ if (middle)
+ r = middle;
+
+ slot = mast->l->offset;
+
+ mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
+ mas_set_split_parent(mast->l, l, r, &slot, split);
+
+ mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
+ mas_set_split_parent(mast->m, l, r, &slot, split);
+
+ mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
+ mas_set_split_parent(mast->r, l, r, &slot, split);
+}
+
+/*
+ * mas_topiary_node() - Dispose of a single node
+ * @mas: The maple state for pushing nodes
+ * @enode: The encoded maple node
+ * @in_rcu: If the tree is in rcu mode
+ *
+ * The node will either be RCU freed or pushed back on the maple state.
+ */
+static inline void mas_topiary_node(struct ma_state *mas,
+ struct maple_enode *enode, bool in_rcu)
+{
+ struct maple_node *tmp;
+
+ if (enode == MAS_NONE)
+ return;
+
+ tmp = mte_to_node(enode);
+ mte_set_node_dead(enode);
+ if (in_rcu)
+ ma_free_rcu(tmp);
+ else
+ mas_push_node(mas, tmp);
+}
+
+/*
+ * mas_topiary_replace() - Replace the data with new data, then repair the
+ * parent links within the new tree. Iterate over the dead sub-tree and collect
+ * the dead subtrees, then dispose of the nodes that are no longer of use.
+ *
+ * The new tree will have up to three children with the correct parent. Keep
+ * track of the new entries as they need to be followed to find the next level
+ * of new entries.
+ *
+ * The old tree will have up to three children with the old parent. Keep track
+ * of the old entries as they may have more nodes below replaced. Nodes within
+ * [index, last] are dead subtrees, others need to be freed and followed.
+ *
+ * @mas: The maple state pointing at the new data
+ * @old_enode: The maple encoded node being replaced
+ */
+static inline void mas_topiary_replace(struct ma_state *mas,
+ struct maple_enode *old_enode)
+{
+ struct ma_state tmp[3], tmp_next[3];
+ MA_TOPIARY(subtrees, mas->tree);
+ bool in_rcu;
+ int i, n;
+
+ /* Place data in tree & then mark node as old */
+ mas_put_in_tree(mas, old_enode);
+
+ /* Update the parent pointers in the tree */
+ tmp[0] = *mas;
+ tmp[0].offset = 0;
+ tmp[1].node = MAS_NONE;
+ tmp[2].node = MAS_NONE;
+ while (!mte_is_leaf(tmp[0].node)) {
+ n = 0;
+ for (i = 0; i < 3; i++) {
+ if (mas_is_none(&tmp[i]))
+ continue;
+
+ while (n < 3) {
+ if (!mas_find_child(&tmp[i], &tmp_next[n]))
+ break;
+ n++;
+ }
+
+ mas_adopt_children(&tmp[i], tmp[i].node);
+ }
+
+ if (MAS_WARN_ON(mas, n == 0))
+ break;
+
+ while (n < 3)
+ tmp_next[n++].node = MAS_NONE;
+
+ for (i = 0; i < 3; i++)
+ tmp[i] = tmp_next[i];
+ }
+
+ /* Collect the old nodes that need to be discarded */
+ if (mte_is_leaf(old_enode))
+ return mas_free(mas, old_enode);
+
+ tmp[0] = *mas;
+ tmp[0].offset = 0;
+ tmp[0].node = old_enode;
+ tmp[1].node = MAS_NONE;
+ tmp[2].node = MAS_NONE;
+ in_rcu = mt_in_rcu(mas->tree);
+ do {
+ n = 0;
+ for (i = 0; i < 3; i++) {
+ if (mas_is_none(&tmp[i]))
+ continue;
+
+ while (n < 3) {
+ if (!mas_find_child(&tmp[i], &tmp_next[n]))
+ break;
+
+ if ((tmp_next[n].min >= tmp_next->index) &&
+ (tmp_next[n].max <= tmp_next->last)) {
+ mat_add(&subtrees, tmp_next[n].node);
+ tmp_next[n].node = MAS_NONE;
+ } else {
+ n++;
+ }
+ }
+ }
+
+ if (MAS_WARN_ON(mas, n == 0))
+ break;
+
+ while (n < 3)
+ tmp_next[n++].node = MAS_NONE;
+
+ for (i = 0; i < 3; i++) {
+ mas_topiary_node(mas, tmp[i].node, in_rcu);
+ tmp[i] = tmp_next[i];
+ }
+ } while (!mte_is_leaf(tmp[0].node));
+
+ for (i = 0; i < 3; i++)
+ mas_topiary_node(mas, tmp[i].node, in_rcu);
+
+ mas_mat_destroy(mas, &subtrees);
+}
+
+/*
+ * mas_wmb_replace() - Write memory barrier and replace
+ * @mas: The maple state
+ * @old_enode: The old maple encoded node that is being replaced.
+ *
+ * Updates gap as necessary.
+ */
+static inline void mas_wmb_replace(struct ma_state *mas,
+ struct maple_enode *old_enode)
+{
+ /* Insert the new data in the tree */
+ mas_topiary_replace(mas, old_enode);
+
+ if (mte_is_leaf(mas->node))
+ return;
+
+ mas_update_gap(mas);
+}
+
+/*
+ * mast_cp_to_nodes() - Copy data out to nodes.
+ * @mast: The maple subtree state
+ * @left: The left encoded maple node
+ * @middle: The middle encoded maple node
+ * @right: The right encoded maple node
+ * @split: The location to split between left and (middle ? middle : right)
+ * @mid_split: The location to split between middle and right.
+ */
+static inline void mast_cp_to_nodes(struct maple_subtree_state *mast,
+ struct maple_enode *left, struct maple_enode *middle,
+ struct maple_enode *right, unsigned char split, unsigned char mid_split)
+{
+ bool new_lmax = true;
+
+ mast->l->node = mte_node_or_none(left);
+ mast->m->node = mte_node_or_none(middle);
+ mast->r->node = mte_node_or_none(right);
+
+ mast->l->min = mast->orig_l->min;
+ if (split == mast->bn->b_end) {
+ mast->l->max = mast->orig_r->max;
+ new_lmax = false;
+ }
+
+ mab_mas_cp(mast->bn, 0, split, mast->l, new_lmax);
+
+ if (middle) {
+ mab_mas_cp(mast->bn, 1 + split, mid_split, mast->m, true);
+ mast->m->min = mast->bn->pivot[split] + 1;
+ split = mid_split;
+ }
+
+ mast->r->max = mast->orig_r->max;
+ if (right) {
+ mab_mas_cp(mast->bn, 1 + split, mast->bn->b_end, mast->r, false);
+ mast->r->min = mast->bn->pivot[split] + 1;
+ }
+}
+
+/*
+ * mast_combine_cp_left() - Copy in the original left side of the tree into the
+ * combined data set in the maple subtree state big node.
+ * @mast: The maple subtree state
+ */
+static inline void mast_combine_cp_left(struct maple_subtree_state *mast)
+{
+ unsigned char l_slot = mast->orig_l->offset;
+
+ if (!l_slot)
+ return;
+
+ mas_mab_cp(mast->orig_l, 0, l_slot - 1, mast->bn, 0);
+}
+
+/*
+ * mast_combine_cp_right() - Copy in the original right side of the tree into the
+ * combined data set in the maple subtree state big node.
+ * @mast: The maple subtree state
+ */
+static inline void mast_combine_cp_right(struct maple_subtree_state *mast)
+{
+ if (mast->bn->pivot[mast->bn->b_end - 1] >= mast->orig_r->max)
+ return;
+
+ mas_mab_cp(mast->orig_r, mast->orig_r->offset + 1,
+ mt_slot_count(mast->orig_r->node), mast->bn,
+ mast->bn->b_end);
+ mast->orig_r->last = mast->orig_r->max;
+}
+
+/*
+ * mast_sufficient() - Check if the maple subtree state has enough data in the
+ * big node to create at least one sufficient node.
+ * @mast: the maple subtree state
+ */
+static inline bool mast_sufficient(struct maple_subtree_state *mast)
+{
+ if (mast->bn->b_end > mt_min_slot_count(mast->orig_l->node))
+ return true;
+
+ return false;
+}
+
+/*
+ * mast_overflow() - Check if there is too much data in the subtree state for a
+ * single node.
+ * @mast: The maple subtree state
+ */
+static inline bool mast_overflow(struct maple_subtree_state *mast)
+{
+ if (mast->bn->b_end >= mt_slot_count(mast->orig_l->node))
+ return true;
+
+ return false;
+}
+
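+/*
+ * mtree_range_walk() - Walk from the current node down to the leaf, tracking
+ * the range of each slot descended into along the way.
+ * @mas: The maple state, with @mas->node set to the node to walk from.
+ *
+ * On success, @mas->index and @mas->last are set to the range of the final
+ * slot, @mas->offset to that slot, and @mas->node to the leaf holding it.
+ *
+ * Return: The entry found (which may be NULL), or NULL with @mas reset when a
+ * dead node is encountered.
+ */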
+static inline void *mtree_range_walk(struct ma_state *mas)
+{
+ unsigned long *pivots;
+ unsigned char offset;
+ struct maple_node *node;
+ struct maple_enode *next, *last;
+ enum maple_type type;
+ void __rcu **slots;
+ unsigned char end;
+ unsigned long max, min;
+ unsigned long prev_max, prev_min;
+
+ next = mas->node;
+ min = mas->min;
+ max = mas->max;
+ do {
+ offset = 0;
+ last = next;
+ node = mte_to_node(next);
+ type = mte_node_type(next);
+ pivots = ma_pivots(node, type);
+ end = ma_data_end(node, type, pivots, max);
+ if (unlikely(ma_dead_node(node)))
+ goto dead_node;
+
+ if (pivots[offset] >= mas->index) {
+ prev_max = max;
+ prev_min = min;
+ max = pivots[offset];
+ goto next;
+ }
+
+ do {
+ offset++;
+ } while ((offset < end) && (pivots[offset] < mas->index));
+
+ prev_min = min;
+ min = pivots[offset - 1] + 1;
+ prev_max = max;
+ if (likely(offset < end && pivots[offset]))
+ max = pivots[offset];
+
+next:
+ slots = ma_slots(node, type);
+ next = mt_slot(mas->tree, slots, offset);
+ if (unlikely(ma_dead_node(node)))
+ goto dead_node;
+ } while (!ma_is_leaf(type));
+
+ mas->offset = offset;
+ mas->index = min;
+ mas->last = max;
+ mas->min = prev_min;
+ mas->max = prev_max;
+ mas->node = last;
+ return (void *)next;
+
+dead_node:
+ mas_reset(mas);
+ return NULL;
+}
+
+/*
+ * mas_spanning_rebalance() - Rebalance across two nodes which may not be peers.
+ * @mas: The starting maple state
+ * @mast: The maple_subtree_state, keeps track of 4 maple states.
+ * @count: The estimated count of iterations needed.
+ *
+ * Follow the tree upwards from @l_mas and @r_mas for @count, or until the root
+ * is hit. First @b_node is split into two entries which are inserted into the
+ * next iteration of the loop. @b_node is returned populated with the final
+ * iteration. @mas is used to obtain allocations. orig_l_mas keeps track of the
+ * nodes that will remain active by using orig_l_mas->index and orig_l_mas->last
+ * to account for what has been copied into the new sub-tree. The update of
+ * orig_l_mas->last is used in mas_consume to find the slots that will need to
+ * be either freed or destroyed. orig_l_mas->depth keeps track of the height of
+ * the new sub-tree in case the sub-tree becomes the full tree.
+ *
+ * Return: the number of elements in b_node during the last loop.
+ */
+static int mas_spanning_rebalance(struct ma_state *mas,
+ struct maple_subtree_state *mast, unsigned char count)
+{
+ unsigned char split, mid_split;
+ unsigned char slot = 0;
+ struct maple_enode *left = NULL, *middle = NULL, *right = NULL;
+ struct maple_enode *old_enode;
+
+ MA_STATE(l_mas, mas->tree, mas->index, mas->index);
+ MA_STATE(r_mas, mas->tree, mas->index, mas->last);
+ MA_STATE(m_mas, mas->tree, mas->index, mas->index);
+
+ /*
+ * The tree needs to be rebalanced and leaves need to be kept at the same level.
+ * Rebalancing is done by use of the ``struct ma_topiary``.
+ */
+ mast->l = &l_mas;
+ mast->m = &m_mas;
+ mast->r = &r_mas;
+ l_mas.node = r_mas.node = m_mas.node = MAS_NONE;
+
+ /* Rebalance if this is not the root and there is insufficient data. */
+ if (((mast->orig_l->min != 0) || (mast->orig_r->max != ULONG_MAX)) &&
+ unlikely(mast->bn->b_end <= mt_min_slots[mast->bn->type]))
+ mast_spanning_rebalance(mast);
+
+ l_mas.depth = 0;
+
+ /*
+ * Each level of the tree is examined and balanced, pushing data to the left or
+ * right, or rebalancing against left or right nodes is employed to avoid
+ * rippling up the tree to limit the amount of churn. Once a new sub-section of
+ * the tree is created, there may be a mix of new and old nodes. The old nodes
+ * will have the incorrect parent pointers and currently be in two trees: the
+ * original tree and the partially new tree. To remedy the parent pointers in
+ * the old tree, the new data is swapped into the active tree and a walk down
+ * the tree is performed and the parent pointers are updated.
+ * See mas_topiary_replace() for more information.
+ */
+ while (count--) {
+ mast->bn->b_end--;
+ mast->bn->type = mte_node_type(mast->orig_l->node);
+ split = mas_mab_to_node(mas, mast->bn, &left, &right, &middle,
+ &mid_split, mast->orig_l->min);
+ mast_set_split_parents(mast, left, middle, right, split,
+ mid_split);
+ mast_cp_to_nodes(mast, left, middle, right, split, mid_split);
+
+ /*
+ * Copy data from next level in the tree to mast->bn from next
+ * iteration
+ */
+ memset(mast->bn, 0, sizeof(struct maple_big_node));
+ mast->bn->type = mte_node_type(left);
+ l_mas.depth++;
+
+ /* Root already stored in l->node. */
+ if (mas_is_root_limits(mast->l))
+ goto new_root;
+
+ mast_ascend(mast);
+ mast_combine_cp_left(mast);
+ l_mas.offset = mast->bn->b_end;
+ mab_set_b_end(mast->bn, &l_mas, left);
+ mab_set_b_end(mast->bn, &m_mas, middle);
+ mab_set_b_end(mast->bn, &r_mas, right);
+
+ /* Copy anything necessary out of the right node. */
+ mast_combine_cp_right(mast);
+ mast->orig_l->last = mast->orig_l->max;
+
+ if (mast_sufficient(mast))
+ continue;
+
+ if (mast_overflow(mast))
+ continue;
+
+ /* May be a new root stored in mast->bn */
+ if (mas_is_root_limits(mast->orig_l))
+ break;
+
+ mast_spanning_rebalance(mast);
+
+ /* rebalancing from other nodes may require another loop. */
+ if (!count)
+ count++;
+ }
+
+ l_mas.node = mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)),
+ mte_node_type(mast->orig_l->node));
+ l_mas.depth++;
+ mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, &l_mas, true);
+ mas_set_parent(mas, left, l_mas.node, slot);
+ if (middle)
+ mas_set_parent(mas, middle, l_mas.node, ++slot);
+
+ if (right)
+ mas_set_parent(mas, right, l_mas.node, ++slot);
+
+ if (mas_is_root_limits(mast->l)) {
+new_root:
+ mas_mn(mast->l)->parent = ma_parent_ptr(mas_tree_parent(mas));
+ while (!mte_is_root(mast->orig_l->node))
+ mast_ascend(mast);
+ } else {
+ mas_mn(&l_mas)->parent = mas_mn(mast->orig_l)->parent;
+ }
+
+ old_enode = mast->orig_l->node;
+ mas->depth = l_mas.depth;
+ mas->node = l_mas.node;
+ mas->min = l_mas.min;
+ mas->max = l_mas.max;
+ mas->offset = l_mas.offset;
+ mas_wmb_replace(mas, old_enode);
+ mtree_range_walk(mas);
+ return mast->bn->b_end;
+}
+
+/*
+ * mas_rebalance() - Rebalance a given node.
+ * @mas: The maple state
+ * @b_node: The big maple node.
+ *
+ * Rebalance two nodes into a single node or two new nodes that are sufficient.
+ * Continue upwards until tree is sufficient.
+ *
+ * Return: the number of elements in b_node during the last loop.
+ */
+static inline int mas_rebalance(struct ma_state *mas,
+ struct maple_big_node *b_node)
+{
+ char empty_count = mas_mt_height(mas);
+ struct maple_subtree_state mast;
+ unsigned char shift, b_end = ++b_node->b_end;
+
+ MA_STATE(l_mas, mas->tree, mas->index, mas->last);
+ MA_STATE(r_mas, mas->tree, mas->index, mas->last);
+
+ trace_ma_op(__func__, mas);
+
+ /*
+ * Rebalancing occurs if a node is insufficient. Data is rebalanced
+ * against the node to the right if it exists, otherwise the node to the
+ * left of this node is rebalanced against this node. If rebalancing
+ * causes just one node to be produced instead of two, then the parent
+ * is also examined and rebalanced if it is insufficient. Every level
+ * tries to combine the data in the same way. If one node contains the
+ * entire range of the tree, then that node is used as a new root node.
+ */
+ mas_node_count(mas, empty_count * 2 - 1);
+ if (mas_is_err(mas))
+ return 0;
+
+ mast.orig_l = &l_mas;
+ mast.orig_r = &r_mas;
+ mast.bn = b_node;
+ mast.bn->type = mte_node_type(mas->node);
+
+ l_mas = r_mas = *mas;
+
+ if (mas_next_sibling(&r_mas)) {
+ mas_mab_cp(&r_mas, 0, mt_slot_count(r_mas.node), b_node, b_end);
+ r_mas.last = r_mas.index = r_mas.max;
+ } else {
+ mas_prev_sibling(&l_mas);
+ shift = mas_data_end(&l_mas) + 1;
+ mab_shift_right(b_node, shift);
+ mas->offset += shift;
+ mas_mab_cp(&l_mas, 0, shift - 1, b_node, 0);
+ b_node->b_end = shift + b_end;
+ l_mas.index = l_mas.last = l_mas.min;
+ }
+
+ return mas_spanning_rebalance(mas, &mast, empty_count);
+}
+
+/*
+ * mas_destroy_rebalance() - Rebalance left-most node while destroying the maple
+ * state.
+ * @mas: The maple state
+ * @end: The end of the left-most node.
+ *
+ * During a mass-insert event (such as forking), it may be necessary to
+ * rebalance the left-most node when it is not sufficient.
+ */
+static inline void mas_destroy_rebalance(struct ma_state *mas, unsigned char end)
+{
+ enum maple_type mt = mte_node_type(mas->node);
+ struct maple_node reuse, *newnode, *parent, *new_left, *left, *node;
+ struct maple_enode *eparent, *old_eparent;
+ unsigned char offset, tmp, split = mt_slots[mt] / 2;
+ void __rcu **l_slots, **slots;
+ unsigned long *l_pivs, *pivs, gap;
+ bool in_rcu = mt_in_rcu(mas->tree);
+
+ MA_STATE(l_mas, mas->tree, mas->index, mas->last);
+
+ l_mas = *mas;
+ mas_prev_sibling(&l_mas);
+
+ /* set up node. */
+ if (in_rcu) {
+ /* Allocate for both left and right as well as parent. */
+ mas_node_count(mas, 3);
+ if (mas_is_err(mas))
+ return;
+
+ newnode = mas_pop_node(mas);
+ } else {
+ newnode = &reuse;
+ }
+
+ node = mas_mn(mas);
+ newnode->parent = node->parent;
+ slots = ma_slots(newnode, mt);
+ pivs = ma_pivots(newnode, mt);
+ left = mas_mn(&l_mas);
+ l_slots = ma_slots(left, mt);
+ l_pivs = ma_pivots(left, mt);
+ if (!l_slots[split])
+ split++;
+ tmp = mas_data_end(&l_mas) - split;
+
+ memcpy(slots, l_slots + split + 1, sizeof(void *) * tmp);
+ memcpy(pivs, l_pivs + split + 1, sizeof(unsigned long) * tmp);
+ pivs[tmp] = l_mas.max;
+ memcpy(slots + tmp, ma_slots(node, mt), sizeof(void *) * end);
+ memcpy(pivs + tmp, ma_pivots(node, mt), sizeof(unsigned long) * end);
+
+ l_mas.max = l_pivs[split];
+ mas->min = l_mas.max + 1;
+ old_eparent = mt_mk_node(mte_parent(l_mas.node),
+ mas_parent_type(&l_mas, l_mas.node));
+ tmp += end;
+ if (!in_rcu) {
+ unsigned char max_p = mt_pivots[mt];
+ unsigned char max_s = mt_slots[mt];
+
+ if (tmp < max_p)
+ memset(pivs + tmp, 0,
+ sizeof(unsigned long) * (max_p - tmp));
+
+ if (tmp < mt_slots[mt])
+ memset(slots + tmp, 0, sizeof(void *) * (max_s - tmp));
+
+ memcpy(node, newnode, sizeof(struct maple_node));
+ ma_set_meta(node, mt, 0, tmp - 1);
+ mte_set_pivot(old_eparent, mte_parent_slot(l_mas.node),
+ l_pivs[split]);
+
+ /* Remove data from l_pivs. */
+ tmp = split + 1;
+ memset(l_pivs + tmp, 0, sizeof(unsigned long) * (max_p - tmp));
+ memset(l_slots + tmp, 0, sizeof(void *) * (max_s - tmp));
+ ma_set_meta(left, mt, 0, split);
+ eparent = old_eparent;
+
+ goto done;
+ }
+
+ /* RCU requires replacing both l_mas, mas, and parent. */
+ mas->node = mt_mk_node(newnode, mt);
+ ma_set_meta(newnode, mt, 0, tmp);
+
+ new_left = mas_pop_node(mas);
+ new_left->parent = left->parent;
+ mt = mte_node_type(l_mas.node);
+ slots = ma_slots(new_left, mt);
+ pivs = ma_pivots(new_left, mt);
+ memcpy(slots, l_slots, sizeof(void *) * split);
+ memcpy(pivs, l_pivs, sizeof(unsigned long) * split);
+ ma_set_meta(new_left, mt, 0, split);
+ l_mas.node = mt_mk_node(new_left, mt);
+
+ /* replace parent. */
+ offset = mte_parent_slot(mas->node);
+ mt = mas_parent_type(&l_mas, l_mas.node);
+ parent = mas_pop_node(mas);
+ slots = ma_slots(parent, mt);
+ pivs = ma_pivots(parent, mt);
+ memcpy(parent, mte_to_node(old_eparent), sizeof(struct maple_node));
+ rcu_assign_pointer(slots[offset], mas->node);
+ rcu_assign_pointer(slots[offset - 1], l_mas.node);
+ pivs[offset - 1] = l_mas.max;
+ eparent = mt_mk_node(parent, mt);
+done:
+ gap = mas_leaf_max_gap(mas);
+ mte_set_gap(eparent, mte_parent_slot(mas->node), gap);
+ gap = mas_leaf_max_gap(&l_mas);
+ mte_set_gap(eparent, mte_parent_slot(l_mas.node), gap);
+ mas_ascend(mas);
+
+ if (in_rcu) {
+ mas_replace_node(mas, old_eparent);
+ mas_adopt_children(mas, mas->node);
+ }
+
+ mas_update_gap(mas);
+}
+
+/*
+ * mas_split_final_node() - Split the final node in a subtree operation.
+ * @mast: The maple subtree state
+ * @mas: The maple state
+ * @height: The height of the tree in case it's a new root.
+ *
+ * Return: Always true.
+ */
+static inline bool mas_split_final_node(struct maple_subtree_state *mast,
+ struct ma_state *mas, int height)
+{
+ struct maple_enode *ancestor;
+
+ if (mte_is_root(mas->node)) {
+ if (mt_is_alloc(mas->tree))
+ mast->bn->type = maple_arange_64;
+ else
+ mast->bn->type = maple_range_64;
+ mas->depth = height;
+ }
+ /*
+ * Only a single node is used here; it may be the root.
+ * The big node data should just fit in a single node.
+ */
+ ancestor = mas_new_ma_node(mas, mast->bn);
+ mas_set_parent(mas, mast->l->node, ancestor, mast->l->offset);
+ mas_set_parent(mas, mast->r->node, ancestor, mast->r->offset);
+ mte_to_node(ancestor)->parent = mas_mn(mas)->parent;
+
+ mast->l->node = ancestor;
+ mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, mast->l, true);
+ mas->offset = mast->bn->b_end - 1;
+ return true;
+}
+
+/*
+ * mast_fill_bnode() - Copy data into the big node in the subtree state
+ * @mast: The maple subtree state
+ * @mas: the maple state
+ * @skip: The number of entries to skip when inserting the new nodes.
+ */
+static inline void mast_fill_bnode(struct maple_subtree_state *mast,
+ struct ma_state *mas,
+ unsigned char skip)
+{
+ bool cp = true;
+ unsigned char split;
+
+ memset(mast->bn->gap, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->gap));
+ memset(mast->bn->slot, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->slot));
+ memset(mast->bn->pivot, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->pivot));
+ mast->bn->b_end = 0;
+
+ if (mte_is_root(mas->node)) {
+ cp = false;
+ } else {
+ mas_ascend(mas);
+ mas->offset = mte_parent_slot(mas->node);
+ }
+
+ if (cp && mast->l->offset)
+ mas_mab_cp(mas, 0, mast->l->offset - 1, mast->bn, 0);
+
+ split = mast->bn->b_end;
+ mab_set_b_end(mast->bn, mast->l, mast->l->node);
+ mast->r->offset = mast->bn->b_end;
+ mab_set_b_end(mast->bn, mast->r, mast->r->node);
+ if (mast->bn->pivot[mast->bn->b_end - 1] == mas->max)
+ cp = false;
+
+ if (cp)
+ mas_mab_cp(mas, split + skip, mt_slot_count(mas->node) - 1,
+ mast->bn, mast->bn->b_end);
+
+ mast->bn->b_end--;
+ mast->bn->type = mte_node_type(mas->node);
+}
+
+/*
+ * mast_split_data() - Split the data in the subtree state big node into regular
+ * nodes.
+ * @mast: The maple subtree state
+ * @mas: The maple state
+ * @split: The location to split the big node
+ */
+static inline void mast_split_data(struct maple_subtree_state *mast,
+ struct ma_state *mas, unsigned char split)
+{
+ unsigned char p_slot;
+
+ mab_mas_cp(mast->bn, 0, split, mast->l, true);
+ mte_set_pivot(mast->r->node, 0, mast->r->max);
+ mab_mas_cp(mast->bn, split + 1, mast->bn->b_end, mast->r, false);
+ mast->l->offset = mte_parent_slot(mas->node);
+ mast->l->max = mast->bn->pivot[split];
+ mast->r->min = mast->l->max + 1;
+ if (mte_is_leaf(mas->node))
+ return;
+
+ p_slot = mast->orig_l->offset;
+ mas_set_split_parent(mast->orig_l, mast->l->node, mast->r->node,
+ &p_slot, split);
+ mas_set_split_parent(mast->orig_r, mast->l->node, mast->r->node,
+ &p_slot, split);
+}
+
+/*
+ * mas_push_data() - Instead of splitting a node, push some of its data to the
+ * left or right neighbouring node if there is room.
+ * @mas: The maple state
+ * @height: The current height of the maple state
+ * @mast: The maple subtree state
+ * @left: Push left or not.
+ *
+ * Keeping the height of the tree low means faster lookups.
+ *
+ * Return: True if pushed, false otherwise.
+ */
+static inline bool mas_push_data(struct ma_state *mas, int height,
+ struct maple_subtree_state *mast, bool left)
+{
+ unsigned char slot_total = mast->bn->b_end;
+ unsigned char end, space, split;
+
+ MA_STATE(tmp_mas, mas->tree, mas->index, mas->last);
+ tmp_mas = *mas;
+ tmp_mas.depth = mast->l->depth;
+
+ if (left && !mas_prev_sibling(&tmp_mas))
+ return false;
+ else if (!left && !mas_next_sibling(&tmp_mas))
+ return false;
+
+ end = mas_data_end(&tmp_mas);
+ slot_total += end;
+ space = 2 * mt_slot_count(mas->node) - 2;
+ /* -2 instead of -1 to ensure there isn't a triple split */
+ if (ma_is_leaf(mast->bn->type))
+ space--;
+
+ if (mas->max == ULONG_MAX)
+ space--;
+
+ if (slot_total >= space)
+ return false;
+
+ /* Get the data; Fill mast->bn */
+ mast->bn->b_end++;
+ if (left) {
+ mab_shift_right(mast->bn, end + 1);
+ mas_mab_cp(&tmp_mas, 0, end, mast->bn, 0);
+ mast->bn->b_end = slot_total + 1;
+ } else {
+ mas_mab_cp(&tmp_mas, 0, end, mast->bn, mast->bn->b_end);
+ }
+
+ /* Configure mast for splitting of mast->bn */
+ split = mt_slots[mast->bn->type] - 2;
+ if (left) {
+ /* Switch mas to prev node */
+ *mas = tmp_mas;
+ /* Start using mast->l for the left side. */
+ tmp_mas.node = mast->l->node;
+ *mast->l = tmp_mas;
+ } else {
+ tmp_mas.node = mast->r->node;
+ *mast->r = tmp_mas;
+ split = slot_total - split;
+ }
+ split = mab_no_null_split(mast->bn, split, mt_slots[mast->bn->type]);
+ /* Update parent slot for split calculation. */
+ if (left)
+ mast->orig_l->offset += end + 1;
+
+ mast_split_data(mast, mas, split);
+ mast_fill_bnode(mast, mas, 2);
+ mas_split_final_node(mast, mas, height + 1);
+ return true;
+}
+
+/*
+ * mas_split() - Split data that is too big for one node into two.
+ * @mas: The maple state
+ * @b_node: The maple big node
+ * Return: 1 on success, 0 on failure.
+ */
+static int mas_split(struct ma_state *mas, struct maple_big_node *b_node)
+{
+ struct maple_subtree_state mast;
+ int height = 0;
+ unsigned char mid_split, split = 0;
+ struct maple_enode *old;
+
+ /*
+ * Splitting is handled differently from any other B-tree; the Maple
+ * Tree splits upwards. Splitting up means that the split operation
+ * occurs when the walk of the tree hits the leaves and not on the way
+ * down. The reason for splitting up is that it is impossible to know
+ * how much space will be needed until the leaf is (or leaves are)
+ * reached. Since overwriting data is allowed and a range could
+ * overwrite more than one range or result in changing one entry into 3
+ * entries, it is impossible to know if a split is required until the
+ * data is examined.
+ *
+ * Splitting is a balancing act between keeping allocations to a minimum
+ * and avoiding a 'jitter' event where a tree is expanded to make room
+ * for an entry followed by a contraction when the entry is removed. To
+ * accomplish the balance, there are empty slots remaining in both left
+ * and right nodes after a split.
+ */
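+ /*
+ * Illustrative sketch (hypothetical values, not taken from a real
+ * tree): with 3-slot leaves, storing a 4th entry into a full leaf
+ * splits it into two leaves that each keep a free slot:
+ *
+ * [A B C] + store D => [A B] [C D]
+ *
+ * so an immediately following store on either side can proceed
+ * without another split.
+ */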
+ MA_STATE(l_mas, mas->tree, mas->index, mas->last);
+ MA_STATE(r_mas, mas->tree, mas->index, mas->last);
+ MA_STATE(prev_l_mas, mas->tree, mas->index, mas->last);
+ MA_STATE(prev_r_mas, mas->tree, mas->index, mas->last);
+
+ trace_ma_op(__func__, mas);
+ mas->depth = mas_mt_height(mas);
+ /* Allocation failures will happen early. */
+ mas_node_count(mas, 1 + mas->depth * 2);
+ if (mas_is_err(mas))
+ return 0;
+
+ mast.l = &l_mas;
+ mast.r = &r_mas;
+ mast.orig_l = &prev_l_mas;
+ mast.orig_r = &prev_r_mas;
+ mast.bn = b_node;
+
+ while (height++ <= mas->depth) {
+ if (mt_slots[b_node->type] > b_node->b_end) {
+ mas_split_final_node(&mast, mas, height);
+ break;
+ }
+
+ l_mas = r_mas = *mas;
+ l_mas.node = mas_new_ma_node(mas, b_node);
+ r_mas.node = mas_new_ma_node(mas, b_node);
+ /*
+ * Another way that 'jitter' is avoided is to terminate a split up
+ * early if the left or right node has space to spare. This is
+ * referred to as "pushing left" or "pushing right" and is similar
+ * to the B* tree, except the nodes to the left or right can rarely
+ * be reused due to RCU; however, the ripple upwards is halted,
+ * which is a significant saving.
+ */
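+ /*
+ * Hypothetical example: if this node overflows with [C D E F]+G
+ * and the left sibling [A B _ _] has spare slots, the combined
+ * data is redistributed across the pair (e.g. [A B C D] [E F G])
+ * and the split stops here instead of rippling to the root.
+ */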
+ /* Try to push left. */
+ if (mas_push_data(mas, height, &mast, true))
+ break;
+
+ /* Try to push right. */
+ if (mas_push_data(mas, height, &mast, false))
+ break;
+
+ split = mab_calc_split(mas, b_node, &mid_split, prev_l_mas.min);
+ mast_split_data(&mast, mas, split);
+ /*
+ * Usually correct, mab_mas_cp in the above call overwrites
+ * r->max.
+ */
+ mast.r->max = mas->max;
+ mast_fill_bnode(&mast, mas, 1);
+ prev_l_mas = *mast.l;
+ prev_r_mas = *mast.r;
+ }
+
+ /* Set the original node as dead */
+ old = mas->node;
+ mas->node = l_mas.node;
+ mas_wmb_replace(mas, old);
+ mtree_range_walk(mas);
+ return 1;
+}
+
+/*
+ * mas_reuse_node() - Reuse the node to store the data.
+ * @wr_mas: The maple write state
+ * @bn: The maple big node
+ * @end: The end of the data.
+ *
+ * Will always return false in RCU mode.
+ *
+ * Return: True if node was reused, false otherwise.
+ */
+static inline bool mas_reuse_node(struct ma_wr_state *wr_mas,
+ struct maple_big_node *bn, unsigned char end)
+{
+ /* Need to be rcu safe. */
+ if (mt_in_rcu(wr_mas->mas->tree))
+ return false;
+
+ if (end > bn->b_end) {
+ int clear = mt_slots[wr_mas->type] - bn->b_end;
+
+ memset(wr_mas->slots + bn->b_end, 0, sizeof(void *) * clear--);
+ memset(wr_mas->pivots + bn->b_end, 0, sizeof(void *) * clear);
+ }
+ mab_mas_cp(bn, 0, bn->b_end, wr_mas->mas, false);
+ return true;
+}
+
+/*
+ * mas_commit_b_node() - Commit the big node into the tree.
+ * @wr_mas: The maple write state
+ * @b_node: The maple big node
+ * @end: The end of the data.
+ */
+static noinline_for_kasan int mas_commit_b_node(struct ma_wr_state *wr_mas,
+ struct maple_big_node *b_node, unsigned char end)
+{
+ struct maple_node *node;
+ struct maple_enode *old_enode;
+ unsigned char b_end = b_node->b_end;
+ enum maple_type b_type = b_node->type;
+
+ old_enode = wr_mas->mas->node;
+ if ((b_end < mt_min_slots[b_type]) &&
+ (!mte_is_root(old_enode)) &&
+ (mas_mt_height(wr_mas->mas) > 1))
+ return mas_rebalance(wr_mas->mas, b_node);
+
+ if (b_end >= mt_slots[b_type])
+ return mas_split(wr_mas->mas, b_node);
+
+ if (mas_reuse_node(wr_mas, b_node, end))
+ goto reuse_node;
+
+ mas_node_count(wr_mas->mas, 1);
+ if (mas_is_err(wr_mas->mas))
+ return 0;
+
+ node = mas_pop_node(wr_mas->mas);
+ node->parent = mas_mn(wr_mas->mas)->parent;
+ wr_mas->mas->node = mt_mk_node(node, b_type);
+ mab_mas_cp(b_node, 0, b_end, wr_mas->mas, false);
+ mas_replace_node(wr_mas->mas, old_enode);
+reuse_node:
+ mas_update_gap(wr_mas->mas);
+ return 1;
+}
+
+/*
+ * mas_root_expand() - Expand a root to a node
+ * @mas: The maple state
+ * @entry: The entry to store into the tree
+ */
+static inline int mas_root_expand(struct ma_state *mas, void *entry)
+{
+ void *contents = mas_root_locked(mas);
+ enum maple_type type = maple_leaf_64;
+ struct maple_node *node;
+ void __rcu **slots;
+ unsigned long *pivots;
+ int slot = 0;
+
+ mas_node_count(mas, 1);
+ if (unlikely(mas_is_err(mas)))
+ return 0;
+
+ node = mas_pop_node(mas);
+ pivots = ma_pivots(node, type);
+ slots = ma_slots(node, type);
+ node->parent = ma_parent_ptr(mas_tree_parent(mas));
+ mas->node = mt_mk_node(node, type);
+
+ if (mas->index) {
+ if (contents) {
+ rcu_assign_pointer(slots[slot], contents);
+ if (likely(mas->index > 1))
+ slot++;
+ }
+ pivots[slot++] = mas->index - 1;
+ }
+
+ rcu_assign_pointer(slots[slot], entry);
+ mas->offset = slot;
+ pivots[slot] = mas->last;
+ if (mas->last != ULONG_MAX)
+ pivots[++slot] = ULONG_MAX;
+
+ mas->depth = 1;
+ mas_set_height(mas);
+ ma_set_meta(node, maple_leaf_64, 0, slot);
+ /* swap the new root into the tree */
+ rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
+ return slot;
+}
+
+static inline void mas_store_root(struct ma_state *mas, void *entry)
+{
+ if (likely((mas->last != 0) || (mas->index != 0)))
+ mas_root_expand(mas, entry);
+ else if (((unsigned long) (entry) & 3) == 2)
+ mas_root_expand(mas, entry);
+ else {
+ rcu_assign_pointer(mas->tree->ma_root, entry);
+ mas->node = MAS_START;
+ }
+}
+
+/*
+ * mas_is_span_wr() - Check if the write needs to be treated as a write that
+ * spans the node.
+ * @wr_mas: The maple write state
+ *
+ * Spanning writes are writes that start in one node and end in another, or
+ * writes of %NULL that would cause a node to end with a %NULL.
+ *
+ * Return: True if this is a spanning write, false otherwise.
+ */
+static bool mas_is_span_wr(struct ma_wr_state *wr_mas)
+{
+ unsigned long max = wr_mas->r_max;
+ unsigned long last = wr_mas->mas->last;
+ enum maple_type type = wr_mas->type;
+ void *entry = wr_mas->entry;
+
+ /* Contained in this pivot, fast path */
+ if (last < max)
+ return false;
+
+ if (ma_is_leaf(type)) {
+ max = wr_mas->mas->max;
+ if (last < max)
+ return false;
+ }
+
+ if (last == max) {
+ /*
+ * The last entry of leaf node cannot be NULL unless it is the
+ * rightmost node (writing ULONG_MAX), otherwise it spans slots.
+ */
+ if (entry || last == ULONG_MAX)
+ return false;
+ }
+
+ trace_ma_write(__func__, wr_mas->mas, wr_mas->r_max, entry);
+ return true;
+}
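+
+/*
+ * Illustrative example (hypothetical ranges): in a leaf covering 1-100, a
+ * store of 50-120 ends beyond the node's maximum and is a spanning write.
+ * A store of NULL ending exactly at 100 is also treated as spanning unless
+ * this is the rightmost leaf (ending at ULONG_MAX), since a node may not
+ * otherwise end in NULL.
+ */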
+
+static inline void mas_wr_walk_descend(struct ma_wr_state *wr_mas)
+{
+ wr_mas->type = mte_node_type(wr_mas->mas->node);
+ mas_wr_node_walk(wr_mas);
+ wr_mas->slots = ma_slots(wr_mas->node, wr_mas->type);
+}
+
+static inline void mas_wr_walk_traverse(struct ma_wr_state *wr_mas)
+{
+ wr_mas->mas->max = wr_mas->r_max;
+ wr_mas->mas->min = wr_mas->r_min;
+ wr_mas->mas->node = wr_mas->content;
+ wr_mas->mas->offset = 0;
+ wr_mas->mas->depth++;
+}
+
+/*
+ * mas_wr_walk() - Walk the tree for a write.
+ * @wr_mas: The maple write state
+ *
+ * Uses mas_slot_locked() and does not need to worry about dead nodes.
+ *
+ * Return: True if it's contained in a node, false on spanning write.
+ */
+static bool mas_wr_walk(struct ma_wr_state *wr_mas)
+{
+ struct ma_state *mas = wr_mas->mas;
+
+ while (true) {
+ mas_wr_walk_descend(wr_mas);
+ if (unlikely(mas_is_span_wr(wr_mas)))
+ return false;
+
+ wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
+ mas->offset);
+ if (ma_is_leaf(wr_mas->type))
+ return true;
+
+ mas_wr_walk_traverse(wr_mas);
+ }
+
+ return true;
+}
+
+static bool mas_wr_walk_index(struct ma_wr_state *wr_mas)
+{
+ struct ma_state *mas = wr_mas->mas;
+
+ while (true) {
+ mas_wr_walk_descend(wr_mas);
+ wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
+ mas->offset);
+ if (ma_is_leaf(wr_mas->type))
+ return true;
+ mas_wr_walk_traverse(wr_mas);
+ }
+ return true;
+}
+
+/*
+ * mas_extend_spanning_null() - Extend a store of a %NULL to include surrounding %NULLs.
+ * @l_wr_mas: The left maple write state
+ * @r_wr_mas: The right maple write state
+ */
+static inline void mas_extend_spanning_null(struct ma_wr_state *l_wr_mas,
+ struct ma_wr_state *r_wr_mas)
+{
+ struct ma_state *r_mas = r_wr_mas->mas;
+ struct ma_state *l_mas = l_wr_mas->mas;
+ unsigned char l_slot;
+
+ l_slot = l_mas->offset;
+ if (!l_wr_mas->content)
+ l_mas->index = l_wr_mas->r_min;
+
+ if ((l_mas->index == l_wr_mas->r_min) &&
+ (l_slot &&
+ !mas_slot_locked(l_mas, l_wr_mas->slots, l_slot - 1))) {
+ if (l_slot > 1)
+ l_mas->index = l_wr_mas->pivots[l_slot - 2] + 1;
+ else
+ l_mas->index = l_mas->min;
+
+ l_mas->offset = l_slot - 1;
+ }
+
+ if (!r_wr_mas->content) {
+ if (r_mas->last < r_wr_mas->r_max)
+ r_mas->last = r_wr_mas->r_max;
+ r_mas->offset++;
+ } else if ((r_mas->last == r_wr_mas->r_max) &&
+ (r_mas->last < r_mas->max) &&
+ !mas_slot_locked(r_mas, r_wr_mas->slots, r_mas->offset + 1)) {
+ r_mas->last = mas_safe_pivot(r_mas, r_wr_mas->pivots,
+ r_wr_mas->type, r_mas->offset + 1);
+ r_mas->offset++;
+ }
+}
+
+static inline void *mas_state_walk(struct ma_state *mas)
+{
+ void *entry;
+
+ entry = mas_start(mas);
+ if (mas_is_none(mas))
+ return NULL;
+
+ if (mas_is_ptr(mas))
+ return entry;
+
+ return mtree_range_walk(mas);
+}
+
+/*
+ * mtree_lookup_walk() - Internal quick lookup that does not keep maple state up
+ * to date.
+ *
+ * @mas: The maple state.
+ *
+ * Note: Leaves @mas in an undesirable state.
+ * Return: The entry for @mas->index or %NULL on dead node.
+ */
+static inline void *mtree_lookup_walk(struct ma_state *mas)
+{
+ unsigned long *pivots;
+ unsigned char offset;
+ struct maple_node *node;
+ struct maple_enode *next;
+ enum maple_type type;
+ void __rcu **slots;
+ unsigned char end;
+ unsigned long max;
+
+ next = mas->node;
+ max = ULONG_MAX;
+ do {
+ offset = 0;
+ node = mte_to_node(next);
+ type = mte_node_type(next);
+ pivots = ma_pivots(node, type);
+ end = ma_data_end(node, type, pivots, max);
+ if (unlikely(ma_dead_node(node)))
+ goto dead_node;
+ do {
+ if (pivots[offset] >= mas->index) {
+ max = pivots[offset];
+ break;
+ }
+ } while (++offset < end);
+
+ slots = ma_slots(node, type);
+ next = mt_slot(mas->tree, slots, offset);
+ if (unlikely(ma_dead_node(node)))
+ goto dead_node;
+ } while (!ma_is_leaf(type));
+
+ return (void *)next;
+
+dead_node:
+ mas_reset(mas);
+ return NULL;
+}
+
+static void mte_destroy_walk(struct maple_enode *, struct maple_tree *);
+/*
+ * mas_new_root() - Create a new root node that only contains the entry passed
+ * in.
+ * @mas: The maple state
+ * @entry: The entry to store.
+ *
+ * Only valid when @mas->index == 0 and @mas->last == ULONG_MAX.
+ *
+ * Return: 0 on error, 1 on success.
+ */
+static inline int mas_new_root(struct ma_state *mas, void *entry)
+{
+ struct maple_enode *root = mas_root_locked(mas);
+ enum maple_type type = maple_leaf_64;
+ struct maple_node *node;
+ void __rcu **slots;
+ unsigned long *pivots;
+
+ if (!entry && !mas->index && mas->last == ULONG_MAX) {
+ mas->depth = 0;
+ mas_set_height(mas);
+ rcu_assign_pointer(mas->tree->ma_root, entry);
+ mas->node = MAS_START;
+ goto done;
+ }
+
+ mas_node_count(mas, 1);
+ if (mas_is_err(mas))
+ return 0;
+
+ node = mas_pop_node(mas);
+ pivots = ma_pivots(node, type);
+ slots = ma_slots(node, type);
+ node->parent = ma_parent_ptr(mas_tree_parent(mas));
+ mas->node = mt_mk_node(node, type);
+ rcu_assign_pointer(slots[0], entry);
+ pivots[0] = mas->last;
+ mas->depth = 1;
+ mas_set_height(mas);
+ rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
+
+done:
+ if (xa_is_node(root))
+ mte_destroy_walk(root, mas->tree);
+
+ return 1;
+}
+
+/*
+ * mas_wr_spanning_store() - Create a subtree with the store operation completed
+ * and new nodes where necessary, then place the sub-tree in the actual tree.
+ * Note that mas is expected to point to the node which caused the store to
+ * span.
+ * @wr_mas: The maple write state
+ *
+ * Return: 0 on error, positive on success.
+ */
+static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas)
+{
+ struct maple_subtree_state mast;
+ struct maple_big_node b_node;
+ struct ma_state *mas;
+ unsigned char height;
+
+ /* Left and Right side of spanning store */
+ MA_STATE(l_mas, NULL, 0, 0);
+ MA_STATE(r_mas, NULL, 0, 0);
+ MA_WR_STATE(r_wr_mas, &r_mas, wr_mas->entry);
+ MA_WR_STATE(l_wr_mas, &l_mas, wr_mas->entry);
+
+ /*
+ * A store operation that spans multiple nodes is called a spanning
+ * store and is handled early in the store call stack by the function
+ * mas_is_span_wr(). When a spanning store is identified, the maple
+ * state is duplicated. The first maple state walks the left tree path
+ * to ``index``, the duplicate walks the right tree path to ``last``.
+ * The data in the two nodes are combined into a single node, two nodes,
+ * or possibly three nodes (see the 3-way split above). A ``NULL``
+ * written to the last entry of a node is considered a spanning store as
+ * a rebalance is required for the operation to complete and an overflow
+ * of data may happen.
+ */
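+ /*
+ * Illustrative example (hypothetical ranges): with leaves
+ * [1-5 | 6-10] and [11-15 | 16-20], storing 8-17 starts in the
+ * left leaf and ends in the right one. Both paths are walked and
+ * the surviving pieces (1-5, 6-7 and 18-20) are recombined with
+ * the new entry into the big node before the rebalance.
+ */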
+ mas = wr_mas->mas;
+ trace_ma_op(__func__, mas);
+
+ if (unlikely(!mas->index && mas->last == ULONG_MAX))
+ return mas_new_root(mas, wr_mas->entry);
+ /*
+ * Node rebalancing may occur due to this store, so there may be three new
+ * entries per level plus a new root.
+ */
+ height = mas_mt_height(mas);
+ mas_node_count(mas, 1 + height * 3);
+ if (mas_is_err(mas))
+ return 0;
+
+ /*
+ * Set up right side. Need to get to the next offset after the spanning
+ * store to ensure it's not NULL and to combine both the next node and
+ * the node with the start together.
+ */
+ r_mas = *mas;
+ /* Avoid overflow, walk to next slot in the tree. */
+ if (r_mas.last + 1)
+ r_mas.last++;
+
+ r_mas.index = r_mas.last;
+ mas_wr_walk_index(&r_wr_mas);
+ r_mas.last = r_mas.index = mas->last;
+
+ /* Set up left side. */
+ l_mas = *mas;
+ mas_wr_walk_index(&l_wr_mas);
+
+ if (!wr_mas->entry) {
+ mas_extend_spanning_null(&l_wr_mas, &r_wr_mas);
+ mas->offset = l_mas.offset;
+ mas->index = l_mas.index;
+ mas->last = l_mas.last = r_mas.last;
+ }
+
+ /* expanding NULLs may make this cover the entire range */
+ if (!l_mas.index && r_mas.last == ULONG_MAX) {
+ mas_set_range(mas, 0, ULONG_MAX);
+ return mas_new_root(mas, wr_mas->entry);
+ }
+
+ memset(&b_node, 0, sizeof(struct maple_big_node));
+ /* Copy l_mas and store the value in b_node. */
+ mas_store_b_node(&l_wr_mas, &b_node, l_wr_mas.node_end);
+ /* Copy r_mas into b_node. */
+ if (r_mas.offset <= r_wr_mas.node_end)
+ mas_mab_cp(&r_mas, r_mas.offset, r_wr_mas.node_end,
+ &b_node, b_node.b_end + 1);
+ else
+ b_node.b_end++;
+
+ /* Stop spanning searches by searching for just index. */
+ l_mas.index = l_mas.last = mas->index;
+
+ mast.bn = &b_node;
+ mast.orig_l = &l_mas;
+ mast.orig_r = &r_mas;
+ /* Combine l_mas and r_mas and split them up evenly again. */
+ return mas_spanning_rebalance(mas, &mast, height + 1);
+}
+
+/*
+ * mas_wr_node_store() - Attempt to store the value in a node
+ * @wr_mas: The maple write state
+ *
+ * Attempts to reuse the node, but may allocate.
+ *
+ * Return: True if stored, false otherwise
+ */
+static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas,
+ unsigned char new_end)
+{
+ struct ma_state *mas = wr_mas->mas;
+ void __rcu **dst_slots;
+ unsigned long *dst_pivots;
+ unsigned char dst_offset, offset_end = wr_mas->offset_end;
+ struct maple_node reuse, *newnode;
+ unsigned char copy_size, node_pivots = mt_pivots[wr_mas->type];
+ bool in_rcu = mt_in_rcu(mas->tree);
+
+ /*
+ * Fall back to the slow path if the node would be left below the
+ * minimum occupancy (unless this is a bulk operation).
+ */
+ if (!mte_is_root(mas->node) && (new_end <= mt_min_slots[wr_mas->type]) &&
+ !(mas->mas_flags & MA_STATE_BULK))
+ return false;
+
+ if (mas->last == wr_mas->end_piv)
+ offset_end++; /* don't copy this offset */
+ else if (unlikely(wr_mas->r_max == ULONG_MAX))
+ mas_bulk_rebalance(mas, wr_mas->node_end, wr_mas->type);
+
+ /* set up node. */
+ if (in_rcu) {
+ mas_node_count(mas, 1);
+ if (mas_is_err(mas))
+ return false;
+
+ newnode = mas_pop_node(mas);
+ } else {
+ memset(&reuse, 0, sizeof(struct maple_node));
+ newnode = &reuse;
+ }
+
+ newnode->parent = mas_mn(mas)->parent;
+ dst_pivots = ma_pivots(newnode, wr_mas->type);
+ dst_slots = ma_slots(newnode, wr_mas->type);
+ /* Copy from start to insert point */
+ memcpy(dst_pivots, wr_mas->pivots, sizeof(unsigned long) * mas->offset);
+ memcpy(dst_slots, wr_mas->slots, sizeof(void *) * mas->offset);
+
+ /* Handle insert of new range starting after old range */
+ if (wr_mas->r_min < mas->index) {
+ rcu_assign_pointer(dst_slots[mas->offset], wr_mas->content);
+ dst_pivots[mas->offset++] = mas->index - 1;
+ }
+
+ /* Store the new entry and range end. */
+ if (mas->offset < node_pivots)
+ dst_pivots[mas->offset] = mas->last;
+ rcu_assign_pointer(dst_slots[mas->offset], wr_mas->entry);
+
+ /*
+ * this range wrote to the end of the node or it overwrote the rest of
+ * the data
+ */
+ if (offset_end > wr_mas->node_end)
+ goto done;
+
+ dst_offset = mas->offset + 1;
+ /* Copy to the end of node if necessary. */
+ copy_size = wr_mas->node_end - offset_end + 1;
+ memcpy(dst_slots + dst_offset, wr_mas->slots + offset_end,
+ sizeof(void *) * copy_size);
+ memcpy(dst_pivots + dst_offset, wr_mas->pivots + offset_end,
+ sizeof(unsigned long) * (copy_size - 1));
+
+ if (new_end < node_pivots)
+ dst_pivots[new_end] = mas->max;
+
+done:
+ mas_leaf_set_meta(mas, newnode, dst_pivots, maple_leaf_64, new_end);
+ if (in_rcu) {
+ struct maple_enode *old_enode = mas->node;
+
+ mas->node = mt_mk_node(newnode, wr_mas->type);
+ mas_replace_node(mas, old_enode);
+ } else {
+ memcpy(wr_mas->node, newnode, sizeof(struct maple_node));
+ }
+ trace_ma_write(__func__, mas, 0, wr_mas->entry);
+ mas_update_gap(mas);
+ return true;
+}
+
+/*
+ * mas_wr_slot_store() - Attempt to store a value in a slot.
+ * @wr_mas: the maple write state
+ *
+ * Return: True if stored, false otherwise
+ */
+static inline bool mas_wr_slot_store(struct ma_wr_state *wr_mas)
+{
+ struct ma_state *mas = wr_mas->mas;
+ unsigned char offset = mas->offset;
+ void __rcu **slots = wr_mas->slots;
+ bool gap = false;
+
+ gap |= !mt_slot_locked(mas->tree, slots, offset);
+ gap |= !mt_slot_locked(mas->tree, slots, offset + 1);
+
+ if (wr_mas->offset_end - offset == 1) {
+ if (mas->index == wr_mas->r_min) {
+ /* Overwriting the range and a part of the next one */
+ rcu_assign_pointer(slots[offset], wr_mas->entry);
+ wr_mas->pivots[offset] = mas->last;
+ } else {
+ /* Overwriting a part of the range and the next one */
+ rcu_assign_pointer(slots[offset + 1], wr_mas->entry);
+ wr_mas->pivots[offset] = mas->index - 1;
+ mas->offset++; /* Keep mas accurate. */
+ }
+ } else if (!mt_in_rcu(mas->tree)) {
+ /*
+ * Expand the range, only partially overwriting the previous and
+ * next ranges
+ */
+ gap |= !mt_slot_locked(mas->tree, slots, offset + 2);
+ rcu_assign_pointer(slots[offset + 1], wr_mas->entry);
+ wr_mas->pivots[offset] = mas->index - 1;
+ wr_mas->pivots[offset + 1] = mas->last;
+ mas->offset++; /* Keep mas accurate. */
+ } else {
+ return false;
+ }
+
+ trace_ma_write(__func__, mas, 0, wr_mas->entry);
+ /*
+ * Only update gap when the new entry is empty or there is an empty
+ * entry in the original two ranges.
+ */
+ if (!wr_mas->entry || gap)
+ mas_update_gap(mas);
+
+ return true;
+}
+
+static inline void mas_wr_extend_null(struct ma_wr_state *wr_mas)
+{
+ struct ma_state *mas = wr_mas->mas;
+
+ if (!wr_mas->slots[wr_mas->offset_end]) {
+ /* If this one is null, the next and prev are not */
+ mas->last = wr_mas->end_piv;
+ } else {
+ /* Check next slot(s) if we are overwriting the end */
+ if ((mas->last == wr_mas->end_piv) &&
+ (wr_mas->node_end != wr_mas->offset_end) &&
+ !wr_mas->slots[wr_mas->offset_end + 1]) {
+ wr_mas->offset_end++;
+ if (wr_mas->offset_end == wr_mas->node_end)
+ mas->last = mas->max;
+ else
+ mas->last = wr_mas->pivots[wr_mas->offset_end];
+ wr_mas->end_piv = mas->last;
+ }
+ }
+
+ if (!wr_mas->content) {
+ /* If this one is null, the next and prev are not */
+ mas->index = wr_mas->r_min;
+ } else {
+ /* Check prev slot if we are overwriting the start */
+ if (mas->index == wr_mas->r_min && mas->offset &&
+ !wr_mas->slots[mas->offset - 1]) {
+ mas->offset--;
+ wr_mas->r_min = mas->index =
+ mas_safe_min(mas, wr_mas->pivots, mas->offset);
+ wr_mas->r_max = wr_mas->pivots[mas->offset];
+ }
+ }
+}
+
+static inline void mas_wr_end_piv(struct ma_wr_state *wr_mas)
+{
+ while ((wr_mas->offset_end < wr_mas->node_end) &&
+ (wr_mas->mas->last > wr_mas->pivots[wr_mas->offset_end]))
+ wr_mas->offset_end++;
+
+ if (wr_mas->offset_end < wr_mas->node_end)
+ wr_mas->end_piv = wr_mas->pivots[wr_mas->offset_end];
+ else
+ wr_mas->end_piv = wr_mas->mas->max;
+
+ if (!wr_mas->entry)
+ mas_wr_extend_null(wr_mas);
+}
+
+static inline unsigned char mas_wr_new_end(struct ma_wr_state *wr_mas)
+{
+ struct ma_state *mas = wr_mas->mas;
+ unsigned char new_end = wr_mas->node_end + 2;
+
+ new_end -= wr_mas->offset_end - mas->offset;
+ if (wr_mas->r_min == mas->index)
+ new_end--;
+
+ if (wr_mas->end_piv == mas->last)
+ new_end--;
+
+ return new_end;
+}
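+
+/*
+ * Worked example for mas_wr_new_end() (hypothetical values): an exact
+ * overwrite of one slot has offset_end == mas->offset, r_min == mas->index
+ * and end_piv == mas->last, so new_end = node_end + 2 - 0 - 1 - 1, i.e. the
+ * node size is unchanged.
+ */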
+
+/*
+ * mas_wr_append() - Attempt to append the entry to the node
+ * @wr_mas: The maple write state
+ * @new_end: The end of the node after the modification
+ *
+ * This is currently unsafe in RCU mode since readers may have cached the end
+ * of the node while its contents are being updated, which could result in
+ * them reading inaccurate information.
+ *
+ * Return: True if appended, false otherwise
+ */
+static inline bool mas_wr_append(struct ma_wr_state *wr_mas,
+ unsigned char new_end)
+{
+ struct ma_state *mas;
+ void __rcu **slots;
+ unsigned char end;
+
+ mas = wr_mas->mas;
+ if (mt_in_rcu(mas->tree))
+ return false;
+
+ /* An append can only happen at the end of the node. */
+ end = wr_mas->node_end;
+ if (mas->offset != end)
+ return false;
+
+ if (new_end < mt_pivots[wr_mas->type]) {
+ wr_mas->pivots[new_end] = wr_mas->pivots[end];
+ ma_set_meta(wr_mas->node, wr_mas->type, 0, new_end);
+ }
+
+ slots = wr_mas->slots;
+ if (new_end == end + 1) {
+ if (mas->last == wr_mas->r_max) {
+ /* Append to end of range */
+ rcu_assign_pointer(slots[new_end], wr_mas->entry);
+ wr_mas->pivots[end] = mas->index - 1;
+ mas->offset = new_end;
+ } else {
+ /* Append to start of range */
+ rcu_assign_pointer(slots[new_end], wr_mas->content);
+ wr_mas->pivots[end] = mas->last;
+ rcu_assign_pointer(slots[end], wr_mas->entry);
+ }
+ } else {
+ /* Append to the range without touching any boundaries. */
+ rcu_assign_pointer(slots[new_end], wr_mas->content);
+ wr_mas->pivots[end + 1] = mas->last;
+ rcu_assign_pointer(slots[end + 1], wr_mas->entry);
+ wr_mas->pivots[end] = mas->index - 1;
+ mas->offset = end + 1;
+ }
+
+ if (!wr_mas->content || !wr_mas->entry)
+ mas_update_gap(mas);
+
+ trace_ma_write(__func__, mas, new_end, wr_mas->entry);
+ return true;
+}
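+
+/*
+ * Illustrative append example (hypothetical layout): if the last slot of a
+ * leaf covers 15-100 => NULL, storing 15-30 => B appends a single boundary
+ * (15-30 => B, 31-100 => NULL), while storing 21-30 => B appends two
+ * (15-20 => NULL, 21-30 => B, 31-100 => NULL).
+ */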
+
+/*
+ * mas_wr_bnode() - Slow path for a modification.
+ * @wr_mas: The write maple state
+ *
+ * This is where splits and rebalances end up.
+ */
+static void mas_wr_bnode(struct ma_wr_state *wr_mas)
+{
+ struct maple_big_node b_node;
+
+ trace_ma_write(__func__, wr_mas->mas, 0, wr_mas->entry);
+ memset(&b_node, 0, sizeof(struct maple_big_node));
+ mas_store_b_node(wr_mas, &b_node, wr_mas->offset_end);
+ mas_commit_b_node(wr_mas, &b_node, wr_mas->node_end);
+}
+
+static inline void mas_wr_modify(struct ma_wr_state *wr_mas)
+{
+ struct ma_state *mas = wr_mas->mas;
+ unsigned char new_end;
+
+ /* Direct replacement */
+ if (wr_mas->r_min == mas->index && wr_mas->r_max == mas->last) {
+ rcu_assign_pointer(wr_mas->slots[mas->offset], wr_mas->entry);
+ if (!!wr_mas->entry ^ !!wr_mas->content)
+ mas_update_gap(mas);
+ return;
+ }
+
+ /*
+ * new_end exceeds the size of the maple node and cannot enter the fast
+ * path.
+ */
+ new_end = mas_wr_new_end(wr_mas);
+ if (new_end >= mt_slots[wr_mas->type])
+ goto slow_path;
+
+ /* Attempt to append */
+ if (mas_wr_append(wr_mas, new_end))
+ return;
+
+ if (new_end == wr_mas->node_end && mas_wr_slot_store(wr_mas))
+ return;
+
+ if (mas_wr_node_store(wr_mas, new_end))
+ return;
+
+ if (mas_is_err(mas))
+ return;
+
+slow_path:
+ mas_wr_bnode(wr_mas);
+}
+
+/*
+ * mas_wr_store_entry() - Internal call to store a value
+ * @wr_mas: The maple write state
+ *
+ * Return: The contents that were previously stored at the index.
+ */
+static inline void *mas_wr_store_entry(struct ma_wr_state *wr_mas)
+{
+ struct ma_state *mas = wr_mas->mas;
+
+ wr_mas->content = mas_start(mas);
+ if (mas_is_none(mas) || mas_is_ptr(mas)) {
+ mas_store_root(mas, wr_mas->entry);
+ return wr_mas->content;
+ }
+
+ if (unlikely(!mas_wr_walk(wr_mas))) {
+ mas_wr_spanning_store(wr_mas);
+ return wr_mas->content;
+ }
+
+ /* At this point, we are at the leaf node that needs to be altered. */
+ mas_wr_end_piv(wr_mas);
+ /* New root for a single pointer */
+ if (unlikely(!mas->index && mas->last == ULONG_MAX)) {
+ mas_new_root(mas, wr_mas->entry);
+ return wr_mas->content;
+ }
+
+ mas_wr_modify(wr_mas);
+ return wr_mas->content;
+}
+
+/**
+ * mas_insert() - Internal call to insert a value
+ * @mas: The maple state
+ * @entry: The entry to store
+ *
+ * Return: %NULL on success, otherwise the contents that already exist at the
+ * requested index. The maple state needs to be checked for error conditions.
+ */
+static inline void *mas_insert(struct ma_state *mas, void *entry)
+{
+ MA_WR_STATE(wr_mas, mas, entry);
+
+ /*
+ * Inserting a new range inserts either 0, 1, or 2 pivots within the
+ * tree. If the insert fits exactly into an existing gap with a value
+ * of NULL, then the slot only needs to be written with the new value.
+ * If the range being inserted is adjacent to another range, then only a
+ * single pivot needs to be inserted (as well as writing the entry). If
+ * the new range is within a gap but does not touch any other ranges,
+ * then two pivots need to be inserted: the start - 1, and the end. As
+ * usual, the entry must be written. Most operations require a new node
+ * to be allocated and replace an existing node to ensure RCU safety,
+ * when in RCU mode. The exception to requiring a newly allocated node
+ * is when inserting at the end of a node (appending). When done
+ * carefully, appending can reuse the node in place.
+ */
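+ /*
+ * Worked example (hypothetical layout): a leaf holding 0-4 => A,
+ * 5-20 => NULL, 21-50 => B. Inserting 5-20 => C exactly fills the
+ * gap, so only the slot is rewritten (zero new pivots). Inserting
+ * 5-7 => C is adjacent to the gap's start and adds one pivot (7).
+ * Inserting 10-12 => C touches neither neighbour and adds two
+ * pivots (9 and 12).
+ */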
+ wr_mas.content = mas_start(mas);
+ if (wr_mas.content)
+ goto exists;
+
+ if (mas_is_none(mas) || mas_is_ptr(mas)) {
+ mas_store_root(mas, entry);
+ return NULL;
+ }
+
+ /* spanning writes always overwrite something */
+ if (!mas_wr_walk(&wr_mas))
+ goto exists;
+
+ /* At this point, we are at the leaf node that needs to be altered. */
+ wr_mas.offset_end = mas->offset;
+ wr_mas.end_piv = wr_mas.r_max;
+
+ if (wr_mas.content || (mas->last > wr_mas.r_max))
+ goto exists;
+
+ if (!entry)
+ return NULL;
+
+ mas_wr_modify(&wr_mas);
+ return wr_mas.content;
+
+exists:
+ mas_set_err(mas, -EEXIST);
+ return wr_mas.content;
+}
+
+static inline void mas_rewalk(struct ma_state *mas, unsigned long index)
+{
+retry:
+ mas_set(mas, index);
+ mas_state_walk(mas);
+ if (mas_is_start(mas))
+ goto retry;
+}
+
+static inline bool mas_rewalk_if_dead(struct ma_state *mas,
+ struct maple_node *node, const unsigned long index)
+{
+ if (unlikely(ma_dead_node(node))) {
+ mas_rewalk(mas, index);
+ return true;
+ }
+ return false;
+}
+
+/*
+ * mas_prev_node() - Find the prev non-null entry at the same level in the
+ * tree.
+ * @mas: The maple state
+ * @min: The lower limit to search
+ *
+ * The prev node value will be mas->node[mas->offset] or MAS_NONE.
+ * Return: 1 if the node is dead, 0 otherwise.
+ */
+static inline int mas_prev_node(struct ma_state *mas, unsigned long min)
+{
+ enum maple_type mt;
+ int offset, level;
+ void __rcu **slots;
+ struct maple_node *node;
+ unsigned long *pivots;
+ unsigned long max;
+
+ node = mas_mn(mas);
+ if (!mas->min)
+ goto no_entry;
+
+ max = mas->min - 1;
+ if (max < min)
+ goto no_entry;
+
+ level = 0;
+ do {
+ if (ma_is_root(node))
+ goto no_entry;
+
+ /* Walk up. */
+ if (unlikely(mas_ascend(mas)))
+ return 1;
+ offset = mas->offset;
+ level++;
+ node = mas_mn(mas);
+ } while (!offset);
+
+ offset--;
+ mt = mte_node_type(mas->node);
+ while (level > 1) {
+ level--;
+ slots = ma_slots(node, mt);
+ mas->node = mas_slot(mas, slots, offset);
+ if (unlikely(ma_dead_node(node)))
+ return 1;
+
+ mt = mte_node_type(mas->node);
+ node = mas_mn(mas);
+ pivots = ma_pivots(node, mt);
+ offset = ma_data_end(node, mt, pivots, max);
+ if (unlikely(ma_dead_node(node)))
+ return 1;
+ }
+
+ slots = ma_slots(node, mt);
+ mas->node = mas_slot(mas, slots, offset);
+ pivots = ma_pivots(node, mt);
+ if (unlikely(ma_dead_node(node)))
+ return 1;
+
+ if (likely(offset))
+ mas->min = pivots[offset - 1] + 1;
+ mas->max = max;
+ mas->offset = mas_data_end(mas);
+ if (unlikely(mte_dead_node(mas->node)))
+ return 1;
+
+ return 0;
+
+no_entry:
+ if (unlikely(ma_dead_node(node)))
+ return 1;
+
+ mas->node = MAS_NONE;
+ return 0;
+}
+
+/*
+ * mas_prev_slot() - Get the entry in the previous slot
+ *
+ * @mas: The maple state
+ * @min: The lower limit to search
+ * @empty: Can be empty
+ * @set_underflow: Set the @mas->node to underflow state on limit.
+ *
+ * Return: The entry in the previous slot which is possibly NULL
+ */
+static void *mas_prev_slot(struct ma_state *mas, unsigned long min, bool empty,
+ bool set_underflow)
+{
+ void *entry;
+ void __rcu **slots;
+ unsigned long pivot;
+ enum maple_type type;
+ unsigned long *pivots;
+ struct maple_node *node;
+ unsigned long save_point = mas->index;
+
+retry:
+ node = mas_mn(mas);
+ type = mte_node_type(mas->node);
+ pivots = ma_pivots(node, type);
+ if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
+ goto retry;
+
+ if (mas->min <= min) {
+ pivot = mas_safe_min(mas, pivots, mas->offset);
+
+ if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
+ goto retry;
+
+ if (pivot <= min)
+ goto underflow;
+ }
+
+again:
+ if (likely(mas->offset)) {
+ mas->offset--;
+ mas->last = mas->index - 1;
+ mas->index = mas_safe_min(mas, pivots, mas->offset);
+ } else {
+ if (mas_prev_node(mas, min)) {
+ mas_rewalk(mas, save_point);
+ goto retry;
+ }
+
+ if (mas_is_none(mas))
+ goto underflow;
+
+ mas->last = mas->max;
+ node = mas_mn(mas);
+ type = mte_node_type(mas->node);
+ pivots = ma_pivots(node, type);
+ mas->index = pivots[mas->offset - 1] + 1;
+ }
+
+ slots = ma_slots(node, type);
+ entry = mas_slot(mas, slots, mas->offset);
+ if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
+ goto retry;
+
+ if (likely(entry))
+ return entry;
+
+ if (!empty) {
+ if (mas->index <= min)
+ goto underflow;
+
+ goto again;
+ }
+
+ return entry;
+
+underflow:
+ if (set_underflow)
+ mas->node = MAS_UNDERFLOW;
+ return NULL;
+}
+
+/*
+ * mas_next_node() - Get the next node at the same level in the tree.
+ * @mas: The maple state
+ * @node: The maple node
+ * @max: The maximum pivot value to check.
+ *
+ * The next value will be mas->node[mas->offset] or MAS_NONE.
+ * Return: 1 on dead node, 0 otherwise.
+ */
+static inline int mas_next_node(struct ma_state *mas, struct maple_node *node,
+ unsigned long max)
+{
+ unsigned long min;
+ unsigned long *pivots;
+ struct maple_enode *enode;
+ int level = 0;
+ unsigned char node_end;
+ enum maple_type mt;
+ void __rcu **slots;
+
+ if (mas->max >= max)
+ goto no_entry;
+
+ min = mas->max + 1;
+ level = 0;
+ do {
+ if (ma_is_root(node))
+ goto no_entry;
+
+ /* Walk up. */
+ if (unlikely(mas_ascend(mas)))
+ return 1;
+
+ level++;
+ node = mas_mn(mas);
+ mt = mte_node_type(mas->node);
+ pivots = ma_pivots(node, mt);
+ node_end = ma_data_end(node, mt, pivots, mas->max);
+ if (unlikely(ma_dead_node(node)))
+ return 1;
+
+ } while (unlikely(mas->offset == node_end));
+
+ slots = ma_slots(node, mt);
+ mas->offset++;
+ enode = mas_slot(mas, slots, mas->offset);
+ if (unlikely(ma_dead_node(node)))
+ return 1;
+
+ if (level > 1)
+ mas->offset = 0;
+
+ while (unlikely(level > 1)) {
+ level--;
+ mas->node = enode;
+ node = mas_mn(mas);
+ mt = mte_node_type(mas->node);
+ slots = ma_slots(node, mt);
+ enode = mas_slot(mas, slots, 0);
+ if (unlikely(ma_dead_node(node)))
+ return 1;
+ }
+
+ if (!mas->offset)
+ pivots = ma_pivots(node, mt);
+
+ mas->max = mas_safe_pivot(mas, pivots, mas->offset, mt);
+ if (unlikely(ma_dead_node(node)))
+ return 1;
+
+ mas->node = enode;
+ mas->min = min;
+ return 0;
+
+no_entry:
+ if (unlikely(ma_dead_node(node)))
+ return 1;
+
+ mas->node = MAS_NONE;
+ return 0;
+}
+
+/*
+ * mas_next_slot() - Get the entry in the next slot
+ *
+ * @mas: The maple state
+ * @max: The maximum starting range
+ * @empty: Can be empty
+ * @set_overflow: Should @mas->node be set to overflow when the limit is
+ * reached.
+ *
+ * Return: The entry in the next slot which is possibly NULL
+ */
+static void *mas_next_slot(struct ma_state *mas, unsigned long max, bool empty,
+ bool set_overflow)
+{
+ void __rcu **slots;
+ unsigned long *pivots;
+ unsigned long pivot;
+ enum maple_type type;
+ struct maple_node *node;
+ unsigned char data_end;
+ unsigned long save_point = mas->last;
+ void *entry;
+
+retry:
+ node = mas_mn(mas);
+ type = mte_node_type(mas->node);
+ pivots = ma_pivots(node, type);
+ data_end = ma_data_end(node, type, pivots, mas->max);
+ if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
+ goto retry;
+
+ if (mas->max >= max) {
+ if (likely(mas->offset < data_end))
+ pivot = pivots[mas->offset];
+ else
+ goto overflow;
+
+ if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
+ goto retry;
+
+ if (pivot >= max)
+ goto overflow;
+ }
+
+ if (likely(mas->offset < data_end)) {
+ mas->index = pivots[mas->offset] + 1;
+again:
+ mas->offset++;
+ if (likely(mas->offset < data_end))
+ mas->last = pivots[mas->offset];
+ else
+ mas->last = mas->max;
+ } else {
+ if (mas_next_node(mas, node, max)) {
+ mas_rewalk(mas, save_point);
+ goto retry;
+ }
+
+ if (WARN_ON_ONCE(mas_is_none(mas))) {
+ mas->node = MAS_OVERFLOW;
+ return NULL;
+ }
+
+ mas->offset = 0;
+ mas->index = mas->min;
+ node = mas_mn(mas);
+ type = mte_node_type(mas->node);
+ pivots = ma_pivots(node, type);
+ mas->last = pivots[0];
+ }
+
+ slots = ma_slots(node, type);
+ entry = mt_slot(mas->tree, slots, mas->offset);
+ if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
+ goto retry;
+
+ if (entry)
+ return entry;
+
+ if (!empty) {
+ if (mas->last >= max)
+ goto overflow;
+
+ mas->index = mas->last + 1;
+ /* Node cannot end on NULL, so it's safe to short-cut here */
+ goto again;
+ }
+
+ return entry;
+
+overflow:
+ if (set_overflow)
+ mas->node = MAS_OVERFLOW;
+ return NULL;
+}
+
+/*
+ * mas_next_entry() - Internal function to get the next entry.
+ * @mas: The maple state
+ * @limit: The maximum range start.
+ *
+ * Set the @mas->node to the next entry and the range_start to
+ * the beginning value for the entry. Does not check beyond @limit.
+ * Sets @mas->index and @mas->last to the range of the entry, but does not
+ * update them on overflow.
+ * Restarts on dead nodes.
+ *
+ * Return: the next entry or %NULL.
+ */
+static inline void *mas_next_entry(struct ma_state *mas, unsigned long limit)
+{
+ if (mas->last >= limit) {
+ mas->node = MAS_OVERFLOW;
+ return NULL;
+ }
+
+ return mas_next_slot(mas, limit, false, true);
+}
+
+/*
+ * mas_rev_awalk() - Internal function. Reverse allocation walk. Find the
+ * highest gap address of a given size in a given node and descend.
+ * @mas: The maple state
+ * @size: The needed size.
+ * @gap_min: Set to the lower bound of the gap found in a leaf
+ * @gap_max: Set to the upper bound of the gap found in a leaf
+ *
+ * Return: True if found in a leaf, false otherwise.
+ */
+static bool mas_rev_awalk(struct ma_state *mas, unsigned long size,
+ unsigned long *gap_min, unsigned long *gap_max)
+{
+ enum maple_type type = mte_node_type(mas->node);
+ struct maple_node *node = mas_mn(mas);
+ unsigned long *pivots, *gaps;
+ void __rcu **slots;
+ unsigned long gap = 0;
+ unsigned long max, min;
+ unsigned char offset;
+
+ if (unlikely(mas_is_err(mas)))
+ return true;
+
+ if (ma_is_dense(type)) {
+ /* dense nodes. */
+ mas->offset = (unsigned char)(mas->index - mas->min);
+ return true;
+ }
+
+ pivots = ma_pivots(node, type);
+ slots = ma_slots(node, type);
+ gaps = ma_gaps(node, type);
+ offset = mas->offset;
+ min = mas_safe_min(mas, pivots, offset);
+ /* Skip out of bounds. */
+ while (mas->last < min)
+ min = mas_safe_min(mas, pivots, --offset);
+
+ max = mas_safe_pivot(mas, pivots, offset, type);
+ while (mas->index <= max) {
+ gap = 0;
+ if (gaps)
+ gap = gaps[offset];
+ else if (!mas_slot(mas, slots, offset))
+ gap = max - min + 1;
+
+ if (gap) {
+ if ((size <= gap) && (size <= mas->last - min + 1))
+ break;
+
+ if (!gaps) {
+ /* Skip the next slot, it cannot be a gap. */
+ if (offset < 2)
+ goto ascend;
+
+ offset -= 2;
+ max = pivots[offset];
+ min = mas_safe_min(mas, pivots, offset);
+ continue;
+ }
+ }
+
+ if (!offset)
+ goto ascend;
+
+ offset--;
+ max = min - 1;
+ min = mas_safe_min(mas, pivots, offset);
+ }
+
+ if (unlikely((mas->index > max) || (size - 1 > max - mas->index)))
+ goto no_space;
+
+ if (unlikely(ma_is_leaf(type))) {
+ mas->offset = offset;
+ *gap_min = min;
+ *gap_max = min + gap - 1;
+ return true;
+ }
+
+ /* descend, only happens under lock. */
+ mas->node = mas_slot(mas, slots, offset);
+ mas->min = min;
+ mas->max = max;
+ mas->offset = mas_data_end(mas);
+ return false;
+
+ascend:
+ if (!mte_is_root(mas->node))
+ return false;
+
+no_space:
+ mas_set_err(mas, -EBUSY);
+ return false;
+}
+
+static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size)
+{
+ enum maple_type type = mte_node_type(mas->node);
+ unsigned long pivot, min, gap = 0;
+ unsigned char offset, data_end;
+ unsigned long *gaps, *pivots;
+ void __rcu **slots;
+ struct maple_node *node;
+ bool found = false;
+
+ if (ma_is_dense(type)) {
+ mas->offset = (unsigned char)(mas->index - mas->min);
+ return true;
+ }
+
+ node = mas_mn(mas);
+ pivots = ma_pivots(node, type);
+ slots = ma_slots(node, type);
+ gaps = ma_gaps(node, type);
+ offset = mas->offset;
+ min = mas_safe_min(mas, pivots, offset);
+ data_end = ma_data_end(node, type, pivots, mas->max);
+ for (; offset <= data_end; offset++) {
+ pivot = mas_safe_pivot(mas, pivots, offset, type);
+
+ /* Not within lower bounds */
+ if (mas->index > pivot)
+ goto next_slot;
+
+ if (gaps)
+ gap = gaps[offset];
+ else if (!mas_slot(mas, slots, offset))
+ gap = min(pivot, mas->last) - max(mas->index, min) + 1;
+ else
+ goto next_slot;
+
+ if (gap >= size) {
+ if (ma_is_leaf(type)) {
+ found = true;
+ goto done;
+ }
+ if (mas->index <= pivot) {
+ mas->node = mas_slot(mas, slots, offset);
+ mas->min = min;
+ mas->max = pivot;
+ offset = 0;
+ break;
+ }
+ }
+next_slot:
+ min = pivot + 1;
+ if (mas->last <= pivot) {
+ mas_set_err(mas, -EBUSY);
+ return true;
+ }
+ }
+
+ if (mte_is_root(mas->node))
+ found = true;
+done:
+ mas->offset = offset;
+ return found;
+}
+
+/**
+ * mas_walk() - Search for @mas->index in the tree.
+ * @mas: The maple state.
+ *
+ * mas->index and mas->last will be set to the range if there is a value. If
+ * mas->node is MAS_NONE, reset to MAS_START.
+ *
+ * Return: the entry at the location or %NULL.
+ */
+void *mas_walk(struct ma_state *mas)
+{
+ void *entry;
+
+ if (!mas_is_active(mas) || !mas_is_start(mas))
+ mas->node = MAS_START;
+retry:
+ entry = mas_state_walk(mas);
+ if (mas_is_start(mas)) {
+ goto retry;
+ } else if (mas_is_none(mas)) {
+ mas->index = 0;
+ mas->last = ULONG_MAX;
+ } else if (mas_is_ptr(mas)) {
+ if (!mas->index) {
+ mas->last = 0;
+ return entry;
+ }
+
+ mas->index = 1;
+ mas->last = ULONG_MAX;
+ mas->node = MAS_NONE;
+ return NULL;
+ }
+
+ return entry;
+}
+EXPORT_SYMBOL_GPL(mas_walk);
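+
+/*
+ * Example usage (an illustrative sketch, not part of this file): reading the
+ * entry at index 12 under RCU, assuming a tree @mt declared elsewhere with
+ * DEFINE_MTREE():
+ *
+ *    MA_STATE(mas, &mt, 12, 12);
+ *    void *entry;
+ *
+ *    rcu_read_lock();
+ *    entry = mas_walk(&mas);
+ *    rcu_read_unlock();
+ */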
+
+static inline bool mas_rewind_node(struct ma_state *mas)
+{
+ unsigned char slot;
+
+ do {
+ if (mte_is_root(mas->node)) {
+ slot = mas->offset;
+ if (!slot)
+ return false;
+ } else {
+ mas_ascend(mas);
+ slot = mas->offset;
+ }
+ } while (!slot);
+
+ mas->offset = --slot;
+ return true;
+}
+
+/*
+ * mas_skip_node() - Internal function. Skip over a node.
+ * @mas: The maple state.
+ *
+ * Return: true if there is another node, false otherwise.
+ */
+static inline bool mas_skip_node(struct ma_state *mas)
+{
+ if (mas_is_err(mas))
+ return false;
+
+ do {
+ if (mte_is_root(mas->node)) {
+ if (mas->offset >= mas_data_end(mas)) {
+ mas_set_err(mas, -EBUSY);
+ return false;
+ }
+ } else {
+ mas_ascend(mas);
+ }
+ } while (mas->offset >= mas_data_end(mas));
+
+ mas->offset++;
+ return true;
+}
+
+/*
+ * mas_awalk() - Allocation walk. Search from low address to high, for a gap of
+ * @size
+ * @mas: The maple state
+ * @size: The size of the gap required
+ *
+ * Search between @mas->index and @mas->last for a gap of @size.
+ */
+static inline void mas_awalk(struct ma_state *mas, unsigned long size)
+{
+ struct maple_enode *last = NULL;
+
+ /*
+ * There are 4 options:
+ * go to child (descend)
+ * go back to parent (ascend)
+ * no gap found. (return, slot == MAPLE_NODE_SLOTS)
+ * found the gap. (return, slot != MAPLE_NODE_SLOTS)
+ */
+ while (!mas_is_err(mas) && !mas_anode_descend(mas, size)) {
+ if (last == mas->node)
+ mas_skip_node(mas);
+ else
+ last = mas->node;
+ }
+}
+
+/*
+ * mas_sparse_area() - Internal function. Return upper or lower limit when
+ * searching for a gap in an empty tree.
+ * @mas: The maple state
+ * @min: the minimum range
+ * @max: The maximum range
+ * @size: The size of the gap
+ * @fwd: Searching forward or back
+ *
+ * Return: 0 on success, -EBUSY if the requested gap does not fit.
+ */
+static inline int mas_sparse_area(struct ma_state *mas, unsigned long min,
+ unsigned long max, unsigned long size, bool fwd)
+{
+ if (!unlikely(mas_is_none(mas)) && min == 0) {
+ min++;
+ /*
+ * Since min has been increased, recheck that a gap of the
+ * requested size still fits within the range.
+ */
+ if (min > max || max - min + 1 < size)
+ return -EBUSY;
+ }
+ /* mas_is_ptr */
+
+ if (fwd) {
+ mas->index = min;
+ mas->last = min + size - 1;
+ } else {
+ mas->last = max;
+ mas->index = max - size + 1;
+ }
+ return 0;
+}
+
+/*
+ * mas_empty_area() - Get the lowest address within the range that is
+ * sufficient for the size requested.
+ * @mas: The maple state
+ * @min: The lowest value of the range
+ * @max: The highest value of the range
+ * @size: The size needed
+ *
+ * Return: 0 on success, -EBUSY if no gap is found, or -EINVAL on an invalid
+ * request.
+ */
+int mas_empty_area(struct ma_state *mas, unsigned long min,
+ unsigned long max, unsigned long size)
+{
+ unsigned char offset;
+ unsigned long *pivots;
+ enum maple_type mt;
+
+ if (min > max)
+ return -EINVAL;
+
+ if (size == 0 || max - min < size - 1)
+ return -EINVAL;
+
+ if (mas_is_start(mas))
+ mas_start(mas);
+ else if (mas->offset >= 2)
+ mas->offset -= 2;
+ else if (!mas_skip_node(mas))
+ return -EBUSY;
+
+ /* Empty set */
+ if (mas_is_none(mas) || mas_is_ptr(mas))
+ return mas_sparse_area(mas, min, max, size, true);
+
+ /* The start of the window can only be within these values */
+ mas->index = min;
+ mas->last = max;
+ mas_awalk(mas, size);
+
+ if (unlikely(mas_is_err(mas)))
+ return xa_err(mas->node);
+
+ offset = mas->offset;
+ if (unlikely(offset == MAPLE_NODE_SLOTS))
+ return -EBUSY;
+
+ mt = mte_node_type(mas->node);
+ pivots = ma_pivots(mas_mn(mas), mt);
+ min = mas_safe_min(mas, pivots, offset);
+ if (mas->index < min)
+ mas->index = min;
+ mas->last = mas->index + size - 1;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mas_empty_area);
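+
+/*
+ * Example usage (an illustrative sketch): finding the lowest 0x1000-wide gap
+ * in [0, ULONG_MAX] of a tree @mt assumed to exist, under the tree lock:
+ *
+ *    MA_STATE(mas, &mt, 0, 0);
+ *
+ *    mas_lock(&mas);
+ *    if (!mas_empty_area(&mas, 0, ULONG_MAX, 0x1000))
+ *        pr_debug("gap: [%lx, %lx]\n", mas.index, mas.last);
+ *    mas_unlock(&mas);
+ */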
+
+/*
+ * mas_empty_area_rev() - Get the highest address within the range that is
+ * sufficient for the size requested.
+ * @mas: The maple state
+ * @min: The lowest value of the range
+ * @max: The highest value of the range
+ * @size: The size needed
+ *
+ * Return: 0 on success, -EBUSY if no gap is found, or -EINVAL on an invalid
+ * request.
+ */
+int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
+ unsigned long max, unsigned long size)
+{
+ struct maple_enode *last = mas->node;
+
+ if (min > max)
+ return -EINVAL;
+
+ if (size == 0 || max - min < size - 1)
+ return -EINVAL;
+
+ if (mas_is_start(mas)) {
+ mas_start(mas);
+ mas->offset = mas_data_end(mas);
+ } else if (mas->offset >= 2) {
+ mas->offset -= 2;
+ } else if (!mas_rewind_node(mas)) {
+ return -EBUSY;
+ }
+
+ /* Empty set. */
+ if (mas_is_none(mas) || mas_is_ptr(mas))
+ return mas_sparse_area(mas, min, max, size, false);
+
+ /* The start of the window can only be within these values. */
+ mas->index = min;
+ mas->last = max;
+
+ while (!mas_rev_awalk(mas, size, &min, &max)) {
+ if (last == mas->node) {
+ if (!mas_rewind_node(mas))
+ return -EBUSY;
+ } else {
+ last = mas->node;
+ }
+ }
+
+ if (mas_is_err(mas))
+ return xa_err(mas->node);
+
+ if (unlikely(mas->offset == MAPLE_NODE_SLOTS))
+ return -EBUSY;
+
+ /* Trim the upper limit to the max. */
+ if (max < mas->last)
+ mas->last = max;
+
+ mas->index = mas->last - size + 1;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mas_empty_area_rev);
+
+/*
+ * mte_dead_leaves() - Mark all leaves of a node as dead.
+ * @enode: The maple encoded node
+ * @mt: The maple tree
+ * @slots: Pointer to the slot array
+ *
+ * Must hold the write lock.
+ *
+ * Return: The number of leaves marked as dead.
+ */
+static inline
+unsigned char mte_dead_leaves(struct maple_enode *enode, struct maple_tree *mt,
+ void __rcu **slots)
+{
+ struct maple_node *node;
+ enum maple_type type;
+ void *entry;
+ int offset;
+
+ for (offset = 0; offset < mt_slot_count(enode); offset++) {
+ entry = mt_slot(mt, slots, offset);
+ type = mte_node_type(entry);
+ node = mte_to_node(entry);
+ /* Use both node and type to catch LE & BE metadata */
+ if (!node || !type)
+ break;
+
+ mte_set_node_dead(entry);
+ node->type = type;
+ rcu_assign_pointer(slots[offset], node);
+ }
+
+ return offset;
+}
+
+/**
+ * mte_dead_walk() - Walk down a dead tree to just before the leaves
+ * @enode: The maple encoded node
+ * @offset: The starting offset
+ *
+ * Note: This can only be used from the RCU callback context.
+ */
+static void __rcu **mte_dead_walk(struct maple_enode **enode, unsigned char offset)
+{
+ struct maple_node *node, *next;
+ void __rcu **slots = NULL;
+
+ next = mte_to_node(*enode);
+ do {
+ *enode = ma_enode_ptr(next);
+ node = mte_to_node(*enode);
+ slots = ma_slots(node, node->type);
+ next = rcu_dereference_protected(slots[offset],
+ lock_is_held(&rcu_callback_map));
+ offset = 0;
+ } while (!ma_is_leaf(next->type));
+
+ return slots;
+}
+
+/**
+ * mt_free_walk() - Walk & free a tree in the RCU callback context
+ * @head: The RCU head that's within the node.
+ *
+ * Note: This can only be used from the RCU callback context.
+ */
+static void mt_free_walk(struct rcu_head *head)
+{
+ void __rcu **slots;
+ struct maple_node *node, *start;
+ struct maple_enode *enode;
+ unsigned char offset;
+ enum maple_type type;
+
+ node = container_of(head, struct maple_node, rcu);
+
+ if (ma_is_leaf(node->type))
+ goto free_leaf;
+
+ start = node;
+ enode = mt_mk_node(node, node->type);
+ slots = mte_dead_walk(&enode, 0);
+ node = mte_to_node(enode);
+ do {
+ mt_free_bulk(node->slot_len, slots);
+ offset = node->parent_slot + 1;
+ enode = node->piv_parent;
+ if (mte_to_node(enode) == node)
+ goto free_leaf;
+
+ type = mte_node_type(enode);
+ slots = ma_slots(mte_to_node(enode), type);
+ if ((offset < mt_slots[type]) &&
+ rcu_dereference_protected(slots[offset],
+ lock_is_held(&rcu_callback_map)))
+ slots = mte_dead_walk(&enode, offset);
+ node = mte_to_node(enode);
+ } while ((node != start) || (node->slot_len < offset));
+
+ slots = ma_slots(node, node->type);
+ mt_free_bulk(node->slot_len, slots);
+
+free_leaf:
+ mt_free_rcu(&node->rcu);
+}
+
+static inline void __rcu **mte_destroy_descend(struct maple_enode **enode,
+ struct maple_tree *mt, struct maple_enode *prev, unsigned char offset)
+{
+ struct maple_node *node;
+ struct maple_enode *next = *enode;
+ void __rcu **slots = NULL;
+ enum maple_type type;
+ unsigned char next_offset = 0;
+
+ do {
+ *enode = next;
+ node = mte_to_node(*enode);
+ type = mte_node_type(*enode);
+ slots = ma_slots(node, type);
+ next = mt_slot_locked(mt, slots, next_offset);
+ if ((mte_dead_node(next)))
+ next = mt_slot_locked(mt, slots, ++next_offset);
+
+ mte_set_node_dead(*enode);
+ node->type = type;
+ node->piv_parent = prev;
+ node->parent_slot = offset;
+ offset = next_offset;
+ next_offset = 0;
+ prev = *enode;
+ } while (!mte_is_leaf(next));
+
+ return slots;
+}
+
+static void mt_destroy_walk(struct maple_enode *enode, struct maple_tree *mt,
+ bool free)
+{
+ void __rcu **slots;
+ struct maple_node *node = mte_to_node(enode);
+ struct maple_enode *start;
+
+ if (mte_is_leaf(enode)) {
+ node->type = mte_node_type(enode);
+ goto free_leaf;
+ }
+
+ start = enode;
+ slots = mte_destroy_descend(&enode, mt, start, 0);
+ node = mte_to_node(enode); /* Updated in the above call. */
+ do {
+ enum maple_type type;
+ unsigned char offset;
+ struct maple_enode *parent, *tmp;
+
+ node->slot_len = mte_dead_leaves(enode, mt, slots);
+ if (free)
+ mt_free_bulk(node->slot_len, slots);
+ offset = node->parent_slot + 1;
+ enode = node->piv_parent;
+ if (mte_to_node(enode) == node)
+ goto free_leaf;
+
+ type = mte_node_type(enode);
+ slots = ma_slots(mte_to_node(enode), type);
+ if (offset >= mt_slots[type])
+ goto next;
+
+ tmp = mt_slot_locked(mt, slots, offset);
+ if (mte_node_type(tmp) && mte_to_node(tmp)) {
+ parent = enode;
+ enode = tmp;
+ slots = mte_destroy_descend(&enode, mt, parent, offset);
+ }
+next:
+ node = mte_to_node(enode);
+ } while (start != enode);
+
+ node = mte_to_node(enode);
+ node->slot_len = mte_dead_leaves(enode, mt, slots);
+ if (free)
+ mt_free_bulk(node->slot_len, slots);
+
+free_leaf:
+ if (free)
+ mt_free_rcu(&node->rcu);
+ else
+ mt_clear_meta(mt, node, node->type);
+}
+
+/*
+ * mte_destroy_walk() - Free a tree or sub-tree.
+ * @enode: the encoded maple node (maple_enode) to start
+ * @mt: the tree to free - needed for node types.
+ *
+ * Must hold the write lock.
+ */
+static inline void mte_destroy_walk(struct maple_enode *enode,
+ struct maple_tree *mt)
+{
+ struct maple_node *node = mte_to_node(enode);
+
+ if (mt_in_rcu(mt)) {
+ mt_destroy_walk(enode, mt, false);
+ call_rcu(&node->rcu, mt_free_walk);
+ } else {
+ mt_destroy_walk(enode, mt, true);
+ }
+}
+
+static void mas_wr_store_setup(struct ma_wr_state *wr_mas)
+{
+ if (!mas_is_active(wr_mas->mas)) {
+ if (mas_is_start(wr_mas->mas))
+ return;
+
+ if (unlikely(mas_is_paused(wr_mas->mas)))
+ goto reset;
+
+ if (unlikely(mas_is_none(wr_mas->mas)))
+ goto reset;
+
+ if (unlikely(mas_is_overflow(wr_mas->mas)))
+ goto reset;
+
+ if (unlikely(mas_is_underflow(wr_mas->mas)))
+ goto reset;
+ }
+
+ /*
+ * A less strict version of mas_is_span_wr() where we allow spanning
+ * writes within this node. This is to stop partial walks in
+ * mas_preallocate() from being reset.
+ */
+ if (wr_mas->mas->last > wr_mas->mas->max)
+ goto reset;
+
+ if (wr_mas->entry)
+ return;
+
+ if (mte_is_leaf(wr_mas->mas->node) &&
+ wr_mas->mas->last == wr_mas->mas->max)
+ goto reset;
+
+ return;
+
+reset:
+ mas_reset(wr_mas->mas);
+}
+
+/* Interface */
+
+/**
+ * mas_store() - Store an @entry.
+ * @mas: The maple state.
+ * @entry: The entry to store.
+ *
+ * The @mas->index and @mas->last are used to set the range for the @entry.
+ * Note: The @mas should have pre-allocated entries to ensure there is memory to
+ * store the entry. Please see mas_expected_entries()/mas_destroy() for more details.
+ *
+ * Return: the first entry between mas->index and mas->last or %NULL.
+ */
+void *mas_store(struct ma_state *mas, void *entry)
+{
+ MA_WR_STATE(wr_mas, mas, entry);
+
+ trace_ma_write(__func__, mas, 0, entry);
+#ifdef CONFIG_DEBUG_MAPLE_TREE
+ if (MAS_WARN_ON(mas, mas->index > mas->last))
+ pr_err("Error %lX > %lX %p\n", mas->index, mas->last, entry);
+
+ if (mas->index > mas->last) {
+ mas_set_err(mas, -EINVAL);
+ return NULL;
+ }
+
+#endif
+
+ /*
+ * Storing is the same operation as insert with the added caveat that it
+ * can overwrite entries. Although this seems simple enough, one may
+ * want to examine what happens if a single store operation was to
+ * overwrite multiple entries within a self-balancing B-Tree.
+ */
+ mas_wr_store_setup(&wr_mas);
+ mas_wr_store_entry(&wr_mas);
+ return wr_mas.content;
+}
+EXPORT_SYMBOL_GPL(mas_store);
+
+/**
+ * mas_store_gfp() - Store a value into the tree.
+ * @mas: The maple state
+ * @entry: The entry to store
+ * @gfp: The GFP_FLAGS to use for allocations if necessary.
+ *
+ * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not
+ * be allocated.
+ */
+int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp)
+{
+ MA_WR_STATE(wr_mas, mas, entry);
+
+ mas_wr_store_setup(&wr_mas);
+ trace_ma_write(__func__, mas, 0, entry);
+retry:
+ mas_wr_store_entry(&wr_mas);
+ if (unlikely(mas_nomem(mas, gfp)))
+ goto retry;
+
+ if (unlikely(mas_is_err(mas)))
+ return xa_err(mas->node);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mas_store_gfp);
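+
+/*
+ * Illustrative sketch (editor's addition, not upstream documentation):
+ * a minimal advanced-API store over the range 5-10. 'my_tree' and
+ * 'my_entry' are placeholder names; the tree lock shown is required.
+ *
+ *   MA_STATE(mas, &my_tree, 5, 10);
+ *
+ *   mtree_lock(&my_tree);
+ *   ret = mas_store_gfp(&mas, my_entry, GFP_KERNEL);
+ *   mtree_unlock(&my_tree);
+ */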
+
+/**
+ * mas_store_prealloc() - Store a value into the tree using memory
+ * preallocated in the maple state.
+ * @mas: The maple state
+ * @entry: The entry to store.
+ */
+void mas_store_prealloc(struct ma_state *mas, void *entry)
+{
+ MA_WR_STATE(wr_mas, mas, entry);
+
+ mas_wr_store_setup(&wr_mas);
+ trace_ma_write(__func__, mas, 0, entry);
+ mas_wr_store_entry(&wr_mas);
+ MAS_WR_BUG_ON(&wr_mas, mas_is_err(mas));
+ mas_destroy(mas);
+}
+EXPORT_SYMBOL_GPL(mas_store_prealloc);
+
+/**
+ * mas_preallocate() - Preallocate enough nodes for a store operation
+ * @mas: The maple state
+ * @entry: The entry that will be stored
+ * @gfp: The GFP_FLAGS to use for allocations.
+ *
+ * Return: 0 on success, -ENOMEM if memory could not be allocated.
+ */
+int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp)
+{
+ MA_WR_STATE(wr_mas, mas, entry);
+ unsigned char node_size;
+ int request = 1;
+ int ret;
+
+ if (unlikely(!mas->index && mas->last == ULONG_MAX))
+ goto ask_now;
+
+ mas_wr_store_setup(&wr_mas);
+ wr_mas.content = mas_start(mas);
+ /* Root expand */
+ if (unlikely(mas_is_none(mas) || mas_is_ptr(mas)))
+ goto ask_now;
+
+ if (unlikely(!mas_wr_walk(&wr_mas))) {
+ /* Spanning store, use worst case for now */
+ request = 1 + mas_mt_height(mas) * 3;
+ goto ask_now;
+ }
+
+ /* At this point, we are at the leaf node that needs to be altered. */
+ /* Exact fit, no nodes needed. */
+ if (wr_mas.r_min == mas->index && wr_mas.r_max == mas->last)
+ return 0;
+
+ mas_wr_end_piv(&wr_mas);
+ node_size = mas_wr_new_end(&wr_mas);
+
+ /* Slot store, does not require additional nodes */
+ if (node_size == wr_mas.node_end) {
+ /* reuse node */
+ if (!mt_in_rcu(mas->tree))
+ return 0;
+ /* shifting boundary */
+ if (wr_mas.offset_end - mas->offset == 1)
+ return 0;
+ }
+
+ if (node_size >= mt_slots[wr_mas.type]) {
+ /* Split, worst case for now. */
+ request = 1 + mas_mt_height(mas) * 2;
+ goto ask_now;
+ }
+
+ /* New root needs a single node */
+ if (unlikely(mte_is_root(mas->node)))
+ goto ask_now;
+
+ /* Potential spanning rebalance collapsing a node, use worst-case */
+ if (node_size - 1 <= mt_min_slots[wr_mas.type])
+ request = mas_mt_height(mas) * 2 - 1;
+
+ /* node store, slot store needs one node */
+ask_now:
+ mas_node_count_gfp(mas, request, gfp);
+ mas->mas_flags |= MA_STATE_PREALLOC;
+ if (likely(!mas_is_err(mas)))
+ return 0;
+
+ mas_set_alloc_req(mas, 0);
+ ret = xa_err(mas->node);
+ mas_reset(mas);
+ mas_destroy(mas);
+ mas_reset(mas);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mas_preallocate);
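+
+/*
+ * Illustrative sketch (editor's addition): the preallocation pattern for
+ * callers that must not allocate at store time. Names are placeholders;
+ * the write lock must be held across both calls.
+ *
+ *   if (mas_preallocate(&mas, my_entry, GFP_KERNEL))
+ *           return -ENOMEM;
+ *   ... enter the context where allocation is not allowed ...
+ *   mas_store_prealloc(&mas, my_entry);
+ */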
+
+/*
+ * mas_destroy() - destroy a maple state.
+ * @mas: The maple state
+ *
+ * Upon completion, check the left-most node and rebalance against the node to
+ * the right if necessary. Frees any allocated nodes associated with this maple
+ * state.
+ */
+void mas_destroy(struct ma_state *mas)
+{
+ struct maple_alloc *node;
+ unsigned long total;
+
+ /*
+ * When using mas_for_each() to insert an expected number of elements,
+ * it is possible that the number inserted is less than the expected
+ * number. To fix an invalid final node, a check is performed here to
+ * rebalance the previous node with the final node.
+ */
+ if (mas->mas_flags & MA_STATE_REBALANCE) {
+ unsigned char end;
+
+ mas_start(mas);
+ mtree_range_walk(mas);
+ end = mas_data_end(mas) + 1;
+ if (end < mt_min_slot_count(mas->node) - 1)
+ mas_destroy_rebalance(mas, end);
+
+ mas->mas_flags &= ~MA_STATE_REBALANCE;
+ }
+ mas->mas_flags &= ~(MA_STATE_BULK|MA_STATE_PREALLOC);
+
+ total = mas_allocated(mas);
+ while (total) {
+ node = mas->alloc;
+ mas->alloc = node->slot[0];
+ if (node->node_count > 1) {
+ size_t count = node->node_count - 1;
+
+ mt_free_bulk(count, (void __rcu **)&node->slot[1]);
+ total -= count;
+ }
+ kmem_cache_free(maple_node_cache, node);
+ total--;
+ }
+
+ mas->alloc = NULL;
+}
+EXPORT_SYMBOL_GPL(mas_destroy);
+
+/*
+ * mas_expected_entries() - Set the expected number of entries that will be inserted.
+ * @mas: The maple state
+ * @nr_entries: The number of expected entries.
+ *
+ * This will attempt to pre-allocate enough nodes to store the expected number
+ * of entries. The allocations will occur using the bulk allocator interface
+ * for speed. Please call mas_destroy() on the @mas after inserting the entries
+ * to ensure any unused nodes are freed.
+ *
+ * Return: 0 on success, -ENOMEM if memory could not be allocated.
+ */
+int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries)
+{
+ int nonleaf_cap = MAPLE_ARANGE64_SLOTS - 2;
+ struct maple_enode *enode = mas->node;
+ int nr_nodes;
+ int ret;
+
+ /*
+ * Sometimes it is necessary to duplicate a tree to a new tree, such as
+ * forking a process and duplicating the VMAs from one tree to a new
+ * tree. When such a situation arises, it is known that the new tree is
+ * not going to be used until the entire tree is populated. For
+ * performance reasons, it is best to use a bulk load with RCU disabled.
+ * This allows for optimistic splitting that favours the left and reuse
+ * of nodes during the operation.
+ */
+
+ /* Optimize splitting for bulk insert in-order */
+ mas->mas_flags |= MA_STATE_BULK;
+
+ /*
+ * Avoid overflow, assume a gap between each entry and a trailing null.
+ * If this is wrong, it just means allocation can happen during
+ * insertion of entries.
+ */
+ nr_nodes = max(nr_entries, nr_entries * 2 + 1);
+ if (!mt_is_alloc(mas->tree))
+ nonleaf_cap = MAPLE_RANGE64_SLOTS - 2;
+
+ /* Leaves; reduce slots to keep space for expansion */
+ nr_nodes = DIV_ROUND_UP(nr_nodes, MAPLE_RANGE64_SLOTS - 2);
+ /* Internal nodes */
+ nr_nodes += DIV_ROUND_UP(nr_nodes, nonleaf_cap);
+ /* Add working room for split (2 nodes) + new parents */
+ mas_node_count_gfp(mas, nr_nodes + 3, GFP_KERNEL);
+
+ /* Detect if allocations run out */
+ mas->mas_flags |= MA_STATE_PREALLOC;
+
+ if (!mas_is_err(mas))
+ return 0;
+
+ ret = xa_err(mas->node);
+ mas->node = enode;
+ mas_destroy(mas);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mas_expected_entries);
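+
+/*
+ * Illustrative sketch (editor's addition): bulk-loading a fresh tree in
+ * index order; start[], end[] and entry[] are placeholder arrays and
+ * mas_destroy() returns the unused preallocations.
+ *
+ *   MA_STATE(mas, &new_tree, 0, 0);
+ *
+ *   mtree_lock(&new_tree);
+ *   if (!mas_expected_entries(&mas, nr)) {
+ *           for (i = 0; i < nr; i++) {
+ *                   mas_set_range(&mas, start[i], end[i]);
+ *                   mas_store(&mas, entry[i]);
+ *           }
+ *   }
+ *   mas_destroy(&mas);
+ *   mtree_unlock(&new_tree);
+ */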
+
+static inline bool mas_next_setup(struct ma_state *mas, unsigned long max,
+ void **entry)
+{
+ bool was_none = mas_is_none(mas);
+
+ if (unlikely(mas->last >= max)) {
+ mas->node = MAS_OVERFLOW;
+ return true;
+ }
+
+ if (mas_is_active(mas))
+ return false;
+
+ if (mas_is_none(mas) || mas_is_paused(mas)) {
+ mas->node = MAS_START;
+ } else if (mas_is_overflow(mas)) {
+ /* Overflowed before, but the max changed */
+ mas->node = MAS_START;
+ } else if (mas_is_underflow(mas)) {
+ mas->node = MAS_START;
+ *entry = mas_walk(mas);
+ if (*entry)
+ return true;
+ }
+
+ if (mas_is_start(mas))
+ *entry = mas_walk(mas); /* Retries on dead nodes handled by mas_walk */
+
+ if (mas_is_ptr(mas)) {
+ *entry = NULL;
+ if (was_none && mas->index == 0) {
+ mas->index = mas->last = 0;
+ return true;
+ }
+ mas->index = 1;
+ mas->last = ULONG_MAX;
+ mas->node = MAS_NONE;
+ return true;
+ }
+
+ if (mas_is_none(mas))
+ return true;
+
+ return false;
+}
+
+/**
+ * mas_next() - Get the next entry.
+ * @mas: The maple state
+ * @max: The maximum index to check.
+ *
+ * Returns the next entry after @mas->index.
+ * Must hold rcu_read_lock or the write lock.
+ * Can return the zero entry.
+ *
+ * Return: The next entry or %NULL
+ */
+void *mas_next(struct ma_state *mas, unsigned long max)
+{
+ void *entry = NULL;
+
+ if (mas_next_setup(mas, max, &entry))
+ return entry;
+
+ /* Retries on dead nodes handled by mas_next_slot */
+ return mas_next_slot(mas, max, false, true);
+}
+EXPORT_SYMBOL_GPL(mas_next);
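+
+/*
+ * Illustrative sketch (editor's addition, not upstream documentation):
+ * iterating under RCU with the mas_for_each() helper, which is built on
+ * mas_find()/mas_next_slot(). 'my_tree' is a placeholder name.
+ *
+ *   MA_STATE(mas, &my_tree, 0, 0);
+ *   void *entry;
+ *
+ *   rcu_read_lock();
+ *   mas_for_each(&mas, entry, ULONG_MAX)
+ *           pr_debug("[%lx-%lx] -> %p\n", mas.index, mas.last, entry);
+ *   rcu_read_unlock();
+ */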
+
+/**
+ * mas_next_range() - Advance the maple state to the next range
+ * @mas: The maple state
+ * @max: The maximum index to check.
+ *
+ * Sets @mas->index and @mas->last to the range.
+ * Must hold rcu_read_lock or the write lock.
+ * Can return the zero entry.
+ *
+ * Return: The next entry or %NULL
+ */
+void *mas_next_range(struct ma_state *mas, unsigned long max)
+{
+ void *entry = NULL;
+
+ if (mas_next_setup(mas, max, &entry))
+ return entry;
+
+ /* Retries on dead nodes handled by mas_next_slot */
+ return mas_next_slot(mas, max, true, true);
+}
+EXPORT_SYMBOL_GPL(mas_next_range);
+
+/**
+ * mt_next() - get the next value in the maple tree
+ * @mt: The maple tree
+ * @index: The start index
+ * @max: The maximum index to check
+ *
+ * Takes RCU read lock internally to protect the search, which does not
+ * protect the returned pointer after dropping RCU read lock.
+ * See also: Documentation/core-api/maple_tree.rst
+ *
+ * Return: The entry higher than @index or %NULL if nothing is found.
+ */
+void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max)
+{
+ void *entry = NULL;
+ MA_STATE(mas, mt, index, index);
+
+ rcu_read_lock();
+ entry = mas_next(&mas, max);
+ rcu_read_unlock();
+ return entry;
+}
+EXPORT_SYMBOL_GPL(mt_next);
+
+static inline bool mas_prev_setup(struct ma_state *mas, unsigned long min,
+ void **entry)
+{
+ if (unlikely(mas->index <= min)) {
+ mas->node = MAS_UNDERFLOW;
+ return true;
+ }
+
+ if (mas_is_active(mas))
+ return false;
+
+ if (mas_is_overflow(mas)) {
+ mas->node = MAS_START;
+ *entry = mas_walk(mas);
+ if (*entry)
+ return true;
+ }
+
+ if (mas_is_none(mas) || mas_is_paused(mas)) {
+ mas->node = MAS_START;
+ } else if (mas_is_underflow(mas)) {
+ /* Underflowed before, but the min changed */
+ mas->node = MAS_START;
+ }
+
+ if (mas_is_start(mas))
+ mas_walk(mas);
+
+ if (unlikely(mas_is_ptr(mas))) {
+ if (!mas->index)
+ goto none;
+ mas->index = mas->last = 0;
+ *entry = mas_root(mas);
+ return true;
+ }
+
+ if (mas_is_none(mas)) {
+ if (mas->index) {
+ /* Walked to out-of-range pointer? */
+ mas->index = mas->last = 0;
+ mas->node = MAS_ROOT;
+ *entry = mas_root(mas);
+ return true;
+ }
+ return true;
+ }
+
+ return false;
+
+none:
+ mas->node = MAS_NONE;
+ return true;
+}
+
+/**
+ * mas_prev() - Get the previous entry
+ * @mas: The maple state
+ * @min: The minimum value to check.
+ *
+ * Must hold rcu_read_lock or the write lock.
+ * Will reset mas to MAS_START if the node is MAS_NONE. Will stop on
+ * nodes that are not searchable.
+ *
+ * Return: the previous value or %NULL.
+ */
+void *mas_prev(struct ma_state *mas, unsigned long min)
+{
+ void *entry = NULL;
+
+ if (mas_prev_setup(mas, min, &entry))
+ return entry;
+
+ return mas_prev_slot(mas, min, false, true);
+}
+EXPORT_SYMBOL_GPL(mas_prev);
+
+/**
+ * mas_prev_range() - Advance to the previous range
+ * @mas: The maple state
+ * @min: The minimum value to check.
+ *
+ * Sets @mas->index and @mas->last to the range.
+ * Must hold rcu_read_lock or the write lock.
+ * Will reset mas to MAS_START if the node is MAS_NONE. Will stop on
+ * nodes that are not searchable.
+ *
+ * Return: the previous value or %NULL.
+ */
+void *mas_prev_range(struct ma_state *mas, unsigned long min)
+{
+ void *entry = NULL;
+
+ if (mas_prev_setup(mas, min, &entry))
+ return entry;
+
+ return mas_prev_slot(mas, min, true, true);
+}
+EXPORT_SYMBOL_GPL(mas_prev_range);
+
+/**
+ * mt_prev() - get the previous value in the maple tree
+ * @mt: The maple tree
+ * @index: The start index
+ * @min: The minimum index to check
+ *
+ * Takes RCU read lock internally to protect the search, which does not
+ * protect the returned pointer after dropping RCU read lock.
+ * See also: Documentation/core-api/maple_tree.rst
+ *
+ * Return: The entry before @index or %NULL if nothing is found.
+ */
+void *mt_prev(struct maple_tree *mt, unsigned long index, unsigned long min)
+{
+ void *entry = NULL;
+ MA_STATE(mas, mt, index, index);
+
+ rcu_read_lock();
+ entry = mas_prev(&mas, min);
+ rcu_read_unlock();
+ return entry;
+}
+EXPORT_SYMBOL_GPL(mt_prev);
+
+/**
+ * mas_pause() - Pause a mas_find/mas_for_each to drop the lock.
+ * @mas: The maple state to pause
+ *
+ * Some users need to pause a walk and drop the lock they're holding in
+ * order to yield to a higher priority thread or carry out an operation
+ * on an entry. Those users should call this function before they drop
+ * the lock. It resets the @mas to be suitable for the next iteration
+ * of the loop after the user has reacquired the lock. If most entries
+ * found during a walk require you to call mas_pause(), the mt_for_each()
+ * iterator may be more appropriate.
+ */
+void mas_pause(struct ma_state *mas)
+{
+ mas->node = MAS_PAUSE;
+}
+EXPORT_SYMBOL_GPL(mas_pause);
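+
+/*
+ * Illustrative sketch (editor's addition): yielding mid-walk. mas_pause()
+ * makes it safe to drop and retake the lock between iterations.
+ *
+ *   rcu_read_lock();
+ *   mas_for_each(&mas, entry, ULONG_MAX) {
+ *           ... process entry ...
+ *           if (need_resched()) {
+ *                   mas_pause(&mas);
+ *                   rcu_read_unlock();
+ *                   cond_resched();
+ *                   rcu_read_lock();
+ *           }
+ *   }
+ *   rcu_read_unlock();
+ */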
+
+/**
+ * mas_find_setup() - Internal function to set up mas_find*().
+ * @mas: The maple state
+ * @max: The maximum index
+ * @entry: Pointer to the entry
+ *
+ * Returns: True if entry is the answer, false otherwise.
+ */
+static inline bool mas_find_setup(struct ma_state *mas, unsigned long max,
+ void **entry)
+{
+ if (mas_is_active(mas)) {
+ if (mas->last < max)
+ return false;
+
+ return true;
+ }
+
+ if (mas_is_paused(mas)) {
+ if (unlikely(mas->last >= max))
+ return true;
+
+ mas->index = ++mas->last;
+ mas->node = MAS_START;
+ } else if (mas_is_none(mas)) {
+ if (unlikely(mas->last >= max))
+ return true;
+
+ mas->index = mas->last;
+ mas->node = MAS_START;
+ } else if (mas_is_overflow(mas) || mas_is_underflow(mas)) {
+ if (mas->index > max) {
+ mas->node = MAS_OVERFLOW;
+ return true;
+ }
+
+ mas->node = MAS_START;
+ }
+
+ if (mas_is_start(mas)) {
+ /* First run or continue */
+ if (mas->index > max)
+ return true;
+
+ *entry = mas_walk(mas);
+ if (*entry)
+ return true;
+
+ }
+
+ if (unlikely(!mas_searchable(mas))) {
+ if (unlikely(mas_is_ptr(mas)))
+ goto ptr_out_of_range;
+
+ return true;
+ }
+
+ if (mas->index == max)
+ return true;
+
+ return false;
+
+ptr_out_of_range:
+ mas->node = MAS_NONE;
+ mas->index = 1;
+ mas->last = ULONG_MAX;
+ return true;
+}
+
+/**
+ * mas_find() - On the first call, find the entry at or after mas->index up to
+ * %max. Otherwise, find the entry after mas->index.
+ * @mas: The maple state
+ * @max: The maximum value to check.
+ *
+ * Must hold rcu_read_lock or the write lock.
+ * If an entry exists, last and index are updated accordingly.
+ * May set @mas->node to MAS_NONE.
+ *
+ * Return: The entry or %NULL.
+ */
+void *mas_find(struct ma_state *mas, unsigned long max)
+{
+ void *entry = NULL;
+
+ if (mas_find_setup(mas, max, &entry))
+ return entry;
+
+ /* Retries on dead nodes handled by mas_next_slot */
+ return mas_next_slot(mas, max, false, false);
+}
+EXPORT_SYMBOL_GPL(mas_find);
+
+/**
+ * mas_find_range() - On the first call, find the entry at or after
+ * mas->index up to %max. Otherwise, advance to the next slot mas->index.
+ * @mas: The maple state
+ * @max: The maximum value to check.
+ *
+ * Must hold rcu_read_lock or the write lock.
+ * If an entry exists, last and index are updated accordingly.
+ * May set @mas->node to MAS_NONE.
+ *
+ * Return: The entry or %NULL.
+ */
+void *mas_find_range(struct ma_state *mas, unsigned long max)
+{
+ void *entry = NULL;
+
+ if (mas_find_setup(mas, max, &entry))
+ return entry;
+
+ /* Retries on dead nodes handled by mas_next_slot */
+ return mas_next_slot(mas, max, true, false);
+}
+EXPORT_SYMBOL_GPL(mas_find_range);
+
+/**
+ * mas_find_rev_setup() - Internal function to set up mas_find_*_rev()
+ * @mas: The maple state
+ * @min: The minimum index
+ * @entry: Pointer to the entry
+ *
+ * Returns: True if entry is the answer, false otherwise.
+ */
+static inline bool mas_find_rev_setup(struct ma_state *mas, unsigned long min,
+ void **entry)
+{
+ if (mas_is_active(mas)) {
+ if (mas->index > min)
+ return false;
+
+ return true;
+ }
+
+ if (mas_is_paused(mas)) {
+ if (unlikely(mas->index <= min)) {
+ mas->node = MAS_NONE;
+ return true;
+ }
+ mas->node = MAS_START;
+ mas->last = --mas->index;
+ } else if (mas_is_none(mas)) {
+ if (mas->index <= min)
+ goto none;
+
+ mas->last = mas->index;
+ mas->node = MAS_START;
+ } else if (mas_is_underflow(mas) || mas_is_overflow(mas)) {
+ if (mas->last <= min) {
+ mas->node = MAS_UNDERFLOW;
+ return true;
+ }
+
+ mas->node = MAS_START;
+ }
+
+ if (mas_is_start(mas)) {
+ /* First run or continue */
+ if (mas->index < min)
+ return true;
+
+ *entry = mas_walk(mas);
+ if (*entry)
+ return true;
+ }
+
+ if (unlikely(!mas_searchable(mas))) {
+ if (mas_is_ptr(mas))
+ goto none;
+
+ if (mas_is_none(mas)) {
+ /*
+ * Walked to the location, and there was nothing so the
+ * previous location is 0.
+ */
+ mas->last = mas->index = 0;
+ mas->node = MAS_ROOT;
+ *entry = mas_root(mas);
+ return true;
+ }
+ }
+
+ if (mas->index < min)
+ return true;
+
+ return false;
+
+none:
+ mas->node = MAS_NONE;
+ return true;
+}
+
+/**
+ * mas_find_rev() - On the first call, find the first non-null entry at or
+ * below mas->index down to %min. Otherwise find the first non-null entry
+ * below mas->index down to %min.
+ * @mas: The maple state
+ * @min: The minimum value to check.
+ *
+ * Must hold rcu_read_lock or the write lock.
+ * If an entry exists, last and index are updated accordingly.
+ * May set @mas->node to MAS_NONE.
+ *
+ * Return: The entry or %NULL.
+ */
+void *mas_find_rev(struct ma_state *mas, unsigned long min)
+{
+ void *entry = NULL;
+
+ if (mas_find_rev_setup(mas, min, &entry))
+ return entry;
+
+ /* Retries on dead nodes handled by mas_prev_slot */
+ return mas_prev_slot(mas, min, false, false);
+}
+EXPORT_SYMBOL_GPL(mas_find_rev);
+
+/**
+ * mas_find_range_rev() - On the first call, find the first non-null entry
+ * at or below mas->index down to %min. Otherwise advance to the previous
+ * slot after mas->index down to %min.
+ * @mas: The maple state
+ * @min: The minimum value to check.
+ *
+ * Must hold rcu_read_lock or the write lock.
+ * If an entry exists, last and index are updated accordingly.
+ * May set @mas->node to MAS_NONE.
+ *
+ * Return: The entry or %NULL.
+ */
+void *mas_find_range_rev(struct ma_state *mas, unsigned long min)
+{
+ void *entry = NULL;
+
+ if (mas_find_rev_setup(mas, min, &entry))
+ return entry;
+
+ /* Retries on dead nodes handled by mas_prev_slot */
+ return mas_prev_slot(mas, min, true, false);
+}
+EXPORT_SYMBOL_GPL(mas_find_range_rev);
+
+/**
+ * mas_erase() - Find the range in which index resides and erase the entire
+ * range.
+ * @mas: The maple state
+ *
+ * Must hold the write lock.
+ * Searches for @mas->index, sets @mas->index and @mas->last to the range and
+ * erases that range.
+ *
+ * Return: the entry that was erased or %NULL, @mas->index and @mas->last are updated.
+ */
+void *mas_erase(struct ma_state *mas)
+{
+ void *entry;
+ MA_WR_STATE(wr_mas, mas, NULL);
+
+ if (mas_is_none(mas) || mas_is_paused(mas))
+ mas->node = MAS_START;
+
+ /* Retry unnecessary when holding the write lock. */
+ entry = mas_state_walk(mas);
+ if (!entry)
+ return NULL;
+
+write_retry:
+ /* Must reset to ensure spanning writes of last slot are detected */
+ mas_reset(mas);
+ mas_wr_store_setup(&wr_mas);
+ mas_wr_store_entry(&wr_mas);
+ if (mas_nomem(mas, GFP_KERNEL))
+ goto write_retry;
+
+ return entry;
+}
+EXPORT_SYMBOL_GPL(mas_erase);
+
+/**
+ * mas_nomem() - Check if there was an error allocating and do the allocation
+ * if necessary. If there are allocations, then free them.
+ * @mas: The maple state
+ * @gfp: The GFP_FLAGS to use for allocations
+ * Return: true on allocation, false otherwise.
+ */
+bool mas_nomem(struct ma_state *mas, gfp_t gfp)
+ __must_hold(mas->tree->ma_lock)
+{
+ if (likely(mas->node != MA_ERROR(-ENOMEM))) {
+ mas_destroy(mas);
+ return false;
+ }
+
+ if (gfpflags_allow_blocking(gfp) && !mt_external_lock(mas->tree)) {
+ mtree_unlock(mas->tree);
+ mas_alloc_nodes(mas, gfp);
+ mtree_lock(mas->tree);
+ } else {
+ mas_alloc_nodes(mas, gfp);
+ }
+
+ if (!mas_allocated(mas))
+ return false;
+
+ mas->node = MAS_START;
+ return true;
+}
+
+void __init maple_tree_init(void)
+{
+ maple_node_cache = kmem_cache_create("maple_node",
+ sizeof(struct maple_node), sizeof(struct maple_node),
+ SLAB_PANIC, NULL);
+}
+
+/**
+ * mtree_load() - Load a value stored in a maple tree
+ * @mt: The maple tree
+ * @index: The index to load
+ *
+ * Return: the entry or %NULL
+ */
+void *mtree_load(struct maple_tree *mt, unsigned long index)
+{
+ MA_STATE(mas, mt, index, index);
+ void *entry;
+
+ trace_ma_read(__func__, &mas);
+ rcu_read_lock();
+retry:
+ entry = mas_start(&mas);
+ if (unlikely(mas_is_none(&mas)))
+ goto unlock;
+
+ if (unlikely(mas_is_ptr(&mas))) {
+ if (index)
+ entry = NULL;
+
+ goto unlock;
+ }
+
+ entry = mtree_lookup_walk(&mas);
+ if (!entry && unlikely(mas_is_start(&mas)))
+ goto retry;
+unlock:
+ rcu_read_unlock();
+ if (xa_is_zero(entry))
+ return NULL;
+
+ return entry;
+}
+EXPORT_SYMBOL(mtree_load);
+
+/**
+ * mtree_store_range() - Store an entry at a given range.
+ * @mt: The maple tree
+ * @index: The start of the range
+ * @last: The end of the range
+ * @entry: The entry to store
+ * @gfp: The GFP_FLAGS to use for allocations
+ *
+ * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not
+ * be allocated.
+ */
+int mtree_store_range(struct maple_tree *mt, unsigned long index,
+ unsigned long last, void *entry, gfp_t gfp)
+{
+ MA_STATE(mas, mt, index, last);
+ MA_WR_STATE(wr_mas, &mas, entry);
+
+ trace_ma_write(__func__, &mas, 0, entry);
+ if (WARN_ON_ONCE(xa_is_advanced(entry)))
+ return -EINVAL;
+
+ if (index > last)
+ return -EINVAL;
+
+ mtree_lock(mt);
+retry:
+ mas_wr_store_entry(&wr_mas);
+ if (mas_nomem(&mas, gfp))
+ goto retry;
+
+ mtree_unlock(mt);
+ if (mas_is_err(&mas))
+ return xa_err(mas.node);
+
+ return 0;
+}
+EXPORT_SYMBOL(mtree_store_range);
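+
+/*
+ * Illustrative sketch (editor's addition): the normal API handles locking
+ * internally, so a range store is a single call. 'my_tree' and 'my_entry'
+ * are placeholder names.
+ *
+ *   DEFINE_MTREE(my_tree);
+ *
+ *   ret = mtree_store_range(&my_tree, 16, 31, my_entry, GFP_KERNEL);
+ *   if (!ret)
+ *           WARN_ON(mtree_load(&my_tree, 20) != my_entry);
+ */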
+
+/**
+ * mtree_store() - Store an entry at a given index.
+ * @mt: The maple tree
+ * @index: The index to store the value
+ * @entry: The entry to store
+ * @gfp: The GFP_FLAGS to use for allocations
+ *
+ * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not
+ * be allocated.
+ */
+int mtree_store(struct maple_tree *mt, unsigned long index, void *entry,
+ gfp_t gfp)
+{
+ return mtree_store_range(mt, index, index, entry, gfp);
+}
+EXPORT_SYMBOL(mtree_store);
+
+/**
+ * mtree_insert_range() - Insert an entry at a given range if there is no value.
+ * @mt: The maple tree
+ * @first: The start of the range
+ * @last: The end of the range
+ * @entry: The entry to store
+ * @gfp: The GFP_FLAGS to use for allocations.
+ *
+ * Return: 0 on success, -EEXIST if the range is occupied, -EINVAL on invalid
+ * request, -ENOMEM if memory could not be allocated.
+ */
+int mtree_insert_range(struct maple_tree *mt, unsigned long first,
+ unsigned long last, void *entry, gfp_t gfp)
+{
+ MA_STATE(ms, mt, first, last);
+
+ if (WARN_ON_ONCE(xa_is_advanced(entry)))
+ return -EINVAL;
+
+ if (first > last)
+ return -EINVAL;
+
+ mtree_lock(mt);
+retry:
+ mas_insert(&ms, entry);
+ if (mas_nomem(&ms, gfp))
+ goto retry;
+
+ mtree_unlock(mt);
+ if (mas_is_err(&ms))
+ return xa_err(ms.node);
+
+ return 0;
+}
+EXPORT_SYMBOL(mtree_insert_range);
+
+/**
+ * mtree_insert() - Insert an entry at a given index if there is no value.
+ * @mt: The maple tree
+ * @index : The index to store the value
+ * @entry: The entry to store
+ * @gfp: The GFP_FLAGS to use for allocations.
+ *
+ * Return: 0 on success, -EEXIST if the range is occupied, -EINVAL on invalid
+ * request, -ENOMEM if memory could not be allocated.
+ */
+int mtree_insert(struct maple_tree *mt, unsigned long index, void *entry,
+ gfp_t gfp)
+{
+ return mtree_insert_range(mt, index, index, entry, gfp);
+}
+EXPORT_SYMBOL(mtree_insert);
+
+int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp,
+ void *entry, unsigned long size, unsigned long min,
+ unsigned long max, gfp_t gfp)
+{
+ int ret = 0;
+
+ MA_STATE(mas, mt, 0, 0);
+ if (!mt_is_alloc(mt))
+ return -EINVAL;
+
+ if (WARN_ON_ONCE(mt_is_reserved(entry)))
+ return -EINVAL;
+
+ mtree_lock(mt);
+retry:
+ ret = mas_empty_area(&mas, min, max, size);
+ if (ret)
+ goto unlock;
+
+ mas_insert(&mas, entry);
+ /*
+ * mas_nomem() may release the lock, causing the allocated area
+ * to be unavailable, so try to allocate a free area again.
+ */
+ if (mas_nomem(&mas, gfp))
+ goto retry;
+
+ if (mas_is_err(&mas))
+ ret = xa_err(mas.node);
+ else
+ *startp = mas.index;
+
+unlock:
+ mtree_unlock(mt);
+ return ret;
+}
+EXPORT_SYMBOL(mtree_alloc_range);
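+
+/*
+ * Illustrative sketch (editor's addition): carving a 16-index range out of
+ * 0-1023. The tree must be initialized with MT_FLAGS_ALLOC_RANGE for the
+ * gap tracking this search relies on; names are placeholders.
+ *
+ *   unsigned long start;
+ *
+ *   mt_init_flags(&my_tree, MT_FLAGS_ALLOC_RANGE);
+ *   if (!mtree_alloc_range(&my_tree, &start, my_entry, 16, 0, 1023,
+ *                          GFP_KERNEL))
+ *           pr_debug("range begins at %lu\n", start);
+ */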
+
+int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp,
+ void *entry, unsigned long size, unsigned long min,
+ unsigned long max, gfp_t gfp)
+{
+ int ret = 0;
+
+ MA_STATE(mas, mt, 0, 0);
+ if (!mt_is_alloc(mt))
+ return -EINVAL;
+
+ if (WARN_ON_ONCE(mt_is_reserved(entry)))
+ return -EINVAL;
+
+ mtree_lock(mt);
+retry:
+ ret = mas_empty_area_rev(&mas, min, max, size);
+ if (ret)
+ goto unlock;
+
+ mas_insert(&mas, entry);
+ /*
+ * mas_nomem() may release the lock, causing the allocated area
+ * to be unavailable, so try to allocate a free area again.
+ */
+ if (mas_nomem(&mas, gfp))
+ goto retry;
+
+ if (mas_is_err(&mas))
+ ret = xa_err(mas.node);
+ else
+ *startp = mas.index;
+
+unlock:
+ mtree_unlock(mt);
+ return ret;
+}
+EXPORT_SYMBOL(mtree_alloc_rrange);
+
+/**
+ * mtree_erase() - Find an index and erase the entire range.
+ * @mt: The maple tree
+ * @index: The index to erase
+ *
+ * Erasing is the same as a walk to an entry then a store of a NULL to that
+ * ENTIRE range. In fact, it is implemented as such using the advanced API.
+ *
+ * Return: The entry stored at the @index or %NULL
+ */
+void *mtree_erase(struct maple_tree *mt, unsigned long index)
+{
+ void *entry = NULL;
+
+ MA_STATE(mas, mt, index, index);
+ trace_ma_op(__func__, &mas);
+
+ mtree_lock(mt);
+ entry = mas_erase(&mas);
+ mtree_unlock(mt);
+
+ return entry;
+}
+EXPORT_SYMBOL(mtree_erase);
+
+/**
+ * __mt_destroy() - Walk and free all nodes of a locked maple tree.
+ * @mt: The maple tree
+ *
+ * Note: Does not handle locking.
+ */
+void __mt_destroy(struct maple_tree *mt)
+{
+ void *root = mt_root_locked(mt);
+
+ rcu_assign_pointer(mt->ma_root, NULL);
+ if (xa_is_node(root))
+ mte_destroy_walk(root, mt);
+
+ mt->ma_flags = 0;
+}
+EXPORT_SYMBOL_GPL(__mt_destroy);
+
+/**
+ * mtree_destroy() - Destroy a maple tree
+ * @mt: The maple tree
+ *
+ * Frees all resources used by the tree. Handles locking.
+ */
+void mtree_destroy(struct maple_tree *mt)
+{
+ mtree_lock(mt);
+ __mt_destroy(mt);
+ mtree_unlock(mt);
+}
+EXPORT_SYMBOL(mtree_destroy);
+
+/**
+ * mt_find() - Search from the start up until an entry is found.
+ * @mt: The maple tree
+ * @index: Pointer which contains the start location of the search
+ * @max: The maximum value of the search range
+ *
+ * Takes RCU read lock internally to protect the search, which does not
+ * protect the returned pointer after dropping RCU read lock.
+ * See also: Documentation/core-api/maple_tree.rst
+ *
+ * If an entry is found, @index is updated to point to the next possible
+ * entry, independent of whether the found entry occupies a single index
+ * or a range of indices.
+ *
+ * Return: The entry at or after the @index or %NULL
+ */
+void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max)
+{
+ MA_STATE(mas, mt, *index, *index);
+ void *entry;
+#ifdef CONFIG_DEBUG_MAPLE_TREE
+ unsigned long copy = *index;
+#endif
+
+ trace_ma_read(__func__, &mas);
+
+ if ((*index) > max)
+ return NULL;
+
+ rcu_read_lock();
+retry:
+ entry = mas_state_walk(&mas);
+ if (mas_is_start(&mas))
+ goto retry;
+
+ if (unlikely(xa_is_zero(entry)))
+ entry = NULL;
+
+ if (entry)
+ goto unlock;
+
+ while (mas_searchable(&mas) && (mas.last < max)) {
+ entry = mas_next_entry(&mas, max);
+ if (likely(entry && !xa_is_zero(entry)))
+ break;
+ }
+
+ if (unlikely(xa_is_zero(entry)))
+ entry = NULL;
+unlock:
+ rcu_read_unlock();
+ if (likely(entry)) {
+ *index = mas.last + 1;
+#ifdef CONFIG_DEBUG_MAPLE_TREE
+ if (MT_WARN_ON(mt, (*index) && ((*index) <= copy)))
+ pr_err("index not increased! %lx <= %lx\n",
+ *index, copy);
+#endif
+ }
+
+ return entry;
+}
+EXPORT_SYMBOL(mt_find);
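+
+/*
+ * Illustrative sketch (editor's addition): an open-coded scan equivalent
+ * to mt_for_each(), relying on mt_find() advancing *index past each hit.
+ * 'my_tree' is a placeholder name.
+ *
+ *   unsigned long index = 0;
+ *   void *entry;
+ *
+ *   while ((entry = mt_find(&my_tree, &index, ULONG_MAX)) != NULL)
+ *           pr_debug("%p found, next search at %lu\n", entry, index);
+ */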
+
+/**
+ * mt_find_after() - Search from the start up until an entry is found.
+ * @mt: The maple tree
+ * @index: Pointer which contains the start location of the search
+ * @max: The maximum value to check
+ *
+ * Same as mt_find() except that it checks @index for 0 before
+ * searching. If @index == 0, the search is aborted. This covers a wrap
+ * around of @index to 0 in an iterator loop.
+ *
+ * Return: The entry at or after the @index or %NULL
+ */
+void *mt_find_after(struct maple_tree *mt, unsigned long *index,
+ unsigned long max)
+{
+ if (!(*index))
+ return NULL;
+
+ return mt_find(mt, index, max);
+}
+EXPORT_SYMBOL(mt_find_after);
+
+#ifdef CONFIG_DEBUG_MAPLE_TREE
+atomic_t maple_tree_tests_run;
+EXPORT_SYMBOL_GPL(maple_tree_tests_run);
+atomic_t maple_tree_tests_passed;
+EXPORT_SYMBOL_GPL(maple_tree_tests_passed);
+
+#ifndef __KERNEL__
+extern void kmem_cache_set_non_kernel(struct kmem_cache *, unsigned int);
+void mt_set_non_kernel(unsigned int val)
+{
+ kmem_cache_set_non_kernel(maple_node_cache, val);
+}
+
+extern unsigned long kmem_cache_get_alloc(struct kmem_cache *);
+unsigned long mt_get_alloc_size(void)
+{
+ return kmem_cache_get_alloc(maple_node_cache);
+}
+
+extern void kmem_cache_zero_nr_tallocated(struct kmem_cache *);
+void mt_zero_nr_tallocated(void)
+{
+ kmem_cache_zero_nr_tallocated(maple_node_cache);
+}
+
+extern unsigned int kmem_cache_nr_tallocated(struct kmem_cache *);
+unsigned int mt_nr_tallocated(void)
+{
+ return kmem_cache_nr_tallocated(maple_node_cache);
+}
+
+extern unsigned int kmem_cache_nr_allocated(struct kmem_cache *);
+unsigned int mt_nr_allocated(void)
+{
+ return kmem_cache_nr_allocated(maple_node_cache);
+}
+
+/*
+ * mas_dead_node() - Check if the maple state is pointing to a dead node.
+ * @mas: The maple state
+ * @index: The index to restore in @mas.
+ *
+ * Used in test code.
+ * Return: 1 if @mas has been reset to MAS_START, 0 otherwise.
+ */
+static inline int mas_dead_node(struct ma_state *mas, unsigned long index)
+{
+ if (unlikely(!mas_searchable(mas) || mas_is_start(mas)))
+ return 0;
+
+ if (likely(!mte_dead_node(mas->node)))
+ return 0;
+
+ mas_rewalk(mas, index);
+ return 1;
+}
+
+void mt_cache_shrink(void)
+{
+}
+#else
+/*
+ * mt_cache_shrink() - For testing, don't use this.
+ *
+ * Certain testcases can trigger an OOM when combined with other memory
+ * debugging configuration options. This function is used to reduce the
+ * possibility of an out of memory event due to kmem_cache objects remaining
+ * around for longer than usual.
+ */
+void mt_cache_shrink(void)
+{
+ kmem_cache_shrink(maple_node_cache);
+}
+EXPORT_SYMBOL_GPL(mt_cache_shrink);
+
+#endif /* not defined __KERNEL__ */
+/*
+ * mas_get_slot() - Get the entry in the maple state node stored at @offset.
+ * @mas: The maple state
+ * @offset: The offset into the slot array to fetch.
+ *
+ * Return: The entry stored at @offset.
+ */
+static inline struct maple_enode *mas_get_slot(struct ma_state *mas,
+ unsigned char offset)
+{
+ return mas_slot(mas, ma_slots(mas_mn(mas), mte_node_type(mas->node)),
+ offset);
+}
+
+/* Depth first search, post-order */
+static void mas_dfs_postorder(struct ma_state *mas, unsigned long max)
+{
+ struct maple_enode *p = MAS_NONE, *mn = mas->node;
+ unsigned long p_min, p_max;
+
+ mas_next_node(mas, mas_mn(mas), max);
+ if (!mas_is_none(mas))
+ return;
+
+ if (mte_is_root(mn))
+ return;
+
+ mas->node = mn;
+ mas_ascend(mas);
+ do {
+ p = mas->node;
+ p_min = mas->min;
+ p_max = mas->max;
+ mas_prev_node(mas, 0);
+ } while (!mas_is_none(mas));
+
+ mas->node = p;
+ mas->max = p_max;
+ mas->min = p_min;
+}
+
+/* Tree validations */
+static void mt_dump_node(const struct maple_tree *mt, void *entry,
+ unsigned long min, unsigned long max, unsigned int depth,
+ enum mt_dump_format format);
+static void mt_dump_range(unsigned long min, unsigned long max,
+ unsigned int depth, enum mt_dump_format format)
+{
+ static const char spaces[] = " ";
+
+ switch (format) {
+ case mt_dump_hex:
+ if (min == max)
+ pr_info("%.*s%lx: ", depth * 2, spaces, min);
+ else
+ pr_info("%.*s%lx-%lx: ", depth * 2, spaces, min, max);
+ break;
+ default:
+ case mt_dump_dec:
+ if (min == max)
+ pr_info("%.*s%lu: ", depth * 2, spaces, min);
+ else
+ pr_info("%.*s%lu-%lu: ", depth * 2, spaces, min, max);
+ }
+}
+
+static void mt_dump_entry(void *entry, unsigned long min, unsigned long max,
+ unsigned int depth, enum mt_dump_format format)
+{
+ mt_dump_range(min, max, depth, format);
+
+ if (xa_is_value(entry))
+ pr_cont("value %ld (0x%lx) [%p]\n", xa_to_value(entry),
+ xa_to_value(entry), entry);
+ else if (xa_is_zero(entry))
+ pr_cont("zero (%ld)\n", xa_to_internal(entry));
+ else if (mt_is_reserved(entry))
+ pr_cont("UNKNOWN ENTRY (%p)\n", entry);
+ else
+ pr_cont("%p\n", entry);
+}
+
+static void mt_dump_range64(const struct maple_tree *mt, void *entry,
+ unsigned long min, unsigned long max, unsigned int depth,
+ enum mt_dump_format format)
+{
+ struct maple_range_64 *node = &mte_to_node(entry)->mr64;
+ bool leaf = mte_is_leaf(entry);
+ unsigned long first = min;
+ int i;
+
+ pr_cont(" contents: ");
+ for (i = 0; i < MAPLE_RANGE64_SLOTS - 1; i++) {
+ switch (format) {
+ case mt_dump_hex:
+ pr_cont("%p %lX ", node->slot[i], node->pivot[i]);
+ break;
+ default:
+ case mt_dump_dec:
+ pr_cont("%p %lu ", node->slot[i], node->pivot[i]);
+ }
+ }
+ pr_cont("%p\n", node->slot[i]);
+ for (i = 0; i < MAPLE_RANGE64_SLOTS; i++) {
+ unsigned long last = max;
+
+ if (i < (MAPLE_RANGE64_SLOTS - 1))
+ last = node->pivot[i];
+ else if (!node->slot[i] && max != mt_node_max(entry))
+ break;
+ if (last == 0 && i > 0)
+ break;
+ if (leaf)
+ mt_dump_entry(mt_slot(mt, node->slot, i),
+ first, last, depth + 1, format);
+ else if (node->slot[i])
+ mt_dump_node(mt, mt_slot(mt, node->slot, i),
+ first, last, depth + 1, format);
+
+ if (last == max)
+ break;
+ if (last > max) {
+ switch (format) {
+ case mt_dump_hex:
+ pr_err("node %p last (%lx) > max (%lx) at pivot %d!\n",
+ node, last, max, i);
+ break;
+ default:
+ case mt_dump_dec:
+ pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
+ node, last, max, i);
+ }
+ }
+ first = last + 1;
+ }
+}
+
+static void mt_dump_arange64(const struct maple_tree *mt, void *entry,
+ unsigned long min, unsigned long max, unsigned int depth,
+ enum mt_dump_format format)
+{
+ struct maple_arange_64 *node = &mte_to_node(entry)->ma64;
+ bool leaf = mte_is_leaf(entry);
+ unsigned long first = min;
+ int i;
+
+ pr_cont(" contents: ");
+ for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++) {
+ switch (format) {
+ case mt_dump_hex:
+ pr_cont("%lx ", node->gap[i]);
+ break;
+ default:
+ case mt_dump_dec:
+ pr_cont("%lu ", node->gap[i]);
+ }
+ }
+ pr_cont("| %02X %02X| ", node->meta.end, node->meta.gap);
+ for (i = 0; i < MAPLE_ARANGE64_SLOTS - 1; i++) {
+ switch (format) {
+ case mt_dump_hex:
+ pr_cont("%p %lX ", node->slot[i], node->pivot[i]);
+ break;
+ default:
+ case mt_dump_dec:
+ pr_cont("%p %lu ", node->slot[i], node->pivot[i]);
+ }
+ }
+ pr_cont("%p\n", node->slot[i]);
+ for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++) {
+ unsigned long last = max;
+
+ if (i < (MAPLE_ARANGE64_SLOTS - 1))
+ last = node->pivot[i];
+ else if (!node->slot[i])
+ break;
+ if (last == 0 && i > 0)
+ break;
+ if (leaf)
+ mt_dump_entry(mt_slot(mt, node->slot, i),
+ first, last, depth + 1, format);
+ else if (node->slot[i])
+ mt_dump_node(mt, mt_slot(mt, node->slot, i),
+ first, last, depth + 1, format);
+
+ if (last == max)
+ break;
+ if (last > max) {
+ pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
+ node, last, max, i);
+ break;
+ }
+ first = last + 1;
+ }
+}
+
+static void mt_dump_node(const struct maple_tree *mt, void *entry,
+ unsigned long min, unsigned long max, unsigned int depth,
+ enum mt_dump_format format)
+{
+ struct maple_node *node = mte_to_node(entry);
+ unsigned int type = mte_node_type(entry);
+ unsigned int i;
+
+ mt_dump_range(min, max, depth, format);
+
+ pr_cont("node %p depth %d type %d parent %p", node, depth, type,
+ node ? node->parent : NULL);
+ switch (type) {
+ case maple_dense:
+ pr_cont("\n");
+ for (i = 0; i < MAPLE_NODE_SLOTS; i++) {
+ if (min + i > max)
+ pr_cont("OUT OF RANGE: ");
+ mt_dump_entry(mt_slot(mt, node->slot, i),
+ min + i, min + i, depth, format);
+ }
+ break;
+ case maple_leaf_64:
+ case maple_range_64:
+ mt_dump_range64(mt, entry, min, max, depth, format);
+ break;
+ case maple_arange_64:
+ mt_dump_arange64(mt, entry, min, max, depth, format);
+ break;
+
+ default:
+ pr_cont(" UNKNOWN TYPE\n");
+ }
+}
+
+void mt_dump(const struct maple_tree *mt, enum mt_dump_format format)
+{
+ void *entry = rcu_dereference_check(mt->ma_root, mt_locked(mt));
+
+ pr_info("maple_tree(%p) flags %X, height %u root %p\n",
+ mt, mt->ma_flags, mt_height(mt), entry);
+ if (!xa_is_node(entry))
+ mt_dump_entry(entry, 0, 0, 0, format);
+ else if (entry)
+ mt_dump_node(mt, entry, 0, mt_node_max(entry), 0, format);
+}
+EXPORT_SYMBOL_GPL(mt_dump);
+
+/*
+ * Calculate the maximum gap in a node and check if that's what is reported in
+ * the parent (unless root).
+ */
+static void mas_validate_gaps(struct ma_state *mas)
+{
+ struct maple_enode *mte = mas->node;
+ struct maple_node *p_mn, *node = mte_to_node(mte);
+ enum maple_type mt = mte_node_type(mas->node);
+ unsigned long gap = 0, max_gap = 0;
+ unsigned long p_end, p_start = mas->min;
+ unsigned char p_slot, offset;
+ unsigned long *gaps = NULL;
+ unsigned long *pivots = ma_pivots(node, mt);
+ unsigned int i;
+
+ if (ma_is_dense(mt)) {
+ for (i = 0; i < mt_slot_count(mte); i++) {
+ if (mas_get_slot(mas, i)) {
+ if (gap > max_gap)
+ max_gap = gap;
+ gap = 0;
+ continue;
+ }
+ gap++;
+ }
+ goto counted;
+ }
+
+ gaps = ma_gaps(node, mt);
+ for (i = 0; i < mt_slot_count(mte); i++) {
+ p_end = mas_safe_pivot(mas, pivots, i, mt);
+
+ if (!gaps) {
+ if (!mas_get_slot(mas, i))
+ gap = p_end - p_start + 1;
+ } else {
+ void *entry = mas_get_slot(mas, i);
+
+ gap = gaps[i];
+ MT_BUG_ON(mas->tree, !entry);
+
+ if (gap > p_end - p_start + 1) {
+ pr_err("%p[%u] %lu > %lu - %lu + 1 (%lu)\n",
+ mas_mn(mas), i, gap, p_end, p_start,
+ p_end - p_start + 1);
+ MT_BUG_ON(mas->tree, gap > p_end - p_start + 1);
+ }
+ }
+
+ if (gap > max_gap)
+ max_gap = gap;
+
+ p_start = p_end + 1;
+ if (p_end >= mas->max)
+ break;
+ }
+
+counted:
+ if (mt == maple_arange_64) {
+ offset = ma_meta_gap(node, mt);
+ if (offset > i) {
+ pr_err("gap offset %p[%u] is invalid\n", node, offset);
+ MT_BUG_ON(mas->tree, 1);
+ }
+
+ if (gaps[offset] != max_gap) {
+ pr_err("gap %p[%u] is not the largest gap %lu\n",
+ node, offset, max_gap);
+ MT_BUG_ON(mas->tree, 1);
+ }
+
+ MT_BUG_ON(mas->tree, !gaps);
+ for (i++ ; i < mt_slot_count(mte); i++) {
+ if (gaps[i] != 0) {
+ pr_err("gap %p[%u] beyond node limit != 0\n",
+ node, i);
+ MT_BUG_ON(mas->tree, 1);
+ }
+ }
+ }
+
+ if (mte_is_root(mte))
+ return;
+
+ p_slot = mte_parent_slot(mas->node);
+ p_mn = mte_parent(mte);
+ MT_BUG_ON(mas->tree, max_gap > mas->max);
+ if (ma_gaps(p_mn, mas_parent_type(mas, mte))[p_slot] != max_gap) {
+ pr_err("gap %p[%u] != %lu\n", p_mn, p_slot, max_gap);
+ mt_dump(mas->tree, mt_dump_hex);
+ MT_BUG_ON(mas->tree, 1);
+ }
+}
+
+static void mas_validate_parent_slot(struct ma_state *mas)
+{
+ struct maple_node *parent;
+ struct maple_enode *node;
+ enum maple_type p_type;
+ unsigned char p_slot;
+ void __rcu **slots;
+ int i;
+
+ if (mte_is_root(mas->node))
+ return;
+
+ p_slot = mte_parent_slot(mas->node);
+ p_type = mas_parent_type(mas, mas->node);
+ parent = mte_parent(mas->node);
+ slots = ma_slots(parent, p_type);
+ MT_BUG_ON(mas->tree, mas_mn(mas) == parent);
+
+ /* Check prev/next parent slot for duplicate node entry */
+
+ for (i = 0; i < mt_slots[p_type]; i++) {
+ node = mas_slot(mas, slots, i);
+ if (i == p_slot) {
+ if (node != mas->node)
+ pr_err("parent %p[%u] does not have %p\n",
+ parent, i, mas_mn(mas));
+ MT_BUG_ON(mas->tree, node != mas->node);
+ } else if (node == mas->node) {
+ pr_err("Invalid child %p at parent %p[%u] p_slot %u\n",
+ mas_mn(mas), parent, i, p_slot);
+ MT_BUG_ON(mas->tree, node == mas->node);
+ }
+ }
+}
+
+static void mas_validate_child_slot(struct ma_state *mas)
+{
+ enum maple_type type = mte_node_type(mas->node);
+ void __rcu **slots = ma_slots(mte_to_node(mas->node), type);
+ unsigned long *pivots = ma_pivots(mte_to_node(mas->node), type);
+ struct maple_enode *child;
+ unsigned char i;
+
+ if (mte_is_leaf(mas->node))
+ return;
+
+ for (i = 0; i < mt_slots[type]; i++) {
+ child = mas_slot(mas, slots, i);
+
+ if (!child) {
+ pr_err("Non-leaf node lacks child at %p[%u]\n",
+ mas_mn(mas), i);
+ MT_BUG_ON(mas->tree, 1);
+ }
+
+ if (mte_parent_slot(child) != i) {
+ pr_err("Slot error at %p[%u]: child %p has pslot %u\n",
+ mas_mn(mas), i, mte_to_node(child),
+ mte_parent_slot(child));
+ MT_BUG_ON(mas->tree, 1);
+ }
+
+ if (mte_parent(child) != mte_to_node(mas->node)) {
+ pr_err("child %p has parent %p not %p\n",
+ mte_to_node(child), mte_parent(child),
+ mte_to_node(mas->node));
+ MT_BUG_ON(mas->tree, 1);
+ }
+
+ if (i < mt_pivots[type] && pivots[i] == mas->max)
+ break;
+ }
+}
+
+/*
+ * Validate all pivots are within mas->min and mas->max, check metadata ends
+ * where the maximum ends and ensure there are no slots or pivots set outside
+ * the end of the data.
+ */
+static void mas_validate_limits(struct ma_state *mas)
+{
+ int i;
+ unsigned long prev_piv = 0;
+ enum maple_type type = mte_node_type(mas->node);
+ void __rcu **slots = ma_slots(mte_to_node(mas->node), type);
+ unsigned long *pivots = ma_pivots(mas_mn(mas), type);
+
+ for (i = 0; i < mt_slots[type]; i++) {
+ unsigned long piv;
+
+ piv = mas_safe_pivot(mas, pivots, i, type);
+
+ if (!piv && (i != 0)) {
+ pr_err("Missing node limit pivot at %p[%u]\n",
+ mas_mn(mas), i);
+ MAS_WARN_ON(mas, 1);
+ }
+
+ if (prev_piv > piv) {
+ pr_err("%p[%u] piv %lu < prev_piv %lu\n",
+ mas_mn(mas), i, piv, prev_piv);
+ MAS_WARN_ON(mas, piv < prev_piv);
+ }
+
+ if (piv < mas->min) {
+ pr_err("%p[%u] %lu < %lu\n", mas_mn(mas), i,
+ piv, mas->min);
+ MAS_WARN_ON(mas, piv < mas->min);
+ }
+ if (piv > mas->max) {
+ pr_err("%p[%u] %lu > %lu\n", mas_mn(mas), i,
+ piv, mas->max);
+ MAS_WARN_ON(mas, piv > mas->max);
+ }
+ prev_piv = piv;
+ if (piv == mas->max)
+ break;
+ }
+
+ if (mas_data_end(mas) != i) {
+ pr_err("node %p: data_end %u != the last slot offset %u\n",
+ mas_mn(mas), mas_data_end(mas), i);
+ MT_BUG_ON(mas->tree, 1);
+ }
+
+ for (i += 1; i < mt_slots[type]; i++) {
+ void *entry = mas_slot(mas, slots, i);
+
+ if (entry && (i != mt_slots[type] - 1)) {
+ pr_err("%p[%u] should not have entry %p\n", mas_mn(mas),
+ i, entry);
+ MT_BUG_ON(mas->tree, entry != NULL);
+ }
+
+ if (i < mt_pivots[type]) {
+ unsigned long piv = pivots[i];
+
+ if (!piv)
+ continue;
+
+ pr_err("%p[%u] should not have piv %lu\n",
+ mas_mn(mas), i, piv);
+ MAS_WARN_ON(mas, i < mt_pivots[type] - 1);
+ }
+ }
+}
+
+static void mt_validate_nulls(struct maple_tree *mt)
+{
+ void *entry, *last = (void *)1;
+ unsigned char offset = 0;
+ void __rcu **slots;
+ MA_STATE(mas, mt, 0, 0);
+
+ mas_start(&mas);
+ if (mas_is_none(&mas) || (mas.node == MAS_ROOT))
+ return;
+
+ while (!mte_is_leaf(mas.node))
+ mas_descend(&mas);
+
+ slots = ma_slots(mte_to_node(mas.node), mte_node_type(mas.node));
+ do {
+ entry = mas_slot(&mas, slots, offset);
+ if (!last && !entry) {
+ pr_err("Sequential nulls end at %p[%u]\n",
+ mas_mn(&mas), offset);
+ }
+ MT_BUG_ON(mt, !last && !entry);
+ last = entry;
+ if (offset == mas_data_end(&mas)) {
+ mas_next_node(&mas, mas_mn(&mas), ULONG_MAX);
+ if (mas_is_none(&mas))
+ return;
+ offset = 0;
+ slots = ma_slots(mte_to_node(mas.node),
+ mte_node_type(mas.node));
+ } else {
+ offset++;
+ }
+
+ } while (!mas_is_none(&mas));
+}
+
+/*
+ * Validate a maple tree by checking:
+ * 1. The limits (pivots are within mas->min to mas->max)
+ * 2. The gap is correctly set in the parents
+ */
+void mt_validate(struct maple_tree *mt)
+{
+ unsigned char end;
+
+ MA_STATE(mas, mt, 0, 0);
+ rcu_read_lock();
+ mas_start(&mas);
+ if (!mas_searchable(&mas))
+ goto done;
+
+ while (!mte_is_leaf(mas.node))
+ mas_descend(&mas);
+
+ while (!mas_is_none(&mas)) {
+ MAS_WARN_ON(&mas, mte_dead_node(mas.node));
+ end = mas_data_end(&mas);
+ if (MAS_WARN_ON(&mas, (end < mt_min_slot_count(mas.node)) &&
+ (mas.max != ULONG_MAX))) {
+ pr_err("Invalid size %u of %p\n", end, mas_mn(&mas));
+ }
+
+ mas_validate_parent_slot(&mas);
+ mas_validate_limits(&mas);
+ mas_validate_child_slot(&mas);
+ if (mt_is_alloc(mt))
+ mas_validate_gaps(&mas);
+ mas_dfs_postorder(&mas, ULONG_MAX);
+ }
+ mt_validate_nulls(mt);
+done:
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(mt_validate);
+
+void mas_dump(const struct ma_state *mas)
+{
+ pr_err("MAS: tree=%p enode=%p ", mas->tree, mas->node);
+ if (mas_is_none(mas))
+ pr_err("(MAS_NONE) ");
+ else if (mas_is_ptr(mas))
+ pr_err("(MAS_ROOT) ");
+ else if (mas_is_start(mas))
+ pr_err("(MAS_START) ");
+ else if (mas_is_paused(mas))
+ pr_err("(MAS_PAUSED) ");
+
+ pr_err("[%u] index=%lx last=%lx\n", mas->offset, mas->index, mas->last);
+ pr_err(" min=%lx max=%lx alloc=%p, depth=%u, flags=%x\n",
+ mas->min, mas->max, mas->alloc, mas->depth, mas->mas_flags);
+ if (mas->index > mas->last)
+ pr_err("Check index & last\n");
+}
+EXPORT_SYMBOL_GPL(mas_dump);
+
+void mas_wr_dump(const struct ma_wr_state *wr_mas)
+{
+ pr_err("WR_MAS: node=%p r_min=%lx r_max=%lx\n",
+ wr_mas->node, wr_mas->r_min, wr_mas->r_max);
+ pr_err(" type=%u off_end=%u, node_end=%u, end_piv=%lx\n",
+ wr_mas->type, wr_mas->offset_end, wr_mas->node_end,
+ wr_mas->end_piv);
+}
+EXPORT_SYMBOL_GPL(mas_wr_dump);
+
+#endif /* CONFIG_DEBUG_MAPLE_TREE */
diff --git a/lib/math/Kconfig b/lib/math/Kconfig
new file mode 100644
index 000000000..0634b428d
--- /dev/null
+++ b/lib/math/Kconfig
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config CORDIC
+ tristate "CORDIC algorithm"
+ help
+ This option provides an implementation of the CORDIC algorithm;
+ calculations are in fixed point. The module will be called cordic.
+
+config PRIME_NUMBERS
+ tristate "Simple prime number generator for testing"
+ help
+ This option provides a simple prime number generator for test
+ modules.
+
+ If unsure, say N.
+
+config RATIONAL
+ tristate
diff --git a/lib/math/Makefile b/lib/math/Makefile
new file mode 100644
index 000000000..91fcdb0c9
--- /dev/null
+++ b/lib/math/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-y += div64.o gcd.o lcm.o int_log.o int_pow.o int_sqrt.o reciprocal_div.o
+
+obj-$(CONFIG_CORDIC) += cordic.o
+obj-$(CONFIG_PRIME_NUMBERS) += prime_numbers.o
+obj-$(CONFIG_RATIONAL) += rational.o
+
+obj-$(CONFIG_TEST_DIV64) += test_div64.o
+obj-$(CONFIG_RATIONAL_KUNIT_TEST) += rational-test.o
diff --git a/lib/math/cordic.c b/lib/math/cordic.c
new file mode 100644
index 000000000..8ef27c129
--- /dev/null
+++ b/lib/math/cordic.c
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2011 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include <linux/module.h>
+#include <linux/cordic.h>
+
+static const s32 arctan_table[] = {
+ 2949120,
+ 1740967,
+ 919879,
+ 466945,
+ 234379,
+ 117304,
+ 58666,
+ 29335,
+ 14668,
+ 7334,
+ 3667,
+ 1833,
+ 917,
+ 458,
+ 229,
+ 115,
+ 57,
+ 29
+};
+
+/*
+ * cordic_calc_iq() - calculates the i/q coordinate for a given angle
+ *
+ * theta: angle in degrees for which the i/q coordinate is to be calculated
+ * coord: the resulting i/q coordinate, returned by value
+ */
+struct cordic_iq cordic_calc_iq(s32 theta)
+{
+ struct cordic_iq coord;
+ s32 angle, valtmp;
+ unsigned iter;
+ int signx = 1;
+ int signtheta;
+
+ coord.i = CORDIC_ANGLE_GEN;
+ coord.q = 0;
+ angle = 0;
+
+ theta = CORDIC_FIXED(theta);
+ signtheta = (theta < 0) ? -1 : 1;
+ theta = ((theta + CORDIC_FIXED(180) * signtheta) % CORDIC_FIXED(360)) -
+ CORDIC_FIXED(180) * signtheta;
+
+ if (CORDIC_FLOAT(theta) > 90) {
+ theta -= CORDIC_FIXED(180);
+ signx = -1;
+ } else if (CORDIC_FLOAT(theta) < -90) {
+ theta += CORDIC_FIXED(180);
+ signx = -1;
+ }
+
+ for (iter = 0; iter < CORDIC_NUM_ITER; iter++) {
+ if (theta > angle) {
+ valtmp = coord.i - (coord.q >> iter);
+ coord.q += (coord.i >> iter);
+ angle += arctan_table[iter];
+ } else {
+ valtmp = coord.i + (coord.q >> iter);
+ coord.q -= (coord.i >> iter);
+ angle -= arctan_table[iter];
+ }
+ coord.i = valtmp;
+ }
+
+ coord.i *= signx;
+ coord.q *= signx;
+ return coord;
+}
+EXPORT_SYMBOL(cordic_calc_iq);
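+
+/*
+ * Illustrative sketch (editor's addition): computing the I/Q pair for a
+ * 30 degree angle. The result components approximate the cosine and sine
+ * of the angle in the fixed-point format described by CORDIC_FIXED() and
+ * CORDIC_FLOAT() in linux/cordic.h.
+ *
+ *   struct cordic_iq iq = cordic_calc_iq(30);
+ */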
+
+MODULE_DESCRIPTION("CORDIC algorithm");
+MODULE_AUTHOR("Broadcom Corporation");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/lib/math/div64.c b/lib/math/div64.c
new file mode 100644
index 000000000..55a81782e
--- /dev/null
+++ b/lib/math/div64.c
@@ -0,0 +1,225 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2003 Bernardo Innocenti <bernie@develer.com>
+ *
+ * Based on former do_div() implementation from asm-parisc/div64.h:
+ * Copyright (C) 1999 Hewlett-Packard Co
+ * Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ *
+ *
+ * Generic C version of 64bit/32bit division and modulo, with
+ * 64bit result and 32bit remainder.
+ *
+ * The fast case for (n>>32 == 0) is handled inline by do_div().
+ *
+ * Code generated for this function might be very inefficient
+ * for some CPUs. __div64_32() can be overridden by linking arch-specific
+ * assembly versions such as arch/ppc/lib/div64.S and arch/sh/lib/div64.S
+ * or by defining a preprocessor macro in arch/include/asm/div64.h.
+ */
+
+#include <linux/bitops.h>
+#include <linux/export.h>
+#include <linux/math.h>
+#include <linux/math64.h>
+#include <linux/log2.h>
+
+/* Not needed on 64bit architectures */
+#if BITS_PER_LONG == 32
+
+#ifndef __div64_32
+uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
+{
+ uint64_t rem = *n;
+ uint64_t b = base;
+ uint64_t res, d = 1;
+ uint32_t high = rem >> 32;
+
+ /* Reduce the thing a bit first */
+ res = 0;
+ if (high >= base) {
+ high /= base;
+ res = (uint64_t) high << 32;
+ rem -= (uint64_t) (high*base) << 32;
+ }
+
+ while ((int64_t)b > 0 && b < rem) {
+ b = b+b;
+ d = d+d;
+ }
+
+ do {
+ if (rem >= b) {
+ rem -= b;
+ res += d;
+ }
+ b >>= 1;
+ d >>= 1;
+ } while (d);
+
+ *n = res;
+ return rem;
+}
+EXPORT_SYMBOL(__div64_32);
+#endif
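+
+/*
+ * Editor's note: the usual entry point is the do_div() macro, which
+ * divides a 64-bit variable in place and returns the 32-bit remainder:
+ *
+ *   u64 n = 1000000007;
+ *   u32 rem = do_div(n, 10);
+ *
+ * afterwards n == 100000000 and rem == 7.
+ */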
+
+#ifndef div_s64_rem
+s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
+{
+ u64 quotient;
+
+ if (dividend < 0) {
+ quotient = div_u64_rem(-dividend, abs(divisor), (u32 *)remainder);
+ *remainder = -*remainder;
+ if (divisor > 0)
+ quotient = -quotient;
+ } else {
+ quotient = div_u64_rem(dividend, abs(divisor), (u32 *)remainder);
+ if (divisor < 0)
+ quotient = -quotient;
+ }
+ return quotient;
+}
+EXPORT_SYMBOL(div_s64_rem);
+#endif
+
+/*
+ * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
+ * @dividend: 64bit dividend
+ * @divisor: 64bit divisor
+ * @remainder: 64bit remainder
+ *
+ * This implementation is comparable to the algorithm used by div64_u64.
+ * But this operation, which includes math for calculating the remainder,
+ * is kept distinct to avoid slowing down the div64_u64 operation on 32bit
+ * systems.
+ */
+#ifndef div64_u64_rem
+u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
+{
+ u32 high = divisor >> 32;
+ u64 quot;
+
+ if (high == 0) {
+ u32 rem32;
+ quot = div_u64_rem(dividend, divisor, &rem32);
+ *remainder = rem32;
+ } else {
+ int n = fls(high);
+ quot = div_u64(dividend >> n, divisor >> n);
+
+ if (quot != 0)
+ quot--;
+
+ *remainder = dividend - quot * divisor;
+ if (*remainder >= divisor) {
+ quot++;
+ *remainder -= divisor;
+ }
+ }
+
+ return quot;
+}
+EXPORT_SYMBOL(div64_u64_rem);
+#endif
+
+/*
+ * div64_u64 - unsigned 64bit divide with 64bit divisor
+ * @dividend: 64bit dividend
+ * @divisor: 64bit divisor
+ *
+ * This implementation is a modified version of the algorithm proposed
+ * by the book 'Hacker's Delight'. The original source and full proof
+ * can be found below and are available for use without restriction.
+ *
+ * 'http://www.hackersdelight.org/hdcodetxt/divDouble.c.txt'
+ */
+#ifndef div64_u64
+u64 div64_u64(u64 dividend, u64 divisor)
+{
+ u32 high = divisor >> 32;
+ u64 quot;
+
+ if (high == 0) {
+ quot = div_u64(dividend, divisor);
+ } else {
+ int n = fls(high);
+ quot = div_u64(dividend >> n, divisor >> n);
+
+ if (quot != 0)
+ quot--;
+ if ((dividend - quot * divisor) >= divisor)
+ quot++;
+ }
+
+ return quot;
+}
+EXPORT_SYMBOL(div64_u64);
+#endif
+
+#ifndef div64_s64
+s64 div64_s64(s64 dividend, s64 divisor)
+{
+ s64 quot, t;
+
+ quot = div64_u64(abs(dividend), abs(divisor));
+ t = (dividend ^ divisor) >> 63;
+
+ return (quot ^ t) - t;
+}
+EXPORT_SYMBOL(div64_s64);
+#endif
+
+#endif /* BITS_PER_LONG == 32 */
+
+/*
+ * Iterative div/mod for use when dividend is not expected to be much
+ * bigger than divisor.
+ */
+u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
+{
+ return __iter_div_u64_rem(dividend, divisor, remainder);
+}
+EXPORT_SYMBOL(iter_div_u64_rem);
+
+#ifndef mul_u64_u64_div_u64
+u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 c)
+{
+ u64 res = 0, div, rem;
+ int shift;
+
+ /* can a * b overflow ? */
+ if (ilog2(a) + ilog2(b) > 62) {
+ /*
+ * (b * a) / c is equal to
+ *
+ * (b / c) * a +
+ * (b % c) * a / c
+ *
+ * if nothing overflows. Can the 1st multiplication
+ * overflow? Yes, but we do not care: this can only
+ * happen if the end result can't fit in u64 anyway.
+ *
+ * So the code below does
+ *
+ * res = (b / c) * a;
+ * b = b % c;
+ */
+ div = div64_u64_rem(b, c, &rem);
+ res = div * a;
+ b = rem;
+
+ shift = ilog2(a) + ilog2(b) - 62;
+ if (shift > 0) {
+ /* drop precision */
+ b >>= shift;
+ c >>= shift;
+ if (!c)
+ return res;
+ }
+ }
+
+ return res + div64_u64(a * b, c);
+}
+EXPORT_SYMBOL(mul_u64_u64_div_u64);
+#endif
diff --git a/lib/math/gcd.c b/lib/math/gcd.c
new file mode 100644
index 000000000..e3b042214
--- /dev/null
+++ b/lib/math/gcd.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/kernel.h>
+#include <linux/gcd.h>
+#include <linux/export.h>
+
+/*
+ * This implements the binary GCD algorithm. (Often attributed to Stein,
+ * but as Knuth has noted, appears in a first-century Chinese math text.)
+ *
+ * This is faster than the division-based algorithm even on x86, which
+ * has decent hardware division.
+ */
+
+#if !defined(CONFIG_CPU_NO_EFFICIENT_FFS)
+
+/* If __ffs is available, the even/odd algorithm benchmarks slower. */
+
+/**
+ * gcd - calculate and return the greatest common divisor of 2 unsigned longs
+ * @a: first value
+ * @b: second value
+ */
+unsigned long gcd(unsigned long a, unsigned long b)
+{
+ unsigned long r = a | b;
+
+ if (!a || !b)
+ return r;
+
+ b >>= __ffs(b);
+ if (b == 1)
+ return r & -r;
+
+ for (;;) {
+ a >>= __ffs(a);
+ if (a == 1)
+ return r & -r;
+ if (a == b)
+ return a << __ffs(r);
+
+ if (a < b)
+ swap(a, b);
+ a -= b;
+ }
+}
+
+#else
+
+/* If normalization is done by loops, the even/odd algorithm is a win. */
+unsigned long gcd(unsigned long a, unsigned long b)
+{
+ unsigned long r = a | b;
+
+ if (!a || !b)
+ return r;
+
+ /* Isolate lsbit of r */
+ r &= -r;
+
+ while (!(b & r))
+ b >>= 1;
+ if (b == r)
+ return r;
+
+ for (;;) {
+ while (!(a & r))
+ a >>= 1;
+ if (a == r)
+ return r;
+ if (a == b)
+ return a;
+
+ if (a < b)
+ swap(a, b);
+ a -= b;
+ a >>= 1;
+ if (a & r)
+ a += b;
+ a >>= 1;
+ }
+}
+
+#endif
+
+EXPORT_SYMBOL_GPL(gcd);
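+
+/*
+ * Usage sketch (an illustrative addition, not part of the upstream file):
+ * reduce a fraction to lowest terms with the gcd() helper above.
+ */
+static inline void __maybe_unused example_reduce(unsigned long *num,
+						 unsigned long *den)
+{
+	unsigned long g = gcd(*num, *den);	/* note: gcd(0, x) == x */
+
+	if (g) {
+		*num /= g;
+		*den /= g;
+	}
+}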
diff --git a/lib/math/int_log.c b/lib/math/int_log.c
new file mode 100644
index 000000000..8f9da3a2a
--- /dev/null
+++ b/lib/math/int_log.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: LGPL-2.1-or-later
+/*
+ * Provides fixed-point logarithm operations.
+ *
+ * Copyright (C) 2006 Christoph Pfister (christophpfister@gmail.com)
+ */
+
+#include <linux/bitops.h>
+#include <linux/export.h>
+#include <linux/int_log.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include <asm/bug.h>
+
+static const unsigned short logtable[256] = {
+ 0x0000, 0x0171, 0x02e0, 0x044e, 0x05ba, 0x0725, 0x088e, 0x09f7,
+ 0x0b5d, 0x0cc3, 0x0e27, 0x0f8a, 0x10eb, 0x124b, 0x13aa, 0x1508,
+ 0x1664, 0x17bf, 0x1919, 0x1a71, 0x1bc8, 0x1d1e, 0x1e73, 0x1fc6,
+ 0x2119, 0x226a, 0x23ba, 0x2508, 0x2656, 0x27a2, 0x28ed, 0x2a37,
+ 0x2b80, 0x2cc8, 0x2e0f, 0x2f54, 0x3098, 0x31dc, 0x331e, 0x345f,
+ 0x359f, 0x36de, 0x381b, 0x3958, 0x3a94, 0x3bce, 0x3d08, 0x3e41,
+ 0x3f78, 0x40af, 0x41e4, 0x4319, 0x444c, 0x457f, 0x46b0, 0x47e1,
+ 0x4910, 0x4a3f, 0x4b6c, 0x4c99, 0x4dc5, 0x4eef, 0x5019, 0x5142,
+ 0x526a, 0x5391, 0x54b7, 0x55dc, 0x5700, 0x5824, 0x5946, 0x5a68,
+ 0x5b89, 0x5ca8, 0x5dc7, 0x5ee5, 0x6003, 0x611f, 0x623a, 0x6355,
+ 0x646f, 0x6588, 0x66a0, 0x67b7, 0x68ce, 0x69e4, 0x6af8, 0x6c0c,
+ 0x6d20, 0x6e32, 0x6f44, 0x7055, 0x7165, 0x7274, 0x7383, 0x7490,
+ 0x759d, 0x76aa, 0x77b5, 0x78c0, 0x79ca, 0x7ad3, 0x7bdb, 0x7ce3,
+ 0x7dea, 0x7ef0, 0x7ff6, 0x80fb, 0x81ff, 0x8302, 0x8405, 0x8507,
+ 0x8608, 0x8709, 0x8809, 0x8908, 0x8a06, 0x8b04, 0x8c01, 0x8cfe,
+ 0x8dfa, 0x8ef5, 0x8fef, 0x90e9, 0x91e2, 0x92db, 0x93d2, 0x94ca,
+ 0x95c0, 0x96b6, 0x97ab, 0x98a0, 0x9994, 0x9a87, 0x9b7a, 0x9c6c,
+ 0x9d5e, 0x9e4f, 0x9f3f, 0xa02e, 0xa11e, 0xa20c, 0xa2fa, 0xa3e7,
+ 0xa4d4, 0xa5c0, 0xa6ab, 0xa796, 0xa881, 0xa96a, 0xaa53, 0xab3c,
+ 0xac24, 0xad0c, 0xadf2, 0xaed9, 0xafbe, 0xb0a4, 0xb188, 0xb26c,
+ 0xb350, 0xb433, 0xb515, 0xb5f7, 0xb6d9, 0xb7ba, 0xb89a, 0xb97a,
+ 0xba59, 0xbb38, 0xbc16, 0xbcf4, 0xbdd1, 0xbead, 0xbf8a, 0xc065,
+ 0xc140, 0xc21b, 0xc2f5, 0xc3cf, 0xc4a8, 0xc580, 0xc658, 0xc730,
+ 0xc807, 0xc8de, 0xc9b4, 0xca8a, 0xcb5f, 0xcc34, 0xcd08, 0xcddc,
+ 0xceaf, 0xcf82, 0xd054, 0xd126, 0xd1f7, 0xd2c8, 0xd399, 0xd469,
+ 0xd538, 0xd607, 0xd6d6, 0xd7a4, 0xd872, 0xd93f, 0xda0c, 0xdad9,
+ 0xdba5, 0xdc70, 0xdd3b, 0xde06, 0xded0, 0xdf9a, 0xe063, 0xe12c,
+ 0xe1f5, 0xe2bd, 0xe385, 0xe44c, 0xe513, 0xe5d9, 0xe69f, 0xe765,
+ 0xe82a, 0xe8ef, 0xe9b3, 0xea77, 0xeb3b, 0xebfe, 0xecc1, 0xed83,
+ 0xee45, 0xef06, 0xefc8, 0xf088, 0xf149, 0xf209, 0xf2c8, 0xf387,
+ 0xf446, 0xf505, 0xf5c3, 0xf680, 0xf73e, 0xf7fb, 0xf8b7, 0xf973,
+ 0xfa2f, 0xfaea, 0xfba5, 0xfc60, 0xfd1a, 0xfdd4, 0xfe8e, 0xff47,
+};
+
+unsigned int intlog2(u32 value)
+{
+ /**
+ * returns: log2(value) * 2^24
+ * wrong result if value = 0 (log2(0) is undefined)
+ */
+ unsigned int msb;
+ unsigned int logentry;
+ unsigned int significand;
+ unsigned int interpolation;
+
+ if (unlikely(value == 0)) {
+ WARN_ON(1);
+ return 0;
+ }
+
+ /* first detect the msb (count begins at 0) */
+ msb = fls(value) - 1;
+
+	/**
+	 * now we use the logtable according to the following method:
+	 *
+	 * log2(2^x * y) * 2^24 = x * 2^24 + log2(y) * 2^24
+	 * where x = msb and therefore 1 <= y < 2.
+	 * First, y is determined by shifting the value left
+	 * so that the msb lands in bit 31:
+	 * 0x00231f56 -> 0x8C7D5800
+	 * The result is y * 2^31 -> "significand".
+	 * Then the highest 9 bits are used for the table lookup,
+	 * discarding the highest bit because it's always set.
+	 * The highest nine bits in our example are 100011000,
+	 * so we would use entry 0x18.
+	 */
+ significand = value << (31 - msb);
+ logentry = (significand >> 23) % ARRAY_SIZE(logtable);
+
+	/**
+	 * The last step is interpolation, needed because of the
+	 * limitations of the log table. The error is that part of
+	 * the significand which isn't used for the lookup; we
+	 * compute the ratio between the error and the next table entry
+	 * and interpolate between the log table entry used and the
+	 * next one. The biggest error possible is 0x7fffff
+	 * (in our example it's 0x7D5800); the value needed for the
+	 * next table entry is 0x800000, so the interpolation is
+	 * (error / 0x800000) * (logtable_next - logtable_current).
+	 * In the implementation the division is moved to the end for
+	 * better accuracy. There is also an overflow correction if
+	 * logtable_next is 256.
+	 */
+ interpolation = ((significand & 0x7fffff) *
+ ((logtable[(logentry + 1) % ARRAY_SIZE(logtable)] -
+ logtable[logentry]) & 0xffff)) >> 15;
+
+ /* now we return the result */
+ return ((msb << 24) + (logtable[logentry] << 8) + interpolation);
+}
+EXPORT_SYMBOL(intlog2);
+
+unsigned int intlog10(u32 value)
+{
+ /**
+ * returns: log10(value) * 2^24
+ * wrong result if value = 0 (log10(0) is undefined)
+ */
+ u64 log;
+
+ if (unlikely(value == 0)) {
+ WARN_ON(1);
+ return 0;
+ }
+
+ log = intlog2(value);
+
+ /**
+ * we use the following method:
+ * log10(x) = log2(x) * log10(2)
+ */
+
+ return (log * 646456993) >> 31;
+}
+EXPORT_SYMBOL(intlog10);
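+
+/*
+ * Sanity sketch (an illustrative addition, not part of the upstream file):
+ * exact powers of two land on a table entry with zero interpolation error,
+ * so the first check is exact; the second allows for the bounded table and
+ * rounding error of the fixed-point arithmetic.
+ */
+static void __maybe_unused example_intlog_checks(void)
+{
+	/* log2(8) == 3, scaled by 2^24 */
+	WARN_ON(intlog2(8) != (3U << 24));
+	/* log10(1000) == 3, scaled by 2^24, within a small tolerance */
+	WARN_ON(abs((int)intlog10(1000) - (3 << 24)) > (1 << 10));
+}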
diff --git a/lib/math/int_pow.c b/lib/math/int_pow.c
new file mode 100644
index 000000000..0cf426e69
--- /dev/null
+++ b/lib/math/int_pow.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * An integer based power function
+ *
+ * Derived from drivers/video/backlight/pwm_bl.c
+ */
+
+#include <linux/export.h>
+#include <linux/math.h>
+#include <linux/types.h>
+
+/**
+ * int_pow - computes the exponentiation of the given base and exponent
+ * @base: base which will be raised to the given power
+ * @exp: power to be raised to
+ *
+ * Computes: pow(base, exp), i.e. @base raised to the @exp power
+ */
+u64 int_pow(u64 base, unsigned int exp)
+{
+ u64 result = 1;
+
+ while (exp) {
+ if (exp & 1)
+ result *= base;
+ exp >>= 1;
+ base *= base;
+ }
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(int_pow);
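+
+/*
+ * Usage sketch (an illustrative addition, not part of the upstream file):
+ * the loop above is square-and-multiply, so raising to the 9th power costs
+ * a handful of multiplications rather than nine.
+ */
+static u64 __maybe_unused example_nsec_per_sec(void)
+{
+	return int_pow(10, 9);	/* 1000000000 */
+}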
diff --git a/lib/math/int_sqrt.c b/lib/math/int_sqrt.c
new file mode 100644
index 000000000..a8170bb91
--- /dev/null
+++ b/lib/math/int_sqrt.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2013 Davidlohr Bueso <davidlohr.bueso@hp.com>
+ *
+ * Based on the shift-and-subtract algorithm for computing integer
+ * square root from Guy L. Steele.
+ */
+
+#include <linux/export.h>
+#include <linux/bitops.h>
+#include <linux/limits.h>
+#include <linux/math.h>
+
+/**
+ * int_sqrt - computes the integer square root
+ * @x: integer of which to calculate the sqrt
+ *
+ * Computes: floor(sqrt(x))
+ */
+unsigned long int_sqrt(unsigned long x)
+{
+ unsigned long b, m, y = 0;
+
+ if (x <= 1)
+ return x;
+
+ m = 1UL << (__fls(x) & ~1UL);
+ while (m != 0) {
+ b = y + m;
+ y >>= 1;
+
+ if (x >= b) {
+ x -= b;
+ y += m;
+ }
+ m >>= 2;
+ }
+
+ return y;
+}
+EXPORT_SYMBOL(int_sqrt);
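+
+/*
+ * Worked example (added for illustration): for x = 1000000, __fls(x) == 19,
+ * so the loop starts with m = 1UL << 18 and converges on y = 1000; for
+ * x = 999999 it yields 999, i.e. floor(sqrt(x)) in both cases.
+ */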
+
+#if BITS_PER_LONG < 64
+/**
+ * int_sqrt64 - strongly typed int_sqrt function, for when a 64-bit input is
+ * expected.
+ * @x: 64bit integer of which to calculate the sqrt
+ */
+u32 int_sqrt64(u64 x)
+{
+ u64 b, m, y = 0;
+
+ if (x <= ULONG_MAX)
+ return int_sqrt((unsigned long) x);
+
+ m = 1ULL << ((fls64(x) - 1) & ~1ULL);
+ while (m != 0) {
+ b = y + m;
+ y >>= 1;
+
+ if (x >= b) {
+ x -= b;
+ y += m;
+ }
+ m >>= 2;
+ }
+
+ return y;
+}
+EXPORT_SYMBOL(int_sqrt64);
+#endif
diff --git a/lib/math/lcm.c b/lib/math/lcm.c
new file mode 100644
index 000000000..6e0b2e736
--- /dev/null
+++ b/lib/math/lcm.c
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/compiler.h>
+#include <linux/gcd.h>
+#include <linux/export.h>
+#include <linux/lcm.h>
+
+/* Lowest common multiple */
+unsigned long lcm(unsigned long a, unsigned long b)
+{
+ if (a && b)
+ return (a / gcd(a, b)) * b;
+ else
+ return 0;
+}
+EXPORT_SYMBOL_GPL(lcm);
+
+unsigned long lcm_not_zero(unsigned long a, unsigned long b)
+{
+ unsigned long l = lcm(a, b);
+
+ if (l)
+ return l;
+
+ return (b ? : a);
+}
+EXPORT_SYMBOL_GPL(lcm_not_zero);
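+
+/*
+ * Usage sketch (an illustrative addition, not part of the upstream file):
+ * combine two alignment requirements, using lcm_not_zero() so that a zero
+ * ("no constraint") argument falls back to the other value.
+ */
+static inline unsigned long __maybe_unused
+example_combine_alignment(unsigned long a, unsigned long b)
+{
+	/* lcm_not_zero(512, 4096) == 4096; lcm_not_zero(4096, 0) == 4096 */
+	return lcm_not_zero(a, b);
+}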
diff --git a/lib/math/prime_numbers.c b/lib/math/prime_numbers.c
new file mode 100644
index 000000000..d42cebf74
--- /dev/null
+++ b/lib/math/prime_numbers.c
@@ -0,0 +1,316 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#define pr_fmt(fmt) "prime numbers: " fmt
+
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/prime_numbers.h>
+#include <linux/slab.h>
+
+#define bitmap_size(nbits) (BITS_TO_LONGS(nbits) * sizeof(unsigned long))
+
+struct primes {
+ struct rcu_head rcu;
+ unsigned long last, sz;
+ unsigned long primes[];
+};
+
+#if BITS_PER_LONG == 64
+static const struct primes small_primes = {
+ .last = 61,
+ .sz = 64,
+ .primes = {
+ BIT(2) |
+ BIT(3) |
+ BIT(5) |
+ BIT(7) |
+ BIT(11) |
+ BIT(13) |
+ BIT(17) |
+ BIT(19) |
+ BIT(23) |
+ BIT(29) |
+ BIT(31) |
+ BIT(37) |
+ BIT(41) |
+ BIT(43) |
+ BIT(47) |
+ BIT(53) |
+ BIT(59) |
+ BIT(61)
+ }
+};
+#elif BITS_PER_LONG == 32
+static const struct primes small_primes = {
+ .last = 31,
+ .sz = 32,
+ .primes = {
+ BIT(2) |
+ BIT(3) |
+ BIT(5) |
+ BIT(7) |
+ BIT(11) |
+ BIT(13) |
+ BIT(17) |
+ BIT(19) |
+ BIT(23) |
+ BIT(29) |
+ BIT(31)
+ }
+};
+#else
+#error "unhandled BITS_PER_LONG"
+#endif
+
+static DEFINE_MUTEX(lock);
+static const struct primes __rcu *primes = RCU_INITIALIZER(&small_primes);
+
+static unsigned long selftest_max;
+
+static bool slow_is_prime_number(unsigned long x)
+{
+ unsigned long y = int_sqrt(x);
+
+ while (y > 1) {
+ if ((x % y) == 0)
+ break;
+ y--;
+ }
+
+ return y == 1;
+}
+
+static unsigned long slow_next_prime_number(unsigned long x)
+{
+ while (x < ULONG_MAX && !slow_is_prime_number(++x))
+ ;
+
+ return x;
+}
+
+static unsigned long clear_multiples(unsigned long x,
+ unsigned long *p,
+ unsigned long start,
+ unsigned long end)
+{
+ unsigned long m;
+
+ m = 2 * x;
+ if (m < start)
+ m = roundup(start, x);
+
+ while (m < end) {
+ __clear_bit(m, p);
+ m += x;
+ }
+
+ return x;
+}
+
+static bool expand_to_next_prime(unsigned long x)
+{
+ const struct primes *p;
+ struct primes *new;
+ unsigned long sz, y;
+
+	/* Bertrand's Postulate (or Chebyshev's theorem) states that if n > 3,
+ * there is always at least one prime p between n and 2n - 2.
+ * Equivalently, if n > 1, then there is always at least one prime p
+ * such that n < p < 2n.
+ *
+ * http://mathworld.wolfram.com/BertrandsPostulate.html
+ * https://en.wikipedia.org/wiki/Bertrand's_postulate
+ */
+ sz = 2 * x;
+ if (sz < x)
+ return false;
+
+ sz = round_up(sz, BITS_PER_LONG);
+ new = kmalloc(sizeof(*new) + bitmap_size(sz),
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!new)
+ return false;
+
+ mutex_lock(&lock);
+ p = rcu_dereference_protected(primes, lockdep_is_held(&lock));
+ if (x < p->last) {
+ kfree(new);
+ goto unlock;
+ }
+
+ /* Where memory permits, track the primes using the
+	 * Sieve of Eratosthenes: the sieve removes all multiples of known
+	 * primes from the set; what remains in the set is therefore prime.
+ */
+ bitmap_fill(new->primes, sz);
+ bitmap_copy(new->primes, p->primes, p->sz);
+ for (y = 2UL; y < sz; y = find_next_bit(new->primes, sz, y + 1))
+ new->last = clear_multiples(y, new->primes, p->sz, sz);
+ new->sz = sz;
+
+ BUG_ON(new->last <= x);
+
+ rcu_assign_pointer(primes, new);
+ if (p != &small_primes)
+ kfree_rcu((struct primes *)p, rcu);
+
+unlock:
+ mutex_unlock(&lock);
+ return true;
+}
+
+static void free_primes(void)
+{
+ const struct primes *p;
+
+ mutex_lock(&lock);
+ p = rcu_dereference_protected(primes, lockdep_is_held(&lock));
+ if (p != &small_primes) {
+ rcu_assign_pointer(primes, &small_primes);
+ kfree_rcu((struct primes *)p, rcu);
+ }
+ mutex_unlock(&lock);
+}
+
+/**
+ * next_prime_number - return the next prime number
+ * @x: the starting point for searching to test
+ *
+ * A prime number is an integer greater than 1 that is only divisible by
+ * itself and 1. The set of prime numbers is computed using the Sieve of
+ * Eratosthenes (on finding a prime, all multiples of that prime are removed
+ * from the set) enabling a fast lookup of the next prime number larger than
+ * @x. If the sieve fails (memory limitation), the search falls back to using
+ * slow trial division, up to the value of ULONG_MAX (which is reported as the
+ * final prime as a sentinel).
+ *
+ * Returns: the next prime number larger than @x
+ */
+unsigned long next_prime_number(unsigned long x)
+{
+ const struct primes *p;
+
+ rcu_read_lock();
+ p = rcu_dereference(primes);
+ while (x >= p->last) {
+ rcu_read_unlock();
+
+ if (!expand_to_next_prime(x))
+ return slow_next_prime_number(x);
+
+ rcu_read_lock();
+ p = rcu_dereference(primes);
+ }
+ x = find_next_bit(p->primes, p->last, x + 1);
+ rcu_read_unlock();
+
+ return x;
+}
+EXPORT_SYMBOL(next_prime_number);
+
+/**
+ * is_prime_number - test whether the given number is prime
+ * @x: the number to test
+ *
+ * A prime number is an integer greater than 1 that is only divisible by
+ * itself and 1. Internally a cache of prime numbers is kept (to speed up
+ * searching for sequential primes, see next_prime_number()), but if the number
+ * falls outside of that cache, its primality is tested using trial division.
+ *
+ * Returns: true if @x is prime, false for composite numbers.
+ */
+bool is_prime_number(unsigned long x)
+{
+ const struct primes *p;
+ bool result;
+
+ rcu_read_lock();
+ p = rcu_dereference(primes);
+ while (x >= p->sz) {
+ rcu_read_unlock();
+
+ if (!expand_to_next_prime(x))
+ return slow_is_prime_number(x);
+
+ rcu_read_lock();
+ p = rcu_dereference(primes);
+ }
+ result = test_bit(x, p->primes);
+ rcu_read_unlock();
+
+ return result;
+}
+EXPORT_SYMBOL(is_prime_number);
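+
+/*
+ * Usage sketch (an illustrative addition, not part of the upstream file):
+ * walk all primes below a bound; next_prime_number() grows the sieve on
+ * demand and only falls back to trial division if that allocation fails.
+ */
+static void __maybe_unused example_walk_primes(unsigned long max)
+{
+	unsigned long p;
+
+	for (p = 2; p < max; p = next_prime_number(p))
+		pr_info("%lu is prime\n", p);
+}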
+
+static void dump_primes(void)
+{
+ const struct primes *p;
+ char *buf;
+
+ buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+
+ rcu_read_lock();
+ p = rcu_dereference(primes);
+
+ if (buf)
+ bitmap_print_to_pagebuf(true, buf, p->primes, p->sz);
+ pr_info("primes.{last=%lu, .sz=%lu, .primes[]=...x%lx} = %s\n",
+ p->last, p->sz, p->primes[BITS_TO_LONGS(p->sz) - 1], buf);
+
+ rcu_read_unlock();
+
+ kfree(buf);
+}
+
+static int selftest(unsigned long max)
+{
+ unsigned long x, last;
+
+ if (!max)
+ return 0;
+
+ for (last = 0, x = 2; x < max; x++) {
+ bool slow = slow_is_prime_number(x);
+ bool fast = is_prime_number(x);
+
+ if (slow != fast) {
+ pr_err("inconsistent result for is-prime(%lu): slow=%s, fast=%s!\n",
+ x, slow ? "yes" : "no", fast ? "yes" : "no");
+ goto err;
+ }
+
+ if (!slow)
+ continue;
+
+ if (next_prime_number(last) != x) {
+ pr_err("incorrect result for next-prime(%lu): expected %lu, got %lu\n",
+ last, x, next_prime_number(last));
+ goto err;
+ }
+ last = x;
+ }
+
+ pr_info("%s(%lu) passed, last prime was %lu\n", __func__, x, last);
+ return 0;
+
+err:
+ dump_primes();
+ return -EINVAL;
+}
+
+static int __init primes_init(void)
+{
+ return selftest(selftest_max);
+}
+
+static void __exit primes_exit(void)
+{
+ free_primes();
+}
+
+module_init(primes_init);
+module_exit(primes_exit);
+
+module_param_named(selftest, selftest_max, ulong, 0400);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL");
diff --git a/lib/math/rational-test.c b/lib/math/rational-test.c
new file mode 100644
index 000000000..01611ddff
--- /dev/null
+++ b/lib/math/rational-test.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <kunit/test.h>
+
+#include <linux/rational.h>
+
+struct rational_test_param {
+ unsigned long num, den;
+ unsigned long max_num, max_den;
+ unsigned long exp_num, exp_den;
+
+ const char *name;
+};
+
+static const struct rational_test_param test_parameters[] = {
+ { 1230, 10, 100, 20, 100, 1, "Exceeds bounds, semi-convergent term > 1/2 last term" },
+	{ 34567, 100, 120, 20, 120, 1, "Exceeds bounds, semi-convergent term < 1/2 last term" },
+ { 1, 30, 100, 10, 0, 1, "Closest to zero" },
+ { 1, 19, 100, 10, 1, 10, "Closest to smallest non-zero" },
+	{ 27, 32, 16, 16, 11, 13, "Use convergent" },
+ { 1155, 7735, 255, 255, 33, 221, "Exact answer" },
+ { 87, 32, 70, 32, 68, 25, "Semiconvergent, numerator limit" },
+ { 14533, 4626, 15000, 2400, 7433, 2366, "Semiconvergent, denominator limit" },
+};
+
+static void get_desc(const struct rational_test_param *param, char *desc)
+{
+ strscpy(desc, param->name, KUNIT_PARAM_DESC_SIZE);
+}
+
+/* Creates function rational_gen_params */
+KUNIT_ARRAY_PARAM(rational, test_parameters, get_desc);
+
+static void rational_test(struct kunit *test)
+{
+ const struct rational_test_param *param = (const struct rational_test_param *)test->param_value;
+ unsigned long n = 0, d = 0;
+
+ rational_best_approximation(param->num, param->den, param->max_num, param->max_den, &n, &d);
+ KUNIT_EXPECT_EQ(test, n, param->exp_num);
+ KUNIT_EXPECT_EQ(test, d, param->exp_den);
+}
+
+static struct kunit_case rational_test_cases[] = {
+ KUNIT_CASE_PARAM(rational_test, rational_gen_params),
+ {}
+};
+
+static struct kunit_suite rational_test_suite = {
+ .name = "rational",
+ .test_cases = rational_test_cases,
+};
+
+kunit_test_suites(&rational_test_suite);
+
+MODULE_LICENSE("GPL v2");
diff --git a/lib/math/rational.c b/lib/math/rational.c
new file mode 100644
index 000000000..ec59d426e
--- /dev/null
+++ b/lib/math/rational.c
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * rational fractions
+ *
+ * Copyright (C) 2009 emlix GmbH, Oskar Schirmer <oskar@scara.com>
+ * Copyright (C) 2019 Trent Piepho <tpiepho@gmail.com>
+ *
+ * helper functions for dealing with rational numbers
+ */
+
+#include <linux/rational.h>
+#include <linux/compiler.h>
+#include <linux/export.h>
+#include <linux/minmax.h>
+#include <linux/limits.h>
+#include <linux/module.h>
+
+/*
+ * Calculate the best rational approximation for a given fraction,
+ * taking into account restricted register sizes, e.g. to find
+ * appropriate values for a PLL with a 5-bit denominator and an
+ * 8-bit numerator register field. Trying to set up a
+ * frequency ratio of 3.1415, one would say:
+ *
+ * rational_best_approximation(31415, 10000,
+ * (1 << 8) - 1, (1 << 5) - 1, &n, &d);
+ *
+ * you may look at given_numerator as a fixed point number,
+ * with the fractional part size described in given_denominator.
+ *
+ * for theoretical background, see:
+ * https://en.wikipedia.org/wiki/Continued_fraction
+ */
+
+void rational_best_approximation(
+ unsigned long given_numerator, unsigned long given_denominator,
+ unsigned long max_numerator, unsigned long max_denominator,
+ unsigned long *best_numerator, unsigned long *best_denominator)
+{
+ /* n/d is the starting rational, which is continually
+ * decreased each iteration using the Euclidean algorithm.
+ *
+ * dp is the value of d from the prior iteration.
+ *
+ * n2/d2, n1/d1, and n0/d0 are our successively more accurate
+ * approximations of the rational. They are, respectively,
+ * the current, previous, and two prior iterations of it.
+ *
+ * a is current term of the continued fraction.
+ */
+ unsigned long n, d, n0, d0, n1, d1, n2, d2;
+ n = given_numerator;
+ d = given_denominator;
+ n0 = d1 = 0;
+ n1 = d0 = 1;
+
+ for (;;) {
+ unsigned long dp, a;
+
+ if (d == 0)
+ break;
+ /* Find next term in continued fraction, 'a', via
+ * Euclidean algorithm.
+ */
+ dp = d;
+ a = n / d;
+ d = n % d;
+ n = dp;
+
+ /* Calculate the current rational approximation (aka
+ * convergent), n2/d2, using the term just found and
+ * the two prior approximations.
+ */
+ n2 = n0 + a * n1;
+ d2 = d0 + a * d1;
+
+ /* If the current convergent exceeds the maxes, then
+ * return either the previous convergent or the
+ * largest semi-convergent, the final term of which is
+ * found below as 't'.
+ */
+ if ((n2 > max_numerator) || (d2 > max_denominator)) {
+ unsigned long t = ULONG_MAX;
+
+ if (d1)
+ t = (max_denominator - d0) / d1;
+ if (n1)
+ t = min(t, (max_numerator - n0) / n1);
+
+ /* This tests if the semi-convergent is closer than the previous
+ * convergent. If d1 is zero there is no previous convergent as this
+ * is the 1st iteration, so always choose the semi-convergent.
+ */
+ if (!d1 || 2u * t > a || (2u * t == a && d0 * dp > d1 * d)) {
+ n1 = n0 + t * n1;
+ d1 = d0 + t * d1;
+ }
+ break;
+ }
+ n0 = n1;
+ n1 = n2;
+ d0 = d1;
+ d1 = d2;
+ }
+ *best_numerator = n1;
+ *best_denominator = d1;
+}
+EXPORT_SYMBOL(rational_best_approximation);
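+
+/*
+ * Usage sketch (an illustrative addition, not part of the upstream file):
+ * the PLL case from the comment above, approximating 3.1415 with an 8-bit
+ * numerator and a 5-bit denominator register field.
+ */
+static void __maybe_unused example_pll_ratio(void)
+{
+	unsigned long n, d;
+
+	rational_best_approximation(31415, 10000,
+				    (1 << 8) - 1, (1 << 5) - 1, &n, &d);
+	/* yields the convergent 22/7, i.e. n == 22, d == 7 (~3.1429) */
+}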
+
+MODULE_LICENSE("GPL v2");
diff --git a/lib/math/reciprocal_div.c b/lib/math/reciprocal_div.c
new file mode 100644
index 000000000..6cb4adbb8
--- /dev/null
+++ b/lib/math/reciprocal_div.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bitops.h>
+#include <linux/bug.h>
+#include <linux/export.h>
+#include <linux/limits.h>
+#include <linux/math.h>
+#include <linux/minmax.h>
+#include <linux/types.h>
+
+#include <linux/reciprocal_div.h>
+
+/*
+ * For a description of the algorithm please have a look at
+ * include/linux/reciprocal_div.h
+ */
+
+struct reciprocal_value reciprocal_value(u32 d)
+{
+ struct reciprocal_value R;
+ u64 m;
+ int l;
+
+ l = fls(d - 1);
+ m = ((1ULL << 32) * ((1ULL << l) - d));
+ do_div(m, d);
+ ++m;
+ R.m = (u32)m;
+ R.sh1 = min(l, 1);
+ R.sh2 = max(l - 1, 0);
+
+ return R;
+}
+EXPORT_SYMBOL(reciprocal_value);
+
+struct reciprocal_value_adv reciprocal_value_adv(u32 d, u8 prec)
+{
+ struct reciprocal_value_adv R;
+ u32 l, post_shift;
+ u64 mhigh, mlow;
+
+ /* ceil(log2(d)) */
+ l = fls(d - 1);
+ /* NOTE: mlow/mhigh could overflow u64 when l == 32. This case needs to
+ * be handled before calling "reciprocal_value_adv", please see the
+ * comment at include/linux/reciprocal_div.h.
+ */
+ WARN(l == 32,
+ "ceil(log2(0x%08x)) == 32, %s doesn't support such divisor",
+ d, __func__);
+ post_shift = l;
+ mlow = 1ULL << (32 + l);
+ do_div(mlow, d);
+ mhigh = (1ULL << (32 + l)) + (1ULL << (32 + l - prec));
+ do_div(mhigh, d);
+
+ for (; post_shift > 0; post_shift--) {
+ u64 lo = mlow >> 1, hi = mhigh >> 1;
+
+ if (lo >= hi)
+ break;
+
+ mlow = lo;
+ mhigh = hi;
+ }
+
+ R.m = (u32)mhigh;
+ R.sh = post_shift;
+ R.exp = l;
+ R.is_wide_m = mhigh > U32_MAX;
+
+ return R;
+}
+EXPORT_SYMBOL(reciprocal_value_adv);
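+
+/*
+ * Usage sketch (an illustrative addition, not part of the upstream file):
+ * hoist the expensive divide out of a hot path by precomputing the
+ * reciprocal once, then dividing via multiply-and-shift.
+ */
+static u32 __maybe_unused example_fast_div(u32 a, u32 b)
+{
+	struct reciprocal_value R = reciprocal_value(b);	/* once per b */
+
+	return reciprocal_divide(a, R);	/* a / b without a div instruction */
+}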
diff --git a/lib/math/test_div64.c b/lib/math/test_div64.c
new file mode 100644
index 000000000..c15edd688
--- /dev/null
+++ b/lib/math/test_div64.c
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Maciej W. Rozycki
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/ktime.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/time64.h>
+#include <linux/types.h>
+
+#include <asm/div64.h>
+
+#define TEST_DIV64_N_ITER 1024
+
+static const u64 test_div64_dividends[] = {
+ 0x00000000ab275080,
+ 0x0000000fe73c1959,
+ 0x000000e54c0a74b1,
+ 0x00000d4398ff1ef9,
+ 0x0000a18c2ee1c097,
+ 0x00079fb80b072e4a,
+ 0x0072db27380dd689,
+ 0x0842f488162e2284,
+ 0xf66745411d8ab063,
+};
+#define SIZE_DIV64_DIVIDENDS ARRAY_SIZE(test_div64_dividends)
+
+#define TEST_DIV64_DIVISOR_0 0x00000009
+#define TEST_DIV64_DIVISOR_1 0x0000007c
+#define TEST_DIV64_DIVISOR_2 0x00000204
+#define TEST_DIV64_DIVISOR_3 0x0000cb5b
+#define TEST_DIV64_DIVISOR_4 0x00010000
+#define TEST_DIV64_DIVISOR_5 0x0008a880
+#define TEST_DIV64_DIVISOR_6 0x003fd3ae
+#define TEST_DIV64_DIVISOR_7 0x0b658fac
+#define TEST_DIV64_DIVISOR_8 0xdc08b349
+
+static const u32 test_div64_divisors[] = {
+ TEST_DIV64_DIVISOR_0,
+ TEST_DIV64_DIVISOR_1,
+ TEST_DIV64_DIVISOR_2,
+ TEST_DIV64_DIVISOR_3,
+ TEST_DIV64_DIVISOR_4,
+ TEST_DIV64_DIVISOR_5,
+ TEST_DIV64_DIVISOR_6,
+ TEST_DIV64_DIVISOR_7,
+ TEST_DIV64_DIVISOR_8,
+};
+#define SIZE_DIV64_DIVISORS ARRAY_SIZE(test_div64_divisors)
+
+static const struct {
+ u64 quotient;
+ u32 remainder;
+} test_div64_results[SIZE_DIV64_DIVISORS][SIZE_DIV64_DIVIDENDS] = {
+ {
+ { 0x0000000013045e47, 0x00000001 },
+ { 0x000000000161596c, 0x00000030 },
+ { 0x000000000054e9d4, 0x00000130 },
+ { 0x000000000000d776, 0x0000278e },
+ { 0x000000000000ab27, 0x00005080 },
+ { 0x00000000000013c4, 0x0004ce80 },
+ { 0x00000000000002ae, 0x001e143c },
+ { 0x000000000000000f, 0x0033e56c },
+ { 0x0000000000000000, 0xab275080 },
+ }, {
+ { 0x00000001c45c02d1, 0x00000000 },
+ { 0x0000000020d5213c, 0x00000049 },
+ { 0x0000000007e3d65f, 0x000001dd },
+ { 0x0000000000140531, 0x000065ee },
+ { 0x00000000000fe73c, 0x00001959 },
+ { 0x000000000001d637, 0x0004e5d9 },
+ { 0x0000000000003fc9, 0x000713bb },
+ { 0x0000000000000165, 0x029abe7d },
+ { 0x0000000000000012, 0x6e9f7e37 },
+ }, {
+ { 0x000000197a3a0cf7, 0x00000002 },
+ { 0x00000001d9632e5c, 0x00000021 },
+ { 0x0000000071c28039, 0x000001cd },
+ { 0x000000000120a844, 0x0000b885 },
+ { 0x0000000000e54c0a, 0x000074b1 },
+ { 0x00000000001a7bb3, 0x00072331 },
+ { 0x00000000000397ad, 0x0002c61b },
+ { 0x000000000000141e, 0x06ea2e89 },
+ { 0x000000000000010a, 0xab002ad7 },
+ }, {
+ { 0x0000017949e37538, 0x00000001 },
+ { 0x0000001b62441f37, 0x00000055 },
+ { 0x0000000694a3391d, 0x00000085 },
+ { 0x0000000010b2a5d2, 0x0000a753 },
+ { 0x000000000d4398ff, 0x00001ef9 },
+ { 0x0000000001882ec6, 0x0005cbf9 },
+ { 0x000000000035333b, 0x0017abdf },
+ { 0x00000000000129f1, 0x0ab4520d },
+ { 0x0000000000000f6e, 0x8ac0ce9b },
+ }, {
+ { 0x000011f321a74e49, 0x00000006 },
+ { 0x0000014d8481d211, 0x0000005b },
+ { 0x0000005025cbd92d, 0x000001e3 },
+ { 0x00000000cb5e71e3, 0x000043e6 },
+ { 0x00000000a18c2ee1, 0x0000c097 },
+ { 0x0000000012a88828, 0x00036c97 },
+ { 0x000000000287f16f, 0x002c2a25 },
+ { 0x00000000000e2cc7, 0x02d581e3 },
+ { 0x000000000000bbf4, 0x1ba08c03 },
+ }, {
+ { 0x0000d8db8f72935d, 0x00000005 },
+ { 0x00000fbd5aed7a2e, 0x00000002 },
+ { 0x000003c84b6ea64a, 0x00000122 },
+ { 0x0000000998fa8829, 0x000044b7 },
+ { 0x000000079fb80b07, 0x00002e4a },
+ { 0x00000000e16b20fa, 0x0002a14a },
+ { 0x000000001e940d22, 0x00353b2e },
+ { 0x0000000000ab40ac, 0x06fba6ba },
+ { 0x000000000008debd, 0x72d98365 },
+ }, {
+ { 0x000cc3045b8fc281, 0x00000000 },
+ { 0x0000ed1f48b5c9fc, 0x00000079 },
+ { 0x000038fb9c63406a, 0x000000e1 },
+ { 0x000000909705b825, 0x00000a62 },
+ { 0x00000072db27380d, 0x0000d689 },
+ { 0x0000000d43fce827, 0x00082b09 },
+ { 0x00000001ccaba11a, 0x0037e8dd },
+ { 0x000000000a13f729, 0x0566dffd },
+ { 0x000000000085a14b, 0x23d36726 },
+ }, {
+ { 0x00eafeb9c993592b, 0x00000001 },
+ { 0x00110e5befa9a991, 0x00000048 },
+ { 0x00041947b4a1d36a, 0x000000dc },
+ { 0x00000a6679327311, 0x0000c079 },
+ { 0x00000842f488162e, 0x00002284 },
+ { 0x000000f4459740fc, 0x00084484 },
+ { 0x0000002122c47bf9, 0x002ca446 },
+ { 0x00000000b9936290, 0x004979c4 },
+ { 0x00000000099ca89d, 0x9db446bf },
+ }, {
+ { 0x1b60cece589da1d2, 0x00000001 },
+ { 0x01fcb42be1453f5b, 0x0000004f },
+ { 0x007a3f2457df0749, 0x0000013f },
+ { 0x0001363130e3ec7b, 0x000017aa },
+ { 0x0000f66745411d8a, 0x0000b063 },
+ { 0x00001c757dfab350, 0x00048863 },
+ { 0x000003dc4979c652, 0x00224ea7 },
+ { 0x000000159edc3144, 0x06409ab3 },
+ { 0x000000011eadfee3, 0xa99c48a8 },
+ },
+};
+
+static inline bool test_div64_verify(u64 quotient, u32 remainder, int i, int j)
+{
+ return (quotient == test_div64_results[i][j].quotient &&
+ remainder == test_div64_results[i][j].remainder);
+}
+
+/*
+ * This needs to be a macro, because we don't want to rely on the compiler
+ * to do constant propagation, and `do_div' may take a different path for
+ * constants, so we do want to verify that as well.
+ */
+#define test_div64_one(dividend, divisor, i, j) ({ \
+ bool result = true; \
+ u64 quotient; \
+ u32 remainder; \
+ \
+ quotient = dividend; \
+ remainder = do_div(quotient, divisor); \
+ if (!test_div64_verify(quotient, remainder, i, j)) { \
+ pr_err("ERROR: %016llx / %08x => %016llx,%08x\n", \
+ dividend, divisor, quotient, remainder); \
+ pr_err("ERROR: expected value => %016llx,%08x\n",\
+ test_div64_results[i][j].quotient, \
+ test_div64_results[i][j].remainder); \
+ result = false; \
+ } \
+ result; \
+})
+
+/*
+ * Run calculation for the same divisor value expressed as a constant
+ * and as a variable, so as to verify the implementation for both cases
+ * should they be handled by different code execution paths.
+ */
+static bool __init test_div64(void)
+{
+ u64 dividend;
+ int i, j;
+
+ for (i = 0; i < SIZE_DIV64_DIVIDENDS; i++) {
+ dividend = test_div64_dividends[i];
+ if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_0, i, 0))
+ return false;
+ if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_1, i, 1))
+ return false;
+ if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_2, i, 2))
+ return false;
+ if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_3, i, 3))
+ return false;
+ if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_4, i, 4))
+ return false;
+ if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_5, i, 5))
+ return false;
+ if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_6, i, 6))
+ return false;
+ if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_7, i, 7))
+ return false;
+ if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_8, i, 8))
+ return false;
+ for (j = 0; j < SIZE_DIV64_DIVISORS; j++) {
+ if (!test_div64_one(dividend, test_div64_divisors[j],
+ i, j))
+ return false;
+ }
+ }
+ return true;
+}
+
+static int __init test_div64_init(void)
+{
+ struct timespec64 ts, ts0, ts1;
+ int i;
+
+ pr_info("Starting 64bit/32bit division and modulo test\n");
+ ktime_get_ts64(&ts0);
+
+ for (i = 0; i < TEST_DIV64_N_ITER; i++)
+ if (!test_div64())
+ break;
+
+ ktime_get_ts64(&ts1);
+ ts = timespec64_sub(ts1, ts0);
+	pr_info("Completed 64bit/32bit division and modulo test, %llu.%09lus elapsed\n",
+		ts.tv_sec, ts.tv_nsec);
+
+ return 0;
+}
+
+static void __exit test_div64_exit(void)
+{
+}
+
+module_init(test_div64_init);
+module_exit(test_div64_exit);
+
+MODULE_AUTHOR("Maciej W. Rozycki <macro@orcam.me.uk>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("64bit/32bit division and modulo test module");
diff --git a/lib/memcat_p.c b/lib/memcat_p.c
new file mode 100644
index 000000000..b810fbc66
--- /dev/null
+++ b/lib/memcat_p.c
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/slab.h>
+
+/*
+ * Merge two NULL-terminated pointer arrays into a newly allocated
+ * array, which is also NULL-terminated. Nomenclature is inspired by
+ * memset_p() and memcat() found elsewhere in the kernel source tree.
+ */
+void **__memcat_p(void **a, void **b)
+{
+ void **p = a, **new;
+ int nr;
+
+ /* count the elements in both arrays */
+ for (nr = 0, p = a; *p; nr++, p++)
+ ;
+ for (p = b; *p; nr++, p++)
+ ;
+ /* one for the NULL-terminator */
+ nr++;
+
+ new = kmalloc_array(nr, sizeof(void *), GFP_KERNEL);
+ if (!new)
+ return NULL;
+
+ /* nr -> last index; p points to NULL in b[] */
+ for (nr--; nr >= 0; nr--, p = p == b ? &a[nr] : p - 1)
+ new[nr] = *p;
+
+ return new;
+}
+EXPORT_SYMBOL_GPL(__memcat_p);
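+
+/*
+ * Usage sketch (an illustrative addition, not part of the upstream file),
+ * assuming the type-preserving memcat_p() wrapper from <linux/string.h>:
+ *
+ *	struct attribute **all = memcat_p(base_attrs, extra_attrs);
+ *
+ * The caller owns the returned array and must kfree() it; a NULL return
+ * means the allocation failed.
+ */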
diff --git a/lib/memcpy_kunit.c b/lib/memcpy_kunit.c
new file mode 100644
index 000000000..440aee705
--- /dev/null
+++ b/lib/memcpy_kunit.c
@@ -0,0 +1,569 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test cases for memcpy(), memmove(), and memset().
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <kunit/test.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/overflow.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+
+struct some_bytes {
+ union {
+ u8 data[32];
+ struct {
+ u32 one;
+ u16 two;
+ u8 three;
+ /* 1 byte hole */
+ u32 four[4];
+ };
+ };
+};
+
+#define check(instance, v) do { \
+ BUILD_BUG_ON(sizeof(instance.data) != 32); \
+ for (size_t i = 0; i < sizeof(instance.data); i++) { \
+ KUNIT_ASSERT_EQ_MSG(test, instance.data[i], v, \
+ "line %d: '%s' not initialized to 0x%02x @ %d (saw 0x%02x)\n", \
+ __LINE__, #instance, v, i, instance.data[i]); \
+ } \
+} while (0)
+
+#define compare(name, one, two) do { \
+ BUILD_BUG_ON(sizeof(one) != sizeof(two)); \
+ for (size_t i = 0; i < sizeof(one); i++) { \
+ KUNIT_EXPECT_EQ_MSG(test, one.data[i], two.data[i], \
+ "line %d: %s.data[%d] (0x%02x) != %s.data[%d] (0x%02x)\n", \
+ __LINE__, #one, i, one.data[i], #two, i, two.data[i]); \
+ } \
+ kunit_info(test, "ok: " TEST_OP "() " name "\n"); \
+} while (0)
+
+static void memcpy_test(struct kunit *test)
+{
+#define TEST_OP "memcpy"
+ struct some_bytes control = {
+ .data = { 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ },
+ };
+ struct some_bytes zero = { };
+ struct some_bytes middle = {
+ .data = { 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ },
+ };
+ struct some_bytes three = {
+ .data = { 0x00, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x00, 0x00, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ },
+ };
+ struct some_bytes dest = { };
+ int count;
+ u8 *ptr;
+
+ /* Verify static initializers. */
+ check(control, 0x20);
+ check(zero, 0);
+ compare("static initializers", dest, zero);
+
+ /* Verify assignment. */
+ dest = control;
+ compare("direct assignment", dest, control);
+
+ /* Verify complete overwrite. */
+ memcpy(dest.data, zero.data, sizeof(dest.data));
+ compare("complete overwrite", dest, zero);
+
+ /* Verify middle overwrite. */
+ dest = control;
+ memcpy(dest.data + 12, zero.data, 7);
+ compare("middle overwrite", dest, middle);
+
+ /* Verify argument side-effects aren't repeated. */
+ dest = control;
+ ptr = dest.data;
+ count = 1;
+ memcpy(ptr++, zero.data, count++);
+ ptr += 8;
+ memcpy(ptr++, zero.data, count++);
+ compare("argument side-effects", dest, three);
+#undef TEST_OP
+}
+
+static unsigned char larger_array[2048];
+
+static void memmove_test(struct kunit *test)
+{
+#define TEST_OP "memmove"
+ struct some_bytes control = {
+ .data = { 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99,
+ 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99,
+ 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99,
+ 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99,
+ },
+ };
+ struct some_bytes zero = { };
+ struct some_bytes middle = {
+ .data = { 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99,
+ 0x99, 0x99, 0x99, 0x99, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x99, 0x99, 0x99, 0x99, 0x99,
+ 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99,
+ },
+ };
+ struct some_bytes five = {
+ .data = { 0x00, 0x00, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99,
+ 0x99, 0x99, 0x00, 0x00, 0x00, 0x99, 0x99, 0x99,
+ 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99,
+ 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99,
+ },
+ };
+ struct some_bytes overlap = {
+ .data = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99,
+ 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99,
+ },
+ };
+ struct some_bytes overlap_expected = {
+ .data = { 0x00, 0x01, 0x00, 0x01, 0x02, 0x03, 0x04, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99,
+ 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99,
+ },
+ };
+ struct some_bytes dest = { };
+ int count;
+ u8 *ptr;
+
+ /* Verify static initializers. */
+ check(control, 0x99);
+ check(zero, 0);
+ compare("static initializers", zero, dest);
+
+ /* Verify assignment. */
+ dest = control;
+ compare("direct assignment", dest, control);
+
+ /* Verify complete overwrite. */
+ memmove(dest.data, zero.data, sizeof(dest.data));
+ compare("complete overwrite", dest, zero);
+
+ /* Verify middle overwrite. */
+ dest = control;
+ memmove(dest.data + 12, zero.data, 7);
+ compare("middle overwrite", dest, middle);
+
+ /* Verify argument side-effects aren't repeated. */
+ dest = control;
+ ptr = dest.data;
+ count = 2;
+ memmove(ptr++, zero.data, count++);
+ ptr += 9;
+ memmove(ptr++, zero.data, count++);
+ compare("argument side-effects", dest, five);
+
+ /* Verify overlapping overwrite is correct. */
+ ptr = &overlap.data[2];
+ memmove(ptr, overlap.data, 5);
+ compare("overlapping write", overlap, overlap_expected);
+
+ /* Verify larger overlapping moves. */
+ larger_array[256] = 0xAAu;
+ /*
+ * Test a backwards overlapping memmove first. 256 and 1024 are
+ * important for i386 to use rep movsl.
+ */
+ memmove(larger_array, larger_array + 256, 1024);
+ KUNIT_ASSERT_EQ(test, larger_array[0], 0xAAu);
+ KUNIT_ASSERT_EQ(test, larger_array[256], 0x00);
+ KUNIT_ASSERT_NULL(test,
+ memchr(larger_array + 1, 0xaa, ARRAY_SIZE(larger_array) - 1));
+ /* Test a forwards overlapping memmove. */
+ larger_array[0] = 0xBBu;
+ memmove(larger_array + 256, larger_array, 1024);
+ KUNIT_ASSERT_EQ(test, larger_array[0], 0xBBu);
+ KUNIT_ASSERT_EQ(test, larger_array[256], 0xBBu);
+ KUNIT_ASSERT_NULL(test, memchr(larger_array + 1, 0xBBu, 256 - 1));
+ KUNIT_ASSERT_NULL(test,
+ memchr(larger_array + 257, 0xBBu, ARRAY_SIZE(larger_array) - 257));
+#undef TEST_OP
+}
+
+static void memset_test(struct kunit *test)
+{
+#define TEST_OP "memset"
+ struct some_bytes control = {
+ .data = { 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ },
+ };
+ struct some_bytes complete = {
+ .data = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ },
+ };
+ struct some_bytes middle = {
+ .data = { 0x30, 0x30, 0x30, 0x30, 0x31, 0x31, 0x31, 0x31,
+ 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31,
+ 0x31, 0x31, 0x31, 0x31, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ },
+ };
+ struct some_bytes three = {
+ .data = { 0x60, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x61, 0x61, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ },
+ };
+ struct some_bytes after = {
+ .data = { 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x72,
+ 0x72, 0x72, 0x72, 0x72, 0x72, 0x72, 0x72, 0x72,
+ 0x72, 0x72, 0x72, 0x72, 0x72, 0x72, 0x72, 0x72,
+ 0x72, 0x72, 0x72, 0x72, 0x72, 0x72, 0x72, 0x72,
+ },
+ };
+ struct some_bytes startat = {
+ .data = { 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+ 0x79, 0x79, 0x79, 0x79, 0x79, 0x79, 0x79, 0x79,
+ 0x79, 0x79, 0x79, 0x79, 0x79, 0x79, 0x79, 0x79,
+ 0x79, 0x79, 0x79, 0x79, 0x79, 0x79, 0x79, 0x79,
+ },
+ };
+ struct some_bytes dest = { };
+ int count, value;
+ u8 *ptr;
+
+ /* Verify static initializers. */
+ check(control, 0x30);
+ check(dest, 0);
+
+ /* Verify assignment. */
+ dest = control;
+ compare("direct assignment", dest, control);
+
+ /* Verify complete overwrite. */
+ memset(dest.data, 0xff, sizeof(dest.data));
+ compare("complete overwrite", dest, complete);
+
+ /* Verify middle overwrite. */
+ dest = control;
+ memset(dest.data + 4, 0x31, 16);
+ compare("middle overwrite", dest, middle);
+
+ /* Verify argument side-effects aren't repeated. */
+ dest = control;
+ ptr = dest.data;
+ value = 0x60;
+ count = 1;
+ memset(ptr++, value++, count++);
+ ptr += 8;
+ memset(ptr++, value++, count++);
+ compare("argument side-effects", dest, three);
+
+ /* Verify memset_after() */
+ dest = control;
+ memset_after(&dest, 0x72, three);
+ compare("memset_after()", dest, after);
+
+ /* Verify memset_startat() */
+ dest = control;
+ memset_startat(&dest, 0x79, four);
+ compare("memset_startat()", dest, startat);
+#undef TEST_OP
+}
+
+static u8 large_src[1024];
+static u8 large_dst[2048];
+static const u8 large_zero[2048];
+
+static void set_random_nonzero(struct kunit *test, u8 *byte)
+{
+ int failed_rng = 0;
+
+ while (*byte == 0) {
+ get_random_bytes(byte, 1);
+ KUNIT_ASSERT_LT_MSG(test, failed_rng++, 100,
+ "Is the RNG broken?");
+ }
+}
+
+static void init_large(struct kunit *test)
+{
+ if (!IS_ENABLED(CONFIG_MEMCPY_SLOW_KUNIT_TEST))
+ kunit_skip(test, "Slow test skipped. Enable with CONFIG_MEMCPY_SLOW_KUNIT_TEST=y");
+
+ /* Get many bit patterns. */
+ get_random_bytes(large_src, ARRAY_SIZE(large_src));
+
+ /* Make sure we have non-zero edges. */
+ set_random_nonzero(test, &large_src[0]);
+ set_random_nonzero(test, &large_src[ARRAY_SIZE(large_src) - 1]);
+
+ /* Explicitly zero the entire destination. */
+ memset(large_dst, 0, ARRAY_SIZE(large_dst));
+}
+
+/*
+ * Instead of an indirect function call for "copy" or a giant macro,
+ * use a bool to pick memcpy or memmove.
+ */
+static void copy_large_test(struct kunit *test, bool use_memmove)
+{
+ init_large(test);
+
+ /* Copy a growing number of non-overlapping bytes ... */
+ for (int bytes = 1; bytes <= ARRAY_SIZE(large_src); bytes++) {
+ /* Over a shifting destination window ... */
+ for (int offset = 0; offset < ARRAY_SIZE(large_src); offset++) {
+ int right_zero_pos = offset + bytes;
+ int right_zero_size = ARRAY_SIZE(large_dst) - right_zero_pos;
+
+ /* Copy! */
+ if (use_memmove)
+ memmove(large_dst + offset, large_src, bytes);
+ else
+ memcpy(large_dst + offset, large_src, bytes);
+
+ /* Did we touch anything before the copy area? */
+ KUNIT_ASSERT_EQ_MSG(test,
+ memcmp(large_dst, large_zero, offset), 0,
+ "with size %d at offset %d", bytes, offset);
+ /* Did we touch anything after the copy area? */
+ KUNIT_ASSERT_EQ_MSG(test,
+ memcmp(&large_dst[right_zero_pos], large_zero, right_zero_size), 0,
+ "with size %d at offset %d", bytes, offset);
+
+ /* Are we byte-for-byte exact across the copy? */
+ KUNIT_ASSERT_EQ_MSG(test,
+ memcmp(large_dst + offset, large_src, bytes), 0,
+ "with size %d at offset %d", bytes, offset);
+
+ /* Zero out what we copied for the next cycle. */
+ memset(large_dst + offset, 0, bytes);
+ }
+ /* Avoid stall warnings if this loop gets slow. */
+ cond_resched();
+ }
+}
+
+static void memcpy_large_test(struct kunit *test)
+{
+ copy_large_test(test, false);
+}
+
+static void memmove_large_test(struct kunit *test)
+{
+ copy_large_test(test, true);
+}
+
+/*
+ * On the assumption that boundary conditions are going to be the most
+ * sensitive, instead of taking a full step (inc) each iteration,
+ * take single index steps for at least the first "inc"-many indexes
+ * from the "start" and at least the last "inc"-many indexes before
+ * the "end". When in the middle, take full "inc"-wide steps. For
+ * example, calling next_step(idx, 1, 15, 3) with idx starting at 0
+ * would see the following pattern: 1 2 3 4 7 10 11 12 13 14 15.
+ */
+static int next_step(int idx, int start, int end, int inc)
+{
+ start += inc;
+ end -= inc;
+
+ if (idx < start || idx + inc > end)
+ inc = 1;
+ return idx + inc;
+}
+
+static void inner_loop(struct kunit *test, int bytes, int d_off, int s_off)
+{
+ int left_zero_pos, left_zero_size;
+ int right_zero_pos, right_zero_size;
+ int src_pos, src_orig_pos, src_size;
+ int pos;
+
+ /* Place the source in the destination buffer. */
+ memcpy(&large_dst[s_off], large_src, bytes);
+
+ /* Copy to destination offset. */
+ memmove(&large_dst[d_off], &large_dst[s_off], bytes);
+
+ /* Make sure destination entirely matches. */
+ KUNIT_ASSERT_EQ_MSG(test, memcmp(&large_dst[d_off], large_src, bytes), 0,
+ "with size %d at src offset %d and dest offset %d",
+ bytes, s_off, d_off);
+
+ /* Calculate the expected zero spans. */
+ if (s_off < d_off) {
+ left_zero_pos = 0;
+ left_zero_size = s_off;
+
+ right_zero_pos = d_off + bytes;
+ right_zero_size = ARRAY_SIZE(large_dst) - right_zero_pos;
+
+ src_pos = s_off;
+ src_orig_pos = 0;
+ src_size = d_off - s_off;
+ } else {
+ left_zero_pos = 0;
+ left_zero_size = d_off;
+
+ right_zero_pos = s_off + bytes;
+ right_zero_size = ARRAY_SIZE(large_dst) - right_zero_pos;
+
+ src_pos = d_off + bytes;
+ src_orig_pos = src_pos - s_off;
+ src_size = right_zero_pos - src_pos;
+ }
+
+	/* Check non-overlapping source is unchanged. */
+ KUNIT_ASSERT_EQ_MSG(test,
+ memcmp(&large_dst[src_pos], &large_src[src_orig_pos], src_size), 0,
+ "with size %d at src offset %d and dest offset %d",
+ bytes, s_off, d_off);
+
+ /* Check leading buffer contents are zero. */
+ KUNIT_ASSERT_EQ_MSG(test,
+ memcmp(&large_dst[left_zero_pos], large_zero, left_zero_size), 0,
+ "with size %d at src offset %d and dest offset %d",
+ bytes, s_off, d_off);
+ /* Check trailing buffer contents are zero. */
+ KUNIT_ASSERT_EQ_MSG(test,
+ memcmp(&large_dst[right_zero_pos], large_zero, right_zero_size), 0,
+ "with size %d at src offset %d and dest offset %d",
+ bytes, s_off, d_off);
+
+	/* Zero out everything not already zeroed. */
+ pos = left_zero_pos + left_zero_size;
+ memset(&large_dst[pos], 0, right_zero_pos - pos);
+}
+
+static void memmove_overlap_test(struct kunit *test)
+{
+ /*
+ * Running all possible offset and overlap combinations takes a
+ * very long time. Instead, only check up to 128 bytes offset
+ * into the destination buffer (which should result in crossing
+ * cachelines), with a step size of 1 through 7 to try to skip some
+ * redundancy.
+ */
+ static const int offset_max = 128; /* less than ARRAY_SIZE(large_src); */
+ static const int bytes_step = 7;
+ static const int window_step = 7;
+
+ static const int bytes_start = 1;
+ static const int bytes_end = ARRAY_SIZE(large_src) + 1;
+
+ init_large(test);
+
+ /* Copy a growing number of overlapping bytes ... */
+ for (int bytes = bytes_start; bytes < bytes_end;
+ bytes = next_step(bytes, bytes_start, bytes_end, bytes_step)) {
+
+ /* Over a shifting destination window ... */
+ for (int d_off = 0; d_off < offset_max; d_off++) {
+ int s_start = max(d_off - bytes, 0);
+ int s_end = min_t(int, d_off + bytes, ARRAY_SIZE(large_src));
+
+ /* Over a shifting source window ... */
+ for (int s_off = s_start; s_off < s_end;
+ s_off = next_step(s_off, s_start, s_end, window_step))
+ inner_loop(test, bytes, d_off, s_off);
+
+ /* Avoid stall warnings. */
+ cond_resched();
+ }
+ }
+}
+
+static void strtomem_test(struct kunit *test)
+{
+ static const char input[sizeof(unsigned long)] = "hi";
+ static const char truncate[] = "this is too long";
+ struct {
+ unsigned long canary1;
+ unsigned char output[sizeof(unsigned long)] __nonstring;
+ unsigned long canary2;
+ } wrap;
+
+ memset(&wrap, 0xFF, sizeof(wrap));
+ KUNIT_EXPECT_EQ_MSG(test, wrap.canary1, ULONG_MAX,
+ "bad initial canary value");
+ KUNIT_EXPECT_EQ_MSG(test, wrap.canary2, ULONG_MAX,
+ "bad initial canary value");
+
+ /* Check unpadded copy leaves surroundings untouched. */
+ strtomem(wrap.output, input);
+ KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX);
+ KUNIT_EXPECT_EQ(test, wrap.output[0], input[0]);
+ KUNIT_EXPECT_EQ(test, wrap.output[1], input[1]);
+ for (size_t i = 2; i < sizeof(wrap.output); i++)
+ KUNIT_EXPECT_EQ(test, wrap.output[i], 0xFF);
+ KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX);
+
+ /* Check truncated copy leaves surroundings untouched. */
+ memset(&wrap, 0xFF, sizeof(wrap));
+ strtomem(wrap.output, truncate);
+ KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX);
+ for (size_t i = 0; i < sizeof(wrap.output); i++)
+ KUNIT_EXPECT_EQ(test, wrap.output[i], truncate[i]);
+ KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX);
+
+ /* Check padded copy leaves only string padded. */
+ memset(&wrap, 0xFF, sizeof(wrap));
+ strtomem_pad(wrap.output, input, 0xAA);
+ KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX);
+ KUNIT_EXPECT_EQ(test, wrap.output[0], input[0]);
+ KUNIT_EXPECT_EQ(test, wrap.output[1], input[1]);
+ for (size_t i = 2; i < sizeof(wrap.output); i++)
+ KUNIT_EXPECT_EQ(test, wrap.output[i], 0xAA);
+ KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX);
+
+ /* Check truncated padded copy has no padding. */
+ memset(&wrap, 0xFF, sizeof(wrap));
+	strtomem_pad(wrap.output, truncate, 0xAA);
+ KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX);
+ for (size_t i = 0; i < sizeof(wrap.output); i++)
+ KUNIT_EXPECT_EQ(test, wrap.output[i], truncate[i]);
+ KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX);
+}
+
+static struct kunit_case memcpy_test_cases[] = {
+ KUNIT_CASE(memset_test),
+ KUNIT_CASE(memcpy_test),
+ KUNIT_CASE_SLOW(memcpy_large_test),
+ KUNIT_CASE_SLOW(memmove_test),
+ KUNIT_CASE_SLOW(memmove_large_test),
+ KUNIT_CASE_SLOW(memmove_overlap_test),
+ KUNIT_CASE(strtomem_test),
+ {}
+};
+
+static struct kunit_suite memcpy_test_suite = {
+ .name = "memcpy",
+ .test_cases = memcpy_test_cases,
+};
+
+kunit_test_suite(memcpy_test_suite);
+
+MODULE_LICENSE("GPL");
diff --git a/lib/memory-notifier-error-inject.c b/lib/memory-notifier-error-inject.c
new file mode 100644
index 000000000..2c46dde59
--- /dev/null
+++ b/lib/memory-notifier-error-inject.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/memory.h>
+
+#include "notifier-error-inject.h"
+
+static int priority;
+module_param(priority, int, 0);
+MODULE_PARM_DESC(priority, "specify memory notifier priority");
+
+static struct notifier_err_inject memory_notifier_err_inject = {
+ .actions = {
+ { NOTIFIER_ERR_INJECT_ACTION(MEM_GOING_ONLINE) },
+ { NOTIFIER_ERR_INJECT_ACTION(MEM_GOING_OFFLINE) },
+ {}
+ }
+};
+
+static struct dentry *dir;
+
+static int err_inject_init(void)
+{
+ int err;
+
+ dir = notifier_err_inject_init("memory", notifier_err_inject_dir,
+ &memory_notifier_err_inject, priority);
+ if (IS_ERR(dir))
+ return PTR_ERR(dir);
+
+ err = register_memory_notifier(&memory_notifier_err_inject.nb);
+ if (err)
+ debugfs_remove_recursive(dir);
+
+ return err;
+}
+
+static void err_inject_exit(void)
+{
+ unregister_memory_notifier(&memory_notifier_err_inject.nb);
+ debugfs_remove_recursive(dir);
+}
+
+module_init(err_inject_init);
+module_exit(err_inject_exit);
+
+MODULE_DESCRIPTION("memory notifier error injection module");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Akinobu Mita <akinobu.mita@gmail.com>");
diff --git a/lib/memregion.c b/lib/memregion.c
new file mode 100644
index 000000000..be5cfa5a3
--- /dev/null
+++ b/lib/memregion.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* identifiers for device / performance-differentiated memory regions */
+#include <linux/idr.h>
+#include <linux/types.h>
+#include <linux/memregion.h>
+
+static DEFINE_IDA(memregion_ids);
+
+int memregion_alloc(gfp_t gfp)
+{
+ return ida_alloc(&memregion_ids, gfp);
+}
+EXPORT_SYMBOL(memregion_alloc);
+
+void memregion_free(int id)
+{
+ ida_free(&memregion_ids, id);
+}
+EXPORT_SYMBOL(memregion_free);
diff --git a/lib/memweight.c b/lib/memweight.c
new file mode 100644
index 000000000..94dd72cca
--- /dev/null
+++ b/lib/memweight.c
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/export.h>
+#include <linux/bug.h>
+#include <linux/bitmap.h>
+
+/**
+ * memweight - count the total number of bits set in memory area
+ * @ptr: pointer to the start of the area
+ * @bytes: the size of the area
+ */
+size_t memweight(const void *ptr, size_t bytes)
+{
+ size_t ret = 0;
+ size_t longs;
+ const unsigned char *bitmap = ptr;
+
+ for (; bytes > 0 && ((unsigned long)bitmap) % sizeof(long);
+ bytes--, bitmap++)
+ ret += hweight8(*bitmap);
+
+ longs = bytes / sizeof(long);
+ if (longs) {
+ BUG_ON(longs >= INT_MAX / BITS_PER_LONG);
+ ret += bitmap_weight((unsigned long *)bitmap,
+ longs * BITS_PER_LONG);
+ bytes -= longs * sizeof(long);
+ bitmap += longs * sizeof(long);
+ }
+ /*
+ * The reason that this last loop is distinct from the preceding
+ * bitmap_weight() call is to compute 1-bits in the last region smaller
+ * than sizeof(long) properly on big-endian systems.
+ */
+ for (; bytes > 0; bytes--, bitmap++)
+ ret += hweight8(*bitmap);
+
+ return ret;
+}
+EXPORT_SYMBOL(memweight);
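+
+/*
+ * Usage sketch (an illustrative addition, not part of the upstream file):
+ * count set bits in an arbitrarily sized, byte-aligned area; unlike
+ * bitmap_weight(), no long-sized alignment or length is required.
+ */
+static size_t __maybe_unused example_used_slots(const u8 *map, size_t bytes)
+{
+	return memweight(map, bytes);
+}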
diff --git a/lib/muldi3.c b/lib/muldi3.c
new file mode 100644
index 000000000..9150ac130
--- /dev/null
+++ b/lib/muldi3.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/export.h>
+#include <linux/libgcc.h>
+
+#define W_TYPE_SIZE 32
+
+#define __ll_B ((unsigned long) 1 << (W_TYPE_SIZE / 2))
+#define __ll_lowpart(t) ((unsigned long) (t) & (__ll_B - 1))
+#define __ll_highpart(t) ((unsigned long) (t) >> (W_TYPE_SIZE / 2))
+
+/* If we still don't have umul_ppmm, define it using plain C. */
+#if !defined(umul_ppmm)
+#define umul_ppmm(w1, w0, u, v) \
+ do { \
+ unsigned long __x0, __x1, __x2, __x3; \
+ unsigned short __ul, __vl, __uh, __vh; \
+ \
+ __ul = __ll_lowpart(u); \
+ __uh = __ll_highpart(u); \
+ __vl = __ll_lowpart(v); \
+ __vh = __ll_highpart(v); \
+ \
+ __x0 = (unsigned long) __ul * __vl; \
+ __x1 = (unsigned long) __ul * __vh; \
+ __x2 = (unsigned long) __uh * __vl; \
+ __x3 = (unsigned long) __uh * __vh; \
+ \
+ __x1 += __ll_highpart(__x0); /* this can't give carry */\
+ __x1 += __x2; /* but this indeed can */ \
+ if (__x1 < __x2) /* did we get it? */ \
+ __x3 += __ll_B; /* yes, add it in the proper pos */ \
+ \
+ (w1) = __x3 + __ll_highpart(__x1); \
+ (w0) = __ll_lowpart(__x1) * __ll_B + __ll_lowpart(__x0);\
+ } while (0)
+#endif
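+
+/*
+ * Worked illustration (added comment): with B = 2^16, writing u = uh*B + ul
+ * and v = vh*B + vl gives u * v = uh*vh*B^2 + (uh*vl + ul*vh)*B + ul*vl.
+ * __x3..__x0 above are exactly those four partial products; a carry out of
+ * the 32-bit middle sum is worth 2^48, i.e. __ll_B in units of the high
+ * word, which is why it is folded back in as "__x3 += __ll_B".
+ */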
+
+#if !defined(__umulsidi3)
+#define __umulsidi3(u, v) ({ \
+ DWunion __w; \
+ umul_ppmm(__w.s.high, __w.s.low, u, v); \
+ __w.ll; \
+ })
+#endif
+
+long long notrace __muldi3(long long u, long long v)
+{
+ const DWunion uu = {.ll = u};
+ const DWunion vv = {.ll = v};
+ DWunion w = {.ll = __umulsidi3(uu.s.low, vv.s.low)};
+
+ w.s.high += ((unsigned long) uu.s.low * (unsigned long) vv.s.high
+ + (unsigned long) uu.s.high * (unsigned long) vv.s.low);
+
+ return w.ll;
+}
+EXPORT_SYMBOL(__muldi3);
diff --git a/lib/net_utils.c b/lib/net_utils.c
new file mode 100644
index 000000000..42bb0473f
--- /dev/null
+++ b/lib/net_utils.c
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/string.h>
+#include <linux/if_ether.h>
+#include <linux/ctype.h>
+#include <linux/export.h>
+#include <linux/hex.h>
+
+bool mac_pton(const char *s, u8 *mac)
+{
+ size_t maxlen = 3 * ETH_ALEN - 1;
+ int i;
+
+ /* XX:XX:XX:XX:XX:XX */
+ if (strnlen(s, maxlen) < maxlen)
+ return false;
+
+ /* Don't dirty result unless string is valid MAC. */
+ for (i = 0; i < ETH_ALEN; i++) {
+ if (!isxdigit(s[i * 3]) || !isxdigit(s[i * 3 + 1]))
+ return false;
+ if (i != ETH_ALEN - 1 && s[i * 3 + 2] != ':')
+ return false;
+ }
+	for (i = 0; i < ETH_ALEN; i++)
+		mac[i] = (hex_to_bin(s[i * 3]) << 4) | hex_to_bin(s[i * 3 + 1]);
+ return true;
+}
+EXPORT_SYMBOL(mac_pton);
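+
+/*
+ * Usage sketch (an illustrative addition, not part of the upstream file):
+ * parse a module-parameter style "XX:XX:XX:XX:XX:XX" string; mac[] is
+ * written only once the whole string has been validated.
+ */
+static bool __maybe_unused example_parse_mac(const char *opt, u8 mac[ETH_ALEN])
+{
+	return mac_pton(opt, mac);
+}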
diff --git a/lib/netdev-notifier-error-inject.c b/lib/netdev-notifier-error-inject.c
new file mode 100644
index 000000000..bb930f279
--- /dev/null
+++ b/lib/netdev-notifier-error-inject.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+
+#include "notifier-error-inject.h"
+
+static int priority;
+module_param(priority, int, 0);
+MODULE_PARM_DESC(priority, "specify netdevice notifier priority");
+
+static struct notifier_err_inject netdev_notifier_err_inject = {
+ .actions = {
+ { NOTIFIER_ERR_INJECT_ACTION(NETDEV_REGISTER) },
+ { NOTIFIER_ERR_INJECT_ACTION(NETDEV_CHANGEMTU) },
+ { NOTIFIER_ERR_INJECT_ACTION(NETDEV_CHANGENAME) },
+ { NOTIFIER_ERR_INJECT_ACTION(NETDEV_PRE_UP) },
+ { NOTIFIER_ERR_INJECT_ACTION(NETDEV_PRE_TYPE_CHANGE) },
+ { NOTIFIER_ERR_INJECT_ACTION(NETDEV_POST_INIT) },
+ { NOTIFIER_ERR_INJECT_ACTION(NETDEV_PRECHANGEMTU) },
+ { NOTIFIER_ERR_INJECT_ACTION(NETDEV_PRECHANGEUPPER) },
+ { NOTIFIER_ERR_INJECT_ACTION(NETDEV_CHANGEUPPER) },
+ {}
+ }
+};
+
+static struct dentry *dir;
+
+static int netdev_err_inject_init(void)
+{
+ int err;
+
+ dir = notifier_err_inject_init("netdev", notifier_err_inject_dir,
+ &netdev_notifier_err_inject, priority);
+ if (IS_ERR(dir))
+ return PTR_ERR(dir);
+
+ err = register_netdevice_notifier(&netdev_notifier_err_inject.nb);
+ if (err)
+ debugfs_remove_recursive(dir);
+
+ return err;
+}
+
+static void netdev_err_inject_exit(void)
+{
+ unregister_netdevice_notifier(&netdev_notifier_err_inject.nb);
+ debugfs_remove_recursive(dir);
+}
+
+module_init(netdev_err_inject_init);
+module_exit(netdev_err_inject_exit);
+
+MODULE_DESCRIPTION("Netdevice notifier error injection module");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Nikolay Aleksandrov <razor@blackwall.org>");
diff --git a/lib/nlattr.c b/lib/nlattr.c
new file mode 100644
index 000000000..7a2b6c38f
--- /dev/null
+++ b/lib/nlattr.c
@@ -0,0 +1,1143 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NETLINK Netlink attributes
+ *
+ * Authors: Thomas Graf <tgraf@suug.ch>
+ * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
+ */
+
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/jiffies.h>
+#include <linux/nospec.h>
+#include <linux/skbuff.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <net/netlink.h>
+
+/* For these data types, attribute length should be exactly the given
+ * size. However, to maintain compatibility with broken commands, if the
+ * attribute length does not match the expected size, a warning is emitted
+ * telling the user that the command is sending invalid data and needs to be fixed.
+ */
+static const u8 nla_attr_len[NLA_TYPE_MAX+1] = {
+ [NLA_U8] = sizeof(u8),
+ [NLA_U16] = sizeof(u16),
+ [NLA_U32] = sizeof(u32),
+ [NLA_U64] = sizeof(u64),
+ [NLA_S8] = sizeof(s8),
+ [NLA_S16] = sizeof(s16),
+ [NLA_S32] = sizeof(s32),
+ [NLA_S64] = sizeof(s64),
+};
+
+static const u8 nla_attr_minlen[NLA_TYPE_MAX+1] = {
+ [NLA_U8] = sizeof(u8),
+ [NLA_U16] = sizeof(u16),
+ [NLA_U32] = sizeof(u32),
+ [NLA_U64] = sizeof(u64),
+ [NLA_MSECS] = sizeof(u64),
+ [NLA_NESTED] = NLA_HDRLEN,
+ [NLA_S8] = sizeof(s8),
+ [NLA_S16] = sizeof(s16),
+ [NLA_S32] = sizeof(s32),
+ [NLA_S64] = sizeof(s64),
+};
+
+/*
+ * Nested policies might refer back to the original
+ * policy in some cases, and userspace could try to
+ * abuse that and recurse by nesting in the right
+ * ways. Limit recursion to avoid this problem.
+ */
+#define MAX_POLICY_RECURSION_DEPTH 10
+
+static int __nla_validate_parse(const struct nlattr *head, int len, int maxtype,
+ const struct nla_policy *policy,
+ unsigned int validate,
+ struct netlink_ext_ack *extack,
+ struct nlattr **tb, unsigned int depth);
+
+static int validate_nla_bitfield32(const struct nlattr *nla,
+ const u32 valid_flags_mask)
+{
+ const struct nla_bitfield32 *bf = nla_data(nla);
+
+ if (!valid_flags_mask)
+ return -EINVAL;
+
+	/* disallow invalid bit selector */
+ if (bf->selector & ~valid_flags_mask)
+ return -EINVAL;
+
+	/* disallow invalid bit values */
+ if (bf->value & ~valid_flags_mask)
+ return -EINVAL;
+
+	/* disallow valid bit values that are not selected */
+ if (bf->value & ~bf->selector)
+ return -EINVAL;
+
+ return 0;
+}
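+
+/*
+ * Illustration (editor's note): with valid_flags_mask == 0x3, a payload
+ * of { .selector = 0x1, .value = 0x1 } passes, while
+ * { .selector = 0x1, .value = 0x2 } fails the last check because bit 1
+ * is set in the value without being selected.
+ */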
+
+static int nla_validate_array(const struct nlattr *head, int len, int maxtype,
+ const struct nla_policy *policy,
+ struct netlink_ext_ack *extack,
+ unsigned int validate, unsigned int depth)
+{
+ const struct nlattr *entry;
+ int rem;
+
+ nla_for_each_attr(entry, head, len, rem) {
+ int ret;
+
+ if (nla_len(entry) == 0)
+ continue;
+
+ if (nla_len(entry) < NLA_HDRLEN) {
+ NL_SET_ERR_MSG_ATTR_POL(extack, entry, policy,
+ "Array element too short");
+ return -ERANGE;
+ }
+
+ ret = __nla_validate_parse(nla_data(entry), nla_len(entry),
+ maxtype, policy, validate, extack,
+ NULL, depth + 1);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+void nla_get_range_unsigned(const struct nla_policy *pt,
+ struct netlink_range_validation *range)
+{
+ WARN_ON_ONCE(pt->validation_type != NLA_VALIDATE_RANGE_PTR &&
+ (pt->min < 0 || pt->max < 0));
+
+ range->min = 0;
+
+ switch (pt->type) {
+ case NLA_U8:
+ range->max = U8_MAX;
+ break;
+ case NLA_U16:
+ case NLA_BE16:
+ case NLA_BINARY:
+ range->max = U16_MAX;
+ break;
+ case NLA_U32:
+ case NLA_BE32:
+ range->max = U32_MAX;
+ break;
+ case NLA_U64:
+ case NLA_MSECS:
+ range->max = U64_MAX;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return;
+ }
+
+ switch (pt->validation_type) {
+ case NLA_VALIDATE_RANGE:
+ case NLA_VALIDATE_RANGE_WARN_TOO_LONG:
+ range->min = pt->min;
+ range->max = pt->max;
+ break;
+ case NLA_VALIDATE_RANGE_PTR:
+ *range = *pt->range;
+ break;
+ case NLA_VALIDATE_MIN:
+ range->min = pt->min;
+ break;
+ case NLA_VALIDATE_MAX:
+ range->max = pt->max;
+ break;
+ default:
+ break;
+ }
+}
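+
+/*
+ * Example policy entry (editor's sketch; the attribute name is
+ * hypothetical):
+ *
+ *	[MY_ATTR_PORT] = NLA_POLICY_RANGE(NLA_U16, 1, 1024),
+ *
+ * For such an entry nla_get_range_unsigned() reports min == 1 and
+ * max == 1024 instead of the full U16 range.
+ */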
+
+static int nla_validate_range_unsigned(const struct nla_policy *pt,
+ const struct nlattr *nla,
+ struct netlink_ext_ack *extack,
+ unsigned int validate)
+{
+ struct netlink_range_validation range;
+ u64 value;
+
+ switch (pt->type) {
+ case NLA_U8:
+ value = nla_get_u8(nla);
+ break;
+ case NLA_U16:
+ value = nla_get_u16(nla);
+ break;
+ case NLA_U32:
+ value = nla_get_u32(nla);
+ break;
+ case NLA_U64:
+ value = nla_get_u64(nla);
+ break;
+ case NLA_MSECS:
+ value = nla_get_u64(nla);
+ break;
+ case NLA_BINARY:
+ value = nla_len(nla);
+ break;
+ case NLA_BE16:
+ value = ntohs(nla_get_be16(nla));
+ break;
+ case NLA_BE32:
+ value = ntohl(nla_get_be32(nla));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ nla_get_range_unsigned(pt, &range);
+
+ if (pt->validation_type == NLA_VALIDATE_RANGE_WARN_TOO_LONG &&
+ pt->type == NLA_BINARY && value > range.max) {
+ pr_warn_ratelimited("netlink: '%s': attribute type %d has an invalid length.\n",
+ current->comm, pt->type);
+ if (validate & NL_VALIDATE_STRICT_ATTRS) {
+ NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt,
+ "invalid attribute length");
+ return -EINVAL;
+ }
+
+ /* this assumes min <= max (don't validate against min) */
+ return 0;
+ }
+
+ if (value < range.min || value > range.max) {
+ bool binary = pt->type == NLA_BINARY;
+
+ if (binary)
+ NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt,
+ "binary attribute size out of range");
+ else
+ NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt,
+ "integer out of range");
+
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
+void nla_get_range_signed(const struct nla_policy *pt,
+ struct netlink_range_validation_signed *range)
+{
+ switch (pt->type) {
+ case NLA_S8:
+ range->min = S8_MIN;
+ range->max = S8_MAX;
+ break;
+ case NLA_S16:
+ range->min = S16_MIN;
+ range->max = S16_MAX;
+ break;
+ case NLA_S32:
+ range->min = S32_MIN;
+ range->max = S32_MAX;
+ break;
+ case NLA_S64:
+ range->min = S64_MIN;
+ range->max = S64_MAX;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return;
+ }
+
+ switch (pt->validation_type) {
+ case NLA_VALIDATE_RANGE:
+ range->min = pt->min;
+ range->max = pt->max;
+ break;
+ case NLA_VALIDATE_RANGE_PTR:
+ *range = *pt->range_signed;
+ break;
+ case NLA_VALIDATE_MIN:
+ range->min = pt->min;
+ break;
+ case NLA_VALIDATE_MAX:
+ range->max = pt->max;
+ break;
+ default:
+ break;
+ }
+}
+
+static int nla_validate_int_range_signed(const struct nla_policy *pt,
+ const struct nlattr *nla,
+ struct netlink_ext_ack *extack)
+{
+ struct netlink_range_validation_signed range;
+ s64 value;
+
+ switch (pt->type) {
+ case NLA_S8:
+ value = nla_get_s8(nla);
+ break;
+ case NLA_S16:
+ value = nla_get_s16(nla);
+ break;
+ case NLA_S32:
+ value = nla_get_s32(nla);
+ break;
+ case NLA_S64:
+ value = nla_get_s64(nla);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ nla_get_range_signed(pt, &range);
+
+ if (value < range.min || value > range.max) {
+ NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt,
+ "integer out of range");
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
+static int nla_validate_int_range(const struct nla_policy *pt,
+ const struct nlattr *nla,
+ struct netlink_ext_ack *extack,
+ unsigned int validate)
+{
+ switch (pt->type) {
+ case NLA_U8:
+ case NLA_U16:
+ case NLA_U32:
+ case NLA_U64:
+ case NLA_MSECS:
+ case NLA_BINARY:
+ case NLA_BE16:
+ case NLA_BE32:
+ return nla_validate_range_unsigned(pt, nla, extack, validate);
+ case NLA_S8:
+ case NLA_S16:
+ case NLA_S32:
+ case NLA_S64:
+ return nla_validate_int_range_signed(pt, nla, extack);
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+}
+
+static int nla_validate_mask(const struct nla_policy *pt,
+ const struct nlattr *nla,
+ struct netlink_ext_ack *extack)
+{
+ u64 value;
+
+ switch (pt->type) {
+ case NLA_U8:
+ value = nla_get_u8(nla);
+ break;
+ case NLA_U16:
+ value = nla_get_u16(nla);
+ break;
+ case NLA_U32:
+ value = nla_get_u32(nla);
+ break;
+ case NLA_U64:
+ value = nla_get_u64(nla);
+ break;
+ case NLA_BE16:
+ value = ntohs(nla_get_be16(nla));
+ break;
+ case NLA_BE32:
+ value = ntohl(nla_get_be32(nla));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (value & ~(u64)pt->mask) {
+ NL_SET_ERR_MSG_ATTR(extack, nla, "reserved bit set");
+ return -EINVAL;
+ }
+
+ return 0;
+}
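+
+/*
+ * Example (editor's sketch; the attribute name is hypothetical): a
+ * policy entry such as
+ *
+ *	[MY_ATTR_FLAGS] = NLA_POLICY_MASK(NLA_U32, 0x7),
+ *
+ * makes nla_validate_mask() reject any value with a bit set outside the
+ * low three bits.
+ */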
+
+static int validate_nla(const struct nlattr *nla, int maxtype,
+ const struct nla_policy *policy, unsigned int validate,
+ struct netlink_ext_ack *extack, unsigned int depth)
+{
+ u16 strict_start_type = policy[0].strict_start_type;
+ const struct nla_policy *pt;
+ int minlen = 0, attrlen = nla_len(nla), type = nla_type(nla);
+ int err = -ERANGE;
+
+ if (strict_start_type && type >= strict_start_type)
+ validate |= NL_VALIDATE_STRICT;
+
+ if (type <= 0 || type > maxtype)
+ return 0;
+
+ type = array_index_nospec(type, maxtype + 1);
+ pt = &policy[type];
+
+ BUG_ON(pt->type > NLA_TYPE_MAX);
+
+ if (nla_attr_len[pt->type] && attrlen != nla_attr_len[pt->type]) {
+ pr_warn_ratelimited("netlink: '%s': attribute type %d has an invalid length.\n",
+ current->comm, type);
+ if (validate & NL_VALIDATE_STRICT_ATTRS) {
+ NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt,
+ "invalid attribute length");
+ return -EINVAL;
+ }
+ }
+
+ if (validate & NL_VALIDATE_NESTED) {
+ if ((pt->type == NLA_NESTED || pt->type == NLA_NESTED_ARRAY) &&
+ !(nla->nla_type & NLA_F_NESTED)) {
+ NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt,
+ "NLA_F_NESTED is missing");
+ return -EINVAL;
+ }
+ if (pt->type != NLA_NESTED && pt->type != NLA_NESTED_ARRAY &&
+ pt->type != NLA_UNSPEC && (nla->nla_type & NLA_F_NESTED)) {
+ NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt,
+ "NLA_F_NESTED not expected");
+ return -EINVAL;
+ }
+ }
+
+ switch (pt->type) {
+ case NLA_REJECT:
+ if (extack && pt->reject_message) {
+ NL_SET_BAD_ATTR(extack, nla);
+ extack->_msg = pt->reject_message;
+ return -EINVAL;
+ }
+ err = -EINVAL;
+ goto out_err;
+
+ case NLA_FLAG:
+ if (attrlen > 0)
+ goto out_err;
+ break;
+
+ case NLA_BITFIELD32:
+ if (attrlen != sizeof(struct nla_bitfield32))
+ goto out_err;
+
+ err = validate_nla_bitfield32(nla, pt->bitfield32_valid);
+ if (err)
+ goto out_err;
+ break;
+
+ case NLA_NUL_STRING:
+ if (pt->len)
+ minlen = min_t(int, attrlen, pt->len + 1);
+ else
+ minlen = attrlen;
+
+ if (!minlen || memchr(nla_data(nla), '\0', minlen) == NULL) {
+ err = -EINVAL;
+ goto out_err;
+ }
+ fallthrough;
+
+ case NLA_STRING:
+ if (attrlen < 1)
+ goto out_err;
+
+ if (pt->len) {
+ char *buf = nla_data(nla);
+
+ if (buf[attrlen - 1] == '\0')
+ attrlen--;
+
+ if (attrlen > pt->len)
+ goto out_err;
+ }
+ break;
+
+ case NLA_BINARY:
+ if (pt->len && attrlen > pt->len)
+ goto out_err;
+ break;
+
+ case NLA_NESTED:
+		/* a nested attribute is allowed to be empty; if it's not,
+ * it must have a size of at least NLA_HDRLEN.
+ */
+ if (attrlen == 0)
+ break;
+ if (attrlen < NLA_HDRLEN)
+ goto out_err;
+ if (pt->nested_policy) {
+ err = __nla_validate_parse(nla_data(nla), nla_len(nla),
+ pt->len, pt->nested_policy,
+ validate, extack, NULL,
+ depth + 1);
+ if (err < 0) {
+ /*
+ * return directly to preserve the inner
+ * error message/attribute pointer
+ */
+ return err;
+ }
+ }
+ break;
+ case NLA_NESTED_ARRAY:
+		/* a nested array attribute is allowed to be empty; if it's not,
+ * it must have a size of at least NLA_HDRLEN.
+ */
+ if (attrlen == 0)
+ break;
+ if (attrlen < NLA_HDRLEN)
+ goto out_err;
+ if (pt->nested_policy) {
+ err = nla_validate_array(nla_data(nla), nla_len(nla),
+ pt->len, pt->nested_policy,
+ extack, validate, depth);
+ if (err < 0) {
+ /*
+ * return directly to preserve the inner
+ * error message/attribute pointer
+ */
+ return err;
+ }
+ }
+ break;
+
+ case NLA_UNSPEC:
+ if (validate & NL_VALIDATE_UNSPEC) {
+ NL_SET_ERR_MSG_ATTR(extack, nla,
+ "Unsupported attribute");
+ return -EINVAL;
+ }
+ if (attrlen < pt->len)
+ goto out_err;
+ break;
+
+ default:
+ if (pt->len)
+ minlen = pt->len;
+ else
+ minlen = nla_attr_minlen[pt->type];
+
+ if (attrlen < minlen)
+ goto out_err;
+ }
+
+ /* further validation */
+ switch (pt->validation_type) {
+ case NLA_VALIDATE_NONE:
+ /* nothing to do */
+ break;
+ case NLA_VALIDATE_RANGE_PTR:
+ case NLA_VALIDATE_RANGE:
+ case NLA_VALIDATE_RANGE_WARN_TOO_LONG:
+ case NLA_VALIDATE_MIN:
+ case NLA_VALIDATE_MAX:
+ err = nla_validate_int_range(pt, nla, extack, validate);
+ if (err)
+ return err;
+ break;
+ case NLA_VALIDATE_MASK:
+ err = nla_validate_mask(pt, nla, extack);
+ if (err)
+ return err;
+ break;
+ case NLA_VALIDATE_FUNCTION:
+ if (pt->validate) {
+ err = pt->validate(nla, extack);
+ if (err)
+ return err;
+ }
+ break;
+ }
+
+ return 0;
+out_err:
+ NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt,
+ "Attribute failed policy validation");
+ return err;
+}
+
+static int __nla_validate_parse(const struct nlattr *head, int len, int maxtype,
+ const struct nla_policy *policy,
+ unsigned int validate,
+ struct netlink_ext_ack *extack,
+ struct nlattr **tb, unsigned int depth)
+{
+ const struct nlattr *nla;
+ int rem;
+
+ if (depth >= MAX_POLICY_RECURSION_DEPTH) {
+ NL_SET_ERR_MSG(extack,
+ "allowed policy recursion depth exceeded");
+ return -EINVAL;
+ }
+
+ if (tb)
+ memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
+
+ nla_for_each_attr(nla, head, len, rem) {
+ u16 type = nla_type(nla);
+
+ if (type == 0 || type > maxtype) {
+ if (validate & NL_VALIDATE_MAXTYPE) {
+ NL_SET_ERR_MSG_ATTR(extack, nla,
+ "Unknown attribute type");
+ return -EINVAL;
+ }
+ continue;
+ }
+ type = array_index_nospec(type, maxtype + 1);
+ if (policy) {
+ int err = validate_nla(nla, maxtype, policy,
+ validate, extack, depth);
+
+ if (err < 0)
+ return err;
+ }
+
+ if (tb)
+ tb[type] = (struct nlattr *)nla;
+ }
+
+ if (unlikely(rem > 0)) {
+ pr_warn_ratelimited("netlink: %d bytes leftover after parsing attributes in process `%s'.\n",
+ rem, current->comm);
+ NL_SET_ERR_MSG(extack, "bytes leftover after parsing attributes");
+ if (validate & NL_VALIDATE_TRAILING)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * __nla_validate - Validate a stream of attributes
+ * @head: head of attribute stream
+ * @len: length of attribute stream
+ * @maxtype: maximum attribute type to be expected
+ * @policy: validation policy
+ * @validate: validation strictness
+ * @extack: extended ACK report struct
+ *
+ * Validates all attributes in the specified attribute stream against the
+ * specified policy. Validation depends on the validate flags passed, see
+ * &enum netlink_validation for more details on that.
+ * See documentation of struct nla_policy for more details.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+int __nla_validate(const struct nlattr *head, int len, int maxtype,
+ const struct nla_policy *policy, unsigned int validate,
+ struct netlink_ext_ack *extack)
+{
+ return __nla_validate_parse(head, len, maxtype, policy, validate,
+ extack, NULL, 0);
+}
+EXPORT_SYMBOL(__nla_validate);
+
+/**
+ * nla_policy_len - Determine the max. length of a policy
+ * @p: policy to use
+ * @n: number of policies
+ *
+ * Determines the maximum length of the policy's attributes. It is
+ * currently used to allocate netlink buffers roughly the size of the
+ * actual message.
+ *
+ * Returns the computed maximum total attribute length.
+ */
+int
+nla_policy_len(const struct nla_policy *p, int n)
+{
+ int i, len = 0;
+
+ for (i = 0; i < n; i++, p++) {
+ if (p->len)
+ len += nla_total_size(p->len);
+ else if (nla_attr_len[p->type])
+ len += nla_total_size(nla_attr_len[p->type]);
+ else if (nla_attr_minlen[p->type])
+ len += nla_total_size(nla_attr_minlen[p->type]);
+ }
+
+ return len;
+}
+EXPORT_SYMBOL(nla_policy_len);
+
+/**
+ * __nla_parse - Parse a stream of attributes into a tb buffer
+ * @tb: destination array with maxtype+1 elements
+ * @maxtype: maximum attribute type to be expected
+ * @head: head of attribute stream
+ * @len: length of attribute stream
+ * @policy: validation policy
+ * @validate: validation strictness
+ * @extack: extended ACK pointer
+ *
+ * Parses a stream of attributes and stores a pointer to each attribute in
+ * the tb array accessible via the attribute type.
+ * Validation is controlled by the @validate parameter.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+int __nla_parse(struct nlattr **tb, int maxtype,
+ const struct nlattr *head, int len,
+ const struct nla_policy *policy, unsigned int validate,
+ struct netlink_ext_ack *extack)
+{
+ return __nla_validate_parse(head, len, maxtype, policy, validate,
+ extack, tb, 0);
+}
+EXPORT_SYMBOL(__nla_parse);
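+
+/*
+ * Usage sketch (editor's illustration; MY_ATTR_MAX, MY_ATTR_FOO,
+ * my_policy and the attrs/attrs_len message variables are hypothetical):
+ *
+ *	struct nlattr *tb[MY_ATTR_MAX + 1];
+ *	int err;
+ *
+ *	err = __nla_parse(tb, MY_ATTR_MAX, attrs, attrs_len, my_policy,
+ *			  NL_VALIDATE_STRICT, extack);
+ *	if (err)
+ *		return err;
+ *	if (tb[MY_ATTR_FOO])
+ *		val = nla_get_u32(tb[MY_ATTR_FOO]);
+ */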
+
+/**
+ * nla_find - Find a specific attribute in a stream of attributes
+ * @head: head of attribute stream
+ * @len: length of attribute stream
+ * @attrtype: type of attribute to look for
+ *
+ * Returns the first attribute in the stream matching the specified type.
+ */
+struct nlattr *nla_find(const struct nlattr *head, int len, int attrtype)
+{
+ const struct nlattr *nla;
+ int rem;
+
+ nla_for_each_attr(nla, head, len, rem)
+ if (nla_type(nla) == attrtype)
+ return (struct nlattr *)nla;
+
+ return NULL;
+}
+EXPORT_SYMBOL(nla_find);
+
+/**
+ * nla_strscpy - Copy string attribute payload into a sized buffer
+ * @dst: Where to copy the string to.
+ * @nla: Attribute to copy the string from.
+ * @dstsize: Size of destination buffer.
+ *
+ * Copies at most dstsize - 1 bytes into the destination buffer.
+ * Unlike strlcpy, the destination buffer is always padded out.
+ *
+ * Return:
+ * * srclen - Returns @nla length (not including the trailing %NUL).
+ * * -E2BIG - If @dstsize is 0 or greater than U16_MAX, or if the @nla
+ * length is greater than @dstsize.
+ */
+ssize_t nla_strscpy(char *dst, const struct nlattr *nla, size_t dstsize)
+{
+ size_t srclen = nla_len(nla);
+ char *src = nla_data(nla);
+ ssize_t ret;
+ size_t len;
+
+ if (dstsize == 0 || WARN_ON_ONCE(dstsize > U16_MAX))
+ return -E2BIG;
+
+ if (srclen > 0 && src[srclen - 1] == '\0')
+ srclen--;
+
+ if (srclen >= dstsize) {
+ len = dstsize - 1;
+ ret = -E2BIG;
+ } else {
+ len = srclen;
+ ret = len;
+ }
+
+ memcpy(dst, src, len);
+ /* Zero pad end of dst. */
+ memset(dst + len, 0, dstsize - len);
+
+ return ret;
+}
+EXPORT_SYMBOL(nla_strscpy);
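+
+/*
+ * Usage sketch (editor's illustration; "nla" would come from a parsed
+ * tb[] array in a real caller):
+ *
+ *	char name[IFNAMSIZ];
+ *
+ *	if (nla_strscpy(name, nla, sizeof(name)) < 0)
+ *		return -E2BIG;
+ */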
+
+/**
+ * nla_strdup - Copy string attribute payload into a newly allocated buffer
+ * @nla: attribute to copy the string from
+ * @flags: the type of memory to allocate (see kmalloc).
+ *
+ * Returns a pointer to the allocated buffer or NULL on error.
+ */
+char *nla_strdup(const struct nlattr *nla, gfp_t flags)
+{
+ size_t srclen = nla_len(nla);
+ char *src = nla_data(nla), *dst;
+
+ if (srclen > 0 && src[srclen - 1] == '\0')
+ srclen--;
+
+ dst = kmalloc(srclen + 1, flags);
+ if (dst != NULL) {
+ memcpy(dst, src, srclen);
+ dst[srclen] = '\0';
+ }
+ return dst;
+}
+EXPORT_SYMBOL(nla_strdup);
+
+/**
+ * nla_memcpy - Copy a netlink attribute into another memory area
+ * @dest: where to copy to
+ * @src: netlink attribute to copy from
+ * @count: size of the destination area
+ *
+ * Note: The number of bytes copied is limited by the length of
+ * the attribute's payload.
+ *
+ * Returns the number of bytes copied.
+ */
+int nla_memcpy(void *dest, const struct nlattr *src, int count)
+{
+ int minlen = min_t(int, count, nla_len(src));
+
+ memcpy(dest, nla_data(src), minlen);
+ if (count > minlen)
+ memset(dest + minlen, 0, count - minlen);
+
+ return minlen;
+}
+EXPORT_SYMBOL(nla_memcpy);
+
+/**
+ * nla_memcmp - Compare an attribute with sized memory area
+ * @nla: netlink attribute
+ * @data: memory area
+ * @size: size of memory area
+ */
+int nla_memcmp(const struct nlattr *nla, const void *data,
+ size_t size)
+{
+ int d = nla_len(nla) - size;
+
+ if (d == 0)
+ d = memcmp(nla_data(nla), data, size);
+
+ return d;
+}
+EXPORT_SYMBOL(nla_memcmp);
+
+/**
+ * nla_strcmp - Compare a string attribute against a string
+ * @nla: netlink string attribute
+ * @str: another string
+ */
+int nla_strcmp(const struct nlattr *nla, const char *str)
+{
+ int len = strlen(str);
+ char *buf = nla_data(nla);
+ int attrlen = nla_len(nla);
+ int d;
+
+ while (attrlen > 0 && buf[attrlen - 1] == '\0')
+ attrlen--;
+
+ d = attrlen - len;
+ if (d == 0)
+ d = memcmp(nla_data(nla), str, len);
+
+ return d;
+}
+EXPORT_SYMBOL(nla_strcmp);
+
+#ifdef CONFIG_NET
+/**
+ * __nla_reserve - reserve room for attribute on the skb
+ * @skb: socket buffer to reserve room on
+ * @attrtype: attribute type
+ * @attrlen: length of attribute payload
+ *
+ * Adds a netlink attribute header to a socket buffer and reserves
+ * room for the payload but does not copy it.
+ *
+ * The caller is responsible for ensuring that the skb provides enough
+ * tailroom for the attribute header and payload.
+ */
+struct nlattr *__nla_reserve(struct sk_buff *skb, int attrtype, int attrlen)
+{
+ struct nlattr *nla;
+
+ nla = skb_put(skb, nla_total_size(attrlen));
+ nla->nla_type = attrtype;
+ nla->nla_len = nla_attr_size(attrlen);
+
+ memset((unsigned char *) nla + nla->nla_len, 0, nla_padlen(attrlen));
+
+ return nla;
+}
+EXPORT_SYMBOL(__nla_reserve);
+
+/**
+ * __nla_reserve_64bit - reserve room for attribute on the skb and align it
+ * @skb: socket buffer to reserve room on
+ * @attrtype: attribute type
+ * @attrlen: length of attribute payload
+ * @padattr: attribute type for the padding
+ *
+ * Adds a netlink attribute header to a socket buffer and reserves
+ * room for the payload but does not copy it. It also ensures that this
+ * attribute will have a 64-bit aligned nla_data() area.
+ *
+ * The caller is responsible for ensuring that the skb provides enough
+ * tailroom for the attribute header and payload.
+ */
+struct nlattr *__nla_reserve_64bit(struct sk_buff *skb, int attrtype,
+ int attrlen, int padattr)
+{
+ nla_align_64bit(skb, padattr);
+
+ return __nla_reserve(skb, attrtype, attrlen);
+}
+EXPORT_SYMBOL(__nla_reserve_64bit);
+
+/**
+ * __nla_reserve_nohdr - reserve room for attribute without header
+ * @skb: socket buffer to reserve room on
+ * @attrlen: length of attribute payload
+ *
+ * Reserves room for attribute payload without a header.
+ *
+ * The caller is responsible for ensuring that the skb provides enough
+ * tailroom for the payload.
+ */
+void *__nla_reserve_nohdr(struct sk_buff *skb, int attrlen)
+{
+ return skb_put_zero(skb, NLA_ALIGN(attrlen));
+}
+EXPORT_SYMBOL(__nla_reserve_nohdr);
+
+/**
+ * nla_reserve - reserve room for attribute on the skb
+ * @skb: socket buffer to reserve room on
+ * @attrtype: attribute type
+ * @attrlen: length of attribute payload
+ *
+ * Adds a netlink attribute header to a socket buffer and reserves
+ * room for the payload but does not copy it.
+ *
+ * Returns NULL if the tailroom of the skb is insufficient to store
+ * the attribute header and payload.
+ */
+struct nlattr *nla_reserve(struct sk_buff *skb, int attrtype, int attrlen)
+{
+ if (unlikely(skb_tailroom(skb) < nla_total_size(attrlen)))
+ return NULL;
+
+ return __nla_reserve(skb, attrtype, attrlen);
+}
+EXPORT_SYMBOL(nla_reserve);
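+
+/*
+ * Usage sketch (editor's illustration; MY_ATTR_STATS, struct my_stats
+ * and fill_stats() are hypothetical) showing the reserve-then-fill
+ * pattern used when the payload is built in place rather than copied:
+ *
+ *	struct nlattr *nla;
+ *
+ *	nla = nla_reserve(skb, MY_ATTR_STATS, sizeof(struct my_stats));
+ *	if (!nla)
+ *		return -EMSGSIZE;
+ *	fill_stats(nla_data(nla));
+ */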
+
+/**
+ * nla_reserve_64bit - reserve room for attribute on the skb and align it
+ * @skb: socket buffer to reserve room on
+ * @attrtype: attribute type
+ * @attrlen: length of attribute payload
+ * @padattr: attribute type for the padding
+ *
+ * Adds a netlink attribute header to a socket buffer and reserves
+ * room for the payload but does not copy it. It also ensures that this
+ * attribute will have a 64-bit aligned nla_data() area.
+ *
+ * Returns NULL if the tailroom of the skb is insufficient to store
+ * the attribute header and payload.
+ */
+struct nlattr *nla_reserve_64bit(struct sk_buff *skb, int attrtype, int attrlen,
+ int padattr)
+{
+ size_t len;
+
+ if (nla_need_padding_for_64bit(skb))
+ len = nla_total_size_64bit(attrlen);
+ else
+ len = nla_total_size(attrlen);
+ if (unlikely(skb_tailroom(skb) < len))
+ return NULL;
+
+ return __nla_reserve_64bit(skb, attrtype, attrlen, padattr);
+}
+EXPORT_SYMBOL(nla_reserve_64bit);
+
+/**
+ * nla_reserve_nohdr - reserve room for attribute without header
+ * @skb: socket buffer to reserve room on
+ * @attrlen: length of attribute payload
+ *
+ * Reserves room for attribute payload without a header.
+ *
+ * Returns NULL if the tailroom of the skb is insufficient to store
+ * the attribute payload.
+ */
+void *nla_reserve_nohdr(struct sk_buff *skb, int attrlen)
+{
+ if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen)))
+ return NULL;
+
+ return __nla_reserve_nohdr(skb, attrlen);
+}
+EXPORT_SYMBOL(nla_reserve_nohdr);
+
+/**
+ * __nla_put - Add a netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @attrlen: length of attribute payload
+ * @data: head of attribute payload
+ *
+ * The caller is responsible for ensuring that the skb provides enough
+ * tailroom for the attribute header and payload.
+ */
+void __nla_put(struct sk_buff *skb, int attrtype, int attrlen,
+ const void *data)
+{
+ struct nlattr *nla;
+
+ nla = __nla_reserve(skb, attrtype, attrlen);
+ memcpy(nla_data(nla), data, attrlen);
+}
+EXPORT_SYMBOL(__nla_put);
+
+/**
+ * __nla_put_64bit - Add a netlink attribute to a socket buffer and align it
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @attrlen: length of attribute payload
+ * @data: head of attribute payload
+ * @padattr: attribute type for the padding
+ *
+ * The caller is responsible for ensuring that the skb provides enough
+ * tailroom for the attribute header and payload.
+ */
+void __nla_put_64bit(struct sk_buff *skb, int attrtype, int attrlen,
+ const void *data, int padattr)
+{
+ struct nlattr *nla;
+
+ nla = __nla_reserve_64bit(skb, attrtype, attrlen, padattr);
+ memcpy(nla_data(nla), data, attrlen);
+}
+EXPORT_SYMBOL(__nla_put_64bit);
+
+/**
+ * __nla_put_nohdr - Add a netlink attribute without header
+ * @skb: socket buffer to add attribute to
+ * @attrlen: length of attribute payload
+ * @data: head of attribute payload
+ *
+ * The caller is responsible for ensuring that the skb provides enough
+ * tailroom for the attribute payload.
+ */
+void __nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data)
+{
+ void *start;
+
+ start = __nla_reserve_nohdr(skb, attrlen);
+ memcpy(start, data, attrlen);
+}
+EXPORT_SYMBOL(__nla_put_nohdr);
+
+/**
+ * nla_put - Add a netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @attrlen: length of attribute payload
+ * @data: head of attribute payload
+ *
+ * Returns -EMSGSIZE if the tailroom of the skb is insufficient to store
+ * the attribute header and payload.
+ */
+int nla_put(struct sk_buff *skb, int attrtype, int attrlen, const void *data)
+{
+ if (unlikely(skb_tailroom(skb) < nla_total_size(attrlen)))
+ return -EMSGSIZE;
+
+ __nla_put(skb, attrtype, attrlen, data);
+ return 0;
+}
+EXPORT_SYMBOL(nla_put);
+
+/**
+ * nla_put_64bit - Add a netlink attribute to a socket buffer and align it
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @attrlen: length of attribute payload
+ * @data: head of attribute payload
+ * @padattr: attribute type for the padding
+ *
+ * Returns -EMSGSIZE if the tailroom of the skb is insufficient to store
+ * the attribute header and payload.
+ */
+int nla_put_64bit(struct sk_buff *skb, int attrtype, int attrlen,
+ const void *data, int padattr)
+{
+ size_t len;
+
+ if (nla_need_padding_for_64bit(skb))
+ len = nla_total_size_64bit(attrlen);
+ else
+ len = nla_total_size(attrlen);
+ if (unlikely(skb_tailroom(skb) < len))
+ return -EMSGSIZE;
+
+ __nla_put_64bit(skb, attrtype, attrlen, data, padattr);
+ return 0;
+}
+EXPORT_SYMBOL(nla_put_64bit);
+
+/**
+ * nla_put_nohdr - Add a netlink attribute without header
+ * @skb: socket buffer to add attribute to
+ * @attrlen: length of attribute payload
+ * @data: head of attribute payload
+ *
+ * Returns -EMSGSIZE if the tailroom of the skb is insufficient to store
+ * the attribute payload.
+ */
+int nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data)
+{
+ if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen)))
+ return -EMSGSIZE;
+
+ __nla_put_nohdr(skb, attrlen, data);
+ return 0;
+}
+EXPORT_SYMBOL(nla_put_nohdr);
+
+/**
+ * nla_append - Add a netlink attribute without header or padding
+ * @skb: socket buffer to add attribute to
+ * @attrlen: length of attribute payload
+ * @data: head of attribute payload
+ *
+ * Returns -EMSGSIZE if the tailroom of the skb is insufficient to store
+ * the attribute payload.
+ */
+int nla_append(struct sk_buff *skb, int attrlen, const void *data)
+{
+ if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen)))
+ return -EMSGSIZE;
+
+ skb_put_data(skb, data, attrlen);
+ return 0;
+}
+EXPORT_SYMBOL(nla_append);
+#endif
diff --git a/lib/nmi_backtrace.c b/lib/nmi_backtrace.c
new file mode 100644
index 000000000..33c154264
--- /dev/null
+++ b/lib/nmi_backtrace.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NMI backtrace support
+ *
+ * Gratuitously copied from arch/x86/kernel/apic/hw_nmi.c by Russell King,
+ * with the following header:
+ *
+ * HW NMI watchdog support
+ *
+ * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
+ *
+ * Arch specific calls to support NMI watchdog
+ *
+ * Bits copied from original nmi.c file
+ */
+#include <linux/cpumask.h>
+#include <linux/delay.h>
+#include <linux/kprobes.h>
+#include <linux/nmi.h>
+#include <linux/cpu.h>
+#include <linux/sched/debug.h>
+
+#ifdef arch_trigger_cpumask_backtrace
+/* For reliability, we're prepared to waste bits here. */
+static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly;
+
+/* "in progress" flag of arch_trigger_cpumask_backtrace */
+static unsigned long backtrace_flag;
+
+/*
+ * When raise() is called it will be passed a pointer to the
+ * backtrace_mask. Architectures that call nmi_cpu_backtrace()
+ * directly from their raise() functions may rely on the mask
+ * they are passed being updated as a side effect of this call.
+ */
+void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
+ int exclude_cpu,
+ void (*raise)(cpumask_t *mask))
+{
+ int i, this_cpu = get_cpu();
+
+ if (test_and_set_bit(0, &backtrace_flag)) {
+ /*
+ * If there is already a trigger_all_cpu_backtrace() in progress
+		 * (backtrace_flag == 1), don't print duplicate CPU dump info.
+ */
+ put_cpu();
+ return;
+ }
+
+ cpumask_copy(to_cpumask(backtrace_mask), mask);
+ if (exclude_cpu != -1)
+ cpumask_clear_cpu(exclude_cpu, to_cpumask(backtrace_mask));
+
+ /*
+ * Don't try to send an NMI to this cpu; it may work on some
+ * architectures, but on others it may not, and we'll get
+ * information at least as useful just by doing a dump_stack() here.
+ * Note that nmi_cpu_backtrace(NULL) will clear the cpu bit.
+ */
+ if (cpumask_test_cpu(this_cpu, to_cpumask(backtrace_mask)))
+ nmi_cpu_backtrace(NULL);
+
+ if (!cpumask_empty(to_cpumask(backtrace_mask))) {
+ pr_info("Sending NMI from CPU %d to CPUs %*pbl:\n",
+ this_cpu, nr_cpumask_bits, to_cpumask(backtrace_mask));
+ nmi_backtrace_stall_snap(to_cpumask(backtrace_mask));
+ raise(to_cpumask(backtrace_mask));
+ }
+
+ /* Wait for up to 10 seconds for all CPUs to do the backtrace */
+ for (i = 0; i < 10 * 1000; i++) {
+ if (cpumask_empty(to_cpumask(backtrace_mask)))
+ break;
+ mdelay(1);
+ touch_softlockup_watchdog();
+ }
+ nmi_backtrace_stall_check(to_cpumask(backtrace_mask));
+
+ /*
+ * Force flush any remote buffers that might be stuck in IRQ context
+ * and therefore could not run their irq_work.
+ */
+ printk_trigger_flush();
+
+ clear_bit_unlock(0, &backtrace_flag);
+ put_cpu();
+}
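+
+/*
+ * Editor's sketch of how an architecture typically wires this up; the
+ * IPI helper name arch_send_nmi_ipi() is hypothetical. Arch code only
+ * supplies the raise() callback; the mask bits are cleared again by
+ * nmi_cpu_backtrace() running on each target CPU.
+ *
+ *	static void nmi_raise_cpus(cpumask_t *mask)
+ *	{
+ *		arch_send_nmi_ipi(mask);
+ *	}
+ *
+ *	void arch_trigger_cpumask_backtrace(const cpumask_t *mask,
+ *					    int exclude_cpu)
+ *	{
+ *		nmi_trigger_cpumask_backtrace(mask, exclude_cpu,
+ *					      nmi_raise_cpus);
+ *	}
+ */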
+
+// Dump stacks even for idle CPUs.
+static bool backtrace_idle;
+module_param(backtrace_idle, bool, 0644);
+
+bool nmi_cpu_backtrace(struct pt_regs *regs)
+{
+ int cpu = smp_processor_id();
+ unsigned long flags;
+
+ if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
+ /*
+ * Allow nested NMI backtraces while serializing
+ * against other CPUs.
+ */
+ printk_cpu_sync_get_irqsave(flags);
+ if (!READ_ONCE(backtrace_idle) && regs && cpu_in_idle(instruction_pointer(regs))) {
+ pr_warn("NMI backtrace for cpu %d skipped: idling at %pS\n",
+ cpu, (void *)instruction_pointer(regs));
+ } else {
+ pr_warn("NMI backtrace for cpu %d\n", cpu);
+ if (regs)
+ show_regs(regs);
+ else
+ dump_stack();
+ }
+ printk_cpu_sync_put_irqrestore(flags);
+ cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
+ return true;
+ }
+
+ return false;
+}
+NOKPROBE_SYMBOL(nmi_cpu_backtrace);
+#endif
diff --git a/lib/notifier-error-inject.c b/lib/notifier-error-inject.c
new file mode 100644
index 000000000..954c3412d
--- /dev/null
+++ b/lib/notifier-error-inject.c
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/module.h>
+
+#include "notifier-error-inject.h"
+
+static int debugfs_errno_set(void *data, u64 val)
+{
+ *(int *)data = clamp_t(int, val, -MAX_ERRNO, 0);
+ return 0;
+}
+
+static int debugfs_errno_get(void *data, u64 *val)
+{
+ *val = *(int *)data;
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE_SIGNED(fops_errno, debugfs_errno_get, debugfs_errno_set,
+ "%lld\n");
+
+static struct dentry *debugfs_create_errno(const char *name, umode_t mode,
+ struct dentry *parent, int *value)
+{
+ return debugfs_create_file(name, mode, parent, value, &fops_errno);
+}
+
+static int notifier_err_inject_callback(struct notifier_block *nb,
+ unsigned long val, void *p)
+{
+ int err = 0;
+ struct notifier_err_inject *err_inject =
+ container_of(nb, struct notifier_err_inject, nb);
+ struct notifier_err_inject_action *action;
+
+ for (action = err_inject->actions; action->name; action++) {
+ if (action->val == val) {
+ err = action->error;
+ break;
+ }
+ }
+ if (err)
+ pr_info("Injecting error (%d) to %s\n", err, action->name);
+
+ return notifier_from_errno(err);
+}
+
+struct dentry *notifier_err_inject_dir;
+EXPORT_SYMBOL_GPL(notifier_err_inject_dir);
+
+struct dentry *notifier_err_inject_init(const char *name, struct dentry *parent,
+ struct notifier_err_inject *err_inject, int priority)
+{
+ struct notifier_err_inject_action *action;
+ umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
+ struct dentry *dir;
+ struct dentry *actions_dir;
+
+ err_inject->nb.notifier_call = notifier_err_inject_callback;
+ err_inject->nb.priority = priority;
+
+ dir = debugfs_create_dir(name, parent);
+
+ actions_dir = debugfs_create_dir("actions", dir);
+
+ for (action = err_inject->actions; action->name; action++) {
+ struct dentry *action_dir;
+
+ action_dir = debugfs_create_dir(action->name, actions_dir);
+
+ /*
+		 * Create a debugfs r/w file containing action->error. If the
+		 * notifier call chain is called with action->val, it will
+		 * fail with that error code.
+ */
+ debugfs_create_errno("error", mode, action_dir, &action->error);
+ }
+ return dir;
+}
+EXPORT_SYMBOL_GPL(notifier_err_inject_init);
+
+static int __init err_inject_init(void)
+{
+ notifier_err_inject_dir =
+ debugfs_create_dir("notifier-error-inject", NULL);
+
+ return 0;
+}
+
+static void __exit err_inject_exit(void)
+{
+ debugfs_remove_recursive(notifier_err_inject_dir);
+}
+
+module_init(err_inject_init);
+module_exit(err_inject_exit);
+
+MODULE_DESCRIPTION("Notifier error injection module");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Akinobu Mita <akinobu.mita@gmail.com>");
diff --git a/lib/notifier-error-inject.h b/lib/notifier-error-inject.h
new file mode 100644
index 000000000..fafff5f2a
--- /dev/null
+++ b/lib/notifier-error-inject.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/atomic.h>
+#include <linux/debugfs.h>
+#include <linux/notifier.h>
+
+struct notifier_err_inject_action {
+ unsigned long val;
+ int error;
+ const char *name;
+};
+
+#define NOTIFIER_ERR_INJECT_ACTION(action) \
+ .name = #action, .val = (action),
+
+struct notifier_err_inject {
+ struct notifier_block nb;
+ struct notifier_err_inject_action actions[];
+	/* The actions array must be terminated with a zero sentinel */
+};
+
+extern struct dentry *notifier_err_inject_dir;
+
+extern struct dentry *notifier_err_inject_init(const char *name,
+ struct dentry *parent, struct notifier_err_inject *err_inject,
+ int priority);
diff --git a/lib/objagg.c b/lib/objagg.c
new file mode 100644
index 000000000..1e248629e
--- /dev/null
+++ b/lib/objagg.c
@@ -0,0 +1,1051 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/rhashtable.h>
+#include <linux/idr.h>
+#include <linux/list.h>
+#include <linux/sort.h>
+#include <linux/objagg.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/objagg.h>
+
+struct objagg_hints {
+ struct rhashtable node_ht;
+ struct rhashtable_params ht_params;
+ struct list_head node_list;
+ unsigned int node_count;
+ unsigned int root_count;
+ unsigned int refcount;
+ const struct objagg_ops *ops;
+};
+
+struct objagg_hints_node {
+ struct rhash_head ht_node; /* member of objagg_hints->node_ht */
+ struct list_head list; /* member of objagg_hints->node_list */
+ struct objagg_hints_node *parent;
+ unsigned int root_id;
+ struct objagg_obj_stats_info stats_info;
+ unsigned long obj[];
+};
+
+static struct objagg_hints_node *
+objagg_hints_lookup(struct objagg_hints *objagg_hints, void *obj)
+{
+ if (!objagg_hints)
+ return NULL;
+ return rhashtable_lookup_fast(&objagg_hints->node_ht, obj,
+ objagg_hints->ht_params);
+}
+
+struct objagg {
+ const struct objagg_ops *ops;
+ void *priv;
+ struct rhashtable obj_ht;
+ struct rhashtable_params ht_params;
+ struct list_head obj_list;
+ unsigned int obj_count;
+ struct ida root_ida;
+ struct objagg_hints *hints;
+};
+
+struct objagg_obj {
+ struct rhash_head ht_node; /* member of objagg->obj_ht */
+ struct list_head list; /* member of objagg->obj_list */
+ struct objagg_obj *parent; /* if the object is nested, this
+ * holds pointer to parent, otherwise NULL
+ */
+ union {
+ void *delta_priv; /* user delta private */
+ void *root_priv; /* user root private */
+ };
+ unsigned int root_id;
+ unsigned int refcount; /* counts number of users of this object
+ * including nested objects
+ */
+ struct objagg_obj_stats stats;
+ unsigned long obj[];
+};
+
+static unsigned int objagg_obj_ref_inc(struct objagg_obj *objagg_obj)
+{
+ return ++objagg_obj->refcount;
+}
+
+static unsigned int objagg_obj_ref_dec(struct objagg_obj *objagg_obj)
+{
+ return --objagg_obj->refcount;
+}
+
+static void objagg_obj_stats_inc(struct objagg_obj *objagg_obj)
+{
+ objagg_obj->stats.user_count++;
+ objagg_obj->stats.delta_user_count++;
+ if (objagg_obj->parent)
+ objagg_obj->parent->stats.delta_user_count++;
+}
+
+static void objagg_obj_stats_dec(struct objagg_obj *objagg_obj)
+{
+ objagg_obj->stats.user_count--;
+ objagg_obj->stats.delta_user_count--;
+ if (objagg_obj->parent)
+ objagg_obj->parent->stats.delta_user_count--;
+}
+
+static bool objagg_obj_is_root(const struct objagg_obj *objagg_obj)
+{
+ /* Nesting is not supported, so we can use ->parent
+ * to figure out if the object is root.
+ */
+ return !objagg_obj->parent;
+}
+
+/**
+ * objagg_obj_root_priv - obtains root private for an object
+ * @objagg_obj: objagg object instance
+ *
+ * Note: all locking must be provided by the caller.
+ *
+ * If the object is itself a root, its private is returned directly;
+ * otherwise the private of its root parent is returned instead.
+ *
+ * Returns a user private root pointer.
+ */
+const void *objagg_obj_root_priv(const struct objagg_obj *objagg_obj)
+{
+ if (objagg_obj_is_root(objagg_obj))
+ return objagg_obj->root_priv;
+ WARN_ON(!objagg_obj_is_root(objagg_obj->parent));
+ return objagg_obj->parent->root_priv;
+}
+EXPORT_SYMBOL(objagg_obj_root_priv);
+
+/**
+ * objagg_obj_delta_priv - obtains delta private for an object
+ * @objagg_obj: objagg object instance
+ *
+ * Note: all locking must be provided by the caller.
+ *
+ * Returns user private delta pointer or NULL in case the passed
+ * object is root.
+ */
+const void *objagg_obj_delta_priv(const struct objagg_obj *objagg_obj)
+{
+ if (objagg_obj_is_root(objagg_obj))
+ return NULL;
+ return objagg_obj->delta_priv;
+}
+EXPORT_SYMBOL(objagg_obj_delta_priv);
+
+/**
+ * objagg_obj_raw - obtains object user private pointer
+ * @objagg_obj: objagg object instance
+ *
+ * Note: all locking must be provided by the caller.
+ *
+ * Returns user private pointer as was passed to objagg_obj_get() by "obj" arg.
+ */
+const void *objagg_obj_raw(const struct objagg_obj *objagg_obj)
+{
+ return objagg_obj->obj;
+}
+EXPORT_SYMBOL(objagg_obj_raw);
+
+static struct objagg_obj *objagg_obj_lookup(struct objagg *objagg, void *obj)
+{
+ return rhashtable_lookup_fast(&objagg->obj_ht, obj, objagg->ht_params);
+}
+
+static int objagg_obj_parent_assign(struct objagg *objagg,
+ struct objagg_obj *objagg_obj,
+ struct objagg_obj *parent,
+ bool take_parent_ref)
+{
+ void *delta_priv;
+
+ delta_priv = objagg->ops->delta_create(objagg->priv, parent->obj,
+ objagg_obj->obj);
+ if (IS_ERR(delta_priv))
+ return PTR_ERR(delta_priv);
+
+	/* The user returned a delta private, which means that
+	 * our object can be aggregated into the parent.
+ */
+ objagg_obj->parent = parent;
+ objagg_obj->delta_priv = delta_priv;
+ if (take_parent_ref)
+ objagg_obj_ref_inc(objagg_obj->parent);
+ trace_objagg_obj_parent_assign(objagg, objagg_obj,
+ parent,
+ parent->refcount);
+ return 0;
+}
+
+static int objagg_obj_parent_lookup_assign(struct objagg *objagg,
+ struct objagg_obj *objagg_obj)
+{
+ struct objagg_obj *objagg_obj_cur;
+ int err;
+
+ list_for_each_entry(objagg_obj_cur, &objagg->obj_list, list) {
+ /* Nesting is not supported. In case the object
+ * is not root, it cannot be assigned as parent.
+ */
+ if (!objagg_obj_is_root(objagg_obj_cur))
+ continue;
+ err = objagg_obj_parent_assign(objagg, objagg_obj,
+ objagg_obj_cur, true);
+ if (!err)
+ return 0;
+ }
+ return -ENOENT;
+}
+
+static void __objagg_obj_put(struct objagg *objagg,
+ struct objagg_obj *objagg_obj);
+
+static void objagg_obj_parent_unassign(struct objagg *objagg,
+ struct objagg_obj *objagg_obj)
+{
+ trace_objagg_obj_parent_unassign(objagg, objagg_obj,
+ objagg_obj->parent,
+ objagg_obj->parent->refcount);
+ objagg->ops->delta_destroy(objagg->priv, objagg_obj->delta_priv);
+ __objagg_obj_put(objagg, objagg_obj->parent);
+}
+
+static int objagg_obj_root_id_alloc(struct objagg *objagg,
+ struct objagg_obj *objagg_obj,
+ struct objagg_hints_node *hnode)
+{
+ unsigned int min, max;
+ int root_id;
+
+ /* In case there are no hints available, the root id is invalid. */
+ if (!objagg->hints) {
+ objagg_obj->root_id = OBJAGG_OBJ_ROOT_ID_INVALID;
+ return 0;
+ }
+
+ if (hnode) {
+ min = hnode->root_id;
+ max = hnode->root_id;
+ } else {
+ /* For objects with no hint, start after the last
+ * hinted root_id.
+ */
+ min = objagg->hints->root_count;
+ max = ~0;
+ }
+
+ root_id = ida_alloc_range(&objagg->root_ida, min, max, GFP_KERNEL);
+
+ if (root_id < 0)
+ return root_id;
+ objagg_obj->root_id = root_id;
+ return 0;
+}
+
+static void objagg_obj_root_id_free(struct objagg *objagg,
+ struct objagg_obj *objagg_obj)
+{
+ if (!objagg->hints)
+ return;
+ ida_free(&objagg->root_ida, objagg_obj->root_id);
+}
+
+static int objagg_obj_root_create(struct objagg *objagg,
+ struct objagg_obj *objagg_obj,
+ struct objagg_hints_node *hnode)
+{
+ int err;
+
+ err = objagg_obj_root_id_alloc(objagg, objagg_obj, hnode);
+ if (err)
+ return err;
+ objagg_obj->root_priv = objagg->ops->root_create(objagg->priv,
+ objagg_obj->obj,
+ objagg_obj->root_id);
+ if (IS_ERR(objagg_obj->root_priv)) {
+ err = PTR_ERR(objagg_obj->root_priv);
+ goto err_root_create;
+ }
+ trace_objagg_obj_root_create(objagg, objagg_obj);
+ return 0;
+
+err_root_create:
+ objagg_obj_root_id_free(objagg, objagg_obj);
+ return err;
+}
+
+static void objagg_obj_root_destroy(struct objagg *objagg,
+ struct objagg_obj *objagg_obj)
+{
+ trace_objagg_obj_root_destroy(objagg, objagg_obj);
+ objagg->ops->root_destroy(objagg->priv, objagg_obj->root_priv);
+ objagg_obj_root_id_free(objagg, objagg_obj);
+}
+
+static struct objagg_obj *__objagg_obj_get(struct objagg *objagg, void *obj);
+
+static int objagg_obj_init_with_hints(struct objagg *objagg,
+ struct objagg_obj *objagg_obj,
+ bool *hint_found)
+{
+ struct objagg_hints_node *hnode;
+ struct objagg_obj *parent;
+ int err;
+
+ hnode = objagg_hints_lookup(objagg->hints, objagg_obj->obj);
+ if (!hnode) {
+ *hint_found = false;
+ return 0;
+ }
+ *hint_found = true;
+
+ if (!hnode->parent)
+ return objagg_obj_root_create(objagg, objagg_obj, hnode);
+
+ parent = __objagg_obj_get(objagg, hnode->parent->obj);
+ if (IS_ERR(parent))
+ return PTR_ERR(parent);
+
+ err = objagg_obj_parent_assign(objagg, objagg_obj, parent, false);
+ if (err) {
+ *hint_found = false;
+ err = 0;
+ goto err_parent_assign;
+ }
+
+ return 0;
+
+err_parent_assign:
+ objagg_obj_put(objagg, parent);
+ return err;
+}
+
+static int objagg_obj_init(struct objagg *objagg,
+ struct objagg_obj *objagg_obj)
+{
+ bool hint_found;
+ int err;
+
+ /* First, try to use hints if they are available and
+ * if they provide result.
+ */
+ err = objagg_obj_init_with_hints(objagg, objagg_obj, &hint_found);
+ if (err)
+ return err;
+
+ if (hint_found)
+ return 0;
+
+ /* Try to find if the object can be aggregated under an existing one. */
+ err = objagg_obj_parent_lookup_assign(objagg, objagg_obj);
+ if (!err)
+ return 0;
+ /* If aggregation is not possible, make the object a root. */
+ return objagg_obj_root_create(objagg, objagg_obj, NULL);
+}
+
+static void objagg_obj_fini(struct objagg *objagg,
+ struct objagg_obj *objagg_obj)
+{
+ if (!objagg_obj_is_root(objagg_obj))
+ objagg_obj_parent_unassign(objagg, objagg_obj);
+ else
+ objagg_obj_root_destroy(objagg, objagg_obj);
+}
+
+static struct objagg_obj *objagg_obj_create(struct objagg *objagg, void *obj)
+{
+ struct objagg_obj *objagg_obj;
+ int err;
+
+ objagg_obj = kzalloc(sizeof(*objagg_obj) + objagg->ops->obj_size,
+ GFP_KERNEL);
+ if (!objagg_obj)
+ return ERR_PTR(-ENOMEM);
+ objagg_obj_ref_inc(objagg_obj);
+ memcpy(objagg_obj->obj, obj, objagg->ops->obj_size);
+
+ err = objagg_obj_init(objagg, objagg_obj);
+ if (err)
+ goto err_obj_init;
+
+ err = rhashtable_insert_fast(&objagg->obj_ht, &objagg_obj->ht_node,
+ objagg->ht_params);
+ if (err)
+ goto err_ht_insert;
+ list_add(&objagg_obj->list, &objagg->obj_list);
+ objagg->obj_count++;
+ trace_objagg_obj_create(objagg, objagg_obj);
+
+ return objagg_obj;
+
+err_ht_insert:
+ objagg_obj_fini(objagg, objagg_obj);
+err_obj_init:
+ kfree(objagg_obj);
+ return ERR_PTR(err);
+}
+
+static struct objagg_obj *__objagg_obj_get(struct objagg *objagg, void *obj)
+{
+ struct objagg_obj *objagg_obj;
+
+ /* First, try to find the object exactly as user passed it,
+ * perhaps it is already in use.
+ */
+ objagg_obj = objagg_obj_lookup(objagg, obj);
+ if (objagg_obj) {
+ objagg_obj_ref_inc(objagg_obj);
+ return objagg_obj;
+ }
+
+ return objagg_obj_create(objagg, obj);
+}
+
+/**
+ * objagg_obj_get - gets an object within an objagg instance
+ * @objagg: objagg instance
+ * @obj: user-specific private object pointer
+ *
+ * Note: all locking must be provided by the caller.
+ *
+ * Size of the "obj" memory is specified in "objagg->ops".
+ *
+ * This function handles 3 main cases:
+ * 1) The object according to "obj" already exists. In that case
+ * the reference counter is incremented and the object is returned.
+ * 2) The object does not exist, but it can be aggregated within
+ * another object. In that case, user ops->delta_create() is called
+ * to obtain delta data and a new object is created with returned
+ * user-delta private pointer.
+ * 3) The object does not exist and cannot be aggregated into
+ * any of the existing objects. In that case, user ops->root_create()
+ * is called to create the root and a new object is created with
+ * returned user-root private pointer.
+ *
+ * Returns a pointer to an objagg object instance in case of success,
+ * otherwise an error pointer made with the ERR_PTR() macro.
+ */
+struct objagg_obj *objagg_obj_get(struct objagg *objagg, void *obj)
+{
+ struct objagg_obj *objagg_obj;
+
+ objagg_obj = __objagg_obj_get(objagg, obj);
+ if (IS_ERR(objagg_obj))
+ return objagg_obj;
+ objagg_obj_stats_inc(objagg_obj);
+ trace_objagg_obj_get(objagg, objagg_obj, objagg_obj->refcount);
+ return objagg_obj;
+}
+EXPORT_SYMBOL(objagg_obj_get);
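+
+/*
+ * Usage sketch (editor's illustration; "objagg" and "key" are the
+ * caller's instance and object memory of ops->obj_size bytes):
+ *
+ *	struct objagg_obj *objagg_obj;
+ *
+ *	objagg_obj = objagg_obj_get(objagg, key);
+ *	if (IS_ERR(objagg_obj))
+ *		return PTR_ERR(objagg_obj);
+ *	...
+ *	objagg_obj_put(objagg, objagg_obj);
+ */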
+
+static void objagg_obj_destroy(struct objagg *objagg,
+ struct objagg_obj *objagg_obj)
+{
+ trace_objagg_obj_destroy(objagg, objagg_obj);
+ --objagg->obj_count;
+ list_del(&objagg_obj->list);
+ rhashtable_remove_fast(&objagg->obj_ht, &objagg_obj->ht_node,
+ objagg->ht_params);
+ objagg_obj_fini(objagg, objagg_obj);
+ kfree(objagg_obj);
+}
+
+static void __objagg_obj_put(struct objagg *objagg,
+ struct objagg_obj *objagg_obj)
+{
+ if (!objagg_obj_ref_dec(objagg_obj))
+ objagg_obj_destroy(objagg, objagg_obj);
+}
+
+/**
+ * objagg_obj_put - puts an object within an objagg instance
+ * @objagg: objagg instance
+ * @objagg_obj: objagg object instance
+ *
+ * Note: all locking must be provided by the caller.
+ *
+ * Symmetric to objagg_obj_get().
+ */
+void objagg_obj_put(struct objagg *objagg, struct objagg_obj *objagg_obj)
+{
+ trace_objagg_obj_put(objagg, objagg_obj, objagg_obj->refcount);
+ objagg_obj_stats_dec(objagg_obj);
+ __objagg_obj_put(objagg, objagg_obj);
+}
+EXPORT_SYMBOL(objagg_obj_put);
+
+/**
+ * objagg_create - creates a new objagg instance
+ * @ops: user-specific callbacks
+ * @objagg_hints: hints, can be NULL
+ * @priv: pointer to a private data passed to the ops
+ *
+ * Note: all locking must be provided by the caller.
+ *
+ * The purpose of the library is to provide an infrastructure to
+ * aggregate user-specified objects. The library does not care about the
+ * type of the object. The user fills in ops which take care of the
+ * specific user object manipulation.
+ *
+ * As a trivial example, consider integer numbers. Take the number 8 as
+ * a root object; it can aggregate number 9 with delta 1, number 10 with
+ * delta 2, etc. This example is implemented as part of a testing module
+ * in the test_objagg.c file.
+ *
+ * Each objagg instance contains multiple trees. Each tree node is
+ * represented by "an object". In the current implementation there can be
+ * only root and leaf nodes. Leaf nodes are called deltas.
+ * But in general, this can be easily extended for intermediate nodes.
+ * In that extension, a delta would be associated with all non-root
+ * nodes.
+ *
+ * Returns a pointer to the newly created objagg instance in case of
+ * success, otherwise an error pointer made with the ERR_PTR() macro.
+ */
+struct objagg *objagg_create(const struct objagg_ops *ops,
+ struct objagg_hints *objagg_hints, void *priv)
+{
+ struct objagg *objagg;
+ int err;
+
+ if (WARN_ON(!ops || !ops->root_create || !ops->root_destroy ||
+ !ops->delta_check || !ops->delta_create ||
+ !ops->delta_destroy))
+ return ERR_PTR(-EINVAL);
+
+ objagg = kzalloc(sizeof(*objagg), GFP_KERNEL);
+ if (!objagg)
+ return ERR_PTR(-ENOMEM);
+ objagg->ops = ops;
+ if (objagg_hints) {
+ objagg->hints = objagg_hints;
+ objagg_hints->refcount++;
+ }
+ objagg->priv = priv;
+ INIT_LIST_HEAD(&objagg->obj_list);
+
+ objagg->ht_params.key_len = ops->obj_size;
+ objagg->ht_params.key_offset = offsetof(struct objagg_obj, obj);
+ objagg->ht_params.head_offset = offsetof(struct objagg_obj, ht_node);
+
+ err = rhashtable_init(&objagg->obj_ht, &objagg->ht_params);
+ if (err)
+ goto err_rhashtable_init;
+
+ ida_init(&objagg->root_ida);
+
+ trace_objagg_create(objagg);
+ return objagg;
+
+err_rhashtable_init:
+ kfree(objagg);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL(objagg_create);
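+
+/*
+ * Editor's sketch of a minimal ops definition (all "my_" names are
+ * hypothetical; see test_objagg.c for the complete integer-delta
+ * example mentioned above):
+ *
+ *	static const struct objagg_ops my_ops = {
+ *		.obj_size	= sizeof(struct my_key),
+ *		.delta_check	= my_delta_check,
+ *		.delta_create	= my_delta_create,
+ *		.delta_destroy	= my_delta_destroy,
+ *		.root_create	= my_root_create,
+ *		.root_destroy	= my_root_destroy,
+ *	};
+ *
+ *	objagg = objagg_create(&my_ops, NULL, priv);
+ */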
+
+/**
+ * objagg_destroy - destroys an objagg instance
+ * @objagg: objagg instance
+ *
+ * Note: all locking must be provided by the caller.
+ */
+void objagg_destroy(struct objagg *objagg)
+{
+ trace_objagg_destroy(objagg);
+ ida_destroy(&objagg->root_ida);
+ WARN_ON(!list_empty(&objagg->obj_list));
+ rhashtable_destroy(&objagg->obj_ht);
+ if (objagg->hints)
+ objagg_hints_put(objagg->hints);
+ kfree(objagg);
+}
+EXPORT_SYMBOL(objagg_destroy);
+
+static int objagg_stats_info_sort_cmp_func(const void *a, const void *b)
+{
+ const struct objagg_obj_stats_info *stats_info1 = a;
+ const struct objagg_obj_stats_info *stats_info2 = b;
+
+ if (stats_info1->is_root != stats_info2->is_root)
+ return stats_info2->is_root - stats_info1->is_root;
+ if (stats_info1->stats.delta_user_count !=
+ stats_info2->stats.delta_user_count)
+ return stats_info2->stats.delta_user_count -
+ stats_info1->stats.delta_user_count;
+ return stats_info2->stats.user_count - stats_info1->stats.user_count;
+}
+
+/**
+ * objagg_stats_get - obtains stats of the objagg instance
+ * @objagg: objagg instance
+ *
+ * Note: all locking must be provided by the caller.
+ *
+ * The returned structure contains statistics of all objects
+ * currently in use, ordered by the following rules:
+ * 1) Root objects are always on lower indexes than the rest.
+ * 2) Objects with higher delta user count are always on lower
+ * indexes.
+ * 3) In case multiple objects have the same delta user count,
+ * the objects are ordered by user count.
+ *
+ * Returns a pointer to the stats instance in case of success,
+ * otherwise an error pointer made with the ERR_PTR() macro.
+ */
+const struct objagg_stats *objagg_stats_get(struct objagg *objagg)
+{
+ struct objagg_stats *objagg_stats;
+ struct objagg_obj *objagg_obj;
+ int i;
+
+ objagg_stats = kzalloc(struct_size(objagg_stats, stats_info,
+ objagg->obj_count), GFP_KERNEL);
+ if (!objagg_stats)
+ return ERR_PTR(-ENOMEM);
+
+ i = 0;
+ list_for_each_entry(objagg_obj, &objagg->obj_list, list) {
+ memcpy(&objagg_stats->stats_info[i].stats, &objagg_obj->stats,
+ sizeof(objagg_stats->stats_info[0].stats));
+ objagg_stats->stats_info[i].objagg_obj = objagg_obj;
+ objagg_stats->stats_info[i].is_root =
+ objagg_obj_is_root(objagg_obj);
+ if (objagg_stats->stats_info[i].is_root)
+ objagg_stats->root_count++;
+ i++;
+ }
+ objagg_stats->stats_info_count = i;
+
+ sort(objagg_stats->stats_info, objagg_stats->stats_info_count,
+ sizeof(struct objagg_obj_stats_info),
+ objagg_stats_info_sort_cmp_func, NULL);
+
+ return objagg_stats;
+}
+EXPORT_SYMBOL(objagg_stats_get);
+
+/**
+ * objagg_stats_put - puts stats of the objagg instance
+ * @objagg_stats: objagg instance stats
+ *
+ * Note: all locking must be provided by the caller.
+ */
+void objagg_stats_put(const struct objagg_stats *objagg_stats)
+{
+ kfree(objagg_stats);
+}
+EXPORT_SYMBOL(objagg_stats_put);
+
+static struct objagg_hints_node *
+objagg_hints_node_create(struct objagg_hints *objagg_hints,
+ struct objagg_obj *objagg_obj, size_t obj_size,
+ struct objagg_hints_node *parent_hnode)
+{
+ unsigned int user_count = objagg_obj->stats.user_count;
+ struct objagg_hints_node *hnode;
+ int err;
+
+ hnode = kzalloc(sizeof(*hnode) + obj_size, GFP_KERNEL);
+ if (!hnode)
+ return ERR_PTR(-ENOMEM);
+ memcpy(hnode->obj, &objagg_obj->obj, obj_size);
+ hnode->stats_info.stats.user_count = user_count;
+ hnode->stats_info.stats.delta_user_count = user_count;
+ if (parent_hnode) {
+ parent_hnode->stats_info.stats.delta_user_count += user_count;
+ } else {
+ hnode->root_id = objagg_hints->root_count++;
+ hnode->stats_info.is_root = true;
+ }
+ hnode->stats_info.objagg_obj = objagg_obj;
+
+ err = rhashtable_insert_fast(&objagg_hints->node_ht, &hnode->ht_node,
+ objagg_hints->ht_params);
+ if (err)
+ goto err_ht_insert;
+
+ list_add(&hnode->list, &objagg_hints->node_list);
+ hnode->parent = parent_hnode;
+ objagg_hints->node_count++;
+
+ return hnode;
+
+err_ht_insert:
+ kfree(hnode);
+ return ERR_PTR(err);
+}
+
+static void objagg_hints_flush(struct objagg_hints *objagg_hints)
+{
+ struct objagg_hints_node *hnode, *tmp;
+
+ list_for_each_entry_safe(hnode, tmp, &objagg_hints->node_list, list) {
+ list_del(&hnode->list);
+ rhashtable_remove_fast(&objagg_hints->node_ht, &hnode->ht_node,
+ objagg_hints->ht_params);
+ kfree(hnode);
+ }
+}
+
+struct objagg_tmp_node {
+ struct objagg_obj *objagg_obj;
+ bool crossed_out;
+};
+
+struct objagg_tmp_graph {
+ struct objagg_tmp_node *nodes;
+ unsigned long nodes_count;
+ unsigned long *edges;
+};
+
+static int objagg_tmp_graph_edge_index(struct objagg_tmp_graph *graph,
+ int parent_index, int index)
+{
+ return index * graph->nodes_count + parent_index;
+}
+
+static void objagg_tmp_graph_edge_set(struct objagg_tmp_graph *graph,
+ int parent_index, int index)
+{
+ int edge_index = objagg_tmp_graph_edge_index(graph, index,
+ parent_index);
+
+ __set_bit(edge_index, graph->edges);
+}
+
+static bool objagg_tmp_graph_is_edge(struct objagg_tmp_graph *graph,
+ int parent_index, int index)
+{
+ int edge_index = objagg_tmp_graph_edge_index(graph, index,
+ parent_index);
+
+ return test_bit(edge_index, graph->edges);
+}
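+
+/*
+ * Editor's note: the edge matrix is a flat bitmap of nodes_count *
+ * nodes_count bits, and after the parameter swap above the edge
+ * "parent i -> node j" lands at bit i * nodes_count + j. With four
+ * nodes, for example, "node 2 can be a delta of node 1" is bit
+ * 1 * 4 + 2 = 6.
+ */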
+
+static unsigned int objagg_tmp_graph_node_weight(struct objagg_tmp_graph *graph,
+ unsigned int index)
+{
+ struct objagg_tmp_node *node = &graph->nodes[index];
+ unsigned int weight = node->objagg_obj->stats.user_count;
+ int j;
+
+ /* Node weight is sum of node users and all other nodes users
+ * that this node can represent with delta.
+ */
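+	/* For example (illustrative numbers only): a node with 3 users
+	 * that has edges to two not-yet-crossed-out nodes with 2 and 5
+	 * users gets a weight of 3 + 2 + 5 = 10.
+	 */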
+
+ for (j = 0; j < graph->nodes_count; j++) {
+ if (!objagg_tmp_graph_is_edge(graph, index, j))
+ continue;
+ node = &graph->nodes[j];
+ if (node->crossed_out)
+ continue;
+ weight += node->objagg_obj->stats.user_count;
+ }
+ return weight;
+}
+
+static int objagg_tmp_graph_node_max_weight(struct objagg_tmp_graph *graph)
+{
+ struct objagg_tmp_node *node;
+ unsigned int max_weight = 0;
+ unsigned int weight;
+ int max_index = -1;
+ int i;
+
+ for (i = 0; i < graph->nodes_count; i++) {
+ node = &graph->nodes[i];
+ if (node->crossed_out)
+ continue;
+ weight = objagg_tmp_graph_node_weight(graph, i);
+ if (weight >= max_weight) {
+ max_weight = weight;
+ max_index = i;
+ }
+ }
+ return max_index;
+}
+
+static struct objagg_tmp_graph *objagg_tmp_graph_create(struct objagg *objagg)
+{
+ unsigned int nodes_count = objagg->obj_count;
+ struct objagg_tmp_graph *graph;
+ struct objagg_tmp_node *node;
+ struct objagg_tmp_node *pnode;
+ struct objagg_obj *objagg_obj;
+ int i, j;
+
+ graph = kzalloc(sizeof(*graph), GFP_KERNEL);
+ if (!graph)
+ return NULL;
+
+ graph->nodes = kcalloc(nodes_count, sizeof(*graph->nodes), GFP_KERNEL);
+ if (!graph->nodes)
+ goto err_nodes_alloc;
+ graph->nodes_count = nodes_count;
+
+ graph->edges = bitmap_zalloc(nodes_count * nodes_count, GFP_KERNEL);
+ if (!graph->edges)
+ goto err_edges_alloc;
+
+ i = 0;
+ list_for_each_entry(objagg_obj, &objagg->obj_list, list) {
+ node = &graph->nodes[i++];
+ node->objagg_obj = objagg_obj;
+ }
+
+ /* Assemble a temporary graph. Insert edge X->Y in case Y can be
+ * in delta of X.
+ */
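+	/* Illustration: if delta_check() says object B can be expressed
+	 * as a delta of object A, the edge A->B is set, meaning A is a
+	 * possible root for B.
+	 */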
+ for (i = 0; i < nodes_count; i++) {
+ for (j = 0; j < nodes_count; j++) {
+ if (i == j)
+ continue;
+ pnode = &graph->nodes[i];
+ node = &graph->nodes[j];
+			if (objagg->ops->delta_check(objagg->priv,
+						     pnode->objagg_obj->obj,
+						     node->objagg_obj->obj))
+				objagg_tmp_graph_edge_set(graph, i, j);
+ }
+ }
+ return graph;
+
+err_edges_alloc:
+ kfree(graph->nodes);
+err_nodes_alloc:
+ kfree(graph);
+ return NULL;
+}
+
+static void objagg_tmp_graph_destroy(struct objagg_tmp_graph *graph)
+{
+ bitmap_free(graph->edges);
+ kfree(graph->nodes);
+ kfree(graph);
+}
+
+static int
+objagg_opt_simple_greedy_fillup_hints(struct objagg_hints *objagg_hints,
+ struct objagg *objagg)
+{
+ struct objagg_hints_node *hnode, *parent_hnode;
+ struct objagg_tmp_graph *graph;
+ struct objagg_tmp_node *node;
+ int index;
+ int j;
+ int err;
+
+ graph = objagg_tmp_graph_create(objagg);
+ if (!graph)
+ return -ENOMEM;
+
+	/* Repeatedly pick the node that can accommodate the most users,
+	 * cross it and the nodes it covers out of the graph, and save
+	 * them to the hint list.
+	 */
+ while ((index = objagg_tmp_graph_node_max_weight(graph)) != -1) {
+ node = &graph->nodes[index];
+ node->crossed_out = true;
+ hnode = objagg_hints_node_create(objagg_hints,
+ node->objagg_obj,
+ objagg->ops->obj_size,
+ NULL);
+ if (IS_ERR(hnode)) {
+ err = PTR_ERR(hnode);
+ goto out;
+ }
+ parent_hnode = hnode;
+ for (j = 0; j < graph->nodes_count; j++) {
+ if (!objagg_tmp_graph_is_edge(graph, index, j))
+ continue;
+ node = &graph->nodes[j];
+ if (node->crossed_out)
+ continue;
+ node->crossed_out = true;
+ hnode = objagg_hints_node_create(objagg_hints,
+ node->objagg_obj,
+ objagg->ops->obj_size,
+ parent_hnode);
+ if (IS_ERR(hnode)) {
+ err = PTR_ERR(hnode);
+ goto out;
+ }
+ }
+ }
+
+ err = 0;
+out:
+ objagg_tmp_graph_destroy(graph);
+ return err;
+}
+
+struct objagg_opt_algo {
+ int (*fillup_hints)(struct objagg_hints *objagg_hints,
+ struct objagg *objagg);
+};
+
+static const struct objagg_opt_algo objagg_opt_simple_greedy = {
+ .fillup_hints = objagg_opt_simple_greedy_fillup_hints,
+};
+
+static const struct objagg_opt_algo *objagg_opt_algos[] = {
+ [OBJAGG_OPT_ALGO_SIMPLE_GREEDY] = &objagg_opt_simple_greedy,
+};
+
+static int objagg_hints_obj_cmp(struct rhashtable_compare_arg *arg,
+ const void *obj)
+{
+ struct rhashtable *ht = arg->ht;
+ struct objagg_hints *objagg_hints =
+ container_of(ht, struct objagg_hints, node_ht);
+ const struct objagg_ops *ops = objagg_hints->ops;
+ const char *ptr = obj;
+
+ ptr += ht->p.key_offset;
+ return ops->hints_obj_cmp ? ops->hints_obj_cmp(ptr, arg->key) :
+ memcmp(ptr, arg->key, ht->p.key_len);
+}
+
+/**
+ * objagg_hints_get - obtains hints instance
+ * @objagg: objagg instance
+ * @opt_algo_type: type of hints finding algorithm
+ *
+ * Note: all locking must be provided by the caller.
+ *
+ * According to the algo type, the existing objects of the objagg
+ * instance are walked to assemble an optimal tree. We call this tree
+ * "hints". These hints can later be used for the creation of a new
+ * objagg instance. There, every future object creation is checked
+ * against these hints in order to find out where exactly the new
+ * object should be put, as a root or as a delta.
+ *
+ * Returns a pointer to the hints instance in case of success,
+ * otherwise it returns an error pointer created by the ERR_PTR() macro.
+ */
+struct objagg_hints *objagg_hints_get(struct objagg *objagg,
+ enum objagg_opt_algo_type opt_algo_type)
+{
+ const struct objagg_opt_algo *algo = objagg_opt_algos[opt_algo_type];
+ struct objagg_hints *objagg_hints;
+ int err;
+
+ objagg_hints = kzalloc(sizeof(*objagg_hints), GFP_KERNEL);
+ if (!objagg_hints)
+ return ERR_PTR(-ENOMEM);
+
+ objagg_hints->ops = objagg->ops;
+ objagg_hints->refcount = 1;
+
+ INIT_LIST_HEAD(&objagg_hints->node_list);
+
+ objagg_hints->ht_params.key_len = objagg->ops->obj_size;
+ objagg_hints->ht_params.key_offset =
+ offsetof(struct objagg_hints_node, obj);
+ objagg_hints->ht_params.head_offset =
+ offsetof(struct objagg_hints_node, ht_node);
+ objagg_hints->ht_params.obj_cmpfn = objagg_hints_obj_cmp;
+
+ err = rhashtable_init(&objagg_hints->node_ht, &objagg_hints->ht_params);
+ if (err)
+ goto err_rhashtable_init;
+
+ err = algo->fillup_hints(objagg_hints, objagg);
+ if (err)
+ goto err_fillup_hints;
+
+ if (WARN_ON(objagg_hints->node_count != objagg->obj_count)) {
+ err = -EINVAL;
+ goto err_node_count_check;
+ }
+
+ return objagg_hints;
+
+err_node_count_check:
+err_fillup_hints:
+ objagg_hints_flush(objagg_hints);
+ rhashtable_destroy(&objagg_hints->node_ht);
+err_rhashtable_init:
+ kfree(objagg_hints);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL(objagg_hints_get);
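+
+/* A minimal usage sketch (hypothetical caller, error handling trimmed;
+ * objagg_create() is declared in include/linux/objagg.h):
+ *
+ *	hints = objagg_hints_get(old_objagg, OBJAGG_OPT_ALGO_SIMPLE_GREEDY);
+ *	if (IS_ERR(hints))
+ *		return PTR_ERR(hints);
+ *	new_objagg = objagg_create(ops, hints, priv);
+ *	...
+ *	objagg_hints_put(hints);
+ */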
+
+/**
+ * objagg_hints_put - puts hints instance
+ * @objagg_hints: objagg hints instance
+ *
+ * Note: all locking must be provided by the caller.
+ */
+void objagg_hints_put(struct objagg_hints *objagg_hints)
+{
+ if (--objagg_hints->refcount)
+ return;
+ objagg_hints_flush(objagg_hints);
+ rhashtable_destroy(&objagg_hints->node_ht);
+ kfree(objagg_hints);
+}
+EXPORT_SYMBOL(objagg_hints_put);
+
+/**
+ * objagg_hints_stats_get - obtains stats of the hints instance
+ * @objagg_hints: hints instance
+ *
+ * Note: all locking must be provided by the caller.
+ *
+ * The returned structure contains statistics of all objects
+ * currently in use, ordered by the following rules:
+ * 1) Root objects are always on lower indexes than the rest.
+ * 2) Objects with higher delta user count are always on lower
+ * indexes.
+ * 3) In case multiple objects have the same delta user count,
+ * the objects are ordered by user count.
+ *
+ * Returns a pointer to the stats instance in case of success,
+ * otherwise it returns an error pointer created by the ERR_PTR() macro.
+ */
+const struct objagg_stats *
+objagg_hints_stats_get(struct objagg_hints *objagg_hints)
+{
+ struct objagg_stats *objagg_stats;
+ struct objagg_hints_node *hnode;
+ int i;
+
+ objagg_stats = kzalloc(struct_size(objagg_stats, stats_info,
+ objagg_hints->node_count),
+ GFP_KERNEL);
+ if (!objagg_stats)
+ return ERR_PTR(-ENOMEM);
+
+ i = 0;
+ list_for_each_entry(hnode, &objagg_hints->node_list, list) {
+ memcpy(&objagg_stats->stats_info[i], &hnode->stats_info,
+ sizeof(objagg_stats->stats_info[0]));
+ if (objagg_stats->stats_info[i].is_root)
+ objagg_stats->root_count++;
+ i++;
+ }
+ objagg_stats->stats_info_count = i;
+
+ sort(objagg_stats->stats_info, objagg_stats->stats_info_count,
+ sizeof(struct objagg_obj_stats_info),
+ objagg_stats_info_sort_cmp_func, NULL);
+
+ return objagg_stats;
+}
+EXPORT_SYMBOL(objagg_hints_stats_get);
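+
+/* Sketch of consuming the returned stats (hypothetical caller; the
+ * fields match the ones populated above):
+ *
+ *	stats = objagg_hints_stats_get(hints);
+ *	if (IS_ERR(stats))
+ *		return PTR_ERR(stats);
+ *	for (i = 0; i < stats->stats_info_count; i++)
+ *		pr_debug("root %d, delta users %u\n",
+ *			 stats->stats_info[i].is_root,
+ *			 stats->stats_info[i].stats.delta_user_count);
+ *	objagg_stats_put(stats);
+ */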
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
+MODULE_DESCRIPTION("Object aggregation manager");
diff --git a/lib/of-reconfig-notifier-error-inject.c b/lib/of-reconfig-notifier-error-inject.c
new file mode 100644
index 000000000..b26f16402
--- /dev/null
+++ b/lib/of-reconfig-notifier-error-inject.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+
+#include "notifier-error-inject.h"
+
+static int priority;
+module_param(priority, int, 0);
+MODULE_PARM_DESC(priority, "specify OF reconfig notifier priority");
+
+static struct notifier_err_inject reconfig_err_inject = {
+ .actions = {
+ { NOTIFIER_ERR_INJECT_ACTION(OF_RECONFIG_ATTACH_NODE) },
+ { NOTIFIER_ERR_INJECT_ACTION(OF_RECONFIG_DETACH_NODE) },
+ { NOTIFIER_ERR_INJECT_ACTION(OF_RECONFIG_ADD_PROPERTY) },
+ { NOTIFIER_ERR_INJECT_ACTION(OF_RECONFIG_REMOVE_PROPERTY) },
+ { NOTIFIER_ERR_INJECT_ACTION(OF_RECONFIG_UPDATE_PROPERTY) },
+ {}
+ }
+};
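+
+/* Usage sketch (path assumed from the generic notifier-error-inject
+ * helper): to make the next OF_RECONFIG_ATTACH_NODE notification fail
+ * with -ENOMEM:
+ *
+ *	echo -12 > /sys/kernel/debug/notifier-error-inject/OF-reconfig/actions/OF_RECONFIG_ATTACH_NODE/error
+ */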
+
+static struct dentry *dir;
+
+static int err_inject_init(void)
+{
+ int err;
+
+ dir = notifier_err_inject_init("OF-reconfig",
+ notifier_err_inject_dir, &reconfig_err_inject, priority);
+ if (IS_ERR(dir))
+ return PTR_ERR(dir);
+
+ err = of_reconfig_notifier_register(&reconfig_err_inject.nb);
+ if (err)
+ debugfs_remove_recursive(dir);
+
+ return err;
+}
+
+static void err_inject_exit(void)
+{
+ of_reconfig_notifier_unregister(&reconfig_err_inject.nb);
+ debugfs_remove_recursive(dir);
+}
+
+module_init(err_inject_init);
+module_exit(err_inject_exit);
+
+MODULE_DESCRIPTION("OF reconfig notifier error injection module");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Akinobu Mita <akinobu.mita@gmail.com>");
diff --git a/lib/oid_registry.c b/lib/oid_registry.c
new file mode 100644
index 000000000..fe6705cfd
--- /dev/null
+++ b/lib/oid_registry.c
@@ -0,0 +1,198 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* ASN.1 Object identifier (OID) registry
+ *
+ * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/oid_registry.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/bug.h>
+#include <linux/asn1.h>
+#include "oid_registry_data.c"
+
+MODULE_DESCRIPTION("OID Registry");
+MODULE_AUTHOR("Red Hat, Inc.");
+MODULE_LICENSE("GPL");
+
+/**
+ * look_up_OID - Find an OID registration for the specified data
+ * @data: Binary representation of the OID
+ * @datasize: Size of the binary representation
+ */
+enum OID look_up_OID(const void *data, size_t datasize)
+{
+ const unsigned char *octets = data;
+ enum OID oid;
+ unsigned char xhash;
+ unsigned i, j, k, hash;
+ size_t len;
+
+ /* Hash the OID data */
+ hash = datasize - 1;
+
+ for (i = 0; i < datasize; i++)
+ hash += octets[i] * 33;
+ hash = (hash >> 24) ^ (hash >> 16) ^ (hash >> 8) ^ hash;
+ hash &= 0xff;
+
+	/* Binary search the OID registry. OIDs are stored in ascending
+	 * order of hash value, then ascending order of size, and then
+	 * in ascending order of their value, compared byte by byte from
+	 * the tail end.
+	 */
+ i = 0;
+ k = OID__NR;
+ while (i < k) {
+ j = (i + k) / 2;
+
+ xhash = oid_search_table[j].hash;
+ if (xhash > hash) {
+ k = j;
+ continue;
+ }
+ if (xhash < hash) {
+ i = j + 1;
+ continue;
+ }
+
+ oid = oid_search_table[j].oid;
+ len = oid_index[oid + 1] - oid_index[oid];
+ if (len > datasize) {
+ k = j;
+ continue;
+ }
+ if (len < datasize) {
+ i = j + 1;
+ continue;
+ }
+
+ /* Variation is most likely to be at the tail end of the
+ * OID, so do the comparison in reverse.
+ */
+ while (len > 0) {
+ unsigned char a = oid_data[oid_index[oid] + --len];
+ unsigned char b = octets[len];
+ if (a > b) {
+ k = j;
+ goto next;
+ }
+ if (a < b) {
+ i = j + 1;
+ goto next;
+ }
+ }
+ return oid;
+ next:
+ ;
+ }
+
+ return OID__NR;
+}
+EXPORT_SYMBOL_GPL(look_up_OID);
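+
+/* Example (a sketch; assumes OID_commonName is in the registry): the
+ * DER payload of 2.5.4.3 (commonName) is the three bytes 55 04 03, so:
+ *
+ *	static const u8 cn[] = { 0x55, 0x04, 0x03 };
+ *
+ *	if (look_up_OID(cn, sizeof(cn)) == OID_commonName)
+ *		...
+ */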
+
+/**
+ * parse_OID - Parse an OID from a bytestream
+ * @data: Binary representation of the header + OID
+ * @datasize: Size of the binary representation
+ * @oid: Pointer in which to return the found OID
+ *
+ * Parse an OID from a bytestream that holds the OID in the format
+ * ASN1_OID | length | oid. The length indicator must equal datasize - 2.
+ * -EBADMSG is returned if the bytestream is too short.
+ */
+int parse_OID(const void *data, size_t datasize, enum OID *oid)
+{
+ const unsigned char *v = data;
+
+ /* we need 2 bytes of header and at least 1 byte for oid */
+ if (datasize < 3 || v[0] != ASN1_OID || v[1] != datasize - 2)
+ return -EBADMSG;
+
+ *oid = look_up_OID(data + 2, datasize - 2);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(parse_OID);
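+
+/* Sketch of the expected input framing: the same OID as above, with
+ * the ASN.1 tag (0x06) and a length byte prepended:
+ *
+ *	static const u8 tlv[] = { 0x06, 0x03, 0x55, 0x04, 0x03 };
+ *	enum OID oid;
+ *
+ *	if (parse_OID(tlv, sizeof(tlv), &oid))
+ *		return -EBADMSG;
+ */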
+
+/*
+ * sprint_oid - Print an Object Identifier into a buffer
+ * @data: The encoded OID to print
+ * @datasize: The size of the encoded OID
+ * @buffer: The buffer to render into
+ * @bufsize: The size of the buffer
+ *
+ * The OID is rendered into the buffer in "a.b.c.d" format and the number of
+ * bytes is returned. -EBADMSG is returned if the data could not be interpreted
+ * and -ENOBUFS if the buffer was too small.
+ */
+int sprint_oid(const void *data, size_t datasize, char *buffer, size_t bufsize)
+{
+ const unsigned char *v = data, *end = v + datasize;
+ unsigned long num;
+ unsigned char n;
+ size_t ret;
+ int count;
+
+ if (v >= end)
+ goto bad;
+
+ n = *v++;
+ ret = count = snprintf(buffer, bufsize, "%u.%u", n / 40, n % 40);
+ if (count >= bufsize)
+ return -ENOBUFS;
+ buffer += count;
+ bufsize -= count;
+
+ while (v < end) {
+ n = *v++;
+ if (!(n & 0x80)) {
+ num = n;
+ } else {
+ num = n & 0x7f;
+ do {
+ if (v >= end)
+ goto bad;
+ n = *v++;
+ num <<= 7;
+ num |= n & 0x7f;
+ } while (n & 0x80);
+ }
+ ret += count = snprintf(buffer, bufsize, ".%lu", num);
+ if (count >= bufsize)
+ return -ENOBUFS;
+ buffer += count;
+ bufsize -= count;
+ }
+
+ return ret;
+
+bad:
+ snprintf(buffer, bufsize, "(bad)");
+ return -EBADMSG;
+}
+EXPORT_SYMBOL_GPL(sprint_oid);
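+
+/* Worked example of the base-128 arc decoding above: the bytes
+ * 2A 86 48 render as "1.2.840": 0x2A == 42 gives 42/40 == 1 and
+ * 42%40 == 2, then 0x86 0x48 decode to (6 << 7) | 0x48 == 840.
+ */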
+
+/**
+ * sprint_OID - Print an Object Identifier into a buffer
+ * @oid: The OID to print
+ * @buffer: The buffer to render into
+ * @bufsize: The size of the buffer
+ *
+ * The OID is rendered into the buffer in "a.b.c.d" format and the number of
+ * bytes is returned.
+ */
+int sprint_OID(enum OID oid, char *buffer, size_t bufsize)
+{
+ int ret;
+
+ BUG_ON(oid >= OID__NR);
+
+ ret = sprint_oid(oid_data + oid_index[oid],
+ oid_index[oid + 1] - oid_index[oid],
+ buffer, bufsize);
+ BUG_ON(ret == -EBADMSG);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(sprint_OID);
diff --git a/lib/once.c b/lib/once.c
new file mode 100644
index 000000000..2c306f0e8
--- /dev/null
+++ b/lib/once.c
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/once.h>
+#include <linux/random.h>
+#include <linux/module.h>
+
+struct once_work {
+ struct work_struct work;
+ struct static_key_true *key;
+ struct module *module;
+};
+
+static void once_deferred(struct work_struct *w)
+{
+ struct once_work *work;
+
+ work = container_of(w, struct once_work, work);
+ BUG_ON(!static_key_enabled(work->key));
+ static_branch_disable(work->key);
+ module_put(work->module);
+ kfree(work);
+}
+
+static void once_disable_jump(struct static_key_true *key, struct module *mod)
+{
+ struct once_work *w;
+
+ w = kmalloc(sizeof(*w), GFP_ATOMIC);
+ if (!w)
+ return;
+
+ INIT_WORK(&w->work, once_deferred);
+ w->key = key;
+ w->module = mod;
+ __module_get(mod);
+ schedule_work(&w->work);
+}
+
+static DEFINE_SPINLOCK(once_lock);
+
+bool __do_once_start(bool *done, unsigned long *flags)
+ __acquires(once_lock)
+{
+ spin_lock_irqsave(&once_lock, *flags);
+ if (*done) {
+ spin_unlock_irqrestore(&once_lock, *flags);
+ /* Keep sparse happy by restoring an even lock count on
+ * this lock. In case we return here, we don't call into
+ * __do_once_done but return early in the DO_ONCE() macro.
+ */
+ __acquire(once_lock);
+ return false;
+ }
+
+ return true;
+}
+EXPORT_SYMBOL(__do_once_start);
+
+void __do_once_done(bool *done, struct static_key_true *once_key,
+ unsigned long *flags, struct module *mod)
+ __releases(once_lock)
+{
+ *done = true;
+ spin_unlock_irqrestore(&once_lock, *flags);
+ once_disable_jump(once_key, mod);
+}
+EXPORT_SYMBOL(__do_once_done);
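+
+/* The helpers above back the DO_ONCE() macro in <linux/once.h>. A
+ * typical (illustrative) use is lazy one-time seeding:
+ *
+ *	static u8 key[16];
+ *
+ *	DO_ONCE(get_random_bytes, key, sizeof(key));
+ *
+ * The first caller runs get_random_bytes(); once that completes, the
+ * static branch is disabled and subsequent calls are nearly free.
+ */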
+
+static DEFINE_MUTEX(once_mutex);
+
+bool __do_once_sleepable_start(bool *done)
+ __acquires(once_mutex)
+{
+ mutex_lock(&once_mutex);
+ if (*done) {
+ mutex_unlock(&once_mutex);
+ /* Keep sparse happy by restoring an even lock count on
+ * this mutex. In case we return here, we don't call into
+ * __do_once_done but return early in the DO_ONCE_SLEEPABLE() macro.
+ */
+ __acquire(once_mutex);
+ return false;
+ }
+
+ return true;
+}
+EXPORT_SYMBOL(__do_once_sleepable_start);
+
+void __do_once_sleepable_done(bool *done, struct static_key_true *once_key,
+ struct module *mod)
+ __releases(once_mutex)
+{
+ *done = true;
+ mutex_unlock(&once_mutex);
+ once_disable_jump(once_key, mod);
+}
+EXPORT_SYMBOL(__do_once_sleepable_done);
diff --git a/lib/overflow_kunit.c b/lib/overflow_kunit.c
new file mode 100644
index 000000000..34db0b3aa
--- /dev/null
+++ b/lib/overflow_kunit.c
@@ -0,0 +1,1149 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Test cases for arithmetic overflow checks. See:
+ * "Running tests with kunit_tool" at Documentation/dev-tools/kunit/start.rst
+ * ./tools/testing/kunit/kunit.py run overflow [--raw_output]
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <kunit/test.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/overflow.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+
+#define SKIP(cond, reason) do { \
+ if (cond) { \
+ kunit_skip(test, reason); \
+ return; \
+ } \
+} while (0)
+
+/*
+ * Clang 11 and earlier generate unwanted libcalls for signed output
+ * on unsigned input.
+ */
+#if defined(CONFIG_CC_IS_CLANG) && __clang_major__ <= 11
+# define SKIP_SIGN_MISMATCH(t) SKIP(t, "Clang 11 unwanted libcalls")
+#else
+# define SKIP_SIGN_MISMATCH(t) do { } while (0)
+#endif
+
+/*
+ * Clang 13 and earlier generate unwanted libcalls for 64-bit tests on
+ * 32-bit hosts.
+ */
+#if defined(CONFIG_CC_IS_CLANG) && __clang_major__ <= 13 && \
+ BITS_PER_LONG != 64
+# define SKIP_64_ON_32(t) SKIP(t, "Clang 13 unwanted libcalls")
+#else
+# define SKIP_64_ON_32(t) do { } while (0)
+#endif
+
+#define DEFINE_TEST_ARRAY_TYPED(t1, t2, t) \
+ static const struct test_ ## t1 ## _ ## t2 ## __ ## t { \
+ t1 a; \
+ t2 b; \
+ t sum, diff, prod; \
+ bool s_of, d_of, p_of; \
+ } t1 ## _ ## t2 ## __ ## t ## _tests[]
+
+#define DEFINE_TEST_ARRAY(t) DEFINE_TEST_ARRAY_TYPED(t, t, t)
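+
+/* Each row lists the operands a and b, the expected (possibly wrapped)
+ * sum, difference and product, and whether each of the three operations
+ * overflows. For instance, the u8 row {1, U8_MAX, 0, 2, U8_MAX, true,
+ * true, false} encodes 1 + 255 == 0 (overflow), 1 - 255 == 2 (overflow)
+ * and 1 * 255 == 255 (no overflow).
+ */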
+
+DEFINE_TEST_ARRAY(u8) = {
+ {0, 0, 0, 0, 0, false, false, false},
+ {1, 1, 2, 0, 1, false, false, false},
+ {0, 1, 1, U8_MAX, 0, false, true, false},
+ {1, 0, 1, 1, 0, false, false, false},
+ {0, U8_MAX, U8_MAX, 1, 0, false, true, false},
+ {U8_MAX, 0, U8_MAX, U8_MAX, 0, false, false, false},
+ {1, U8_MAX, 0, 2, U8_MAX, true, true, false},
+ {U8_MAX, 1, 0, U8_MAX-1, U8_MAX, true, false, false},
+ {U8_MAX, U8_MAX, U8_MAX-1, 0, 1, true, false, true},
+
+ {U8_MAX, U8_MAX-1, U8_MAX-2, 1, 2, true, false, true},
+ {U8_MAX-1, U8_MAX, U8_MAX-2, U8_MAX, 2, true, true, true},
+
+ {1U << 3, 1U << 3, 1U << 4, 0, 1U << 6, false, false, false},
+ {1U << 4, 1U << 4, 1U << 5, 0, 0, false, false, true},
+ {1U << 4, 1U << 3, 3*(1U << 3), 1U << 3, 1U << 7, false, false, false},
+ {1U << 7, 1U << 7, 0, 0, 0, true, false, true},
+
+ {48, 32, 80, 16, 0, false, false, true},
+ {128, 128, 0, 0, 0, true, false, true},
+ {123, 234, 101, 145, 110, true, true, true},
+};
+DEFINE_TEST_ARRAY(u16) = {
+ {0, 0, 0, 0, 0, false, false, false},
+ {1, 1, 2, 0, 1, false, false, false},
+ {0, 1, 1, U16_MAX, 0, false, true, false},
+ {1, 0, 1, 1, 0, false, false, false},
+ {0, U16_MAX, U16_MAX, 1, 0, false, true, false},
+ {U16_MAX, 0, U16_MAX, U16_MAX, 0, false, false, false},
+ {1, U16_MAX, 0, 2, U16_MAX, true, true, false},
+ {U16_MAX, 1, 0, U16_MAX-1, U16_MAX, true, false, false},
+ {U16_MAX, U16_MAX, U16_MAX-1, 0, 1, true, false, true},
+
+ {U16_MAX, U16_MAX-1, U16_MAX-2, 1, 2, true, false, true},
+ {U16_MAX-1, U16_MAX, U16_MAX-2, U16_MAX, 2, true, true, true},
+
+ {1U << 7, 1U << 7, 1U << 8, 0, 1U << 14, false, false, false},
+ {1U << 8, 1U << 8, 1U << 9, 0, 0, false, false, true},
+ {1U << 8, 1U << 7, 3*(1U << 7), 1U << 7, 1U << 15, false, false, false},
+ {1U << 15, 1U << 15, 0, 0, 0, true, false, true},
+
+ {123, 234, 357, 65425, 28782, false, true, false},
+ {1234, 2345, 3579, 64425, 10146, false, true, true},
+};
+DEFINE_TEST_ARRAY(u32) = {
+ {0, 0, 0, 0, 0, false, false, false},
+ {1, 1, 2, 0, 1, false, false, false},
+ {0, 1, 1, U32_MAX, 0, false, true, false},
+ {1, 0, 1, 1, 0, false, false, false},
+ {0, U32_MAX, U32_MAX, 1, 0, false, true, false},
+ {U32_MAX, 0, U32_MAX, U32_MAX, 0, false, false, false},
+ {1, U32_MAX, 0, 2, U32_MAX, true, true, false},
+ {U32_MAX, 1, 0, U32_MAX-1, U32_MAX, true, false, false},
+ {U32_MAX, U32_MAX, U32_MAX-1, 0, 1, true, false, true},
+
+ {U32_MAX, U32_MAX-1, U32_MAX-2, 1, 2, true, false, true},
+ {U32_MAX-1, U32_MAX, U32_MAX-2, U32_MAX, 2, true, true, true},
+
+ {1U << 15, 1U << 15, 1U << 16, 0, 1U << 30, false, false, false},
+ {1U << 16, 1U << 16, 1U << 17, 0, 0, false, false, true},
+ {1U << 16, 1U << 15, 3*(1U << 15), 1U << 15, 1U << 31, false, false, false},
+ {1U << 31, 1U << 31, 0, 0, 0, true, false, true},
+
+ {-2U, 1U, -1U, -3U, -2U, false, false, false},
+ {-4U, 5U, 1U, -9U, -20U, true, false, true},
+};
+
+DEFINE_TEST_ARRAY(u64) = {
+ {0, 0, 0, 0, 0, false, false, false},
+ {1, 1, 2, 0, 1, false, false, false},
+ {0, 1, 1, U64_MAX, 0, false, true, false},
+ {1, 0, 1, 1, 0, false, false, false},
+ {0, U64_MAX, U64_MAX, 1, 0, false, true, false},
+ {U64_MAX, 0, U64_MAX, U64_MAX, 0, false, false, false},
+ {1, U64_MAX, 0, 2, U64_MAX, true, true, false},
+ {U64_MAX, 1, 0, U64_MAX-1, U64_MAX, true, false, false},
+ {U64_MAX, U64_MAX, U64_MAX-1, 0, 1, true, false, true},
+
+ {U64_MAX, U64_MAX-1, U64_MAX-2, 1, 2, true, false, true},
+ {U64_MAX-1, U64_MAX, U64_MAX-2, U64_MAX, 2, true, true, true},
+
+ {1ULL << 31, 1ULL << 31, 1ULL << 32, 0, 1ULL << 62, false, false, false},
+ {1ULL << 32, 1ULL << 32, 1ULL << 33, 0, 0, false, false, true},
+ {1ULL << 32, 1ULL << 31, 3*(1ULL << 31), 1ULL << 31, 1ULL << 63, false, false, false},
+ {1ULL << 63, 1ULL << 63, 0, 0, 0, true, false, true},
+ {1000000000ULL /* 10^9 */, 10000000000ULL /* 10^10 */,
+ 11000000000ULL, 18446744064709551616ULL, 10000000000000000000ULL,
+ false, true, false},
+ {-15ULL, 10ULL, -5ULL, -25ULL, -150ULL, false, false, true},
+};
+
+DEFINE_TEST_ARRAY(s8) = {
+ {0, 0, 0, 0, 0, false, false, false},
+
+ {0, S8_MAX, S8_MAX, -S8_MAX, 0, false, false, false},
+ {S8_MAX, 0, S8_MAX, S8_MAX, 0, false, false, false},
+ {0, S8_MIN, S8_MIN, S8_MIN, 0, false, true, false},
+ {S8_MIN, 0, S8_MIN, S8_MIN, 0, false, false, false},
+
+ {-1, S8_MIN, S8_MAX, S8_MAX, S8_MIN, true, false, true},
+ {S8_MIN, -1, S8_MAX, -S8_MAX, S8_MIN, true, false, true},
+ {-1, S8_MAX, S8_MAX-1, S8_MIN, -S8_MAX, false, false, false},
+ {S8_MAX, -1, S8_MAX-1, S8_MIN, -S8_MAX, false, true, false},
+ {-1, -S8_MAX, S8_MIN, S8_MAX-1, S8_MAX, false, false, false},
+ {-S8_MAX, -1, S8_MIN, S8_MIN+2, S8_MAX, false, false, false},
+
+ {1, S8_MIN, -S8_MAX, -S8_MAX, S8_MIN, false, true, false},
+ {S8_MIN, 1, -S8_MAX, S8_MAX, S8_MIN, false, true, false},
+ {1, S8_MAX, S8_MIN, S8_MIN+2, S8_MAX, true, false, false},
+ {S8_MAX, 1, S8_MIN, S8_MAX-1, S8_MAX, true, false, false},
+
+ {S8_MIN, S8_MIN, 0, 0, 0, true, false, true},
+ {S8_MAX, S8_MAX, -2, 0, 1, true, false, true},
+
+ {-4, -32, -36, 28, -128, false, false, true},
+ {-4, 32, 28, -36, -128, false, false, false},
+};
+
+DEFINE_TEST_ARRAY(s16) = {
+ {0, 0, 0, 0, 0, false, false, false},
+
+ {0, S16_MAX, S16_MAX, -S16_MAX, 0, false, false, false},
+ {S16_MAX, 0, S16_MAX, S16_MAX, 0, false, false, false},
+ {0, S16_MIN, S16_MIN, S16_MIN, 0, false, true, false},
+ {S16_MIN, 0, S16_MIN, S16_MIN, 0, false, false, false},
+
+ {-1, S16_MIN, S16_MAX, S16_MAX, S16_MIN, true, false, true},
+ {S16_MIN, -1, S16_MAX, -S16_MAX, S16_MIN, true, false, true},
+ {-1, S16_MAX, S16_MAX-1, S16_MIN, -S16_MAX, false, false, false},
+ {S16_MAX, -1, S16_MAX-1, S16_MIN, -S16_MAX, false, true, false},
+ {-1, -S16_MAX, S16_MIN, S16_MAX-1, S16_MAX, false, false, false},
+ {-S16_MAX, -1, S16_MIN, S16_MIN+2, S16_MAX, false, false, false},
+
+ {1, S16_MIN, -S16_MAX, -S16_MAX, S16_MIN, false, true, false},
+ {S16_MIN, 1, -S16_MAX, S16_MAX, S16_MIN, false, true, false},
+ {1, S16_MAX, S16_MIN, S16_MIN+2, S16_MAX, true, false, false},
+ {S16_MAX, 1, S16_MIN, S16_MAX-1, S16_MAX, true, false, false},
+
+ {S16_MIN, S16_MIN, 0, 0, 0, true, false, true},
+ {S16_MAX, S16_MAX, -2, 0, 1, true, false, true},
+};
+DEFINE_TEST_ARRAY(s32) = {
+ {0, 0, 0, 0, 0, false, false, false},
+
+ {0, S32_MAX, S32_MAX, -S32_MAX, 0, false, false, false},
+ {S32_MAX, 0, S32_MAX, S32_MAX, 0, false, false, false},
+ {0, S32_MIN, S32_MIN, S32_MIN, 0, false, true, false},
+ {S32_MIN, 0, S32_MIN, S32_MIN, 0, false, false, false},
+
+ {-1, S32_MIN, S32_MAX, S32_MAX, S32_MIN, true, false, true},
+ {S32_MIN, -1, S32_MAX, -S32_MAX, S32_MIN, true, false, true},
+ {-1, S32_MAX, S32_MAX-1, S32_MIN, -S32_MAX, false, false, false},
+ {S32_MAX, -1, S32_MAX-1, S32_MIN, -S32_MAX, false, true, false},
+ {-1, -S32_MAX, S32_MIN, S32_MAX-1, S32_MAX, false, false, false},
+ {-S32_MAX, -1, S32_MIN, S32_MIN+2, S32_MAX, false, false, false},
+
+ {1, S32_MIN, -S32_MAX, -S32_MAX, S32_MIN, false, true, false},
+ {S32_MIN, 1, -S32_MAX, S32_MAX, S32_MIN, false, true, false},
+ {1, S32_MAX, S32_MIN, S32_MIN+2, S32_MAX, true, false, false},
+ {S32_MAX, 1, S32_MIN, S32_MAX-1, S32_MAX, true, false, false},
+
+ {S32_MIN, S32_MIN, 0, 0, 0, true, false, true},
+ {S32_MAX, S32_MAX, -2, 0, 1, true, false, true},
+};
+
+DEFINE_TEST_ARRAY(s64) = {
+ {0, 0, 0, 0, 0, false, false, false},
+
+ {0, S64_MAX, S64_MAX, -S64_MAX, 0, false, false, false},
+ {S64_MAX, 0, S64_MAX, S64_MAX, 0, false, false, false},
+ {0, S64_MIN, S64_MIN, S64_MIN, 0, false, true, false},
+ {S64_MIN, 0, S64_MIN, S64_MIN, 0, false, false, false},
+
+ {-1, S64_MIN, S64_MAX, S64_MAX, S64_MIN, true, false, true},
+ {S64_MIN, -1, S64_MAX, -S64_MAX, S64_MIN, true, false, true},
+ {-1, S64_MAX, S64_MAX-1, S64_MIN, -S64_MAX, false, false, false},
+ {S64_MAX, -1, S64_MAX-1, S64_MIN, -S64_MAX, false, true, false},
+ {-1, -S64_MAX, S64_MIN, S64_MAX-1, S64_MAX, false, false, false},
+ {-S64_MAX, -1, S64_MIN, S64_MIN+2, S64_MAX, false, false, false},
+
+ {1, S64_MIN, -S64_MAX, -S64_MAX, S64_MIN, false, true, false},
+ {S64_MIN, 1, -S64_MAX, S64_MAX, S64_MIN, false, true, false},
+ {1, S64_MAX, S64_MIN, S64_MIN+2, S64_MAX, true, false, false},
+ {S64_MAX, 1, S64_MIN, S64_MAX-1, S64_MAX, true, false, false},
+
+ {S64_MIN, S64_MIN, 0, 0, 0, true, false, true},
+ {S64_MAX, S64_MAX, -2, 0, 1, true, false, true},
+
+ {-1, -1, -2, 0, 1, false, false, false},
+ {-1, -128, -129, 127, 128, false, false, false},
+ {-128, -1, -129, -127, 128, false, false, false},
+ {0, -S64_MAX, -S64_MAX, S64_MAX, 0, false, false, false},
+};
+
+#define check_one_op(t, fmt, op, sym, a, b, r, of) do { \
+ int _a_orig = a, _a_bump = a + 1; \
+ int _b_orig = b, _b_bump = b + 1; \
+ bool _of; \
+ t _r; \
+ \
+ _of = check_ ## op ## _overflow(a, b, &_r); \
+ KUNIT_EXPECT_EQ_MSG(test, _of, of, \
+ "expected "fmt" "sym" "fmt" to%s overflow (type %s)\n", \
+ a, b, of ? "" : " not", #t); \
+ KUNIT_EXPECT_EQ_MSG(test, _r, r, \
+ "expected "fmt" "sym" "fmt" == "fmt", got "fmt" (type %s)\n", \
+ a, b, r, _r, #t); \
+ /* Check for internal macro side-effects. */ \
+ _of = check_ ## op ## _overflow(_a_orig++, _b_orig++, &_r); \
+ KUNIT_EXPECT_EQ_MSG(test, _a_orig, _a_bump, "Unexpected " #op " macro side-effect!\n"); \
+ KUNIT_EXPECT_EQ_MSG(test, _b_orig, _b_bump, "Unexpected " #op " macro side-effect!\n"); \
+} while (0)
+
+#define DEFINE_TEST_FUNC_TYPED(n, t, fmt) \
+static void do_test_ ## n(struct kunit *test, const struct test_ ## n *p) \
+{ \
+ check_one_op(t, fmt, add, "+", p->a, p->b, p->sum, p->s_of); \
+ check_one_op(t, fmt, add, "+", p->b, p->a, p->sum, p->s_of); \
+ check_one_op(t, fmt, sub, "-", p->a, p->b, p->diff, p->d_of); \
+ check_one_op(t, fmt, mul, "*", p->a, p->b, p->prod, p->p_of); \
+ check_one_op(t, fmt, mul, "*", p->b, p->a, p->prod, p->p_of); \
+} \
+ \
+static void n ## _overflow_test(struct kunit *test) { \
+ unsigned i; \
+ \
+ SKIP_64_ON_32(__same_type(t, u64)); \
+ SKIP_64_ON_32(__same_type(t, s64)); \
+ SKIP_SIGN_MISMATCH(__same_type(n ## _tests[0].a, u32) && \
+ __same_type(n ## _tests[0].b, u32) && \
+ __same_type(n ## _tests[0].sum, int)); \
+ \
+ for (i = 0; i < ARRAY_SIZE(n ## _tests); ++i) \
+ do_test_ ## n(test, &n ## _tests[i]); \
+ kunit_info(test, "%zu %s arithmetic tests finished\n", \
+ ARRAY_SIZE(n ## _tests), #n); \
+}
+
+#define DEFINE_TEST_FUNC(t, fmt) \
+ DEFINE_TEST_FUNC_TYPED(t ## _ ## t ## __ ## t, t, fmt)
+
+DEFINE_TEST_FUNC(u8, "%d");
+DEFINE_TEST_FUNC(s8, "%d");
+DEFINE_TEST_FUNC(u16, "%d");
+DEFINE_TEST_FUNC(s16, "%d");
+DEFINE_TEST_FUNC(u32, "%u");
+DEFINE_TEST_FUNC(s32, "%d");
+DEFINE_TEST_FUNC(u64, "%llu");
+DEFINE_TEST_FUNC(s64, "%lld");
+
+DEFINE_TEST_ARRAY_TYPED(u32, u32, u8) = {
+ {0, 0, 0, 0, 0, false, false, false},
+ {U8_MAX, 2, 1, U8_MAX - 2, U8_MAX - 1, true, false, true},
+ {U8_MAX + 1, 0, 0, 0, 0, true, true, false},
+};
+DEFINE_TEST_FUNC_TYPED(u32_u32__u8, u8, "%d");
+
+DEFINE_TEST_ARRAY_TYPED(u32, u32, int) = {
+ {0, 0, 0, 0, 0, false, false, false},
+ {U32_MAX, 0, -1, -1, 0, true, true, false},
+};
+DEFINE_TEST_FUNC_TYPED(u32_u32__int, int, "%d");
+
+DEFINE_TEST_ARRAY_TYPED(u8, u8, int) = {
+ {0, 0, 0, 0, 0, false, false, false},
+ {U8_MAX, U8_MAX, 2 * U8_MAX, 0, U8_MAX * U8_MAX, false, false, false},
+ {1, 2, 3, -1, 2, false, false, false},
+};
+DEFINE_TEST_FUNC_TYPED(u8_u8__int, int, "%d");
+
+DEFINE_TEST_ARRAY_TYPED(int, int, u8) = {
+ {0, 0, 0, 0, 0, false, false, false},
+ {1, 2, 3, U8_MAX, 2, false, true, false},
+ {-1, 0, U8_MAX, U8_MAX, 0, true, true, false},
+};
+DEFINE_TEST_FUNC_TYPED(int_int__u8, u8, "%d");
+
+/* Args are: value, shift, type, expected result, overflow expected */
+#define TEST_ONE_SHIFT(a, s, t, expect, of) do { \
+ typeof(a) __a = (a); \
+ typeof(s) __s = (s); \
+ t __e = (expect); \
+ t __d; \
+ bool __of = check_shl_overflow(__a, __s, &__d); \
+ if (__of != of) { \
+ KUNIT_EXPECT_EQ_MSG(test, __of, of, \
+ "expected (%s)(%s << %s) to%s overflow\n", \
+ #t, #a, #s, of ? "" : " not"); \
+ } else if (!__of && __d != __e) { \
+ KUNIT_EXPECT_EQ_MSG(test, __d, __e, \
+ "expected (%s)(%s << %s) == %s\n", \
+ #t, #a, #s, #expect); \
+ if ((t)-1 < 0) \
+ kunit_info(test, "got %lld\n", (s64)__d); \
+ else \
+ kunit_info(test, "got %llu\n", (u64)__d); \
+ } \
+ count++; \
+} while (0)
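+
+/* A direct (illustrative) use of the helper exercised by the macro
+ * above:
+ *
+ *	u8 dst;
+ *	bool of = check_shl_overflow(1, 8, &dst);
+ *
+ * Here "of" is true because the bit is shifted off the end of a u8.
+ */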
+
+static void shift_sane_test(struct kunit *test)
+{
+ int count = 0;
+
+ /* Sane shifts. */
+ TEST_ONE_SHIFT(1, 0, u8, 1 << 0, false);
+ TEST_ONE_SHIFT(1, 4, u8, 1 << 4, false);
+ TEST_ONE_SHIFT(1, 7, u8, 1 << 7, false);
+ TEST_ONE_SHIFT(0xF, 4, u8, 0xF << 4, false);
+ TEST_ONE_SHIFT(1, 0, u16, 1 << 0, false);
+ TEST_ONE_SHIFT(1, 10, u16, 1 << 10, false);
+ TEST_ONE_SHIFT(1, 15, u16, 1 << 15, false);
+ TEST_ONE_SHIFT(0xFF, 8, u16, 0xFF << 8, false);
+ TEST_ONE_SHIFT(1, 0, int, 1 << 0, false);
+ TEST_ONE_SHIFT(1, 16, int, 1 << 16, false);
+ TEST_ONE_SHIFT(1, 30, int, 1 << 30, false);
+ TEST_ONE_SHIFT(1, 0, s32, 1 << 0, false);
+ TEST_ONE_SHIFT(1, 16, s32, 1 << 16, false);
+ TEST_ONE_SHIFT(1, 30, s32, 1 << 30, false);
+ TEST_ONE_SHIFT(1, 0, unsigned int, 1U << 0, false);
+ TEST_ONE_SHIFT(1, 20, unsigned int, 1U << 20, false);
+ TEST_ONE_SHIFT(1, 31, unsigned int, 1U << 31, false);
+ TEST_ONE_SHIFT(0xFFFFU, 16, unsigned int, 0xFFFFU << 16, false);
+ TEST_ONE_SHIFT(1, 0, u32, 1U << 0, false);
+ TEST_ONE_SHIFT(1, 20, u32, 1U << 20, false);
+ TEST_ONE_SHIFT(1, 31, u32, 1U << 31, false);
+ TEST_ONE_SHIFT(0xFFFFU, 16, u32, 0xFFFFU << 16, false);
+ TEST_ONE_SHIFT(1, 0, u64, 1ULL << 0, false);
+ TEST_ONE_SHIFT(1, 40, u64, 1ULL << 40, false);
+ TEST_ONE_SHIFT(1, 63, u64, 1ULL << 63, false);
+ TEST_ONE_SHIFT(0xFFFFFFFFULL, 32, u64, 0xFFFFFFFFULL << 32, false);
+
+ /* Sane shift: start and end with 0, without a too-wide shift. */
+ TEST_ONE_SHIFT(0, 7, u8, 0, false);
+ TEST_ONE_SHIFT(0, 15, u16, 0, false);
+ TEST_ONE_SHIFT(0, 31, unsigned int, 0, false);
+ TEST_ONE_SHIFT(0, 31, u32, 0, false);
+ TEST_ONE_SHIFT(0, 63, u64, 0, false);
+
+ /* Sane shift: start and end with 0, without reaching signed bit. */
+ TEST_ONE_SHIFT(0, 6, s8, 0, false);
+ TEST_ONE_SHIFT(0, 14, s16, 0, false);
+ TEST_ONE_SHIFT(0, 30, int, 0, false);
+ TEST_ONE_SHIFT(0, 30, s32, 0, false);
+ TEST_ONE_SHIFT(0, 62, s64, 0, false);
+
+ kunit_info(test, "%d sane shift tests finished\n", count);
+}
+
+static void shift_overflow_test(struct kunit *test)
+{
+ int count = 0;
+
+ /* Overflow: shifted the bit off the end. */
+ TEST_ONE_SHIFT(1, 8, u8, 0, true);
+ TEST_ONE_SHIFT(1, 16, u16, 0, true);
+ TEST_ONE_SHIFT(1, 32, unsigned int, 0, true);
+ TEST_ONE_SHIFT(1, 32, u32, 0, true);
+ TEST_ONE_SHIFT(1, 64, u64, 0, true);
+
+ /* Overflow: shifted into the signed bit. */
+ TEST_ONE_SHIFT(1, 7, s8, 0, true);
+ TEST_ONE_SHIFT(1, 15, s16, 0, true);
+ TEST_ONE_SHIFT(1, 31, int, 0, true);
+ TEST_ONE_SHIFT(1, 31, s32, 0, true);
+ TEST_ONE_SHIFT(1, 63, s64, 0, true);
+
+ /* Overflow: high bit falls off unsigned types. */
+ /* 10010110 */
+ TEST_ONE_SHIFT(150, 1, u8, 0, true);
+ /* 1000100010010110 */
+ TEST_ONE_SHIFT(34966, 1, u16, 0, true);
+ /* 10000100000010001000100010010110 */
+ TEST_ONE_SHIFT(2215151766U, 1, u32, 0, true);
+ TEST_ONE_SHIFT(2215151766U, 1, unsigned int, 0, true);
+ /* 1000001000010000010000000100000010000100000010001000100010010110 */
+ TEST_ONE_SHIFT(9372061470395238550ULL, 1, u64, 0, true);
+
+ /* Overflow: bit shifted into signed bit on signed types. */
+ /* 01001011 */
+ TEST_ONE_SHIFT(75, 1, s8, 0, true);
+ /* 0100010001001011 */
+ TEST_ONE_SHIFT(17483, 1, s16, 0, true);
+ /* 01000010000001000100010001001011 */
+ TEST_ONE_SHIFT(1107575883, 1, s32, 0, true);
+ TEST_ONE_SHIFT(1107575883, 1, int, 0, true);
+ /* 0100000100001000001000000010000001000010000001000100010001001011 */
+ TEST_ONE_SHIFT(4686030735197619275LL, 1, s64, 0, true);
+
+ /* Overflow: bit shifted past signed bit on signed types. */
+ /* 01001011 */
+ TEST_ONE_SHIFT(75, 2, s8, 0, true);
+ /* 0100010001001011 */
+ TEST_ONE_SHIFT(17483, 2, s16, 0, true);
+ /* 01000010000001000100010001001011 */
+ TEST_ONE_SHIFT(1107575883, 2, s32, 0, true);
+ TEST_ONE_SHIFT(1107575883, 2, int, 0, true);
+ /* 0100000100001000001000000010000001000010000001000100010001001011 */
+ TEST_ONE_SHIFT(4686030735197619275LL, 2, s64, 0, true);
+
+ kunit_info(test, "%d overflow shift tests finished\n", count);
+}
+
+static void shift_truncate_test(struct kunit *test)
+{
+ int count = 0;
+
+ /* Overflow: values larger than destination type. */
+ TEST_ONE_SHIFT(0x100, 0, u8, 0, true);
+ TEST_ONE_SHIFT(0xFF, 0, s8, 0, true);
+ TEST_ONE_SHIFT(0x10000U, 0, u16, 0, true);
+ TEST_ONE_SHIFT(0xFFFFU, 0, s16, 0, true);
+ TEST_ONE_SHIFT(0x100000000ULL, 0, u32, 0, true);
+ TEST_ONE_SHIFT(0x100000000ULL, 0, unsigned int, 0, true);
+ TEST_ONE_SHIFT(0xFFFFFFFFUL, 0, s32, 0, true);
+ TEST_ONE_SHIFT(0xFFFFFFFFUL, 0, int, 0, true);
+ TEST_ONE_SHIFT(0xFFFFFFFFFFFFFFFFULL, 0, s64, 0, true);
+
+ /* Overflow: shifted at or beyond entire type's bit width. */
+ TEST_ONE_SHIFT(0, 8, u8, 0, true);
+ TEST_ONE_SHIFT(0, 9, u8, 0, true);
+ TEST_ONE_SHIFT(0, 8, s8, 0, true);
+ TEST_ONE_SHIFT(0, 9, s8, 0, true);
+ TEST_ONE_SHIFT(0, 16, u16, 0, true);
+ TEST_ONE_SHIFT(0, 17, u16, 0, true);
+ TEST_ONE_SHIFT(0, 16, s16, 0, true);
+ TEST_ONE_SHIFT(0, 17, s16, 0, true);
+ TEST_ONE_SHIFT(0, 32, u32, 0, true);
+ TEST_ONE_SHIFT(0, 33, u32, 0, true);
+ TEST_ONE_SHIFT(0, 32, int, 0, true);
+ TEST_ONE_SHIFT(0, 33, int, 0, true);
+ TEST_ONE_SHIFT(0, 32, s32, 0, true);
+ TEST_ONE_SHIFT(0, 33, s32, 0, true);
+ TEST_ONE_SHIFT(0, 64, u64, 0, true);
+ TEST_ONE_SHIFT(0, 65, u64, 0, true);
+ TEST_ONE_SHIFT(0, 64, s64, 0, true);
+ TEST_ONE_SHIFT(0, 65, s64, 0, true);
+
+ kunit_info(test, "%d truncate shift tests finished\n", count);
+}
+
+static void shift_nonsense_test(struct kunit *test)
+{
+ int count = 0;
+
+ /* Nonsense: negative initial value. */
+ TEST_ONE_SHIFT(-1, 0, s8, 0, true);
+ TEST_ONE_SHIFT(-1, 0, u8, 0, true);
+ TEST_ONE_SHIFT(-5, 0, s16, 0, true);
+ TEST_ONE_SHIFT(-5, 0, u16, 0, true);
+ TEST_ONE_SHIFT(-10, 0, int, 0, true);
+ TEST_ONE_SHIFT(-10, 0, unsigned int, 0, true);
+ TEST_ONE_SHIFT(-100, 0, s32, 0, true);
+ TEST_ONE_SHIFT(-100, 0, u32, 0, true);
+ TEST_ONE_SHIFT(-10000, 0, s64, 0, true);
+ TEST_ONE_SHIFT(-10000, 0, u64, 0, true);
+
+ /* Nonsense: negative shift values. */
+ TEST_ONE_SHIFT(0, -5, s8, 0, true);
+ TEST_ONE_SHIFT(0, -5, u8, 0, true);
+ TEST_ONE_SHIFT(0, -10, s16, 0, true);
+ TEST_ONE_SHIFT(0, -10, u16, 0, true);
+ TEST_ONE_SHIFT(0, -15, int, 0, true);
+ TEST_ONE_SHIFT(0, -15, unsigned int, 0, true);
+ TEST_ONE_SHIFT(0, -20, s32, 0, true);
+ TEST_ONE_SHIFT(0, -20, u32, 0, true);
+ TEST_ONE_SHIFT(0, -30, s64, 0, true);
+ TEST_ONE_SHIFT(0, -30, u64, 0, true);
+
+ /*
+ * Corner case: for unsigned types, we fail when we've shifted
+ * through the entire width of bits. For signed types, we might
+ * want to match this behavior, but that would mean noticing if
+ * we shift through all but the signed bit, and this is not
+ * currently detected (but we'll notice an overflow into the
+ * signed bit). So, for now, we will test this condition but
+ * mark it as not expected to overflow.
+ */
+ TEST_ONE_SHIFT(0, 7, s8, 0, false);
+ TEST_ONE_SHIFT(0, 15, s16, 0, false);
+ TEST_ONE_SHIFT(0, 31, int, 0, false);
+ TEST_ONE_SHIFT(0, 31, s32, 0, false);
+ TEST_ONE_SHIFT(0, 63, s64, 0, false);
+
+ kunit_info(test, "%d nonsense shift tests finished\n", count);
+}
+#undef TEST_ONE_SHIFT
+
+/*
+ * Deal with the various forms of allocator arguments. See comments above
+ * the DEFINE_TEST_ALLOC() instances for mapping of the "bits".
+ */
+#define alloc_GFP (GFP_KERNEL | __GFP_NOWARN)
+#define alloc010(alloc, arg, sz) alloc(sz, alloc_GFP)
+#define alloc011(alloc, arg, sz) alloc(sz, alloc_GFP, NUMA_NO_NODE)
+#define alloc000(alloc, arg, sz) alloc(sz)
+#define alloc001(alloc, arg, sz) alloc(sz, NUMA_NO_NODE)
+#define alloc110(alloc, arg, sz) alloc(arg, sz, alloc_GFP)
+#define free0(free, arg, ptr) free(ptr)
+#define free1(free, arg, ptr) free(arg, ptr)
+
+/* Wrap around to 16K */
+#define TEST_SIZE (5 * 4096)
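+
+/* Why 16K: with TEST_SIZE == 20480 and b == SIZE_MAX / TEST_SIZE + 1,
+ * a 64-bit product a * b wraps modulo 2^64; since 2^64 mod 20480 ==
+ * 4096, the product wraps to 20480 - 4096 == 16384. The "wrapped"
+ * allocations below therefore ask for only 16K, while array_size()
+ * saturates to SIZE_MAX and correctly fails.
+ */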
+
+#define DEFINE_TEST_ALLOC(func, free_func, want_arg, want_gfp, want_node)\
+static void test_ ## func (struct kunit *test, void *arg) \
+{ \
+ volatile size_t a = TEST_SIZE; \
+ volatile size_t b = (SIZE_MAX / TEST_SIZE) + 1; \
+ void *ptr; \
+ \
+ /* Tiny allocation test. */ \
+ ptr = alloc ## want_arg ## want_gfp ## want_node (func, arg, 1);\
+ KUNIT_ASSERT_NOT_ERR_OR_NULL_MSG(test, ptr, \
+ #func " failed regular allocation?!\n"); \
+ free ## want_arg (free_func, arg, ptr); \
+ \
+ /* Wrapped allocation test. */ \
+ ptr = alloc ## want_arg ## want_gfp ## want_node (func, arg, \
+ a * b); \
+ KUNIT_ASSERT_NOT_ERR_OR_NULL_MSG(test, ptr, \
+ #func " unexpectedly failed bad wrapping?!\n"); \
+ free ## want_arg (free_func, arg, ptr); \
+ \
+ /* Saturated allocation test. */ \
+ ptr = alloc ## want_arg ## want_gfp ## want_node (func, arg, \
+ array_size(a, b)); \
+ if (ptr) { \
+ KUNIT_FAIL(test, #func " missed saturation!\n"); \
+ free ## want_arg (free_func, arg, ptr); \
+ } \
+}
+
+/*
+ * Allocator uses a trailing node argument --------+ (e.g. kmalloc_node())
+ * Allocator uses the gfp_t argument -----------+ | (e.g. kmalloc())
+ * Allocator uses a special leading argument + | | (e.g. devm_kmalloc())
+ * | | |
+ */
+DEFINE_TEST_ALLOC(kmalloc, kfree, 0, 1, 0);
+DEFINE_TEST_ALLOC(kmalloc_node, kfree, 0, 1, 1);
+DEFINE_TEST_ALLOC(kzalloc, kfree, 0, 1, 0);
+DEFINE_TEST_ALLOC(kzalloc_node, kfree, 0, 1, 1);
+DEFINE_TEST_ALLOC(__vmalloc, vfree, 0, 1, 0);
+DEFINE_TEST_ALLOC(kvmalloc, kvfree, 0, 1, 0);
+DEFINE_TEST_ALLOC(kvmalloc_node, kvfree, 0, 1, 1);
+DEFINE_TEST_ALLOC(kvzalloc, kvfree, 0, 1, 0);
+DEFINE_TEST_ALLOC(kvzalloc_node, kvfree, 0, 1, 1);
+DEFINE_TEST_ALLOC(devm_kmalloc, devm_kfree, 1, 1, 0);
+DEFINE_TEST_ALLOC(devm_kzalloc, devm_kfree, 1, 1, 0);
+
+static void overflow_allocation_test(struct kunit *test)
+{
+ const char device_name[] = "overflow-test";
+ struct device *dev;
+ int count = 0;
+
+#define check_allocation_overflow(alloc) do { \
+ count++; \
+ test_ ## alloc(test, dev); \
+} while (0)
+
+ /* Create dummy device for devm_kmalloc()-family tests. */
+ dev = root_device_register(device_name);
+ KUNIT_ASSERT_FALSE_MSG(test, IS_ERR(dev),
+ "Cannot register test device\n");
+
+ check_allocation_overflow(kmalloc);
+ check_allocation_overflow(kmalloc_node);
+ check_allocation_overflow(kzalloc);
+ check_allocation_overflow(kzalloc_node);
+ check_allocation_overflow(__vmalloc);
+ check_allocation_overflow(kvmalloc);
+ check_allocation_overflow(kvmalloc_node);
+ check_allocation_overflow(kvzalloc);
+ check_allocation_overflow(kvzalloc_node);
+ check_allocation_overflow(devm_kmalloc);
+ check_allocation_overflow(devm_kzalloc);
+
+ device_unregister(dev);
+
+ kunit_info(test, "%d allocation overflow tests finished\n", count);
+#undef check_allocation_overflow
+}
+
+struct __test_flex_array {
+ unsigned long flags;
+ size_t count;
+ unsigned long data[];
+};
+
+static void overflow_size_helpers_test(struct kunit *test)
+{
+ /* Make sure struct_size() can be used in a constant expression. */
+ u8 ce_array[struct_size_t(struct __test_flex_array, data, 55)];
+ struct __test_flex_array *obj;
+ int count = 0;
+ int var;
+ volatile int unconst = 0;
+
+ /* Verify constant expression against runtime version. */
+ var = 55;
+ OPTIMIZER_HIDE_VAR(var);
+ KUNIT_EXPECT_EQ(test, sizeof(ce_array), struct_size(obj, data, var));
+
+#define check_one_size_helper(expected, func, args...) do { \
+ size_t _r = func(args); \
+ KUNIT_EXPECT_EQ_MSG(test, _r, expected, \
+ "expected " #func "(" #args ") to return %zu but got %zu instead\n", \
+ (size_t)(expected), _r); \
+ count++; \
+} while (0)
+
+ var = 4;
+ check_one_size_helper(20, size_mul, var++, 5);
+ check_one_size_helper(20, size_mul, 4, var++);
+ check_one_size_helper(0, size_mul, 0, 3);
+ check_one_size_helper(0, size_mul, 3, 0);
+ check_one_size_helper(6, size_mul, 2, 3);
+ check_one_size_helper(SIZE_MAX, size_mul, SIZE_MAX, 1);
+ check_one_size_helper(SIZE_MAX, size_mul, SIZE_MAX, 3);
+ check_one_size_helper(SIZE_MAX, size_mul, SIZE_MAX, -3);
+
+ var = 4;
+ check_one_size_helper(9, size_add, var++, 5);
+ check_one_size_helper(9, size_add, 4, var++);
+ check_one_size_helper(9, size_add, 9, 0);
+ check_one_size_helper(9, size_add, 0, 9);
+ check_one_size_helper(5, size_add, 2, 3);
+ check_one_size_helper(SIZE_MAX, size_add, SIZE_MAX, 1);
+ check_one_size_helper(SIZE_MAX, size_add, SIZE_MAX, 3);
+ check_one_size_helper(SIZE_MAX, size_add, SIZE_MAX, -3);
+
+ var = 4;
+ check_one_size_helper(1, size_sub, var--, 3);
+ check_one_size_helper(1, size_sub, 4, var--);
+ check_one_size_helper(1, size_sub, 3, 2);
+ check_one_size_helper(9, size_sub, 9, 0);
+ check_one_size_helper(SIZE_MAX, size_sub, 9, -3);
+ check_one_size_helper(SIZE_MAX, size_sub, 0, 9);
+ check_one_size_helper(SIZE_MAX, size_sub, 2, 3);
+ check_one_size_helper(SIZE_MAX, size_sub, SIZE_MAX, 0);
+ check_one_size_helper(SIZE_MAX, size_sub, SIZE_MAX, 10);
+ check_one_size_helper(SIZE_MAX, size_sub, 0, SIZE_MAX);
+ check_one_size_helper(SIZE_MAX, size_sub, 14, SIZE_MAX);
+ check_one_size_helper(SIZE_MAX - 2, size_sub, SIZE_MAX - 1, 1);
+ check_one_size_helper(SIZE_MAX - 4, size_sub, SIZE_MAX - 1, 3);
+ check_one_size_helper(1, size_sub, SIZE_MAX - 1, -3);
+
+ var = 4;
+ check_one_size_helper(4 * sizeof(*obj->data),
+ flex_array_size, obj, data, var++);
+ check_one_size_helper(5 * sizeof(*obj->data),
+ flex_array_size, obj, data, var++);
+ check_one_size_helper(0, flex_array_size, obj, data, 0 + unconst);
+ check_one_size_helper(sizeof(*obj->data),
+ flex_array_size, obj, data, 1 + unconst);
+ check_one_size_helper(7 * sizeof(*obj->data),
+ flex_array_size, obj, data, 7 + unconst);
+ check_one_size_helper(SIZE_MAX,
+ flex_array_size, obj, data, -1 + unconst);
+ check_one_size_helper(SIZE_MAX,
+ flex_array_size, obj, data, SIZE_MAX - 4 + unconst);
+
+ var = 4;
+ check_one_size_helper(sizeof(*obj) + (4 * sizeof(*obj->data)),
+ struct_size, obj, data, var++);
+ check_one_size_helper(sizeof(*obj) + (5 * sizeof(*obj->data)),
+ struct_size, obj, data, var++);
+ check_one_size_helper(sizeof(*obj), struct_size, obj, data, 0 + unconst);
+ check_one_size_helper(sizeof(*obj) + sizeof(*obj->data),
+ struct_size, obj, data, 1 + unconst);
+ check_one_size_helper(SIZE_MAX,
+ struct_size, obj, data, -3 + unconst);
+ check_one_size_helper(SIZE_MAX,
+ struct_size, obj, data, SIZE_MAX - 3 + unconst);
+
+ kunit_info(test, "%d overflow size helper tests finished\n", count);
+#undef check_one_size_helper
+}
+
+static void overflows_type_test(struct kunit *test)
+{
+ int count = 0;
+ unsigned int var;
+
+#define __TEST_OVERFLOWS_TYPE(func, arg1, arg2, of) do { \
+ bool __of = func(arg1, arg2); \
+ KUNIT_EXPECT_EQ_MSG(test, __of, of, \
+ "expected " #func "(" #arg1 ", " #arg2 " to%s overflow\n",\
+ of ? "" : " not"); \
+ count++; \
+} while (0)
+
+/* Args are: first type, second type, value, overflow expected */
+#define TEST_OVERFLOWS_TYPE(__t1, __t2, v, of) do { \
+ __t1 t1 = (v); \
+ __t2 t2; \
+ __TEST_OVERFLOWS_TYPE(__overflows_type, t1, t2, of); \
+ __TEST_OVERFLOWS_TYPE(__overflows_type, t1, __t2, of); \
+ __TEST_OVERFLOWS_TYPE(__overflows_type_constexpr, t1, t2, of); \
+ __TEST_OVERFLOWS_TYPE(__overflows_type_constexpr, t1, __t2, of);\
+} while (0)
+
+ TEST_OVERFLOWS_TYPE(u8, u8, U8_MAX, false);
+ TEST_OVERFLOWS_TYPE(u8, u16, U8_MAX, false);
+ TEST_OVERFLOWS_TYPE(u8, s8, U8_MAX, true);
+ TEST_OVERFLOWS_TYPE(u8, s8, S8_MAX, false);
+ TEST_OVERFLOWS_TYPE(u8, s8, (u8)S8_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u8, s16, U8_MAX, false);
+ TEST_OVERFLOWS_TYPE(s8, u8, S8_MAX, false);
+ TEST_OVERFLOWS_TYPE(s8, u8, -1, true);
+ TEST_OVERFLOWS_TYPE(s8, u8, S8_MIN, true);
+ TEST_OVERFLOWS_TYPE(s8, u16, S8_MAX, false);
+ TEST_OVERFLOWS_TYPE(s8, u16, -1, true);
+ TEST_OVERFLOWS_TYPE(s8, u16, S8_MIN, true);
+ TEST_OVERFLOWS_TYPE(s8, u32, S8_MAX, false);
+ TEST_OVERFLOWS_TYPE(s8, u32, -1, true);
+ TEST_OVERFLOWS_TYPE(s8, u32, S8_MIN, true);
+#if BITS_PER_LONG == 64
+ TEST_OVERFLOWS_TYPE(s8, u64, S8_MAX, false);
+ TEST_OVERFLOWS_TYPE(s8, u64, -1, true);
+ TEST_OVERFLOWS_TYPE(s8, u64, S8_MIN, true);
+#endif
+ TEST_OVERFLOWS_TYPE(s8, s8, S8_MAX, false);
+ TEST_OVERFLOWS_TYPE(s8, s8, S8_MIN, false);
+ TEST_OVERFLOWS_TYPE(s8, s16, S8_MAX, false);
+ TEST_OVERFLOWS_TYPE(s8, s16, S8_MIN, false);
+ TEST_OVERFLOWS_TYPE(u16, u8, U8_MAX, false);
+ TEST_OVERFLOWS_TYPE(u16, u8, (u16)U8_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u16, u8, U16_MAX, true);
+ TEST_OVERFLOWS_TYPE(u16, s8, S8_MAX, false);
+ TEST_OVERFLOWS_TYPE(u16, s8, (u16)S8_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u16, s8, U16_MAX, true);
+ TEST_OVERFLOWS_TYPE(u16, s16, S16_MAX, false);
+ TEST_OVERFLOWS_TYPE(u16, s16, (u16)S16_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u16, s16, U16_MAX, true);
+ TEST_OVERFLOWS_TYPE(u16, u32, U16_MAX, false);
+ TEST_OVERFLOWS_TYPE(u16, s32, U16_MAX, false);
+ TEST_OVERFLOWS_TYPE(s16, u8, U8_MAX, false);
+ TEST_OVERFLOWS_TYPE(s16, u8, (s16)U8_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(s16, u8, -1, true);
+ TEST_OVERFLOWS_TYPE(s16, u8, S16_MIN, true);
+ TEST_OVERFLOWS_TYPE(s16, u16, S16_MAX, false);
+ TEST_OVERFLOWS_TYPE(s16, u16, -1, true);
+ TEST_OVERFLOWS_TYPE(s16, u16, S16_MIN, true);
+ TEST_OVERFLOWS_TYPE(s16, u32, S16_MAX, false);
+ TEST_OVERFLOWS_TYPE(s16, u32, -1, true);
+ TEST_OVERFLOWS_TYPE(s16, u32, S16_MIN, true);
+#if BITS_PER_LONG == 64
+ TEST_OVERFLOWS_TYPE(s16, u64, S16_MAX, false);
+ TEST_OVERFLOWS_TYPE(s16, u64, -1, true);
+ TEST_OVERFLOWS_TYPE(s16, u64, S16_MIN, true);
+#endif
+ TEST_OVERFLOWS_TYPE(s16, s8, S8_MAX, false);
+ TEST_OVERFLOWS_TYPE(s16, s8, S8_MIN, false);
+ TEST_OVERFLOWS_TYPE(s16, s8, (s16)S8_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(s16, s8, (s16)S8_MIN - 1, true);
+ TEST_OVERFLOWS_TYPE(s16, s8, S16_MAX, true);
+ TEST_OVERFLOWS_TYPE(s16, s8, S16_MIN, true);
+ TEST_OVERFLOWS_TYPE(s16, s16, S16_MAX, false);
+ TEST_OVERFLOWS_TYPE(s16, s16, S16_MIN, false);
+ TEST_OVERFLOWS_TYPE(s16, s32, S16_MAX, false);
+ TEST_OVERFLOWS_TYPE(s16, s32, S16_MIN, false);
+ TEST_OVERFLOWS_TYPE(u32, u8, U8_MAX, false);
+ TEST_OVERFLOWS_TYPE(u32, u8, (u32)U8_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u32, u8, U32_MAX, true);
+ TEST_OVERFLOWS_TYPE(u32, s8, S8_MAX, false);
+ TEST_OVERFLOWS_TYPE(u32, s8, (u32)S8_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u32, s8, U32_MAX, true);
+ TEST_OVERFLOWS_TYPE(u32, u16, U16_MAX, false);
+ TEST_OVERFLOWS_TYPE(u32, u16, U16_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u32, u16, U32_MAX, true);
+ TEST_OVERFLOWS_TYPE(u32, s16, S16_MAX, false);
+ TEST_OVERFLOWS_TYPE(u32, s16, (u32)S16_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u32, s16, U32_MAX, true);
+ TEST_OVERFLOWS_TYPE(u32, u32, U32_MAX, false);
+ TEST_OVERFLOWS_TYPE(u32, s32, S32_MAX, false);
+ TEST_OVERFLOWS_TYPE(u32, s32, U32_MAX, true);
+ TEST_OVERFLOWS_TYPE(u32, s32, (u32)S32_MAX + 1, true);
+#if BITS_PER_LONG == 64
+ TEST_OVERFLOWS_TYPE(u32, u64, U32_MAX, false);
+ TEST_OVERFLOWS_TYPE(u32, s64, U32_MAX, false);
+#endif
+ TEST_OVERFLOWS_TYPE(s32, u8, U8_MAX, false);
+ TEST_OVERFLOWS_TYPE(s32, u8, (s32)U8_MAX + 1, true);
+	TEST_OVERFLOWS_TYPE(s32, u8, S32_MAX, true);
+ TEST_OVERFLOWS_TYPE(s32, u8, -1, true);
+ TEST_OVERFLOWS_TYPE(s32, u8, S32_MIN, true);
+ TEST_OVERFLOWS_TYPE(s32, u16, U16_MAX, false);
+ TEST_OVERFLOWS_TYPE(s32, u16, (s32)U16_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(s32, u16, S32_MAX, true);
+ TEST_OVERFLOWS_TYPE(s32, u16, -1, true);
+ TEST_OVERFLOWS_TYPE(s32, u16, S32_MIN, true);
+ TEST_OVERFLOWS_TYPE(s32, u32, S32_MAX, false);
+ TEST_OVERFLOWS_TYPE(s32, u32, -1, true);
+ TEST_OVERFLOWS_TYPE(s32, u32, S32_MIN, true);
+#if BITS_PER_LONG == 64
+ TEST_OVERFLOWS_TYPE(s32, u64, S32_MAX, false);
+ TEST_OVERFLOWS_TYPE(s32, u64, -1, true);
+ TEST_OVERFLOWS_TYPE(s32, u64, S32_MIN, true);
+#endif
+ TEST_OVERFLOWS_TYPE(s32, s8, S8_MAX, false);
+ TEST_OVERFLOWS_TYPE(s32, s8, S8_MIN, false);
+ TEST_OVERFLOWS_TYPE(s32, s8, (s32)S8_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(s32, s8, (s32)S8_MIN - 1, true);
+ TEST_OVERFLOWS_TYPE(s32, s8, S32_MAX, true);
+ TEST_OVERFLOWS_TYPE(s32, s8, S32_MIN, true);
+ TEST_OVERFLOWS_TYPE(s32, s16, S16_MAX, false);
+ TEST_OVERFLOWS_TYPE(s32, s16, S16_MIN, false);
+ TEST_OVERFLOWS_TYPE(s32, s16, (s32)S16_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(s32, s16, (s32)S16_MIN - 1, true);
+ TEST_OVERFLOWS_TYPE(s32, s16, S32_MAX, true);
+ TEST_OVERFLOWS_TYPE(s32, s16, S32_MIN, true);
+ TEST_OVERFLOWS_TYPE(s32, s32, S32_MAX, false);
+ TEST_OVERFLOWS_TYPE(s32, s32, S32_MIN, false);
+#if BITS_PER_LONG == 64
+ TEST_OVERFLOWS_TYPE(s32, s64, S32_MAX, false);
+ TEST_OVERFLOWS_TYPE(s32, s64, S32_MIN, false);
+ TEST_OVERFLOWS_TYPE(u64, u8, U64_MAX, true);
+ TEST_OVERFLOWS_TYPE(u64, u8, U8_MAX, false);
+ TEST_OVERFLOWS_TYPE(u64, u8, (u64)U8_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u64, u16, U64_MAX, true);
+ TEST_OVERFLOWS_TYPE(u64, u16, U16_MAX, false);
+ TEST_OVERFLOWS_TYPE(u64, u16, (u64)U16_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u64, u32, U64_MAX, true);
+ TEST_OVERFLOWS_TYPE(u64, u32, U32_MAX, false);
+ TEST_OVERFLOWS_TYPE(u64, u32, (u64)U32_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u64, u64, U64_MAX, false);
+ TEST_OVERFLOWS_TYPE(u64, s8, S8_MAX, false);
+ TEST_OVERFLOWS_TYPE(u64, s8, (u64)S8_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u64, s8, U64_MAX, true);
+ TEST_OVERFLOWS_TYPE(u64, s16, S16_MAX, false);
+ TEST_OVERFLOWS_TYPE(u64, s16, (u64)S16_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u64, s16, U64_MAX, true);
+ TEST_OVERFLOWS_TYPE(u64, s32, S32_MAX, false);
+ TEST_OVERFLOWS_TYPE(u64, s32, (u64)S32_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u64, s32, U64_MAX, true);
+ TEST_OVERFLOWS_TYPE(u64, s64, S64_MAX, false);
+ TEST_OVERFLOWS_TYPE(u64, s64, U64_MAX, true);
+ TEST_OVERFLOWS_TYPE(u64, s64, (u64)S64_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(s64, u8, S64_MAX, true);
+ TEST_OVERFLOWS_TYPE(s64, u8, S64_MIN, true);
+ TEST_OVERFLOWS_TYPE(s64, u8, -1, true);
+ TEST_OVERFLOWS_TYPE(s64, u8, U8_MAX, false);
+ TEST_OVERFLOWS_TYPE(s64, u8, (s64)U8_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(s64, u16, S64_MAX, true);
+ TEST_OVERFLOWS_TYPE(s64, u16, S64_MIN, true);
+ TEST_OVERFLOWS_TYPE(s64, u16, -1, true);
+ TEST_OVERFLOWS_TYPE(s64, u16, U16_MAX, false);
+ TEST_OVERFLOWS_TYPE(s64, u16, (s64)U16_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(s64, u32, S64_MAX, true);
+ TEST_OVERFLOWS_TYPE(s64, u32, S64_MIN, true);
+ TEST_OVERFLOWS_TYPE(s64, u32, -1, true);
+ TEST_OVERFLOWS_TYPE(s64, u32, U32_MAX, false);
+ TEST_OVERFLOWS_TYPE(s64, u32, (s64)U32_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(s64, u64, S64_MAX, false);
+ TEST_OVERFLOWS_TYPE(s64, u64, S64_MIN, true);
+ TEST_OVERFLOWS_TYPE(s64, u64, -1, true);
+ TEST_OVERFLOWS_TYPE(s64, s8, S8_MAX, false);
+ TEST_OVERFLOWS_TYPE(s64, s8, S8_MIN, false);
+ TEST_OVERFLOWS_TYPE(s64, s8, (s64)S8_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(s64, s8, (s64)S8_MIN - 1, true);
+ TEST_OVERFLOWS_TYPE(s64, s8, S64_MAX, true);
+ TEST_OVERFLOWS_TYPE(s64, s16, S16_MAX, false);
+ TEST_OVERFLOWS_TYPE(s64, s16, S16_MIN, false);
+ TEST_OVERFLOWS_TYPE(s64, s16, (s64)S16_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(s64, s16, (s64)S16_MIN - 1, true);
+ TEST_OVERFLOWS_TYPE(s64, s16, S64_MAX, true);
+ TEST_OVERFLOWS_TYPE(s64, s32, S32_MAX, false);
+ TEST_OVERFLOWS_TYPE(s64, s32, S32_MIN, false);
+ TEST_OVERFLOWS_TYPE(s64, s32, (s64)S32_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(s64, s32, (s64)S32_MIN - 1, true);
+ TEST_OVERFLOWS_TYPE(s64, s32, S64_MAX, true);
+ TEST_OVERFLOWS_TYPE(s64, s64, S64_MAX, false);
+ TEST_OVERFLOWS_TYPE(s64, s64, S64_MIN, false);
+#endif
+
+ /* Check for macro side-effects. */
+ var = INT_MAX - 1;
+ __TEST_OVERFLOWS_TYPE(__overflows_type, var++, int, false);
+ __TEST_OVERFLOWS_TYPE(__overflows_type, var++, int, false);
+ __TEST_OVERFLOWS_TYPE(__overflows_type, var++, int, true);
+ var = INT_MAX - 1;
+ __TEST_OVERFLOWS_TYPE(overflows_type, var++, int, false);
+ __TEST_OVERFLOWS_TYPE(overflows_type, var++, int, false);
+ __TEST_OVERFLOWS_TYPE(overflows_type, var++, int, true);
+
+ kunit_info(test, "%d overflows_type() tests finished\n", count);
+#undef TEST_OVERFLOWS_TYPE
+#undef __TEST_OVERFLOWS_TYPE
+}
+
+static void same_type_test(struct kunit *test)
+{
+ int count = 0;
+ int var;
+
+#define TEST_SAME_TYPE(t1, t2, same) do { \
+ typeof(t1) __t1h = type_max(t1); \
+ typeof(t1) __t1l = type_min(t1); \
+ typeof(t2) __t2h = type_max(t2); \
+ typeof(t2) __t2l = type_min(t2); \
+ KUNIT_EXPECT_EQ(test, true, __same_type(t1, __t1h)); \
+ KUNIT_EXPECT_EQ(test, true, __same_type(t1, __t1l)); \
+ KUNIT_EXPECT_EQ(test, true, __same_type(__t1h, t1)); \
+ KUNIT_EXPECT_EQ(test, true, __same_type(__t1l, t1)); \
+ KUNIT_EXPECT_EQ(test, true, __same_type(t2, __t2h)); \
+ KUNIT_EXPECT_EQ(test, true, __same_type(t2, __t2l)); \
+ KUNIT_EXPECT_EQ(test, true, __same_type(__t2h, t2)); \
+ KUNIT_EXPECT_EQ(test, true, __same_type(__t2l, t2)); \
+ KUNIT_EXPECT_EQ(test, same, __same_type(t1, t2)); \
+ KUNIT_EXPECT_EQ(test, same, __same_type(t2, __t1h)); \
+ KUNIT_EXPECT_EQ(test, same, __same_type(t2, __t1l)); \
+ KUNIT_EXPECT_EQ(test, same, __same_type(__t1h, t2)); \
+ KUNIT_EXPECT_EQ(test, same, __same_type(__t1l, t2)); \
+ KUNIT_EXPECT_EQ(test, same, __same_type(t1, __t2h)); \
+ KUNIT_EXPECT_EQ(test, same, __same_type(t1, __t2l)); \
+ KUNIT_EXPECT_EQ(test, same, __same_type(__t2h, t1)); \
+ KUNIT_EXPECT_EQ(test, same, __same_type(__t2l, t1)); \
+} while (0)
+
+#if BITS_PER_LONG == 64
+# define TEST_SAME_TYPE64(base, t, m) TEST_SAME_TYPE(base, t, m)
+#else
+# define TEST_SAME_TYPE64(base, t, m) do { } while (0)
+#endif
+
+#define TEST_TYPE_SETS(base, mu8, mu16, mu32, ms8, ms16, ms32, mu64, ms64) \
+do { \
+ TEST_SAME_TYPE(base, u8, mu8); \
+ TEST_SAME_TYPE(base, u16, mu16); \
+ TEST_SAME_TYPE(base, u32, mu32); \
+ TEST_SAME_TYPE(base, s8, ms8); \
+ TEST_SAME_TYPE(base, s16, ms16); \
+ TEST_SAME_TYPE(base, s32, ms32); \
+ TEST_SAME_TYPE64(base, u64, mu64); \
+ TEST_SAME_TYPE64(base, s64, ms64); \
+} while (0)
+
+ TEST_TYPE_SETS(u8, true, false, false, false, false, false, false, false);
+ TEST_TYPE_SETS(u16, false, true, false, false, false, false, false, false);
+ TEST_TYPE_SETS(u32, false, false, true, false, false, false, false, false);
+ TEST_TYPE_SETS(s8, false, false, false, true, false, false, false, false);
+ TEST_TYPE_SETS(s16, false, false, false, false, true, false, false, false);
+ TEST_TYPE_SETS(s32, false, false, false, false, false, true, false, false);
+#if BITS_PER_LONG == 64
+ TEST_TYPE_SETS(u64, false, false, false, false, false, false, true, false);
+ TEST_TYPE_SETS(s64, false, false, false, false, false, false, false, true);
+#endif
+
+ /* Check for macro side-effects. */
+ var = 4;
+ KUNIT_EXPECT_EQ(test, var, 4);
+ KUNIT_EXPECT_TRUE(test, __same_type(var++, int));
+ KUNIT_EXPECT_EQ(test, var, 4);
+ KUNIT_EXPECT_TRUE(test, __same_type(int, var++));
+ KUNIT_EXPECT_EQ(test, var, 4);
+ KUNIT_EXPECT_TRUE(test, __same_type(var++, var++));
+ KUNIT_EXPECT_EQ(test, var, 4);
+
+ kunit_info(test, "%d __same_type() tests finished\n", count);
+
+#undef TEST_TYPE_SETS
+#undef TEST_SAME_TYPE64
+#undef TEST_SAME_TYPE
+}
+
+static void castable_to_type_test(struct kunit *test)
+{
+ int count = 0;
+
+#define TEST_CASTABLE_TO_TYPE(arg1, arg2, pass) do { \
+ bool __pass = castable_to_type(arg1, arg2); \
+ KUNIT_EXPECT_EQ_MSG(test, __pass, pass, \
+ "expected castable_to_type(" #arg1 ", " #arg2 ") to%s pass\n",\
+ pass ? "" : " not"); \
+ count++; \
+} while (0)
+
+ TEST_CASTABLE_TO_TYPE(16, u8, true);
+ TEST_CASTABLE_TO_TYPE(16, u16, true);
+ TEST_CASTABLE_TO_TYPE(16, u32, true);
+ TEST_CASTABLE_TO_TYPE(16, s8, true);
+ TEST_CASTABLE_TO_TYPE(16, s16, true);
+ TEST_CASTABLE_TO_TYPE(16, s32, true);
+ TEST_CASTABLE_TO_TYPE(-16, s8, true);
+ TEST_CASTABLE_TO_TYPE(-16, s16, true);
+ TEST_CASTABLE_TO_TYPE(-16, s32, true);
+#if BITS_PER_LONG == 64
+ TEST_CASTABLE_TO_TYPE(16, u64, true);
+ TEST_CASTABLE_TO_TYPE(-16, s64, true);
+#endif
+
+#define TEST_CASTABLE_TO_TYPE_VAR(width) do { \
+ u ## width u ## width ## var = 0; \
+ s ## width s ## width ## var = 0; \
+ \
+ /* Constant expressions that fit types. */ \
+ TEST_CASTABLE_TO_TYPE(type_max(u ## width), u ## width, true); \
+ TEST_CASTABLE_TO_TYPE(type_min(u ## width), u ## width, true); \
+ TEST_CASTABLE_TO_TYPE(type_max(u ## width), u ## width ## var, true); \
+ TEST_CASTABLE_TO_TYPE(type_min(u ## width), u ## width ## var, true); \
+ TEST_CASTABLE_TO_TYPE(type_max(s ## width), s ## width, true); \
+ TEST_CASTABLE_TO_TYPE(type_min(s ## width), s ## width, true); \
+ TEST_CASTABLE_TO_TYPE(type_max(s ## width), s ## width ## var, true); \
+ TEST_CASTABLE_TO_TYPE(type_min(u ## width), s ## width ## var, true); \
+ /* Constant expressions that do not fit types. */ \
+ TEST_CASTABLE_TO_TYPE(type_max(u ## width), s ## width, false); \
+ TEST_CASTABLE_TO_TYPE(type_max(u ## width), s ## width ## var, false); \
+ TEST_CASTABLE_TO_TYPE(type_min(s ## width), u ## width, false); \
+ TEST_CASTABLE_TO_TYPE(type_min(s ## width), u ## width ## var, false); \
+ /* Non-constant expression with mismatched type. */ \
+ TEST_CASTABLE_TO_TYPE(s ## width ## var, u ## width, false); \
+ TEST_CASTABLE_TO_TYPE(u ## width ## var, s ## width, false); \
+} while (0)
+
+#define TEST_CASTABLE_TO_TYPE_RANGE(width) do { \
+ unsigned long big = U ## width ## _MAX; \
+ signed long small = S ## width ## _MIN; \
+ u ## width u ## width ## var = 0; \
+ s ## width s ## width ## var = 0; \
+ \
+ /* Constant expression in range. */ \
+ TEST_CASTABLE_TO_TYPE(U ## width ## _MAX, u ## width, true); \
+ TEST_CASTABLE_TO_TYPE(U ## width ## _MAX, u ## width ## var, true); \
+ TEST_CASTABLE_TO_TYPE(S ## width ## _MIN, s ## width, true); \
+ TEST_CASTABLE_TO_TYPE(S ## width ## _MIN, s ## width ## var, true); \
+ /* Constant expression out of range. */ \
+ TEST_CASTABLE_TO_TYPE((unsigned long)U ## width ## _MAX + 1, u ## width, false); \
+ TEST_CASTABLE_TO_TYPE((unsigned long)U ## width ## _MAX + 1, u ## width ## var, false); \
+ TEST_CASTABLE_TO_TYPE((signed long)S ## width ## _MIN - 1, s ## width, false); \
+ TEST_CASTABLE_TO_TYPE((signed long)S ## width ## _MIN - 1, s ## width ## var, false); \
+ /* Non-constant expression with mismatched type. */ \
+ TEST_CASTABLE_TO_TYPE(big, u ## width, false); \
+ TEST_CASTABLE_TO_TYPE(big, u ## width ## var, false); \
+ TEST_CASTABLE_TO_TYPE(small, s ## width, false); \
+ TEST_CASTABLE_TO_TYPE(small, s ## width ## var, false); \
+} while (0)
+
+ TEST_CASTABLE_TO_TYPE_VAR(8);
+ TEST_CASTABLE_TO_TYPE_VAR(16);
+ TEST_CASTABLE_TO_TYPE_VAR(32);
+#if BITS_PER_LONG == 64
+ TEST_CASTABLE_TO_TYPE_VAR(64);
+#endif
+
+ TEST_CASTABLE_TO_TYPE_RANGE(8);
+ TEST_CASTABLE_TO_TYPE_RANGE(16);
+#if BITS_PER_LONG == 64
+ TEST_CASTABLE_TO_TYPE_RANGE(32);
+#endif
+ kunit_info(test, "%d castable_to_type() tests finished\n", count);
+
+#undef TEST_CASTABLE_TO_TYPE_RANGE
+#undef TEST_CASTABLE_TO_TYPE_VAR
+#undef TEST_CASTABLE_TO_TYPE
+}
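+
+/*
+ * A minimal sketch of what castable_to_type() reports (illustrative;
+ * "some_int" is a hypothetical non-constant int variable):
+ *
+ *   castable_to_type(200, u8);       true: constant fits 0..255
+ *   castable_to_type(-1, u8);        false: negative constant
+ *   castable_to_type(some_int, u8);  false: non-constant, different type
+ */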
+
+static struct kunit_case overflow_test_cases[] = {
+ KUNIT_CASE(u8_u8__u8_overflow_test),
+ KUNIT_CASE(s8_s8__s8_overflow_test),
+ KUNIT_CASE(u16_u16__u16_overflow_test),
+ KUNIT_CASE(s16_s16__s16_overflow_test),
+ KUNIT_CASE(u32_u32__u32_overflow_test),
+ KUNIT_CASE(s32_s32__s32_overflow_test),
+ KUNIT_CASE(u64_u64__u64_overflow_test),
+ KUNIT_CASE(s64_s64__s64_overflow_test),
+ KUNIT_CASE(u32_u32__int_overflow_test),
+ KUNIT_CASE(u32_u32__u8_overflow_test),
+ KUNIT_CASE(u8_u8__int_overflow_test),
+ KUNIT_CASE(int_int__u8_overflow_test),
+ KUNIT_CASE(shift_sane_test),
+ KUNIT_CASE(shift_overflow_test),
+ KUNIT_CASE(shift_truncate_test),
+ KUNIT_CASE(shift_nonsense_test),
+ KUNIT_CASE(overflow_allocation_test),
+ KUNIT_CASE(overflow_size_helpers_test),
+ KUNIT_CASE(overflows_type_test),
+ KUNIT_CASE(same_type_test),
+ KUNIT_CASE(castable_to_type_test),
+ {}
+};
+
+static struct kunit_suite overflow_test_suite = {
+ .name = "overflow",
+ .test_cases = overflow_test_cases,
+};
+
+kunit_test_suite(overflow_test_suite);
+
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/lib/packing.c b/lib/packing.c
new file mode 100644
index 000000000..3f656167c
--- /dev/null
+++ b/lib/packing.c
@@ -0,0 +1,201 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright 2016-2018 NXP
+ * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+#include <linux/packing.h>
+#include <linux/module.h>
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/bitrev.h>
+
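+/*
+ * Byte-swap within each aligned 4-byte group: logical offsets 0,1,2,3 map
+ * to 3,2,1,0, offsets 4..7 map to 7..4, and so on. For example,
+ * get_le_offset(1) == 2.
+ */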
+static int get_le_offset(int offset)
+{
+ int closest_multiple_of_4;
+
+ closest_multiple_of_4 = (offset / 4) * 4;
+ offset -= closest_multiple_of_4;
+ return closest_multiple_of_4 + (3 - offset);
+}
+
+static int get_reverse_lsw32_offset(int offset, size_t len)
+{
+ int closest_multiple_of_4;
+ int word_index;
+
+ word_index = offset / 4;
+ closest_multiple_of_4 = word_index * 4;
+ offset -= closest_multiple_of_4;
+ word_index = (len / 4) - word_index - 1;
+ return word_index * 4 + offset;
+}
+
+static void adjust_for_msb_right_quirk(u64 *to_write, int *box_start_bit,
+ int *box_end_bit, u8 *box_mask)
+{
+ int box_bit_width = *box_start_bit - *box_end_bit + 1;
+ int new_box_start_bit, new_box_end_bit;
+
+ *to_write >>= *box_end_bit;
+ *to_write = bitrev8(*to_write) >> (8 - box_bit_width);
+ *to_write <<= *box_end_bit;
+
+ new_box_end_bit = box_bit_width - *box_start_bit - 1;
+ new_box_start_bit = box_bit_width - *box_end_bit - 1;
+ *box_mask = GENMASK_ULL(new_box_start_bit, new_box_end_bit);
+ *box_start_bit = new_box_start_bit;
+ *box_end_bit = new_box_end_bit;
+}
+
+/**
+ * packing - Convert numbers (currently u64) between a packed and an unpacked
+ * format. Unpacked means laid out in memory in the CPU's native
+ * understanding of integers, while packed means anything else that
+ * requires translation.
+ *
+ * @pbuf: Pointer to a buffer holding the packed value.
+ * @uval: Pointer to an u64 holding the unpacked value.
+ * @startbit: The index (in logical notation, compensated for quirks) where
+ * the packed value starts within pbuf. Must be larger than, or
+ * equal to, endbit.
+ * @endbit: The index (in logical notation, compensated for quirks) where
+ * the packed value ends within pbuf. Must be smaller than, or equal
+ * to, startbit.
+ * @pbuflen: The length in bytes of the packed buffer pointed to by @pbuf.
+ * @op: If PACK, then uval will be treated as const pointer and copied (packed)
+ * into pbuf, between startbit and endbit.
+ * If UNPACK, then pbuf will be treated as const pointer and the logical
+ * value between startbit and endbit will be copied (unpacked) to uval.
+ * @quirks: A bit mask of QUIRK_LITTLE_ENDIAN, QUIRK_LSW32_IS_FIRST and
+ * QUIRK_MSB_ON_THE_RIGHT.
+ *
+ * Return: 0 on success, -EINVAL or -ERANGE if called incorrectly. Assuming
+ * correct usage, the return code may be discarded.
+ * If op is PACK, pbuf is modified.
+ * If op is UNPACK, uval is modified.
+ */
+int packing(void *pbuf, u64 *uval, int startbit, int endbit, size_t pbuflen,
+ enum packing_op op, u8 quirks)
+{
+ /* Number of bits for storing "uval";
+ * also the width of the field to access in the pbuf
+ */
+ u64 value_width;
+ /* Logical byte indices corresponding to the
+ * start and end of the field.
+ */
+ int plogical_first_u8, plogical_last_u8, box;
+
+ /* startbit is expected to be larger than endbit */
+ if (startbit < endbit)
+ /* Invalid function call */
+ return -EINVAL;
+
+ value_width = startbit - endbit + 1;
+ if (value_width > 64)
+ return -ERANGE;
+
+ /* Check if "uval" fits in "value_width" bits.
+ * If value_width is 64, the check will fail, but any
+ * 64-bit uval will surely fit.
+ */
+ if (op == PACK && value_width < 64 && (*uval >= (1ull << value_width)))
+ /* Cannot store "uval" inside "value_width" bits.
+ * Truncating "uval" is most certainly not desirable,
+ * so simply erroring out is appropriate.
+ */
+ return -ERANGE;
+
+ /* Initialize parameter */
+ if (op == UNPACK)
+ *uval = 0;
+
+ /* Iterate through an idealized view of the pbuf as a u64 with
+ * no quirks, u8 by u8 (aligned at u8 boundaries), from high to low
+ * logical bit significance. "box" denotes the current logical u8.
+ */
+ plogical_first_u8 = startbit / 8;
+ plogical_last_u8 = endbit / 8;
+
+ for (box = plogical_first_u8; box >= plogical_last_u8; box--) {
+ /* Bit indices into the currently accessed 8-bit box */
+ int box_start_bit, box_end_bit, box_addr;
+ u8 box_mask;
+ /* Corresponding bits from the unpacked u64 parameter */
+ int proj_start_bit, proj_end_bit;
+ u64 proj_mask;
+
+ /* This u8 may need to be accessed in its entirety
+ * (from bit 7 to bit 0), or not, depending on the
+ * input arguments startbit and endbit.
+ */
+ if (box == plogical_first_u8)
+ box_start_bit = startbit % 8;
+ else
+ box_start_bit = 7;
+ if (box == plogical_last_u8)
+ box_end_bit = endbit % 8;
+ else
+ box_end_bit = 0;
+
+ /* We have determined the box bit start and end.
+ * Now we calculate where this (masked) u8 box would fit
+ * in the unpacked (CPU-readable) u64 - the u8 box's
+ * projection onto the unpacked u64. Though the
+ * box is u8, the projection is u64 because it may fall
+ * anywhere within the unpacked u64.
+ */
+ proj_start_bit = ((box * 8) + box_start_bit) - endbit;
+ proj_end_bit = ((box * 8) + box_end_bit) - endbit;
+ proj_mask = GENMASK_ULL(proj_start_bit, proj_end_bit);
+ box_mask = GENMASK_ULL(box_start_bit, box_end_bit);
+
+ /* Determine the offset of the u8 box inside the pbuf,
+ * adjusted for quirks. The adjusted box_addr will be used for
+ * effective addressing inside the pbuf (so it's not
+ * logical any longer).
+ */
+ box_addr = pbuflen - box - 1;
+ if (quirks & QUIRK_LITTLE_ENDIAN)
+ box_addr = get_le_offset(box_addr);
+ if (quirks & QUIRK_LSW32_IS_FIRST)
+ box_addr = get_reverse_lsw32_offset(box_addr,
+ pbuflen);
+
+ if (op == UNPACK) {
+ u64 pval;
+
+ /* Read from pbuf, write to uval */
+ pval = ((u8 *)pbuf)[box_addr] & box_mask;
+ if (quirks & QUIRK_MSB_ON_THE_RIGHT)
+ adjust_for_msb_right_quirk(&pval,
+ &box_start_bit,
+ &box_end_bit,
+ &box_mask);
+
+ pval >>= box_end_bit;
+ pval <<= proj_end_bit;
+ *uval &= ~proj_mask;
+ *uval |= pval;
+ } else {
+ u64 pval;
+
+ /* Write to pbuf, read from uval */
+ pval = (*uval) & proj_mask;
+ pval >>= proj_end_bit;
+ if (quirks & QUIRK_MSB_ON_THE_RIGHT)
+ adjust_for_msb_right_quirk(&pval,
+ &box_start_bit,
+ &box_end_bit,
+ &box_mask);
+
+ pval <<= box_end_bit;
+ ((u8 *)pbuf)[box_addr] &= ~box_mask;
+ ((u8 *)pbuf)[box_addr] |= pval;
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL(packing);
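+
+/*
+ * A minimal usage sketch (illustrative; the buffer and field bounds below
+ * are hypothetical). Store the 6-bit value 0x2a into logical bits 13..8
+ * of a 4-byte buffer, then read it back:
+ *
+ *   u8 buf[4] = {};
+ *   u64 val = 0x2a, out = 0;
+ *
+ *   int err = packing(buf, &val, 13, 8, sizeof(buf), PACK, 0);
+ *
+ *   err = packing(buf, &out, 13, 8, sizeof(buf), UNPACK, 0);
+ *
+ * On success both calls return 0 and out == 0x2a.
+ */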
+
+MODULE_DESCRIPTION("Generic bitfield packing and unpacking");
diff --git a/lib/parman.c b/lib/parman.c
new file mode 100644
index 000000000..3f8f8d422
--- /dev/null
+++ b/lib/parman.c
@@ -0,0 +1,375 @@
+/*
+ * lib/parman.c - Manager for linear priority array areas
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <linux/list.h>
+#include <linux/err.h>
+#include <linux/parman.h>
+
+struct parman_algo {
+ int (*item_add)(struct parman *parman, struct parman_prio *prio,
+ struct parman_item *item);
+ void (*item_remove)(struct parman *parman, struct parman_prio *prio,
+ struct parman_item *item);
+};
+
+struct parman {
+ const struct parman_ops *ops;
+ void *priv;
+ const struct parman_algo *algo;
+ unsigned long count;
+ unsigned long limit_count;
+ struct list_head prio_list;
+};
+
+static int parman_enlarge(struct parman *parman)
+{
+ unsigned long new_count = parman->limit_count +
+ parman->ops->resize_step;
+ int err;
+
+ err = parman->ops->resize(parman->priv, new_count);
+ if (err)
+ return err;
+ parman->limit_count = new_count;
+ return 0;
+}
+
+static int parman_shrink(struct parman *parman)
+{
+ unsigned long new_count = parman->limit_count -
+ parman->ops->resize_step;
+ int err;
+
+ if (new_count < parman->ops->base_count)
+ return 0;
+ err = parman->ops->resize(parman->priv, new_count);
+ if (err)
+ return err;
+ parman->limit_count = new_count;
+ return 0;
+}
+
+static bool parman_prio_used(struct parman_prio *prio)
+{
+ return !list_empty(&prio->item_list);
+}
+
+static struct parman_item *parman_prio_first_item(struct parman_prio *prio)
+{
+ return list_first_entry(&prio->item_list,
+ typeof(struct parman_item), list);
+}
+
+static unsigned long parman_prio_first_index(struct parman_prio *prio)
+{
+ return parman_prio_first_item(prio)->index;
+}
+
+static struct parman_item *parman_prio_last_item(struct parman_prio *prio)
+{
+ return list_last_entry(&prio->item_list,
+ typeof(struct parman_item), list);
+}
+
+static unsigned long parman_prio_last_index(struct parman_prio *prio)
+{
+ return parman_prio_last_item(prio)->index;
+}
+
+static unsigned long parman_lsort_new_index_find(struct parman *parman,
+ struct parman_prio *prio)
+{
+ list_for_each_entry_from_reverse(prio, &parman->prio_list, list) {
+ if (!parman_prio_used(prio))
+ continue;
+ return parman_prio_last_index(prio) + 1;
+ }
+ return 0;
+}
+
+static void __parman_prio_move(struct parman *parman, struct parman_prio *prio,
+ struct parman_item *item, unsigned long to_index,
+ unsigned long count)
+{
+ parman->ops->move(parman->priv, item->index, to_index, count);
+}
+
+static void parman_prio_shift_down(struct parman *parman,
+ struct parman_prio *prio)
+{
+ struct parman_item *item;
+ unsigned long to_index;
+
+ if (!parman_prio_used(prio))
+ return;
+ item = parman_prio_first_item(prio);
+ to_index = parman_prio_last_index(prio) + 1;
+ __parman_prio_move(parman, prio, item, to_index, 1);
+ list_move_tail(&item->list, &prio->item_list);
+ item->index = to_index;
+}
+
+static void parman_prio_shift_up(struct parman *parman,
+ struct parman_prio *prio)
+{
+ struct parman_item *item;
+ unsigned long to_index;
+
+ if (!parman_prio_used(prio))
+ return;
+ item = parman_prio_last_item(prio);
+ to_index = parman_prio_first_index(prio) - 1;
+ __parman_prio_move(parman, prio, item, to_index, 1);
+ list_move(&item->list, &prio->item_list);
+ item->index = to_index;
+}
+
+static void parman_prio_item_remove(struct parman *parman,
+ struct parman_prio *prio,
+ struct parman_item *item)
+{
+ struct parman_item *last_item;
+ unsigned long to_index;
+
+ last_item = parman_prio_last_item(prio);
+ if (last_item == item) {
+ list_del(&item->list);
+ return;
+ }
+ to_index = item->index;
+ __parman_prio_move(parman, prio, last_item, to_index, 1);
+ list_del(&last_item->list);
+ list_replace(&item->list, &last_item->list);
+ last_item->index = to_index;
+}
+
+static int parman_lsort_item_add(struct parman *parman,
+ struct parman_prio *prio,
+ struct parman_item *item)
+{
+ struct parman_prio *prio2;
+ unsigned long new_index;
+ int err;
+
+ if (parman->count + 1 > parman->limit_count) {
+ err = parman_enlarge(parman);
+ if (err)
+ return err;
+ }
+
+ new_index = parman_lsort_new_index_find(parman, prio);
+ list_for_each_entry_reverse(prio2, &parman->prio_list, list) {
+ if (prio2 == prio)
+ break;
+ parman_prio_shift_down(parman, prio2);
+ }
+ item->index = new_index;
+ list_add_tail(&item->list, &prio->item_list);
+ parman->count++;
+ return 0;
+}
+
+static void parman_lsort_item_remove(struct parman *parman,
+ struct parman_prio *prio,
+ struct parman_item *item)
+{
+ parman_prio_item_remove(parman, prio, item);
+ list_for_each_entry_continue(prio, &parman->prio_list, list)
+ parman_prio_shift_up(parman, prio);
+ parman->count--;
+ if (parman->limit_count - parman->count >= parman->ops->resize_step)
+ parman_shrink(parman);
+}
+
+static const struct parman_algo parman_lsort = {
+ .item_add = parman_lsort_item_add,
+ .item_remove = parman_lsort_item_remove,
+};
+
+static const struct parman_algo *parman_algos[] = {
+ &parman_lsort,
+};
+
+/**
+ * parman_create - creates a new parman instance
+ * @ops: caller-specific callbacks
+ * @priv: pointer to a private data passed to the ops
+ *
+ * Note: all locking must be provided by the caller.
+ *
+ * Each parman instance manages an array area with chunks of entries
+ * with the same priority. Consider the following example:
+ *
+ * item 1 with prio 10
+ * item 2 with prio 10
+ * item 3 with prio 10
+ * item 4 with prio 20
+ * item 5 with prio 20
+ * item 6 with prio 30
+ * item 7 with prio 30
+ * item 8 with prio 30
+ *
+ * In this example, there are 3 priority chunks. The order of the priorities
+ * matters; however, the order of items within a single priority chunk does not
+ * matter. So the same array could be ordered as follows:
+ *
+ * item 2 with prio 10
+ * item 3 with prio 10
+ * item 1 with prio 10
+ * item 5 with prio 20
+ * item 4 with prio 20
+ * item 7 with prio 30
+ * item 8 with prio 30
+ * item 6 with prio 30
+ *
+ * The goal of parman is to maintain the priority ordering. The caller
+ * provides @ops with callbacks parman uses to move the items
+ * and resize the array area.
+ *
+ * Returns a pointer to the newly created parman instance in case of success,
+ * otherwise it returns NULL.
+ */
+struct parman *parman_create(const struct parman_ops *ops, void *priv)
+{
+ struct parman *parman;
+
+ parman = kzalloc(sizeof(*parman), GFP_KERNEL);
+ if (!parman)
+ return NULL;
+ INIT_LIST_HEAD(&parman->prio_list);
+ parman->ops = ops;
+ parman->priv = priv;
+ parman->limit_count = ops->base_count;
+ parman->algo = parman_algos[ops->algo];
+ return parman;
+}
+EXPORT_SYMBOL(parman_create);
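+
+/*
+ * A minimal usage sketch (illustrative; my_resize(), my_move() and "my_tbl"
+ * are hypothetical caller-side names, and the PARMAN_ALGO_TYPE_LSORT
+ * selector is assumed from <linux/parman.h>):
+ *
+ *   static const struct parman_ops my_ops = {
+ *           .base_count  = 16,
+ *           .resize_step = 16,
+ *           .resize      = my_resize,
+ *           .move        = my_move,
+ *           .algo        = PARMAN_ALGO_TYPE_LSORT,
+ *   };
+ *
+ *   struct parman *parman = parman_create(&my_ops, my_tbl);
+ *
+ * my_resize(priv, new_count) would reallocate the backing array to
+ * new_count entries; my_move(priv, from, to, count) would copy count
+ * entries from index "from" to index "to".
+ */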
+
+/**
+ * parman_destroy - destroys existing parman instance
+ * @parman: parman instance
+ *
+ * Note: all locking must be provided by the caller.
+ */
+void parman_destroy(struct parman *parman)
+{
+ WARN_ON(!list_empty(&parman->prio_list));
+ kfree(parman);
+}
+EXPORT_SYMBOL(parman_destroy);
+
+/**
+ * parman_prio_init - initializes a parman priority chunk
+ * @parman: parman instance
+ * @prio: parman prio structure to be initialized
+ * @priority: desired priority of the chunk
+ *
+ * Note: all locking must be provided by the caller.
+ *
+ * Before the caller can add an item with a certain priority, a priority
+ * chunk has to be initialized for it using this function.
+ */
+void parman_prio_init(struct parman *parman, struct parman_prio *prio,
+ unsigned long priority)
+{
+ struct parman_prio *prio2;
+ struct list_head *pos;
+
+ INIT_LIST_HEAD(&prio->item_list);
+ prio->priority = priority;
+
+ /* Position inside the list according to priority */
+ list_for_each(pos, &parman->prio_list) {
+ prio2 = list_entry(pos, typeof(*prio2), list);
+ if (prio2->priority > prio->priority)
+ break;
+ }
+ list_add_tail(&prio->list, pos);
+}
+EXPORT_SYMBOL(parman_prio_init);
+
+/**
+ * parman_prio_fini - finalizes use of parman priority chunk
+ * @prio: parman prio structure
+ *
+ * Note: all locking must be provided by the caller.
+ */
+void parman_prio_fini(struct parman_prio *prio)
+{
+ WARN_ON(parman_prio_used(prio));
+ list_del(&prio->list);
+}
+EXPORT_SYMBOL(parman_prio_fini);
+
+/**
+ * parman_item_add - adds a parman item under defined priority
+ * @parman: parman instance
+ * @prio: parman prio instance to add the item to
+ * @item: parman item instance
+ *
+ * Note: all locking must be provided by the caller.
+ *
+ * Adds the item to an array managed by the parman instance under the
+ * specified priority.
+ *
+ * Returns 0 in case of success, or a negative number to indicate an error.
+ */
+int parman_item_add(struct parman *parman, struct parman_prio *prio,
+ struct parman_item *item)
+{
+ return parman->algo->item_add(parman, prio, item);
+}
+EXPORT_SYMBOL(parman_item_add);
+
+/**
+ * parman_item_remove - deletes parman item
+ * @parman: parman instance
+ * @prio: parman prio instance to delete the item from
+ * @item: parman item instance
+ *
+ * Note: all locking must be provided by the caller.
+ */
+void parman_item_remove(struct parman *parman, struct parman_prio *prio,
+ struct parman_item *item)
+{
+ parman->algo->item_remove(parman, prio, item);
+}
+EXPORT_SYMBOL(parman_item_remove);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
+MODULE_DESCRIPTION("Priority-based array manager");
diff --git a/lib/parser.c b/lib/parser.c
new file mode 100644
index 000000000..f4eafb9d7
--- /dev/null
+++ b/lib/parser.c
@@ -0,0 +1,363 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * lib/parser.c - simple parser for mount, etc. options.
+ */
+
+#include <linux/ctype.h>
+#include <linux/types.h>
+#include <linux/export.h>
+#include <linux/kstrtox.h>
+#include <linux/parser.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+/*
+ * Maximum number of characters needed to express a u64 in different bases:
+ * HEX: "0xFFFFFFFFFFFFFFFF" --> 18
+ * DEC: "18446744073709551615" --> 20
+ * OCT: "01777777777777777777777" --> 23
+ * Pick the largest, with room for the terminating NUL, to define
+ * NUMBER_BUF_LEN.
+ */
+#define NUMBER_BUF_LEN 24
+
+/**
+ * match_one - Determines if a string matches a simple pattern
+ * @s: the string to examine for presence of the pattern
+ * @p: the string containing the pattern
+ * @args: array of %MAX_OPT_ARGS &substring_t elements. Used to return match
+ * locations.
+ *
+ * Description: Determines if the pattern @p is present in string @s. Can only
+ * match extremely simple token=arg style patterns. If the pattern is found,
+ * the location(s) of the arguments will be returned in the @args array.
+ */
+static int match_one(char *s, const char *p, substring_t args[])
+{
+ char *meta;
+ int argc = 0;
+
+ if (!p)
+ return 1;
+
+ while (1) {
+ int len = -1;
+ meta = strchr(p, '%');
+ if (!meta)
+ return strcmp(p, s) == 0;
+
+ if (strncmp(p, s, meta-p))
+ return 0;
+
+ s += meta - p;
+ p = meta + 1;
+
+ if (isdigit(*p))
+ len = simple_strtoul(p, (char **) &p, 10);
+ else if (*p == '%') {
+ if (*s++ != '%')
+ return 0;
+ p++;
+ continue;
+ }
+
+ if (argc >= MAX_OPT_ARGS)
+ return 0;
+
+ args[argc].from = s;
+ switch (*p++) {
+ case 's': {
+ size_t str_len = strlen(s);
+
+ if (str_len == 0)
+ return 0;
+ if (len == -1 || len > str_len)
+ len = str_len;
+ args[argc].to = s + len;
+ break;
+ }
+ case 'd':
+ simple_strtol(s, &args[argc].to, 0);
+ goto num;
+ case 'u':
+ simple_strtoul(s, &args[argc].to, 0);
+ goto num;
+ case 'o':
+ simple_strtoul(s, &args[argc].to, 8);
+ goto num;
+ case 'x':
+ simple_strtoul(s, &args[argc].to, 16);
+ num:
+ if (args[argc].to == args[argc].from)
+ return 0;
+ break;
+ default:
+ return 0;
+ }
+ s = args[argc].to;
+ argc++;
+ }
+}
+
+/**
+ * match_token - Find a token (and optional args) in a string
+ * @s: the string to examine for token/argument pairs
+ * @table: match_table_t describing the set of allowed option tokens and the
+ * arguments that may be associated with them. Must be terminated with a
+ * &struct match_token whose pattern is set to the NULL pointer.
+ * @args: array of %MAX_OPT_ARGS &substring_t elements. Used to return match
+ * locations.
+ *
+ * Description: Detects which, if any, of a set of token strings has been passed
+ * to it. Tokens can include up to %MAX_OPT_ARGS instances of basic C-style
+ * format identifiers which will be taken into account when matching the
+ * tokens, and whose locations will be returned in the @args array.
+ */
+int match_token(char *s, const match_table_t table, substring_t args[])
+{
+ const struct match_token *p;
+
+ for (p = table; !match_one(s, p->pattern, args) ; p++)
+ ;
+
+ return p->token;
+}
+EXPORT_SYMBOL(match_token);
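+
+/*
+ * A minimal usage sketch (illustrative; the option names are hypothetical):
+ *
+ *   enum { Opt_uid, Opt_name, Opt_err };
+ *
+ *   static const match_table_t tokens = {
+ *           { Opt_uid, "uid=%d" },
+ *           { Opt_name, "name=%s" },
+ *           { Opt_err, NULL }
+ *   };
+ *
+ *   substring_t args[MAX_OPT_ARGS];
+ *   int token = match_token(opt, tokens, args);
+ *
+ * For opt == "uid=1000", token is Opt_uid and args[0] spans "1000".
+ */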
+
+/**
+ * match_number - scan a number in the given base from a substring_t
+ * @s: substring to be scanned
+ * @result: resulting integer on success
+ * @base: base to use when converting string
+ *
+ * Description: Given a &substring_t and a base, attempts to parse the substring
+ * as a number in that base.
+ *
+ * Return: On success, sets @result to the integer represented by the
+ * string and returns 0. Returns -EINVAL or -ERANGE on failure.
+ */
+static int match_number(substring_t *s, int *result, int base)
+{
+ char *endp;
+ char buf[NUMBER_BUF_LEN];
+ int ret;
+ long val;
+
+ if (match_strlcpy(buf, s, NUMBER_BUF_LEN) >= NUMBER_BUF_LEN)
+ return -ERANGE;
+ ret = 0;
+ val = simple_strtol(buf, &endp, base);
+ if (endp == buf)
+ ret = -EINVAL;
+ else if (val < (long)INT_MIN || val > (long)INT_MAX)
+ ret = -ERANGE;
+ else
+ *result = (int) val;
+ return ret;
+}
+
+/**
+ * match_u64int - scan a number in the given base from a substring_t
+ * @s: substring to be scanned
+ * @result: resulting u64 on success
+ * @base: base to use when converting string
+ *
+ * Description: Given a &substring_t and a base, attempts to parse the substring
+ * as a number in that base.
+ *
+ * Return: On success, sets @result to the integer represented by the
+ * string and returns 0. Returns -EINVAL or -ERANGE on failure.
+ */
+static int match_u64int(substring_t *s, u64 *result, int base)
+{
+ char buf[NUMBER_BUF_LEN];
+ int ret;
+ u64 val;
+
+ if (match_strlcpy(buf, s, NUMBER_BUF_LEN) >= NUMBER_BUF_LEN)
+ return -ERANGE;
+ ret = kstrtoull(buf, base, &val);
+ if (!ret)
+ *result = val;
+ return ret;
+}
+
+/**
+ * match_int - scan a decimal representation of an integer from a substring_t
+ * @s: substring_t to be scanned
+ * @result: resulting integer on success
+ *
+ * Description: Attempts to parse the &substring_t @s as a decimal integer.
+ *
+ * Return: On success, sets @result to the integer represented by the string
+ * and returns 0. Returns -EINVAL or -ERANGE on failure.
+ */
+int match_int(substring_t *s, int *result)
+{
+ return match_number(s, result, 0);
+}
+EXPORT_SYMBOL(match_int);
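+
+/*
+ * For example (illustrative, continuing the match_token() sketch above),
+ * extracting the numeric argument matched by "uid=%d":
+ *
+ *   int uid;
+ *
+ *   if (match_int(&args[0], &uid))
+ *           return -EINVAL;
+ */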
+
+/**
+ * match_uint - scan a decimal representation of an integer from a substring_t
+ * @s: substring_t to be scanned
+ * @result: resulting integer on success
+ *
+ * Description: Attempts to parse the &substring_t @s as a decimal integer.
+ *
+ * Return: On success, sets @result to the integer represented by the string
+ * and returns 0. Returns -EINVAL or -ERANGE on failure.
+ */
+int match_uint(substring_t *s, unsigned int *result)
+{
+ char buf[NUMBER_BUF_LEN];
+
+ if (match_strlcpy(buf, s, NUMBER_BUF_LEN) >= NUMBER_BUF_LEN)
+ return -ERANGE;
+
+ return kstrtouint(buf, 10, result);
+}
+EXPORT_SYMBOL(match_uint);
+
+/**
+ * match_u64 - scan a decimal representation of a u64 from
+ * a substring_t
+ * @s: substring_t to be scanned
+ * @result: resulting unsigned long long on success
+ *
+ * Description: Attempts to parse the &substring_t @s as a decimal
+ * u64 integer.
+ *
+ * Return: On success, sets @result to the integer represented by the string
+ * and returns 0. Returns -EINVAL or -ERANGE on failure.
+ */
+int match_u64(substring_t *s, u64 *result)
+{
+ return match_u64int(s, result, 0);
+}
+EXPORT_SYMBOL(match_u64);
+
+/**
+ * match_octal - scan an octal representation of an integer from a substring_t
+ * @s: substring_t to be scanned
+ * @result: resulting integer on success
+ *
+ * Description: Attempts to parse the &substring_t @s as an octal integer.
+ *
+ * Return: On success, sets @result to the integer represented by the string
+ * and returns 0. Returns -EINVAL or -ERANGE on failure.
+ */
+int match_octal(substring_t *s, int *result)
+{
+ return match_number(s, result, 8);
+}
+EXPORT_SYMBOL(match_octal);
+
+/**
+ * match_hex - scan a hex representation of an integer from a substring_t
+ * @s: substring_t to be scanned
+ * @result: resulting integer on success
+ *
+ * Description: Attempts to parse the &substring_t @s as a hexadecimal integer.
+ *
+ * Return: On success, sets @result to the integer represented by the string
+ * and returns 0. Returns -EINVAL or -ERANGE on failure.
+ */
+int match_hex(substring_t *s, int *result)
+{
+ return match_number(s, result, 16);
+}
+EXPORT_SYMBOL(match_hex);
+
+/**
+ * match_wildcard - parse if a string matches given wildcard pattern
+ * @pattern: wildcard pattern
+ * @str: the string to be parsed
+ *
+ * Description: Parse the string @str to check if it matches the wildcard
+ * pattern @pattern. The pattern may contain two types of wildcards:
+ * '*' - matches zero or more characters
+ * '?' - matches one character
+ *
+ * Return: If the @str matches the @pattern, return true, else return false.
+ */
+bool match_wildcard(const char *pattern, const char *str)
+{
+ const char *s = str;
+ const char *p = pattern;
+ bool star = false;
+
+ while (*s) {
+ switch (*p) {
+ case '?':
+ s++;
+ p++;
+ break;
+ case '*':
+ star = true;
+ str = s;
+ if (!*++p)
+ return true;
+ pattern = p;
+ break;
+ default:
+ if (*s == *p) {
+ s++;
+ p++;
+ } else {
+ if (!star)
+ return false;
+ str++;
+ s = str;
+ p = pattern;
+ }
+ break;
+ }
+ }
+
+ if (*p == '*')
+ ++p;
+ return !*p;
+}
+EXPORT_SYMBOL(match_wildcard);
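+
+/*
+ * For example (illustrative):
+ *
+ *   match_wildcard("eth*", "eth0")   returns true
+ *   match_wildcard("*0", "eth0")     returns true
+ *   match_wildcard("eth?", "eth10")  returns false ('?' matches one char)
+ */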
+
+/**
+ * match_strlcpy - Copy the characters from a substring_t to a sized buffer
+ * @dest: where to copy to
+ * @src: &substring_t to copy
+ * @size: size of destination buffer
+ *
+ * Description: Copy the characters in &substring_t @src to the
+ * c-style string @dest. Copy no more than @size - 1 characters, plus
+ * the terminating NUL.
+ *
+ * Return: length of @src.
+ */
+size_t match_strlcpy(char *dest, const substring_t *src, size_t size)
+{
+ size_t ret = src->to - src->from;
+
+ if (size) {
+ size_t len = ret >= size ? size - 1 : ret;
+ memcpy(dest, src->from, len);
+ dest[len] = '\0';
+ }
+ return ret;
+}
+EXPORT_SYMBOL(match_strlcpy);
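+
+/*
+ * For example (illustrative), copying a matched "%s" argument into a fixed
+ * buffer with truncation detection:
+ *
+ *   char name[16];
+ *
+ *   if (match_strlcpy(name, &args[0], sizeof(name)) >= sizeof(name))
+ *           return -ENAMETOOLONG;
+ */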
+
+/**
+ * match_strdup - allocate a new string with the contents of a substring_t
+ * @s: &substring_t to copy
+ *
+ * Description: Allocates and returns a string filled with the contents of
+ * the &substring_t @s. The caller is responsible for freeing the returned
+ * string with kfree().
+ *
+ * Return: the address of the newly allocated NUL-terminated string or
+ * %NULL on error.
+ */
+char *match_strdup(const substring_t *s)
+{
+ return kmemdup_nul(s->from, s->to - s->from, GFP_KERNEL);
+}
+EXPORT_SYMBOL(match_strdup);
diff --git a/lib/pci_iomap.c b/lib/pci_iomap.c
new file mode 100644
index 000000000..ce39ce9f3
--- /dev/null
+++ b/lib/pci_iomap.c
@@ -0,0 +1,180 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Implement the default iomap interfaces
+ *
+ * (C) Copyright 2004 Linus Torvalds
+ */
+#include <linux/pci.h>
+#include <linux/io.h>
+
+#include <linux/export.h>
+
+#ifdef CONFIG_PCI
+/**
+ * pci_iomap_range - create a virtual mapping cookie for a PCI BAR
+ * @dev: PCI device that owns the BAR
+ * @bar: BAR number
+ * @offset: map memory at the given offset in BAR
+ * @maxlen: max length of the memory to map
+ *
+ * Using this function you will get a __iomem address to your device BAR.
+ * You can access it using ioread*() and iowrite*(). These functions hide
+ * the details of whether this is an MMIO or PIO address space and will just do what
+ * you expect from them in the correct way.
+ *
+ * @maxlen specifies the maximum length to map. If you want to get access to
+ * the complete BAR from offset to the end, pass %0 here.
+ */
+void __iomem *pci_iomap_range(struct pci_dev *dev,
+ int bar,
+ unsigned long offset,
+ unsigned long maxlen)
+{
+ resource_size_t start = pci_resource_start(dev, bar);
+ resource_size_t len = pci_resource_len(dev, bar);
+ unsigned long flags = pci_resource_flags(dev, bar);
+
+ if (len <= offset || !start)
+ return NULL;
+ len -= offset;
+ start += offset;
+ if (maxlen && len > maxlen)
+ len = maxlen;
+ if (flags & IORESOURCE_IO)
+ return __pci_ioport_map(dev, start, len);
+ if (flags & IORESOURCE_MEM)
+ return ioremap(start, len);
+ /* What? */
+ return NULL;
+}
+EXPORT_SYMBOL(pci_iomap_range);
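+
+/*
+ * A minimal usage sketch (illustrative; the BAR number and register offset
+ * are hypothetical, and SZ_4K is assumed from <linux/sizes.h>), mapping
+ * the first 4 KiB of BAR 0 in a driver probe routine:
+ *
+ *   void __iomem *regs;
+ *
+ *   regs = pci_iomap_range(pdev, 0, 0, SZ_4K);
+ *   if (!regs)
+ *           return -ENOMEM;
+ *   iowrite32(0x1, regs + 0x10);
+ */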
+
+/**
+ * pci_iomap_wc_range - create a virtual WC mapping cookie for a PCI BAR
+ * @dev: PCI device that owns the BAR
+ * @bar: BAR number
+ * @offset: map memory at the given offset in BAR
+ * @maxlen: max length of the memory to map
+ *
+ * Using this function you will get a __iomem address to your device BAR.
+ * You can access it using ioread*() and iowrite*(). These functions hide
+ * the details of whether this is an MMIO or PIO address space and will just do what
+ * you expect from them in the correct way. When possible write combining
+ * is used.
+ *
+ * @maxlen specifies the maximum length to map. If you want to get access to
+ * the complete BAR from offset to the end, pass %0 here.
+ */
+void __iomem *pci_iomap_wc_range(struct pci_dev *dev,
+ int bar,
+ unsigned long offset,
+ unsigned long maxlen)
+{
+ resource_size_t start = pci_resource_start(dev, bar);
+ resource_size_t len = pci_resource_len(dev, bar);
+ unsigned long flags = pci_resource_flags(dev, bar);
+
+ if (flags & IORESOURCE_IO)
+ return NULL;
+
+ if (len <= offset || !start)
+ return NULL;
+
+ len -= offset;
+ start += offset;
+ if (maxlen && len > maxlen)
+ len = maxlen;
+
+ if (flags & IORESOURCE_MEM)
+ return ioremap_wc(start, len);
+
+ /* What? */
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(pci_iomap_wc_range);
+
+/**
+ * pci_iomap - create a virtual mapping cookie for a PCI BAR
+ * @dev: PCI device that owns the BAR
+ * @bar: BAR number
+ * @maxlen: length of the memory to map
+ *
+ * Using this function you will get a __iomem address to your device BAR.
+ * You can access it using ioread*() and iowrite*(). These functions hide
+ * the details of whether this is an MMIO or PIO address space and will just do what
+ * you expect from them in the correct way.
+ *
+ * @maxlen specifies the maximum length to map. If you want to get access to
+ * the complete BAR without checking for its length first, pass %0 here.
+ */
+void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
+{
+ return pci_iomap_range(dev, bar, 0, maxlen);
+}
+EXPORT_SYMBOL(pci_iomap);
+
+/**
+ * pci_iomap_wc - create a virtual WC mapping cookie for a PCI BAR
+ * @dev: PCI device that owns the BAR
+ * @bar: BAR number
+ * @maxlen: length of the memory to map
+ *
+ * Using this function you will get a __iomem address to your device BAR.
+ * You can access it using ioread*() and iowrite*(). These functions hide
+ * the details of whether this is an MMIO or PIO address space and will just do what
+ * you expect from them in the correct way. When possible write combining
+ * is used.
+ *
+ * @maxlen specifies the maximum length to map. If you want to get access to
+ * the complete BAR without checking for its length first, pass %0 here.
+ */
+void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen)
+{
+ return pci_iomap_wc_range(dev, bar, 0, maxlen);
+}
+EXPORT_SYMBOL_GPL(pci_iomap_wc);
+
+/*
+ * pci_iounmap() somewhat illogically comes from lib/iomap.c for the
+ * CONFIG_GENERIC_IOMAP case, because that's the code that knows about
+ * the different IOMAP ranges.
+ *
+ * But if the architecture does not use the generic iomap code, and if
+ * it has _not_ defined its own private pci_iounmap function, we define
+ * it here.
+ *
+ * NOTE! This default implementation assumes that if the architecture
+ * supports ioport mapping (HAS_IOPORT_MAP), the ioport mapping will
+ * be fixed to the range [ PCI_IOBASE, PCI_IOBASE+IO_SPACE_LIMIT [,
+ * and does not need unmapping with 'ioport_unmap()'.
+ *
+ * If you have different rules for your architecture, you need to
+ * implement your own pci_iounmap() that knows the rules for where
+ * and how IO vs MEM get mapped.
+ *
+ * This code is odd, and the ARCH_HAS/ARCH_WANTS #define logic comes
+ * from legacy <asm-generic/io.h> header file behavior. In particular,
+ * it would seem to make sense to do the iounmap(p) for the non-IO-space
+ * case here regardless, but that's not what the old header file code
+ * did. Probably incorrectly, but this is meant to be bug-for-bug
+ * compatible.
+ */
+#if defined(ARCH_WANTS_GENERIC_PCI_IOUNMAP)
+
+void pci_iounmap(struct pci_dev *dev, void __iomem *p)
+{
+#ifdef ARCH_HAS_GENERIC_IOPORT_MAP
+ uintptr_t start = (uintptr_t) PCI_IOBASE;
+ uintptr_t addr = (uintptr_t) p;
+
+ if (addr >= start && addr < start + IO_SPACE_LIMIT)
+ return;
+ iounmap(p);
+#endif
+}
+EXPORT_SYMBOL(pci_iounmap);
+
+#endif /* ARCH_WANTS_GENERIC_PCI_IOUNMAP */
+
+#endif /* CONFIG_PCI */
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
new file mode 100644
index 000000000..668f6aa6a
--- /dev/null
+++ b/lib/percpu-refcount.c
@@ -0,0 +1,479 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/percpu-refcount.h>
+
+/*
+ * Initially, a percpu refcount is just a set of percpu counters, and we
+ * don't try to detect the ref hitting 0 - which means that get/put can just
+ * increment or decrement the local counter. Note that the counter on a
+ * particular cpu can (and will) wrap - this is fine, when we go to shutdown the
+ * percpu counters will all sum to the correct value
+ *
+ * (More precisely: because modular arithmetic is commutative the sum of all the
+ * percpu_count vars will be equal to what it would have been if all the gets
+ * and puts were done to a single integer, even if some of the percpu integers
+ * overflow or underflow).
+ *
+ * The real trick to implementing percpu refcounts is shutdown. We can't detect
+ * the ref hitting 0 on every put - this would require global synchronization
+ * and defeat the whole purpose of using percpu refs.
+ *
+ * What we do is require the user to keep track of the initial refcount; we know
+ * the ref can't hit 0 before the user drops the initial ref, so as long as we
+ * convert to non percpu mode before the initial ref is dropped everything
+ * works.
+ *
+ * Converting to non percpu mode is done with some RCUish stuff in
+ * percpu_ref_kill. Additionally, we need a bias value so that the
+ * atomic_long_t can't hit 0 before we've added up all the percpu refs.
+ */
+
+#define PERCPU_COUNT_BIAS (1LU << (BITS_PER_LONG - 1))
+
+static DEFINE_SPINLOCK(percpu_ref_switch_lock);
+static DECLARE_WAIT_QUEUE_HEAD(percpu_ref_switch_waitq);
+
+static unsigned long __percpu *percpu_count_ptr(struct percpu_ref *ref)
+{
+ return (unsigned long __percpu *)
+ (ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC_DEAD);
+}
+
+/**
+ * percpu_ref_init - initialize a percpu refcount
+ * @ref: percpu_ref to initialize
+ * @release: function which will be called when refcount hits 0
+ * @flags: PERCPU_REF_INIT_* flags
+ * @gfp: allocation mask to use
+ *
+ * Initializes @ref. @ref starts out in percpu mode with a refcount of 1 unless
+ * @flags contains PERCPU_REF_INIT_ATOMIC or PERCPU_REF_INIT_DEAD. These flags
+ * change the start state to atomic with the latter setting the initial refcount
+ * to 0. See the definitions of PERCPU_REF_INIT_* flags for flag behaviors.
+ *
+ * Note that @release must not sleep - it may potentially be called from RCU
+ * callback context by percpu_ref_kill().
+ */
+int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
+ unsigned int flags, gfp_t gfp)
+{
+ size_t align = max_t(size_t, 1 << __PERCPU_REF_FLAG_BITS,
+ __alignof__(unsigned long));
+ unsigned long start_count = 0;
+ struct percpu_ref_data *data;
+
+ ref->percpu_count_ptr = (unsigned long)
+ __alloc_percpu_gfp(sizeof(unsigned long), align, gfp);
+ if (!ref->percpu_count_ptr)
+ return -ENOMEM;
+
+ data = kzalloc(sizeof(*ref->data), gfp);
+ if (!data) {
+ free_percpu((void __percpu *)ref->percpu_count_ptr);
+ ref->percpu_count_ptr = 0;
+ return -ENOMEM;
+ }
+
+ data->force_atomic = flags & PERCPU_REF_INIT_ATOMIC;
+ data->allow_reinit = flags & PERCPU_REF_ALLOW_REINIT;
+
+ if (flags & (PERCPU_REF_INIT_ATOMIC | PERCPU_REF_INIT_DEAD)) {
+ ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
+ data->allow_reinit = true;
+ } else {
+ start_count += PERCPU_COUNT_BIAS;
+ }
+
+ if (flags & PERCPU_REF_INIT_DEAD)
+ ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
+ else
+ start_count++;
+
+ atomic_long_set(&data->count, start_count);
+
+ data->release = release;
+ data->confirm_switch = NULL;
+ data->ref = ref;
+ ref->data = data;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(percpu_ref_init);
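+
+/*
+ * A minimal usage sketch (illustrative; "struct my_obj" and its release
+ * callback are hypothetical):
+ *
+ *   struct my_obj {
+ *           struct percpu_ref ref;
+ *   };
+ *
+ *   static void my_obj_release(struct percpu_ref *ref)
+ *   {
+ *           struct my_obj *obj = container_of(ref, struct my_obj, ref);
+ *
+ *           percpu_ref_exit(&obj->ref);
+ *           kfree(obj);
+ *   }
+ *
+ *   err = percpu_ref_init(&obj->ref, my_obj_release, 0, GFP_KERNEL);
+ */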
+
+static void __percpu_ref_exit(struct percpu_ref *ref)
+{
+ unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
+
+ if (percpu_count) {
+ /* non-NULL confirm_switch indicates switching in progress */
+ WARN_ON_ONCE(ref->data && ref->data->confirm_switch);
+ free_percpu(percpu_count);
+ ref->percpu_count_ptr = __PERCPU_REF_ATOMIC_DEAD;
+ }
+}
+
+/**
+ * percpu_ref_exit - undo percpu_ref_init()
+ * @ref: percpu_ref to exit
+ *
+ * This function exits @ref. The caller is responsible for ensuring that
+ * @ref is no longer in active use. The usual places to invoke this
+ * function from are the @ref->release() callback or in init failure path
+ * where percpu_ref_init() succeeded but other parts of the initialization
+ * of the embedding object failed.
+ */
+void percpu_ref_exit(struct percpu_ref *ref)
+{
+ struct percpu_ref_data *data = ref->data;
+ unsigned long flags;
+
+ __percpu_ref_exit(ref);
+
+ if (!data)
+ return;
+
+ spin_lock_irqsave(&percpu_ref_switch_lock, flags);
+ ref->percpu_count_ptr |= atomic_long_read(&ref->data->count) <<
+ __PERCPU_REF_FLAG_BITS;
+ ref->data = NULL;
+ spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
+
+ kfree(data);
+}
+EXPORT_SYMBOL_GPL(percpu_ref_exit);
+
+static void percpu_ref_call_confirm_rcu(struct rcu_head *rcu)
+{
+ struct percpu_ref_data *data = container_of(rcu,
+ struct percpu_ref_data, rcu);
+ struct percpu_ref *ref = data->ref;
+
+ data->confirm_switch(ref);
+ data->confirm_switch = NULL;
+ wake_up_all(&percpu_ref_switch_waitq);
+
+ if (!data->allow_reinit)
+ __percpu_ref_exit(ref);
+
+ /* drop ref from percpu_ref_switch_to_atomic() */
+ percpu_ref_put(ref);
+}
+
+static void percpu_ref_switch_to_atomic_rcu(struct rcu_head *rcu)
+{
+ struct percpu_ref_data *data = container_of(rcu,
+ struct percpu_ref_data, rcu);
+ struct percpu_ref *ref = data->ref;
+ unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
+ static atomic_t underflows;
+ unsigned long count = 0;
+ int cpu;
+
+ for_each_possible_cpu(cpu)
+ count += *per_cpu_ptr(percpu_count, cpu);
+
+ pr_debug("global %lu percpu %lu\n",
+ atomic_long_read(&data->count), count);
+
+ /*
+ * It's crucial that we sum the percpu counters _before_ adding the sum
+ * to &ref->count; since gets could be happening on one cpu while puts
+ * happen on another, adding a single cpu's count could cause
+ * @ref->count to hit 0 before we've got a consistent value - but the
+ * sum of all the counts will be consistent and correct.
+ *
+ * Subtracting the bias value then has to happen _after_ adding count to
+ * &ref->count; we need the bias value to prevent &ref->count from
+ * reaching 0 before we add the percpu counts. But doing it at the same
+ * time is equivalent and saves us atomic operations:
+ */
+ atomic_long_add((long)count - PERCPU_COUNT_BIAS, &data->count);
+
+ if (WARN_ONCE(atomic_long_read(&data->count) <= 0,
+ "percpu ref (%ps) <= 0 (%ld) after switching to atomic",
+ data->release, atomic_long_read(&data->count)) &&
+ atomic_inc_return(&underflows) < 4) {
+ pr_err("%s(): percpu_ref underflow", __func__);
+ mem_dump_obj(data);
+ }
+
+ /* @ref is viewed as dead on all CPUs, send out switch confirmation */
+ percpu_ref_call_confirm_rcu(rcu);
+}
+
+static void percpu_ref_noop_confirm_switch(struct percpu_ref *ref)
+{
+}
+
+static void __percpu_ref_switch_to_atomic(struct percpu_ref *ref,
+ percpu_ref_func_t *confirm_switch)
+{
+ if (ref->percpu_count_ptr & __PERCPU_REF_ATOMIC) {
+ if (confirm_switch)
+ confirm_switch(ref);
+ return;
+ }
+
+ /* switching from percpu to atomic */
+ ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
+
+ /*
+ * Non-NULL ->confirm_switch is used to indicate that switching is
+ * in progress. Use noop one if unspecified.
+ */
+ ref->data->confirm_switch = confirm_switch ?:
+ percpu_ref_noop_confirm_switch;
+
+ percpu_ref_get(ref); /* put after confirmation */
+ call_rcu_hurry(&ref->data->rcu,
+ percpu_ref_switch_to_atomic_rcu);
+}
+
+static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
+{
+ unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
+ int cpu;
+
+ BUG_ON(!percpu_count);
+
+ if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC))
+ return;
+
+ if (WARN_ON_ONCE(!ref->data->allow_reinit))
+ return;
+
+ atomic_long_add(PERCPU_COUNT_BIAS, &ref->data->count);
+
+ /*
+ * Restore per-cpu operation. smp_store_release() is paired
+ * with READ_ONCE() in __ref_is_percpu() and guarantees that the
+ * zeroing is visible to all percpu accesses which can see the
+ * following __PERCPU_REF_ATOMIC clearing.
+ */
+ for_each_possible_cpu(cpu)
+ *per_cpu_ptr(percpu_count, cpu) = 0;
+
+ smp_store_release(&ref->percpu_count_ptr,
+ ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC);
+}
+
+static void __percpu_ref_switch_mode(struct percpu_ref *ref,
+ percpu_ref_func_t *confirm_switch)
+{
+ struct percpu_ref_data *data = ref->data;
+
+ lockdep_assert_held(&percpu_ref_switch_lock);
+
+ /*
+ * If the previous ATOMIC switching hasn't finished yet, wait for
+ * its completion. If the caller ensures that ATOMIC switching
+ * isn't in progress, this function can be called from any context.
+ */
+ wait_event_lock_irq(percpu_ref_switch_waitq, !data->confirm_switch,
+ percpu_ref_switch_lock);
+
+ if (data->force_atomic || percpu_ref_is_dying(ref))
+ __percpu_ref_switch_to_atomic(ref, confirm_switch);
+ else
+ __percpu_ref_switch_to_percpu(ref);
+}
+
+/**
+ * percpu_ref_switch_to_atomic - switch a percpu_ref to atomic mode
+ * @ref: percpu_ref to switch to atomic mode
+ * @confirm_switch: optional confirmation callback
+ *
+ * There's no reason to use this function for the usual reference counting.
+ * Use percpu_ref_kill[_and_confirm]().
+ *
+ * Schedule switching of @ref to atomic mode. All its percpu counts will
+ * be collected to the main atomic counter. On completion, when all CPUs
+ * are guaranteed to be in atomic mode, @confirm_switch, which may not
+ * block, is invoked. This function may be invoked concurrently with all
+ * the get/put operations and can safely be mixed with kill and reinit
+ * operations. Note that @ref will stay in atomic mode across kill/reinit
+ * cycles until percpu_ref_switch_to_percpu() is called.
+ *
+ * This function may block if @ref is in the process of switching to atomic
+ * mode. If the caller ensures that @ref is not in the process of
+ * switching to atomic mode, this function can be called from any context.
+ */
+void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
+ percpu_ref_func_t *confirm_switch)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&percpu_ref_switch_lock, flags);
+
+ ref->data->force_atomic = true;
+ __percpu_ref_switch_mode(ref, confirm_switch);
+
+ spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
+}
+EXPORT_SYMBOL_GPL(percpu_ref_switch_to_atomic);
+
+/**
+ * percpu_ref_switch_to_atomic_sync - switch a percpu_ref to atomic mode
+ * @ref: percpu_ref to switch to atomic mode
+ *
+ * Schedule switching the ref to atomic mode, and wait for the
+ * switch to complete. Caller must ensure that no other thread
+ * will switch back to percpu mode.
+ */
+void percpu_ref_switch_to_atomic_sync(struct percpu_ref *ref)
+{
+ percpu_ref_switch_to_atomic(ref, NULL);
+ wait_event(percpu_ref_switch_waitq, !ref->data->confirm_switch);
+}
+EXPORT_SYMBOL_GPL(percpu_ref_switch_to_atomic_sync);
+
+/**
+ * percpu_ref_switch_to_percpu - switch a percpu_ref to percpu mode
+ * @ref: percpu_ref to switch to percpu mode
+ *
+ * There's no reason to use this function for the usual reference counting.
+ * To re-use an expired ref, use percpu_ref_reinit().
+ *
+ * Switch @ref to percpu mode. This function may be invoked concurrently
+ * with all the get/put operations and can safely be mixed with kill and
+ * reinit operations. This function reverses the sticky atomic state set
+ * by PERCPU_REF_INIT_ATOMIC or percpu_ref_switch_to_atomic(). If @ref is
+ * dying or dead, the actual switching takes place on the following
+ * percpu_ref_reinit().
+ *
+ * This function may block if @ref is in the process of switching to atomic
+ * mode. If the caller ensures that @ref is not in the process of
+ * switching to atomic mode, this function can be called from any context.
+ */
+void percpu_ref_switch_to_percpu(struct percpu_ref *ref)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&percpu_ref_switch_lock, flags);
+
+ ref->data->force_atomic = false;
+ __percpu_ref_switch_mode(ref, NULL);
+
+ spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
+}
+EXPORT_SYMBOL_GPL(percpu_ref_switch_to_percpu);
+
+/**
+ * percpu_ref_kill_and_confirm - drop the initial ref and schedule confirmation
+ * @ref: percpu_ref to kill
+ * @confirm_kill: optional confirmation callback
+ *
+ * Equivalent to percpu_ref_kill() but also schedules kill confirmation if
+ * @confirm_kill is not NULL. @confirm_kill, which may not block, will be
+ * called after @ref is seen as dead from all CPUs at which point all
+ * further invocations of percpu_ref_tryget_live() will fail. See
+ * percpu_ref_tryget_live() for details.
+ *
+ * This function normally doesn't block and can be called from any context
+ * but it may block if @confirm_kill is specified and @ref is in the
+ * process of switching to atomic mode by percpu_ref_switch_to_atomic().
+ *
+ * There are no implied RCU grace periods between kill and release.
+ */
+void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
+ percpu_ref_func_t *confirm_kill)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&percpu_ref_switch_lock, flags);
+
+ WARN_ONCE(percpu_ref_is_dying(ref),
+ "%s called more than once on %ps!", __func__,
+ ref->data->release);
+
+ ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
+ __percpu_ref_switch_mode(ref, confirm_kill);
+ percpu_ref_put(ref);
+
+ spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
+}
+EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
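+
+/*
+ * For example (illustrative, continuing the percpu_ref_init() sketch above),
+ * dropping the initial reference at shutdown; once all remaining gets are
+ * matched by puts, my_obj_release() runs:
+ *
+ *   percpu_ref_kill(&obj->ref);
+ */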
+
+/**
+ * percpu_ref_is_zero - test whether a percpu refcount reached zero
+ * @ref: percpu_ref to test
+ *
+ * Returns %true if @ref reached zero.
+ *
+ * This function is safe to call as long as @ref is between init and exit.
+ */
+bool percpu_ref_is_zero(struct percpu_ref *ref)
+{
+ unsigned long __percpu *percpu_count;
+ unsigned long count, flags;
+
+ if (__ref_is_percpu(ref, &percpu_count))
+ return false;
+
+ /* protect us from being destroyed */
+ spin_lock_irqsave(&percpu_ref_switch_lock, flags);
+ if (ref->data)
+ count = atomic_long_read(&ref->data->count);
+ else
+ count = ref->percpu_count_ptr >> __PERCPU_REF_FLAG_BITS;
+ spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
+
+ return count == 0;
+}
+EXPORT_SYMBOL_GPL(percpu_ref_is_zero);
+
+/**
+ * percpu_ref_reinit - re-initialize a percpu refcount
+ * @ref: percpu_ref to re-initialize
+ *
+ * Re-initialize @ref so that it's in the same state as when it finished
+ * percpu_ref_init() ignoring %PERCPU_REF_INIT_DEAD. @ref must have been
+ * initialized successfully and reached 0 but not exited.
+ *
+ * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
+ * this function is in progress.
+ */
+void percpu_ref_reinit(struct percpu_ref *ref)
+{
+ WARN_ON_ONCE(!percpu_ref_is_zero(ref));
+
+ percpu_ref_resurrect(ref);
+}
+EXPORT_SYMBOL_GPL(percpu_ref_reinit);
+
+/**
+ * percpu_ref_resurrect - modify a percpu refcount from dead to live
+ * @ref: percpu_ref to resurrect
+ *
+ * Modify @ref so that it's in the same state as before percpu_ref_kill() was
+ * called. @ref must be dead but must not yet have exited.
+ *
+ * If @ref->release() frees @ref then the caller is responsible for
+ * guaranteeing that @ref->release() does not get called while this
+ * function is in progress.
+ *
+ * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
+ * this function is in progress.
+ */
+void percpu_ref_resurrect(struct percpu_ref *ref)
+{
+ unsigned long __percpu *percpu_count;
+ unsigned long flags;
+
+ spin_lock_irqsave(&percpu_ref_switch_lock, flags);
+
+ WARN_ON_ONCE(!percpu_ref_is_dying(ref));
+ WARN_ON_ONCE(__ref_is_percpu(ref, &percpu_count));
+
+ ref->percpu_count_ptr &= ~__PERCPU_REF_DEAD;
+ percpu_ref_get(ref);
+ __percpu_ref_switch_mode(ref, NULL);
+
+ spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
+}
+EXPORT_SYMBOL_GPL(percpu_ref_resurrect);
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
new file mode 100644
index 000000000..9073430dc
--- /dev/null
+++ b/lib/percpu_counter.c
@@ -0,0 +1,294 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Fast batching percpu counters.
+ */
+
+#include <linux/percpu_counter.h>
+#include <linux/mutex.h>
+#include <linux/init.h>
+#include <linux/cpu.h>
+#include <linux/module.h>
+#include <linux/debugobjects.h>
+
+#ifdef CONFIG_HOTPLUG_CPU
+static LIST_HEAD(percpu_counters);
+static DEFINE_SPINLOCK(percpu_counters_lock);
+#endif
+
+#ifdef CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER
+
+static const struct debug_obj_descr percpu_counter_debug_descr;
+
+static bool percpu_counter_fixup_free(void *addr, enum debug_obj_state state)
+{
+ struct percpu_counter *fbc = addr;
+
+ switch (state) {
+ case ODEBUG_STATE_ACTIVE:
+ percpu_counter_destroy(fbc);
+ debug_object_free(fbc, &percpu_counter_debug_descr);
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct debug_obj_descr percpu_counter_debug_descr = {
+ .name = "percpu_counter",
+ .fixup_free = percpu_counter_fixup_free,
+};
+
+static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
+{
+ debug_object_init(fbc, &percpu_counter_debug_descr);
+ debug_object_activate(fbc, &percpu_counter_debug_descr);
+}
+
+static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
+{
+ debug_object_deactivate(fbc, &percpu_counter_debug_descr);
+ debug_object_free(fbc, &percpu_counter_debug_descr);
+}
+
+#else /* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */
+static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
+{ }
+static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
+{ }
+#endif /* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */
+
+void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
+{
+ int cpu;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&fbc->lock, flags);
+ for_each_possible_cpu(cpu) {
+ s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
+ *pcount = 0;
+ }
+ fbc->count = amount;
+ raw_spin_unlock_irqrestore(&fbc->lock, flags);
+}
+EXPORT_SYMBOL(percpu_counter_set);
+
+/*
+ * local_irq_save() is needed to make the function irq safe:
+ * - The slow path would be ok as protected by an irq-safe spinlock.
+ * - this_cpu_add would be ok as it is irq-safe by definition.
+ * But:
+ * The decision between slow path and fast path, and the actual update,
+ * must be atomic too. Otherwise a call in process context could check the
+ * current values and decide that the fast path can be used. If an interrupt
+ * then occurs before the this_cpu_add(), and the interrupt updates
+ * this_cpu(*fbc->counters), the this_cpu_add() executed after the interrupt
+ * has completed can produce values larger than "batch" or even overflow.
+ */
+void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
+{
+ s64 count;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ count = __this_cpu_read(*fbc->counters) + amount;
+ if (abs(count) >= batch) {
+ raw_spin_lock(&fbc->lock);
+ fbc->count += count;
+ __this_cpu_sub(*fbc->counters, count - amount);
+ raw_spin_unlock(&fbc->lock);
+ } else {
+ this_cpu_add(*fbc->counters, amount);
+ }
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL(percpu_counter_add_batch);
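+
+/*
+ * Illustrative sketch (not part of this file): a counter updated from hot
+ * paths uses a large explicit batch for cheap updates and pays for a
+ * precise sum only when one is needed. The name nr_items is an assumption
+ * for the example only.
+ */
+static struct percpu_counter nr_items;
+
+static int nr_items_example(void)
+{
+ int err = percpu_counter_init(&nr_items, 0, GFP_KERNEL);
+
+ if (err)
+ return err;
+
+ /* Cheap update; the per-CPU error stays below 1024 */
+ percpu_counter_add_batch(&nr_items, 1, 1024);
+
+ /* Precise (slow) sum vs. approximate (fast) read */
+ pr_info("exact=%lld approx=%lld\n",
+ (long long)percpu_counter_sum(&nr_items),
+ (long long)percpu_counter_read(&nr_items));
+
+ percpu_counter_destroy(&nr_items);
+ return 0;
+}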
+
+/*
+ * For a percpu_counter with a big batch, the deviation of its count can be
+ * large, and there may be a need to reduce it, for example when the
+ * counter's batch is decreased at runtime to get better accuracy. This can
+ * be achieved by running this sync function on each CPU.
+ */
+void percpu_counter_sync(struct percpu_counter *fbc)
+{
+ unsigned long flags;
+ s64 count;
+
+ raw_spin_lock_irqsave(&fbc->lock, flags);
+ count = __this_cpu_read(*fbc->counters);
+ fbc->count += count;
+ __this_cpu_sub(*fbc->counters, count);
+ raw_spin_unlock_irqrestore(&fbc->lock, flags);
+}
+EXPORT_SYMBOL(percpu_counter_sync);
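+
+/*
+ * Illustrative sketch (not part of this file): folding every CPU's partial
+ * count into fbc->count, e.g. after decreasing the batch at runtime. The
+ * wrapper names are assumptions for the example only.
+ */
+static void sync_one_cpu(void *info)
+{
+ percpu_counter_sync(info);
+}
+
+static void sync_all_cpus(struct percpu_counter *fbc)
+{
+ /* Runs the sync on every online CPU and waits for completion */
+ on_each_cpu(sync_one_cpu, fbc, 1);
+}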
+
+/*
+ * Add up all the per-cpu counts, return the result. This is a more accurate
+ * but much slower version of percpu_counter_read_positive().
+ *
+ * We use the cpu mask of (cpu_online_mask | cpu_dying_mask) to capture sums
+ * from CPUs that are in the process of being taken offline. Dying CPUs have
+ * been removed from the online mask, but may not have had the hotplug dead
+ * notifier called to fold the percpu count back into the global counter sum.
+ * By including dying CPUs in the iteration mask, we avoid this race condition
+ * so __percpu_counter_sum() just does the right thing when CPUs are being taken
+ * offline.
+ */
+s64 __percpu_counter_sum(struct percpu_counter *fbc)
+{
+ s64 ret;
+ int cpu;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&fbc->lock, flags);
+ ret = fbc->count;
+ for_each_cpu_or(cpu, cpu_online_mask, cpu_dying_mask) {
+ s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
+ ret += *pcount;
+ }
+ raw_spin_unlock_irqrestore(&fbc->lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL(__percpu_counter_sum);
+
+int __percpu_counter_init_many(struct percpu_counter *fbc, s64 amount,
+ gfp_t gfp, u32 nr_counters,
+ struct lock_class_key *key)
+{
+ unsigned long flags __maybe_unused;
+ size_t counter_size;
+ s32 __percpu *counters;
+ u32 i;
+
+ counter_size = ALIGN(sizeof(*counters), __alignof__(*counters));
+ counters = __alloc_percpu_gfp(nr_counters * counter_size,
+ __alignof__(*counters), gfp);
+ if (!counters) {
+ fbc[0].counters = NULL;
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < nr_counters; i++) {
+ raw_spin_lock_init(&fbc[i].lock);
+ lockdep_set_class(&fbc[i].lock, key);
+#ifdef CONFIG_HOTPLUG_CPU
+ INIT_LIST_HEAD(&fbc[i].list);
+#endif
+ fbc[i].count = amount;
+ fbc[i].counters = (void *)counters + (i * counter_size);
+
+ debug_percpu_counter_activate(&fbc[i]);
+ }
+
+#ifdef CONFIG_HOTPLUG_CPU
+ spin_lock_irqsave(&percpu_counters_lock, flags);
+ for (i = 0; i < nr_counters; i++)
+ list_add(&fbc[i].list, &percpu_counters);
+ spin_unlock_irqrestore(&percpu_counters_lock, flags);
+#endif
+ return 0;
+}
+EXPORT_SYMBOL(__percpu_counter_init_many);
+
+void percpu_counter_destroy_many(struct percpu_counter *fbc, u32 nr_counters)
+{
+ unsigned long flags __maybe_unused;
+ u32 i;
+
+ if (WARN_ON_ONCE(!fbc))
+ return;
+
+ if (!fbc[0].counters)
+ return;
+
+ for (i = 0; i < nr_counters; i++)
+ debug_percpu_counter_deactivate(&fbc[i]);
+
+#ifdef CONFIG_HOTPLUG_CPU
+ spin_lock_irqsave(&percpu_counters_lock, flags);
+ for (i = 0; i < nr_counters; i++)
+ list_del(&fbc[i].list);
+ spin_unlock_irqrestore(&percpu_counters_lock, flags);
+#endif
+
+ free_percpu(fbc[0].counters);
+
+ for (i = 0; i < nr_counters; i++)
+ fbc[i].counters = NULL;
+}
+EXPORT_SYMBOL(percpu_counter_destroy_many);
+
+int percpu_counter_batch __read_mostly = 32;
+EXPORT_SYMBOL(percpu_counter_batch);
+
+static int compute_batch_value(unsigned int cpu)
+{
+ int nr = num_online_cpus();
+
+ percpu_counter_batch = max(32, nr*2);
+ return 0;
+}
+
+static int percpu_counter_cpu_dead(unsigned int cpu)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+ struct percpu_counter *fbc;
+
+ compute_batch_value(cpu);
+
+ spin_lock_irq(&percpu_counters_lock);
+ list_for_each_entry(fbc, &percpu_counters, list) {
+ s32 *pcount;
+
+ raw_spin_lock(&fbc->lock);
+ pcount = per_cpu_ptr(fbc->counters, cpu);
+ fbc->count += *pcount;
+ *pcount = 0;
+ raw_spin_unlock(&fbc->lock);
+ }
+ spin_unlock_irq(&percpu_counters_lock);
+#endif
+ return 0;
+}
+
+/*
+ * Compare counter against given value.
+ * Return 1 if greater, 0 if equal and -1 if less
+ */
+int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
+{
+ s64 count;
+
+ count = percpu_counter_read(fbc);
+ /* Check to see if the rough count will be sufficient for comparison */
+ if (abs(count - rhs) > (batch * num_online_cpus())) {
+ if (count > rhs)
+ return 1;
+ else
+ return -1;
+ }
+ /* Need to use precise count */
+ count = percpu_counter_sum(fbc);
+ if (count > rhs)
+ return 1;
+ else if (count < rhs)
+ return -1;
+ else
+ return 0;
+}
+EXPORT_SYMBOL(__percpu_counter_compare);
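+
+/*
+ * Illustrative sketch (not part of this file): a common use of the compare
+ * helper is enforcing a limit without paying for a precise sum on every
+ * call. The names used_blocks and limit are assumptions for the example.
+ */
+static bool example_over_limit(struct percpu_counter *used_blocks, s64 limit)
+{
+ /* Falls back to a precise sum only when the rough count is within
+ * batch * num_online_cpus() of the limit.
+ */
+ return __percpu_counter_compare(used_blocks, limit,
+ percpu_counter_batch) >= 0;
+}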
+
+static int __init percpu_counter_startup(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "lib/percpu_cnt:online",
+ compute_batch_value, NULL);
+ WARN_ON(ret < 0);
+ ret = cpuhp_setup_state_nocalls(CPUHP_PERCPU_CNT_DEAD,
+ "lib/percpu_cnt:dead", NULL,
+ percpu_counter_cpu_dead);
+ WARN_ON(ret < 0);
+ return 0;
+}
+module_init(percpu_counter_startup);
diff --git a/lib/percpu_test.c b/lib/percpu_test.c
new file mode 100644
index 000000000..4a3d70bbc
--- /dev/null
+++ b/lib/percpu_test.c
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/module.h>
+
+/* validate @native and @pcp counter values match @expected */
+#define CHECK(native, pcp, expected) \
+ do { \
+ WARN((native) != (expected), \
+ "raw %ld (0x%lx) != expected %lld (0x%llx)", \
+ (native), (native), \
+ (long long)(expected), (long long)(expected)); \
+ WARN(__this_cpu_read(pcp) != (expected), \
+ "pcp %ld (0x%lx) != expected %lld (0x%llx)", \
+ __this_cpu_read(pcp), __this_cpu_read(pcp), \
+ (long long)(expected), (long long)(expected)); \
+ } while (0)
+
+static DEFINE_PER_CPU(long, long_counter);
+static DEFINE_PER_CPU(unsigned long, ulong_counter);
+
+static int __init percpu_test_init(void)
+{
+ /*
+ * volatile prevents the compiler from optimizing its uses, otherwise
+ * the +ui_one/-ui_one operations below would be replaced with inc/dec
+ * instructions.
+ */
+ volatile unsigned int ui_one = 1;
+ long l = 0;
+ unsigned long ul = 0;
+
+ pr_info("percpu test start\n");
+
+ preempt_disable();
+
+ l += -1;
+ __this_cpu_add(long_counter, -1);
+ CHECK(l, long_counter, -1);
+
+ l += 1;
+ __this_cpu_add(long_counter, 1);
+ CHECK(l, long_counter, 0);
+
+ ul = 0;
+ __this_cpu_write(ulong_counter, 0);
+
+ ul += 1UL;
+ __this_cpu_add(ulong_counter, 1UL);
+ CHECK(ul, ulong_counter, 1);
+
+ ul += -1UL;
+ __this_cpu_add(ulong_counter, -1UL);
+ CHECK(ul, ulong_counter, 0);
+
+ ul += -(unsigned long)1;
+ __this_cpu_add(ulong_counter, -(unsigned long)1);
+ CHECK(ul, ulong_counter, -1);
+
+ ul = 0;
+ __this_cpu_write(ulong_counter, 0);
+
+ ul -= 1;
+ __this_cpu_dec(ulong_counter);
+ CHECK(ul, ulong_counter, -1);
+ CHECK(ul, ulong_counter, ULONG_MAX);
+
+ l += -ui_one;
+ __this_cpu_add(long_counter, -ui_one);
+ CHECK(l, long_counter, 0xffffffff);
+
+ l += ui_one;
+ __this_cpu_add(long_counter, ui_one);
+ CHECK(l, long_counter, (long)0x100000000LL);
+
+
+ l = 0;
+ __this_cpu_write(long_counter, 0);
+
+ l -= ui_one;
+ __this_cpu_sub(long_counter, ui_one);
+ CHECK(l, long_counter, -1);
+
+ l = 0;
+ __this_cpu_write(long_counter, 0);
+
+ l += ui_one;
+ __this_cpu_add(long_counter, ui_one);
+ CHECK(l, long_counter, 1);
+
+ l += -ui_one;
+ __this_cpu_add(long_counter, -ui_one);
+ CHECK(l, long_counter, (long)0x100000000LL);
+
+ l = 0;
+ __this_cpu_write(long_counter, 0);
+
+ l -= ui_one;
+ this_cpu_sub(long_counter, ui_one);
+ CHECK(l, long_counter, -1);
+ CHECK(l, long_counter, ULONG_MAX);
+
+ ul = 0;
+ __this_cpu_write(ulong_counter, 0);
+
+ ul += ui_one;
+ __this_cpu_add(ulong_counter, ui_one);
+ CHECK(ul, ulong_counter, 1);
+
+ ul = 0;
+ __this_cpu_write(ulong_counter, 0);
+
+ ul -= ui_one;
+ __this_cpu_sub(ulong_counter, ui_one);
+ CHECK(ul, ulong_counter, -1);
+ CHECK(ul, ulong_counter, ULONG_MAX);
+
+ ul = 3;
+ __this_cpu_write(ulong_counter, 3);
+
+ ul = this_cpu_sub_return(ulong_counter, ui_one);
+ CHECK(ul, ulong_counter, 2);
+
+ ul = __this_cpu_sub_return(ulong_counter, ui_one);
+ CHECK(ul, ulong_counter, 1);
+
+ preempt_enable();
+
+ pr_info("percpu test done\n");
+ return -EAGAIN; /* Fail will directly unload the module */
+}
+
+static void __exit percpu_test_exit(void)
+{
+}
+
+module_init(percpu_test_init)
+module_exit(percpu_test_exit)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Greg Thelen");
+MODULE_DESCRIPTION("percpu operations test");
diff --git a/lib/pldmfw/Makefile b/lib/pldmfw/Makefile
new file mode 100644
index 000000000..99ad10711
--- /dev/null
+++ b/lib/pldmfw/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_PLDMFW) += pldmfw.o
diff --git a/lib/pldmfw/pldmfw.c b/lib/pldmfw/pldmfw.c
new file mode 100644
index 000000000..54e1809a3
--- /dev/null
+++ b/lib/pldmfw/pldmfw.c
@@ -0,0 +1,878 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018-2019, Intel Corporation. */
+
+#include <asm/unaligned.h>
+#include <linux/crc32.h>
+#include <linux/device.h>
+#include <linux/firmware.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pldmfw.h>
+#include <linux/slab.h>
+#include <linux/uuid.h>
+
+#include "pldmfw_private.h"
+
+/* Internal structure used to store details about the PLDM image file as it is
+ * being validated and processed.
+ */
+struct pldmfw_priv {
+ struct pldmfw *context;
+ const struct firmware *fw;
+
+ /* current offset of firmware image */
+ size_t offset;
+
+ struct list_head records;
+ struct list_head components;
+
+ /* PLDM Firmware Package Header */
+ const struct __pldm_header *header;
+ u16 total_header_size;
+
+ /* length of the component bitmap */
+ u16 component_bitmap_len;
+ u16 bitmap_size;
+
+ /* Start of the component image information */
+ u16 component_count;
+ const u8 *component_start;
+
+ /* Start of the firmware device id records */
+ const u8 *record_start;
+ u8 record_count;
+
+ /* The CRC at the end of the package header */
+ u32 header_crc;
+
+ struct pldmfw_record *matching_record;
+};
+
+/**
+ * pldm_check_fw_space - Verify that the firmware image has space left
+ * @data: pointer to private data
+ * @offset: offset to start from
+ * @length: length to check for
+ *
+ * Verify that the firmware data can hold a chunk of bytes with the specified
+ * offset and length.
+ *
+ * Returns: zero on success, or -EFAULT if the image does not have enough
+ * space left to fit the expected length.
+ */
+static int
+pldm_check_fw_space(struct pldmfw_priv *data, size_t offset, size_t length)
+{
+ size_t expected_size = offset + length;
+ struct device *dev = data->context->dev;
+
+ if (data->fw->size < expected_size) {
+ dev_dbg(dev, "Firmware file size smaller than expected. Got %zu bytes, needed %zu bytes\n",
+ data->fw->size, expected_size);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/**
+ * pldm_move_fw_offset - Move the current firmware offset forward
+ * @data: pointer to private data
+ * @bytes_to_move: number of bytes to move the offset forward by
+ *
+ * Check that there is enough space past the current offset, and then move the
+ * offset forward by this amount.
+ *
+ * Returns: zero on success, or -EFAULT if the image is too small to fit the
+ * expected length.
+ */
+static int
+pldm_move_fw_offset(struct pldmfw_priv *data, size_t bytes_to_move)
+{
+ int err;
+
+ err = pldm_check_fw_space(data, data->offset, bytes_to_move);
+ if (err)
+ return err;
+
+ data->offset += bytes_to_move;
+
+ return 0;
+}
+
+/**
+ * pldm_parse_header - Validate and extract details about the PLDM header
+ * @data: pointer to private data
+ *
+ * Performs initial basic verification of the PLDM image, up to the first
+ * firmware record.
+ *
+ * This includes the following checks and extractions:
+ *
+ * * Verify that the UUID at the start of the header matches the expected
+ * value as defined in the DSP0267 PLDM specification
+ * * Check that the revision is 0x01
+ * * Extract the total header_size and verify that the image is large enough
+ * to contain at least the length of this header
+ * * Extract the size of the component bitmap length
+ * * Extract a pointer to the start of the record area
+ *
+ * Returns: zero on success, or a negative error code on failure.
+ */
+static int pldm_parse_header(struct pldmfw_priv *data)
+{
+ const struct __pldmfw_record_area *record_area;
+ struct device *dev = data->context->dev;
+ const struct __pldm_header *header;
+ size_t header_size;
+ int err;
+
+ err = pldm_move_fw_offset(data, sizeof(*header));
+ if (err)
+ return err;
+
+ header = (const struct __pldm_header *)data->fw->data;
+ data->header = header;
+
+ if (!uuid_equal(&header->id, &pldm_firmware_header_id)) {
+ dev_dbg(dev, "Invalid package header identifier. Expected UUID %pUB, but got %pUB\n",
+ &pldm_firmware_header_id, &header->id);
+ return -EINVAL;
+ }
+
+ if (header->revision != PACKAGE_HEADER_FORMAT_REVISION) {
+ dev_dbg(dev, "Invalid package header revision. Expected revision %u but got %u\n",
+ PACKAGE_HEADER_FORMAT_REVISION, header->revision);
+ return -EOPNOTSUPP;
+ }
+
+ data->total_header_size = get_unaligned_le16(&header->size);
+ header_size = data->total_header_size - sizeof(*header);
+
+ err = pldm_check_fw_space(data, data->offset, header_size);
+ if (err)
+ return err;
+
+ data->component_bitmap_len =
+ get_unaligned_le16(&header->component_bitmap_len);
+
+ if (data->component_bitmap_len % 8 != 0) {
+ dev_dbg(dev, "Invalid component bitmap length. The length is %u, which is not a multiple of 8\n",
+ data->component_bitmap_len);
+ return -EINVAL;
+ }
+
+ data->bitmap_size = data->component_bitmap_len / 8;
+
+ err = pldm_move_fw_offset(data, header->version_len);
+ if (err)
+ return err;
+
+ /* extract a pointer to the record area, which just follows the main
+ * PLDM header data.
+ */
+ record_area = (const struct __pldmfw_record_area *)(data->fw->data +
+ data->offset);
+
+ err = pldm_move_fw_offset(data, sizeof(*record_area));
+ if (err)
+ return err;
+
+ data->record_count = record_area->record_count;
+ data->record_start = record_area->records;
+
+ return 0;
+}
+
+/**
+ * pldm_check_desc_tlv_len - Check that the length matches expectation
+ * @data: pointer to image details
+ * @type: the descriptor type
+ * @size: the length from the descriptor header
+ *
+ * If the descriptor type is one of the documented descriptor types according
+ * to the standard, verify that the provided length matches.
+ *
+ * If the type is not recognized or is VENDOR_DEFINED, return zero.
+ *
+ * Returns: zero on success, or -EINVAL if the specified size of a standard
+ * TLV does not match the expected value defined for that TLV.
+ */
+static int
+pldm_check_desc_tlv_len(struct pldmfw_priv *data, u16 type, u16 size)
+{
+ struct device *dev = data->context->dev;
+ u16 expected_size;
+
+ switch (type) {
+ case PLDM_DESC_ID_PCI_VENDOR_ID:
+ case PLDM_DESC_ID_PCI_DEVICE_ID:
+ case PLDM_DESC_ID_PCI_SUBVENDOR_ID:
+ case PLDM_DESC_ID_PCI_SUBDEV_ID:
+ expected_size = 2;
+ break;
+ case PLDM_DESC_ID_PCI_REVISION_ID:
+ expected_size = 1;
+ break;
+ case PLDM_DESC_ID_PNP_VENDOR_ID:
+ expected_size = 3;
+ break;
+ case PLDM_DESC_ID_IANA_ENTERPRISE_ID:
+ case PLDM_DESC_ID_ACPI_VENDOR_ID:
+ case PLDM_DESC_ID_PNP_PRODUCT_ID:
+ case PLDM_DESC_ID_ACPI_PRODUCT_ID:
+ expected_size = 4;
+ break;
+ case PLDM_DESC_ID_UUID:
+ expected_size = 16;
+ break;
+ case PLDM_DESC_ID_VENDOR_DEFINED:
+ return 0;
+ default:
+ /* Do not report an error on an unexpected TLV */
+ dev_dbg(dev, "Found unrecognized TLV type 0x%04x\n", type);
+ return 0;
+ }
+
+ if (size != expected_size) {
+ dev_dbg(dev, "Found TLV type 0x%04x with unexpected length. Got %u bytes, but expected %u bytes\n",
+ type, size, expected_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * pldm_parse_desc_tlvs - Check and skip past a number of TLVs
+ * @data: pointer to private data
+ * @record: pointer to the record this TLV belongs to
+ * @desc_count: descriptor count
+ *
+ * From the current offset, read and extract the descriptor TLVs, updating the
+ * current offset each time.
+ *
+ * Returns: zero on success, or a negative error code on failure.
+ */
+static int
+pldm_parse_desc_tlvs(struct pldmfw_priv *data, struct pldmfw_record *record, u8 desc_count)
+{
+ const struct __pldmfw_desc_tlv *__desc;
+ const u8 *desc_start;
+ u8 i;
+
+ desc_start = data->fw->data + data->offset;
+
+ pldm_for_each_desc_tlv(i, __desc, desc_start, desc_count) {
+ struct pldmfw_desc_tlv *desc;
+ int err;
+ u16 type, size;
+
+ err = pldm_move_fw_offset(data, sizeof(*__desc));
+ if (err)
+ return err;
+
+ type = get_unaligned_le16(&__desc->type);
+
+ /* According to DSP0267, this only includes the data field */
+ size = get_unaligned_le16(&__desc->size);
+
+ err = pldm_check_desc_tlv_len(data, type, size);
+ if (err)
+ return err;
+
+ /* check that we have space and move the offset forward */
+ err = pldm_move_fw_offset(data, size);
+ if (err)
+ return err;
+
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ desc->type = type;
+ desc->size = size;
+ desc->data = __desc->data;
+
+ list_add_tail(&desc->entry, &record->descs);
+ }
+
+ return 0;
+}
+
+/**
+ * pldm_parse_one_record - Verify size of one PLDM record
+ * @data: pointer to image details
+ * @__record: pointer to the record to check
+ *
+ * This function checks that the record size does not exceed either the size
+ * of the firmware file or the total length specified in the header section.
+ *
+ * It also verifies that the recorded length of the start of the record
+ * matches the size calculated by adding the static structure length, the
+ * component bitmap length, the version string length, the length of all
+ * descriptor TLVs, and the length of the package data.
+ *
+ * Returns: zero on success, or a negative error code on failure.
+ */
+static int
+pldm_parse_one_record(struct pldmfw_priv *data,
+ const struct __pldmfw_record_info *__record)
+{
+ struct pldmfw_record *record;
+ size_t measured_length;
+ int err;
+ const u8 *bitmap_ptr;
+ u16 record_len;
+ int i;
+
+ /* Make a copy and insert it into the record list */
+ record = kzalloc(sizeof(*record), GFP_KERNEL);
+ if (!record)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&record->descs);
+ list_add_tail(&record->entry, &data->records);
+
+ /* Then check that we have space and move the offset */
+ err = pldm_move_fw_offset(data, sizeof(*__record));
+ if (err)
+ return err;
+
+ record_len = get_unaligned_le16(&__record->record_len);
+ record->package_data_len = get_unaligned_le16(&__record->package_data_len);
+ record->version_len = __record->version_len;
+ record->version_type = __record->version_type;
+
+ bitmap_ptr = data->fw->data + data->offset;
+
+ /* check that we have space for the component bitmap length */
+ err = pldm_move_fw_offset(data, data->bitmap_size);
+ if (err)
+ return err;
+
+ record->component_bitmap_len = data->component_bitmap_len;
+ record->component_bitmap = bitmap_zalloc(record->component_bitmap_len,
+ GFP_KERNEL);
+ if (!record->component_bitmap)
+ return -ENOMEM;
+
+ for (i = 0; i < data->bitmap_size; i++)
+ bitmap_set_value8(record->component_bitmap, bitmap_ptr[i], i * 8);
+
+ record->version_string = data->fw->data + data->offset;
+
+ err = pldm_move_fw_offset(data, record->version_len);
+ if (err)
+ return err;
+
+ /* Scan through the descriptor TLVs and find the end */
+ err = pldm_parse_desc_tlvs(data, record, __record->descriptor_count);
+ if (err)
+ return err;
+
+ record->package_data = data->fw->data + data->offset;
+
+ err = pldm_move_fw_offset(data, record->package_data_len);
+ if (err)
+ return err;
+
+ measured_length = data->offset - ((const u8 *)__record - data->fw->data);
+ if (measured_length != record_len) {
+ dev_dbg(data->context->dev, "Unexpected record length. Measured record length is %zu bytes, expected length is %u bytes\n",
+ measured_length, record_len);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/**
+ * pldm_parse_records - Locate the start of the component area
+ * @data: pointer to private data
+ *
+ * Extract the record count, and loop through each record, searching for the
+ * component area.
+ *
+ * Returns: zero on success, or a negative error code on failure.
+ */
+static int pldm_parse_records(struct pldmfw_priv *data)
+{
+ const struct __pldmfw_component_area *component_area;
+ const struct __pldmfw_record_info *record;
+ int err;
+ u8 i;
+
+ pldm_for_each_record(i, record, data->record_start, data->record_count) {
+ err = pldm_parse_one_record(data, record);
+ if (err)
+ return err;
+ }
+
+ /* Extract a pointer to the component area, which just follows the
+ * PLDM device record data.
+ */
+ component_area = (const struct __pldmfw_component_area *)(data->fw->data + data->offset);
+
+ err = pldm_move_fw_offset(data, sizeof(*component_area));
+ if (err)
+ return err;
+
+ data->component_count =
+ get_unaligned_le16(&component_area->component_image_count);
+ data->component_start = component_area->components;
+
+ return 0;
+}
+
+/**
+ * pldm_parse_components - Locate the CRC header checksum
+ * @data: pointer to private data
+ *
+ * Extract the component count, and find the pointer to the component area.
+ * Scan through each component searching for the end, which should point to
+ * the package header checksum.
+ *
+ * Extract the package header CRC and save it for verification.
+ *
+ * Returns: zero on success, or a negative error code on failure.
+ */
+static int pldm_parse_components(struct pldmfw_priv *data)
+{
+ const struct __pldmfw_component_info *__component;
+ struct device *dev = data->context->dev;
+ const u8 *header_crc_ptr;
+ int err;
+ u8 i;
+
+ pldm_for_each_component(i, __component, data->component_start, data->component_count) {
+ struct pldmfw_component *component;
+ u32 offset, size;
+
+ err = pldm_move_fw_offset(data, sizeof(*__component));
+ if (err)
+ return err;
+
+ err = pldm_move_fw_offset(data, __component->version_len);
+ if (err)
+ return err;
+
+ offset = get_unaligned_le32(&__component->location_offset);
+ size = get_unaligned_le32(&__component->size);
+
+ err = pldm_check_fw_space(data, offset, size);
+ if (err)
+ return err;
+
+ component = kzalloc(sizeof(*component), GFP_KERNEL);
+ if (!component)
+ return -ENOMEM;
+
+ component->index = i;
+ component->classification = get_unaligned_le16(&__component->classification);
+ component->identifier = get_unaligned_le16(&__component->identifier);
+ component->comparison_stamp = get_unaligned_le32(&__component->comparison_stamp);
+ component->options = get_unaligned_le16(&__component->options);
+ component->activation_method = get_unaligned_le16(&__component->activation_method);
+ component->version_type = __component->version_type;
+ component->version_len = __component->version_len;
+ component->version_string = __component->version_string;
+ component->component_data = data->fw->data + offset;
+ component->component_size = size;
+
+ list_add_tail(&component->entry, &data->components);
+ }
+
+ header_crc_ptr = data->fw->data + data->offset;
+
+ err = pldm_move_fw_offset(data, sizeof(data->header_crc));
+ if (err)
+ return err;
+
+ /* Make sure that we reached the expected offset */
+ if (data->offset != data->total_header_size) {
+ dev_dbg(dev, "Invalid firmware header size. Expected %u but got %zu\n",
+ data->total_header_size, data->offset);
+ return -EFAULT;
+ }
+
+ data->header_crc = get_unaligned_le32(header_crc_ptr);
+
+ return 0;
+}
+
+/**
+ * pldm_verify_header_crc - Verify that the CRC in the header matches
+ * @data: pointer to private data
+ *
+ * Calculates the 32-bit CRC using the standard IEEE 802.3 CRC polynomial and
+ * compares it to the value stored in the header.
+ *
+ * Returns: zero on success if the CRC matches, or -EBADMSG on an invalid CRC.
+ */
+static int pldm_verify_header_crc(struct pldmfw_priv *data)
+{
+ struct device *dev = data->context->dev;
+ u32 calculated_crc;
+ size_t length;
+
+ /* Calculate the 32-bit CRC of the header contents up to but
+ * not including the checksum. Note that the Linux crc32_le function
+ * does not perform an expected final XOR.
+ */
+ length = data->offset - sizeof(data->header_crc);
+ calculated_crc = crc32_le(~0, data->fw->data, length) ^ ~0;
+
+ if (calculated_crc != data->header_crc) {
+ dev_dbg(dev, "Invalid CRC in firmware header. Got 0x%08x but expected 0x%08x\n",
+ calculated_crc, data->header_crc);
+ return -EBADMSG;
+ }
+
+ return 0;
+}
+
+/**
+ * pldmfw_free_priv - Free memory allocated while parsing the PLDM image
+ * @data: pointer to the PLDM data structure
+ *
+ * Loops through and clears all allocated memory associated with each
+ * allocated descriptor, record, and component.
+ */
+static void pldmfw_free_priv(struct pldmfw_priv *data)
+{
+ struct pldmfw_component *component, *c_safe;
+ struct pldmfw_record *record, *r_safe;
+ struct pldmfw_desc_tlv *desc, *d_safe;
+
+ list_for_each_entry_safe(component, c_safe, &data->components, entry) {
+ list_del(&component->entry);
+ kfree(component);
+ }
+
+ list_for_each_entry_safe(record, r_safe, &data->records, entry) {
+ list_for_each_entry_safe(desc, d_safe, &record->descs, entry) {
+ list_del(&desc->entry);
+ kfree(desc);
+ }
+
+ if (record->component_bitmap) {
+ bitmap_free(record->component_bitmap);
+ record->component_bitmap = NULL;
+ }
+
+ list_del(&record->entry);
+ kfree(record);
+ }
+}
+
+/**
+ * pldm_parse_image - parse and extract details from PLDM image
+ * @data: pointer to private data
+ *
+ * Verify that the firmware file contains valid data for a PLDM firmware
+ * file. Extract useful pointers and data from the firmware file and store
+ * them in the data structure.
+ *
+ * The PLDM firmware file format is defined in DMTF DSP0267 1.0.0. Care
+ * should be taken to use get_unaligned_le* when accessing data from the
+ * pointers in data.
+ *
+ * Returns: zero on success, or a negative error code on failure.
+ */
+static int pldm_parse_image(struct pldmfw_priv *data)
+{
+ int err;
+
+ if (WARN_ON(!(data->context->dev && data->fw->data && data->fw->size)))
+ return -EINVAL;
+
+ err = pldm_parse_header(data);
+ if (err)
+ return err;
+
+ err = pldm_parse_records(data);
+ if (err)
+ return err;
+
+ err = pldm_parse_components(data);
+ if (err)
+ return err;
+
+ return pldm_verify_header_crc(data);
+}
+
+/* these are u32 so that we can store PCI_ANY_ID */
+struct pldm_pci_record_id {
+ int vendor;
+ int device;
+ int subsystem_vendor;
+ int subsystem_device;
+};
+
+/**
+ * pldmfw_op_pci_match_record - Check if a PCI device matches the record
+ * @context: PLDM fw update structure
+ * @record: list of records extracted from the PLDM image
+ *
+ * Determine whether the PCI device associated with this context matches the
+ * record data provided.
+ *
+ * Searches the descriptor TLVs and extracts the relevant descriptor data into
+ * a pldm_pci_record_id. This is then compared against the PCI device ID
+ * information.
+ *
+ * Returns: true if the device matches the record, false otherwise.
+ */
+bool pldmfw_op_pci_match_record(struct pldmfw *context, struct pldmfw_record *record)
+{
+ struct pci_dev *pdev = to_pci_dev(context->dev);
+ struct pldm_pci_record_id id = {
+ .vendor = PCI_ANY_ID,
+ .device = PCI_ANY_ID,
+ .subsystem_vendor = PCI_ANY_ID,
+ .subsystem_device = PCI_ANY_ID,
+ };
+ struct pldmfw_desc_tlv *desc;
+
+ list_for_each_entry(desc, &record->descs, entry) {
+ u16 value;
+ int *ptr;
+
+ switch (desc->type) {
+ case PLDM_DESC_ID_PCI_VENDOR_ID:
+ ptr = &id.vendor;
+ break;
+ case PLDM_DESC_ID_PCI_DEVICE_ID:
+ ptr = &id.device;
+ break;
+ case PLDM_DESC_ID_PCI_SUBVENDOR_ID:
+ ptr = &id.subsystem_vendor;
+ break;
+ case PLDM_DESC_ID_PCI_SUBDEV_ID:
+ ptr = &id.subsystem_device;
+ break;
+ default:
+ /* Skip unrelated TLVs */
+ continue;
+ }
+
+ value = get_unaligned_le16(desc->data);
+ /* A value of zero for one of the descriptors is sometimes
+ * used when the record should ignore this field when matching
+ * the device, for example if the record applies to any
+ * subsystem device or vendor.
+ */
+ if (value)
+ *ptr = (int)value;
+ else
+ *ptr = PCI_ANY_ID;
+ }
+
+ if ((id.vendor == PCI_ANY_ID || id.vendor == pdev->vendor) &&
+ (id.device == PCI_ANY_ID || id.device == pdev->device) &&
+ (id.subsystem_vendor == PCI_ANY_ID || id.subsystem_vendor == pdev->subsystem_vendor) &&
+ (id.subsystem_device == PCI_ANY_ID || id.subsystem_device == pdev->subsystem_device))
+ return true;
+ else
+ return false;
+}
+EXPORT_SYMBOL(pldmfw_op_pci_match_record);
+
+/**
+ * pldm_find_matching_record - Find the first matching PLDM record
+ * @data: pointer to private data
+ *
+ * Search through PLDM records and find the first matching entry. It is
+ * expected that only one entry matches.
+ *
+ * Store a pointer to the matching record, if found.
+ *
+ * Returns: zero on success, or -ENOENT if no matching record is found.
+ */
+static int pldm_find_matching_record(struct pldmfw_priv *data)
+{
+ struct pldmfw_record *record;
+
+ list_for_each_entry(record, &data->records, entry) {
+ if (data->context->ops->match_record(data->context, record)) {
+ data->matching_record = record;
+ return 0;
+ }
+ }
+
+ return -ENOENT;
+}
+
+/**
+ * pldm_send_package_data - Send firmware the package data for the record
+ * @data: pointer to private data
+ *
+ * Send the package data associated with the matching record to the firmware,
+ * using the send_pkg_data operation.
+ *
+ * Returns: zero on success, or a negative error code on failure.
+ */
+static int
+pldm_send_package_data(struct pldmfw_priv *data)
+{
+ struct pldmfw_record *record = data->matching_record;
+ const struct pldmfw_ops *ops = data->context->ops;
+
+ return ops->send_package_data(data->context, record->package_data,
+ record->package_data_len);
+}
+
+/**
+ * pldm_send_component_tables - Send component table information to firmware
+ * @data: pointer to private data
+ *
+ * Loop over each component, sending the applicable components to the firmware
+ * via the send_component_table operation.
+ *
+ * Returns: zero on success, or a negative error code on failure.
+ */
+static int
+pldm_send_component_tables(struct pldmfw_priv *data)
+{
+ unsigned long *bitmap = data->matching_record->component_bitmap;
+ struct pldmfw_component *component;
+ int err;
+
+ list_for_each_entry(component, &data->components, entry) {
+ u8 index = component->index, transfer_flag = 0;
+
+ /* Skip components which are not intended for this device */
+ if (!test_bit(index, bitmap))
+ continue;
+
+ /* determine whether this is the start, middle, end, or both
+ * the start and end of the component tables
+ */
+ if (index == find_first_bit(bitmap, data->component_bitmap_len))
+ transfer_flag |= PLDM_TRANSFER_FLAG_START;
+ if (index == find_last_bit(bitmap, data->component_bitmap_len))
+ transfer_flag |= PLDM_TRANSFER_FLAG_END;
+ if (!transfer_flag)
+ transfer_flag = PLDM_TRANSFER_FLAG_MIDDLE;
+
+ err = data->context->ops->send_component_table(data->context,
+ component,
+ transfer_flag);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * pldm_flash_components - Program each component to device flash
+ * @data: pointer to private data
+ *
+ * Loop through each component that is active for the matching device record,
+ * and send it to the device driver for flashing.
+ *
+ * Returns: zero on success, or a negative error code on failure.
+ */
+static int pldm_flash_components(struct pldmfw_priv *data)
+{
+ unsigned long *bitmap = data->matching_record->component_bitmap;
+ struct pldmfw_component *component;
+ int err;
+
+ list_for_each_entry(component, &data->components, entry) {
+ u8 index = component->index;
+
+ /* Skip components which are not intended for this device */
+ if (!test_bit(index, bitmap))
+ continue;
+
+ err = data->context->ops->flash_component(data->context, component);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * pldm_finalize_update - Finalize the device flash update
+ * @data: pointer to private data
+ *
+ * Tell the device driver to perform any remaining logic to complete the
+ * device update.
+ *
+ * Returns: zero on success, or a negative error code indicating the reason
+ * for failure.
+ */
+static int pldm_finalize_update(struct pldmfw_priv *data)
+{
+ if (data->context->ops->finalize_update)
+ return data->context->ops->finalize_update(data->context);
+
+ return 0;
+}
+
+/**
+ * pldmfw_flash_image - Write a PLDM-formatted firmware image to the device
+ * @context: ops and data for firmware update
+ * @fw: firmware object pointing to the relevant firmware file to program
+ *
+ * Parse the data for a given firmware file, verifying that it is a valid PLDM
+ * formatted image that matches this device.
+ *
+ * Extract the device record Package Data and Component Tables and send them
+ * to the device firmware. Extract and write the flash data for each of the
+ * components indicated in the firmware file.
+ *
+ * Returns: zero on success, or a negative error code on failure.
+ */
+int pldmfw_flash_image(struct pldmfw *context, const struct firmware *fw)
+{
+ struct pldmfw_priv *data;
+ int err;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&data->records);
+ INIT_LIST_HEAD(&data->components);
+
+ data->fw = fw;
+ data->context = context;
+
+ err = pldm_parse_image(data);
+ if (err)
+ goto out_release_data;
+
+ err = pldm_find_matching_record(data);
+ if (err)
+ goto out_release_data;
+
+ err = pldm_send_package_data(data);
+ if (err)
+ goto out_release_data;
+
+ err = pldm_send_component_tables(data);
+ if (err)
+ goto out_release_data;
+
+ err = pldm_flash_components(data);
+ if (err)
+ goto out_release_data;
+
+ err = pldm_finalize_update(data);
+
+out_release_data:
+ pldmfw_free_priv(data);
+ kfree(data);
+
+ return err;
+}
+EXPORT_SYMBOL(pldmfw_flash_image);
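+
+/*
+ * Illustrative sketch (not part of this file): how a PCI device driver
+ * might wire up this library. The callback bodies and the my_* names are
+ * assumptions for the example only; a real driver would talk to its
+ * device firmware in each callback.
+ */
+static int my_send_package_data(struct pldmfw *context, const u8 *data,
+ u16 length)
+{
+ return 0;
+}
+
+static int my_send_component_table(struct pldmfw *context,
+ struct pldmfw_component *component,
+ u8 transfer_flag)
+{
+ return 0;
+}
+
+static int my_flash_component(struct pldmfw *context,
+ struct pldmfw_component *component)
+{
+ /* Write component->component_data (component->component_size bytes)
+ * to the flash region selected by component->identifier.
+ */
+ return 0;
+}
+
+static const struct pldmfw_ops my_pldmfw_ops = {
+ .match_record = pldmfw_op_pci_match_record,
+ .send_package_data = my_send_package_data,
+ .send_component_table = my_send_component_table,
+ .flash_component = my_flash_component,
+ /* .finalize_update is optional */
+};
+
+static int my_update(struct device *dev, const struct firmware *fw)
+{
+ struct pldmfw context = {
+ .ops = &my_pldmfw_ops,
+ .dev = dev,
+ };
+
+ return pldmfw_flash_image(&context, fw);
+}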
+
+MODULE_AUTHOR("Jacob Keller <jacob.e.keller@intel.com>");
+MODULE_DESCRIPTION("PLDM firmware flash update library");
diff --git a/lib/pldmfw/pldmfw_private.h b/lib/pldmfw/pldmfw_private.h
new file mode 100644
index 000000000..687ef2200
--- /dev/null
+++ b/lib/pldmfw/pldmfw_private.h
@@ -0,0 +1,238 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2019, Intel Corporation. */
+
+#ifndef _PLDMFW_PRIVATE_H_
+#define _PLDMFW_PRIVATE_H_
+
+/* The following data structures define the layout of a firmware binary
+ * following the "PLDM For Firmware Update Specification", DMTF standard
+ * #DSP0267.
+ *
+ * pldmfw.c uses these structures to implement a simple engine that will parse
+ * a fw binary file in this format and perform a firmware update for a given
+ * device.
+ *
+ * Due to the variable sized data layout, alignment of fields within these
+ * structures is not guaranteed when reading. For this reason, all multi-byte
+ * field accesses should be done using the unaligned access macros.
+ * Additionally, the standard specifies that multi-byte fields are in
+ * LittleEndian format.
+ *
+ * The structure definitions are not made public, in order to keep direct
+ * accesses within code that is prepared to deal with the limitation of
+ * unaligned access.
+ */
+
+/* UUID for PLDM firmware packages: f018878c-cb7d-4943-9800-a02f059aca02 */
+static const uuid_t pldm_firmware_header_id =
+ UUID_INIT(0xf018878c, 0xcb7d, 0x4943,
+ 0x98, 0x00, 0xa0, 0x2f, 0x05, 0x9a, 0xca, 0x02);
+
+/* Revision number of the PLDM header format this code supports */
+#define PACKAGE_HEADER_FORMAT_REVISION 0x01
+
+/* timestamp104 structure defined in PLDM Base specification */
+#define PLDM_TIMESTAMP_SIZE 13
+struct __pldm_timestamp {
+ u8 b[PLDM_TIMESTAMP_SIZE];
+} __packed __aligned(1);
+
+/* Package Header Information */
+struct __pldm_header {
+ uuid_t id; /* PackageHeaderIdentifier */
+ u8 revision; /* PackageHeaderFormatRevision */
+ __le16 size; /* PackageHeaderSize */
+ struct __pldm_timestamp release_date; /* PackageReleaseDateTime */
+ __le16 component_bitmap_len; /* ComponentBitmapBitLength */
+ u8 version_type; /* PackageVersionStringType */
+ u8 version_len; /* PackageVersionStringLength */
+
+ /*
+ * DSP0267 also includes the following variable length fields at the
+ * end of this structure:
+ *
+ * PackageVersionString, length is version_len.
+ *
+ * The total size of this section is
+ * sizeof(pldm_header) + version_len;
+ */
+ u8 version_string[]; /* PackageVersionString */
+} __packed __aligned(1);
+
+/* Firmware Device ID Record */
+struct __pldmfw_record_info {
+ __le16 record_len; /* RecordLength */
+ u8 descriptor_count; /* DescriptorCount */
+ __le32 device_update_flags; /* DeviceUpdateOptionFlags */
+ u8 version_type; /* ComponentImageSetVersionType */
+ u8 version_len; /* ComponentImageSetVersionLength */
+ __le16 package_data_len; /* FirmwareDevicePackageDataLength */
+
+ /*
+ * DSP0267 also includes the following variable length fields at the
+ * end of this structure:
+ *
+ * ApplicableComponents, length is component_bitmap_len from header
+ * ComponentImageSetVersionString, length is version_len
+ * RecordDescriptors, a series of TLVs with 16bit type and length
+ * FirmwareDevicePackageData, length is package_data_len
+ *
+ * The total size of each record is
+ * sizeof(pldmfw_record_info) +
+ * component_bitmap_len (converted to bytes!) +
+ * version_len +
+ * <length of RecordDescriptors> +
+ * package_data_len
+ */
+ u8 variable_record_data[];
+} __packed __aligned(1);
+
+/* Firmware Descriptor Definition */
+struct __pldmfw_desc_tlv {
+ __le16 type; /* DescriptorType */
+ __le16 size; /* DescriptorSize */
+ u8 data[]; /* DescriptorData */
+} __aligned(1);
+
+/* Firmware Device Identification Area */
+struct __pldmfw_record_area {
+ u8 record_count; /* DeviceIDRecordCount */
+ /* This is not a struct type because the size of each record varies */
+ u8 records[];
+} __aligned(1);
+
+/* Individual Component Image Information */
+struct __pldmfw_component_info {
+ __le16 classification; /* ComponentClassification */
+ __le16 identifier; /* ComponentIdentifier */
+ __le32 comparison_stamp; /* ComponentComparisonStamp */
+ __le16 options; /* ComponentOptions */
+ __le16 activation_method; /* RequestedComponentActivationMethod */
+ __le32 location_offset; /* ComponentLocationOffset */
+ __le32 size; /* ComponentSize */
+ u8 version_type; /* ComponentVersionStringType */
+ u8 version_len; /* ComponentVersionStringLength */
+
+ /*
+ * DSP0267 also includes the following variable length fields at the
+ * end of this structure:
+ *
+ * ComponentVersionString, length is version_len
+ *
+ * The total size of this section is
+ * sizeof(pldmfw_component_info) + version_len;
+ */
+ u8 version_string[]; /* ComponentVersionString */
+} __packed __aligned(1);
+
+/* Component Image Information Area */
+struct __pldmfw_component_area {
+ __le16 component_image_count;
+ /* This is not a struct type because the component size varies */
+ u8 components[];
+} __aligned(1);
+
+/**
+ * pldm_first_desc_tlv
+ * @start: byte offset of the start of the descriptor TLVs
+ *
+ * Converts the starting offset of the descriptor TLVs into a pointer to the
+ * first descriptor.
+ */
+#define pldm_first_desc_tlv(start) \
+ ((const struct __pldmfw_desc_tlv *)(start))
+
+/**
+ * pldm_next_desc_tlv
+ * @desc: pointer to a descriptor TLV
+ *
+ * Finds the pointer to the next descriptor following a given descriptor
+ */
+#define pldm_next_desc_tlv(desc) \
+ ((const struct __pldmfw_desc_tlv *)((desc)->data + \
+ get_unaligned_le16(&(desc)->size)))
+
+/**
+ * pldm_for_each_desc_tlv
+ * @i: variable to store descriptor index
+ * @desc: variable to store descriptor pointer
+ * @start: byte offset of the start of the descriptors
+ * @count: the number of descriptors
+ *
+ * for loop macro to iterate over all of the descriptors of a given PLDM
+ * record.
+ */
+#define pldm_for_each_desc_tlv(i, desc, start, count) \
+ for ((i) = 0, (desc) = pldm_first_desc_tlv(start); \
+ (i) < (count); \
+ (i)++, (desc) = pldm_next_desc_tlv(desc))
+
+/**
+ * pldm_first_record
+ * @start: byte offset of the start of the PLDM records
+ *
+ * Converts a starting offset of the PLDM records into a pointer to the first
+ * record.
+ */
+#define pldm_first_record(start) \
+ ((const struct __pldmfw_record_info *)(start))
+
+/**
+ * pldm_next_record
+ * @record: pointer to a PLDM record
+ *
+ * Finds a pointer to the next record following a given record
+ */
+#define pldm_next_record(record) \
+ ((const struct __pldmfw_record_info *) \
+ ((const u8 *)(record) + get_unaligned_le16(&(record)->record_len)))
+
+/**
+ * pldm_for_each_record
+ * @i: variable to store record index
+ * @record: variable to store record pointer
+ * @start: byte offset of the start of the records
+ * @count: the number of records
+ *
+ * for loop macro to iterate over all of the records of a PLDM file.
+ */
+#define pldm_for_each_record(i, record, start, count) \
+ for ((i) = 0, (record) = pldm_first_record(start); \
+ (i) < (count); \
+ (i)++, (record) = pldm_next_record(record))
+
+/**
+ * pldm_first_component
+ * @start: byte offset of the start of the PLDM components
+ *
+ * Convert a starting offset of the PLDM components into a pointer to the
+ * first component
+ */
+#define pldm_first_component(start) \
+ ((const struct __pldmfw_component_info *)(start))
+
+/**
+ * pldm_next_component
+ * @component: pointer to a PLDM component
+ *
+ * Finds a pointer to the next component following a given component
+ */
+#define pldm_next_component(component) \
+ ((const struct __pldmfw_component_info *)((component)->version_string + \
+ (component)->version_len))
+
+/**
+ * pldm_for_each_component
+ * @i: variable to store component index
+ * @component: variable to store component pointer
+ * @start: byte offset to the start of the first component
+ * @count: the number of components
+ *
+ * for loop macro to iterate over all of the components of a PLDM file.
+ */
+#define pldm_for_each_component(i, component, start, count) \
+ for ((i) = 0, (component) = pldm_first_component(start); \
+ (i) < (count); \
+ (i)++, (component) = pldm_next_component(component))
+
+#endif
diff --git a/lib/plist.c b/lib/plist.c
new file mode 100644
index 000000000..0d86ed7a7
--- /dev/null
+++ b/lib/plist.c
@@ -0,0 +1,263 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * lib/plist.c
+ *
+ * Descending-priority-sorted double-linked list
+ *
+ * (C) 2002-2003 Intel Corp
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>.
+ *
+ * 2001-2005 (c) MontaVista Software, Inc.
+ * Daniel Walker <dwalker@mvista.com>
+ *
+ * (C) 2005 Thomas Gleixner <tglx@linutronix.de>
+ *
+ * Simplifications of the original code by
+ * Oleg Nesterov <oleg@tv-sign.ru>
+ *
+ * Based on simple lists (include/linux/list.h).
+ *
+ * This file contains the add / del functions which are considered to
+ * be too large to inline. See include/linux/plist.h for further
+ * information.
+ */
+
+#include <linux/bug.h>
+#include <linux/plist.h>
+
+#ifdef CONFIG_DEBUG_PLIST
+
+static struct plist_head test_head;
+
+static void plist_check_prev_next(struct list_head *t, struct list_head *p,
+ struct list_head *n)
+{
+ WARN(n->prev != p || p->next != n,
+ "top: %p, n: %p, p: %p\n"
+ "prev: %p, n: %p, p: %p\n"
+ "next: %p, n: %p, p: %p\n",
+ t, t->next, t->prev,
+ p, p->next, p->prev,
+ n, n->next, n->prev);
+}
+
+static void plist_check_list(struct list_head *top)
+{
+ struct list_head *prev = top, *next = top->next;
+
+ plist_check_prev_next(top, prev, next);
+ while (next != top) {
+ prev = next;
+ next = prev->next;
+ plist_check_prev_next(top, prev, next);
+ }
+}
+
+static void plist_check_head(struct plist_head *head)
+{
+ if (!plist_head_empty(head))
+ plist_check_list(&plist_first(head)->prio_list);
+ plist_check_list(&head->node_list);
+}
+
+#else
+# define plist_check_head(h) do { } while (0)
+#endif
+
+/**
+ * plist_add - add @node to @head
+ *
+ * @node: &struct plist_node pointer
+ * @head: &struct plist_head pointer
+ */
+void plist_add(struct plist_node *node, struct plist_head *head)
+{
+ struct plist_node *first, *iter, *prev = NULL;
+ struct list_head *node_next = &head->node_list;
+
+ plist_check_head(head);
+ WARN_ON(!plist_node_empty(node));
+ WARN_ON(!list_empty(&node->prio_list));
+
+ if (plist_head_empty(head))
+ goto ins_node;
+
+ first = iter = plist_first(head);
+
+ do {
+ if (node->prio < iter->prio) {
+ node_next = &iter->node_list;
+ break;
+ }
+
+ prev = iter;
+ iter = list_entry(iter->prio_list.next,
+ struct plist_node, prio_list);
+ } while (iter != first);
+
+ if (!prev || prev->prio != node->prio)
+ list_add_tail(&node->prio_list, &iter->prio_list);
+ins_node:
+ list_add_tail(&node->node_list, node_next);
+
+ plist_check_head(head);
+}
+
+/**
+ * plist_del - Remove a @node from plist.
+ *
+ * @node: &struct plist_node pointer - entry to be removed
+ * @head: &struct plist_head pointer - list head
+ */
+void plist_del(struct plist_node *node, struct plist_head *head)
+{
+ plist_check_head(head);
+
+ if (!list_empty(&node->prio_list)) {
+ if (node->node_list.next != &head->node_list) {
+ struct plist_node *next;
+
+ next = list_entry(node->node_list.next,
+ struct plist_node, node_list);
+
+ /* add the next plist_node into prio_list */
+ if (list_empty(&next->prio_list))
+ list_add(&next->prio_list, &node->prio_list);
+ }
+ list_del_init(&node->prio_list);
+ }
+
+ list_del_init(&node->node_list);
+
+ plist_check_head(head);
+}
+
+/**
+ * plist_requeue - Requeue @node at end of same-prio entries.
+ *
+ * This is essentially an optimized plist_del() followed by
+ * plist_add(). It moves an entry already in the plist to
+ * after any other same-priority entries.
+ *
+ * @node: &struct plist_node pointer - entry to be moved
+ * @head: &struct plist_head pointer - list head
+ */
+void plist_requeue(struct plist_node *node, struct plist_head *head)
+{
+ struct plist_node *iter;
+ struct list_head *node_next = &head->node_list;
+
+ plist_check_head(head);
+ BUG_ON(plist_head_empty(head));
+ BUG_ON(plist_node_empty(node));
+
+ if (node == plist_last(head))
+ return;
+
+ iter = plist_next(node);
+
+ if (node->prio != iter->prio)
+ return;
+
+ plist_del(node, head);
+
+ plist_for_each_continue(iter, head) {
+ if (node->prio != iter->prio) {
+ node_next = &iter->node_list;
+ break;
+ }
+ }
+ list_add_tail(&node->node_list, node_next);
+
+ plist_check_head(head);
+}
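+
+/*
+ * Illustrative sketch (not part of this file): basic use of a plist. The
+ * names and priority values are assumptions for the example only.
+ */
+static void plist_example(void)
+{
+ struct plist_head head;
+ struct plist_node a, b;
+
+ plist_head_init(&head);
+ plist_node_init(&a, 1); /* lower prio value sorts first */
+ plist_node_init(&b, 2);
+
+ plist_add(&b, &head);
+ plist_add(&a, &head);
+
+ /* plist_first() now returns &a, since prio 1 sorts before prio 2 */
+ plist_del(plist_first(&head), &head);
+}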
+
+#ifdef CONFIG_DEBUG_PLIST
+#include <linux/sched.h>
+#include <linux/sched/clock.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+static struct plist_node __initdata test_node[241];
+
+static void __init plist_test_check(int nr_expect)
+{
+ struct plist_node *first, *prio_pos, *node_pos;
+
+ if (plist_head_empty(&test_head)) {
+ BUG_ON(nr_expect != 0);
+ return;
+ }
+
+ prio_pos = first = plist_first(&test_head);
+ plist_for_each(node_pos, &test_head) {
+ if (nr_expect-- < 0)
+ break;
+ if (node_pos == first)
+ continue;
+ if (node_pos->prio == prio_pos->prio) {
+ BUG_ON(!list_empty(&node_pos->prio_list));
+ continue;
+ }
+
+ BUG_ON(prio_pos->prio > node_pos->prio);
+ BUG_ON(prio_pos->prio_list.next != &node_pos->prio_list);
+ prio_pos = node_pos;
+ }
+
+ BUG_ON(nr_expect != 0);
+ BUG_ON(prio_pos->prio_list.next != &first->prio_list);
+}
+
+static void __init plist_test_requeue(struct plist_node *node)
+{
+ plist_requeue(node, &test_head);
+
+ if (node != plist_last(&test_head))
+ BUG_ON(node->prio == plist_next(node)->prio);
+}
+
+static int __init plist_test(void)
+{
+ int nr_expect = 0, i, loop;
+ unsigned int r = local_clock();
+
+ printk(KERN_DEBUG "start plist test\n");
+ plist_head_init(&test_head);
+ for (i = 0; i < ARRAY_SIZE(test_node); i++)
+ plist_node_init(test_node + i, 0);
+
+ for (loop = 0; loop < 1000; loop++) {
+ r = r * 193939 % 47629;
+ i = r % ARRAY_SIZE(test_node);
+ if (plist_node_empty(test_node + i)) {
+ r = r * 193939 % 47629;
+ test_node[i].prio = r % 99;
+ plist_add(test_node + i, &test_head);
+ nr_expect++;
+ } else {
+ plist_del(test_node + i, &test_head);
+ nr_expect--;
+ }
+ plist_test_check(nr_expect);
+ if (!plist_node_empty(test_node + i)) {
+ plist_test_requeue(test_node + i);
+ plist_test_check(nr_expect);
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(test_node); i++) {
+ if (plist_node_empty(test_node + i))
+ continue;
+ plist_del(test_node + i, &test_head);
+ nr_expect--;
+ plist_test_check(nr_expect);
+ }
+
+ printk(KERN_DEBUG "end plist test\n");
+ return 0;
+}
+
+module_init(plist_test);
+
+#endif
diff --git a/lib/pm-notifier-error-inject.c b/lib/pm-notifier-error-inject.c
new file mode 100644
index 000000000..5d89f0d90
--- /dev/null
+++ b/lib/pm-notifier-error-inject.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/suspend.h>
+
+#include "notifier-error-inject.h"
+
+static int priority;
+module_param(priority, int, 0);
+MODULE_PARM_DESC(priority, "specify PM notifier priority");
+
+static struct notifier_err_inject pm_notifier_err_inject = {
+ .actions = {
+ { NOTIFIER_ERR_INJECT_ACTION(PM_HIBERNATION_PREPARE) },
+ { NOTIFIER_ERR_INJECT_ACTION(PM_SUSPEND_PREPARE) },
+ { NOTIFIER_ERR_INJECT_ACTION(PM_RESTORE_PREPARE) },
+ {}
+ }
+};
+
+static struct dentry *dir;
+
+static int err_inject_init(void)
+{
+ int err;
+
+ dir = notifier_err_inject_init("pm", notifier_err_inject_dir,
+ &pm_notifier_err_inject, priority);
+ if (IS_ERR(dir))
+ return PTR_ERR(dir);
+
+ err = register_pm_notifier(&pm_notifier_err_inject.nb);
+ if (err)
+ debugfs_remove_recursive(dir);
+
+ return err;
+}
+
+static void err_inject_exit(void)
+{
+ unregister_pm_notifier(&pm_notifier_err_inject.nb);
+ debugfs_remove_recursive(dir);
+}
+
+module_init(err_inject_init);
+module_exit(err_inject_exit);
+
+MODULE_DESCRIPTION("PM notifier error injection module");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Akinobu Mita <akinobu.mita@gmail.com>");
diff --git a/lib/polynomial.c b/lib/polynomial.c
new file mode 100644
index 000000000..66d383445
--- /dev/null
+++ b/lib/polynomial.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Generic polynomial calculation using integer coefficients.
+ *
+ * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+ *
+ * Authors:
+ * Maxim Kaurkin <maxim.kaurkin@baikalelectronics.ru>
+ * Serge Semin <Sergey.Semin@baikalelectronics.ru>
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/polynomial.h>
+
+/*
+ * Originally this was part of drivers/hwmon/bt1-pvt.c.
+ * There the following conversion is used and should serve as an example here:
+ *
+ * The original translation formulae of the temperature (in degrees of Celsius)
+ * to PVT data and vice-versa are following:
+ *
+ * N = 1.8322e-8*(T^4) + 2.343e-5*(T^3) + 8.7018e-3*(T^2) + 3.9269*(T^1) +
+ * 1.7204e2
+ * T = -1.6743e-11*(N^4) + 8.1542e-8*(N^3) + -1.8201e-4*(N^2) +
+ * 3.1020e-1*(N^1) - 4.838e1
+ *
+ * where T = [-48.380, 147.438]C and N = [0, 1023].
+ *
+ * They must be altered accordingly to be suitable for integer arithmetic.
+ * The technique is called 'factor redistribution': the multiplications and
+ * divisions are arranged so that the result of each operation stays within
+ * the integer limits. In addition we need to translate the formulae to
+ * accept millidegrees of Celsius. Here is what they look like after the
+ * alterations:
+ *
+ * N = (18322e-20*(T^4) + 2343e-13*(T^3) + 87018e-9*(T^2) + 39269e-3*T +
+ * 17204e2) / 1e4
+ * T = -16743e-12*(D^4) + 81542e-9*(D^3) - 182010e-6*(D^2) + 310200e-3*D -
+ * 48380
+ * where T = [-48380, 147438] mC and N = [0, 1023].
+ *
+ * static const struct polynomial poly_temp_to_N = {
+ * .total_divider = 10000,
+ * .terms = {
+ * {4, 18322, 10000, 10000},
+ * {3, 2343, 10000, 10},
+ * {2, 87018, 10000, 10},
+ * {1, 39269, 1000, 1},
+ * {0, 1720400, 1, 1}
+ * }
+ * };
+ *
+ * static const struct polynomial poly_N_to_temp = {
+ * .total_divider = 1,
+ * .terms = {
+ * {4, -16743, 1000, 1},
+ * {3, 81542, 1000, 1},
+ * {2, -182010, 1000, 1},
+ * {1, 310200, 1000, 1},
+ * {0, -48380, 1, 1}
+ * }
+ * };
+ */
+
+/**
+ * polynomial_calc - calculate a polynomial using integer arithmetic
+ *
+ * @poly: pointer to the descriptor of the polynomial
+ * @data: input value of the polynomial
+ *
+ * Calculate the result of a polynomial using only integer arithmetic. For
+ * this to work without too much loss of precision the coefficients have to
+ * be altered. This is called factor redistribution.
+ *
+ * Returns the result of the polynomial calculation.
+ */
+long polynomial_calc(const struct polynomial *poly, long data)
+{
+ const struct polynomial_term *term = poly->terms;
+ long total_divider = poly->total_divider ?: 1;
+ long tmp, ret = 0;
+ int deg;
+
+ /*
+ * Here is the polynomial calculation function, which performs the
+ * redistributed term calculations. It's pretty straightforward:
+ * we walk over each degree term down to the free one, and perform
+ * the redistributed multiplication of the term coefficient, its
+ * divider (for the rational fraction representation), the data
+ * power and the rational fraction divider leftover. All of this
+ * is collected in a total sum variable, whose value is normalized
+ * by the total divider before being returned.
+ */
+ do {
+ tmp = term->coef;
+ for (deg = 0; deg < term->deg; ++deg)
+ tmp = mult_frac(tmp, data, term->divider);
+ ret += tmp / term->divider_leftover;
+ } while ((term++)->deg);
+
+ return ret / total_divider;
+}
+EXPORT_SYMBOL_GPL(polynomial_calc);
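+
+/*
+ * Illustrative sketch (not part of this file): converting a raw sensor
+ * reading with the poly_N_to_temp descriptor shown in the comment above.
+ */
+static const struct polynomial poly_N_to_temp = {
+ .total_divider = 1,
+ .terms = {
+ {4, -16743, 1000, 1},
+ {3, 81542, 1000, 1},
+ {2, -182010, 1000, 1},
+ {1, 310200, 1000, 1},
+ {0, -48380, 1, 1}
+ }
+};
+
+static long example_raw_to_mC(long raw)
+{
+ /* raw in [0, 1023] yields millidegrees Celsius in [-48380, 147438] */
+ return polynomial_calc(&poly_N_to_temp, raw);
+}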
+
+MODULE_DESCRIPTION("Generic polynomial calculations");
+MODULE_LICENSE("GPL");
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
new file mode 100644
index 000000000..976b9bd02
--- /dev/null
+++ b/lib/radix-tree.c
@@ -0,0 +1,1608 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2001 Momchil Velikov
+ * Portions Copyright (C) 2001 Christoph Hellwig
+ * Copyright (C) 2005 SGI, Christoph Lameter
+ * Copyright (C) 2006 Nick Piggin
+ * Copyright (C) 2012 Konstantin Khlebnikov
+ * Copyright (C) 2016 Intel, Matthew Wilcox
+ * Copyright (C) 2016 Intel, Ross Zwisler
+ */
+
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/bug.h>
+#include <linux/cpu.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/idr.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/kmemleak.h>
+#include <linux/percpu.h>
+#include <linux/preempt.h> /* in_interrupt() */
+#include <linux/radix-tree.h>
+#include <linux/rcupdate.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/xarray.h>
+
+#include "radix-tree.h"
+
+/*
+ * Radix tree node cache.
+ */
+struct kmem_cache *radix_tree_node_cachep;
+
+/*
+ * The radix tree is variable-height, so an insert operation not only has
+ * to build the branch to its corresponding item, it also has to build the
+ * branch to existing items if the size has to be increased (by
+ * radix_tree_extend).
+ *
+ * The worst case is a zero height tree with just a single item at index 0,
+ * and then inserting an item at index ULONG_MAX. This requires 2 new branches
+ * of RADIX_TREE_MAX_PATH size to be created, with only the root node shared.
+ * Hence:
+ */
+#define RADIX_TREE_PRELOAD_SIZE (RADIX_TREE_MAX_PATH * 2 - 1)
+
+/*
+ * The IDR does not have to be as high as the radix tree since it uses
+ * signed integers, not unsigned longs.
+ */
+#define IDR_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(int) - 1)
+#define IDR_MAX_PATH (DIV_ROUND_UP(IDR_INDEX_BITS, \
+ RADIX_TREE_MAP_SHIFT))
+#define IDR_PRELOAD_SIZE (IDR_MAX_PATH * 2 - 1)
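+
+/*
+ * Worked example (assuming the common RADIX_TREE_MAP_SHIFT of 6 on a
+ * 64-bit build): RADIX_TREE_MAX_PATH = DIV_ROUND_UP(64, 6) = 11, so
+ * RADIX_TREE_PRELOAD_SIZE = 11 * 2 - 1 = 21 nodes. For the IDR,
+ * IDR_INDEX_BITS = 31, IDR_MAX_PATH = DIV_ROUND_UP(31, 6) = 6, and
+ * IDR_PRELOAD_SIZE = 6 * 2 - 1 = 11 nodes.
+ */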
+
+/*
+ * Per-cpu pool of preloaded nodes
+ */
+DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = {
+ .lock = INIT_LOCAL_LOCK(lock),
+};
+EXPORT_PER_CPU_SYMBOL_GPL(radix_tree_preloads);
+
+static inline struct radix_tree_node *entry_to_node(void *ptr)
+{
+ return (void *)((unsigned long)ptr & ~RADIX_TREE_INTERNAL_NODE);
+}
+
+static inline void *node_to_entry(void *ptr)
+{
+ return (void *)((unsigned long)ptr | RADIX_TREE_INTERNAL_NODE);
+}
+
+#define RADIX_TREE_RETRY XA_RETRY_ENTRY
+
+static inline unsigned long
+get_slot_offset(const struct radix_tree_node *parent, void __rcu **slot)
+{
+ return parent ? slot - parent->slots : 0;
+}
+
+static unsigned int radix_tree_descend(const struct radix_tree_node *parent,
+ struct radix_tree_node **nodep, unsigned long index)
+{
+ unsigned int offset = (index >> parent->shift) & RADIX_TREE_MAP_MASK;
+ void __rcu **entry = rcu_dereference_raw(parent->slots[offset]);
+
+ *nodep = (void *)entry;
+ return offset;
+}
+
+static inline gfp_t root_gfp_mask(const struct radix_tree_root *root)
+{
+ return root->xa_flags & (__GFP_BITS_MASK & ~GFP_ZONEMASK);
+}
+
+static inline void tag_set(struct radix_tree_node *node, unsigned int tag,
+ int offset)
+{
+ __set_bit(offset, node->tags[tag]);
+}
+
+static inline void tag_clear(struct radix_tree_node *node, unsigned int tag,
+ int offset)
+{
+ __clear_bit(offset, node->tags[tag]);
+}
+
+static inline int tag_get(const struct radix_tree_node *node, unsigned int tag,
+ int offset)
+{
+ return test_bit(offset, node->tags[tag]);
+}
+
+static inline void root_tag_set(struct radix_tree_root *root, unsigned tag)
+{
+ root->xa_flags |= (__force gfp_t)(1 << (tag + ROOT_TAG_SHIFT));
+}
+
+static inline void root_tag_clear(struct radix_tree_root *root, unsigned tag)
+{
+ root->xa_flags &= (__force gfp_t)~(1 << (tag + ROOT_TAG_SHIFT));
+}
+
+static inline void root_tag_clear_all(struct radix_tree_root *root)
+{
+ root->xa_flags &= (__force gfp_t)((1 << ROOT_TAG_SHIFT) - 1);
+}
+
+static inline int root_tag_get(const struct radix_tree_root *root, unsigned tag)
+{
+ return (__force int)root->xa_flags & (1 << (tag + ROOT_TAG_SHIFT));
+}
+
+static inline unsigned root_tags_get(const struct radix_tree_root *root)
+{
+ return (__force unsigned)root->xa_flags >> ROOT_TAG_SHIFT;
+}
+
+static inline bool is_idr(const struct radix_tree_root *root)
+{
+ return !!(root->xa_flags & ROOT_IS_IDR);
+}
+
+/*
+ * Returns 1 if any slot in the node has this tag set.
+ * Otherwise returns 0.
+ */
+static inline int any_tag_set(const struct radix_tree_node *node,
+ unsigned int tag)
+{
+ unsigned idx;
+ for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
+ if (node->tags[tag][idx])
+ return 1;
+ }
+ return 0;
+}
+
+static inline void all_tag_set(struct radix_tree_node *node, unsigned int tag)
+{
+ bitmap_fill(node->tags[tag], RADIX_TREE_MAP_SIZE);
+}
+
+/**
+ * radix_tree_find_next_bit - find the next set bit in a memory region
+ *
+ * @node: where to begin the search
+ * @tag: the tag index
+ * @offset: the bit number to start searching at
+ *
+ * Unrollable variant of find_next_bit() for constant size arrays.
+ * Tail bits starting from size to roundup(size, BITS_PER_LONG) must be zero.
+ * Returns next bit offset, or size if nothing found.
+ */
+static __always_inline unsigned long
+radix_tree_find_next_bit(struct radix_tree_node *node, unsigned int tag,
+ unsigned long offset)
+{
+ const unsigned long *addr = node->tags[tag];
+
+ if (offset < RADIX_TREE_MAP_SIZE) {
+ unsigned long tmp;
+
+ addr += offset / BITS_PER_LONG;
+ tmp = *addr >> (offset % BITS_PER_LONG);
+ if (tmp)
+ return __ffs(tmp) + offset;
+ offset = (offset + BITS_PER_LONG) & ~(BITS_PER_LONG - 1);
+ while (offset < RADIX_TREE_MAP_SIZE) {
+ tmp = *++addr;
+ if (tmp)
+ return __ffs(tmp) + offset;
+ offset += BITS_PER_LONG;
+ }
+ }
+ return RADIX_TREE_MAP_SIZE;
+}
+
+static unsigned int iter_offset(const struct radix_tree_iter *iter)
+{
+ return iter->index & RADIX_TREE_MAP_MASK;
+}
+
+/*
+ * The maximum index which can be stored in a radix tree
+ */
+static inline unsigned long shift_maxindex(unsigned int shift)
+{
+ return (RADIX_TREE_MAP_SIZE << shift) - 1;
+}
+
+static inline unsigned long node_maxindex(const struct radix_tree_node *node)
+{
+ return shift_maxindex(node->shift);
+}
+
+static unsigned long next_index(unsigned long index,
+ const struct radix_tree_node *node,
+ unsigned long offset)
+{
+ return (index & ~node_maxindex(node)) + (offset << node->shift);
+}
+
+/*
+ * This assumes that the caller has performed appropriate preallocation, and
+ * that the caller has pinned this thread of control to the current CPU.
+ */
+static struct radix_tree_node *
+radix_tree_node_alloc(gfp_t gfp_mask, struct radix_tree_node *parent,
+ struct radix_tree_root *root,
+ unsigned int shift, unsigned int offset,
+ unsigned int count, unsigned int nr_values)
+{
+ struct radix_tree_node *ret = NULL;
+
+ /*
+ * Preload code isn't irq safe and it doesn't make sense to use
+ * preloading during an interrupt anyway as all the allocations have
+ * to be atomic. So just do normal allocation when in interrupt.
+ */
+ if (!gfpflags_allow_blocking(gfp_mask) && !in_interrupt()) {
+ struct radix_tree_preload *rtp;
+
+ /*
+ * Even if the caller has preloaded, try to allocate from the
+ * cache first for the new node to get accounted to the memory
+ * cgroup.
+ */
+ ret = kmem_cache_alloc(radix_tree_node_cachep,
+ gfp_mask | __GFP_NOWARN);
+ if (ret)
+ goto out;
+
+ /*
+ * Provided the caller has preloaded, we will always succeed
+ * in getting a node here (and never reach kmem_cache_alloc).
+ */
+ rtp = this_cpu_ptr(&radix_tree_preloads);
+ if (rtp->nr) {
+ ret = rtp->nodes;
+ rtp->nodes = ret->parent;
+ rtp->nr--;
+ }
+ /*
+ * Update the allocation stack trace as this is more useful
+ * for debugging.
+ */
+ kmemleak_update_trace(ret);
+ goto out;
+ }
+ ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
+out:
+ BUG_ON(radix_tree_is_internal_node(ret));
+ if (ret) {
+ ret->shift = shift;
+ ret->offset = offset;
+ ret->count = count;
+ ret->nr_values = nr_values;
+ ret->parent = parent;
+ ret->array = root;
+ }
+ return ret;
+}
+
+void radix_tree_node_rcu_free(struct rcu_head *head)
+{
+ struct radix_tree_node *node =
+ container_of(head, struct radix_tree_node, rcu_head);
+
+ /*
+ * Must only free zeroed nodes into the slab. We can be left with
+ * non-NULL entries by radix_tree_free_nodes, so clear the entries
+ * and tags here.
+ */
+ memset(node->slots, 0, sizeof(node->slots));
+ memset(node->tags, 0, sizeof(node->tags));
+ INIT_LIST_HEAD(&node->private_list);
+
+ kmem_cache_free(radix_tree_node_cachep, node);
+}
+
+static inline void
+radix_tree_node_free(struct radix_tree_node *node)
+{
+ call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
+}
+
+/*
+ * Load up this CPU's radix_tree_node buffer with sufficient objects to
+ * ensure that the addition of a single element in the tree cannot fail. On
+ * success, return zero, with preemption disabled. On error, return -ENOMEM
+ * with preemption not disabled.
+ *
+ * To make use of this facility, the radix tree must be initialised without
+ * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE().
+ */
+static __must_check int __radix_tree_preload(gfp_t gfp_mask, unsigned nr)
+{
+ struct radix_tree_preload *rtp;
+ struct radix_tree_node *node;
+ int ret = -ENOMEM;
+
+ /*
+ * Nodes preloaded by one cgroup can be used by another cgroup, so
+ * they should never be accounted to any particular memory cgroup.
+ */
+ gfp_mask &= ~__GFP_ACCOUNT;
+
+ local_lock(&radix_tree_preloads.lock);
+ rtp = this_cpu_ptr(&radix_tree_preloads);
+ while (rtp->nr < nr) {
+ local_unlock(&radix_tree_preloads.lock);
+ node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
+ if (node == NULL)
+ goto out;
+ local_lock(&radix_tree_preloads.lock);
+ rtp = this_cpu_ptr(&radix_tree_preloads);
+ if (rtp->nr < nr) {
+ node->parent = rtp->nodes;
+ rtp->nodes = node;
+ rtp->nr++;
+ } else {
+ kmem_cache_free(radix_tree_node_cachep, node);
+ }
+ }
+ ret = 0;
+out:
+ return ret;
+}
+
+/*
+ * Load up this CPU's radix_tree_node buffer with sufficient objects to
+ * ensure that the addition of a single element in the tree cannot fail. On
+ * success, return zero, with preemption disabled. On error, return -ENOMEM
+ * with preemption not disabled.
+ *
+ * To make use of this facility, the radix tree must be initialised without
+ * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE().
+ */
+int radix_tree_preload(gfp_t gfp_mask)
+{
+ /* Warn on nonsensical use... */
+ WARN_ON_ONCE(!gfpflags_allow_blocking(gfp_mask));
+ return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE);
+}
+EXPORT_SYMBOL(radix_tree_preload);
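+
+/*
+ * A minimal preload pattern (sketch; 'my_tree', 'my_lock', 'index' and
+ * 'item' are hypothetical caller state). Once the preload has succeeded,
+ * the insert under the spinlock cannot fail with -ENOMEM:
+ *
+ *	if (radix_tree_preload(GFP_KERNEL))
+ *		return -ENOMEM;
+ *	spin_lock(&my_lock);
+ *	err = radix_tree_insert(&my_tree, index, item);
+ *	spin_unlock(&my_lock);
+ *	radix_tree_preload_end();
+ */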
+
+/*
+ * The same as the function above, except we don't guarantee that preloading
+ * happens. We do it if we decide it helps. On success, return zero with
+ * preemption disabled. On error, return -ENOMEM with preemption not disabled.
+ */
+int radix_tree_maybe_preload(gfp_t gfp_mask)
+{
+ if (gfpflags_allow_blocking(gfp_mask))
+ return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE);
+ /* Preloading doesn't help anything with this gfp mask, skip it */
+ local_lock(&radix_tree_preloads.lock);
+ return 0;
+}
+EXPORT_SYMBOL(radix_tree_maybe_preload);
+
+static unsigned radix_tree_load_root(const struct radix_tree_root *root,
+ struct radix_tree_node **nodep, unsigned long *maxindex)
+{
+ struct radix_tree_node *node = rcu_dereference_raw(root->xa_head);
+
+ *nodep = node;
+
+ if (likely(radix_tree_is_internal_node(node))) {
+ node = entry_to_node(node);
+ *maxindex = node_maxindex(node);
+ return node->shift + RADIX_TREE_MAP_SHIFT;
+ }
+
+ *maxindex = 0;
+ return 0;
+}
+
+/*
+ * Extend a radix tree so it can store key @index. Returns the new
+ * top-level shift on success, or -ENOMEM on allocation failure.
+ */
+static int radix_tree_extend(struct radix_tree_root *root, gfp_t gfp,
+ unsigned long index, unsigned int shift)
+{
+ void *entry;
+ unsigned int maxshift;
+ int tag;
+
+ /* Figure out what the shift should be. */
+ maxshift = shift;
+ while (index > shift_maxindex(maxshift))
+ maxshift += RADIX_TREE_MAP_SHIFT;
+
+ entry = rcu_dereference_raw(root->xa_head);
+ if (!entry && (!is_idr(root) || root_tag_get(root, IDR_FREE)))
+ goto out;
+
+ do {
+ struct radix_tree_node *node = radix_tree_node_alloc(gfp, NULL,
+ root, shift, 0, 1, 0);
+ if (!node)
+ return -ENOMEM;
+
+ if (is_idr(root)) {
+ all_tag_set(node, IDR_FREE);
+ if (!root_tag_get(root, IDR_FREE)) {
+ tag_clear(node, IDR_FREE, 0);
+ root_tag_set(root, IDR_FREE);
+ }
+ } else {
+ /* Propagate the aggregated tag info to the new child */
+ for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
+ if (root_tag_get(root, tag))
+ tag_set(node, tag, 0);
+ }
+ }
+
+ BUG_ON(shift > BITS_PER_LONG);
+ if (radix_tree_is_internal_node(entry)) {
+ entry_to_node(entry)->parent = node;
+ } else if (xa_is_value(entry)) {
+ /* Moving a value entry root->xa_head to a node */
+ node->nr_values = 1;
+ }
+ /*
+ * entry was already in the radix tree, so we do not need
+ * rcu_assign_pointer here
+ */
+ node->slots[0] = (void __rcu *)entry;
+ entry = node_to_entry(node);
+ rcu_assign_pointer(root->xa_head, entry);
+ shift += RADIX_TREE_MAP_SHIFT;
+ } while (shift <= maxshift);
+out:
+ return maxshift + RADIX_TREE_MAP_SHIFT;
+}
+
+/**
+ * radix_tree_shrink - shrink radix tree to minimum height
+ * @root: radix tree root
+ */
+static inline bool radix_tree_shrink(struct radix_tree_root *root)
+{
+ bool shrunk = false;
+
+ for (;;) {
+ struct radix_tree_node *node = rcu_dereference_raw(root->xa_head);
+ struct radix_tree_node *child;
+
+ if (!radix_tree_is_internal_node(node))
+ break;
+ node = entry_to_node(node);
+
+ /*
+ * If the candidate node has more than one child, or its child
+ * is not at the leftmost slot, we cannot shrink.
+ */
+ if (node->count != 1)
+ break;
+ child = rcu_dereference_raw(node->slots[0]);
+ if (!child)
+ break;
+
+ /*
+ * For an IDR, we must not shrink entry 0 into the root in
+ * case somebody calls idr_replace() with a pointer that
+ * appears to be an internal entry
+ */
+ if (!node->shift && is_idr(root))
+ break;
+
+ if (radix_tree_is_internal_node(child))
+ entry_to_node(child)->parent = NULL;
+
+ /*
+ * We don't need rcu_assign_pointer(), since we are simply
+ * moving the node from one part of the tree to another: if it
+ * was safe to dereference the old pointer to it
+ * (node->slots[0]), it will be safe to dereference the new
+ * one (root->xa_head) as far as dependent read barriers go.
+ */
+ root->xa_head = (void __rcu *)child;
+ if (is_idr(root) && !tag_get(node, IDR_FREE, 0))
+ root_tag_clear(root, IDR_FREE);
+
+ /*
+ * We have a dilemma here. The node's slot[0] must not be
+ * NULLed in case there are concurrent lookups expecting to
+ * find the item. However if this was a bottom-level node,
+ * then it may be subject to the slot pointer being visible
+ * to callers dereferencing it. If the item corresponding to
+ * slot[0] is subsequently deleted, these callers would expect
+ * their slot to become empty sooner or later.
+ *
+ * For example, lockless pagecache will look up a slot, deref
+ * the page pointer, and if the page has 0 refcount it means it
+ * was concurrently deleted from pagecache so try the deref
+ * again. Fortunately there is already a requirement for logic
+ * to retry the entire slot lookup -- the indirect pointer
+ * problem (replacing direct root node with an indirect pointer
+ * also results in a stale slot). So tag the slot as indirect
+ * to force callers to retry.
+ */
+ node->count = 0;
+ if (!radix_tree_is_internal_node(child)) {
+ node->slots[0] = (void __rcu *)RADIX_TREE_RETRY;
+ }
+
+ WARN_ON_ONCE(!list_empty(&node->private_list));
+ radix_tree_node_free(node);
+ shrunk = true;
+ }
+
+ return shrunk;
+}
+
+static bool delete_node(struct radix_tree_root *root,
+ struct radix_tree_node *node)
+{
+ bool deleted = false;
+
+ do {
+ struct radix_tree_node *parent;
+
+ if (node->count) {
+ if (node_to_entry(node) ==
+ rcu_dereference_raw(root->xa_head))
+ deleted |= radix_tree_shrink(root);
+ return deleted;
+ }
+
+ parent = node->parent;
+ if (parent) {
+ parent->slots[node->offset] = NULL;
+ parent->count--;
+ } else {
+ /*
+ * Shouldn't the tags already have all been cleared
+ * by the caller?
+ */
+ if (!is_idr(root))
+ root_tag_clear_all(root);
+ root->xa_head = NULL;
+ }
+
+ WARN_ON_ONCE(!list_empty(&node->private_list));
+ radix_tree_node_free(node);
+ deleted = true;
+
+ node = parent;
+ } while (node);
+
+ return deleted;
+}
+
+/**
+ * __radix_tree_create - create a slot in a radix tree
+ * @root: radix tree root
+ * @index: index key
+ * @nodep: returns node
+ * @slotp: returns slot
+ *
+ * Create, if necessary, and return the node and slot for an item
+ * at position @index in the radix tree @root.
+ *
+ * Until there is more than one item in the tree, no nodes are
+ * allocated and @root->xa_head is used as a direct slot instead of
+ * pointing to a node, in which case *@nodep will be NULL.
+ *
+ * Returns -ENOMEM, or 0 for success.
+ */
+static int __radix_tree_create(struct radix_tree_root *root,
+ unsigned long index, struct radix_tree_node **nodep,
+ void __rcu ***slotp)
+{
+ struct radix_tree_node *node = NULL, *child;
+ void __rcu **slot = (void __rcu **)&root->xa_head;
+ unsigned long maxindex;
+ unsigned int shift, offset = 0;
+ unsigned long max = index;
+ gfp_t gfp = root_gfp_mask(root);
+
+ shift = radix_tree_load_root(root, &child, &maxindex);
+
+ /* Make sure the tree is high enough. */
+ if (max > maxindex) {
+ int error = radix_tree_extend(root, gfp, max, shift);
+ if (error < 0)
+ return error;
+ shift = error;
+ child = rcu_dereference_raw(root->xa_head);
+ }
+
+ while (shift > 0) {
+ shift -= RADIX_TREE_MAP_SHIFT;
+ if (child == NULL) {
+ /* Have to add a child node. */
+ child = radix_tree_node_alloc(gfp, node, root, shift,
+ offset, 0, 0);
+ if (!child)
+ return -ENOMEM;
+ rcu_assign_pointer(*slot, node_to_entry(child));
+ if (node)
+ node->count++;
+ } else if (!radix_tree_is_internal_node(child))
+ break;
+
+ /* Go a level down */
+ node = entry_to_node(child);
+ offset = radix_tree_descend(node, &child, index);
+ slot = &node->slots[offset];
+ }
+
+ if (nodep)
+ *nodep = node;
+ if (slotp)
+ *slotp = slot;
+ return 0;
+}
+
+/*
+ * Free any nodes below this node. The tree is presumed to not need
+ * shrinking, and any user data in the tree is presumed to not need a
+ * destructor called on it. If we need to add a destructor, we can
+ * add that functionality later. Note that we may not clear tags or
+ * slots from the tree as an RCU walker may still have a pointer into
+ * this subtree. We could replace the entries with RADIX_TREE_RETRY,
+ * but we'll still have to clear those in rcu_free.
+ */
+static void radix_tree_free_nodes(struct radix_tree_node *node)
+{
+ unsigned offset = 0;
+ struct radix_tree_node *child = entry_to_node(node);
+
+ for (;;) {
+ void *entry = rcu_dereference_raw(child->slots[offset]);
+ if (xa_is_node(entry) && child->shift) {
+ child = entry_to_node(entry);
+ offset = 0;
+ continue;
+ }
+ offset++;
+ while (offset == RADIX_TREE_MAP_SIZE) {
+ struct radix_tree_node *old = child;
+ offset = child->offset + 1;
+ child = child->parent;
+ WARN_ON_ONCE(!list_empty(&old->private_list));
+ radix_tree_node_free(old);
+ if (old == entry_to_node(node))
+ return;
+ }
+ }
+}
+
+static inline int insert_entries(struct radix_tree_node *node,
+ void __rcu **slot, void *item)
+{
+ if (*slot)
+ return -EEXIST;
+ rcu_assign_pointer(*slot, item);
+ if (node) {
+ node->count++;
+ if (xa_is_value(item))
+ node->nr_values++;
+ }
+ return 1;
+}
+
+/**
+ * radix_tree_insert - insert into a radix tree
+ * @root: radix tree root
+ * @index: index key
+ * @item: item to insert
+ *
+ * Insert an item into the radix tree at position @index.
+ *
+ * Return: 0 on success, -EEXIST if there is already an entry at @index,
+ * or -ENOMEM if memory could not be allocated.
+ */
+int radix_tree_insert(struct radix_tree_root *root, unsigned long index,
+ void *item)
+{
+ struct radix_tree_node *node;
+ void __rcu **slot;
+ int error;
+
+ BUG_ON(radix_tree_is_internal_node(item));
+
+ error = __radix_tree_create(root, index, &node, &slot);
+ if (error)
+ return error;
+
+ error = insert_entries(node, slot, item);
+ if (error < 0)
+ return error;
+
+ if (node) {
+ unsigned offset = get_slot_offset(node, slot);
+ BUG_ON(tag_get(node, 0, offset));
+ BUG_ON(tag_get(node, 1, offset));
+ BUG_ON(tag_get(node, 2, offset));
+ } else {
+ BUG_ON(root_tags_get(root));
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(radix_tree_insert);
+
+/**
+ * __radix_tree_lookup - lookup an item in a radix tree
+ * @root: radix tree root
+ * @index: index key
+ * @nodep: returns node
+ * @slotp: returns slot
+ *
+ * Lookup and return the item at position @index in the radix
+ * tree @root.
+ *
+ * Until there is more than one item in the tree, no nodes are
+ * allocated and @root->xa_head is used as a direct slot instead of
+ * pointing to a node, in which case *@nodep will be NULL.
+ */
+void *__radix_tree_lookup(const struct radix_tree_root *root,
+ unsigned long index, struct radix_tree_node **nodep,
+ void __rcu ***slotp)
+{
+ struct radix_tree_node *node, *parent;
+ unsigned long maxindex;
+ void __rcu **slot;
+
+ restart:
+ parent = NULL;
+ slot = (void __rcu **)&root->xa_head;
+ radix_tree_load_root(root, &node, &maxindex);
+ if (index > maxindex)
+ return NULL;
+
+ while (radix_tree_is_internal_node(node)) {
+ unsigned offset;
+
+ parent = entry_to_node(node);
+ offset = radix_tree_descend(parent, &node, index);
+ slot = parent->slots + offset;
+ if (node == RADIX_TREE_RETRY)
+ goto restart;
+ if (parent->shift == 0)
+ break;
+ }
+
+ if (nodep)
+ *nodep = parent;
+ if (slotp)
+ *slotp = slot;
+ return node;
+}
+
+/**
+ * radix_tree_lookup_slot - lookup a slot in a radix tree
+ * @root: radix tree root
+ * @index: index key
+ *
+ * Returns: the slot corresponding to the position @index in the
+ * radix tree @root. This is useful for update-if-exists operations.
+ *
+ * This function can be called under rcu_read_lock iff the slot is not
+ * modified by radix_tree_replace_slot, otherwise it must be called
+ * exclusive from other writers. Any dereference of the slot must be done
+ * using radix_tree_deref_slot.
+ */
+void __rcu **radix_tree_lookup_slot(const struct radix_tree_root *root,
+ unsigned long index)
+{
+ void __rcu **slot;
+
+ if (!__radix_tree_lookup(root, index, NULL, &slot))
+ return NULL;
+ return slot;
+}
+EXPORT_SYMBOL(radix_tree_lookup_slot);
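+
+/*
+ * Update-if-exists sketch (hypothetical 'my_tree', 'my_lock' and
+ * 'new_item'; the caller must hold the tree write-locked across slot
+ * lookup and replacement):
+ *
+ *	spin_lock(&my_lock);
+ *	slot = radix_tree_lookup_slot(&my_tree, index);
+ *	if (slot)
+ *		radix_tree_replace_slot(&my_tree, slot, new_item);
+ *	spin_unlock(&my_lock);
+ */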
+
+/**
+ * radix_tree_lookup - perform lookup operation on a radix tree
+ * @root: radix tree root
+ * @index: index key
+ *
+ * Lookup the item at the position @index in the radix tree @root.
+ *
+ * This function can be called under rcu_read_lock, however the caller
+ * must manage lifetimes of leaf nodes (e.g. RCU may also be used to free
+ * them safely). No RCU barriers are required to access or modify the
+ * returned item, however.
+ */
+void *radix_tree_lookup(const struct radix_tree_root *root, unsigned long index)
+{
+ return __radix_tree_lookup(root, index, NULL, NULL);
+}
+EXPORT_SYMBOL(radix_tree_lookup);
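+
+/*
+ * RCU lookup sketch (hypothetical 'my_tree'; the caller is responsible
+ * for keeping the returned item alive, e.g. by freeing items via RCU):
+ *
+ *	rcu_read_lock();
+ *	item = radix_tree_lookup(&my_tree, index);
+ *	if (item)
+ *		... use item; it may be deleted from the tree concurrently ...
+ *	rcu_read_unlock();
+ */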
+
+static void replace_slot(void __rcu **slot, void *item,
+ struct radix_tree_node *node, int count, int values)
+{
+ if (node && (count || values)) {
+ node->count += count;
+ node->nr_values += values;
+ }
+
+ rcu_assign_pointer(*slot, item);
+}
+
+static bool node_tag_get(const struct radix_tree_root *root,
+ const struct radix_tree_node *node,
+ unsigned int tag, unsigned int offset)
+{
+ if (node)
+ return tag_get(node, tag, offset);
+ return root_tag_get(root, tag);
+}
+
+/*
+ * IDR users want to be able to store NULL in the tree, so if the slot isn't
+ * free, don't adjust the count, even if it's transitioning between NULL and
+ * non-NULL. For the IDA, we mark slots as being IDR_FREE while they still
+ * have empty bits, but the IDA only stores NULL in slots when they're being
+ * deleted.
+ */
+static int calculate_count(struct radix_tree_root *root,
+ struct radix_tree_node *node, void __rcu **slot,
+ void *item, void *old)
+{
+ if (is_idr(root)) {
+ unsigned offset = get_slot_offset(node, slot);
+ bool free = node_tag_get(root, node, IDR_FREE, offset);
+ if (!free)
+ return 0;
+ if (!old)
+ return 1;
+ }
+ return !!item - !!old;
+}
+
+/**
+ * __radix_tree_replace - replace item in a slot
+ * @root: radix tree root
+ * @node: pointer to tree node
+ * @slot: pointer to slot in @node
+ * @item: new item to store in the slot.
+ *
+ * For use with __radix_tree_lookup(). Caller must hold tree write locked
+ * across slot lookup and replacement.
+ */
+void __radix_tree_replace(struct radix_tree_root *root,
+ struct radix_tree_node *node,
+ void __rcu **slot, void *item)
+{
+ void *old = rcu_dereference_raw(*slot);
+ int values = !!xa_is_value(item) - !!xa_is_value(old);
+ int count = calculate_count(root, node, slot, item, old);
+
+ /*
+ * This function supports replacing value entries and
+ * deleting entries, but that needs accounting against the
+ * node unless the slot is root->xa_head.
+ */
+ WARN_ON_ONCE(!node && (slot != (void __rcu **)&root->xa_head) &&
+ (count || values));
+ replace_slot(slot, item, node, count, values);
+
+ if (!node)
+ return;
+
+ delete_node(root, node);
+}
+
+/**
+ * radix_tree_replace_slot - replace item in a slot
+ * @root: radix tree root
+ * @slot: pointer to slot
+ * @item: new item to store in the slot.
+ *
+ * For use with radix_tree_lookup_slot() and
+ * radix_tree_gang_lookup_tag_slot(). Caller must hold tree write locked
+ * across slot lookup and replacement.
+ *
+ * NOTE: This cannot be used to switch between non-entries (empty slots),
+ * regular entries, and value entries, as that requires accounting
+ * inside the radix tree node. When switching from one type of entry to
+ * another, or when deleting, use __radix_tree_lookup() and
+ * __radix_tree_replace() or radix_tree_iter_replace().
+ */
+void radix_tree_replace_slot(struct radix_tree_root *root,
+ void __rcu **slot, void *item)
+{
+ __radix_tree_replace(root, NULL, slot, item);
+}
+EXPORT_SYMBOL(radix_tree_replace_slot);
+
+/**
+ * radix_tree_iter_replace - replace item in a slot
+ * @root: radix tree root
+ * @iter: iterator state
+ * @slot: pointer to slot
+ * @item: new item to store in the slot.
+ *
+ * For use with radix_tree_for_each_slot().
+ * Caller must hold tree write locked.
+ */
+void radix_tree_iter_replace(struct radix_tree_root *root,
+ const struct radix_tree_iter *iter,
+ void __rcu **slot, void *item)
+{
+ __radix_tree_replace(root, iter->node, slot, item);
+}
+
+static void node_tag_set(struct radix_tree_root *root,
+ struct radix_tree_node *node,
+ unsigned int tag, unsigned int offset)
+{
+ while (node) {
+ if (tag_get(node, tag, offset))
+ return;
+ tag_set(node, tag, offset);
+ offset = node->offset;
+ node = node->parent;
+ }
+
+ if (!root_tag_get(root, tag))
+ root_tag_set(root, tag);
+}
+
+/**
+ * radix_tree_tag_set - set a tag on a radix tree node
+ * @root: radix tree root
+ * @index: index key
+ * @tag: tag index
+ *
+ * Set the search tag (which must be < RADIX_TREE_MAX_TAGS)
+ * corresponding to @index in the radix tree, from
+ * the root all the way down to the leaf node.
+ *
+ * Returns the address of the tagged item. Setting a tag on a not-present
+ * item is a bug.
+ */
+void *radix_tree_tag_set(struct radix_tree_root *root,
+ unsigned long index, unsigned int tag)
+{
+ struct radix_tree_node *node, *parent;
+ unsigned long maxindex;
+
+ radix_tree_load_root(root, &node, &maxindex);
+ BUG_ON(index > maxindex);
+
+ while (radix_tree_is_internal_node(node)) {
+ unsigned offset;
+
+ parent = entry_to_node(node);
+ offset = radix_tree_descend(parent, &node, index);
+ BUG_ON(!node);
+
+ if (!tag_get(parent, tag, offset))
+ tag_set(parent, tag, offset);
+ }
+
+ /* set the root's tag bit */
+ if (!root_tag_get(root, tag))
+ root_tag_set(root, tag);
+
+ return node;
+}
+EXPORT_SYMBOL(radix_tree_tag_set);
+
+static void node_tag_clear(struct radix_tree_root *root,
+ struct radix_tree_node *node,
+ unsigned int tag, unsigned int offset)
+{
+ while (node) {
+ if (!tag_get(node, tag, offset))
+ return;
+ tag_clear(node, tag, offset);
+ if (any_tag_set(node, tag))
+ return;
+
+ offset = node->offset;
+ node = node->parent;
+ }
+
+ /* clear the root's tag bit */
+ if (root_tag_get(root, tag))
+ root_tag_clear(root, tag);
+}
+
+/**
+ * radix_tree_tag_clear - clear a tag on a radix tree node
+ * @root: radix tree root
+ * @index: index key
+ * @tag: tag index
+ *
+ * Clear the search tag (which must be < RADIX_TREE_MAX_TAGS)
+ * corresponding to @index in the radix tree. If this causes
+ * the leaf node to have no tags set then clear the tag in the
+ * next-to-leaf node, etc.
+ *
+ * Returns the address of the tagged item on success, else NULL, i.e. it
+ * has the same return value and semantics as radix_tree_lookup().
+ */
+void *radix_tree_tag_clear(struct radix_tree_root *root,
+ unsigned long index, unsigned int tag)
+{
+ struct radix_tree_node *node, *parent;
+ unsigned long maxindex;
+ int offset = 0;
+
+ radix_tree_load_root(root, &node, &maxindex);
+ if (index > maxindex)
+ return NULL;
+
+ parent = NULL;
+
+ while (radix_tree_is_internal_node(node)) {
+ parent = entry_to_node(node);
+ offset = radix_tree_descend(parent, &node, index);
+ }
+
+ if (node)
+ node_tag_clear(root, parent, tag, offset);
+
+ return node;
+}
+EXPORT_SYMBOL(radix_tree_tag_clear);
+
+/**
+ * radix_tree_iter_tag_clear - clear a tag on the current iterator entry
+ * @root: radix tree root
+ * @iter: iterator state
+ * @tag: tag to clear
+ */
+void radix_tree_iter_tag_clear(struct radix_tree_root *root,
+ const struct radix_tree_iter *iter, unsigned int tag)
+{
+ node_tag_clear(root, iter->node, tag, iter_offset(iter));
+}
+
+/**
+ * radix_tree_tag_get - get a tag on a radix tree node
+ * @root: radix tree root
+ * @index: index key
+ * @tag: tag index (< RADIX_TREE_MAX_TAGS)
+ *
+ * Return values:
+ *
+ * 0: tag not present or not set
+ * 1: tag set
+ *
+ * Note that the return value of this function may not be relied on, even if
+ * the RCU lock is held, unless tag modification and node deletion are excluded
+ * from concurrency.
+ */
+int radix_tree_tag_get(const struct radix_tree_root *root,
+ unsigned long index, unsigned int tag)
+{
+ struct radix_tree_node *node, *parent;
+ unsigned long maxindex;
+
+ if (!root_tag_get(root, tag))
+ return 0;
+
+ radix_tree_load_root(root, &node, &maxindex);
+ if (index > maxindex)
+ return 0;
+
+ while (radix_tree_is_internal_node(node)) {
+ unsigned offset;
+
+ parent = entry_to_node(node);
+ offset = radix_tree_descend(parent, &node, index);
+
+ if (!tag_get(parent, tag, offset))
+ return 0;
+ if (node == RADIX_TREE_RETRY)
+ break;
+ }
+
+ return 1;
+}
+EXPORT_SYMBOL(radix_tree_tag_get);
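+
+/*
+ * Tag round-trip sketch (hypothetical 'my_tree' with a present item at
+ * 'index'; MY_TAG is a hypothetical tag index < RADIX_TREE_MAX_TAGS).
+ * Setting a tag marks the path from root to leaf; clearing it propagates
+ * back up as the per-node tag bitmaps empty out:
+ *
+ *	radix_tree_tag_set(&my_tree, index, MY_TAG);
+ *	WARN_ON(!radix_tree_tag_get(&my_tree, index, MY_TAG));
+ *	radix_tree_tag_clear(&my_tree, index, MY_TAG);
+ */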
+
+/* Construct iter->tags bit-mask from node->tags[tag] array */
+static void set_iter_tags(struct radix_tree_iter *iter,
+ struct radix_tree_node *node, unsigned offset,
+ unsigned tag)
+{
+ unsigned tag_long = offset / BITS_PER_LONG;
+ unsigned tag_bit = offset % BITS_PER_LONG;
+
+ if (!node) {
+ iter->tags = 1;
+ return;
+ }
+
+ iter->tags = node->tags[tag][tag_long] >> tag_bit;
+
+ /* This never happens if RADIX_TREE_TAG_LONGS == 1 */
+ if (tag_long < RADIX_TREE_TAG_LONGS - 1) {
+ /* Pick tags from next element */
+ if (tag_bit)
+ iter->tags |= node->tags[tag][tag_long + 1] <<
+ (BITS_PER_LONG - tag_bit);
+ /* Clip chunk size, here only BITS_PER_LONG tags */
+ iter->next_index = __radix_tree_iter_add(iter, BITS_PER_LONG);
+ }
+}
+
+void __rcu **radix_tree_iter_resume(void __rcu **slot,
+ struct radix_tree_iter *iter)
+{
+ iter->index = __radix_tree_iter_add(iter, 1);
+ iter->next_index = iter->index;
+ iter->tags = 0;
+ return NULL;
+}
+EXPORT_SYMBOL(radix_tree_iter_resume);
+
+/**
+ * radix_tree_next_chunk - find next chunk of slots for iteration
+ *
+ * @root: radix tree root
+ * @iter: iterator state
+ * @flags: RADIX_TREE_ITER_* flags and tag index
+ * Returns: pointer to the first slot of the chunk, or NULL if the iteration is over
+ */
+void __rcu **radix_tree_next_chunk(const struct radix_tree_root *root,
+ struct radix_tree_iter *iter, unsigned flags)
+{
+ unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK;
+ struct radix_tree_node *node, *child;
+ unsigned long index, offset, maxindex;
+
+ if ((flags & RADIX_TREE_ITER_TAGGED) && !root_tag_get(root, tag))
+ return NULL;
+
+ /*
+ * Catch next_index overflow after ~0UL. iter->index never overflows
+ * during iterating; it can be zero only at the beginning.
+ * And we cannot overflow iter->next_index in a single step,
+ * because RADIX_TREE_MAP_SHIFT < BITS_PER_LONG.
+ *
+ * This condition is also used by radix_tree_next_slot() to stop
+ * contiguous iterating, and to forbid switching to the next chunk.
+ */
+ index = iter->next_index;
+ if (!index && iter->index)
+ return NULL;
+
+ restart:
+ radix_tree_load_root(root, &child, &maxindex);
+ if (index > maxindex)
+ return NULL;
+ if (!child)
+ return NULL;
+
+ if (!radix_tree_is_internal_node(child)) {
+ /* Single-slot tree */
+ iter->index = index;
+ iter->next_index = maxindex + 1;
+ iter->tags = 1;
+ iter->node = NULL;
+ return (void __rcu **)&root->xa_head;
+ }
+
+ do {
+ node = entry_to_node(child);
+ offset = radix_tree_descend(node, &child, index);
+
+ if ((flags & RADIX_TREE_ITER_TAGGED) ?
+ !tag_get(node, tag, offset) : !child) {
+ /* Hole detected */
+ if (flags & RADIX_TREE_ITER_CONTIG)
+ return NULL;
+
+ if (flags & RADIX_TREE_ITER_TAGGED)
+ offset = radix_tree_find_next_bit(node, tag,
+ offset + 1);
+ else
+ while (++offset < RADIX_TREE_MAP_SIZE) {
+ void *slot = rcu_dereference_raw(
+ node->slots[offset]);
+ if (slot)
+ break;
+ }
+ index &= ~node_maxindex(node);
+ index += offset << node->shift;
+ /* Overflow after ~0UL */
+ if (!index)
+ return NULL;
+ if (offset == RADIX_TREE_MAP_SIZE)
+ goto restart;
+ child = rcu_dereference_raw(node->slots[offset]);
+ }
+
+ if (!child)
+ goto restart;
+ if (child == RADIX_TREE_RETRY)
+ break;
+ } while (node->shift && radix_tree_is_internal_node(child));
+
+ /* Update the iterator state */
+ iter->index = (index & ~node_maxindex(node)) | offset;
+ iter->next_index = (index | node_maxindex(node)) + 1;
+ iter->node = node;
+
+ if (flags & RADIX_TREE_ITER_TAGGED)
+ set_iter_tags(iter, node, offset, tag);
+
+ return node->slots + offset;
+}
+EXPORT_SYMBOL(radix_tree_next_chunk);
+
+/**
+ * radix_tree_gang_lookup - perform multiple lookup on a radix tree
+ * @root: radix tree root
+ * @results: where the results of the lookup are placed
+ * @first_index: start the lookup from this key
+ * @max_items: place up to this many items at *results
+ *
+ * Performs an index-ascending scan of the tree for present items. Places
+ * them at *@results and returns the number of items which were placed at
+ * *@results.
+ *
+ * The implementation is naive.
+ *
+ * Like radix_tree_lookup, radix_tree_gang_lookup may be called under
+ * rcu_read_lock. In this case, rather than the returned results being
+ * an atomic snapshot of the tree at a single point in time, the
+ * semantics of an RCU-protected gang lookup are as though multiple
+ * radix_tree_lookups have been issued under individual locks, with the
+ * results stored in 'results'.
+ */
+unsigned int
+radix_tree_gang_lookup(const struct radix_tree_root *root, void **results,
+ unsigned long first_index, unsigned int max_items)
+{
+ struct radix_tree_iter iter;
+ void __rcu **slot;
+ unsigned int ret = 0;
+
+ if (unlikely(!max_items))
+ return 0;
+
+ radix_tree_for_each_slot(slot, root, &iter, first_index) {
+ results[ret] = rcu_dereference_raw(*slot);
+ if (!results[ret])
+ continue;
+ if (radix_tree_is_internal_node(results[ret])) {
+ slot = radix_tree_iter_retry(&iter);
+ continue;
+ }
+ if (++ret == max_items)
+ break;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(radix_tree_gang_lookup);
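+
+/*
+ * Direct iteration sketch (hypothetical 'my_tree'): callers can open-code
+ * the iterator used above, retrying on internal entries the same way
+ * radix_tree_gang_lookup() does:
+ *
+ *	struct radix_tree_iter iter;
+ *	void __rcu **slot;
+ *
+ *	rcu_read_lock();
+ *	radix_tree_for_each_slot(slot, &my_tree, &iter, 0) {
+ *		void *entry = radix_tree_deref_slot(slot);
+ *		if (radix_tree_deref_retry(entry)) {
+ *			slot = radix_tree_iter_retry(&iter);
+ *			continue;
+ *		}
+ *		... use entry ...
+ *	}
+ *	rcu_read_unlock();
+ */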
+
+/**
+ * radix_tree_gang_lookup_tag - perform multiple lookup on a radix tree
+ * based on a tag
+ * @root: radix tree root
+ * @results: where the results of the lookup are placed
+ * @first_index: start the lookup from this key
+ * @max_items: place up to this many items at *results
+ * @tag: the tag index (< RADIX_TREE_MAX_TAGS)
+ *
+ * Performs an index-ascending scan of the tree for present items which
+ * have the tag indexed by @tag set. Places the items at *@results and
+ * returns the number of items which were placed at *@results.
+ */
+unsigned int
+radix_tree_gang_lookup_tag(const struct radix_tree_root *root, void **results,
+ unsigned long first_index, unsigned int max_items,
+ unsigned int tag)
+{
+ struct radix_tree_iter iter;
+ void __rcu **slot;
+ unsigned int ret = 0;
+
+ if (unlikely(!max_items))
+ return 0;
+
+ radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) {
+ results[ret] = rcu_dereference_raw(*slot);
+ if (!results[ret])
+ continue;
+ if (radix_tree_is_internal_node(results[ret])) {
+ slot = radix_tree_iter_retry(&iter);
+ continue;
+ }
+ if (++ret == max_items)
+ break;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(radix_tree_gang_lookup_tag);
+
+/**
+ * radix_tree_gang_lookup_tag_slot - perform multiple slot lookup on a
+ * radix tree based on a tag
+ * @root: radix tree root
+ * @results: where the results of the lookup are placed
+ * @first_index: start the lookup from this key
+ * @max_items: place up to this many items at *results
+ * @tag: the tag index (< RADIX_TREE_MAX_TAGS)
+ *
+ * Performs an index-ascending scan of the tree for present items which
+ * have the tag indexed by @tag set. Places the slots at *@results and
+ * returns the number of slots which were placed at *@results.
+ */
+unsigned int
+radix_tree_gang_lookup_tag_slot(const struct radix_tree_root *root,
+ void __rcu ***results, unsigned long first_index,
+ unsigned int max_items, unsigned int tag)
+{
+ struct radix_tree_iter iter;
+ void __rcu **slot;
+ unsigned int ret = 0;
+
+ if (unlikely(!max_items))
+ return 0;
+
+ radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) {
+ results[ret] = slot;
+ if (++ret == max_items)
+ break;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot);
+
+static bool __radix_tree_delete(struct radix_tree_root *root,
+ struct radix_tree_node *node, void __rcu **slot)
+{
+ void *old = rcu_dereference_raw(*slot);
+ int values = xa_is_value(old) ? -1 : 0;
+ unsigned offset = get_slot_offset(node, slot);
+ int tag;
+
+ if (is_idr(root))
+ node_tag_set(root, node, IDR_FREE, offset);
+ else
+ for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
+ node_tag_clear(root, node, tag, offset);
+
+ replace_slot(slot, NULL, node, -1, values);
+ return node && delete_node(root, node);
+}
+
+/**
+ * radix_tree_iter_delete - delete the entry at this iterator position
+ * @root: radix tree root
+ * @iter: iterator state
+ * @slot: pointer to slot
+ *
+ * Delete the entry at the position currently pointed to by the iterator.
+ * This may result in the current node being freed; if it is, the iterator
+ * is advanced so that it will not reference the freed memory. This
+ * function may be called without any locking if there are no other threads
+ * which can access this tree.
+ */
+void radix_tree_iter_delete(struct radix_tree_root *root,
+ struct radix_tree_iter *iter, void __rcu **slot)
+{
+ if (__radix_tree_delete(root, iter->node, slot))
+ iter->index = iter->next_index;
+}
+EXPORT_SYMBOL(radix_tree_iter_delete);
+
+/**
+ * radix_tree_delete_item - delete an item from a radix tree
+ * @root: radix tree root
+ * @index: index key
+ * @item: expected item
+ *
+ * Remove @item at @index from the radix tree rooted at @root.
+ *
+ * Return: the deleted entry, or %NULL if it was not present
+ * or the entry at the given @index was not @item.
+ */
+void *radix_tree_delete_item(struct radix_tree_root *root,
+ unsigned long index, void *item)
+{
+ struct radix_tree_node *node = NULL;
+ void __rcu **slot = NULL;
+ void *entry;
+
+ entry = __radix_tree_lookup(root, index, &node, &slot);
+ if (!slot)
+ return NULL;
+ if (!entry && (!is_idr(root) || node_tag_get(root, node, IDR_FREE,
+ get_slot_offset(node, slot))))
+ return NULL;
+
+ if (item && entry != item)
+ return NULL;
+
+ __radix_tree_delete(root, node, slot);
+
+ return entry;
+}
+EXPORT_SYMBOL(radix_tree_delete_item);
+
+/**
+ * radix_tree_delete - delete an entry from a radix tree
+ * @root: radix tree root
+ * @index: index key
+ *
+ * Remove the entry at @index from the radix tree rooted at @root.
+ *
+ * Return: The deleted entry, or %NULL if it was not present.
+ */
+void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
+{
+ return radix_tree_delete_item(root, index, NULL);
+}
+EXPORT_SYMBOL(radix_tree_delete);
+
+/**
+ * radix_tree_tagged - test whether any items in the tree are tagged
+ * @root: radix tree root
+ * @tag: tag to test
+ */
+int radix_tree_tagged(const struct radix_tree_root *root, unsigned int tag)
+{
+ return root_tag_get(root, tag);
+}
+EXPORT_SYMBOL(radix_tree_tagged);
+
+/**
+ * idr_preload - preload for idr_alloc()
+ * @gfp_mask: allocation mask to use for preloading
+ *
+ * Preallocate memory to use for the next call to idr_alloc(). This function
+ * returns with preemption disabled. It will be enabled by idr_preload_end().
+ */
+void idr_preload(gfp_t gfp_mask)
+{
+ if (__radix_tree_preload(gfp_mask, IDR_PRELOAD_SIZE))
+ local_lock(&radix_tree_preloads.lock);
+}
+EXPORT_SYMBOL(idr_preload);
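+
+/*
+ * Typical idr_preload() pattern (sketch; 'my_idr', 'my_lock' and 'ptr'
+ * are hypothetical caller state). GFP_NOWAIT is used inside the lock
+ * because the preload has already done the sleeping allocations:
+ *
+ *	idr_preload(GFP_KERNEL);
+ *	spin_lock(&my_lock);
+ *	id = idr_alloc(&my_idr, ptr, 0, 0, GFP_NOWAIT);
+ *	spin_unlock(&my_lock);
+ *	idr_preload_end();
+ */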
+
+void __rcu **idr_get_free(struct radix_tree_root *root,
+ struct radix_tree_iter *iter, gfp_t gfp,
+ unsigned long max)
+{
+ struct radix_tree_node *node = NULL, *child;
+ void __rcu **slot = (void __rcu **)&root->xa_head;
+ unsigned long maxindex, start = iter->next_index;
+ unsigned int shift, offset = 0;
+
+ grow:
+ shift = radix_tree_load_root(root, &child, &maxindex);
+ if (!radix_tree_tagged(root, IDR_FREE))
+ start = max(start, maxindex + 1);
+ if (start > max)
+ return ERR_PTR(-ENOSPC);
+
+ if (start > maxindex) {
+ int error = radix_tree_extend(root, gfp, start, shift);
+ if (error < 0)
+ return ERR_PTR(error);
+ shift = error;
+ child = rcu_dereference_raw(root->xa_head);
+ }
+ if (start == 0 && shift == 0)
+ shift = RADIX_TREE_MAP_SHIFT;
+
+ while (shift) {
+ shift -= RADIX_TREE_MAP_SHIFT;
+ if (child == NULL) {
+ /* Have to add a child node. */
+ child = radix_tree_node_alloc(gfp, node, root, shift,
+ offset, 0, 0);
+ if (!child)
+ return ERR_PTR(-ENOMEM);
+ all_tag_set(child, IDR_FREE);
+ rcu_assign_pointer(*slot, node_to_entry(child));
+ if (node)
+ node->count++;
+ } else if (!radix_tree_is_internal_node(child))
+ break;
+
+ node = entry_to_node(child);
+ offset = radix_tree_descend(node, &child, start);
+ if (!tag_get(node, IDR_FREE, offset)) {
+ offset = radix_tree_find_next_bit(node, IDR_FREE,
+ offset + 1);
+ start = next_index(start, node, offset);
+ if (start > max || start == 0)
+ return ERR_PTR(-ENOSPC);
+ while (offset == RADIX_TREE_MAP_SIZE) {
+ offset = node->offset + 1;
+ node = node->parent;
+ if (!node)
+ goto grow;
+ shift = node->shift;
+ }
+ child = rcu_dereference_raw(node->slots[offset]);
+ }
+ slot = &node->slots[offset];
+ }
+
+ iter->index = start;
+ if (node)
+ iter->next_index = 1 + min(max, (start | node_maxindex(node)));
+ else
+ iter->next_index = 1;
+ iter->node = node;
+ set_iter_tags(iter, node, offset, IDR_FREE);
+
+ return slot;
+}
+
+/**
+ * idr_destroy - release all internal memory from an IDR
+ * @idr: idr handle
+ *
+ * After this function is called, the IDR is empty, and may be reused or
+ * the data structure containing it may be freed.
+ *
+ * A typical clean-up sequence for objects stored in an idr tree will use
+ * idr_for_each() to free all objects, if necessary, then idr_destroy() to
+ * free the memory used to keep track of those objects.
+ */
+void idr_destroy(struct idr *idr)
+{
+ struct radix_tree_node *node = rcu_dereference_raw(idr->idr_rt.xa_head);
+ if (radix_tree_is_internal_node(node))
+ radix_tree_free_nodes(node);
+ idr->idr_rt.xa_head = NULL;
+ root_tag_set(&idr->idr_rt, IDR_FREE);
+}
+EXPORT_SYMBOL(idr_destroy);
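+
+/*
+ * Clean-up sketch matching the sequence described above (hypothetical
+ * 'my_idr' holding kmalloc'ed objects):
+ *
+ *	struct my_obj *obj;
+ *	int id;
+ *
+ *	idr_for_each_entry(&my_idr, obj, id)
+ *		kfree(obj);
+ *	idr_destroy(&my_idr);
+ */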
+
+static void
+radix_tree_node_ctor(void *arg)
+{
+ struct radix_tree_node *node = arg;
+
+ memset(node, 0, sizeof(*node));
+ INIT_LIST_HEAD(&node->private_list);
+}
+
+static int radix_tree_cpu_dead(unsigned int cpu)
+{
+ struct radix_tree_preload *rtp;
+ struct radix_tree_node *node;
+
+ /* Free per-cpu pool of preloaded nodes */
+ rtp = &per_cpu(radix_tree_preloads, cpu);
+ while (rtp->nr) {
+ node = rtp->nodes;
+ rtp->nodes = node->parent;
+ kmem_cache_free(radix_tree_node_cachep, node);
+ rtp->nr--;
+ }
+ return 0;
+}
+
+void __init radix_tree_init(void)
+{
+ int ret;
+
+ BUILD_BUG_ON(RADIX_TREE_MAX_TAGS + __GFP_BITS_SHIFT > 32);
+ BUILD_BUG_ON(ROOT_IS_IDR & ~GFP_ZONEMASK);
+ BUILD_BUG_ON(XA_CHUNK_SIZE > 255);
+ radix_tree_node_cachep = kmem_cache_create("radix_tree_node",
+ sizeof(struct radix_tree_node), 0,
+ SLAB_PANIC | SLAB_RECLAIM_ACCOUNT,
+ radix_tree_node_ctor);
+ ret = cpuhp_setup_state_nocalls(CPUHP_RADIX_DEAD, "lib/radix:dead",
+ NULL, radix_tree_cpu_dead);
+ WARN_ON(ret < 0);
+}
diff --git a/lib/radix-tree.h b/lib/radix-tree.h
new file mode 100644
index 000000000..40d5c03e2
--- /dev/null
+++ b/lib/radix-tree.h
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* radix-tree helpers that are only shared with xarray */
+
+struct kmem_cache;
+struct rcu_head;
+
+extern struct kmem_cache *radix_tree_node_cachep;
+extern void radix_tree_node_rcu_free(struct rcu_head *head);
diff --git a/lib/raid6/.gitignore b/lib/raid6/.gitignore
new file mode 100644
index 000000000..6be57745a
--- /dev/null
+++ b/lib/raid6/.gitignore
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+mktables
+altivec*.c
+int*.c
+tables.c
+neon?.c
+s390vx?.c
+vpermxor*.c
diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile
new file mode 100644
index 000000000..035b0a4db
--- /dev/null
+++ b/lib/raid6/Makefile
@@ -0,0 +1,95 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_RAID6_PQ) += raid6_pq.o
+
+raid6_pq-y += algos.o recov.o tables.o int1.o int2.o int4.o \
+ int8.o int16.o int32.o
+
+raid6_pq-$(CONFIG_X86) += recov_ssse3.o recov_avx2.o mmx.o sse1.o sse2.o avx2.o avx512.o recov_avx512.o
+raid6_pq-$(CONFIG_ALTIVEC) += altivec1.o altivec2.o altivec4.o altivec8.o \
+ vpermxor1.o vpermxor2.o vpermxor4.o vpermxor8.o
+raid6_pq-$(CONFIG_KERNEL_MODE_NEON) += neon.o neon1.o neon2.o neon4.o neon8.o recov_neon.o recov_neon_inner.o
+raid6_pq-$(CONFIG_S390) += s390vx8.o recov_s390xc.o
+raid6_pq-$(CONFIG_LOONGARCH) += loongarch_simd.o recov_loongarch_simd.o
+
+hostprogs += mktables
+
+ifeq ($(CONFIG_ALTIVEC),y)
+altivec_flags := -maltivec $(call cc-option,-mabi=altivec)
+# Enable <altivec.h>
+altivec_flags += -isystem $(shell $(CC) -print-file-name=include)
+
+ifdef CONFIG_CC_IS_CLANG
+# clang ppc port does not yet support -maltivec when -msoft-float is
+# enabled. A future release of clang will resolve this
+# https://bugs.llvm.org/show_bug.cgi?id=31177
+CFLAGS_REMOVE_altivec1.o += -msoft-float
+CFLAGS_REMOVE_altivec2.o += -msoft-float
+CFLAGS_REMOVE_altivec4.o += -msoft-float
+CFLAGS_REMOVE_altivec8.o += -msoft-float
+CFLAGS_REMOVE_vpermxor1.o += -msoft-float
+CFLAGS_REMOVE_vpermxor2.o += -msoft-float
+CFLAGS_REMOVE_vpermxor4.o += -msoft-float
+CFLAGS_REMOVE_vpermxor8.o += -msoft-float
+endif
+endif
+
+# The GCC option -ffreestanding is required in order to compile code containing
+# ARM/NEON intrinsics in a non-C99-compliant environment (such as the kernel)
+ifeq ($(CONFIG_KERNEL_MODE_NEON),y)
+NEON_FLAGS := -ffreestanding
+# Enable <arm_neon.h>
+NEON_FLAGS += -isystem $(shell $(CC) -print-file-name=include)
+ifeq ($(ARCH),arm)
+NEON_FLAGS += -march=armv7-a -mfloat-abi=softfp -mfpu=neon
+endif
+CFLAGS_recov_neon_inner.o += $(NEON_FLAGS)
+ifeq ($(ARCH),arm64)
+CFLAGS_REMOVE_recov_neon_inner.o += -mgeneral-regs-only
+CFLAGS_REMOVE_neon1.o += -mgeneral-regs-only
+CFLAGS_REMOVE_neon2.o += -mgeneral-regs-only
+CFLAGS_REMOVE_neon4.o += -mgeneral-regs-only
+CFLAGS_REMOVE_neon8.o += -mgeneral-regs-only
+endif
+endif
+
+quiet_cmd_unroll = UNROLL $@
+ cmd_unroll = $(AWK) -v N=$* -f $(srctree)/$(src)/unroll.awk < $< > $@
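+# For example, the int4.c rule below expands roughly to:
+#   awk -v N=4 -f unroll.awk < int.uc > int4.c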
+
+targets += int1.c int2.c int4.c int8.c int16.c int32.c
+$(obj)/int%.c: $(src)/int.uc $(src)/unroll.awk FORCE
+ $(call if_changed,unroll)
+
+CFLAGS_altivec1.o += $(altivec_flags)
+CFLAGS_altivec2.o += $(altivec_flags)
+CFLAGS_altivec4.o += $(altivec_flags)
+CFLAGS_altivec8.o += $(altivec_flags)
+targets += altivec1.c altivec2.c altivec4.c altivec8.c
+$(obj)/altivec%.c: $(src)/altivec.uc $(src)/unroll.awk FORCE
+ $(call if_changed,unroll)
+
+CFLAGS_vpermxor1.o += $(altivec_flags)
+CFLAGS_vpermxor2.o += $(altivec_flags)
+CFLAGS_vpermxor4.o += $(altivec_flags)
+CFLAGS_vpermxor8.o += $(altivec_flags)
+targets += vpermxor1.c vpermxor2.c vpermxor4.c vpermxor8.c
+$(obj)/vpermxor%.c: $(src)/vpermxor.uc $(src)/unroll.awk FORCE
+ $(call if_changed,unroll)
+
+CFLAGS_neon1.o += $(NEON_FLAGS)
+CFLAGS_neon2.o += $(NEON_FLAGS)
+CFLAGS_neon4.o += $(NEON_FLAGS)
+CFLAGS_neon8.o += $(NEON_FLAGS)
+targets += neon1.c neon2.c neon4.c neon8.c
+$(obj)/neon%.c: $(src)/neon.uc $(src)/unroll.awk FORCE
+ $(call if_changed,unroll)
+
+targets += s390vx8.c
+$(obj)/s390vx%.c: $(src)/s390vx.uc $(src)/unroll.awk FORCE
+ $(call if_changed,unroll)
+
+quiet_cmd_mktable = TABLE $@
+ cmd_mktable = $(obj)/mktables > $@
+
+targets += tables.c
+$(obj)/tables.c: $(obj)/mktables FORCE
+ $(call if_changed,mktable)
diff --git a/lib/raid6/algos.c b/lib/raid6/algos.c
new file mode 100644
index 000000000..0ec534faf
--- /dev/null
+++ b/lib/raid6/algos.c
@@ -0,0 +1,295 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* -*- linux-c -*- ------------------------------------------------------- *
+ *
+ * Copyright 2002 H. Peter Anvin - All Rights Reserved
+ *
+ * ----------------------------------------------------------------------- */
+
+/*
+ * raid6/algos.c
+ *
+ * Algorithm list and algorithm selection for RAID-6
+ */
+
+#include <linux/raid/pq.h>
+#ifndef __KERNEL__
+#include <sys/mman.h>
+#include <stdio.h>
+#else
+#include <linux/module.h>
+#include <linux/gfp.h>
+/* In .bss so it's zeroed */
+const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(256)));
+EXPORT_SYMBOL(raid6_empty_zero_page);
+#endif
+
+struct raid6_calls raid6_call;
+EXPORT_SYMBOL_GPL(raid6_call);
+
+const struct raid6_calls * const raid6_algos[] = {
+#if defined(__i386__) && !defined(__arch_um__)
+#ifdef CONFIG_AS_AVX512
+ &raid6_avx512x2,
+ &raid6_avx512x1,
+#endif
+ &raid6_avx2x2,
+ &raid6_avx2x1,
+ &raid6_sse2x2,
+ &raid6_sse2x1,
+ &raid6_sse1x2,
+ &raid6_sse1x1,
+ &raid6_mmxx2,
+ &raid6_mmxx1,
+#endif
+#if defined(__x86_64__) && !defined(__arch_um__)
+#ifdef CONFIG_AS_AVX512
+ &raid6_avx512x4,
+ &raid6_avx512x2,
+ &raid6_avx512x1,
+#endif
+ &raid6_avx2x4,
+ &raid6_avx2x2,
+ &raid6_avx2x1,
+ &raid6_sse2x4,
+ &raid6_sse2x2,
+ &raid6_sse2x1,
+#endif
+#ifdef CONFIG_ALTIVEC
+ &raid6_vpermxor8,
+ &raid6_vpermxor4,
+ &raid6_vpermxor2,
+ &raid6_vpermxor1,
+ &raid6_altivec8,
+ &raid6_altivec4,
+ &raid6_altivec2,
+ &raid6_altivec1,
+#endif
+#if defined(CONFIG_S390)
+ &raid6_s390vx8,
+#endif
+#ifdef CONFIG_KERNEL_MODE_NEON
+ &raid6_neonx8,
+ &raid6_neonx4,
+ &raid6_neonx2,
+ &raid6_neonx1,
+#endif
+#ifdef CONFIG_LOONGARCH
+#ifdef CONFIG_CPU_HAS_LASX
+ &raid6_lasx,
+#endif
+#ifdef CONFIG_CPU_HAS_LSX
+ &raid6_lsx,
+#endif
+#endif
+#if defined(__ia64__)
+ &raid6_intx32,
+ &raid6_intx16,
+#endif
+ &raid6_intx8,
+ &raid6_intx4,
+ &raid6_intx2,
+ &raid6_intx1,
+ NULL
+};
+
+void (*raid6_2data_recov)(int, size_t, int, int, void **);
+EXPORT_SYMBOL_GPL(raid6_2data_recov);
+
+void (*raid6_datap_recov)(int, size_t, int, void **);
+EXPORT_SYMBOL_GPL(raid6_datap_recov);
+
+const struct raid6_recov_calls *const raid6_recov_algos[] = {
+#ifdef CONFIG_X86
+#ifdef CONFIG_AS_AVX512
+ &raid6_recov_avx512,
+#endif
+ &raid6_recov_avx2,
+ &raid6_recov_ssse3,
+#endif
+#ifdef CONFIG_S390
+ &raid6_recov_s390xc,
+#endif
+#if defined(CONFIG_KERNEL_MODE_NEON)
+ &raid6_recov_neon,
+#endif
+#ifdef CONFIG_LOONGARCH
+#ifdef CONFIG_CPU_HAS_LASX
+ &raid6_recov_lasx,
+#endif
+#ifdef CONFIG_CPU_HAS_LSX
+ &raid6_recov_lsx,
+#endif
+#endif
+ &raid6_recov_intx1,
+ NULL
+};
+
+#ifdef __KERNEL__
+#define RAID6_TIME_JIFFIES_LG2 4
+#else
+/* Need more time to be stable in userspace */
+#define RAID6_TIME_JIFFIES_LG2 9
+#define time_before(x, y) ((x) < (y))
+#endif
+
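+/*
+ * Benchmark arithmetic used below (sketch): each timed loop runs for a
+ * window of 2^RAID6_TIME_JIFFIES_LG2 jiffies and counts whole-stripe
+ * syndrome generations in 'perf'. One generation reads (disks-2) data
+ * pages, so the throughput in bytes/s is
+ *
+ *	perf * (disks-2) * PAGE_SIZE * HZ / 2^RAID6_TIME_JIFFIES_LG2
+ *
+ * which, converted to MB/s, is the shift expression
+ * (perf * HZ * (disks-2)) >> (20 - PAGE_SHIFT + RAID6_TIME_JIFFIES_LG2)
+ * seen in the pr_info() calls.
+ */
+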
+#define RAID6_TEST_DISKS 8
+#define RAID6_TEST_DISKS_ORDER 3
+
+static inline const struct raid6_recov_calls *raid6_choose_recov(void)
+{
+ const struct raid6_recov_calls *const *algo;
+ const struct raid6_recov_calls *best;
+
+ for (best = NULL, algo = raid6_recov_algos; *algo; algo++)
+ if (!best || (*algo)->priority > best->priority)
+ if (!(*algo)->valid || (*algo)->valid())
+ best = *algo;
+
+ if (best) {
+ raid6_2data_recov = best->data2;
+ raid6_datap_recov = best->datap;
+
+ pr_info("raid6: using %s recovery algorithm\n", best->name);
+ } else
+ pr_err("raid6: Yikes! No recovery algorithm found!\n");
+
+ return best;
+}
+
+static inline const struct raid6_calls *raid6_choose_gen(
+ void *(*const dptrs)[RAID6_TEST_DISKS], const int disks)
+{
+ unsigned long perf, bestgenperf, j0, j1;
+ int start = (disks>>1)-1, stop = disks-3; /* work on the second half of the disks */
+ const struct raid6_calls *const *algo;
+ const struct raid6_calls *best;
+
+ for (bestgenperf = 0, best = NULL, algo = raid6_algos; *algo; algo++) {
+ if (!best || (*algo)->priority >= best->priority) {
+ if ((*algo)->valid && !(*algo)->valid())
+ continue;
+
+ if (!IS_ENABLED(CONFIG_RAID6_PQ_BENCHMARK)) {
+ best = *algo;
+ break;
+ }
+
+ perf = 0;
+
+ preempt_disable();
+ j0 = jiffies;
+ while ((j1 = jiffies) == j0)
+ cpu_relax();
+ while (time_before(jiffies,
+ j1 + (1<<RAID6_TIME_JIFFIES_LG2))) {
+ (*algo)->gen_syndrome(disks, PAGE_SIZE, *dptrs);
+ perf++;
+ }
+ preempt_enable();
+
+ if (perf > bestgenperf) {
+ bestgenperf = perf;
+ best = *algo;
+ }
+ pr_info("raid6: %-8s gen() %5ld MB/s\n", (*algo)->name,
+ (perf * HZ * (disks-2)) >>
+ (20 - PAGE_SHIFT + RAID6_TIME_JIFFIES_LG2));
+ }
+ }
+
+ if (!best) {
+ pr_err("raid6: Yikes! No algorithm found!\n");
+ goto out;
+ }
+
+ raid6_call = *best;
+
+ if (!IS_ENABLED(CONFIG_RAID6_PQ_BENCHMARK)) {
+ pr_info("raid6: skipped pq benchmark and selected %s\n",
+ best->name);
+ goto out;
+ }
+
+ pr_info("raid6: using algorithm %s gen() %ld MB/s\n",
+ best->name,
+ (bestgenperf * HZ * (disks - 2)) >>
+ (20 - PAGE_SHIFT + RAID6_TIME_JIFFIES_LG2));
+
+ if (best->xor_syndrome) {
+ perf = 0;
+
+ preempt_disable();
+ j0 = jiffies;
+ while ((j1 = jiffies) == j0)
+ cpu_relax();
+ while (time_before(jiffies,
+ j1 + (1 << RAID6_TIME_JIFFIES_LG2))) {
+ best->xor_syndrome(disks, start, stop,
+ PAGE_SIZE, *dptrs);
+ perf++;
+ }
+ preempt_enable();
+
+ pr_info("raid6: .... xor() %ld MB/s, rmw enabled\n",
+ (perf * HZ * (disks - 2)) >>
+ (20 - PAGE_SHIFT + RAID6_TIME_JIFFIES_LG2 + 1));
+ }
+
+out:
+ return best;
+}
+
+
+/* Try to pick the best algorithm */
+/* This code uses the gfmul table as a convenient data set to abuse */
+
+int __init raid6_select_algo(void)
+{
+ const int disks = RAID6_TEST_DISKS;
+
+ const struct raid6_calls *gen_best;
+ const struct raid6_recov_calls *rec_best;
+ char *disk_ptr, *p;
+ void *dptrs[RAID6_TEST_DISKS];
+ int i, cycle;
+
+ /* prepare the buffer and fill it circularly with gfmul table */
+ disk_ptr = (char *)__get_free_pages(GFP_KERNEL, RAID6_TEST_DISKS_ORDER);
+ if (!disk_ptr) {
+ pr_err("raid6: Yikes! No memory available.\n");
+ return -ENOMEM;
+ }
+
+ p = disk_ptr;
+ for (i = 0; i < disks; i++)
+ dptrs[i] = p + PAGE_SIZE * i;
+
+ cycle = ((disks - 2) * PAGE_SIZE) / 65536;
+ for (i = 0; i < cycle; i++) {
+ memcpy(p, raid6_gfmul, 65536);
+ p += 65536;
+ }
+
+ if ((disks - 2) * PAGE_SIZE % 65536)
+ memcpy(p, raid6_gfmul, (disks - 2) * PAGE_SIZE % 65536);
+
+ /* select raid gen_syndrome function */
+ gen_best = raid6_choose_gen(&dptrs, disks);
+
+ /* select raid recover functions */
+ rec_best = raid6_choose_recov();
+
+ free_pages((unsigned long)disk_ptr, RAID6_TEST_DISKS_ORDER);
+
+ return gen_best && rec_best ? 0 : -EINVAL;
+}
+
+static void raid6_exit(void)
+{
+ do { } while (0);
+}
+
+subsys_initcall(raid6_select_algo);
+module_exit(raid6_exit);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("RAID6 Q-syndrome calculations");
diff --git a/lib/raid6/altivec.uc b/lib/raid6/altivec.uc
new file mode 100644
index 000000000..d20ed0d11
--- /dev/null
+++ b/lib/raid6/altivec.uc
@@ -0,0 +1,132 @@
+/* -*- linux-c -*- ------------------------------------------------------- *
+ *
+ * Copyright 2002-2004 H. Peter Anvin - All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 59 Temple Place Ste 330,
+ * Boston MA 02111-1307, USA; either version 2 of the License, or
+ * (at your option) any later version; incorporated herein by reference.
+ *
+ * ----------------------------------------------------------------------- */
+
+/*
+ * raid6altivec$#.c
+ *
+ * $#-way unrolled portable integer math RAID-6 instruction set
+ *
+ * This file is postprocessed using unroll.awk
+ *
+ * <benh> hpa: in process,
+ * you can just "steal" the vec unit with enable_kernel_altivec() (but
+ * bracket this with preempt_disable/enable or in a lock)
+ */
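+
+/*
+ * A rough example of the unroll.awk expansion: with N=2, "$#" becomes 2
+ * and each line containing "$$" is emitted once per lane with "$$"
+ * replaced by 0 and 1, so
+ *
+ *	unative_t wd$$, wq$$;
+ *
+ * expands to something like
+ *
+ *	unative_t wd0, wq0;
+ *	unative_t wd1, wq1;
+ */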
+
+#include <linux/raid/pq.h>
+
+#ifdef CONFIG_ALTIVEC
+
+#include <altivec.h>
+#ifdef __KERNEL__
+# include <asm/cputable.h>
+# include <asm/switch_to.h>
+#endif /* __KERNEL__ */
+
+/*
+ * This is the C data type to use. We use a vector of
+ * signed char so vec_cmpgt() will generate the right
+ * instruction.
+ */
+
+typedef vector signed char unative_t;
+
+#define NBYTES(x) ((vector signed char) {x,x,x,x, x,x,x,x, x,x,x,x, x,x,x,x})
+#define NSIZE sizeof(unative_t)
+
+/*
+ * The SHLBYTE() operation shifts each byte left by 1, *not*
+ * rolling over into the next byte
+ */
+static inline __attribute_const__ unative_t SHLBYTE(unative_t v)
+{
+ return vec_add(v,v);
+}
+
+/*
+ * The MASK() operation returns 0xFF in any byte for which the high
+ * bit is 1, 0x00 for any byte for which the high bit is 0.
+ */
+static inline __attribute_const__ unative_t MASK(unative_t v)
+{
+ unative_t zv = NBYTES(0);
+
+ /* vec_cmpgt returns a vector bool char; thus the need for the cast */
+ return (unative_t)vec_cmpgt(zv, v);
+}
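+
+/*
+ * Per byte, the SHLBYTE/MASK/x1d sequence in the inner loop below computes
+ * multiplication by 2 in GF(2^8) with reduction polynomial 0x11d. A scalar
+ * sketch of the same operation (hypothetical helper, not built here):
+ *
+ *	static inline u8 gf_mul2(u8 v)
+ *	{
+ *		return (v << 1) ^ ((v & 0x80) ? 0x1d : 0);
+ *	}
+ */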
+
+
+/* This is noinline to make damned sure that gcc doesn't move any of the
+ Altivec code around the enable/disable code */
+static void noinline
+raid6_altivec$#_gen_syndrome_real(int disks, size_t bytes, void **ptrs)
+{
+ u8 **dptr = (u8 **)ptrs;
+ u8 *p, *q;
+ int d, z, z0;
+
+ unative_t wd$$, wq$$, wp$$, w1$$, w2$$;
+ unative_t x1d = NBYTES(0x1d);
+
+ z0 = disks - 3; /* Highest data disk */
+ p = dptr[z0+1]; /* XOR parity */
+ q = dptr[z0+2]; /* RS syndrome */
+
+ for ( d = 0 ; d < bytes ; d += NSIZE*$# ) {
+ wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE];
+ for ( z = z0-1 ; z >= 0 ; z-- ) {
+ wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE];
+ wp$$ = vec_xor(wp$$, wd$$);
+ w2$$ = MASK(wq$$);
+ w1$$ = SHLBYTE(wq$$);
+ w2$$ = vec_and(w2$$, x1d);
+ w1$$ = vec_xor(w1$$, w2$$);
+ wq$$ = vec_xor(w1$$, wd$$);
+ }
+ *(unative_t *)&p[d+NSIZE*$$] = wp$$;
+ *(unative_t *)&q[d+NSIZE*$$] = wq$$;
+ }
+}
+
+static void raid6_altivec$#_gen_syndrome(int disks, size_t bytes, void **ptrs)
+{
+ preempt_disable();
+ enable_kernel_altivec();
+
+ raid6_altivec$#_gen_syndrome_real(disks, bytes, ptrs);
+
+ disable_kernel_altivec();
+ preempt_enable();
+}
+
+int raid6_have_altivec(void);
+#if $# == 1
+int raid6_have_altivec(void)
+{
+ /* This assumes either all CPUs have Altivec or none does */
+# ifdef __KERNEL__
+ return cpu_has_feature(CPU_FTR_ALTIVEC);
+# else
+ return 1;
+# endif
+}
+#endif
+
+const struct raid6_calls raid6_altivec$# = {
+ raid6_altivec$#_gen_syndrome,
+ NULL, /* XOR not yet implemented */
+ raid6_have_altivec,
+ "altivecx$#",
+ 0
+};
+
+#endif /* CONFIG_ALTIVEC */
diff --git a/lib/raid6/avx2.c b/lib/raid6/avx2.c
new file mode 100644
index 000000000..059024234
--- /dev/null
+++ b/lib/raid6/avx2.c
@@ -0,0 +1,470 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* -*- linux-c -*- ------------------------------------------------------- *
+ *
+ * Copyright (C) 2012 Intel Corporation
+ * Author: Yuanhan Liu <yuanhan.liu@linux.intel.com>
+ *
+ * Based on sse2.c: Copyright 2002 H. Peter Anvin - All Rights Reserved
+ *
+ * ----------------------------------------------------------------------- */
+
+/*
+ * AVX2 implementation of RAID-6 syndrome functions
+ *
+ */
+
+#include <linux/raid/pq.h>
+#include "x86.h"
+
+static const struct raid6_avx2_constants {
+ u64 x1d[4];
+} raid6_avx2_constants __aligned(32) = {
+ { 0x1d1d1d1d1d1d1d1dULL, 0x1d1d1d1d1d1d1d1dULL,
+ 0x1d1d1d1d1d1d1d1dULL, 0x1d1d1d1d1d1d1d1dULL,},
+};
+
+static int raid6_have_avx2(void)
+{
+ return boot_cpu_has(X86_FEATURE_AVX2) && boot_cpu_has(X86_FEATURE_AVX);
+}
+
+/*
+ * Plain AVX2 implementation
+ */
+static void raid6_avx21_gen_syndrome(int disks, size_t bytes, void **ptrs)
+{
+ u8 **dptr = (u8 **)ptrs;
+ u8 *p, *q;
+ int d, z, z0;
+
+ z0 = disks - 3; /* Highest data disk */
+ p = dptr[z0+1]; /* XOR parity */
+ q = dptr[z0+2]; /* RS syndrome */
+
+ kernel_fpu_begin();
+
+ asm volatile("vmovdqa %0,%%ymm0" : : "m" (raid6_avx2_constants.x1d[0]));
+ asm volatile("vpxor %ymm3,%ymm3,%ymm3"); /* Zero temp */
+
+ for (d = 0; d < bytes; d += 32) {
+ asm volatile("prefetchnta %0" : : "m" (dptr[z0][d]));
+ asm volatile("vmovdqa %0,%%ymm2" : : "m" (dptr[z0][d]));/* P[0] */
+ asm volatile("prefetchnta %0" : : "m" (dptr[z0-1][d]));
+ asm volatile("vmovdqa %ymm2,%ymm4");/* Q[0] */
+ asm volatile("vmovdqa %0,%%ymm6" : : "m" (dptr[z0-1][d]));
+ for (z = z0-2; z >= 0; z--) {
+ asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));
+ asm volatile("vpcmpgtb %ymm4,%ymm3,%ymm5");
+ asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
+ asm volatile("vpand %ymm0,%ymm5,%ymm5");
+ asm volatile("vpxor %ymm5,%ymm4,%ymm4");
+ asm volatile("vpxor %ymm6,%ymm2,%ymm2");
+ asm volatile("vpxor %ymm6,%ymm4,%ymm4");
+ asm volatile("vmovdqa %0,%%ymm6" : : "m" (dptr[z][d]));
+ }
+ asm volatile("vpcmpgtb %ymm4,%ymm3,%ymm5");
+ asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
+ asm volatile("vpand %ymm0,%ymm5,%ymm5");
+ asm volatile("vpxor %ymm5,%ymm4,%ymm4");
+ asm volatile("vpxor %ymm6,%ymm2,%ymm2");
+ asm volatile("vpxor %ymm6,%ymm4,%ymm4");
+
+ asm volatile("vmovntdq %%ymm2,%0" : "=m" (p[d]));
+ asm volatile("vpxor %ymm2,%ymm2,%ymm2");
+ asm volatile("vmovntdq %%ymm4,%0" : "=m" (q[d]));
+ asm volatile("vpxor %ymm4,%ymm4,%ymm4");
+ }
+
+ asm volatile("sfence" : : : "memory");
+ kernel_fpu_end();
+}
+
+static void raid6_avx21_xor_syndrome(int disks, int start, int stop,
+ size_t bytes, void **ptrs)
+{
+ u8 **dptr = (u8 **)ptrs;
+ u8 *p, *q;
+ int d, z, z0;
+
+ z0 = stop; /* P/Q right side optimization */
+ p = dptr[disks-2]; /* XOR parity */
+ q = dptr[disks-1]; /* RS syndrome */
+
+ kernel_fpu_begin();
+
+ asm volatile("vmovdqa %0,%%ymm0" : : "m" (raid6_avx2_constants.x1d[0]));
+
+ for (d = 0 ; d < bytes ; d += 32) {
+ asm volatile("vmovdqa %0,%%ymm4" :: "m" (dptr[z0][d]));
+ asm volatile("vmovdqa %0,%%ymm2" : : "m" (p[d]));
+ asm volatile("vpxor %ymm4,%ymm2,%ymm2");
+ /* P/Q data pages */
+ for (z = z0-1 ; z >= start ; z--) {
+ asm volatile("vpxor %ymm5,%ymm5,%ymm5");
+ asm volatile("vpcmpgtb %ymm4,%ymm5,%ymm5");
+ asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
+ asm volatile("vpand %ymm0,%ymm5,%ymm5");
+ asm volatile("vpxor %ymm5,%ymm4,%ymm4");
+ asm volatile("vmovdqa %0,%%ymm5" :: "m" (dptr[z][d]));
+ asm volatile("vpxor %ymm5,%ymm2,%ymm2");
+ asm volatile("vpxor %ymm5,%ymm4,%ymm4");
+ }
+ /* P/Q left side optimization */
+ for (z = start-1 ; z >= 0 ; z--) {
+ asm volatile("vpxor %ymm5,%ymm5,%ymm5");
+ asm volatile("vpcmpgtb %ymm4,%ymm5,%ymm5");
+ asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
+ asm volatile("vpand %ymm0,%ymm5,%ymm5");
+ asm volatile("vpxor %ymm5,%ymm4,%ymm4");
+ }
+ asm volatile("vpxor %0,%%ymm4,%%ymm4" : : "m" (q[d]));
+ /* Don't use movntdq for r/w memory area < cache line */
+ asm volatile("vmovdqa %%ymm4,%0" : "=m" (q[d]));
+ asm volatile("vmovdqa %%ymm2,%0" : "=m" (p[d]));
+ }
+
+ asm volatile("sfence" : : : "memory");
+ kernel_fpu_end();
+}
+
+const struct raid6_calls raid6_avx2x1 = {
+ raid6_avx21_gen_syndrome,
+ raid6_avx21_xor_syndrome,
+ raid6_have_avx2,
+ "avx2x1",
+ .priority = 2 /* Prefer AVX2 over priority 1 (SSE2 and others) */
+};
+
+/*
+ * Unrolled-by-2 AVX2 implementation
+ */
+static void raid6_avx22_gen_syndrome(int disks, size_t bytes, void **ptrs)
+{
+ u8 **dptr = (u8 **)ptrs;
+ u8 *p, *q;
+ int d, z, z0;
+
+ z0 = disks - 3; /* Highest data disk */
+ p = dptr[z0+1]; /* XOR parity */
+ q = dptr[z0+2]; /* RS syndrome */
+
+ kernel_fpu_begin();
+
+ asm volatile("vmovdqa %0,%%ymm0" : : "m" (raid6_avx2_constants.x1d[0]));
+ asm volatile("vpxor %ymm1,%ymm1,%ymm1"); /* Zero temp */
+
+ /* We uniformly assume a single prefetch covers at least 32 bytes */
+ for (d = 0; d < bytes; d += 64) {
+ asm volatile("prefetchnta %0" : : "m" (dptr[z0][d]));
+ asm volatile("prefetchnta %0" : : "m" (dptr[z0][d+32]));
+ asm volatile("vmovdqa %0,%%ymm2" : : "m" (dptr[z0][d]));/* P[0] */
+ asm volatile("vmovdqa %0,%%ymm3" : : "m" (dptr[z0][d+32]));/* P[1] */
+ asm volatile("vmovdqa %ymm2,%ymm4"); /* Q[0] */
+ asm volatile("vmovdqa %ymm3,%ymm6"); /* Q[1] */
+ for (z = z0-1; z >= 0; z--) {
+ asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));
+ asm volatile("prefetchnta %0" : : "m" (dptr[z][d+32]));
+ asm volatile("vpcmpgtb %ymm4,%ymm1,%ymm5");
+ asm volatile("vpcmpgtb %ymm6,%ymm1,%ymm7");
+ asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
+ asm volatile("vpaddb %ymm6,%ymm6,%ymm6");
+ asm volatile("vpand %ymm0,%ymm5,%ymm5");
+ asm volatile("vpand %ymm0,%ymm7,%ymm7");
+ asm volatile("vpxor %ymm5,%ymm4,%ymm4");
+ asm volatile("vpxor %ymm7,%ymm6,%ymm6");
+ asm volatile("vmovdqa %0,%%ymm5" : : "m" (dptr[z][d]));
+ asm volatile("vmovdqa %0,%%ymm7" : : "m" (dptr[z][d+32]));
+ asm volatile("vpxor %ymm5,%ymm2,%ymm2");
+ asm volatile("vpxor %ymm7,%ymm3,%ymm3");
+ asm volatile("vpxor %ymm5,%ymm4,%ymm4");
+ asm volatile("vpxor %ymm7,%ymm6,%ymm6");
+ }
+ asm volatile("vmovntdq %%ymm2,%0" : "=m" (p[d]));
+ asm volatile("vmovntdq %%ymm3,%0" : "=m" (p[d+32]));
+ asm volatile("vmovntdq %%ymm4,%0" : "=m" (q[d]));
+ asm volatile("vmovntdq %%ymm6,%0" : "=m" (q[d+32]));
+ }
+
+ asm volatile("sfence" : : : "memory");
+ kernel_fpu_end();
+}
+
+static void raid6_avx22_xor_syndrome(int disks, int start, int stop,
+ size_t bytes, void **ptrs)
+{
+ u8 **dptr = (u8 **)ptrs;
+ u8 *p, *q;
+ int d, z, z0;
+
+ z0 = stop; /* P/Q right side optimization */
+ p = dptr[disks-2]; /* XOR parity */
+ q = dptr[disks-1]; /* RS syndrome */
+
+ kernel_fpu_begin();
+
+ asm volatile("vmovdqa %0,%%ymm0" : : "m" (raid6_avx2_constants.x1d[0]));
+
+ for (d = 0 ; d < bytes ; d += 64) {
+ asm volatile("vmovdqa %0,%%ymm4" :: "m" (dptr[z0][d]));
+ asm volatile("vmovdqa %0,%%ymm6" :: "m" (dptr[z0][d+32]));
+ asm volatile("vmovdqa %0,%%ymm2" : : "m" (p[d]));
+ asm volatile("vmovdqa %0,%%ymm3" : : "m" (p[d+32]));
+ asm volatile("vpxor %ymm4,%ymm2,%ymm2");
+ asm volatile("vpxor %ymm6,%ymm3,%ymm3");
+ /* P/Q data pages */
+ for (z = z0-1 ; z >= start ; z--) {
+ asm volatile("vpxor %ymm5,%ymm5,%ymm5");
+ asm volatile("vpxor %ymm7,%ymm7,%ymm7");
+ asm volatile("vpcmpgtb %ymm4,%ymm5,%ymm5");
+ asm volatile("vpcmpgtb %ymm6,%ymm7,%ymm7");
+ asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
+ asm volatile("vpaddb %ymm6,%ymm6,%ymm6");
+ asm volatile("vpand %ymm0,%ymm5,%ymm5");
+ asm volatile("vpand %ymm0,%ymm7,%ymm7");
+ asm volatile("vpxor %ymm5,%ymm4,%ymm4");
+ asm volatile("vpxor %ymm7,%ymm6,%ymm6");
+ asm volatile("vmovdqa %0,%%ymm5" :: "m" (dptr[z][d]));
+ asm volatile("vmovdqa %0,%%ymm7"
+ :: "m" (dptr[z][d+32]));
+ asm volatile("vpxor %ymm5,%ymm2,%ymm2");
+ asm volatile("vpxor %ymm7,%ymm3,%ymm3");
+ asm volatile("vpxor %ymm5,%ymm4,%ymm4");
+ asm volatile("vpxor %ymm7,%ymm6,%ymm6");
+ }
+ /* P/Q left side optimization */
+ for (z = start-1 ; z >= 0 ; z--) {
+ asm volatile("vpxor %ymm5,%ymm5,%ymm5");
+ asm volatile("vpxor %ymm7,%ymm7,%ymm7");
+ asm volatile("vpcmpgtb %ymm4,%ymm5,%ymm5");
+ asm volatile("vpcmpgtb %ymm6,%ymm7,%ymm7");
+ asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
+ asm volatile("vpaddb %ymm6,%ymm6,%ymm6");
+ asm volatile("vpand %ymm0,%ymm5,%ymm5");
+ asm volatile("vpand %ymm0,%ymm7,%ymm7");
+ asm volatile("vpxor %ymm5,%ymm4,%ymm4");
+ asm volatile("vpxor %ymm7,%ymm6,%ymm6");
+ }
+ asm volatile("vpxor %0,%%ymm4,%%ymm4" : : "m" (q[d]));
+ asm volatile("vpxor %0,%%ymm6,%%ymm6" : : "m" (q[d+32]));
+ /* Don't use movntdq for r/w memory area < cache line */
+ asm volatile("vmovdqa %%ymm4,%0" : "=m" (q[d]));
+ asm volatile("vmovdqa %%ymm6,%0" : "=m" (q[d+32]));
+ asm volatile("vmovdqa %%ymm2,%0" : "=m" (p[d]));
+ asm volatile("vmovdqa %%ymm3,%0" : "=m" (p[d+32]));
+ }
+
+ asm volatile("sfence" : : : "memory");
+ kernel_fpu_end();
+}
+
+const struct raid6_calls raid6_avx2x2 = {
+ raid6_avx22_gen_syndrome,
+ raid6_avx22_xor_syndrome,
+ raid6_have_avx2,
+ "avx2x2",
+ .priority = 2 /* Prefer AVX2 over priority 1 (SSE2 and others) */
+};
+
+#ifdef CONFIG_X86_64
+
+/*
+ * Unrolled-by-4 AVX2 implementation
+ */
+static void raid6_avx24_gen_syndrome(int disks, size_t bytes, void **ptrs)
+{
+ u8 **dptr = (u8 **)ptrs;
+ u8 *p, *q;
+ int d, z, z0;
+
+ z0 = disks - 3; /* Highest data disk */
+ p = dptr[z0+1]; /* XOR parity */
+ q = dptr[z0+2]; /* RS syndrome */
+
+ kernel_fpu_begin();
+
+ asm volatile("vmovdqa %0,%%ymm0" : : "m" (raid6_avx2_constants.x1d[0]));
+ asm volatile("vpxor %ymm1,%ymm1,%ymm1"); /* Zero temp */
+ asm volatile("vpxor %ymm2,%ymm2,%ymm2"); /* P[0] */
+ asm volatile("vpxor %ymm3,%ymm3,%ymm3"); /* P[1] */
+ asm volatile("vpxor %ymm4,%ymm4,%ymm4"); /* Q[0] */
+ asm volatile("vpxor %ymm6,%ymm6,%ymm6"); /* Q[1] */
+ asm volatile("vpxor %ymm10,%ymm10,%ymm10"); /* P[2] */
+ asm volatile("vpxor %ymm11,%ymm11,%ymm11"); /* P[3] */
+ asm volatile("vpxor %ymm12,%ymm12,%ymm12"); /* Q[2] */
+ asm volatile("vpxor %ymm14,%ymm14,%ymm14"); /* Q[3] */
+
+ for (d = 0; d < bytes; d += 128) {
+ for (z = z0; z >= 0; z--) {
+ asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));
+ asm volatile("prefetchnta %0" : : "m" (dptr[z][d+32]));
+ asm volatile("prefetchnta %0" : : "m" (dptr[z][d+64]));
+ asm volatile("prefetchnta %0" : : "m" (dptr[z][d+96]));
+ asm volatile("vpcmpgtb %ymm4,%ymm1,%ymm5");
+ asm volatile("vpcmpgtb %ymm6,%ymm1,%ymm7");
+ asm volatile("vpcmpgtb %ymm12,%ymm1,%ymm13");
+ asm volatile("vpcmpgtb %ymm14,%ymm1,%ymm15");
+ asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
+ asm volatile("vpaddb %ymm6,%ymm6,%ymm6");
+ asm volatile("vpaddb %ymm12,%ymm12,%ymm12");
+ asm volatile("vpaddb %ymm14,%ymm14,%ymm14");
+ asm volatile("vpand %ymm0,%ymm5,%ymm5");
+ asm volatile("vpand %ymm0,%ymm7,%ymm7");
+ asm volatile("vpand %ymm0,%ymm13,%ymm13");
+ asm volatile("vpand %ymm0,%ymm15,%ymm15");
+ asm volatile("vpxor %ymm5,%ymm4,%ymm4");
+ asm volatile("vpxor %ymm7,%ymm6,%ymm6");
+ asm volatile("vpxor %ymm13,%ymm12,%ymm12");
+ asm volatile("vpxor %ymm15,%ymm14,%ymm14");
+ asm volatile("vmovdqa %0,%%ymm5" : : "m" (dptr[z][d]));
+ asm volatile("vmovdqa %0,%%ymm7" : : "m" (dptr[z][d+32]));
+ asm volatile("vmovdqa %0,%%ymm13" : : "m" (dptr[z][d+64]));
+ asm volatile("vmovdqa %0,%%ymm15" : : "m" (dptr[z][d+96]));
+ asm volatile("vpxor %ymm5,%ymm2,%ymm2");
+ asm volatile("vpxor %ymm7,%ymm3,%ymm3");
+ asm volatile("vpxor %ymm13,%ymm10,%ymm10");
+ asm volatile("vpxor %ymm15,%ymm11,%ymm11");
+ asm volatile("vpxor %ymm5,%ymm4,%ymm4");
+ asm volatile("vpxor %ymm7,%ymm6,%ymm6");
+ asm volatile("vpxor %ymm13,%ymm12,%ymm12");
+ asm volatile("vpxor %ymm15,%ymm14,%ymm14");
+ }
+ asm volatile("vmovntdq %%ymm2,%0" : "=m" (p[d]));
+ asm volatile("vpxor %ymm2,%ymm2,%ymm2");
+ asm volatile("vmovntdq %%ymm3,%0" : "=m" (p[d+32]));
+ asm volatile("vpxor %ymm3,%ymm3,%ymm3");
+ asm volatile("vmovntdq %%ymm10,%0" : "=m" (p[d+64]));
+ asm volatile("vpxor %ymm10,%ymm10,%ymm10");
+ asm volatile("vmovntdq %%ymm11,%0" : "=m" (p[d+96]));
+ asm volatile("vpxor %ymm11,%ymm11,%ymm11");
+ asm volatile("vmovntdq %%ymm4,%0" : "=m" (q[d]));
+ asm volatile("vpxor %ymm4,%ymm4,%ymm4");
+ asm volatile("vmovntdq %%ymm6,%0" : "=m" (q[d+32]));
+ asm volatile("vpxor %ymm6,%ymm6,%ymm6");
+ asm volatile("vmovntdq %%ymm12,%0" : "=m" (q[d+64]));
+ asm volatile("vpxor %ymm12,%ymm12,%ymm12");
+ asm volatile("vmovntdq %%ymm14,%0" : "=m" (q[d+96]));
+ asm volatile("vpxor %ymm14,%ymm14,%ymm14");
+ }
+
+ asm volatile("sfence" : : : "memory");
+ kernel_fpu_end();
+}
+
+static void raid6_avx24_xor_syndrome(int disks, int start, int stop,
+ size_t bytes, void **ptrs)
+{
+ u8 **dptr = (u8 **)ptrs;
+ u8 *p, *q;
+ int d, z, z0;
+
+ z0 = stop; /* P/Q right side optimization */
+ p = dptr[disks-2]; /* XOR parity */
+ q = dptr[disks-1]; /* RS syndrome */
+
+ kernel_fpu_begin();
+
+ asm volatile("vmovdqa %0,%%ymm0" :: "m" (raid6_avx2_constants.x1d[0]));
+
+ for (d = 0 ; d < bytes ; d += 128) {
+ asm volatile("vmovdqa %0,%%ymm4" :: "m" (dptr[z0][d]));
+ asm volatile("vmovdqa %0,%%ymm6" :: "m" (dptr[z0][d+32]));
+ asm volatile("vmovdqa %0,%%ymm12" :: "m" (dptr[z0][d+64]));
+ asm volatile("vmovdqa %0,%%ymm14" :: "m" (dptr[z0][d+96]));
+ asm volatile("vmovdqa %0,%%ymm2" : : "m" (p[d]));
+ asm volatile("vmovdqa %0,%%ymm3" : : "m" (p[d+32]));
+ asm volatile("vmovdqa %0,%%ymm10" : : "m" (p[d+64]));
+ asm volatile("vmovdqa %0,%%ymm11" : : "m" (p[d+96]));
+ asm volatile("vpxor %ymm4,%ymm2,%ymm2");
+ asm volatile("vpxor %ymm6,%ymm3,%ymm3");
+ asm volatile("vpxor %ymm12,%ymm10,%ymm10");
+ asm volatile("vpxor %ymm14,%ymm11,%ymm11");
+ /* P/Q data pages */
+ for (z = z0-1 ; z >= start ; z--) {
+ asm volatile("prefetchnta %0" :: "m" (dptr[z][d]));
+ asm volatile("prefetchnta %0" :: "m" (dptr[z][d+64]));
+ asm volatile("vpxor %ymm5,%ymm5,%ymm5");
+ asm volatile("vpxor %ymm7,%ymm7,%ymm7");
+ asm volatile("vpxor %ymm13,%ymm13,%ymm13");
+ asm volatile("vpxor %ymm15,%ymm15,%ymm15");
+ asm volatile("vpcmpgtb %ymm4,%ymm5,%ymm5");
+ asm volatile("vpcmpgtb %ymm6,%ymm7,%ymm7");
+ asm volatile("vpcmpgtb %ymm12,%ymm13,%ymm13");
+ asm volatile("vpcmpgtb %ymm14,%ymm15,%ymm15");
+ asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
+ asm volatile("vpaddb %ymm6,%ymm6,%ymm6");
+ asm volatile("vpaddb %ymm12,%ymm12,%ymm12");
+ asm volatile("vpaddb %ymm14,%ymm14,%ymm14");
+ asm volatile("vpand %ymm0,%ymm5,%ymm5");
+ asm volatile("vpand %ymm0,%ymm7,%ymm7");
+ asm volatile("vpand %ymm0,%ymm13,%ymm13");
+ asm volatile("vpand %ymm0,%ymm15,%ymm15");
+ asm volatile("vpxor %ymm5,%ymm4,%ymm4");
+ asm volatile("vpxor %ymm7,%ymm6,%ymm6");
+ asm volatile("vpxor %ymm13,%ymm12,%ymm12");
+ asm volatile("vpxor %ymm15,%ymm14,%ymm14");
+ asm volatile("vmovdqa %0,%%ymm5" :: "m" (dptr[z][d]));
+ asm volatile("vmovdqa %0,%%ymm7"
+ :: "m" (dptr[z][d+32]));
+ asm volatile("vmovdqa %0,%%ymm13"
+ :: "m" (dptr[z][d+64]));
+ asm volatile("vmovdqa %0,%%ymm15"
+ :: "m" (dptr[z][d+96]));
+ asm volatile("vpxor %ymm5,%ymm2,%ymm2");
+ asm volatile("vpxor %ymm7,%ymm3,%ymm3");
+ asm volatile("vpxor %ymm13,%ymm10,%ymm10");
+ asm volatile("vpxor %ymm15,%ymm11,%ymm11");
+ asm volatile("vpxor %ymm5,%ymm4,%ymm4");
+ asm volatile("vpxor %ymm7,%ymm6,%ymm6");
+ asm volatile("vpxor %ymm13,%ymm12,%ymm12");
+ asm volatile("vpxor %ymm15,%ymm14,%ymm14");
+ }
+ asm volatile("prefetchnta %0" :: "m" (q[d]));
+ asm volatile("prefetchnta %0" :: "m" (q[d+64]));
+ /* P/Q left side optimization */
+ for (z = start-1 ; z >= 0 ; z--) {
+ asm volatile("vpxor %ymm5,%ymm5,%ymm5");
+ asm volatile("vpxor %ymm7,%ymm7,%ymm7");
+ asm volatile("vpxor %ymm13,%ymm13,%ymm13");
+ asm volatile("vpxor %ymm15,%ymm15,%ymm15");
+ asm volatile("vpcmpgtb %ymm4,%ymm5,%ymm5");
+ asm volatile("vpcmpgtb %ymm6,%ymm7,%ymm7");
+ asm volatile("vpcmpgtb %ymm12,%ymm13,%ymm13");
+ asm volatile("vpcmpgtb %ymm14,%ymm15,%ymm15");
+ asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
+ asm volatile("vpaddb %ymm6,%ymm6,%ymm6");
+ asm volatile("vpaddb %ymm12,%ymm12,%ymm12");
+ asm volatile("vpaddb %ymm14,%ymm14,%ymm14");
+ asm volatile("vpand %ymm0,%ymm5,%ymm5");
+ asm volatile("vpand %ymm0,%ymm7,%ymm7");
+ asm volatile("vpand %ymm0,%ymm13,%ymm13");
+ asm volatile("vpand %ymm0,%ymm15,%ymm15");
+ asm volatile("vpxor %ymm5,%ymm4,%ymm4");
+ asm volatile("vpxor %ymm7,%ymm6,%ymm6");
+ asm volatile("vpxor %ymm13,%ymm12,%ymm12");
+ asm volatile("vpxor %ymm15,%ymm14,%ymm14");
+ }
+ asm volatile("vmovntdq %%ymm2,%0" : "=m" (p[d]));
+ asm volatile("vmovntdq %%ymm3,%0" : "=m" (p[d+32]));
+ asm volatile("vmovntdq %%ymm10,%0" : "=m" (p[d+64]));
+ asm volatile("vmovntdq %%ymm11,%0" : "=m" (p[d+96]));
+ asm volatile("vpxor %0,%%ymm4,%%ymm4" : : "m" (q[d]));
+ asm volatile("vpxor %0,%%ymm6,%%ymm6" : : "m" (q[d+32]));
+ asm volatile("vpxor %0,%%ymm12,%%ymm12" : : "m" (q[d+64]));
+ asm volatile("vpxor %0,%%ymm14,%%ymm14" : : "m" (q[d+96]));
+ asm volatile("vmovntdq %%ymm4,%0" : "=m" (q[d]));
+ asm volatile("vmovntdq %%ymm6,%0" : "=m" (q[d+32]));
+ asm volatile("vmovntdq %%ymm12,%0" : "=m" (q[d+64]));
+ asm volatile("vmovntdq %%ymm14,%0" : "=m" (q[d+96]));
+ }
+ asm volatile("sfence" : : : "memory");
+ kernel_fpu_end();
+}
+
+const struct raid6_calls raid6_avx2x4 = {
+ raid6_avx24_gen_syndrome,
+ raid6_avx24_xor_syndrome,
+ raid6_have_avx2,
+ "avx2x4",
+ .priority = 2 /* Prefer AVX2 over priority 1 (SSE2 and others) */
+};
+#endif /* CONFIG_X86_64 */
diff --git a/lib/raid6/avx512.c b/lib/raid6/avx512.c
new file mode 100644
index 000000000..9c3e822e1
--- /dev/null
+++ b/lib/raid6/avx512.c
@@ -0,0 +1,564 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* -*- linux-c -*- --------------------------------------------------------
+ *
+ * Copyright (C) 2016 Intel Corporation
+ *
+ * Author: Gayatri Kammela <gayatri.kammela@intel.com>
+ * Author: Megha Dey <megha.dey@linux.intel.com>
+ *
+ * Based on avx2.c: Copyright 2012 Yuanhan Liu - All Rights Reserved
+ * Based on sse2.c: Copyright 2002 H. Peter Anvin - All Rights Reserved
+ *
+ * -----------------------------------------------------------------------
+ */
+
+/*
+ * AVX512 implementation of RAID-6 syndrome functions
+ *
+ */
+
+#ifdef CONFIG_AS_AVX512
+
+#include <linux/raid/pq.h>
+#include "x86.h"
+
+static const struct raid6_avx512_constants {
+ u64 x1d[8];
+} raid6_avx512_constants __aligned(512/8) = {
+ { 0x1d1d1d1d1d1d1d1dULL, 0x1d1d1d1d1d1d1d1dULL,
+ 0x1d1d1d1d1d1d1d1dULL, 0x1d1d1d1d1d1d1d1dULL,
+ 0x1d1d1d1d1d1d1d1dULL, 0x1d1d1d1d1d1d1d1dULL,
+ 0x1d1d1d1d1d1d1d1dULL, 0x1d1d1d1d1d1d1d1dULL,},
+};
+
+static int raid6_have_avx512(void)
+{
+ return boot_cpu_has(X86_FEATURE_AVX2) &&
+ boot_cpu_has(X86_FEATURE_AVX) &&
+ boot_cpu_has(X86_FEATURE_AVX512F) &&
+ boot_cpu_has(X86_FEATURE_AVX512BW) &&
+ boot_cpu_has(X86_FEATURE_AVX512VL) &&
+ boot_cpu_has(X86_FEATURE_AVX512DQ);
+}
+
+static void raid6_avx5121_gen_syndrome(int disks, size_t bytes, void **ptrs)
+{
+ u8 **dptr = (u8 **)ptrs;
+ u8 *p, *q;
+ int d, z, z0;
+
+ z0 = disks - 3; /* Highest data disk */
+ p = dptr[z0+1]; /* XOR parity */
+ q = dptr[z0+2]; /* RS syndrome */
+
+ kernel_fpu_begin();
+
+ asm volatile("vmovdqa64 %0,%%zmm0\n\t"
+ "vpxorq %%zmm1,%%zmm1,%%zmm1" /* Zero temp */
+ :
+ : "m" (raid6_avx512_constants.x1d[0]));
+
+ for (d = 0; d < bytes; d += 64) {
+ asm volatile("prefetchnta %0\n\t"
+ "vmovdqa64 %0,%%zmm2\n\t" /* P[0] */
+ "prefetchnta %1\n\t"
+ "vmovdqa64 %%zmm2,%%zmm4\n\t" /* Q[0] */
+ "vmovdqa64 %1,%%zmm6"
+ :
+ : "m" (dptr[z0][d]), "m" (dptr[z0-1][d]));
+ for (z = z0-2; z >= 0; z--) {
+ asm volatile("prefetchnta %0\n\t"
+ "vpcmpgtb %%zmm4,%%zmm1,%%k1\n\t"
+ "vpmovm2b %%k1,%%zmm5\n\t"
+ "vpaddb %%zmm4,%%zmm4,%%zmm4\n\t"
+ "vpandq %%zmm0,%%zmm5,%%zmm5\n\t"
+ "vpxorq %%zmm5,%%zmm4,%%zmm4\n\t"
+ "vpxorq %%zmm6,%%zmm2,%%zmm2\n\t"
+ "vpxorq %%zmm6,%%zmm4,%%zmm4\n\t"
+ "vmovdqa64 %0,%%zmm6"
+ :
+ : "m" (dptr[z][d]));
+ }
+ asm volatile("vpcmpgtb %%zmm4,%%zmm1,%%k1\n\t"
+ "vpmovm2b %%k1,%%zmm5\n\t"
+ "vpaddb %%zmm4,%%zmm4,%%zmm4\n\t"
+ "vpandq %%zmm0,%%zmm5,%%zmm5\n\t"
+ "vpxorq %%zmm5,%%zmm4,%%zmm4\n\t"
+ "vpxorq %%zmm6,%%zmm2,%%zmm2\n\t"
+ "vpxorq %%zmm6,%%zmm4,%%zmm4\n\t"
+ "vmovntdq %%zmm2,%0\n\t"
+ "vpxorq %%zmm2,%%zmm2,%%zmm2\n\t"
+ "vmovntdq %%zmm4,%1\n\t"
+ "vpxorq %%zmm4,%%zmm4,%%zmm4"
+ :
+ : "m" (p[d]), "m" (q[d]));
+ }
+
+ asm volatile("sfence" : : : "memory");
+ kernel_fpu_end();
+}
+
+static void raid6_avx5121_xor_syndrome(int disks, int start, int stop,
+ size_t bytes, void **ptrs)
+{
+ u8 **dptr = (u8 **)ptrs;
+ u8 *p, *q;
+ int d, z, z0;
+
+ z0 = stop; /* P/Q right side optimization */
+ p = dptr[disks-2]; /* XOR parity */
+ q = dptr[disks-1]; /* RS syndrome */
+
+ kernel_fpu_begin();
+
+ asm volatile("vmovdqa64 %0,%%zmm0"
+ : : "m" (raid6_avx512_constants.x1d[0]));
+
+ for (d = 0 ; d < bytes ; d += 64) {
+ asm volatile("vmovdqa64 %0,%%zmm4\n\t"
+ "vmovdqa64 %1,%%zmm2\n\t"
+ "vpxorq %%zmm4,%%zmm2,%%zmm2"
+ :
+ : "m" (dptr[z0][d]), "m" (p[d]));
+ /* P/Q data pages */
+ for (z = z0-1 ; z >= start ; z--) {
+ asm volatile("vpxorq %%zmm5,%%zmm5,%%zmm5\n\t"
+ "vpcmpgtb %%zmm4,%%zmm5,%%k1\n\t"
+ "vpmovm2b %%k1,%%zmm5\n\t"
+ "vpaddb %%zmm4,%%zmm4,%%zmm4\n\t"
+ "vpandq %%zmm0,%%zmm5,%%zmm5\n\t"
+ "vpxorq %%zmm5,%%zmm4,%%zmm4\n\t"
+ "vmovdqa64 %0,%%zmm5\n\t"
+ "vpxorq %%zmm5,%%zmm2,%%zmm2\n\t"
+ "vpxorq %%zmm5,%%zmm4,%%zmm4"
+ :
+ : "m" (dptr[z][d]));
+ }
+ /* P/Q left side optimization */
+ for (z = start-1 ; z >= 0 ; z--) {
+ asm volatile("vpxorq %%zmm5,%%zmm5,%%zmm5\n\t"
+ "vpcmpgtb %%zmm4,%%zmm5,%%k1\n\t"
+ "vpmovm2b %%k1,%%zmm5\n\t"
+ "vpaddb %%zmm4,%%zmm4,%%zmm4\n\t"
+ "vpandq %%zmm0,%%zmm5,%%zmm5\n\t"
+ "vpxorq %%zmm5,%%zmm4,%%zmm4"
+ :
+ : );
+ }
+ asm volatile("vpxorq %0,%%zmm4,%%zmm4\n\t"
+ /* Don't use movntdq for r/w memory area < cache line */
+ "vmovdqa64 %%zmm4,%0\n\t"
+ "vmovdqa64 %%zmm2,%1"
+ :
+ : "m" (q[d]), "m" (p[d]));
+ }
+
+ asm volatile("sfence" : : : "memory");
+ kernel_fpu_end();
+}
+
+const struct raid6_calls raid6_avx512x1 = {
+ raid6_avx5121_gen_syndrome,
+ raid6_avx5121_xor_syndrome,
+ raid6_have_avx512,
+ "avx512x1",
+ .priority = 2 /* Prefer AVX512 over priority 1 (SSE2 and others) */
+};
+
+/*
+ * Unrolled-by-2 AVX512 implementation
+ */
+static void raid6_avx5122_gen_syndrome(int disks, size_t bytes, void **ptrs)
+{
+ u8 **dptr = (u8 **)ptrs;
+ u8 *p, *q;
+ int d, z, z0;
+
+ z0 = disks - 3; /* Highest data disk */
+ p = dptr[z0+1]; /* XOR parity */
+ q = dptr[z0+2]; /* RS syndrome */
+
+ kernel_fpu_begin();
+
+ asm volatile("vmovdqa64 %0,%%zmm0\n\t"
+ "vpxorq %%zmm1,%%zmm1,%%zmm1" /* Zero temp */
+ :
+ : "m" (raid6_avx512_constants.x1d[0]));
+
+ /* We uniformly assume a single prefetch covers at least 64 bytes */
+ for (d = 0; d < bytes; d += 128) {
+ asm volatile("prefetchnta %0\n\t"
+ "prefetchnta %1\n\t"
+ "vmovdqa64 %0,%%zmm2\n\t" /* P[0] */
+ "vmovdqa64 %1,%%zmm3\n\t" /* P[1] */
+ "vmovdqa64 %%zmm2,%%zmm4\n\t" /* Q[0] */
+ "vmovdqa64 %%zmm3,%%zmm6" /* Q[1] */
+ :
+ : "m" (dptr[z0][d]), "m" (dptr[z0][d+64]));
+ for (z = z0-1; z >= 0; z--) {
+ asm volatile("prefetchnta %0\n\t"
+ "prefetchnta %1\n\t"
+ "vpcmpgtb %%zmm4,%%zmm1,%%k1\n\t"
+ "vpcmpgtb %%zmm6,%%zmm1,%%k2\n\t"
+ "vpmovm2b %%k1,%%zmm5\n\t"
+ "vpmovm2b %%k2,%%zmm7\n\t"
+ "vpaddb %%zmm4,%%zmm4,%%zmm4\n\t"
+ "vpaddb %%zmm6,%%zmm6,%%zmm6\n\t"
+ "vpandq %%zmm0,%%zmm5,%%zmm5\n\t"
+ "vpandq %%zmm0,%%zmm7,%%zmm7\n\t"
+ "vpxorq %%zmm5,%%zmm4,%%zmm4\n\t"
+ "vpxorq %%zmm7,%%zmm6,%%zmm6\n\t"
+ "vmovdqa64 %0,%%zmm5\n\t"
+ "vmovdqa64 %1,%%zmm7\n\t"
+ "vpxorq %%zmm5,%%zmm2,%%zmm2\n\t"
+ "vpxorq %%zmm7,%%zmm3,%%zmm3\n\t"
+ "vpxorq %%zmm5,%%zmm4,%%zmm4\n\t"
+ "vpxorq %%zmm7,%%zmm6,%%zmm6"
+ :
+ : "m" (dptr[z][d]), "m" (dptr[z][d+64]));
+ }
+ asm volatile("vmovntdq %%zmm2,%0\n\t"
+ "vmovntdq %%zmm3,%1\n\t"
+ "vmovntdq %%zmm4,%2\n\t"
+ "vmovntdq %%zmm6,%3"
+ :
+ : "m" (p[d]), "m" (p[d+64]), "m" (q[d]),
+ "m" (q[d+64]));
+ }
+
+ asm volatile("sfence" : : : "memory");
+ kernel_fpu_end();
+}
+
+static void raid6_avx5122_xor_syndrome(int disks, int start, int stop,
+ size_t bytes, void **ptrs)
+{
+ u8 **dptr = (u8 **)ptrs;
+ u8 *p, *q;
+ int d, z, z0;
+
+ z0 = stop; /* P/Q right side optimization */
+ p = dptr[disks-2]; /* XOR parity */
+ q = dptr[disks-1]; /* RS syndrome */
+
+ kernel_fpu_begin();
+
+ asm volatile("vmovdqa64 %0,%%zmm0"
+ : : "m" (raid6_avx512_constants.x1d[0]));
+
+ for (d = 0 ; d < bytes ; d += 128) {
+ asm volatile("vmovdqa64 %0,%%zmm4\n\t"
+ "vmovdqa64 %1,%%zmm6\n\t"
+ "vmovdqa64 %2,%%zmm2\n\t"
+ "vmovdqa64 %3,%%zmm3\n\t"
+ "vpxorq %%zmm4,%%zmm2,%%zmm2\n\t"
+ "vpxorq %%zmm6,%%zmm3,%%zmm3"
+ :
+ : "m" (dptr[z0][d]), "m" (dptr[z0][d+64]),
+ "m" (p[d]), "m" (p[d+64]));
+ /* P/Q data pages */
+ for (z = z0-1 ; z >= start ; z--) {
+ asm volatile("vpxorq %%zmm5,%%zmm5,%%zmm5\n\t"
+ "vpxorq %%zmm7,%%zmm7,%%zmm7\n\t"
+ "vpcmpgtb %%zmm4,%%zmm5,%%k1\n\t"
+ "vpcmpgtb %%zmm6,%%zmm7,%%k2\n\t"
+ "vpmovm2b %%k1,%%zmm5\n\t"
+ "vpmovm2b %%k2,%%zmm7\n\t"
+ "vpaddb %%zmm4,%%zmm4,%%zmm4\n\t"
+ "vpaddb %%zmm6,%%zmm6,%%zmm6\n\t"
+ "vpandq %%zmm0,%%zmm5,%%zmm5\n\t"
+ "vpandq %%zmm0,%%zmm7,%%zmm7\n\t"
+ "vpxorq %%zmm5,%%zmm4,%%zmm4\n\t"
+ "vpxorq %%zmm7,%%zmm6,%%zmm6\n\t"
+ "vmovdqa64 %0,%%zmm5\n\t"
+ "vmovdqa64 %1,%%zmm7\n\t"
+ "vpxorq %%zmm5,%%zmm2,%%zmm2\n\t"
+ "vpxorq %%zmm7,%%zmm3,%%zmm3\n\t"
+ "vpxorq %%zmm5,%%zmm4,%%zmm4\n\t"
+ "vpxorq %%zmm7,%%zmm6,%%zmm6"
+ :
+ : "m" (dptr[z][d]), "m" (dptr[z][d+64]));
+ }
+ /* P/Q left side optimization */
+ for (z = start-1 ; z >= 0 ; z--) {
+ asm volatile("vpxorq %%zmm5,%%zmm5,%%zmm5\n\t"
+ "vpxorq %%zmm7,%%zmm7,%%zmm7\n\t"
+ "vpcmpgtb %%zmm4,%%zmm5,%%k1\n\t"
+ "vpcmpgtb %%zmm6,%%zmm7,%%k2\n\t"
+ "vpmovm2b %%k1,%%zmm5\n\t"
+ "vpmovm2b %%k2,%%zmm7\n\t"
+ "vpaddb %%zmm4,%%zmm4,%%zmm4\n\t"
+ "vpaddb %%zmm6,%%zmm6,%%zmm6\n\t"
+ "vpandq %%zmm0,%%zmm5,%%zmm5\n\t"
+ "vpandq %%zmm0,%%zmm7,%%zmm7\n\t"
+ "vpxorq %%zmm5,%%zmm4,%%zmm4\n\t"
+ "vpxorq %%zmm7,%%zmm6,%%zmm6"
+ :
+ : );
+ }
+ asm volatile("vpxorq %0,%%zmm4,%%zmm4\n\t"
+ "vpxorq %1,%%zmm6,%%zmm6\n\t"
+ /* Don't use movntdq for r/w
+ * memory area < cache line
+ */
+ "vmovdqa64 %%zmm4,%0\n\t"
+ "vmovdqa64 %%zmm6,%1\n\t"
+ "vmovdqa64 %%zmm2,%2\n\t"
+ "vmovdqa64 %%zmm3,%3"
+ :
+ : "m" (q[d]), "m" (q[d+64]), "m" (p[d]),
+ "m" (p[d+64]));
+ }
+
+ asm volatile("sfence" : : : "memory");
+ kernel_fpu_end();
+}
+
+const struct raid6_calls raid6_avx512x2 = {
+ raid6_avx5122_gen_syndrome,
+ raid6_avx5122_xor_syndrome,
+ raid6_have_avx512,
+ "avx512x2",
+ .priority = 2 /* Prefer AVX512 over priority 1 (SSE2 and others) */
+};
+
+#ifdef CONFIG_X86_64
+
+/*
+ * Unrolled-by-4 AVX512 implementation
+ */
+static void raid6_avx5124_gen_syndrome(int disks, size_t bytes, void **ptrs)
+{
+ u8 **dptr = (u8 **)ptrs;
+ u8 *p, *q;
+ int d, z, z0;
+
+ z0 = disks - 3; /* Highest data disk */
+ p = dptr[z0+1]; /* XOR parity */
+ q = dptr[z0+2]; /* RS syndrome */
+
+ kernel_fpu_begin();
+
+ asm volatile("vmovdqa64 %0,%%zmm0\n\t"
+ "vpxorq %%zmm1,%%zmm1,%%zmm1\n\t" /* Zero temp */
+ "vpxorq %%zmm2,%%zmm2,%%zmm2\n\t" /* P[0] */
+ "vpxorq %%zmm3,%%zmm3,%%zmm3\n\t" /* P[1] */
+ "vpxorq %%zmm4,%%zmm4,%%zmm4\n\t" /* Q[0] */
+ "vpxorq %%zmm6,%%zmm6,%%zmm6\n\t" /* Q[1] */
+ "vpxorq %%zmm10,%%zmm10,%%zmm10\n\t" /* P[2] */
+ "vpxorq %%zmm11,%%zmm11,%%zmm11\n\t" /* P[3] */
+ "vpxorq %%zmm12,%%zmm12,%%zmm12\n\t" /* Q[2] */
+ "vpxorq %%zmm14,%%zmm14,%%zmm14" /* Q[3] */
+ :
+ : "m" (raid6_avx512_constants.x1d[0]));
+
+ for (d = 0; d < bytes; d += 256) {
+ for (z = z0; z >= 0; z--) {
+ asm volatile("prefetchnta %0\n\t"
+ "prefetchnta %1\n\t"
+ "prefetchnta %2\n\t"
+ "prefetchnta %3\n\t"
+ "vpcmpgtb %%zmm4,%%zmm1,%%k1\n\t"
+ "vpcmpgtb %%zmm6,%%zmm1,%%k2\n\t"
+ "vpcmpgtb %%zmm12,%%zmm1,%%k3\n\t"
+ "vpcmpgtb %%zmm14,%%zmm1,%%k4\n\t"
+ "vpmovm2b %%k1,%%zmm5\n\t"
+ "vpmovm2b %%k2,%%zmm7\n\t"
+ "vpmovm2b %%k3,%%zmm13\n\t"
+ "vpmovm2b %%k4,%%zmm15\n\t"
+ "vpaddb %%zmm4,%%zmm4,%%zmm4\n\t"
+ "vpaddb %%zmm6,%%zmm6,%%zmm6\n\t"
+ "vpaddb %%zmm12,%%zmm12,%%zmm12\n\t"
+ "vpaddb %%zmm14,%%zmm14,%%zmm14\n\t"
+ "vpandq %%zmm0,%%zmm5,%%zmm5\n\t"
+ "vpandq %%zmm0,%%zmm7,%%zmm7\n\t"
+ "vpandq %%zmm0,%%zmm13,%%zmm13\n\t"
+ "vpandq %%zmm0,%%zmm15,%%zmm15\n\t"
+ "vpxorq %%zmm5,%%zmm4,%%zmm4\n\t"
+ "vpxorq %%zmm7,%%zmm6,%%zmm6\n\t"
+ "vpxorq %%zmm13,%%zmm12,%%zmm12\n\t"
+ "vpxorq %%zmm15,%%zmm14,%%zmm14\n\t"
+ "vmovdqa64 %0,%%zmm5\n\t"
+ "vmovdqa64 %1,%%zmm7\n\t"
+ "vmovdqa64 %2,%%zmm13\n\t"
+ "vmovdqa64 %3,%%zmm15\n\t"
+ "vpxorq %%zmm5,%%zmm2,%%zmm2\n\t"
+ "vpxorq %%zmm7,%%zmm3,%%zmm3\n\t"
+ "vpxorq %%zmm13,%%zmm10,%%zmm10\n\t"
+ "vpxorq %%zmm15,%%zmm11,%%zmm11\n"
+ "vpxorq %%zmm5,%%zmm4,%%zmm4\n\t"
+ "vpxorq %%zmm7,%%zmm6,%%zmm6\n\t"
+ "vpxorq %%zmm13,%%zmm12,%%zmm12\n\t"
+ "vpxorq %%zmm15,%%zmm14,%%zmm14"
+ :
+ : "m" (dptr[z][d]), "m" (dptr[z][d+64]),
+ "m" (dptr[z][d+128]), "m" (dptr[z][d+192]));
+ }
+ asm volatile("vmovntdq %%zmm2,%0\n\t"
+ "vpxorq %%zmm2,%%zmm2,%%zmm2\n\t"
+ "vmovntdq %%zmm3,%1\n\t"
+ "vpxorq %%zmm3,%%zmm3,%%zmm3\n\t"
+ "vmovntdq %%zmm10,%2\n\t"
+ "vpxorq %%zmm10,%%zmm10,%%zmm10\n\t"
+ "vmovntdq %%zmm11,%3\n\t"
+ "vpxorq %%zmm11,%%zmm11,%%zmm11\n\t"
+ "vmovntdq %%zmm4,%4\n\t"
+ "vpxorq %%zmm4,%%zmm4,%%zmm4\n\t"
+ "vmovntdq %%zmm6,%5\n\t"
+ "vpxorq %%zmm6,%%zmm6,%%zmm6\n\t"
+ "vmovntdq %%zmm12,%6\n\t"
+ "vpxorq %%zmm12,%%zmm12,%%zmm12\n\t"
+ "vmovntdq %%zmm14,%7\n\t"
+ "vpxorq %%zmm14,%%zmm14,%%zmm14"
+ :
+ : "m" (p[d]), "m" (p[d+64]), "m" (p[d+128]),
+ "m" (p[d+192]), "m" (q[d]), "m" (q[d+64]),
+ "m" (q[d+128]), "m" (q[d+192]));
+ }
+
+ asm volatile("sfence" : : : "memory");
+ kernel_fpu_end();
+}
+
+static void raid6_avx5124_xor_syndrome(int disks, int start, int stop,
+ size_t bytes, void **ptrs)
+{
+ u8 **dptr = (u8 **)ptrs;
+ u8 *p, *q;
+ int d, z, z0;
+
+ z0 = stop; /* P/Q right side optimization */
+ p = dptr[disks-2]; /* XOR parity */
+ q = dptr[disks-1]; /* RS syndrome */
+
+ kernel_fpu_begin();
+
+ asm volatile("vmovdqa64 %0,%%zmm0"
+ :: "m" (raid6_avx512_constants.x1d[0]));
+
+ for (d = 0 ; d < bytes ; d += 256) {
+ asm volatile("vmovdqa64 %0,%%zmm4\n\t"
+ "vmovdqa64 %1,%%zmm6\n\t"
+ "vmovdqa64 %2,%%zmm12\n\t"
+ "vmovdqa64 %3,%%zmm14\n\t"
+ "vmovdqa64 %4,%%zmm2\n\t"
+ "vmovdqa64 %5,%%zmm3\n\t"
+ "vmovdqa64 %6,%%zmm10\n\t"
+ "vmovdqa64 %7,%%zmm11\n\t"
+ "vpxorq %%zmm4,%%zmm2,%%zmm2\n\t"
+ "vpxorq %%zmm6,%%zmm3,%%zmm3\n\t"
+ "vpxorq %%zmm12,%%zmm10,%%zmm10\n\t"
+ "vpxorq %%zmm14,%%zmm11,%%zmm11"
+ :
+ : "m" (dptr[z0][d]), "m" (dptr[z0][d+64]),
+ "m" (dptr[z0][d+128]), "m" (dptr[z0][d+192]),
+ "m" (p[d]), "m" (p[d+64]), "m" (p[d+128]),
+ "m" (p[d+192]));
+ /* P/Q data pages */
+ for (z = z0-1 ; z >= start ; z--) {
+ asm volatile("vpxorq %%zmm5,%%zmm5,%%zmm5\n\t"
+ "vpxorq %%zmm7,%%zmm7,%%zmm7\n\t"
+ "vpxorq %%zmm13,%%zmm13,%%zmm13\n\t"
+ "vpxorq %%zmm15,%%zmm15,%%zmm15\n\t"
+ "prefetchnta %0\n\t"
+ "prefetchnta %2\n\t"
+ "vpcmpgtb %%zmm4,%%zmm5,%%k1\n\t"
+ "vpcmpgtb %%zmm6,%%zmm7,%%k2\n\t"
+ "vpcmpgtb %%zmm12,%%zmm13,%%k3\n\t"
+ "vpcmpgtb %%zmm14,%%zmm15,%%k4\n\t"
+ "vpmovm2b %%k1,%%zmm5\n\t"
+ "vpmovm2b %%k2,%%zmm7\n\t"
+ "vpmovm2b %%k3,%%zmm13\n\t"
+ "vpmovm2b %%k4,%%zmm15\n\t"
+ "vpaddb %%zmm4,%%zmm4,%%zmm4\n\t"
+ "vpaddb %%zmm6,%%zmm6,%%zmm6\n\t"
+ "vpaddb %%zmm12,%%zmm12,%%zmm12\n\t"
+ "vpaddb %%Zmm14,%%zmm14,%%zmm14\n\t"
+ "vpandq %%zmm0,%%zmm5,%%zmm5\n\t"
+ "vpandq %%zmm0,%%zmm7,%%zmm7\n\t"
+ "vpandq %%zmm0,%%zmm13,%%zmm13\n\t"
+ "vpandq %%zmm0,%%zmm15,%%zmm15\n\t"
+ "vpxorq %%zmm5,%%zmm4,%%zmm4\n\t"
+ "vpxorq %%zmm7,%%zmm6,%%zmm6\n\t"
+ "vpxorq %%zmm13,%%zmm12,%%zmm12\n\t"
+ "vpxorq %%zmm15,%%zmm14,%%zmm14\n\t"
+ "vmovdqa64 %0,%%zmm5\n\t"
+ "vmovdqa64 %1,%%zmm7\n\t"
+ "vmovdqa64 %2,%%zmm13\n\t"
+ "vmovdqa64 %3,%%zmm15\n\t"
+ "vpxorq %%zmm5,%%zmm2,%%zmm2\n\t"
+ "vpxorq %%zmm7,%%zmm3,%%zmm3\n\t"
+ "vpxorq %%zmm13,%%zmm10,%%zmm10\n\t"
+ "vpxorq %%zmm15,%%zmm11,%%zmm11\n\t"
+ "vpxorq %%zmm5,%%zmm4,%%zmm4\n\t"
+ "vpxorq %%zmm7,%%zmm6,%%zmm6\n\t"
+ "vpxorq %%zmm13,%%zmm12,%%zmm12\n\t"
+ "vpxorq %%zmm15,%%zmm14,%%zmm14"
+ :
+ : "m" (dptr[z][d]), "m" (dptr[z][d+64]),
+ "m" (dptr[z][d+128]),
+ "m" (dptr[z][d+192]));
+ }
+ asm volatile("prefetchnta %0\n\t"
+ "prefetchnta %1\n\t"
+ :
+ : "m" (q[d]), "m" (q[d+128]));
+ /* P/Q left side optimization */
+ for (z = start-1 ; z >= 0 ; z--) {
+ asm volatile("vpxorq %%zmm5,%%zmm5,%%zmm5\n\t"
+ "vpxorq %%zmm7,%%zmm7,%%zmm7\n\t"
+ "vpxorq %%zmm13,%%zmm13,%%zmm13\n\t"
+ "vpxorq %%zmm15,%%zmm15,%%zmm15\n\t"
+ "vpcmpgtb %%zmm4,%%zmm5,%%k1\n\t"
+ "vpcmpgtb %%zmm6,%%zmm7,%%k2\n\t"
+ "vpcmpgtb %%zmm12,%%zmm13,%%k3\n\t"
+ "vpcmpgtb %%zmm14,%%zmm15,%%k4\n\t"
+ "vpmovm2b %%k1,%%zmm5\n\t"
+ "vpmovm2b %%k2,%%zmm7\n\t"
+ "vpmovm2b %%k3,%%zmm13\n\t"
+ "vpmovm2b %%k4,%%zmm15\n\t"
+ "vpaddb %%zmm4,%%zmm4,%%zmm4\n\t"
+ "vpaddb %%zmm6,%%zmm6,%%zmm6\n\t"
+ "vpaddb %%zmm12,%%zmm12,%%zmm12\n\t"
+ "vpaddb %%zmm14,%%zmm14,%%zmm14\n\t"
+ "vpandq %%zmm0,%%zmm5,%%zmm5\n\t"
+ "vpandq %%zmm0,%%zmm7,%%zmm7\n\t"
+ "vpandq %%zmm0,%%zmm13,%%zmm13\n\t"
+ "vpandq %%zmm0,%%zmm15,%%zmm15\n\t"
+ "vpxorq %%zmm5,%%zmm4,%%zmm4\n\t"
+ "vpxorq %%zmm7,%%zmm6,%%zmm6\n\t"
+ "vpxorq %%zmm13,%%zmm12,%%zmm12\n\t"
+ "vpxorq %%zmm15,%%zmm14,%%zmm14"
+ :
+ : );
+ }
+ asm volatile("vmovntdq %%zmm2,%0\n\t"
+ "vmovntdq %%zmm3,%1\n\t"
+ "vmovntdq %%zmm10,%2\n\t"
+ "vmovntdq %%zmm11,%3\n\t"
+ "vpxorq %4,%%zmm4,%%zmm4\n\t"
+ "vpxorq %5,%%zmm6,%%zmm6\n\t"
+ "vpxorq %6,%%zmm12,%%zmm12\n\t"
+ "vpxorq %7,%%zmm14,%%zmm14\n\t"
+ "vmovntdq %%zmm4,%4\n\t"
+ "vmovntdq %%zmm6,%5\n\t"
+ "vmovntdq %%zmm12,%6\n\t"
+ "vmovntdq %%zmm14,%7"
+ :
+ : "m" (p[d]), "m" (p[d+64]), "m" (p[d+128]),
+ "m" (p[d+192]), "m" (q[d]), "m" (q[d+64]),
+ "m" (q[d+128]), "m" (q[d+192]));
+ }
+ asm volatile("sfence" : : : "memory");
+ kernel_fpu_end();
+}
+
+const struct raid6_calls raid6_avx512x4 = {
+ raid6_avx5124_gen_syndrome,
+ raid6_avx5124_xor_syndrome,
+ raid6_have_avx512,
+ "avx512x4",
+ .priority = 2 /* Prefer AVX512 over priority 1 (SSE2 and others) */
+};
+#endif /* CONFIG_X86_64 */
+
+#endif /* CONFIG_AS_AVX512 */
diff --git a/lib/raid6/int.uc b/lib/raid6/int.uc
new file mode 100644
index 000000000..558aeac93
--- /dev/null
+++ b/lib/raid6/int.uc
@@ -0,0 +1,156 @@
+/* -*- linux-c -*- ------------------------------------------------------- *
+ *
+ * Copyright 2002-2004 H. Peter Anvin - All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 59 Temple Place Ste 330,
+ * Boston MA 02111-1307, USA; either version 2 of the License, or
+ * (at your option) any later version; incorporated herein by reference.
+ *
+ * ----------------------------------------------------------------------- */
+
+/*
+ * int$#.c
+ *
+ * $#-way unrolled portable integer math RAID-6 instruction set
+ *
+ * This file is postprocessed using unroll.awk
+ */
+
+#include <linux/raid/pq.h>
+
+/*
+ * This is the C data type to use
+ */
+
+/* Change this from BITS_PER_LONG if there is something better... */
+#if BITS_PER_LONG == 64
+# define NBYTES(x) ((x) * 0x0101010101010101UL)
+# define NSIZE 8
+# define NSHIFT 3
+# define NSTRING "64"
+typedef u64 unative_t;
+#else
+# define NBYTES(x) ((x) * 0x01010101U)
+# define NSIZE 4
+# define NSHIFT 2
+# define NSTRING "32"
+typedef u32 unative_t;
+#endif
+
+
+
+/*
+ * IA-64 wants insane amounts of unrolling. On other architectures that
+ * is just a waste of space.
+ */
+#if ($# <= 8) || defined(__ia64__)
+
+
+/*
+ * These sub-operations are separate inlines since they can sometimes be
+ * specially optimized using architecture-specific hacks.
+ */
+
+/*
+ * The SHLBYTE() operation shifts each byte left by 1, *not*
+ * rolling over into the next byte
+ */
+static inline __attribute_const__ unative_t SHLBYTE(unative_t v)
+{
+ unative_t vv;
+
+ vv = (v << 1) & NBYTES(0xfe);
+ return vv;
+}
+
+/*
+ * The MASK() operation returns 0xFF in any byte for which the high
+ * bit is 1, 0x00 for any byte for which the high bit is 0.
+ */
+static inline __attribute_const__ unative_t MASK(unative_t v)
+{
+ unative_t vv;
+
+ vv = v & NBYTES(0x80);
+ vv = (vv << 1) - (vv >> 7); /* Overflow on the top bit is OK */
+ return vv;
+}
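A worked trace of that branchless step on a 32-bit word whose lanes hold {0x80, 0x00, 0x80, 0x00}: vv = 0x00800080, vv << 1 = 0x01000100, vv >> 7 = 0x00010001, and the difference is 0x00ff00ff — 0xff exactly in the lanes whose high bit was set, 0x00 elsewhere, with no borrow crossing lanes because each lane is either 0x00 or 0x80 going in. The "overflow on the top bit" note covers the top lane, where the shifted-out bit vanishes and modular subtraction still yields 0xff.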
+
+
+static void raid6_int$#_gen_syndrome(int disks, size_t bytes, void **ptrs)
+{
+ u8 **dptr = (u8 **)ptrs;
+ u8 *p, *q;
+ int d, z, z0;
+
+ unative_t wd$$, wq$$, wp$$, w1$$, w2$$;
+
+ z0 = disks - 3; /* Highest data disk */
+ p = dptr[z0+1]; /* XOR parity */
+ q = dptr[z0+2]; /* RS syndrome */
+
+ for ( d = 0 ; d < bytes ; d += NSIZE*$# ) {
+ wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE];
+ for ( z = z0-1 ; z >= 0 ; z-- ) {
+ wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE];
+ wp$$ ^= wd$$;
+ w2$$ = MASK(wq$$);
+ w1$$ = SHLBYTE(wq$$);
+ w2$$ &= NBYTES(0x1d);
+ w1$$ ^= w2$$;
+ wq$$ = w1$$ ^ wd$$;
+ }
+ *(unative_t *)&p[d+NSIZE*$$] = wp$$;
+ *(unative_t *)&q[d+NSIZE*$$] = wq$$;
+ }
+}
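The z loop is Horner's rule over GF(2^8) with generator g = 2: per byte lane it accumulates

	P = D_{z0} ^ D_{z0-1} ^ ... ^ D_0
	Q = (...((D_{z0} * g) ^ D_{z0-1}) * g ... ^ D_1) * g ^ D_0
	  = g^{z0}*D_{z0} ^ g^{z0-1}*D_{z0-1} ^ ... ^ g*D_1 ^ D_0

so each iteration multiplies the running Q by g (the MASK/SHLBYTE/0x1d sequence) and XORs in the next lower data disk.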
+
+static void raid6_int$#_xor_syndrome(int disks, int start, int stop,
+ size_t bytes, void **ptrs)
+{
+ u8 **dptr = (u8 **)ptrs;
+ u8 *p, *q;
+ int d, z, z0;
+
+ unative_t wd$$, wq$$, wp$$, w1$$, w2$$;
+
+ z0 = stop; /* P/Q right side optimization */
+ p = dptr[disks-2]; /* XOR parity */
+ q = dptr[disks-1]; /* RS syndrome */
+
+ for ( d = 0 ; d < bytes ; d += NSIZE*$# ) {
+ /* P/Q data pages */
+ wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE];
+ for ( z = z0-1 ; z >= start ; z-- ) {
+ wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE];
+ wp$$ ^= wd$$;
+ w2$$ = MASK(wq$$);
+ w1$$ = SHLBYTE(wq$$);
+ w2$$ &= NBYTES(0x1d);
+ w1$$ ^= w2$$;
+ wq$$ = w1$$ ^ wd$$;
+ }
+ /* P/Q left side optimization */
+ for ( z = start-1 ; z >= 0 ; z-- ) {
+ w2$$ = MASK(wq$$);
+ w1$$ = SHLBYTE(wq$$);
+ w2$$ &= NBYTES(0x1d);
+ wq$$ = w1$$ ^ w2$$;
+ }
+ *(unative_t *)&p[d+NSIZE*$$] ^= wp$$;
+ *(unative_t *)&q[d+NSIZE*$$] ^= wq$$;
+ }
+}
+
+const struct raid6_calls raid6_intx$# = {
+ raid6_int$#_gen_syndrome,
+ raid6_int$#_xor_syndrome,
+ NULL, /* always valid */
+ "int" NSTRING "x$#",
+ 0
+};
+
+#endif
diff --git a/lib/raid6/loongarch.h b/lib/raid6/loongarch.h
new file mode 100644
index 000000000..acfc33ce7
--- /dev/null
+++ b/lib/raid6/loongarch.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2023 WANG Xuerui <git@xen0n.name>
+ *
+ * raid6/loongarch.h
+ *
+ * Definitions common to LoongArch RAID-6 code only
+ */
+
+#ifndef _LIB_RAID6_LOONGARCH_H
+#define _LIB_RAID6_LOONGARCH_H
+
+#ifdef __KERNEL__
+
+#include <asm/cpu-features.h>
+#include <asm/fpu.h>
+
+#else /* for user-space testing */
+
+#include <sys/auxv.h>
+
+/* have to supply these defines for glibc 2.37 and earlier, and for musl */
+#ifndef HWCAP_LOONGARCH_LSX
+#define HWCAP_LOONGARCH_LSX (1 << 4)
+#endif
+#ifndef HWCAP_LOONGARCH_LASX
+#define HWCAP_LOONGARCH_LASX (1 << 5)
+#endif
+
+#define kernel_fpu_begin()
+#define kernel_fpu_end()
+
+#define cpu_has_lsx (getauxval(AT_HWCAP) & HWCAP_LOONGARCH_LSX)
+#define cpu_has_lasx (getauxval(AT_HWCAP) & HWCAP_LOONGARCH_LASX)
+
+#endif /* __KERNEL__ */
+
+#endif /* _LIB_RAID6_LOONGARCH_H */
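A minimal sketch of exercising the user-space side of this header (the main() wrapper is hypothetical; only getauxval() and the two HWCAP bits come from the header):

/* Hypothetical stand-alone probe; build on LoongArch user space. */
#include <stdio.h>
#include "loongarch.h"

int main(void)
{
	printf("LSX:  %s\n", cpu_has_lsx  ? "yes" : "no");
	printf("LASX: %s\n", cpu_has_lasx ? "yes" : "no");
	return 0;
}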
diff --git a/lib/raid6/loongarch_simd.c b/lib/raid6/loongarch_simd.c
new file mode 100644
index 000000000..aa5d9f924
--- /dev/null
+++ b/lib/raid6/loongarch_simd.c
@@ -0,0 +1,422 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RAID6 syndrome calculations in LoongArch SIMD (LSX & LASX)
+ *
+ * Copyright 2023 WANG Xuerui <git@xen0n.name>
+ *
+ * Based on the generic RAID-6 code (int.uc):
+ *
+ * Copyright 2002-2004 H. Peter Anvin
+ */
+
+#include <linux/raid/pq.h>
+#include "loongarch.h"
+
+/*
+ * The vector algorithms are currently priority 0, which means the generic
+ * scalar algorithms are not being disabled if vector support is present.
+ * This is like the similar LoongArch RAID5 XOR code, with the main reason
+ * repeated here: it cannot be ruled out at this point of time, that some
+ * future (maybe reduced) models could run the vector algorithms slower than
+ * the scalar ones, maybe for errata or micro-op reasons. It may be
+ * appropriate to revisit this after one or two more uarch generations.
+ */
+
+#ifdef CONFIG_CPU_HAS_LSX
+#define NSIZE 16
+
+static int raid6_has_lsx(void)
+{
+ return cpu_has_lsx;
+}
+
+static void raid6_lsx_gen_syndrome(int disks, size_t bytes, void **ptrs)
+{
+ u8 **dptr = (u8 **)ptrs;
+ u8 *p, *q;
+ int d, z, z0;
+
+ z0 = disks - 3; /* Highest data disk */
+ p = dptr[z0+1]; /* XOR parity */
+ q = dptr[z0+2]; /* RS syndrome */
+
+ kernel_fpu_begin();
+
+ /*
+ * $vr0, $vr1, $vr2, $vr3: wp
+ * $vr4, $vr5, $vr6, $vr7: wq
+ * $vr8, $vr9, $vr10, $vr11: wd
+ * $vr12, $vr13, $vr14, $vr15: w2
+ * $vr16, $vr17, $vr18, $vr19: w1
+ */
+ for (d = 0; d < bytes; d += NSIZE*4) {
+ /* wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE]; */
+ asm volatile("vld $vr0, %0" : : "m"(dptr[z0][d+0*NSIZE]));
+ asm volatile("vld $vr1, %0" : : "m"(dptr[z0][d+1*NSIZE]));
+ asm volatile("vld $vr2, %0" : : "m"(dptr[z0][d+2*NSIZE]));
+ asm volatile("vld $vr3, %0" : : "m"(dptr[z0][d+3*NSIZE]));
+ asm volatile("vori.b $vr4, $vr0, 0");
+ asm volatile("vori.b $vr5, $vr1, 0");
+ asm volatile("vori.b $vr6, $vr2, 0");
+ asm volatile("vori.b $vr7, $vr3, 0");
+ for (z = z0-1; z >= 0; z--) {
+ /* wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE]; */
+ asm volatile("vld $vr8, %0" : : "m"(dptr[z][d+0*NSIZE]));
+ asm volatile("vld $vr9, %0" : : "m"(dptr[z][d+1*NSIZE]));
+ asm volatile("vld $vr10, %0" : : "m"(dptr[z][d+2*NSIZE]));
+ asm volatile("vld $vr11, %0" : : "m"(dptr[z][d+3*NSIZE]));
+ /* wp$$ ^= wd$$; */
+ asm volatile("vxor.v $vr0, $vr0, $vr8");
+ asm volatile("vxor.v $vr1, $vr1, $vr9");
+ asm volatile("vxor.v $vr2, $vr2, $vr10");
+ asm volatile("vxor.v $vr3, $vr3, $vr11");
+ /* w2$$ = MASK(wq$$); */
+ asm volatile("vslti.b $vr12, $vr4, 0");
+ asm volatile("vslti.b $vr13, $vr5, 0");
+ asm volatile("vslti.b $vr14, $vr6, 0");
+ asm volatile("vslti.b $vr15, $vr7, 0");
+ /* w1$$ = SHLBYTE(wq$$); */
+ asm volatile("vslli.b $vr16, $vr4, 1");
+ asm volatile("vslli.b $vr17, $vr5, 1");
+ asm volatile("vslli.b $vr18, $vr6, 1");
+ asm volatile("vslli.b $vr19, $vr7, 1");
+ /* w2$$ &= NBYTES(0x1d); */
+ asm volatile("vandi.b $vr12, $vr12, 0x1d");
+ asm volatile("vandi.b $vr13, $vr13, 0x1d");
+ asm volatile("vandi.b $vr14, $vr14, 0x1d");
+ asm volatile("vandi.b $vr15, $vr15, 0x1d");
+ /* w1$$ ^= w2$$; */
+ asm volatile("vxor.v $vr16, $vr16, $vr12");
+ asm volatile("vxor.v $vr17, $vr17, $vr13");
+ asm volatile("vxor.v $vr18, $vr18, $vr14");
+ asm volatile("vxor.v $vr19, $vr19, $vr15");
+ /* wq$$ = w1$$ ^ wd$$; */
+ asm volatile("vxor.v $vr4, $vr16, $vr8");
+ asm volatile("vxor.v $vr5, $vr17, $vr9");
+ asm volatile("vxor.v $vr6, $vr18, $vr10");
+ asm volatile("vxor.v $vr7, $vr19, $vr11");
+ }
+ /* *(unative_t *)&p[d+NSIZE*$$] = wp$$; */
+ asm volatile("vst $vr0, %0" : "=m"(p[d+NSIZE*0]));
+ asm volatile("vst $vr1, %0" : "=m"(p[d+NSIZE*1]));
+ asm volatile("vst $vr2, %0" : "=m"(p[d+NSIZE*2]));
+ asm volatile("vst $vr3, %0" : "=m"(p[d+NSIZE*3]));
+ /* *(unative_t *)&q[d+NSIZE*$$] = wq$$; */
+ asm volatile("vst $vr4, %0" : "=m"(q[d+NSIZE*0]));
+ asm volatile("vst $vr5, %0" : "=m"(q[d+NSIZE*1]));
+ asm volatile("vst $vr6, %0" : "=m"(q[d+NSIZE*2]));
+ asm volatile("vst $vr7, %0" : "=m"(q[d+NSIZE*3]));
+ }
+
+ kernel_fpu_end();
+}
+
+static void raid6_lsx_xor_syndrome(int disks, int start, int stop,
+ size_t bytes, void **ptrs)
+{
+ u8 **dptr = (u8 **)ptrs;
+ u8 *p, *q;
+ int d, z, z0;
+
+ z0 = stop; /* P/Q right side optimization */
+ p = dptr[disks-2]; /* XOR parity */
+ q = dptr[disks-1]; /* RS syndrome */
+
+ kernel_fpu_begin();
+
+ /*
+ * $vr0, $vr1, $vr2, $vr3: wp
+ * $vr4, $vr5, $vr6, $vr7: wq
+ * $vr8, $vr9, $vr10, $vr11: wd
+ * $vr12, $vr13, $vr14, $vr15: w2
+ * $vr16, $vr17, $vr18, $vr19: w1
+ */
+ for (d = 0; d < bytes; d += NSIZE*4) {
+ /* P/Q data pages */
+ /* wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE]; */
+ asm volatile("vld $vr0, %0" : : "m"(dptr[z0][d+0*NSIZE]));
+ asm volatile("vld $vr1, %0" : : "m"(dptr[z0][d+1*NSIZE]));
+ asm volatile("vld $vr2, %0" : : "m"(dptr[z0][d+2*NSIZE]));
+ asm volatile("vld $vr3, %0" : : "m"(dptr[z0][d+3*NSIZE]));
+ asm volatile("vori.b $vr4, $vr0, 0");
+ asm volatile("vori.b $vr5, $vr1, 0");
+ asm volatile("vori.b $vr6, $vr2, 0");
+ asm volatile("vori.b $vr7, $vr3, 0");
+ for (z = z0-1; z >= start; z--) {
+ /* wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE]; */
+ asm volatile("vld $vr8, %0" : : "m"(dptr[z][d+0*NSIZE]));
+ asm volatile("vld $vr9, %0" : : "m"(dptr[z][d+1*NSIZE]));
+ asm volatile("vld $vr10, %0" : : "m"(dptr[z][d+2*NSIZE]));
+ asm volatile("vld $vr11, %0" : : "m"(dptr[z][d+3*NSIZE]));
+ /* wp$$ ^= wd$$; */
+ asm volatile("vxor.v $vr0, $vr0, $vr8");
+ asm volatile("vxor.v $vr1, $vr1, $vr9");
+ asm volatile("vxor.v $vr2, $vr2, $vr10");
+ asm volatile("vxor.v $vr3, $vr3, $vr11");
+ /* w2$$ = MASK(wq$$); */
+ asm volatile("vslti.b $vr12, $vr4, 0");
+ asm volatile("vslti.b $vr13, $vr5, 0");
+ asm volatile("vslti.b $vr14, $vr6, 0");
+ asm volatile("vslti.b $vr15, $vr7, 0");
+ /* w1$$ = SHLBYTE(wq$$); */
+ asm volatile("vslli.b $vr16, $vr4, 1");
+ asm volatile("vslli.b $vr17, $vr5, 1");
+ asm volatile("vslli.b $vr18, $vr6, 1");
+ asm volatile("vslli.b $vr19, $vr7, 1");
+ /* w2$$ &= NBYTES(0x1d); */
+ asm volatile("vandi.b $vr12, $vr12, 0x1d");
+ asm volatile("vandi.b $vr13, $vr13, 0x1d");
+ asm volatile("vandi.b $vr14, $vr14, 0x1d");
+ asm volatile("vandi.b $vr15, $vr15, 0x1d");
+ /* w1$$ ^= w2$$; */
+ asm volatile("vxor.v $vr16, $vr16, $vr12");
+ asm volatile("vxor.v $vr17, $vr17, $vr13");
+ asm volatile("vxor.v $vr18, $vr18, $vr14");
+ asm volatile("vxor.v $vr19, $vr19, $vr15");
+ /* wq$$ = w1$$ ^ wd$$; */
+ asm volatile("vxor.v $vr4, $vr16, $vr8");
+ asm volatile("vxor.v $vr5, $vr17, $vr9");
+ asm volatile("vxor.v $vr6, $vr18, $vr10");
+ asm volatile("vxor.v $vr7, $vr19, $vr11");
+ }
+
+ /* P/Q left side optimization */
+ for (z = start-1; z >= 0; z--) {
+ /* w2$$ = MASK(wq$$); */
+ asm volatile("vslti.b $vr12, $vr4, 0");
+ asm volatile("vslti.b $vr13, $vr5, 0");
+ asm volatile("vslti.b $vr14, $vr6, 0");
+ asm volatile("vslti.b $vr15, $vr7, 0");
+ /* w1$$ = SHLBYTE(wq$$); */
+ asm volatile("vslli.b $vr16, $vr4, 1");
+ asm volatile("vslli.b $vr17, $vr5, 1");
+ asm volatile("vslli.b $vr18, $vr6, 1");
+ asm volatile("vslli.b $vr19, $vr7, 1");
+ /* w2$$ &= NBYTES(0x1d); */
+ asm volatile("vandi.b $vr12, $vr12, 0x1d");
+ asm volatile("vandi.b $vr13, $vr13, 0x1d");
+ asm volatile("vandi.b $vr14, $vr14, 0x1d");
+ asm volatile("vandi.b $vr15, $vr15, 0x1d");
+ /* wq$$ = w1$$ ^ w2$$; */
+ asm volatile("vxor.v $vr4, $vr16, $vr12");
+ asm volatile("vxor.v $vr5, $vr17, $vr13");
+ asm volatile("vxor.v $vr6, $vr18, $vr14");
+ asm volatile("vxor.v $vr7, $vr19, $vr15");
+ }
+ /*
+ * *(unative_t *)&p[d+NSIZE*$$] ^= wp$$;
+ * *(unative_t *)&q[d+NSIZE*$$] ^= wq$$;
+ */
+ asm volatile(
+ "vld $vr20, %0\n\t"
+ "vld $vr21, %1\n\t"
+ "vld $vr22, %2\n\t"
+ "vld $vr23, %3\n\t"
+ "vld $vr24, %4\n\t"
+ "vld $vr25, %5\n\t"
+ "vld $vr26, %6\n\t"
+ "vld $vr27, %7\n\t"
+ "vxor.v $vr20, $vr20, $vr0\n\t"
+ "vxor.v $vr21, $vr21, $vr1\n\t"
+ "vxor.v $vr22, $vr22, $vr2\n\t"
+ "vxor.v $vr23, $vr23, $vr3\n\t"
+ "vxor.v $vr24, $vr24, $vr4\n\t"
+ "vxor.v $vr25, $vr25, $vr5\n\t"
+ "vxor.v $vr26, $vr26, $vr6\n\t"
+ "vxor.v $vr27, $vr27, $vr7\n\t"
+ "vst $vr20, %0\n\t"
+ "vst $vr21, %1\n\t"
+ "vst $vr22, %2\n\t"
+ "vst $vr23, %3\n\t"
+ "vst $vr24, %4\n\t"
+ "vst $vr25, %5\n\t"
+ "vst $vr26, %6\n\t"
+ "vst $vr27, %7\n\t"
+ : "+m"(p[d+NSIZE*0]), "+m"(p[d+NSIZE*1]),
+ "+m"(p[d+NSIZE*2]), "+m"(p[d+NSIZE*3]),
+ "+m"(q[d+NSIZE*0]), "+m"(q[d+NSIZE*1]),
+ "+m"(q[d+NSIZE*2]), "+m"(q[d+NSIZE*3])
+ );
+ }
+
+ kernel_fpu_end();
+}
+
+const struct raid6_calls raid6_lsx = {
+ raid6_lsx_gen_syndrome,
+ raid6_lsx_xor_syndrome,
+ raid6_has_lsx,
+ "lsx",
+ .priority = 0 /* see the comment near the top of the file for reason */
+};
+
+#undef NSIZE
+#endif /* CONFIG_CPU_HAS_LSX */
+
+#ifdef CONFIG_CPU_HAS_LASX
+#define NSIZE 32
+
+static int raid6_has_lasx(void)
+{
+ return cpu_has_lasx;
+}
+
+static void raid6_lasx_gen_syndrome(int disks, size_t bytes, void **ptrs)
+{
+ u8 **dptr = (u8 **)ptrs;
+ u8 *p, *q;
+ int d, z, z0;
+
+ z0 = disks - 3; /* Highest data disk */
+ p = dptr[z0+1]; /* XOR parity */
+ q = dptr[z0+2]; /* RS syndrome */
+
+ kernel_fpu_begin();
+
+ /*
+ * $xr0, $xr1: wp
+ * $xr2, $xr3: wq
+ * $xr4, $xr5: wd
+ * $xr6, $xr7: w2
+ * $xr8, $xr9: w1
+ */
+ for (d = 0; d < bytes; d += NSIZE*2) {
+ /* wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE]; */
+ asm volatile("xvld $xr0, %0" : : "m"(dptr[z0][d+0*NSIZE]));
+ asm volatile("xvld $xr1, %0" : : "m"(dptr[z0][d+1*NSIZE]));
+ asm volatile("xvori.b $xr2, $xr0, 0");
+ asm volatile("xvori.b $xr3, $xr1, 0");
+ for (z = z0-1; z >= 0; z--) {
+ /* wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE]; */
+ asm volatile("xvld $xr4, %0" : : "m"(dptr[z][d+0*NSIZE]));
+ asm volatile("xvld $xr5, %0" : : "m"(dptr[z][d+1*NSIZE]));
+ /* wp$$ ^= wd$$; */
+ asm volatile("xvxor.v $xr0, $xr0, $xr4");
+ asm volatile("xvxor.v $xr1, $xr1, $xr5");
+ /* w2$$ = MASK(wq$$); */
+ asm volatile("xvslti.b $xr6, $xr2, 0");
+ asm volatile("xvslti.b $xr7, $xr3, 0");
+ /* w1$$ = SHLBYTE(wq$$); */
+ asm volatile("xvslli.b $xr8, $xr2, 1");
+ asm volatile("xvslli.b $xr9, $xr3, 1");
+ /* w2$$ &= NBYTES(0x1d); */
+ asm volatile("xvandi.b $xr6, $xr6, 0x1d");
+ asm volatile("xvandi.b $xr7, $xr7, 0x1d");
+ /* w1$$ ^= w2$$; */
+ asm volatile("xvxor.v $xr8, $xr8, $xr6");
+ asm volatile("xvxor.v $xr9, $xr9, $xr7");
+ /* wq$$ = w1$$ ^ wd$$; */
+ asm volatile("xvxor.v $xr2, $xr8, $xr4");
+ asm volatile("xvxor.v $xr3, $xr9, $xr5");
+ }
+ /* *(unative_t *)&p[d+NSIZE*$$] = wp$$; */
+ asm volatile("xvst $xr0, %0" : "=m"(p[d+NSIZE*0]));
+ asm volatile("xvst $xr1, %0" : "=m"(p[d+NSIZE*1]));
+ /* *(unative_t *)&q[d+NSIZE*$$] = wq$$; */
+ asm volatile("xvst $xr2, %0" : "=m"(q[d+NSIZE*0]));
+ asm volatile("xvst $xr3, %0" : "=m"(q[d+NSIZE*1]));
+ }
+
+ kernel_fpu_end();
+}
+
+static void raid6_lasx_xor_syndrome(int disks, int start, int stop,
+ size_t bytes, void **ptrs)
+{
+ u8 **dptr = (u8 **)ptrs;
+ u8 *p, *q;
+ int d, z, z0;
+
+ z0 = stop; /* P/Q right side optimization */
+ p = dptr[disks-2]; /* XOR parity */
+ q = dptr[disks-1]; /* RS syndrome */
+
+ kernel_fpu_begin();
+
+ /*
+ * $xr0, $xr1: wp
+ * $xr2, $xr3: wq
+ * $xr4, $xr5: wd
+ * $xr6, $xr7: w2
+ * $xr8, $xr9: w1
+ */
+ for (d = 0; d < bytes; d += NSIZE*2) {
+ /* P/Q data pages */
+ /* wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE]; */
+ asm volatile("xvld $xr0, %0" : : "m"(dptr[z0][d+0*NSIZE]));
+ asm volatile("xvld $xr1, %0" : : "m"(dptr[z0][d+1*NSIZE]));
+ asm volatile("xvori.b $xr2, $xr0, 0");
+ asm volatile("xvori.b $xr3, $xr1, 0");
+ for (z = z0-1; z >= start; z--) {
+ /* wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE]; */
+ asm volatile("xvld $xr4, %0" : : "m"(dptr[z][d+0*NSIZE]));
+ asm volatile("xvld $xr5, %0" : : "m"(dptr[z][d+1*NSIZE]));
+ /* wp$$ ^= wd$$; */
+ asm volatile("xvxor.v $xr0, $xr0, $xr4");
+ asm volatile("xvxor.v $xr1, $xr1, $xr5");
+ /* w2$$ = MASK(wq$$); */
+ asm volatile("xvslti.b $xr6, $xr2, 0");
+ asm volatile("xvslti.b $xr7, $xr3, 0");
+ /* w1$$ = SHLBYTE(wq$$); */
+ asm volatile("xvslli.b $xr8, $xr2, 1");
+ asm volatile("xvslli.b $xr9, $xr3, 1");
+ /* w2$$ &= NBYTES(0x1d); */
+ asm volatile("xvandi.b $xr6, $xr6, 0x1d");
+ asm volatile("xvandi.b $xr7, $xr7, 0x1d");
+ /* w1$$ ^= w2$$; */
+ asm volatile("xvxor.v $xr8, $xr8, $xr6");
+ asm volatile("xvxor.v $xr9, $xr9, $xr7");
+ /* wq$$ = w1$$ ^ wd$$; */
+ asm volatile("xvxor.v $xr2, $xr8, $xr4");
+ asm volatile("xvxor.v $xr3, $xr9, $xr5");
+ }
+
+ /* P/Q left side optimization */
+ for (z = start-1; z >= 0; z--) {
+ /* w2$$ = MASK(wq$$); */
+ asm volatile("xvslti.b $xr6, $xr2, 0");
+ asm volatile("xvslti.b $xr7, $xr3, 0");
+ /* w1$$ = SHLBYTE(wq$$); */
+ asm volatile("xvslli.b $xr8, $xr2, 1");
+ asm volatile("xvslli.b $xr9, $xr3, 1");
+ /* w2$$ &= NBYTES(0x1d); */
+ asm volatile("xvandi.b $xr6, $xr6, 0x1d");
+ asm volatile("xvandi.b $xr7, $xr7, 0x1d");
+ /* wq$$ = w1$$ ^ w2$$; */
+ asm volatile("xvxor.v $xr2, $xr8, $xr6");
+ asm volatile("xvxor.v $xr3, $xr9, $xr7");
+ }
+ /*
+ * *(unative_t *)&p[d+NSIZE*$$] ^= wp$$;
+ * *(unative_t *)&q[d+NSIZE*$$] ^= wq$$;
+ */
+ asm volatile(
+ "xvld $xr10, %0\n\t"
+ "xvld $xr11, %1\n\t"
+ "xvld $xr12, %2\n\t"
+ "xvld $xr13, %3\n\t"
+ "xvxor.v $xr10, $xr10, $xr0\n\t"
+ "xvxor.v $xr11, $xr11, $xr1\n\t"
+ "xvxor.v $xr12, $xr12, $xr2\n\t"
+ "xvxor.v $xr13, $xr13, $xr3\n\t"
+ "xvst $xr10, %0\n\t"
+ "xvst $xr11, %1\n\t"
+ "xvst $xr12, %2\n\t"
+ "xvst $xr13, %3\n\t"
+ : "+m"(p[d+NSIZE*0]), "+m"(p[d+NSIZE*1]),
+ "+m"(q[d+NSIZE*0]), "+m"(q[d+NSIZE*1])
+ );
+ }
+
+ kernel_fpu_end();
+}
+
+const struct raid6_calls raid6_lasx = {
+ raid6_lasx_gen_syndrome,
+ raid6_lasx_xor_syndrome,
+ raid6_has_lasx,
+ "lasx",
+ .priority = 0 /* see the comment near the top of the file for reason */
+};
+#undef NSIZE
+#endif /* CONFIG_CPU_HAS_LASX */
diff --git a/lib/raid6/mktables.c b/lib/raid6/mktables.c
new file mode 100644
index 000000000..3be037932
--- /dev/null
+++ b/lib/raid6/mktables.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* -*- linux-c -*- ------------------------------------------------------- *
+ *
+ * Copyright 2002-2007 H. Peter Anvin - All Rights Reserved
+ *
+ * ----------------------------------------------------------------------- */
+
+/*
+ * mktables.c
+ *
+ * Make RAID-6 tables. This is a host user space program to be run at
+ * compile time.
+ */
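In other words, the program emits C source on stdout and the build captures it into a table file, along the lines of `cc -o mktables mktables.c && ./mktables > tables.c` (file names illustrative; the raid6 Makefile wires this up itself).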
+
+#include <stdio.h>
+#include <string.h>
+#include <inttypes.h>
+#include <stdlib.h>
+#include <time.h>
+
+static uint8_t gfmul(uint8_t a, uint8_t b)
+{
+ uint8_t v = 0;
+
+ while (b) {
+ if (b & 1)
+ v ^= a;
+ a = (a << 1) ^ (a & 0x80 ? 0x1d : 0);
+ b >>= 1;
+ }
+
+ return v;
+}
+
+static uint8_t gfpow(uint8_t a, int b)
+{
+ uint8_t v = 1;
+
+ b %= 255;
+ if (b < 0)
+ b += 255;
+
+ while (b) {
+ if (b & 1)
+ v = gfmul(v, a);
+ a = gfmul(a, a);
+ b >>= 1;
+ }
+
+ return v;
+}
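Since the inverse table below is built from gfpow(x, 254) (by Fermat, x^255 = 1 for nonzero x in GF(2^8)), a quick self-check of the two helpers — hypothetical, not part of the real generator — is that every nonzero element times its claimed inverse is 1:

/* Hypothetical sanity check; call from main() before emitting tables. */
static void check_inverses(void)
{
	int x;

	for (x = 1; x < 256; x++)
		if (gfmul((uint8_t)x, gfpow((uint8_t)x, 254)) != 1)
			fprintf(stderr, "bad inverse for 0x%02x\n", x);
}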
+
+int main(int argc, char *argv[])
+{
+ int i, j, k;
+ uint8_t v;
+ uint8_t exptbl[256], invtbl[256];
+
+ printf("#ifdef __KERNEL__\n");
+ printf("#include <linux/export.h>\n");
+ printf("#endif\n");
+ printf("#include <linux/raid/pq.h>\n");
+
+ /* Compute multiplication table */
+ printf("\nconst u8 __attribute__((aligned(256)))\n"
+ "raid6_gfmul[256][256] =\n"
+ "{\n");
+ for (i = 0; i < 256; i++) {
+ printf("\t{\n");
+ for (j = 0; j < 256; j += 8) {
+ printf("\t\t");
+ for (k = 0; k < 8; k++)
+ printf("0x%02x,%c", gfmul(i, j + k),
+ (k == 7) ? '\n' : ' ');
+ }
+ printf("\t},\n");
+ }
+ printf("};\n");
+ printf("#ifdef __KERNEL__\n");
+ printf("EXPORT_SYMBOL(raid6_gfmul);\n");
+ printf("#endif\n");
+
+ /* Compute vector multiplication table */
+ printf("\nconst u8 __attribute__((aligned(256)))\n"
+ "raid6_vgfmul[256][32] =\n"
+ "{\n");
+ for (i = 0; i < 256; i++) {
+ printf("\t{\n");
+ for (j = 0; j < 16; j += 8) {
+ printf("\t\t");
+ for (k = 0; k < 8; k++)
+ printf("0x%02x,%c", gfmul(i, j + k),
+ (k == 7) ? '\n' : ' ');
+ }
+ for (j = 0; j < 16; j += 8) {
+ printf("\t\t");
+ for (k = 0; k < 8; k++)
+ printf("0x%02x,%c", gfmul(i, (j + k) << 4),
+ (k == 7) ? '\n' : ' ');
+ }
+ printf("\t},\n");
+ }
+ printf("};\n");
+ printf("#ifdef __KERNEL__\n");
+ printf("EXPORT_SYMBOL(raid6_vgfmul);\n");
+ printf("#endif\n");
+
+ /* Compute power-of-2 table (exponent) */
+ v = 1;
+ printf("\nconst u8 __attribute__((aligned(256)))\n"
+ "raid6_gfexp[256] =\n" "{\n");
+ for (i = 0; i < 256; i += 8) {
+ printf("\t");
+ for (j = 0; j < 8; j++) {
+ exptbl[i + j] = v;
+ printf("0x%02x,%c", v, (j == 7) ? '\n' : ' ');
+ v = gfmul(v, 2);
+ if (v == 1)
+ v = 0; /* For entry 255, not a real entry */
+ }
+ }
+ printf("};\n");
+ printf("#ifdef __KERNEL__\n");
+ printf("EXPORT_SYMBOL(raid6_gfexp);\n");
+ printf("#endif\n");
+
+ /* Compute log-of-2 table */
+ printf("\nconst u8 __attribute__((aligned(256)))\n"
+ "raid6_gflog[256] =\n" "{\n");
+ for (i = 0; i < 256; i += 8) {
+ printf("\t");
+ for (j = 0; j < 8; j++) {
+ v = 255;
+ for (k = 0; k < 256; k++)
+ if (exptbl[k] == (i + j)) {
+ v = k;
+ break;
+ }
+ printf("0x%02x,%c", v, (j == 7) ? '\n' : ' ');
+ }
+ }
+ printf("};\n");
+ printf("#ifdef __KERNEL__\n");
+ printf("EXPORT_SYMBOL(raid6_gflog);\n");
+ printf("#endif\n");
+
+ /* Compute inverse table x^-1 == x^254 */
+ printf("\nconst u8 __attribute__((aligned(256)))\n"
+ "raid6_gfinv[256] =\n" "{\n");
+ for (i = 0; i < 256; i += 8) {
+ printf("\t");
+ for (j = 0; j < 8; j++) {
+ invtbl[i + j] = v = gfpow(i + j, 254);
+ printf("0x%02x,%c", v, (j == 7) ? '\n' : ' ');
+ }
+ }
+ printf("};\n");
+ printf("#ifdef __KERNEL__\n");
+ printf("EXPORT_SYMBOL(raid6_gfinv);\n");
+ printf("#endif\n");
+
+ /* Compute inv(2^x + 1) (exponent-xor-inverse) table */
+ printf("\nconst u8 __attribute__((aligned(256)))\n"
+ "raid6_gfexi[256] =\n" "{\n");
+ for (i = 0; i < 256; i += 8) {
+ printf("\t");
+ for (j = 0; j < 8; j++)
+ printf("0x%02x,%c", invtbl[exptbl[i + j] ^ 1],
+ (j == 7) ? '\n' : ' ');
+ }
+ printf("};\n");
+ printf("#ifdef __KERNEL__\n");
+ printf("EXPORT_SYMBOL(raid6_gfexi);\n");
+ printf("#endif\n");
+
+ return 0;
+}
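
For reference, the start of the generated output (the kernel build pipes
this program's stdout into a generated tables.c) looks roughly like this,
as dictated by the printf() calls above; the all-zero first row is
raid6_gfmul[0]:

    #ifdef __KERNEL__
    #include <linux/export.h>
    #endif
    #include <linux/raid/pq.h>

    const u8 __attribute__((aligned(256)))
    raid6_gfmul[256][256] =
    {
        {
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
            /* ...248 more zero entries... */
        },
        /* ...255 more rows... */
    };
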
diff --git a/lib/raid6/mmx.c b/lib/raid6/mmx.c
new file mode 100644
index 000000000..3a5bf53a2
--- /dev/null
+++ b/lib/raid6/mmx.c
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* -*- linux-c -*- ------------------------------------------------------- *
+ *
+ * Copyright 2002 H. Peter Anvin - All Rights Reserved
+ *
+ * ----------------------------------------------------------------------- */
+
+/*
+ * raid6/mmx.c
+ *
+ * MMX implementation of RAID-6 syndrome functions
+ */
+
+#ifdef CONFIG_X86_32
+
+#include <linux/raid/pq.h>
+#include "x86.h"
+
+/* Shared with raid6/sse1.c */
+const struct raid6_mmx_constants {
+ u64 x1d;
+} raid6_mmx_constants = {
+ 0x1d1d1d1d1d1d1d1dULL,
+};
+
+static int raid6_have_mmx(void)
+{
+ /* Not really "boot_cpu" but "all_cpus" */
+ return boot_cpu_has(X86_FEATURE_MMX);
+}
+
+/*
+ * Plain MMX implementation
+ */
+static void raid6_mmx1_gen_syndrome(int disks, size_t bytes, void **ptrs)
+{
+ u8 **dptr = (u8 **)ptrs;
+ u8 *p, *q;
+ int d, z, z0;
+
+ z0 = disks - 3; /* Highest data disk */
+ p = dptr[z0+1]; /* XOR parity */
+ q = dptr[z0+2]; /* RS syndrome */
+
+ kernel_fpu_begin();
+
+ asm volatile("movq %0,%%mm0" : : "m" (raid6_mmx_constants.x1d));
+ asm volatile("pxor %mm5,%mm5"); /* Zero temp */
+
+ for ( d = 0 ; d < bytes ; d += 8 ) {
+ asm volatile("movq %0,%%mm2" : : "m" (dptr[z0][d])); /* P[0] */
+ asm volatile("movq %mm2,%mm4"); /* Q[0] */
+ for ( z = z0-1 ; z >= 0 ; z-- ) {
+ asm volatile("movq %0,%%mm6" : : "m" (dptr[z][d]));
+ asm volatile("pcmpgtb %mm4,%mm5");
+ asm volatile("paddb %mm4,%mm4");
+ asm volatile("pand %mm0,%mm5");
+ asm volatile("pxor %mm5,%mm4");
+ asm volatile("pxor %mm5,%mm5");
+ asm volatile("pxor %mm6,%mm2");
+ asm volatile("pxor %mm6,%mm4");
+ }
+ asm volatile("movq %%mm2,%0" : "=m" (p[d]));
+ asm volatile("pxor %mm2,%mm2");
+ asm volatile("movq %%mm4,%0" : "=m" (q[d]));
+ asm volatile("pxor %mm4,%mm4");
+ }
+
+ kernel_fpu_end();
+}
+
+const struct raid6_calls raid6_mmxx1 = {
+ raid6_mmx1_gen_syndrome,
+ NULL, /* XOR not yet implemented */
+ raid6_have_mmx,
+ "mmxx1",
+ 0
+};
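
The pcmpgtb/paddb/pand/pxor run in the inner loop is the SIMD
multiply-by-2 in GF(2^8): comparing the zeroed register against Q (a
signed per-byte compare) yields an 0xff mask wherever a byte's top bit is
set, paddb doubles each byte, and the masked XOR folds in the reduction
constant 0x1d. Per byte lane it is equivalent to this sketch (helper name
illustrative):

    /* Scalar model of one byte lane of the MMX doubling step. */
    static inline u8 gf_mul2(u8 q)
    {
        u8 mask = (q & 0x80) ? 0xff : 0x00;   /* pcmpgtb against zero */

        return (u8)(q << 1) ^ (mask & 0x1d);  /* paddb; pand; pxor */
    }
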
+
+/*
+ * Unrolled-by-2 MMX implementation
+ */
+static void raid6_mmx2_gen_syndrome(int disks, size_t bytes, void **ptrs)
+{
+ u8 **dptr = (u8 **)ptrs;
+ u8 *p, *q;
+ int d, z, z0;
+
+ z0 = disks - 3; /* Highest data disk */
+ p = dptr[z0+1]; /* XOR parity */
+ q = dptr[z0+2]; /* RS syndrome */
+
+ kernel_fpu_begin();
+
+ asm volatile("movq %0,%%mm0" : : "m" (raid6_mmx_constants.x1d));
+ asm volatile("pxor %mm5,%mm5"); /* Zero temp */
+ asm volatile("pxor %mm7,%mm7"); /* Zero temp */
+
+ for ( d = 0 ; d < bytes ; d += 16 ) {
+ asm volatile("movq %0,%%mm2" : : "m" (dptr[z0][d])); /* P[0] */
+ asm volatile("movq %0,%%mm3" : : "m" (dptr[z0][d+8]));
+ asm volatile("movq %mm2,%mm4"); /* Q[0] */
+ asm volatile("movq %mm3,%mm6"); /* Q[1] */
+ for ( z = z0-1 ; z >= 0 ; z-- ) {
+ asm volatile("pcmpgtb %mm4,%mm5");
+ asm volatile("pcmpgtb %mm6,%mm7");
+ asm volatile("paddb %mm4,%mm4");
+ asm volatile("paddb %mm6,%mm6");
+ asm volatile("pand %mm0,%mm5");
+ asm volatile("pand %mm0,%mm7");
+ asm volatile("pxor %mm5,%mm4");
+ asm volatile("pxor %mm7,%mm6");
+ asm volatile("movq %0,%%mm5" : : "m" (dptr[z][d]));
+ asm volatile("movq %0,%%mm7" : : "m" (dptr[z][d+8]));
+ asm volatile("pxor %mm5,%mm2");
+ asm volatile("pxor %mm7,%mm3");
+ asm volatile("pxor %mm5,%mm4");
+ asm volatile("pxor %mm7,%mm6");
+ asm volatile("pxor %mm5,%mm5");
+ asm volatile("pxor %mm7,%mm7");
+ }
+ asm volatile("movq %%mm2,%0" : "=m" (p[d]));
+ asm volatile("movq %%mm3,%0" : "=m" (p[d+8]));
+ asm volatile("movq %%mm4,%0" : "=m" (q[d]));
+ asm volatile("movq %%mm6,%0" : "=m" (q[d+8]));
+ }
+
+ kernel_fpu_end();
+}
+
+const struct raid6_calls raid6_mmxx2 = {
+ raid6_mmx2_gen_syndrome,
+ NULL, /* XOR not yet implemented */
+ raid6_have_mmx,
+ "mmxx2",
+ 0
+};
+
+#endif
diff --git a/lib/raid6/neon.c b/lib/raid6/neon.c
new file mode 100644
index 000000000..0a2e76035
--- /dev/null
+++ b/lib/raid6/neon.c
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * linux/lib/raid6/neon.c - RAID6 syndrome calculation using ARM NEON intrinsics
+ *
+ * Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
+ */
+
+#include <linux/raid/pq.h>
+
+#ifdef __KERNEL__
+#include <asm/neon.h>
+#else
+#define kernel_neon_begin()
+#define kernel_neon_end()
+#define cpu_has_neon() (1)
+#endif
+
+/*
+ * There are 2 reasons these wrappers are kept in a separate compilation unit
+ * from the actual implementations in neonN.c (generated from neon.uc by
+ * unroll.awk):
+ * - the actual implementations use NEON intrinsics, and the GCC support header
+ * (arm_neon.h) is not fully compatible (type wise) with the kernel;
+ * - the neonN.c files are compiled with -mfpu=neon and optimization enabled,
+ * and we have to make sure that we never use *any* NEON/VFP instructions
+ * outside a kernel_neon_begin()/kernel_neon_end() pair.
+ */
+
+#define RAID6_NEON_WRAPPER(_n) \
+ static void raid6_neon ## _n ## _gen_syndrome(int disks, \
+ size_t bytes, void **ptrs) \
+ { \
+ void raid6_neon ## _n ## _gen_syndrome_real(int, \
+ unsigned long, void**); \
+ kernel_neon_begin(); \
+ raid6_neon ## _n ## _gen_syndrome_real(disks, \
+ (unsigned long)bytes, ptrs); \
+ kernel_neon_end(); \
+ } \
+ static void raid6_neon ## _n ## _xor_syndrome(int disks, \
+ int start, int stop, \
+ size_t bytes, void **ptrs) \
+ { \
+ void raid6_neon ## _n ## _xor_syndrome_real(int, \
+ int, int, unsigned long, void**); \
+ kernel_neon_begin(); \
+ raid6_neon ## _n ## _xor_syndrome_real(disks, \
+ start, stop, (unsigned long)bytes, ptrs); \
+ kernel_neon_end(); \
+ } \
+ struct raid6_calls const raid6_neonx ## _n = { \
+ raid6_neon ## _n ## _gen_syndrome, \
+ raid6_neon ## _n ## _xor_syndrome, \
+ raid6_have_neon, \
+ "neonx" #_n, \
+ 0 \
+ }
+
+static int raid6_have_neon(void)
+{
+ return cpu_has_neon();
+}
+
+RAID6_NEON_WRAPPER(1);
+RAID6_NEON_WRAPPER(2);
+RAID6_NEON_WRAPPER(4);
+RAID6_NEON_WRAPPER(8);
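
For readability, RAID6_NEON_WRAPPER(2) expands to roughly the following
(whitespace adjusted; the xor_syndrome wrapper is analogous):

    static void raid6_neon2_gen_syndrome(int disks, size_t bytes, void **ptrs)
    {
        void raid6_neon2_gen_syndrome_real(int, unsigned long, void **);

        kernel_neon_begin();
        raid6_neon2_gen_syndrome_real(disks, (unsigned long)bytes, ptrs);
        kernel_neon_end();
    }

    struct raid6_calls const raid6_neonx2 = {
        raid6_neon2_gen_syndrome,
        raid6_neon2_xor_syndrome,
        raid6_have_neon,
        "neonx2",
        0
    };
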
diff --git a/lib/raid6/neon.h b/lib/raid6/neon.h
new file mode 100644
index 000000000..2ca41ee9b
--- /dev/null
+++ b/lib/raid6/neon.h
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+void raid6_neon1_gen_syndrome_real(int disks, unsigned long bytes, void **ptrs);
+void raid6_neon1_xor_syndrome_real(int disks, int start, int stop,
+ unsigned long bytes, void **ptrs);
+void raid6_neon2_gen_syndrome_real(int disks, unsigned long bytes, void **ptrs);
+void raid6_neon2_xor_syndrome_real(int disks, int start, int stop,
+ unsigned long bytes, void **ptrs);
+void raid6_neon4_gen_syndrome_real(int disks, unsigned long bytes, void **ptrs);
+void raid6_neon4_xor_syndrome_real(int disks, int start, int stop,
+ unsigned long bytes, void **ptrs);
+void raid6_neon8_gen_syndrome_real(int disks, unsigned long bytes, void **ptrs);
+void raid6_neon8_xor_syndrome_real(int disks, int start, int stop,
+ unsigned long bytes, void **ptrs);
+void __raid6_2data_recov_neon(int bytes, uint8_t *p, uint8_t *q, uint8_t *dp,
+ uint8_t *dq, const uint8_t *pbmul,
+ const uint8_t *qmul);
+
+void __raid6_datap_recov_neon(int bytes, uint8_t *p, uint8_t *q, uint8_t *dq,
+ const uint8_t *qmul);
+
+
diff --git a/lib/raid6/neon.uc b/lib/raid6/neon.uc
new file mode 100644
index 000000000..355270af0
--- /dev/null
+++ b/lib/raid6/neon.uc
@@ -0,0 +1,153 @@
+/* -----------------------------------------------------------------------
+ *
+ * neon.uc - RAID-6 syndrome calculation using ARM NEON instructions
+ *
+ * Copyright (C) 2012 Rob Herring
+ * Copyright (C) 2015 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ *
+ * Based on altivec.uc:
+ * Copyright 2002-2004 H. Peter Anvin - All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 59 Temple Place Ste 330,
+ * Boston, MA 02111-1307, USA; either version 2 of the License, or
+ * (at your option) any later version; incorporated herein by reference.
+ *
+ * ----------------------------------------------------------------------- */
+
+/*
+ * neon$#.c
+ *
+ * $#-way unrolled RAID-6 syndrome calculation using NEON intrinsics
+ *
+ * This file is postprocessed using unroll.awk
+ */
+
+#include <arm_neon.h>
+#include "neon.h"
+
+typedef uint8x16_t unative_t;
+
+#define NSIZE sizeof(unative_t)
+
+/*
+ * The SHLBYTE() operation shifts each byte left by 1, *not*
+ * rolling over into the next byte
+ */
+static inline unative_t SHLBYTE(unative_t v)
+{
+ return vshlq_n_u8(v, 1);
+}
+
+/*
+ * The MASK() operation returns 0xFF in any byte for which the high
+ * bit is 1, 0x00 for any byte for which the high bit is 0.
+ */
+static inline unative_t MASK(unative_t v)
+{
+ return (unative_t)vshrq_n_s8((int8x16_t)v, 7);
+}
+
+static inline unative_t PMUL(unative_t v, unative_t u)
+{
+ return (unative_t)vmulq_p8((poly8x16_t)v, (poly8x16_t)u);
+}
+
+void raid6_neon$#_gen_syndrome_real(int disks, unsigned long bytes, void **ptrs)
+{
+ uint8_t **dptr = (uint8_t **)ptrs;
+ uint8_t *p, *q;
+ int d, z, z0;
+
+ register unative_t wd$$, wq$$, wp$$, w1$$, w2$$;
+ const unative_t x1d = vdupq_n_u8(0x1d);
+
+ z0 = disks - 3; /* Highest data disk */
+ p = dptr[z0+1]; /* XOR parity */
+ q = dptr[z0+2]; /* RS syndrome */
+
+ for ( d = 0 ; d < bytes ; d += NSIZE*$# ) {
+ wq$$ = wp$$ = vld1q_u8(&dptr[z0][d+$$*NSIZE]);
+ for ( z = z0-1 ; z >= 0 ; z-- ) {
+ wd$$ = vld1q_u8(&dptr[z][d+$$*NSIZE]);
+ wp$$ = veorq_u8(wp$$, wd$$);
+ w2$$ = MASK(wq$$);
+ w1$$ = SHLBYTE(wq$$);
+
+ w2$$ = vandq_u8(w2$$, x1d);
+ w1$$ = veorq_u8(w1$$, w2$$);
+ wq$$ = veorq_u8(w1$$, wd$$);
+ }
+ vst1q_u8(&p[d+NSIZE*$$], wp$$);
+ vst1q_u8(&q[d+NSIZE*$$], wq$$);
+ }
+}
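
Both outputs fall out of a single pass: P is the plain XOR of the data
blocks, while Q is evaluated by Horner's rule, with the MASK/SHLBYTE/AND/XOR
sequence serving as the multiply-by-g step in GF(2^8):

    P = \bigoplus_{z=0}^{z_0} D_z, \qquad
    Q = \bigoplus_{z=0}^{z_0} g^z D_z
      = \Bigl(\cdots\bigl(D_{z_0} \cdot g \oplus D_{z_0-1}\bigr)\cdot g
        \oplus \cdots\Bigr)\cdot g \oplus D_0
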
+
+void raid6_neon$#_xor_syndrome_real(int disks, int start, int stop,
+ unsigned long bytes, void **ptrs)
+{
+ uint8_t **dptr = (uint8_t **)ptrs;
+ uint8_t *p, *q;
+ int d, z, z0;
+
+ register unative_t wd$$, wq$$, wp$$, w1$$, w2$$;
+ const unative_t x1d = vdupq_n_u8(0x1d);
+
+ z0 = stop; /* P/Q right side optimization */
+ p = dptr[disks-2]; /* XOR parity */
+ q = dptr[disks-1]; /* RS syndrome */
+
+ for ( d = 0 ; d < bytes ; d += NSIZE*$# ) {
+ wq$$ = vld1q_u8(&dptr[z0][d+$$*NSIZE]);
+ wp$$ = veorq_u8(vld1q_u8(&p[d+$$*NSIZE]), wq$$);
+
+ /* P/Q data pages */
+ for ( z = z0-1 ; z >= start ; z-- ) {
+ wd$$ = vld1q_u8(&dptr[z][d+$$*NSIZE]);
+ wp$$ = veorq_u8(wp$$, wd$$);
+ w2$$ = MASK(wq$$);
+ w1$$ = SHLBYTE(wq$$);
+
+ w2$$ = vandq_u8(w2$$, x1d);
+ w1$$ = veorq_u8(w1$$, w2$$);
+ wq$$ = veorq_u8(w1$$, wd$$);
+ }
+ /* P/Q left side optimization */
+ for ( z = start-1 ; z >= 3 ; z -= 4 ) {
+ w2$$ = vshrq_n_u8(wq$$, 4);
+ w1$$ = vshlq_n_u8(wq$$, 4);
+
+ w2$$ = PMUL(w2$$, x1d);
+ wq$$ = veorq_u8(w1$$, w2$$);
+ }
+
+ switch (z) {
+ case 2:
+ w2$$ = vshrq_n_u8(wq$$, 5);
+ w1$$ = vshlq_n_u8(wq$$, 3);
+
+ w2$$ = PMUL(w2$$, x1d);
+ wq$$ = veorq_u8(w1$$, w2$$);
+ break;
+ case 1:
+ w2$$ = vshrq_n_u8(wq$$, 6);
+ w1$$ = vshlq_n_u8(wq$$, 2);
+
+ w2$$ = PMUL(w2$$, x1d);
+ wq$$ = veorq_u8(w1$$, w2$$);
+ break;
+ case 0:
+ w2$$ = MASK(wq$$);
+ w1$$ = SHLBYTE(wq$$);
+
+ w2$$ = vandq_u8(w2$$, x1d);
+ wq$$ = veorq_u8(w1$$, w2$$);
+ }
+ w1$$ = vld1q_u8(&q[d+NSIZE*$$]);
+ wq$$ = veorq_u8(wq$$, w1$$);
+
+ vst1q_u8(&p[d+NSIZE*$$], wp$$);
+ vst1q_u8(&q[d+NSIZE*$$], wq$$);
+ }
+}
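
In the "left side" loop above, four skipped disks at a time are folded
into Q by multiplying every byte by x^4: the low nibble is shifted up and
the high nibble is carrylessly multiplied (vmulq_p8) by the reduction
constant 0x1d. One step suffices because deg(nibble) <= 3 and
deg(0x1d) = 4, so the product never exceeds 8 bits; the switch then
finishes any remainder with x^3, x^2 or x^1. A scalar sketch of the x^4
step, with clmul8() as an illustrative stand-in for a one-byte vmulq_p8:

    /* Carryless (polynomial) multiply of two bytes, truncated to 8 bits. */
    static uint8_t clmul8(uint8_t a, uint8_t b)
    {
        uint8_t r = 0;

        while (b) {
            if (b & 1)
                r ^= a;
            a <<= 1;
            b >>= 1;
        }
        return r;
    }

    /* q * x^4 in GF(2^8): the per-byte effect of one "z -= 4" iteration. */
    static uint8_t gf_mul_x4(uint8_t q)
    {
        return (uint8_t)(q << 4) ^ clmul8(q >> 4, 0x1d);
    }
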
diff --git a/lib/raid6/recov.c b/lib/raid6/recov.c
new file mode 100644
index 000000000..a7c1b2bbe
--- /dev/null
+++ b/lib/raid6/recov.c
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* -*- linux-c -*- ------------------------------------------------------- *
+ *
+ * Copyright 2002 H. Peter Anvin - All Rights Reserved
+ *
+ * ----------------------------------------------------------------------- */
+
+/*
+ * raid6/recov.c
+ *
+ * RAID-6 data recovery in dual failure mode. In single failure mode,
+ * use the RAID-5 algorithm (or, in the case of Q failure, just reconstruct
+ * the syndrome).
+ */
+
+#include <linux/raid/pq.h>
+
+/* Recover two failed data blocks. */
+static void raid6_2data_recov_intx1(int disks, size_t bytes, int faila,
+ int failb, void **ptrs)
+{
+ u8 *p, *q, *dp, *dq;
+ u8 px, qx, db;
+ const u8 *pbmul; /* P multiplier table for B data */
+ const u8 *qmul; /* Q multiplier table (for both) */
+
+ p = (u8 *)ptrs[disks-2];
+ q = (u8 *)ptrs[disks-1];
+
+ /* Compute syndrome with zero for the missing data pages
+ Use the dead data pages as temporary storage for
+ delta p and delta q */
+ dp = (u8 *)ptrs[faila];
+ ptrs[faila] = (void *)raid6_empty_zero_page;
+ ptrs[disks-2] = dp;
+ dq = (u8 *)ptrs[failb];
+ ptrs[failb] = (void *)raid6_empty_zero_page;
+ ptrs[disks-1] = dq;
+
+ raid6_call.gen_syndrome(disks, bytes, ptrs);
+
+ /* Restore pointer table */
+ ptrs[faila] = dp;
+ ptrs[failb] = dq;
+ ptrs[disks-2] = p;
+ ptrs[disks-1] = q;
+
+ /* Now, pick the proper data tables */
+ pbmul = raid6_gfmul[raid6_gfexi[failb-faila]];
+ qmul = raid6_gfmul[raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]]];
+
+ /* Now do it... */
+ while ( bytes-- ) {
+ px = *p ^ *dp;
+ qx = qmul[*q ^ *dq];
+ *dq++ = db = pbmul[px] ^ qx; /* Reconstructed B */
+ *dp++ = db ^ px; /* Reconstructed A */
+ p++; q++;
+ }
+}
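
The two multiplier tables encode the closed-form solution of the
two-erasure system. With g the GF(2^8) generator and a < b the failed
indices, the syndromes recomputed over zeroed pages satisfy
P \oplus P_{xy} = D_a \oplus D_b and
Q \oplus Q_{xy} = g^a D_a \oplus g^b D_b, hence (all sums are XOR):

    D_b = \frac{g^a}{g^a + g^b}\,(P \oplus P_{xy})
        + \frac{1}{g^a + g^b}\,(Q \oplus Q_{xy}),
    \qquad
    D_a = D_b \oplus (P \oplus P_{xy})

The first coefficient equals 1/(g^{b-a} + 1), which is exactly
raid6_gfexi[failb-faila]; the second is
raid6_gfinv[raid6_gfexp[faila] ^ raid6_gfexp[failb]], so pbmul and qmul
above are per-byte multiplications by those two constants.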
+
+/* Recover failure of one data block plus the P block */
+static void raid6_datap_recov_intx1(int disks, size_t bytes, int faila,
+ void **ptrs)
+{
+ u8 *p, *q, *dq;
+ const u8 *qmul; /* Q multiplier table */
+
+ p = (u8 *)ptrs[disks-2];
+ q = (u8 *)ptrs[disks-1];
+
+ /* Compute syndrome with zero for the missing data page
+ Use the dead data page as temporary storage for delta q */
+ dq = (u8 *)ptrs[faila];
+ ptrs[faila] = (void *)raid6_empty_zero_page;
+ ptrs[disks-1] = dq;
+
+ raid6_call.gen_syndrome(disks, bytes, ptrs);
+
+ /* Restore pointer table */
+ ptrs[faila] = dq;
+ ptrs[disks-1] = q;
+
+ /* Now, pick the proper data tables */
+ qmul = raid6_gfmul[raid6_gfinv[raid6_gfexp[faila]]];
+
+ /* Now do it... */
+ while ( bytes-- ) {
+ *p++ ^= *dq = qmul[*q ^ *dq];
+ q++; dq++;
+ }
+}
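
The data+P case needs only one division: with D_a zeroed, the recomputed
syndrome satisfies Q \oplus Q_x = g^a D_a, so

    D_a = g^{-a}\,(Q \oplus Q_x), \qquad P = \tilde{P} \oplus D_a

where \tilde{P} is the parity recomputed with the missing block zeroed;
qmul above is the multiply-by-g^{-a} table.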
+
+
+const struct raid6_recov_calls raid6_recov_intx1 = {
+ .data2 = raid6_2data_recov_intx1,
+ .datap = raid6_datap_recov_intx1,
+ .valid = NULL,
+ .name = "intx1",
+ .priority = 0,
+};
+
+#ifndef __KERNEL__
+/* Testing only */
+
+/* Recover two failed blocks. */
+void raid6_dual_recov(int disks, size_t bytes, int faila, int failb, void **ptrs)
+{
+ if ( faila > failb ) {
+ int tmp = faila;
+ faila = failb;
+ failb = tmp;
+ }
+
+ if ( failb == disks-1 ) {
+ if ( faila == disks-2 ) {
+ /* P+Q failure. Just rebuild the syndrome. */
+ raid6_call.gen_syndrome(disks, bytes, ptrs);
+ } else {
+ /* data+Q failure. Reconstruct data from P,
+ then rebuild syndrome. */
+ /* NOT IMPLEMENTED - equivalent to RAID-5 */
+ }
+ } else {
+ if ( failb == disks-2 ) {
+ /* data+P failure. */
+ raid6_datap_recov(disks, bytes, faila, ptrs);
+ } else {
+ /* data+data failure. */
+ raid6_2data_recov(disks, bytes, faila, failb, ptrs);
+ }
+ }
+}
+
+#endif
diff --git a/lib/raid6/recov_avx2.c b/lib/raid6/recov_avx2.c
new file mode 100644
index 000000000..4e8095403
--- /dev/null
+++ b/lib/raid6/recov_avx2.c
@@ -0,0 +1,313 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2012 Intel Corporation
+ * Author: Jim Kukunas <james.t.kukunas@linux.intel.com>
+ */
+
+#include <linux/raid/pq.h>
+#include "x86.h"
+
+static int raid6_has_avx2(void)
+{
+ return boot_cpu_has(X86_FEATURE_AVX2) &&
+ boot_cpu_has(X86_FEATURE_AVX);
+}
+
+static void raid6_2data_recov_avx2(int disks, size_t bytes, int faila,
+ int failb, void **ptrs)
+{
+ u8 *p, *q, *dp, *dq;
+ const u8 *pbmul; /* P multiplier table for B data */
+ const u8 *qmul; /* Q multiplier table (for both) */
+ const u8 x0f = 0x0f;
+
+ p = (u8 *)ptrs[disks-2];
+ q = (u8 *)ptrs[disks-1];
+
+ /* Compute syndrome with zero for the missing data pages
+ Use the dead data pages as temporary storage for
+ delta p and delta q */
+ dp = (u8 *)ptrs[faila];
+ ptrs[faila] = (void *)raid6_empty_zero_page;
+ ptrs[disks-2] = dp;
+ dq = (u8 *)ptrs[failb];
+ ptrs[failb] = (void *)raid6_empty_zero_page;
+ ptrs[disks-1] = dq;
+
+ raid6_call.gen_syndrome(disks, bytes, ptrs);
+
+ /* Restore pointer table */
+ ptrs[faila] = dp;
+ ptrs[failb] = dq;
+ ptrs[disks-2] = p;
+ ptrs[disks-1] = q;
+
+ /* Now, pick the proper data tables */
+ pbmul = raid6_vgfmul[raid6_gfexi[failb-faila]];
+ qmul = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila] ^
+ raid6_gfexp[failb]]];
+
+ kernel_fpu_begin();
+
+ /* ymm0 = x0f[16] */
+ asm volatile("vpbroadcastb %0, %%ymm7" : : "m" (x0f));
+
+ while (bytes) {
+#ifdef CONFIG_X86_64
+ asm volatile("vmovdqa %0, %%ymm1" : : "m" (q[0]));
+ asm volatile("vmovdqa %0, %%ymm9" : : "m" (q[32]));
+ asm volatile("vmovdqa %0, %%ymm0" : : "m" (p[0]));
+ asm volatile("vmovdqa %0, %%ymm8" : : "m" (p[32]));
+ asm volatile("vpxor %0, %%ymm1, %%ymm1" : : "m" (dq[0]));
+ asm volatile("vpxor %0, %%ymm9, %%ymm9" : : "m" (dq[32]));
+ asm volatile("vpxor %0, %%ymm0, %%ymm0" : : "m" (dp[0]));
+ asm volatile("vpxor %0, %%ymm8, %%ymm8" : : "m" (dp[32]));
+
+ /*
+ * 1 = dq[0] ^ q[0]
+ * 9 = dq[32] ^ q[32]
+ * 0 = dp[0] ^ p[0]
+ * 8 = dp[32] ^ p[32]
+ */
+
+ asm volatile("vbroadcasti128 %0, %%ymm4" : : "m" (qmul[0]));
+ asm volatile("vbroadcasti128 %0, %%ymm5" : : "m" (qmul[16]));
+
+ asm volatile("vpsraw $4, %ymm1, %ymm3");
+ asm volatile("vpsraw $4, %ymm9, %ymm12");
+ asm volatile("vpand %ymm7, %ymm1, %ymm1");
+ asm volatile("vpand %ymm7, %ymm9, %ymm9");
+ asm volatile("vpand %ymm7, %ymm3, %ymm3");
+ asm volatile("vpand %ymm7, %ymm12, %ymm12");
+ asm volatile("vpshufb %ymm9, %ymm4, %ymm14");
+ asm volatile("vpshufb %ymm1, %ymm4, %ymm4");
+ asm volatile("vpshufb %ymm12, %ymm5, %ymm15");
+ asm volatile("vpshufb %ymm3, %ymm5, %ymm5");
+ asm volatile("vpxor %ymm14, %ymm15, %ymm15");
+ asm volatile("vpxor %ymm4, %ymm5, %ymm5");
+
+ /*
+ * 5 = qx[0]
+ * 15 = qx[32]
+ */
+
+ asm volatile("vbroadcasti128 %0, %%ymm4" : : "m" (pbmul[0]));
+ asm volatile("vbroadcasti128 %0, %%ymm1" : : "m" (pbmul[16]));
+ asm volatile("vpsraw $4, %ymm0, %ymm2");
+ asm volatile("vpsraw $4, %ymm8, %ymm6");
+ asm volatile("vpand %ymm7, %ymm0, %ymm3");
+ asm volatile("vpand %ymm7, %ymm8, %ymm14");
+ asm volatile("vpand %ymm7, %ymm2, %ymm2");
+ asm volatile("vpand %ymm7, %ymm6, %ymm6");
+ asm volatile("vpshufb %ymm14, %ymm4, %ymm12");
+ asm volatile("vpshufb %ymm3, %ymm4, %ymm4");
+ asm volatile("vpshufb %ymm6, %ymm1, %ymm13");
+ asm volatile("vpshufb %ymm2, %ymm1, %ymm1");
+ asm volatile("vpxor %ymm4, %ymm1, %ymm1");
+ asm volatile("vpxor %ymm12, %ymm13, %ymm13");
+
+ /*
+ * 1 = pbmul[px[0]]
+ * 13 = pbmul[px[32]]
+ */
+ asm volatile("vpxor %ymm5, %ymm1, %ymm1");
+ asm volatile("vpxor %ymm15, %ymm13, %ymm13");
+
+ /*
+ * 1 = db = DQ
+ * 13 = db[32] = DQ[32]
+ */
+ asm volatile("vmovdqa %%ymm1, %0" : "=m" (dq[0]));
+ asm volatile("vmovdqa %%ymm13,%0" : "=m" (dq[32]));
+ asm volatile("vpxor %ymm1, %ymm0, %ymm0");
+ asm volatile("vpxor %ymm13, %ymm8, %ymm8");
+
+ asm volatile("vmovdqa %%ymm0, %0" : "=m" (dp[0]));
+ asm volatile("vmovdqa %%ymm8, %0" : "=m" (dp[32]));
+
+ bytes -= 64;
+ p += 64;
+ q += 64;
+ dp += 64;
+ dq += 64;
+#else
+ asm volatile("vmovdqa %0, %%ymm1" : : "m" (*q));
+ asm volatile("vmovdqa %0, %%ymm0" : : "m" (*p));
+ asm volatile("vpxor %0, %%ymm1, %%ymm1" : : "m" (*dq));
+ asm volatile("vpxor %0, %%ymm0, %%ymm0" : : "m" (*dp));
+
+ /* 1 = dq ^ q; 0 = dp ^ p */
+
+ asm volatile("vbroadcasti128 %0, %%ymm4" : : "m" (qmul[0]));
+ asm volatile("vbroadcasti128 %0, %%ymm5" : : "m" (qmul[16]));
+
+ /*
+ * 1 = dq ^ q
+		 * 3 = (dq ^ q) >> 4
+ */
+ asm volatile("vpsraw $4, %ymm1, %ymm3");
+ asm volatile("vpand %ymm7, %ymm1, %ymm1");
+ asm volatile("vpand %ymm7, %ymm3, %ymm3");
+ asm volatile("vpshufb %ymm1, %ymm4, %ymm4");
+ asm volatile("vpshufb %ymm3, %ymm5, %ymm5");
+ asm volatile("vpxor %ymm4, %ymm5, %ymm5");
+
+ /* 5 = qx */
+
+ asm volatile("vbroadcasti128 %0, %%ymm4" : : "m" (pbmul[0]));
+ asm volatile("vbroadcasti128 %0, %%ymm1" : : "m" (pbmul[16]));
+
+ asm volatile("vpsraw $4, %ymm0, %ymm2");
+ asm volatile("vpand %ymm7, %ymm0, %ymm3");
+ asm volatile("vpand %ymm7, %ymm2, %ymm2");
+ asm volatile("vpshufb %ymm3, %ymm4, %ymm4");
+ asm volatile("vpshufb %ymm2, %ymm1, %ymm1");
+ asm volatile("vpxor %ymm4, %ymm1, %ymm1");
+
+ /* 1 = pbmul[px] */
+ asm volatile("vpxor %ymm5, %ymm1, %ymm1");
+ /* 1 = db = DQ */
+ asm volatile("vmovdqa %%ymm1, %0" : "=m" (dq[0]));
+
+ asm volatile("vpxor %ymm1, %ymm0, %ymm0");
+ asm volatile("vmovdqa %%ymm0, %0" : "=m" (dp[0]));
+
+ bytes -= 32;
+ p += 32;
+ q += 32;
+ dp += 32;
+ dq += 32;
+#endif
+ }
+
+ kernel_fpu_end();
+}
+
+static void raid6_datap_recov_avx2(int disks, size_t bytes, int faila,
+ void **ptrs)
+{
+ u8 *p, *q, *dq;
+ const u8 *qmul; /* Q multiplier table */
+ const u8 x0f = 0x0f;
+
+ p = (u8 *)ptrs[disks-2];
+ q = (u8 *)ptrs[disks-1];
+
+ /* Compute syndrome with zero for the missing data page
+ Use the dead data page as temporary storage for delta q */
+ dq = (u8 *)ptrs[faila];
+ ptrs[faila] = (void *)raid6_empty_zero_page;
+ ptrs[disks-1] = dq;
+
+ raid6_call.gen_syndrome(disks, bytes, ptrs);
+
+ /* Restore pointer table */
+ ptrs[faila] = dq;
+ ptrs[disks-1] = q;
+
+ /* Now, pick the proper data tables */
+ qmul = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila]]];
+
+ kernel_fpu_begin();
+
+ asm volatile("vpbroadcastb %0, %%ymm7" : : "m" (x0f));
+
+ while (bytes) {
+#ifdef CONFIG_X86_64
+ asm volatile("vmovdqa %0, %%ymm3" : : "m" (dq[0]));
+ asm volatile("vmovdqa %0, %%ymm8" : : "m" (dq[32]));
+ asm volatile("vpxor %0, %%ymm3, %%ymm3" : : "m" (q[0]));
+ asm volatile("vpxor %0, %%ymm8, %%ymm8" : : "m" (q[32]));
+
+ /*
+ * 3 = q[0] ^ dq[0]
+ * 8 = q[32] ^ dq[32]
+ */
+ asm volatile("vbroadcasti128 %0, %%ymm0" : : "m" (qmul[0]));
+ asm volatile("vmovapd %ymm0, %ymm13");
+ asm volatile("vbroadcasti128 %0, %%ymm1" : : "m" (qmul[16]));
+ asm volatile("vmovapd %ymm1, %ymm14");
+
+ asm volatile("vpsraw $4, %ymm3, %ymm6");
+ asm volatile("vpsraw $4, %ymm8, %ymm12");
+ asm volatile("vpand %ymm7, %ymm3, %ymm3");
+ asm volatile("vpand %ymm7, %ymm8, %ymm8");
+ asm volatile("vpand %ymm7, %ymm6, %ymm6");
+ asm volatile("vpand %ymm7, %ymm12, %ymm12");
+ asm volatile("vpshufb %ymm3, %ymm0, %ymm0");
+ asm volatile("vpshufb %ymm8, %ymm13, %ymm13");
+ asm volatile("vpshufb %ymm6, %ymm1, %ymm1");
+ asm volatile("vpshufb %ymm12, %ymm14, %ymm14");
+ asm volatile("vpxor %ymm0, %ymm1, %ymm1");
+ asm volatile("vpxor %ymm13, %ymm14, %ymm14");
+
+ /*
+ * 1 = qmul[q[0] ^ dq[0]]
+ * 14 = qmul[q[32] ^ dq[32]]
+ */
+ asm volatile("vmovdqa %0, %%ymm2" : : "m" (p[0]));
+ asm volatile("vmovdqa %0, %%ymm12" : : "m" (p[32]));
+ asm volatile("vpxor %ymm1, %ymm2, %ymm2");
+ asm volatile("vpxor %ymm14, %ymm12, %ymm12");
+
+ /*
+ * 2 = p[0] ^ qmul[q[0] ^ dq[0]]
+ * 12 = p[32] ^ qmul[q[32] ^ dq[32]]
+ */
+
+ asm volatile("vmovdqa %%ymm1, %0" : "=m" (dq[0]));
+ asm volatile("vmovdqa %%ymm14, %0" : "=m" (dq[32]));
+ asm volatile("vmovdqa %%ymm2, %0" : "=m" (p[0]));
+ asm volatile("vmovdqa %%ymm12,%0" : "=m" (p[32]));
+
+ bytes -= 64;
+ p += 64;
+ q += 64;
+ dq += 64;
+#else
+ asm volatile("vmovdqa %0, %%ymm3" : : "m" (dq[0]));
+ asm volatile("vpxor %0, %%ymm3, %%ymm3" : : "m" (q[0]));
+
+ /* 3 = q ^ dq */
+
+ asm volatile("vbroadcasti128 %0, %%ymm0" : : "m" (qmul[0]));
+ asm volatile("vbroadcasti128 %0, %%ymm1" : : "m" (qmul[16]));
+
+ asm volatile("vpsraw $4, %ymm3, %ymm6");
+ asm volatile("vpand %ymm7, %ymm3, %ymm3");
+ asm volatile("vpand %ymm7, %ymm6, %ymm6");
+ asm volatile("vpshufb %ymm3, %ymm0, %ymm0");
+ asm volatile("vpshufb %ymm6, %ymm1, %ymm1");
+ asm volatile("vpxor %ymm0, %ymm1, %ymm1");
+
+ /* 1 = qmul[q ^ dq] */
+
+ asm volatile("vmovdqa %0, %%ymm2" : : "m" (p[0]));
+ asm volatile("vpxor %ymm1, %ymm2, %ymm2");
+
+ /* 2 = p ^ qmul[q ^ dq] */
+
+ asm volatile("vmovdqa %%ymm1, %0" : "=m" (dq[0]));
+ asm volatile("vmovdqa %%ymm2, %0" : "=m" (p[0]));
+
+ bytes -= 32;
+ p += 32;
+ q += 32;
+ dq += 32;
+#endif
+ }
+
+ kernel_fpu_end();
+}
+
+const struct raid6_recov_calls raid6_recov_avx2 = {
+ .data2 = raid6_2data_recov_avx2,
+ .datap = raid6_datap_recov_avx2,
+ .valid = raid6_has_avx2,
+#ifdef CONFIG_X86_64
+ .name = "avx2x2",
+#else
+ .name = "avx2x1",
+#endif
+ .priority = 2,
+};
diff --git a/lib/raid6/recov_avx512.c b/lib/raid6/recov_avx512.c
new file mode 100644
index 000000000..fd9e15bf3
--- /dev/null
+++ b/lib/raid6/recov_avx512.c
@@ -0,0 +1,383 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2016 Intel Corporation
+ *
+ * Author: Gayatri Kammela <gayatri.kammela@intel.com>
+ * Author: Megha Dey <megha.dey@linux.intel.com>
+ */
+
+#ifdef CONFIG_AS_AVX512
+
+#include <linux/raid/pq.h>
+#include "x86.h"
+
+static int raid6_has_avx512(void)
+{
+ return boot_cpu_has(X86_FEATURE_AVX2) &&
+ boot_cpu_has(X86_FEATURE_AVX) &&
+ boot_cpu_has(X86_FEATURE_AVX512F) &&
+ boot_cpu_has(X86_FEATURE_AVX512BW) &&
+ boot_cpu_has(X86_FEATURE_AVX512VL) &&
+ boot_cpu_has(X86_FEATURE_AVX512DQ);
+}
+
+static void raid6_2data_recov_avx512(int disks, size_t bytes, int faila,
+ int failb, void **ptrs)
+{
+ u8 *p, *q, *dp, *dq;
+ const u8 *pbmul; /* P multiplier table for B data */
+ const u8 *qmul; /* Q multiplier table (for both) */
+ const u8 x0f = 0x0f;
+
+ p = (u8 *)ptrs[disks-2];
+ q = (u8 *)ptrs[disks-1];
+
+ /*
+ * Compute syndrome with zero for the missing data pages
+ * Use the dead data pages as temporary storage for
+ * delta p and delta q
+ */
+
+ dp = (u8 *)ptrs[faila];
+ ptrs[faila] = (void *)raid6_empty_zero_page;
+ ptrs[disks-2] = dp;
+ dq = (u8 *)ptrs[failb];
+ ptrs[failb] = (void *)raid6_empty_zero_page;
+ ptrs[disks-1] = dq;
+
+ raid6_call.gen_syndrome(disks, bytes, ptrs);
+
+ /* Restore pointer table */
+ ptrs[faila] = dp;
+ ptrs[failb] = dq;
+ ptrs[disks-2] = p;
+ ptrs[disks-1] = q;
+
+ /* Now, pick the proper data tables */
+ pbmul = raid6_vgfmul[raid6_gfexi[failb-faila]];
+ qmul = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila] ^
+ raid6_gfexp[failb]]];
+
+ kernel_fpu_begin();
+
+ /* zmm0 = x0f[16] */
+ asm volatile("vpbroadcastb %0, %%zmm7" : : "m" (x0f));
+
+ while (bytes) {
+#ifdef CONFIG_X86_64
+ asm volatile("vmovdqa64 %0, %%zmm1\n\t"
+ "vmovdqa64 %1, %%zmm9\n\t"
+ "vmovdqa64 %2, %%zmm0\n\t"
+ "vmovdqa64 %3, %%zmm8\n\t"
+ "vpxorq %4, %%zmm1, %%zmm1\n\t"
+ "vpxorq %5, %%zmm9, %%zmm9\n\t"
+ "vpxorq %6, %%zmm0, %%zmm0\n\t"
+ "vpxorq %7, %%zmm8, %%zmm8"
+ :
+ : "m" (q[0]), "m" (q[64]), "m" (p[0]),
+ "m" (p[64]), "m" (dq[0]), "m" (dq[64]),
+ "m" (dp[0]), "m" (dp[64]));
+
+ /*
+ * 1 = dq[0] ^ q[0]
+ * 9 = dq[64] ^ q[64]
+ * 0 = dp[0] ^ p[0]
+ * 8 = dp[64] ^ p[64]
+ */
+
+ asm volatile("vbroadcasti64x2 %0, %%zmm4\n\t"
+ "vbroadcasti64x2 %1, %%zmm5"
+ :
+ : "m" (qmul[0]), "m" (qmul[16]));
+
+ asm volatile("vpsraw $4, %%zmm1, %%zmm3\n\t"
+ "vpsraw $4, %%zmm9, %%zmm12\n\t"
+ "vpandq %%zmm7, %%zmm1, %%zmm1\n\t"
+ "vpandq %%zmm7, %%zmm9, %%zmm9\n\t"
+ "vpandq %%zmm7, %%zmm3, %%zmm3\n\t"
+ "vpandq %%zmm7, %%zmm12, %%zmm12\n\t"
+ "vpshufb %%zmm9, %%zmm4, %%zmm14\n\t"
+ "vpshufb %%zmm1, %%zmm4, %%zmm4\n\t"
+ "vpshufb %%zmm12, %%zmm5, %%zmm15\n\t"
+ "vpshufb %%zmm3, %%zmm5, %%zmm5\n\t"
+ "vpxorq %%zmm14, %%zmm15, %%zmm15\n\t"
+ "vpxorq %%zmm4, %%zmm5, %%zmm5"
+ :
+ : );
+
+ /*
+ * 5 = qx[0]
+ * 15 = qx[64]
+ */
+
+ asm volatile("vbroadcasti64x2 %0, %%zmm4\n\t"
+ "vbroadcasti64x2 %1, %%zmm1\n\t"
+ "vpsraw $4, %%zmm0, %%zmm2\n\t"
+ "vpsraw $4, %%zmm8, %%zmm6\n\t"
+ "vpandq %%zmm7, %%zmm0, %%zmm3\n\t"
+ "vpandq %%zmm7, %%zmm8, %%zmm14\n\t"
+ "vpandq %%zmm7, %%zmm2, %%zmm2\n\t"
+ "vpandq %%zmm7, %%zmm6, %%zmm6\n\t"
+ "vpshufb %%zmm14, %%zmm4, %%zmm12\n\t"
+ "vpshufb %%zmm3, %%zmm4, %%zmm4\n\t"
+ "vpshufb %%zmm6, %%zmm1, %%zmm13\n\t"
+ "vpshufb %%zmm2, %%zmm1, %%zmm1\n\t"
+ "vpxorq %%zmm4, %%zmm1, %%zmm1\n\t"
+ "vpxorq %%zmm12, %%zmm13, %%zmm13"
+ :
+ : "m" (pbmul[0]), "m" (pbmul[16]));
+
+ /*
+ * 1 = pbmul[px[0]]
+ * 13 = pbmul[px[64]]
+ */
+ asm volatile("vpxorq %%zmm5, %%zmm1, %%zmm1\n\t"
+ "vpxorq %%zmm15, %%zmm13, %%zmm13"
+ :
+ : );
+
+ /*
+ * 1 = db = DQ
+ * 13 = db[64] = DQ[64]
+ */
+ asm volatile("vmovdqa64 %%zmm1, %0\n\t"
+ "vmovdqa64 %%zmm13,%1\n\t"
+ "vpxorq %%zmm1, %%zmm0, %%zmm0\n\t"
+ "vpxorq %%zmm13, %%zmm8, %%zmm8"
+ :
+ : "m" (dq[0]), "m" (dq[64]));
+
+ asm volatile("vmovdqa64 %%zmm0, %0\n\t"
+ "vmovdqa64 %%zmm8, %1"
+ :
+ : "m" (dp[0]), "m" (dp[64]));
+
+ bytes -= 128;
+ p += 128;
+ q += 128;
+ dp += 128;
+ dq += 128;
+#else
+ asm volatile("vmovdqa64 %0, %%zmm1\n\t"
+ "vmovdqa64 %1, %%zmm0\n\t"
+ "vpxorq %2, %%zmm1, %%zmm1\n\t"
+ "vpxorq %3, %%zmm0, %%zmm0"
+ :
+ : "m" (*q), "m" (*p), "m"(*dq), "m" (*dp));
+
+ /* 1 = dq ^ q; 0 = dp ^ p */
+
+ asm volatile("vbroadcasti64x2 %0, %%zmm4\n\t"
+ "vbroadcasti64x2 %1, %%zmm5"
+ :
+ : "m" (qmul[0]), "m" (qmul[16]));
+
+ /*
+ * 1 = dq ^ q
+		 * 3 = (dq ^ q) >> 4
+ */
+ asm volatile("vpsraw $4, %%zmm1, %%zmm3\n\t"
+ "vpandq %%zmm7, %%zmm1, %%zmm1\n\t"
+ "vpandq %%zmm7, %%zmm3, %%zmm3\n\t"
+ "vpshufb %%zmm1, %%zmm4, %%zmm4\n\t"
+ "vpshufb %%zmm3, %%zmm5, %%zmm5\n\t"
+ "vpxorq %%zmm4, %%zmm5, %%zmm5"
+ :
+ : );
+
+ /* 5 = qx */
+
+ asm volatile("vbroadcasti64x2 %0, %%zmm4\n\t"
+ "vbroadcasti64x2 %1, %%zmm1"
+ :
+ : "m" (pbmul[0]), "m" (pbmul[16]));
+
+ asm volatile("vpsraw $4, %%zmm0, %%zmm2\n\t"
+ "vpandq %%zmm7, %%zmm0, %%zmm3\n\t"
+ "vpandq %%zmm7, %%zmm2, %%zmm2\n\t"
+ "vpshufb %%zmm3, %%zmm4, %%zmm4\n\t"
+ "vpshufb %%zmm2, %%zmm1, %%zmm1\n\t"
+ "vpxorq %%zmm4, %%zmm1, %%zmm1"
+ :
+ : );
+
+ /* 1 = pbmul[px] */
+ asm volatile("vpxorq %%zmm5, %%zmm1, %%zmm1\n\t"
+ /* 1 = db = DQ */
+ "vmovdqa64 %%zmm1, %0\n\t"
+ :
+ : "m" (dq[0]));
+
+ asm volatile("vpxorq %%zmm1, %%zmm0, %%zmm0\n\t"
+ "vmovdqa64 %%zmm0, %0"
+ :
+ : "m" (dp[0]));
+
+ bytes -= 64;
+ p += 64;
+ q += 64;
+ dp += 64;
+ dq += 64;
+#endif
+ }
+
+ kernel_fpu_end();
+}
+
+static void raid6_datap_recov_avx512(int disks, size_t bytes, int faila,
+ void **ptrs)
+{
+ u8 *p, *q, *dq;
+ const u8 *qmul; /* Q multiplier table */
+ const u8 x0f = 0x0f;
+
+ p = (u8 *)ptrs[disks-2];
+ q = (u8 *)ptrs[disks-1];
+
+ /*
+ * Compute syndrome with zero for the missing data page
+ * Use the dead data page as temporary storage for delta q
+ */
+
+ dq = (u8 *)ptrs[faila];
+ ptrs[faila] = (void *)raid6_empty_zero_page;
+ ptrs[disks-1] = dq;
+
+ raid6_call.gen_syndrome(disks, bytes, ptrs);
+
+ /* Restore pointer table */
+ ptrs[faila] = dq;
+ ptrs[disks-1] = q;
+
+ /* Now, pick the proper data tables */
+ qmul = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila]]];
+
+ kernel_fpu_begin();
+
+ asm volatile("vpbroadcastb %0, %%zmm7" : : "m" (x0f));
+
+ while (bytes) {
+#ifdef CONFIG_X86_64
+ asm volatile("vmovdqa64 %0, %%zmm3\n\t"
+ "vmovdqa64 %1, %%zmm8\n\t"
+ "vpxorq %2, %%zmm3, %%zmm3\n\t"
+ "vpxorq %3, %%zmm8, %%zmm8"
+ :
+ : "m" (dq[0]), "m" (dq[64]), "m" (q[0]),
+ "m" (q[64]));
+
+ /*
+ * 3 = q[0] ^ dq[0]
+ * 8 = q[64] ^ dq[64]
+ */
+ asm volatile("vbroadcasti64x2 %0, %%zmm0\n\t"
+ "vmovapd %%zmm0, %%zmm13\n\t"
+ "vbroadcasti64x2 %1, %%zmm1\n\t"
+ "vmovapd %%zmm1, %%zmm14"
+ :
+ : "m" (qmul[0]), "m" (qmul[16]));
+
+ asm volatile("vpsraw $4, %%zmm3, %%zmm6\n\t"
+ "vpsraw $4, %%zmm8, %%zmm12\n\t"
+ "vpandq %%zmm7, %%zmm3, %%zmm3\n\t"
+ "vpandq %%zmm7, %%zmm8, %%zmm8\n\t"
+ "vpandq %%zmm7, %%zmm6, %%zmm6\n\t"
+ "vpandq %%zmm7, %%zmm12, %%zmm12\n\t"
+ "vpshufb %%zmm3, %%zmm0, %%zmm0\n\t"
+ "vpshufb %%zmm8, %%zmm13, %%zmm13\n\t"
+ "vpshufb %%zmm6, %%zmm1, %%zmm1\n\t"
+ "vpshufb %%zmm12, %%zmm14, %%zmm14\n\t"
+ "vpxorq %%zmm0, %%zmm1, %%zmm1\n\t"
+ "vpxorq %%zmm13, %%zmm14, %%zmm14"
+ :
+ : );
+
+ /*
+ * 1 = qmul[q[0] ^ dq[0]]
+ * 14 = qmul[q[64] ^ dq[64]]
+ */
+ asm volatile("vmovdqa64 %0, %%zmm2\n\t"
+ "vmovdqa64 %1, %%zmm12\n\t"
+ "vpxorq %%zmm1, %%zmm2, %%zmm2\n\t"
+ "vpxorq %%zmm14, %%zmm12, %%zmm12"
+ :
+ : "m" (p[0]), "m" (p[64]));
+
+ /*
+ * 2 = p[0] ^ qmul[q[0] ^ dq[0]]
+ * 12 = p[64] ^ qmul[q[64] ^ dq[64]]
+ */
+
+ asm volatile("vmovdqa64 %%zmm1, %0\n\t"
+ "vmovdqa64 %%zmm14, %1\n\t"
+ "vmovdqa64 %%zmm2, %2\n\t"
+ "vmovdqa64 %%zmm12,%3"
+ :
+ : "m" (dq[0]), "m" (dq[64]), "m" (p[0]),
+ "m" (p[64]));
+
+ bytes -= 128;
+ p += 128;
+ q += 128;
+ dq += 128;
+#else
+ asm volatile("vmovdqa64 %0, %%zmm3\n\t"
+ "vpxorq %1, %%zmm3, %%zmm3"
+ :
+ : "m" (dq[0]), "m" (q[0]));
+
+ /* 3 = q ^ dq */
+
+ asm volatile("vbroadcasti64x2 %0, %%zmm0\n\t"
+ "vbroadcasti64x2 %1, %%zmm1"
+ :
+ : "m" (qmul[0]), "m" (qmul[16]));
+
+ asm volatile("vpsraw $4, %%zmm3, %%zmm6\n\t"
+ "vpandq %%zmm7, %%zmm3, %%zmm3\n\t"
+ "vpandq %%zmm7, %%zmm6, %%zmm6\n\t"
+ "vpshufb %%zmm3, %%zmm0, %%zmm0\n\t"
+ "vpshufb %%zmm6, %%zmm1, %%zmm1\n\t"
+ "vpxorq %%zmm0, %%zmm1, %%zmm1"
+ :
+ : );
+
+ /* 1 = qmul[q ^ dq] */
+
+ asm volatile("vmovdqa64 %0, %%zmm2\n\t"
+ "vpxorq %%zmm1, %%zmm2, %%zmm2"
+ :
+ : "m" (p[0]));
+
+ /* 2 = p ^ qmul[q ^ dq] */
+
+ asm volatile("vmovdqa64 %%zmm1, %0\n\t"
+ "vmovdqa64 %%zmm2, %1"
+ :
+ : "m" (dq[0]), "m" (p[0]));
+
+ bytes -= 64;
+ p += 64;
+ q += 64;
+ dq += 64;
+#endif
+ }
+
+ kernel_fpu_end();
+}
+
+const struct raid6_recov_calls raid6_recov_avx512 = {
+ .data2 = raid6_2data_recov_avx512,
+ .datap = raid6_datap_recov_avx512,
+ .valid = raid6_has_avx512,
+#ifdef CONFIG_X86_64
+ .name = "avx512x2",
+#else
+ .name = "avx512x1",
+#endif
+ .priority = 3,
+};
+
+#else
+#warning "your version of binutils lacks AVX512 support"
+#endif
diff --git a/lib/raid6/recov_loongarch_simd.c b/lib/raid6/recov_loongarch_simd.c
new file mode 100644
index 000000000..94aeac85e
--- /dev/null
+++ b/lib/raid6/recov_loongarch_simd.c
@@ -0,0 +1,513 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * RAID6 recovery algorithms in LoongArch SIMD (LSX & LASX)
+ *
+ * Copyright (C) 2023 WANG Xuerui <git@xen0n.name>
+ *
+ * Originally based on recov_avx2.c and recov_ssse3.c:
+ *
+ * Copyright (C) 2012 Intel Corporation
+ * Author: Jim Kukunas <james.t.kukunas@linux.intel.com>
+ */
+
+#include <linux/raid/pq.h>
+#include "loongarch.h"
+
+/*
+ * Unlike the syndrome calculation algorithms, the recovery algorithms are
+ * not selected by boot-time benchmarking, so we have to specify the
+ * priorities and hope that future cores will all have decent vector
+ * support (i.e. no core where LASX is slower than LSX, let alone slower
+ * than the scalar code).
+ */
+
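
These priorities are consumed at init time by the recovery-algorithm
chooser in lib/raid6/algos.c, which walks the raid6_recov_algos table and
keeps the highest-priority entry whose valid() hook (if any) passes;
roughly (paraphrased, not the verbatim kernel code):

    const struct raid6_recov_calls *best = NULL;
    const struct raid6_recov_calls *const *algo;

    for (algo = raid6_recov_algos; *algo; algo++)
        if ((!best || (*algo)->priority > best->priority) &&
            (!(*algo)->valid || (*algo)->valid()))
            best = *algo;
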
+#ifdef CONFIG_CPU_HAS_LSX
+static int raid6_has_lsx(void)
+{
+ return cpu_has_lsx;
+}
+
+static void raid6_2data_recov_lsx(int disks, size_t bytes, int faila,
+ int failb, void **ptrs)
+{
+ u8 *p, *q, *dp, *dq;
+ const u8 *pbmul; /* P multiplier table for B data */
+ const u8 *qmul; /* Q multiplier table (for both) */
+
+ p = (u8 *)ptrs[disks - 2];
+ q = (u8 *)ptrs[disks - 1];
+
+ /*
+ * Compute syndrome with zero for the missing data pages
+ * Use the dead data pages as temporary storage for
+ * delta p and delta q
+ */
+ dp = (u8 *)ptrs[faila];
+ ptrs[faila] = (void *)raid6_empty_zero_page;
+ ptrs[disks - 2] = dp;
+ dq = (u8 *)ptrs[failb];
+ ptrs[failb] = (void *)raid6_empty_zero_page;
+ ptrs[disks - 1] = dq;
+
+ raid6_call.gen_syndrome(disks, bytes, ptrs);
+
+ /* Restore pointer table */
+ ptrs[faila] = dp;
+ ptrs[failb] = dq;
+ ptrs[disks - 2] = p;
+ ptrs[disks - 1] = q;
+
+ /* Now, pick the proper data tables */
+ pbmul = raid6_vgfmul[raid6_gfexi[failb - faila]];
+ qmul = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila] ^ raid6_gfexp[failb]]];
+
+ kernel_fpu_begin();
+
+ /*
+ * vr20, vr21: qmul
+ * vr22, vr23: pbmul
+ */
+ asm volatile("vld $vr20, %0" : : "m" (qmul[0]));
+ asm volatile("vld $vr21, %0" : : "m" (qmul[16]));
+ asm volatile("vld $vr22, %0" : : "m" (pbmul[0]));
+ asm volatile("vld $vr23, %0" : : "m" (pbmul[16]));
+
+ while (bytes) {
+ /* vr4 - vr7: Q */
+ asm volatile("vld $vr4, %0" : : "m" (q[0]));
+ asm volatile("vld $vr5, %0" : : "m" (q[16]));
+ asm volatile("vld $vr6, %0" : : "m" (q[32]));
+ asm volatile("vld $vr7, %0" : : "m" (q[48]));
+ /* vr4 - vr7: Q + Qxy */
+ asm volatile("vld $vr8, %0" : : "m" (dq[0]));
+ asm volatile("vld $vr9, %0" : : "m" (dq[16]));
+ asm volatile("vld $vr10, %0" : : "m" (dq[32]));
+ asm volatile("vld $vr11, %0" : : "m" (dq[48]));
+ asm volatile("vxor.v $vr4, $vr4, $vr8");
+ asm volatile("vxor.v $vr5, $vr5, $vr9");
+ asm volatile("vxor.v $vr6, $vr6, $vr10");
+ asm volatile("vxor.v $vr7, $vr7, $vr11");
+ /* vr0 - vr3: P */
+ asm volatile("vld $vr0, %0" : : "m" (p[0]));
+ asm volatile("vld $vr1, %0" : : "m" (p[16]));
+ asm volatile("vld $vr2, %0" : : "m" (p[32]));
+ asm volatile("vld $vr3, %0" : : "m" (p[48]));
+ /* vr0 - vr3: P + Pxy */
+ asm volatile("vld $vr8, %0" : : "m" (dp[0]));
+ asm volatile("vld $vr9, %0" : : "m" (dp[16]));
+ asm volatile("vld $vr10, %0" : : "m" (dp[32]));
+ asm volatile("vld $vr11, %0" : : "m" (dp[48]));
+ asm volatile("vxor.v $vr0, $vr0, $vr8");
+ asm volatile("vxor.v $vr1, $vr1, $vr9");
+ asm volatile("vxor.v $vr2, $vr2, $vr10");
+ asm volatile("vxor.v $vr3, $vr3, $vr11");
+
+ /* vr8 - vr11: higher 4 bits of each byte of (Q + Qxy) */
+ asm volatile("vsrli.b $vr8, $vr4, 4");
+ asm volatile("vsrli.b $vr9, $vr5, 4");
+ asm volatile("vsrli.b $vr10, $vr6, 4");
+ asm volatile("vsrli.b $vr11, $vr7, 4");
+ /* vr4 - vr7: lower 4 bits of each byte of (Q + Qxy) */
+ asm volatile("vandi.b $vr4, $vr4, 0x0f");
+ asm volatile("vandi.b $vr5, $vr5, 0x0f");
+ asm volatile("vandi.b $vr6, $vr6, 0x0f");
+ asm volatile("vandi.b $vr7, $vr7, 0x0f");
+ /* lookup from qmul[0] */
+ asm volatile("vshuf.b $vr4, $vr20, $vr20, $vr4");
+ asm volatile("vshuf.b $vr5, $vr20, $vr20, $vr5");
+ asm volatile("vshuf.b $vr6, $vr20, $vr20, $vr6");
+ asm volatile("vshuf.b $vr7, $vr20, $vr20, $vr7");
+ /* lookup from qmul[16] */
+ asm volatile("vshuf.b $vr8, $vr21, $vr21, $vr8");
+ asm volatile("vshuf.b $vr9, $vr21, $vr21, $vr9");
+ asm volatile("vshuf.b $vr10, $vr21, $vr21, $vr10");
+ asm volatile("vshuf.b $vr11, $vr21, $vr21, $vr11");
+ /* vr16 - vr19: B(Q + Qxy) */
+ asm volatile("vxor.v $vr16, $vr8, $vr4");
+ asm volatile("vxor.v $vr17, $vr9, $vr5");
+ asm volatile("vxor.v $vr18, $vr10, $vr6");
+ asm volatile("vxor.v $vr19, $vr11, $vr7");
+
+ /* vr4 - vr7: higher 4 bits of each byte of (P + Pxy) */
+ asm volatile("vsrli.b $vr4, $vr0, 4");
+ asm volatile("vsrli.b $vr5, $vr1, 4");
+ asm volatile("vsrli.b $vr6, $vr2, 4");
+ asm volatile("vsrli.b $vr7, $vr3, 4");
+ /* vr12 - vr15: lower 4 bits of each byte of (P + Pxy) */
+ asm volatile("vandi.b $vr12, $vr0, 0x0f");
+ asm volatile("vandi.b $vr13, $vr1, 0x0f");
+ asm volatile("vandi.b $vr14, $vr2, 0x0f");
+ asm volatile("vandi.b $vr15, $vr3, 0x0f");
+ /* lookup from pbmul[0] */
+ asm volatile("vshuf.b $vr12, $vr22, $vr22, $vr12");
+ asm volatile("vshuf.b $vr13, $vr22, $vr22, $vr13");
+ asm volatile("vshuf.b $vr14, $vr22, $vr22, $vr14");
+ asm volatile("vshuf.b $vr15, $vr22, $vr22, $vr15");
+ /* lookup from pbmul[16] */
+ asm volatile("vshuf.b $vr4, $vr23, $vr23, $vr4");
+ asm volatile("vshuf.b $vr5, $vr23, $vr23, $vr5");
+ asm volatile("vshuf.b $vr6, $vr23, $vr23, $vr6");
+ asm volatile("vshuf.b $vr7, $vr23, $vr23, $vr7");
+ /* vr4 - vr7: A(P + Pxy) */
+ asm volatile("vxor.v $vr4, $vr4, $vr12");
+ asm volatile("vxor.v $vr5, $vr5, $vr13");
+ asm volatile("vxor.v $vr6, $vr6, $vr14");
+ asm volatile("vxor.v $vr7, $vr7, $vr15");
+
+ /* vr4 - vr7: A(P + Pxy) + B(Q + Qxy) = Dx */
+ asm volatile("vxor.v $vr4, $vr4, $vr16");
+ asm volatile("vxor.v $vr5, $vr5, $vr17");
+ asm volatile("vxor.v $vr6, $vr6, $vr18");
+ asm volatile("vxor.v $vr7, $vr7, $vr19");
+ asm volatile("vst $vr4, %0" : "=m" (dq[0]));
+ asm volatile("vst $vr5, %0" : "=m" (dq[16]));
+ asm volatile("vst $vr6, %0" : "=m" (dq[32]));
+ asm volatile("vst $vr7, %0" : "=m" (dq[48]));
+
+ /* vr0 - vr3: P + Pxy + Dx = Dy */
+ asm volatile("vxor.v $vr0, $vr0, $vr4");
+ asm volatile("vxor.v $vr1, $vr1, $vr5");
+ asm volatile("vxor.v $vr2, $vr2, $vr6");
+ asm volatile("vxor.v $vr3, $vr3, $vr7");
+ asm volatile("vst $vr0, %0" : "=m" (dp[0]));
+ asm volatile("vst $vr1, %0" : "=m" (dp[16]));
+ asm volatile("vst $vr2, %0" : "=m" (dp[32]));
+ asm volatile("vst $vr3, %0" : "=m" (dp[48]));
+
+ bytes -= 64;
+ p += 64;
+ q += 64;
+ dp += 64;
+ dq += 64;
+ }
+
+ kernel_fpu_end();
+}
+
+static void raid6_datap_recov_lsx(int disks, size_t bytes, int faila,
+ void **ptrs)
+{
+ u8 *p, *q, *dq;
+ const u8 *qmul; /* Q multiplier table */
+
+ p = (u8 *)ptrs[disks - 2];
+ q = (u8 *)ptrs[disks - 1];
+
+ /*
+ * Compute syndrome with zero for the missing data page
+ * Use the dead data page as temporary storage for delta q
+ */
+ dq = (u8 *)ptrs[faila];
+ ptrs[faila] = (void *)raid6_empty_zero_page;
+ ptrs[disks - 1] = dq;
+
+ raid6_call.gen_syndrome(disks, bytes, ptrs);
+
+ /* Restore pointer table */
+ ptrs[faila] = dq;
+ ptrs[disks - 1] = q;
+
+ /* Now, pick the proper data tables */
+ qmul = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila]]];
+
+ kernel_fpu_begin();
+
+ /* vr22, vr23: qmul */
+ asm volatile("vld $vr22, %0" : : "m" (qmul[0]));
+ asm volatile("vld $vr23, %0" : : "m" (qmul[16]));
+
+ while (bytes) {
+ /* vr0 - vr3: P + Dx */
+ asm volatile("vld $vr0, %0" : : "m" (p[0]));
+ asm volatile("vld $vr1, %0" : : "m" (p[16]));
+ asm volatile("vld $vr2, %0" : : "m" (p[32]));
+ asm volatile("vld $vr3, %0" : : "m" (p[48]));
+ /* vr4 - vr7: Qx */
+ asm volatile("vld $vr4, %0" : : "m" (dq[0]));
+ asm volatile("vld $vr5, %0" : : "m" (dq[16]));
+ asm volatile("vld $vr6, %0" : : "m" (dq[32]));
+ asm volatile("vld $vr7, %0" : : "m" (dq[48]));
+ /* vr4 - vr7: Q + Qx */
+ asm volatile("vld $vr8, %0" : : "m" (q[0]));
+ asm volatile("vld $vr9, %0" : : "m" (q[16]));
+ asm volatile("vld $vr10, %0" : : "m" (q[32]));
+ asm volatile("vld $vr11, %0" : : "m" (q[48]));
+ asm volatile("vxor.v $vr4, $vr4, $vr8");
+ asm volatile("vxor.v $vr5, $vr5, $vr9");
+ asm volatile("vxor.v $vr6, $vr6, $vr10");
+ asm volatile("vxor.v $vr7, $vr7, $vr11");
+
+ /* vr8 - vr11: higher 4 bits of each byte of (Q + Qx) */
+ asm volatile("vsrli.b $vr8, $vr4, 4");
+ asm volatile("vsrli.b $vr9, $vr5, 4");
+ asm volatile("vsrli.b $vr10, $vr6, 4");
+ asm volatile("vsrli.b $vr11, $vr7, 4");
+ /* vr4 - vr7: lower 4 bits of each byte of (Q + Qx) */
+ asm volatile("vandi.b $vr4, $vr4, 0x0f");
+ asm volatile("vandi.b $vr5, $vr5, 0x0f");
+ asm volatile("vandi.b $vr6, $vr6, 0x0f");
+ asm volatile("vandi.b $vr7, $vr7, 0x0f");
+ /* lookup from qmul[0] */
+ asm volatile("vshuf.b $vr4, $vr22, $vr22, $vr4");
+ asm volatile("vshuf.b $vr5, $vr22, $vr22, $vr5");
+ asm volatile("vshuf.b $vr6, $vr22, $vr22, $vr6");
+ asm volatile("vshuf.b $vr7, $vr22, $vr22, $vr7");
+ /* lookup from qmul[16] */
+ asm volatile("vshuf.b $vr8, $vr23, $vr23, $vr8");
+ asm volatile("vshuf.b $vr9, $vr23, $vr23, $vr9");
+ asm volatile("vshuf.b $vr10, $vr23, $vr23, $vr10");
+ asm volatile("vshuf.b $vr11, $vr23, $vr23, $vr11");
+ /* vr4 - vr7: qmul(Q + Qx) = Dx */
+ asm volatile("vxor.v $vr4, $vr4, $vr8");
+ asm volatile("vxor.v $vr5, $vr5, $vr9");
+ asm volatile("vxor.v $vr6, $vr6, $vr10");
+ asm volatile("vxor.v $vr7, $vr7, $vr11");
+ asm volatile("vst $vr4, %0" : "=m" (dq[0]));
+ asm volatile("vst $vr5, %0" : "=m" (dq[16]));
+ asm volatile("vst $vr6, %0" : "=m" (dq[32]));
+ asm volatile("vst $vr7, %0" : "=m" (dq[48]));
+
+ /* vr0 - vr3: P + Dx + Dx = P */
+ asm volatile("vxor.v $vr0, $vr0, $vr4");
+ asm volatile("vxor.v $vr1, $vr1, $vr5");
+ asm volatile("vxor.v $vr2, $vr2, $vr6");
+ asm volatile("vxor.v $vr3, $vr3, $vr7");
+ asm volatile("vst $vr0, %0" : "=m" (p[0]));
+ asm volatile("vst $vr1, %0" : "=m" (p[16]));
+ asm volatile("vst $vr2, %0" : "=m" (p[32]));
+ asm volatile("vst $vr3, %0" : "=m" (p[48]));
+
+ bytes -= 64;
+ p += 64;
+ q += 64;
+ dq += 64;
+ }
+
+ kernel_fpu_end();
+}
+
+const struct raid6_recov_calls raid6_recov_lsx = {
+ .data2 = raid6_2data_recov_lsx,
+ .datap = raid6_datap_recov_lsx,
+ .valid = raid6_has_lsx,
+ .name = "lsx",
+ .priority = 1,
+};
+#endif /* CONFIG_CPU_HAS_LSX */
+
+#ifdef CONFIG_CPU_HAS_LASX
+static int raid6_has_lasx(void)
+{
+ return cpu_has_lasx;
+}
+
+static void raid6_2data_recov_lasx(int disks, size_t bytes, int faila,
+ int failb, void **ptrs)
+{
+ u8 *p, *q, *dp, *dq;
+ const u8 *pbmul; /* P multiplier table for B data */
+ const u8 *qmul; /* Q multiplier table (for both) */
+
+ p = (u8 *)ptrs[disks - 2];
+ q = (u8 *)ptrs[disks - 1];
+
+ /*
+ * Compute syndrome with zero for the missing data pages
+ * Use the dead data pages as temporary storage for
+ * delta p and delta q
+ */
+ dp = (u8 *)ptrs[faila];
+ ptrs[faila] = (void *)raid6_empty_zero_page;
+ ptrs[disks - 2] = dp;
+ dq = (u8 *)ptrs[failb];
+ ptrs[failb] = (void *)raid6_empty_zero_page;
+ ptrs[disks - 1] = dq;
+
+ raid6_call.gen_syndrome(disks, bytes, ptrs);
+
+ /* Restore pointer table */
+ ptrs[faila] = dp;
+ ptrs[failb] = dq;
+ ptrs[disks - 2] = p;
+ ptrs[disks - 1] = q;
+
+ /* Now, pick the proper data tables */
+ pbmul = raid6_vgfmul[raid6_gfexi[failb - faila]];
+ qmul = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila] ^ raid6_gfexp[failb]]];
+
+ kernel_fpu_begin();
+
+ /*
+ * xr20, xr21: qmul
+ * xr22, xr23: pbmul
+ */
+ asm volatile("vld $vr20, %0" : : "m" (qmul[0]));
+ asm volatile("vld $vr21, %0" : : "m" (qmul[16]));
+ asm volatile("vld $vr22, %0" : : "m" (pbmul[0]));
+ asm volatile("vld $vr23, %0" : : "m" (pbmul[16]));
+ asm volatile("xvreplve0.q $xr20, $xr20");
+ asm volatile("xvreplve0.q $xr21, $xr21");
+ asm volatile("xvreplve0.q $xr22, $xr22");
+ asm volatile("xvreplve0.q $xr23, $xr23");
+
+ while (bytes) {
+ /* xr0, xr1: Q */
+ asm volatile("xvld $xr0, %0" : : "m" (q[0]));
+ asm volatile("xvld $xr1, %0" : : "m" (q[32]));
+ /* xr0, xr1: Q + Qxy */
+ asm volatile("xvld $xr4, %0" : : "m" (dq[0]));
+ asm volatile("xvld $xr5, %0" : : "m" (dq[32]));
+ asm volatile("xvxor.v $xr0, $xr0, $xr4");
+ asm volatile("xvxor.v $xr1, $xr1, $xr5");
+ /* xr2, xr3: P */
+ asm volatile("xvld $xr2, %0" : : "m" (p[0]));
+ asm volatile("xvld $xr3, %0" : : "m" (p[32]));
+ /* xr2, xr3: P + Pxy */
+ asm volatile("xvld $xr4, %0" : : "m" (dp[0]));
+ asm volatile("xvld $xr5, %0" : : "m" (dp[32]));
+ asm volatile("xvxor.v $xr2, $xr2, $xr4");
+ asm volatile("xvxor.v $xr3, $xr3, $xr5");
+
+ /* xr4, xr5: higher 4 bits of each byte of (Q + Qxy) */
+ asm volatile("xvsrli.b $xr4, $xr0, 4");
+ asm volatile("xvsrli.b $xr5, $xr1, 4");
+ /* xr0, xr1: lower 4 bits of each byte of (Q + Qxy) */
+ asm volatile("xvandi.b $xr0, $xr0, 0x0f");
+ asm volatile("xvandi.b $xr1, $xr1, 0x0f");
+ /* lookup from qmul[0] */
+ asm volatile("xvshuf.b $xr0, $xr20, $xr20, $xr0");
+ asm volatile("xvshuf.b $xr1, $xr20, $xr20, $xr1");
+ /* lookup from qmul[16] */
+ asm volatile("xvshuf.b $xr4, $xr21, $xr21, $xr4");
+ asm volatile("xvshuf.b $xr5, $xr21, $xr21, $xr5");
+ /* xr6, xr7: B(Q + Qxy) */
+ asm volatile("xvxor.v $xr6, $xr4, $xr0");
+ asm volatile("xvxor.v $xr7, $xr5, $xr1");
+
+ /* xr4, xr5: higher 4 bits of each byte of (P + Pxy) */
+ asm volatile("xvsrli.b $xr4, $xr2, 4");
+ asm volatile("xvsrli.b $xr5, $xr3, 4");
+ /* xr0, xr1: lower 4 bits of each byte of (P + Pxy) */
+ asm volatile("xvandi.b $xr0, $xr2, 0x0f");
+ asm volatile("xvandi.b $xr1, $xr3, 0x0f");
+ /* lookup from pbmul[0] */
+ asm volatile("xvshuf.b $xr0, $xr22, $xr22, $xr0");
+ asm volatile("xvshuf.b $xr1, $xr22, $xr22, $xr1");
+ /* lookup from pbmul[16] */
+ asm volatile("xvshuf.b $xr4, $xr23, $xr23, $xr4");
+ asm volatile("xvshuf.b $xr5, $xr23, $xr23, $xr5");
+ /* xr0, xr1: A(P + Pxy) */
+ asm volatile("xvxor.v $xr0, $xr0, $xr4");
+ asm volatile("xvxor.v $xr1, $xr1, $xr5");
+
+ /* xr0, xr1: A(P + Pxy) + B(Q + Qxy) = Dx */
+ asm volatile("xvxor.v $xr0, $xr0, $xr6");
+ asm volatile("xvxor.v $xr1, $xr1, $xr7");
+
+ /* xr2, xr3: P + Pxy + Dx = Dy */
+ asm volatile("xvxor.v $xr2, $xr2, $xr0");
+ asm volatile("xvxor.v $xr3, $xr3, $xr1");
+
+ asm volatile("xvst $xr0, %0" : "=m" (dq[0]));
+ asm volatile("xvst $xr1, %0" : "=m" (dq[32]));
+ asm volatile("xvst $xr2, %0" : "=m" (dp[0]));
+ asm volatile("xvst $xr3, %0" : "=m" (dp[32]));
+
+ bytes -= 64;
+ p += 64;
+ q += 64;
+ dp += 64;
+ dq += 64;
+ }
+
+ kernel_fpu_end();
+}
+
+static void raid6_datap_recov_lasx(int disks, size_t bytes, int faila,
+ void **ptrs)
+{
+ u8 *p, *q, *dq;
+ const u8 *qmul; /* Q multiplier table */
+
+ p = (u8 *)ptrs[disks - 2];
+ q = (u8 *)ptrs[disks - 1];
+
+ /*
+ * Compute syndrome with zero for the missing data page
+ * Use the dead data page as temporary storage for delta q
+ */
+ dq = (u8 *)ptrs[faila];
+ ptrs[faila] = (void *)raid6_empty_zero_page;
+ ptrs[disks - 1] = dq;
+
+ raid6_call.gen_syndrome(disks, bytes, ptrs);
+
+ /* Restore pointer table */
+ ptrs[faila] = dq;
+ ptrs[disks - 1] = q;
+
+ /* Now, pick the proper data tables */
+ qmul = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila]]];
+
+ kernel_fpu_begin();
+
+ /* xr22, xr23: qmul */
+ asm volatile("vld $vr22, %0" : : "m" (qmul[0]));
+ asm volatile("xvreplve0.q $xr22, $xr22");
+ asm volatile("vld $vr23, %0" : : "m" (qmul[16]));
+ asm volatile("xvreplve0.q $xr23, $xr23");
+
+ while (bytes) {
+ /* xr0, xr1: P + Dx */
+ asm volatile("xvld $xr0, %0" : : "m" (p[0]));
+ asm volatile("xvld $xr1, %0" : : "m" (p[32]));
+ /* xr2, xr3: Qx */
+ asm volatile("xvld $xr2, %0" : : "m" (dq[0]));
+ asm volatile("xvld $xr3, %0" : : "m" (dq[32]));
+ /* xr2, xr3: Q + Qx */
+ asm volatile("xvld $xr4, %0" : : "m" (q[0]));
+ asm volatile("xvld $xr5, %0" : : "m" (q[32]));
+ asm volatile("xvxor.v $xr2, $xr2, $xr4");
+ asm volatile("xvxor.v $xr3, $xr3, $xr5");
+
+ /* xr4, xr5: higher 4 bits of each byte of (Q + Qx) */
+ asm volatile("xvsrli.b $xr4, $xr2, 4");
+ asm volatile("xvsrli.b $xr5, $xr3, 4");
+ /* xr2, xr3: lower 4 bits of each byte of (Q + Qx) */
+ asm volatile("xvandi.b $xr2, $xr2, 0x0f");
+ asm volatile("xvandi.b $xr3, $xr3, 0x0f");
+ /* lookup from qmul[0] */
+ asm volatile("xvshuf.b $xr2, $xr22, $xr22, $xr2");
+ asm volatile("xvshuf.b $xr3, $xr22, $xr22, $xr3");
+ /* lookup from qmul[16] */
+ asm volatile("xvshuf.b $xr4, $xr23, $xr23, $xr4");
+ asm volatile("xvshuf.b $xr5, $xr23, $xr23, $xr5");
+ /* xr2, xr3: qmul(Q + Qx) = Dx */
+ asm volatile("xvxor.v $xr2, $xr2, $xr4");
+ asm volatile("xvxor.v $xr3, $xr3, $xr5");
+
+ /* xr0, xr1: P + Dx + Dx = P */
+ asm volatile("xvxor.v $xr0, $xr0, $xr2");
+ asm volatile("xvxor.v $xr1, $xr1, $xr3");
+
+ asm volatile("xvst $xr2, %0" : "=m" (dq[0]));
+ asm volatile("xvst $xr3, %0" : "=m" (dq[32]));
+ asm volatile("xvst $xr0, %0" : "=m" (p[0]));
+ asm volatile("xvst $xr1, %0" : "=m" (p[32]));
+
+ bytes -= 64;
+ p += 64;
+ q += 64;
+ dq += 64;
+ }
+
+ kernel_fpu_end();
+}
+
+const struct raid6_recov_calls raid6_recov_lasx = {
+ .data2 = raid6_2data_recov_lasx,
+ .datap = raid6_datap_recov_lasx,
+ .valid = raid6_has_lasx,
+ .name = "lasx",
+ .priority = 2,
+};
+#endif /* CONFIG_CPU_HAS_LASX */
diff --git a/lib/raid6/recov_neon.c b/lib/raid6/recov_neon.c
new file mode 100644
index 000000000..1bfc14174
--- /dev/null
+++ b/lib/raid6/recov_neon.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2012 Intel Corporation
+ * Copyright (C) 2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ */
+
+#include <linux/raid/pq.h>
+
+#ifdef __KERNEL__
+#include <asm/neon.h>
+#include "neon.h"
+#else
+#define kernel_neon_begin()
+#define kernel_neon_end()
+#define cpu_has_neon() (1)
+#endif
+
+static int raid6_has_neon(void)
+{
+ return cpu_has_neon();
+}
+
+static void raid6_2data_recov_neon(int disks, size_t bytes, int faila,
+ int failb, void **ptrs)
+{
+ u8 *p, *q, *dp, *dq;
+ const u8 *pbmul; /* P multiplier table for B data */
+ const u8 *qmul; /* Q multiplier table (for both) */
+
+ p = (u8 *)ptrs[disks - 2];
+ q = (u8 *)ptrs[disks - 1];
+
+ /*
+ * Compute syndrome with zero for the missing data pages
+ * Use the dead data pages as temporary storage for
+ * delta p and delta q
+ */
+ dp = (u8 *)ptrs[faila];
+ ptrs[faila] = (void *)raid6_empty_zero_page;
+ ptrs[disks - 2] = dp;
+ dq = (u8 *)ptrs[failb];
+ ptrs[failb] = (void *)raid6_empty_zero_page;
+ ptrs[disks - 1] = dq;
+
+ raid6_call.gen_syndrome(disks, bytes, ptrs);
+
+ /* Restore pointer table */
+ ptrs[faila] = dp;
+ ptrs[failb] = dq;
+ ptrs[disks - 2] = p;
+ ptrs[disks - 1] = q;
+
+ /* Now, pick the proper data tables */
+ pbmul = raid6_vgfmul[raid6_gfexi[failb-faila]];
+ qmul = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila] ^
+ raid6_gfexp[failb]]];
+
+ kernel_neon_begin();
+ __raid6_2data_recov_neon(bytes, p, q, dp, dq, pbmul, qmul);
+ kernel_neon_end();
+}
+
+static void raid6_datap_recov_neon(int disks, size_t bytes, int faila,
+ void **ptrs)
+{
+ u8 *p, *q, *dq;
+ const u8 *qmul; /* Q multiplier table */
+
+ p = (u8 *)ptrs[disks - 2];
+ q = (u8 *)ptrs[disks - 1];
+
+ /*
+ * Compute syndrome with zero for the missing data page
+ * Use the dead data page as temporary storage for delta q
+ */
+ dq = (u8 *)ptrs[faila];
+ ptrs[faila] = (void *)raid6_empty_zero_page;
+ ptrs[disks - 1] = dq;
+
+ raid6_call.gen_syndrome(disks, bytes, ptrs);
+
+ /* Restore pointer table */
+ ptrs[faila] = dq;
+ ptrs[disks - 1] = q;
+
+ /* Now, pick the proper data tables */
+ qmul = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila]]];
+
+ kernel_neon_begin();
+ __raid6_datap_recov_neon(bytes, p, q, dq, qmul);
+ kernel_neon_end();
+}
+
+const struct raid6_recov_calls raid6_recov_neon = {
+ .data2 = raid6_2data_recov_neon,
+ .datap = raid6_datap_recov_neon,
+ .valid = raid6_has_neon,
+ .name = "neon",
+ .priority = 10,
+};
diff --git a/lib/raid6/recov_neon_inner.c b/lib/raid6/recov_neon_inner.c
new file mode 100644
index 000000000..f9e7e8f5a
--- /dev/null
+++ b/lib/raid6/recov_neon_inner.c
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2012 Intel Corporation
+ * Copyright (C) 2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ */
+
+#include <arm_neon.h>
+#include "neon.h"
+
+#ifdef CONFIG_ARM
+/*
+ * AArch32 does not provide this intrinsic natively because it does not
+ * implement the underlying instruction. AArch32 only provides a 64-bit
+ * wide vtbl.8 instruction, so use that instead.
+ */
+static uint8x16_t vqtbl1q_u8(uint8x16_t a, uint8x16_t b)
+{
+ union {
+ uint8x16_t val;
+ uint8x8x2_t pair;
+ } __a = { a };
+
+ return vcombine_u8(vtbl2_u8(__a.pair, vget_low_u8(b)),
+ vtbl2_u8(__a.pair, vget_high_u8(b)));
+}
+#endif
+
+void __raid6_2data_recov_neon(int bytes, uint8_t *p, uint8_t *q, uint8_t *dp,
+ uint8_t *dq, const uint8_t *pbmul,
+ const uint8_t *qmul)
+{
+ uint8x16_t pm0 = vld1q_u8(pbmul);
+ uint8x16_t pm1 = vld1q_u8(pbmul + 16);
+ uint8x16_t qm0 = vld1q_u8(qmul);
+ uint8x16_t qm1 = vld1q_u8(qmul + 16);
+ uint8x16_t x0f = vdupq_n_u8(0x0f);
+
+ /*
+ * while ( bytes-- ) {
+ * uint8_t px, qx, db;
+ *
+ * px = *p ^ *dp;
+ * qx = qmul[*q ^ *dq];
+ * *dq++ = db = pbmul[px] ^ qx;
+ * *dp++ = db ^ px;
+ * p++; q++;
+ * }
+ */
+
+ while (bytes) {
+ uint8x16_t vx, vy, px, qx, db;
+
+ px = veorq_u8(vld1q_u8(p), vld1q_u8(dp));
+ vx = veorq_u8(vld1q_u8(q), vld1q_u8(dq));
+
+ vy = vshrq_n_u8(vx, 4);
+ vx = vqtbl1q_u8(qm0, vandq_u8(vx, x0f));
+ vy = vqtbl1q_u8(qm1, vy);
+ qx = veorq_u8(vx, vy);
+
+ vy = vshrq_n_u8(px, 4);
+ vx = vqtbl1q_u8(pm0, vandq_u8(px, x0f));
+ vy = vqtbl1q_u8(pm1, vy);
+ vx = veorq_u8(vx, vy);
+ db = veorq_u8(vx, qx);
+
+ vst1q_u8(dq, db);
+ vst1q_u8(dp, veorq_u8(db, px));
+
+ bytes -= 16;
+ p += 16;
+ q += 16;
+ dp += 16;
+ dq += 16;
+ }
+}
+
+void __raid6_datap_recov_neon(int bytes, uint8_t *p, uint8_t *q, uint8_t *dq,
+ const uint8_t *qmul)
+{
+ uint8x16_t qm0 = vld1q_u8(qmul);
+ uint8x16_t qm1 = vld1q_u8(qmul + 16);
+ uint8x16_t x0f = vdupq_n_u8(0x0f);
+
+ /*
+ * while (bytes--) {
+ * *p++ ^= *dq = qmul[*q ^ *dq];
+ * q++; dq++;
+ * }
+ */
+
+ while (bytes) {
+ uint8x16_t vx, vy;
+
+ vx = veorq_u8(vld1q_u8(q), vld1q_u8(dq));
+
+ vy = vshrq_n_u8(vx, 4);
+ vx = vqtbl1q_u8(qm0, vandq_u8(vx, x0f));
+ vy = vqtbl1q_u8(qm1, vy);
+ vx = veorq_u8(vx, vy);
+ vy = veorq_u8(vx, vld1q_u8(p));
+
+ vst1q_u8(dq, vx);
+ vst1q_u8(p, vy);
+
+ bytes -= 16;
+ p += 16;
+ q += 16;
+ dq += 16;
+ }
+}
diff --git a/lib/raid6/recov_s390xc.c b/lib/raid6/recov_s390xc.c
new file mode 100644
index 000000000..179eec900
--- /dev/null
+++ b/lib/raid6/recov_s390xc.c
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * RAID-6 data recovery in dual failure mode based on the XC instruction.
+ *
+ * Copyright IBM Corp. 2016
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#include <linux/export.h>
+#include <linux/raid/pq.h>
+
+static inline void xor_block(u8 *p1, u8 *p2)
+{
+ typedef struct { u8 _[256]; } addrtype;
+
+ asm volatile(
+ " xc 0(256,%[p1]),0(%[p2])\n"
+ : "+m" (*(addrtype *) p1) : "m" (*(addrtype *) p2),
+ [p1] "a" (p1), [p2] "a" (p2) : "cc");
+}
+
+/* Recover two failed data blocks. */
+static void raid6_2data_recov_s390xc(int disks, size_t bytes, int faila,
+ int failb, void **ptrs)
+{
+ u8 *p, *q, *dp, *dq;
+ const u8 *pbmul; /* P multiplier table for B data */
+ const u8 *qmul; /* Q multiplier table (for both) */
+ int i;
+
+ p = (u8 *)ptrs[disks-2];
+ q = (u8 *)ptrs[disks-1];
+
+	/*
+	 * Compute syndrome with zero for the missing data pages.
+	 * Use the dead data pages as temporary storage for
+	 * delta p and delta q.
+	 */
+ dp = (u8 *)ptrs[faila];
+ ptrs[faila] = (void *)raid6_empty_zero_page;
+ ptrs[disks-2] = dp;
+ dq = (u8 *)ptrs[failb];
+ ptrs[failb] = (void *)raid6_empty_zero_page;
+ ptrs[disks-1] = dq;
+
+ raid6_call.gen_syndrome(disks, bytes, ptrs);
+
+ /* Restore pointer table */
+ ptrs[faila] = dp;
+ ptrs[failb] = dq;
+ ptrs[disks-2] = p;
+ ptrs[disks-1] = q;
+
+ /* Now, pick the proper data tables */
+ pbmul = raid6_gfmul[raid6_gfexi[failb-faila]];
+ qmul = raid6_gfmul[raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]]];
+
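+	/*
+	 * A sketch of the algebra (a = faila, b = failb, g the GF(2^8)
+	 * generator): the xor_block()s below yield dp = Da ^ Db and
+	 * dq = g^a*Da ^ g^b*Db.  Solving for the lost blocks gives
+	 * Db = pbmul[dp] ^ qmul[dq], with pbmul = 1/(g^(b-a) ^ 1) and
+	 * qmul = 1/(g^a ^ g^b), and then Da = dp ^ Db, which is exactly
+	 * the per-byte loop that follows.
+	 */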
+ /* Now do it... */
+ while (bytes) {
+ xor_block(dp, p);
+ xor_block(dq, q);
+ for (i = 0; i < 256; i++)
+ dq[i] = pbmul[dp[i]] ^ qmul[dq[i]];
+ xor_block(dp, dq);
+ p += 256;
+ q += 256;
+ dp += 256;
+ dq += 256;
+ bytes -= 256;
+ }
+}
+
+/* Recover failure of one data block plus the P block */
+static void raid6_datap_recov_s390xc(int disks, size_t bytes, int faila,
+ void **ptrs)
+{
+ u8 *p, *q, *dq;
+ const u8 *qmul; /* Q multiplier table */
+ int i;
+
+ p = (u8 *)ptrs[disks-2];
+ q = (u8 *)ptrs[disks-1];
+
+	/*
+	 * Compute syndrome with zero for the missing data page.
+	 * Use the dead data page as temporary storage for delta q.
+	 */
+ dq = (u8 *)ptrs[faila];
+ ptrs[faila] = (void *)raid6_empty_zero_page;
+ ptrs[disks-1] = dq;
+
+ raid6_call.gen_syndrome(disks, bytes, ptrs);
+
+ /* Restore pointer table */
+ ptrs[faila] = dq;
+ ptrs[disks-1] = q;
+
+ /* Now, pick the proper data tables */
+ qmul = raid6_gfmul[raid6_gfinv[raid6_gfexp[faila]]];
+
+ /* Now do it... */
+ while (bytes) {
+ xor_block(dq, q);
+ for (i = 0; i < 256; i++)
+ dq[i] = qmul[dq[i]];
+ xor_block(p, dq);
+ p += 256;
+ q += 256;
+ dq += 256;
+ bytes -= 256;
+ }
+}
+
+const struct raid6_recov_calls raid6_recov_s390xc = {
+ .data2 = raid6_2data_recov_s390xc,
+ .datap = raid6_datap_recov_s390xc,
+ .valid = NULL,
+ .name = "s390xc",
+ .priority = 1,
+};
diff --git a/lib/raid6/recov_ssse3.c b/lib/raid6/recov_ssse3.c
new file mode 100644
index 000000000..4bfa3c6b6
--- /dev/null
+++ b/lib/raid6/recov_ssse3.c
@@ -0,0 +1,328 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2012 Intel Corporation
+ */
+
+#include <linux/raid/pq.h>
+#include "x86.h"
+
+static int raid6_has_ssse3(void)
+{
+ return boot_cpu_has(X86_FEATURE_XMM) &&
+ boot_cpu_has(X86_FEATURE_XMM2) &&
+ boot_cpu_has(X86_FEATURE_SSSE3);
+}
+
+static void raid6_2data_recov_ssse3(int disks, size_t bytes, int faila,
+ int failb, void **ptrs)
+{
+ u8 *p, *q, *dp, *dq;
+ const u8 *pbmul; /* P multiplier table for B data */
+ const u8 *qmul; /* Q multiplier table (for both) */
+ static const u8 __aligned(16) x0f[16] = {
+ 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
+ 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f};
+
+ p = (u8 *)ptrs[disks-2];
+ q = (u8 *)ptrs[disks-1];
+
+	/*
+	 * Compute syndrome with zero for the missing data pages.
+	 * Use the dead data pages as temporary storage for
+	 * delta p and delta q.
+	 */
+ dp = (u8 *)ptrs[faila];
+ ptrs[faila] = (void *)raid6_empty_zero_page;
+ ptrs[disks-2] = dp;
+ dq = (u8 *)ptrs[failb];
+ ptrs[failb] = (void *)raid6_empty_zero_page;
+ ptrs[disks-1] = dq;
+
+ raid6_call.gen_syndrome(disks, bytes, ptrs);
+
+ /* Restore pointer table */
+ ptrs[faila] = dp;
+ ptrs[failb] = dq;
+ ptrs[disks-2] = p;
+ ptrs[disks-1] = q;
+
+ /* Now, pick the proper data tables */
+ pbmul = raid6_vgfmul[raid6_gfexi[failb-faila]];
+ qmul = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila] ^
+ raid6_gfexp[failb]]];
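+	/*
+	 * Each raid6_vgfmul table is 32 bytes: 16 entries for the low
+	 * nibble of the operand and 16 for the high nibble.  A full
+	 * GF(2^8) multiply is then two pshufb lookups xor'ed together.
+	 */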
+
+ kernel_fpu_begin();
+
+ asm volatile("movdqa %0,%%xmm7" : : "m" (x0f[0]));
+
+#ifdef CONFIG_X86_64
+ asm volatile("movdqa %0,%%xmm6" : : "m" (qmul[0]));
+ asm volatile("movdqa %0,%%xmm14" : : "m" (pbmul[0]));
+ asm volatile("movdqa %0,%%xmm15" : : "m" (pbmul[16]));
+#endif
+
+ /* Now do it... */
+ while (bytes) {
+#ifdef CONFIG_X86_64
+ /* xmm6, xmm14, xmm15 */
+
+ asm volatile("movdqa %0,%%xmm1" : : "m" (q[0]));
+ asm volatile("movdqa %0,%%xmm9" : : "m" (q[16]));
+ asm volatile("movdqa %0,%%xmm0" : : "m" (p[0]));
+ asm volatile("movdqa %0,%%xmm8" : : "m" (p[16]));
+ asm volatile("pxor %0,%%xmm1" : : "m" (dq[0]));
+ asm volatile("pxor %0,%%xmm9" : : "m" (dq[16]));
+ asm volatile("pxor %0,%%xmm0" : : "m" (dp[0]));
+ asm volatile("pxor %0,%%xmm8" : : "m" (dp[16]));
+
+ /* xmm0/8 = px */
+
+ asm volatile("movdqa %xmm6,%xmm4");
+ asm volatile("movdqa %0,%%xmm5" : : "m" (qmul[16]));
+ asm volatile("movdqa %xmm6,%xmm12");
+ asm volatile("movdqa %xmm5,%xmm13");
+ asm volatile("movdqa %xmm1,%xmm3");
+ asm volatile("movdqa %xmm9,%xmm11");
+ asm volatile("movdqa %xmm0,%xmm2"); /* xmm2/10 = px */
+ asm volatile("movdqa %xmm8,%xmm10");
+ asm volatile("psraw $4,%xmm1");
+ asm volatile("psraw $4,%xmm9");
+ asm volatile("pand %xmm7,%xmm3");
+ asm volatile("pand %xmm7,%xmm11");
+ asm volatile("pand %xmm7,%xmm1");
+ asm volatile("pand %xmm7,%xmm9");
+ asm volatile("pshufb %xmm3,%xmm4");
+ asm volatile("pshufb %xmm11,%xmm12");
+ asm volatile("pshufb %xmm1,%xmm5");
+ asm volatile("pshufb %xmm9,%xmm13");
+ asm volatile("pxor %xmm4,%xmm5");
+ asm volatile("pxor %xmm12,%xmm13");
+
+ /* xmm5/13 = qx */
+
+ asm volatile("movdqa %xmm14,%xmm4");
+ asm volatile("movdqa %xmm15,%xmm1");
+ asm volatile("movdqa %xmm14,%xmm12");
+ asm volatile("movdqa %xmm15,%xmm9");
+ asm volatile("movdqa %xmm2,%xmm3");
+ asm volatile("movdqa %xmm10,%xmm11");
+ asm volatile("psraw $4,%xmm2");
+ asm volatile("psraw $4,%xmm10");
+ asm volatile("pand %xmm7,%xmm3");
+ asm volatile("pand %xmm7,%xmm11");
+ asm volatile("pand %xmm7,%xmm2");
+ asm volatile("pand %xmm7,%xmm10");
+ asm volatile("pshufb %xmm3,%xmm4");
+ asm volatile("pshufb %xmm11,%xmm12");
+ asm volatile("pshufb %xmm2,%xmm1");
+ asm volatile("pshufb %xmm10,%xmm9");
+ asm volatile("pxor %xmm4,%xmm1");
+ asm volatile("pxor %xmm12,%xmm9");
+
+ /* xmm1/9 = pbmul[px] */
+ asm volatile("pxor %xmm5,%xmm1");
+ asm volatile("pxor %xmm13,%xmm9");
+ /* xmm1/9 = db = DQ */
+ asm volatile("movdqa %%xmm1,%0" : "=m" (dq[0]));
+ asm volatile("movdqa %%xmm9,%0" : "=m" (dq[16]));
+
+ asm volatile("pxor %xmm1,%xmm0");
+ asm volatile("pxor %xmm9,%xmm8");
+ asm volatile("movdqa %%xmm0,%0" : "=m" (dp[0]));
+ asm volatile("movdqa %%xmm8,%0" : "=m" (dp[16]));
+
+ bytes -= 32;
+ p += 32;
+ q += 32;
+ dp += 32;
+ dq += 32;
+#else
+ asm volatile("movdqa %0,%%xmm1" : : "m" (*q));
+ asm volatile("movdqa %0,%%xmm0" : : "m" (*p));
+ asm volatile("pxor %0,%%xmm1" : : "m" (*dq));
+ asm volatile("pxor %0,%%xmm0" : : "m" (*dp));
+
+ /* 1 = dq ^ q
+ * 0 = dp ^ p
+ */
+ asm volatile("movdqa %0,%%xmm4" : : "m" (qmul[0]));
+ asm volatile("movdqa %0,%%xmm5" : : "m" (qmul[16]));
+
+ asm volatile("movdqa %xmm1,%xmm3");
+ asm volatile("psraw $4,%xmm1");
+ asm volatile("pand %xmm7,%xmm3");
+ asm volatile("pand %xmm7,%xmm1");
+ asm volatile("pshufb %xmm3,%xmm4");
+ asm volatile("pshufb %xmm1,%xmm5");
+ asm volatile("pxor %xmm4,%xmm5");
+
+ asm volatile("movdqa %xmm0,%xmm2"); /* xmm2 = px */
+
+ /* xmm5 = qx */
+
+ asm volatile("movdqa %0,%%xmm4" : : "m" (pbmul[0]));
+ asm volatile("movdqa %0,%%xmm1" : : "m" (pbmul[16]));
+ asm volatile("movdqa %xmm2,%xmm3");
+ asm volatile("psraw $4,%xmm2");
+ asm volatile("pand %xmm7,%xmm3");
+ asm volatile("pand %xmm7,%xmm2");
+ asm volatile("pshufb %xmm3,%xmm4");
+ asm volatile("pshufb %xmm2,%xmm1");
+ asm volatile("pxor %xmm4,%xmm1");
+
+ /* xmm1 = pbmul[px] */
+ asm volatile("pxor %xmm5,%xmm1");
+ /* xmm1 = db = DQ */
+ asm volatile("movdqa %%xmm1,%0" : "=m" (*dq));
+
+ asm volatile("pxor %xmm1,%xmm0");
+ asm volatile("movdqa %%xmm0,%0" : "=m" (*dp));
+
+ bytes -= 16;
+ p += 16;
+ q += 16;
+ dp += 16;
+ dq += 16;
+#endif
+ }
+
+ kernel_fpu_end();
+}
+
+static void raid6_datap_recov_ssse3(int disks, size_t bytes, int faila,
+ void **ptrs)
+{
+ u8 *p, *q, *dq;
+ const u8 *qmul; /* Q multiplier table */
+ static const u8 __aligned(16) x0f[16] = {
+ 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
+ 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f};
+
+ p = (u8 *)ptrs[disks-2];
+ q = (u8 *)ptrs[disks-1];
+
+	/*
+	 * Compute syndrome with zero for the missing data page.
+	 * Use the dead data page as temporary storage for delta q.
+	 */
+ dq = (u8 *)ptrs[faila];
+ ptrs[faila] = (void *)raid6_empty_zero_page;
+ ptrs[disks-1] = dq;
+
+ raid6_call.gen_syndrome(disks, bytes, ptrs);
+
+ /* Restore pointer table */
+ ptrs[faila] = dq;
+ ptrs[disks-1] = q;
+
+ /* Now, pick the proper data tables */
+ qmul = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila]]];
+
+ kernel_fpu_begin();
+
+ asm volatile("movdqa %0, %%xmm7" : : "m" (x0f[0]));
+
+ while (bytes) {
+#ifdef CONFIG_X86_64
+ asm volatile("movdqa %0, %%xmm3" : : "m" (dq[0]));
+ asm volatile("movdqa %0, %%xmm4" : : "m" (dq[16]));
+ asm volatile("pxor %0, %%xmm3" : : "m" (q[0]));
+ asm volatile("movdqa %0, %%xmm0" : : "m" (qmul[0]));
+
+ /* xmm3 = q[0] ^ dq[0] */
+
+ asm volatile("pxor %0, %%xmm4" : : "m" (q[16]));
+ asm volatile("movdqa %0, %%xmm1" : : "m" (qmul[16]));
+
+ /* xmm4 = q[16] ^ dq[16] */
+
+ asm volatile("movdqa %xmm3, %xmm6");
+ asm volatile("movdqa %xmm4, %xmm8");
+
+ /* xmm4 = xmm8 = q[16] ^ dq[16] */
+
+ asm volatile("psraw $4, %xmm3");
+ asm volatile("pand %xmm7, %xmm6");
+ asm volatile("pand %xmm7, %xmm3");
+ asm volatile("pshufb %xmm6, %xmm0");
+ asm volatile("pshufb %xmm3, %xmm1");
+ asm volatile("movdqa %0, %%xmm10" : : "m" (qmul[0]));
+ asm volatile("pxor %xmm0, %xmm1");
+ asm volatile("movdqa %0, %%xmm11" : : "m" (qmul[16]));
+
+ /* xmm1 = qmul[q[0] ^ dq[0]] */
+
+ asm volatile("psraw $4, %xmm4");
+ asm volatile("pand %xmm7, %xmm8");
+ asm volatile("pand %xmm7, %xmm4");
+ asm volatile("pshufb %xmm8, %xmm10");
+ asm volatile("pshufb %xmm4, %xmm11");
+ asm volatile("movdqa %0, %%xmm2" : : "m" (p[0]));
+ asm volatile("pxor %xmm10, %xmm11");
+ asm volatile("movdqa %0, %%xmm12" : : "m" (p[16]));
+
+ /* xmm11 = qmul[q[16] ^ dq[16]] */
+
+ asm volatile("pxor %xmm1, %xmm2");
+
+ /* xmm2 = p[0] ^ qmul[q[0] ^ dq[0]] */
+
+ asm volatile("pxor %xmm11, %xmm12");
+
+ /* xmm12 = p[16] ^ qmul[q[16] ^ dq[16]] */
+
+ asm volatile("movdqa %%xmm1, %0" : "=m" (dq[0]));
+ asm volatile("movdqa %%xmm11, %0" : "=m" (dq[16]));
+
+ asm volatile("movdqa %%xmm2, %0" : "=m" (p[0]));
+ asm volatile("movdqa %%xmm12, %0" : "=m" (p[16]));
+
+ bytes -= 32;
+ p += 32;
+ q += 32;
+ dq += 32;
+
+#else
+ asm volatile("movdqa %0, %%xmm3" : : "m" (dq[0]));
+ asm volatile("movdqa %0, %%xmm0" : : "m" (qmul[0]));
+ asm volatile("pxor %0, %%xmm3" : : "m" (q[0]));
+ asm volatile("movdqa %0, %%xmm1" : : "m" (qmul[16]));
+
+ /* xmm3 = *q ^ *dq */
+
+ asm volatile("movdqa %xmm3, %xmm6");
+ asm volatile("movdqa %0, %%xmm2" : : "m" (p[0]));
+ asm volatile("psraw $4, %xmm3");
+ asm volatile("pand %xmm7, %xmm6");
+ asm volatile("pand %xmm7, %xmm3");
+ asm volatile("pshufb %xmm6, %xmm0");
+ asm volatile("pshufb %xmm3, %xmm1");
+ asm volatile("pxor %xmm0, %xmm1");
+
+		/* xmm1 = qmul[*q ^ *dq] */
+
+ asm volatile("pxor %xmm1, %xmm2");
+
+ /* xmm2 = *p ^ qmul[*q ^ *dq] */
+
+ asm volatile("movdqa %%xmm1, %0" : "=m" (dq[0]));
+ asm volatile("movdqa %%xmm2, %0" : "=m" (p[0]));
+
+ bytes -= 16;
+ p += 16;
+ q += 16;
+ dq += 16;
+#endif
+ }
+
+ kernel_fpu_end();
+}
+
+const struct raid6_recov_calls raid6_recov_ssse3 = {
+ .data2 = raid6_2data_recov_ssse3,
+ .datap = raid6_datap_recov_ssse3,
+ .valid = raid6_has_ssse3,
+#ifdef CONFIG_X86_64
+ .name = "ssse3x2",
+#else
+ .name = "ssse3x1",
+#endif
+ .priority = 1,
+};
diff --git a/lib/raid6/s390vx.uc b/lib/raid6/s390vx.uc
new file mode 100644
index 000000000..b25dfc9c7
--- /dev/null
+++ b/lib/raid6/s390vx.uc
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * raid6_vx$#.c
+ *
+ * $#-way unrolled RAID6 gen/xor functions for s390
+ * based on the vector facility
+ *
+ * Copyright IBM Corp. 2016
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *
+ * This file is postprocessed using unroll.awk.
+ */
+
+#include <linux/raid/pq.h>
+#include <asm/fpu/api.h>
+#include <asm/vx-insn.h>
+
+#define NSIZE 16
+
+static inline void LOAD_CONST(void)
+{
+	asm volatile("VREPIB %v24,7");		/* shift count for MASK() */
+	asm volatile("VREPIB %v25,0x1d");	/* GF(2^8) reduction constant */
+}
+
+/*
+ * The SHLBYTE() operation shifts each of the 16 bytes in
+ * vector register y left by 1 bit and stores the result in
+ * vector register x.  It is implemented as a byte-wise add of
+ * y to itself (VAB), since x + x == x << 1.
+ */
+static inline void SHLBYTE(int x, int y)
+{
+ asm volatile ("VAB %0,%1,%1" : : "i" (x), "i" (y));
+}
+
+/*
+ * For each of the 16 bytes in the vector register y the MASK()
+ * operation returns 0xFF if the high bit of the byte is 1,
+ * or 0x00 if the high bit is 0.  The result is stored in vector
+ * register x.  The shift count is taken from vector register 24,
+ * which LOAD_CONST() fills with 7, so the arithmetic shift simply
+ * replicates each byte's sign bit.
+ */
+static inline void MASK(int x, int y)
+{
+ asm volatile ("VESRAVB %0,%1,24" : : "i" (x), "i" (y));
+}
+
+static inline void AND(int x, int y, int z)
+{
+ asm volatile ("VN %0,%1,%2" : : "i" (x), "i" (y), "i" (z));
+}
+
+static inline void XOR(int x, int y, int z)
+{
+ asm volatile ("VX %0,%1,%2" : : "i" (x), "i" (y), "i" (z));
+}
+
+static inline void LOAD_DATA(int x, u8 *ptr)
+{
+ typedef struct { u8 _[16 * $#]; } addrtype;
+ register addrtype *__ptr asm("1") = (addrtype *) ptr;
+
+ asm volatile ("VLM %2,%3,0,%1"
+ : : "m" (*__ptr), "a" (__ptr), "i" (x),
+ "i" (x + $# - 1));
+}
+
+static inline void STORE_DATA(int x, u8 *ptr)
+{
+ typedef struct { u8 _[16 * $#]; } addrtype;
+ register addrtype *__ptr asm("1") = (addrtype *) ptr;
+
+ asm volatile ("VSTM %2,%3,0,1"
+ : "=m" (*__ptr) : "a" (__ptr), "i" (x),
+ "i" (x + $# - 1));
+}
+
+static inline void COPY_VEC(int x, int y)
+{
+ asm volatile ("VLR %0,%1" : : "i" (x), "i" (y));
+}
+
+static void raid6_s390vx$#_gen_syndrome(int disks, size_t bytes, void **ptrs)
+{
+ struct kernel_fpu vxstate;
+ u8 **dptr, *p, *q;
+ int d, z, z0;
+
+ kernel_fpu_begin(&vxstate, KERNEL_VXR);
+ LOAD_CONST();
+
+ dptr = (u8 **) ptrs;
+ z0 = disks - 3; /* Highest data disk */
+ p = dptr[z0 + 1]; /* XOR parity */
+ q = dptr[z0 + 2]; /* RS syndrome */
+
+ for (d = 0; d < bytes; d += $#*NSIZE) {
+ LOAD_DATA(0,&dptr[z0][d]);
+ COPY_VEC(8+$$,0+$$);
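+		/*
+		 * Registers 0+$$ accumulate P and 8+$$ accumulate Q, with
+		 * 16+$$ as scratch.  Each inner step multiplies Q by 0x02
+		 * in GF(2^8): MASK/AND build the 0x1d reduction term for
+		 * bytes with the top bit set, SHLBYTE doubles, and the
+		 * XORs fold in the reduction and the next data block.
+		 */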
+ for (z = z0 - 1; z >= 0; z--) {
+ MASK(16+$$,8+$$);
+ AND(16+$$,16+$$,25);
+ SHLBYTE(8+$$,8+$$);
+ XOR(8+$$,8+$$,16+$$);
+ LOAD_DATA(16,&dptr[z][d]);
+ XOR(0+$$,0+$$,16+$$);
+ XOR(8+$$,8+$$,16+$$);
+ }
+ STORE_DATA(0,&p[d]);
+ STORE_DATA(8,&q[d]);
+ }
+ kernel_fpu_end(&vxstate, KERNEL_VXR);
+}
+
+static void raid6_s390vx$#_xor_syndrome(int disks, int start, int stop,
+ size_t bytes, void **ptrs)
+{
+ struct kernel_fpu vxstate;
+ u8 **dptr, *p, *q;
+ int d, z, z0;
+
+ dptr = (u8 **) ptrs;
+ z0 = stop; /* P/Q right side optimization */
+ p = dptr[disks - 2]; /* XOR parity */
+ q = dptr[disks - 1]; /* RS syndrome */
+
+ kernel_fpu_begin(&vxstate, KERNEL_VXR);
+ LOAD_CONST();
+
+ for (d = 0; d < bytes; d += $#*NSIZE) {
+ /* P/Q data pages */
+ LOAD_DATA(0,&dptr[z0][d]);
+ COPY_VEC(8+$$,0+$$);
+ for (z = z0 - 1; z >= start; z--) {
+ MASK(16+$$,8+$$);
+ AND(16+$$,16+$$,25);
+ SHLBYTE(8+$$,8+$$);
+ XOR(8+$$,8+$$,16+$$);
+ LOAD_DATA(16,&dptr[z][d]);
+ XOR(0+$$,0+$$,16+$$);
+ XOR(8+$$,8+$$,16+$$);
+ }
+		/*
+		 * P/Q left side optimization: disks below 'start' are
+		 * unchanged, so no data is loaded; the partial Q is just
+		 * multiplied by g once per remaining disk so it carries
+		 * the same weights as the stored syndrome.
+		 */
+ for (z = start - 1; z >= 0; z--) {
+ MASK(16+$$,8+$$);
+ AND(16+$$,16+$$,25);
+ SHLBYTE(8+$$,8+$$);
+ XOR(8+$$,8+$$,16+$$);
+ }
+ LOAD_DATA(16,&p[d]);
+ XOR(16+$$,16+$$,0+$$);
+ STORE_DATA(16,&p[d]);
+ LOAD_DATA(16,&q[d]);
+ XOR(16+$$,16+$$,8+$$);
+ STORE_DATA(16,&q[d]);
+ }
+ kernel_fpu_end(&vxstate, KERNEL_VXR);
+}
+
+static int raid6_s390vx$#_valid(void)
+{
+ return MACHINE_HAS_VX;
+}
+
+const struct raid6_calls raid6_s390vx$# = {
+ raid6_s390vx$#_gen_syndrome,
+ raid6_s390vx$#_xor_syndrome,
+ raid6_s390vx$#_valid,
+ "vx128x$#",
+ 1
+};
diff --git a/lib/raid6/sse1.c b/lib/raid6/sse1.c
new file mode 100644
index 000000000..692fa3a93
--- /dev/null
+++ b/lib/raid6/sse1.c
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* -*- linux-c -*- ------------------------------------------------------- *
+ *
+ * Copyright 2002 H. Peter Anvin - All Rights Reserved
+ *
+ * ----------------------------------------------------------------------- */
+
+/*
+ * raid6/sse1.c
+ *
+ * SSE-1/MMXEXT implementation of RAID-6 syndrome functions
+ *
+ * This is really an MMX implementation, but it requires SSE-1 or
+ * AMD MMXEXT for prefetch support and a few other features. The
+ * support for nontemporal memory accesses is enough to make this
+ * worthwhile as a separate implementation.
+ */
+
+#ifdef CONFIG_X86_32
+
+#include <linux/raid/pq.h>
+#include "x86.h"
+
+/* Defined in raid6/mmx.c */
+extern const struct raid6_mmx_constants {
+ u64 x1d;
+} raid6_mmx_constants;
+
+static int raid6_have_sse1_or_mmxext(void)
+{
+ /* Not really boot_cpu but "all_cpus" */
+ return boot_cpu_has(X86_FEATURE_MMX) &&
+ (boot_cpu_has(X86_FEATURE_XMM) ||
+ boot_cpu_has(X86_FEATURE_MMXEXT));
+}
+
+/*
+ * Plain SSE1 implementation
+ */
+static void raid6_sse11_gen_syndrome(int disks, size_t bytes, void **ptrs)
+{
+ u8 **dptr = (u8 **)ptrs;
+ u8 *p, *q;
+ int d, z, z0;
+
+ z0 = disks - 3; /* Highest data disk */
+ p = dptr[z0+1]; /* XOR parity */
+ q = dptr[z0+2]; /* RS syndrome */
+
+ kernel_fpu_begin();
+
+ asm volatile("movq %0,%%mm0" : : "m" (raid6_mmx_constants.x1d));
+ asm volatile("pxor %mm5,%mm5"); /* Zero temp */
+
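+	/*
+	 * The pcmpgtb/paddb/pand/pxor sequence below multiplies each byte
+	 * of the Q accumulator by 0x02 in GF(2^8): pcmpgtb against zero
+	 * builds an 0xff mask for bytes whose top bit is set, paddb
+	 * doubles each byte (x + x == x << 1), and xor'ing in the masked
+	 * 0x1d reduces modulo the field polynomial.
+	 */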
+ for ( d = 0 ; d < bytes ; d += 8 ) {
+ asm volatile("prefetchnta %0" : : "m" (dptr[z0][d]));
+ asm volatile("movq %0,%%mm2" : : "m" (dptr[z0][d])); /* P[0] */
+ asm volatile("prefetchnta %0" : : "m" (dptr[z0-1][d]));
+ asm volatile("movq %mm2,%mm4"); /* Q[0] */
+ asm volatile("movq %0,%%mm6" : : "m" (dptr[z0-1][d]));
+ for ( z = z0-2 ; z >= 0 ; z-- ) {
+ asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));
+ asm volatile("pcmpgtb %mm4,%mm5");
+ asm volatile("paddb %mm4,%mm4");
+ asm volatile("pand %mm0,%mm5");
+ asm volatile("pxor %mm5,%mm4");
+ asm volatile("pxor %mm5,%mm5");
+ asm volatile("pxor %mm6,%mm2");
+ asm volatile("pxor %mm6,%mm4");
+ asm volatile("movq %0,%%mm6" : : "m" (dptr[z][d]));
+ }
+ asm volatile("pcmpgtb %mm4,%mm5");
+ asm volatile("paddb %mm4,%mm4");
+ asm volatile("pand %mm0,%mm5");
+ asm volatile("pxor %mm5,%mm4");
+ asm volatile("pxor %mm5,%mm5");
+ asm volatile("pxor %mm6,%mm2");
+ asm volatile("pxor %mm6,%mm4");
+
+ asm volatile("movntq %%mm2,%0" : "=m" (p[d]));
+ asm volatile("movntq %%mm4,%0" : "=m" (q[d]));
+ }
+
+ asm volatile("sfence" : : : "memory");
+ kernel_fpu_end();
+}
+
+const struct raid6_calls raid6_sse1x1 = {
+ raid6_sse11_gen_syndrome,
+ NULL, /* XOR not yet implemented */
+ raid6_have_sse1_or_mmxext,
+ "sse1x1",
+ 1 /* Has cache hints */
+};
+
+/*
+ * Unrolled-by-2 SSE1 implementation
+ */
+static void raid6_sse12_gen_syndrome(int disks, size_t bytes, void **ptrs)
+{
+ u8 **dptr = (u8 **)ptrs;
+ u8 *p, *q;
+ int d, z, z0;
+
+ z0 = disks - 3; /* Highest data disk */
+ p = dptr[z0+1]; /* XOR parity */
+ q = dptr[z0+2]; /* RS syndrome */
+
+ kernel_fpu_begin();
+
+ asm volatile("movq %0,%%mm0" : : "m" (raid6_mmx_constants.x1d));
+ asm volatile("pxor %mm5,%mm5"); /* Zero temp */
+ asm volatile("pxor %mm7,%mm7"); /* Zero temp */
+
+ /* We uniformly assume a single prefetch covers at least 16 bytes */
+ for ( d = 0 ; d < bytes ; d += 16 ) {
+ asm volatile("prefetchnta %0" : : "m" (dptr[z0][d]));
+ asm volatile("movq %0,%%mm2" : : "m" (dptr[z0][d])); /* P[0] */
+ asm volatile("movq %0,%%mm3" : : "m" (dptr[z0][d+8])); /* P[1] */
+ asm volatile("movq %mm2,%mm4"); /* Q[0] */
+ asm volatile("movq %mm3,%mm6"); /* Q[1] */
+ for ( z = z0-1 ; z >= 0 ; z-- ) {
+ asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));
+ asm volatile("pcmpgtb %mm4,%mm5");
+ asm volatile("pcmpgtb %mm6,%mm7");
+ asm volatile("paddb %mm4,%mm4");
+ asm volatile("paddb %mm6,%mm6");
+ asm volatile("pand %mm0,%mm5");
+ asm volatile("pand %mm0,%mm7");
+ asm volatile("pxor %mm5,%mm4");
+ asm volatile("pxor %mm7,%mm6");
+ asm volatile("movq %0,%%mm5" : : "m" (dptr[z][d]));
+ asm volatile("movq %0,%%mm7" : : "m" (dptr[z][d+8]));
+ asm volatile("pxor %mm5,%mm2");
+ asm volatile("pxor %mm7,%mm3");
+ asm volatile("pxor %mm5,%mm4");
+ asm volatile("pxor %mm7,%mm6");
+ asm volatile("pxor %mm5,%mm5");
+ asm volatile("pxor %mm7,%mm7");
+ }
+ asm volatile("movntq %%mm2,%0" : "=m" (p[d]));
+ asm volatile("movntq %%mm3,%0" : "=m" (p[d+8]));
+ asm volatile("movntq %%mm4,%0" : "=m" (q[d]));
+ asm volatile("movntq %%mm6,%0" : "=m" (q[d+8]));
+ }
+
+	asm volatile("sfence" : : : "memory");
+ kernel_fpu_end();
+}
+
+const struct raid6_calls raid6_sse1x2 = {
+ raid6_sse12_gen_syndrome,
+ NULL, /* XOR not yet implemented */
+ raid6_have_sse1_or_mmxext,
+ "sse1x2",
+ 1 /* Has cache hints */
+};
+
+#endif
diff --git a/lib/raid6/sse2.c b/lib/raid6/sse2.c
new file mode 100644
index 000000000..293022024
--- /dev/null
+++ b/lib/raid6/sse2.c
@@ -0,0 +1,480 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* -*- linux-c -*- ------------------------------------------------------- *
+ *
+ * Copyright 2002 H. Peter Anvin - All Rights Reserved
+ *
+ * ----------------------------------------------------------------------- */
+
+/*
+ * raid6/sse2.c
+ *
+ * SSE-2 implementation of RAID-6 syndrome functions
+ *
+ */
+
+#include <linux/raid/pq.h>
+#include "x86.h"
+
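+/*
+ * 0x1d is the low byte of the GF(2^8) generator polynomial
+ * x^8 + x^4 + x^3 + x^2 + 1; it is xor'ed into a byte whenever the
+ * multiply-by-0x02 step in the loops below shifts a set top bit out.
+ */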
+static const struct raid6_sse_constants {
+ u64 x1d[2];
+} raid6_sse_constants __attribute__((aligned(16))) = {
+ { 0x1d1d1d1d1d1d1d1dULL, 0x1d1d1d1d1d1d1d1dULL },
+};
+
+static int raid6_have_sse2(void)
+{
+ /* Not really boot_cpu but "all_cpus" */
+ return boot_cpu_has(X86_FEATURE_MMX) &&
+ boot_cpu_has(X86_FEATURE_FXSR) &&
+ boot_cpu_has(X86_FEATURE_XMM) &&
+ boot_cpu_has(X86_FEATURE_XMM2);
+}
+
+/*
+ * Plain SSE2 implementation
+ */
+static void raid6_sse21_gen_syndrome(int disks, size_t bytes, void **ptrs)
+{
+ u8 **dptr = (u8 **)ptrs;
+ u8 *p, *q;
+ int d, z, z0;
+
+ z0 = disks - 3; /* Highest data disk */
+ p = dptr[z0+1]; /* XOR parity */
+ q = dptr[z0+2]; /* RS syndrome */
+
+ kernel_fpu_begin();
+
+ asm volatile("movdqa %0,%%xmm0" : : "m" (raid6_sse_constants.x1d[0]));
+ asm volatile("pxor %xmm5,%xmm5"); /* Zero temp */
+
+ for ( d = 0 ; d < bytes ; d += 16 ) {
+ asm volatile("prefetchnta %0" : : "m" (dptr[z0][d]));
+ asm volatile("movdqa %0,%%xmm2" : : "m" (dptr[z0][d])); /* P[0] */
+ asm volatile("prefetchnta %0" : : "m" (dptr[z0-1][d]));
+ asm volatile("movdqa %xmm2,%xmm4"); /* Q[0] */
+ asm volatile("movdqa %0,%%xmm6" : : "m" (dptr[z0-1][d]));
+ for ( z = z0-2 ; z >= 0 ; z-- ) {
+ asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));
+ asm volatile("pcmpgtb %xmm4,%xmm5");
+ asm volatile("paddb %xmm4,%xmm4");
+ asm volatile("pand %xmm0,%xmm5");
+ asm volatile("pxor %xmm5,%xmm4");
+ asm volatile("pxor %xmm5,%xmm5");
+ asm volatile("pxor %xmm6,%xmm2");
+ asm volatile("pxor %xmm6,%xmm4");
+ asm volatile("movdqa %0,%%xmm6" : : "m" (dptr[z][d]));
+ }
+ asm volatile("pcmpgtb %xmm4,%xmm5");
+ asm volatile("paddb %xmm4,%xmm4");
+ asm volatile("pand %xmm0,%xmm5");
+ asm volatile("pxor %xmm5,%xmm4");
+ asm volatile("pxor %xmm5,%xmm5");
+ asm volatile("pxor %xmm6,%xmm2");
+ asm volatile("pxor %xmm6,%xmm4");
+
+ asm volatile("movntdq %%xmm2,%0" : "=m" (p[d]));
+ asm volatile("pxor %xmm2,%xmm2");
+ asm volatile("movntdq %%xmm4,%0" : "=m" (q[d]));
+ asm volatile("pxor %xmm4,%xmm4");
+ }
+
+ asm volatile("sfence" : : : "memory");
+ kernel_fpu_end();
+}
+
+static void raid6_sse21_xor_syndrome(int disks, int start, int stop,
+ size_t bytes, void **ptrs)
+{
+ u8 **dptr = (u8 **)ptrs;
+ u8 *p, *q;
+ int d, z, z0;
+
+ z0 = stop; /* P/Q right side optimization */
+ p = dptr[disks-2]; /* XOR parity */
+ q = dptr[disks-1]; /* RS syndrome */
+
+ kernel_fpu_begin();
+
+ asm volatile("movdqa %0,%%xmm0" : : "m" (raid6_sse_constants.x1d[0]));
+
+ for ( d = 0 ; d < bytes ; d += 16 ) {
+ asm volatile("movdqa %0,%%xmm4" :: "m" (dptr[z0][d]));
+ asm volatile("movdqa %0,%%xmm2" : : "m" (p[d]));
+ asm volatile("pxor %xmm4,%xmm2");
+ /* P/Q data pages */
+ for ( z = z0-1 ; z >= start ; z-- ) {
+ asm volatile("pxor %xmm5,%xmm5");
+ asm volatile("pcmpgtb %xmm4,%xmm5");
+ asm volatile("paddb %xmm4,%xmm4");
+ asm volatile("pand %xmm0,%xmm5");
+ asm volatile("pxor %xmm5,%xmm4");
+ asm volatile("movdqa %0,%%xmm5" :: "m" (dptr[z][d]));
+ asm volatile("pxor %xmm5,%xmm2");
+ asm volatile("pxor %xmm5,%xmm4");
+ }
+ /* P/Q left side optimization */
+ for ( z = start-1 ; z >= 0 ; z-- ) {
+ asm volatile("pxor %xmm5,%xmm5");
+ asm volatile("pcmpgtb %xmm4,%xmm5");
+ asm volatile("paddb %xmm4,%xmm4");
+ asm volatile("pand %xmm0,%xmm5");
+ asm volatile("pxor %xmm5,%xmm4");
+ }
+ asm volatile("pxor %0,%%xmm4" : : "m" (q[d]));
+ /* Don't use movntdq for r/w memory area < cache line */
+ asm volatile("movdqa %%xmm4,%0" : "=m" (q[d]));
+ asm volatile("movdqa %%xmm2,%0" : "=m" (p[d]));
+ }
+
+ asm volatile("sfence" : : : "memory");
+ kernel_fpu_end();
+}
+
+const struct raid6_calls raid6_sse2x1 = {
+ raid6_sse21_gen_syndrome,
+ raid6_sse21_xor_syndrome,
+ raid6_have_sse2,
+ "sse2x1",
+ 1 /* Has cache hints */
+};
+
+/*
+ * Unrolled-by-2 SSE2 implementation
+ */
+static void raid6_sse22_gen_syndrome(int disks, size_t bytes, void **ptrs)
+{
+ u8 **dptr = (u8 **)ptrs;
+ u8 *p, *q;
+ int d, z, z0;
+
+ z0 = disks - 3; /* Highest data disk */
+ p = dptr[z0+1]; /* XOR parity */
+ q = dptr[z0+2]; /* RS syndrome */
+
+ kernel_fpu_begin();
+
+ asm volatile("movdqa %0,%%xmm0" : : "m" (raid6_sse_constants.x1d[0]));
+ asm volatile("pxor %xmm5,%xmm5"); /* Zero temp */
+ asm volatile("pxor %xmm7,%xmm7"); /* Zero temp */
+
+ /* We uniformly assume a single prefetch covers at least 32 bytes */
+ for ( d = 0 ; d < bytes ; d += 32 ) {
+ asm volatile("prefetchnta %0" : : "m" (dptr[z0][d]));
+ asm volatile("movdqa %0,%%xmm2" : : "m" (dptr[z0][d])); /* P[0] */
+ asm volatile("movdqa %0,%%xmm3" : : "m" (dptr[z0][d+16])); /* P[1] */
+ asm volatile("movdqa %xmm2,%xmm4"); /* Q[0] */
+ asm volatile("movdqa %xmm3,%xmm6"); /* Q[1] */
+ for ( z = z0-1 ; z >= 0 ; z-- ) {
+ asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));
+ asm volatile("pcmpgtb %xmm4,%xmm5");
+ asm volatile("pcmpgtb %xmm6,%xmm7");
+ asm volatile("paddb %xmm4,%xmm4");
+ asm volatile("paddb %xmm6,%xmm6");
+ asm volatile("pand %xmm0,%xmm5");
+ asm volatile("pand %xmm0,%xmm7");
+ asm volatile("pxor %xmm5,%xmm4");
+ asm volatile("pxor %xmm7,%xmm6");
+ asm volatile("movdqa %0,%%xmm5" : : "m" (dptr[z][d]));
+ asm volatile("movdqa %0,%%xmm7" : : "m" (dptr[z][d+16]));
+ asm volatile("pxor %xmm5,%xmm2");
+ asm volatile("pxor %xmm7,%xmm3");
+ asm volatile("pxor %xmm5,%xmm4");
+ asm volatile("pxor %xmm7,%xmm6");
+ asm volatile("pxor %xmm5,%xmm5");
+ asm volatile("pxor %xmm7,%xmm7");
+ }
+ asm volatile("movntdq %%xmm2,%0" : "=m" (p[d]));
+ asm volatile("movntdq %%xmm3,%0" : "=m" (p[d+16]));
+ asm volatile("movntdq %%xmm4,%0" : "=m" (q[d]));
+ asm volatile("movntdq %%xmm6,%0" : "=m" (q[d+16]));
+ }
+
+ asm volatile("sfence" : : : "memory");
+ kernel_fpu_end();
+}
+
+static void raid6_sse22_xor_syndrome(int disks, int start, int stop,
+ size_t bytes, void **ptrs)
+{
+ u8 **dptr = (u8 **)ptrs;
+ u8 *p, *q;
+ int d, z, z0;
+
+ z0 = stop; /* P/Q right side optimization */
+ p = dptr[disks-2]; /* XOR parity */
+ q = dptr[disks-1]; /* RS syndrome */
+
+ kernel_fpu_begin();
+
+ asm volatile("movdqa %0,%%xmm0" : : "m" (raid6_sse_constants.x1d[0]));
+
+ for ( d = 0 ; d < bytes ; d += 32 ) {
+ asm volatile("movdqa %0,%%xmm4" :: "m" (dptr[z0][d]));
+ asm volatile("movdqa %0,%%xmm6" :: "m" (dptr[z0][d+16]));
+ asm volatile("movdqa %0,%%xmm2" : : "m" (p[d]));
+ asm volatile("movdqa %0,%%xmm3" : : "m" (p[d+16]));
+ asm volatile("pxor %xmm4,%xmm2");
+ asm volatile("pxor %xmm6,%xmm3");
+ /* P/Q data pages */
+ for ( z = z0-1 ; z >= start ; z-- ) {
+ asm volatile("pxor %xmm5,%xmm5");
+ asm volatile("pxor %xmm7,%xmm7");
+ asm volatile("pcmpgtb %xmm4,%xmm5");
+ asm volatile("pcmpgtb %xmm6,%xmm7");
+ asm volatile("paddb %xmm4,%xmm4");
+ asm volatile("paddb %xmm6,%xmm6");
+ asm volatile("pand %xmm0,%xmm5");
+ asm volatile("pand %xmm0,%xmm7");
+ asm volatile("pxor %xmm5,%xmm4");
+ asm volatile("pxor %xmm7,%xmm6");
+ asm volatile("movdqa %0,%%xmm5" :: "m" (dptr[z][d]));
+ asm volatile("movdqa %0,%%xmm7" :: "m" (dptr[z][d+16]));
+ asm volatile("pxor %xmm5,%xmm2");
+ asm volatile("pxor %xmm7,%xmm3");
+ asm volatile("pxor %xmm5,%xmm4");
+ asm volatile("pxor %xmm7,%xmm6");
+ }
+ /* P/Q left side optimization */
+ for ( z = start-1 ; z >= 0 ; z-- ) {
+ asm volatile("pxor %xmm5,%xmm5");
+ asm volatile("pxor %xmm7,%xmm7");
+ asm volatile("pcmpgtb %xmm4,%xmm5");
+ asm volatile("pcmpgtb %xmm6,%xmm7");
+ asm volatile("paddb %xmm4,%xmm4");
+ asm volatile("paddb %xmm6,%xmm6");
+ asm volatile("pand %xmm0,%xmm5");
+ asm volatile("pand %xmm0,%xmm7");
+ asm volatile("pxor %xmm5,%xmm4");
+ asm volatile("pxor %xmm7,%xmm6");
+ }
+ asm volatile("pxor %0,%%xmm4" : : "m" (q[d]));
+ asm volatile("pxor %0,%%xmm6" : : "m" (q[d+16]));
+ /* Don't use movntdq for r/w memory area < cache line */
+ asm volatile("movdqa %%xmm4,%0" : "=m" (q[d]));
+ asm volatile("movdqa %%xmm6,%0" : "=m" (q[d+16]));
+ asm volatile("movdqa %%xmm2,%0" : "=m" (p[d]));
+ asm volatile("movdqa %%xmm3,%0" : "=m" (p[d+16]));
+ }
+
+ asm volatile("sfence" : : : "memory");
+ kernel_fpu_end();
+}
+
+const struct raid6_calls raid6_sse2x2 = {
+ raid6_sse22_gen_syndrome,
+ raid6_sse22_xor_syndrome,
+ raid6_have_sse2,
+ "sse2x2",
+ 1 /* Has cache hints */
+};
+
+#ifdef CONFIG_X86_64
+
+/*
+ * Unrolled-by-4 SSE2 implementation
+ */
+static void raid6_sse24_gen_syndrome(int disks, size_t bytes, void **ptrs)
+{
+ u8 **dptr = (u8 **)ptrs;
+ u8 *p, *q;
+ int d, z, z0;
+
+ z0 = disks - 3; /* Highest data disk */
+ p = dptr[z0+1]; /* XOR parity */
+ q = dptr[z0+2]; /* RS syndrome */
+
+ kernel_fpu_begin();
+
+ asm volatile("movdqa %0,%%xmm0" :: "m" (raid6_sse_constants.x1d[0]));
+ asm volatile("pxor %xmm2,%xmm2"); /* P[0] */
+ asm volatile("pxor %xmm3,%xmm3"); /* P[1] */
+ asm volatile("pxor %xmm4,%xmm4"); /* Q[0] */
+ asm volatile("pxor %xmm5,%xmm5"); /* Zero temp */
+ asm volatile("pxor %xmm6,%xmm6"); /* Q[1] */
+ asm volatile("pxor %xmm7,%xmm7"); /* Zero temp */
+ asm volatile("pxor %xmm10,%xmm10"); /* P[2] */
+ asm volatile("pxor %xmm11,%xmm11"); /* P[3] */
+ asm volatile("pxor %xmm12,%xmm12"); /* Q[2] */
+ asm volatile("pxor %xmm13,%xmm13"); /* Zero temp */
+ asm volatile("pxor %xmm14,%xmm14"); /* Q[3] */
+ asm volatile("pxor %xmm15,%xmm15"); /* Zero temp */
+
+ for ( d = 0 ; d < bytes ; d += 64 ) {
+ for ( z = z0 ; z >= 0 ; z-- ) {
+ /* The second prefetch seems to improve performance... */
+ asm volatile("prefetchnta %0" :: "m" (dptr[z][d]));
+ asm volatile("prefetchnta %0" :: "m" (dptr[z][d+32]));
+ asm volatile("pcmpgtb %xmm4,%xmm5");
+ asm volatile("pcmpgtb %xmm6,%xmm7");
+ asm volatile("pcmpgtb %xmm12,%xmm13");
+ asm volatile("pcmpgtb %xmm14,%xmm15");
+ asm volatile("paddb %xmm4,%xmm4");
+ asm volatile("paddb %xmm6,%xmm6");
+ asm volatile("paddb %xmm12,%xmm12");
+ asm volatile("paddb %xmm14,%xmm14");
+ asm volatile("pand %xmm0,%xmm5");
+ asm volatile("pand %xmm0,%xmm7");
+ asm volatile("pand %xmm0,%xmm13");
+ asm volatile("pand %xmm0,%xmm15");
+ asm volatile("pxor %xmm5,%xmm4");
+ asm volatile("pxor %xmm7,%xmm6");
+ asm volatile("pxor %xmm13,%xmm12");
+ asm volatile("pxor %xmm15,%xmm14");
+ asm volatile("movdqa %0,%%xmm5" :: "m" (dptr[z][d]));
+ asm volatile("movdqa %0,%%xmm7" :: "m" (dptr[z][d+16]));
+ asm volatile("movdqa %0,%%xmm13" :: "m" (dptr[z][d+32]));
+ asm volatile("movdqa %0,%%xmm15" :: "m" (dptr[z][d+48]));
+ asm volatile("pxor %xmm5,%xmm2");
+ asm volatile("pxor %xmm7,%xmm3");
+ asm volatile("pxor %xmm13,%xmm10");
+ asm volatile("pxor %xmm15,%xmm11");
+ asm volatile("pxor %xmm5,%xmm4");
+ asm volatile("pxor %xmm7,%xmm6");
+ asm volatile("pxor %xmm13,%xmm12");
+ asm volatile("pxor %xmm15,%xmm14");
+ asm volatile("pxor %xmm5,%xmm5");
+ asm volatile("pxor %xmm7,%xmm7");
+ asm volatile("pxor %xmm13,%xmm13");
+ asm volatile("pxor %xmm15,%xmm15");
+ }
+ asm volatile("movntdq %%xmm2,%0" : "=m" (p[d]));
+ asm volatile("pxor %xmm2,%xmm2");
+ asm volatile("movntdq %%xmm3,%0" : "=m" (p[d+16]));
+ asm volatile("pxor %xmm3,%xmm3");
+ asm volatile("movntdq %%xmm10,%0" : "=m" (p[d+32]));
+ asm volatile("pxor %xmm10,%xmm10");
+ asm volatile("movntdq %%xmm11,%0" : "=m" (p[d+48]));
+ asm volatile("pxor %xmm11,%xmm11");
+ asm volatile("movntdq %%xmm4,%0" : "=m" (q[d]));
+ asm volatile("pxor %xmm4,%xmm4");
+ asm volatile("movntdq %%xmm6,%0" : "=m" (q[d+16]));
+ asm volatile("pxor %xmm6,%xmm6");
+ asm volatile("movntdq %%xmm12,%0" : "=m" (q[d+32]));
+ asm volatile("pxor %xmm12,%xmm12");
+ asm volatile("movntdq %%xmm14,%0" : "=m" (q[d+48]));
+ asm volatile("pxor %xmm14,%xmm14");
+ }
+
+ asm volatile("sfence" : : : "memory");
+ kernel_fpu_end();
+}
+
+static void raid6_sse24_xor_syndrome(int disks, int start, int stop,
+ size_t bytes, void **ptrs)
+{
+ u8 **dptr = (u8 **)ptrs;
+ u8 *p, *q;
+ int d, z, z0;
+
+ z0 = stop; /* P/Q right side optimization */
+ p = dptr[disks-2]; /* XOR parity */
+ q = dptr[disks-1]; /* RS syndrome */
+
+ kernel_fpu_begin();
+
+ asm volatile("movdqa %0,%%xmm0" :: "m" (raid6_sse_constants.x1d[0]));
+
+ for ( d = 0 ; d < bytes ; d += 64 ) {
+ asm volatile("movdqa %0,%%xmm4" :: "m" (dptr[z0][d]));
+ asm volatile("movdqa %0,%%xmm6" :: "m" (dptr[z0][d+16]));
+ asm volatile("movdqa %0,%%xmm12" :: "m" (dptr[z0][d+32]));
+ asm volatile("movdqa %0,%%xmm14" :: "m" (dptr[z0][d+48]));
+ asm volatile("movdqa %0,%%xmm2" : : "m" (p[d]));
+ asm volatile("movdqa %0,%%xmm3" : : "m" (p[d+16]));
+ asm volatile("movdqa %0,%%xmm10" : : "m" (p[d+32]));
+ asm volatile("movdqa %0,%%xmm11" : : "m" (p[d+48]));
+ asm volatile("pxor %xmm4,%xmm2");
+ asm volatile("pxor %xmm6,%xmm3");
+ asm volatile("pxor %xmm12,%xmm10");
+ asm volatile("pxor %xmm14,%xmm11");
+ /* P/Q data pages */
+ for ( z = z0-1 ; z >= start ; z-- ) {
+ asm volatile("prefetchnta %0" :: "m" (dptr[z][d]));
+ asm volatile("prefetchnta %0" :: "m" (dptr[z][d+32]));
+ asm volatile("pxor %xmm5,%xmm5");
+ asm volatile("pxor %xmm7,%xmm7");
+ asm volatile("pxor %xmm13,%xmm13");
+ asm volatile("pxor %xmm15,%xmm15");
+ asm volatile("pcmpgtb %xmm4,%xmm5");
+ asm volatile("pcmpgtb %xmm6,%xmm7");
+ asm volatile("pcmpgtb %xmm12,%xmm13");
+ asm volatile("pcmpgtb %xmm14,%xmm15");
+ asm volatile("paddb %xmm4,%xmm4");
+ asm volatile("paddb %xmm6,%xmm6");
+ asm volatile("paddb %xmm12,%xmm12");
+ asm volatile("paddb %xmm14,%xmm14");
+ asm volatile("pand %xmm0,%xmm5");
+ asm volatile("pand %xmm0,%xmm7");
+ asm volatile("pand %xmm0,%xmm13");
+ asm volatile("pand %xmm0,%xmm15");
+ asm volatile("pxor %xmm5,%xmm4");
+ asm volatile("pxor %xmm7,%xmm6");
+ asm volatile("pxor %xmm13,%xmm12");
+ asm volatile("pxor %xmm15,%xmm14");
+ asm volatile("movdqa %0,%%xmm5" :: "m" (dptr[z][d]));
+ asm volatile("movdqa %0,%%xmm7" :: "m" (dptr[z][d+16]));
+ asm volatile("movdqa %0,%%xmm13" :: "m" (dptr[z][d+32]));
+ asm volatile("movdqa %0,%%xmm15" :: "m" (dptr[z][d+48]));
+ asm volatile("pxor %xmm5,%xmm2");
+ asm volatile("pxor %xmm7,%xmm3");
+ asm volatile("pxor %xmm13,%xmm10");
+ asm volatile("pxor %xmm15,%xmm11");
+ asm volatile("pxor %xmm5,%xmm4");
+ asm volatile("pxor %xmm7,%xmm6");
+ asm volatile("pxor %xmm13,%xmm12");
+ asm volatile("pxor %xmm15,%xmm14");
+ }
+ asm volatile("prefetchnta %0" :: "m" (q[d]));
+ asm volatile("prefetchnta %0" :: "m" (q[d+32]));
+ /* P/Q left side optimization */
+ for ( z = start-1 ; z >= 0 ; z-- ) {
+ asm volatile("pxor %xmm5,%xmm5");
+ asm volatile("pxor %xmm7,%xmm7");
+ asm volatile("pxor %xmm13,%xmm13");
+ asm volatile("pxor %xmm15,%xmm15");
+ asm volatile("pcmpgtb %xmm4,%xmm5");
+ asm volatile("pcmpgtb %xmm6,%xmm7");
+ asm volatile("pcmpgtb %xmm12,%xmm13");
+ asm volatile("pcmpgtb %xmm14,%xmm15");
+ asm volatile("paddb %xmm4,%xmm4");
+ asm volatile("paddb %xmm6,%xmm6");
+ asm volatile("paddb %xmm12,%xmm12");
+ asm volatile("paddb %xmm14,%xmm14");
+ asm volatile("pand %xmm0,%xmm5");
+ asm volatile("pand %xmm0,%xmm7");
+ asm volatile("pand %xmm0,%xmm13");
+ asm volatile("pand %xmm0,%xmm15");
+ asm volatile("pxor %xmm5,%xmm4");
+ asm volatile("pxor %xmm7,%xmm6");
+ asm volatile("pxor %xmm13,%xmm12");
+ asm volatile("pxor %xmm15,%xmm14");
+ }
+ asm volatile("movntdq %%xmm2,%0" : "=m" (p[d]));
+ asm volatile("movntdq %%xmm3,%0" : "=m" (p[d+16]));
+ asm volatile("movntdq %%xmm10,%0" : "=m" (p[d+32]));
+ asm volatile("movntdq %%xmm11,%0" : "=m" (p[d+48]));
+ asm volatile("pxor %0,%%xmm4" : : "m" (q[d]));
+ asm volatile("pxor %0,%%xmm6" : : "m" (q[d+16]));
+ asm volatile("pxor %0,%%xmm12" : : "m" (q[d+32]));
+ asm volatile("pxor %0,%%xmm14" : : "m" (q[d+48]));
+ asm volatile("movntdq %%xmm4,%0" : "=m" (q[d]));
+ asm volatile("movntdq %%xmm6,%0" : "=m" (q[d+16]));
+ asm volatile("movntdq %%xmm12,%0" : "=m" (q[d+32]));
+ asm volatile("movntdq %%xmm14,%0" : "=m" (q[d+48]));
+ }
+ asm volatile("sfence" : : : "memory");
+ kernel_fpu_end();
+}
+
+const struct raid6_calls raid6_sse2x4 = {
+ raid6_sse24_gen_syndrome,
+ raid6_sse24_xor_syndrome,
+ raid6_have_sse2,
+ "sse2x4",
+ 1 /* Has cache hints */
+};
+
+#endif /* CONFIG_X86_64 */
diff --git a/lib/raid6/test/.gitignore b/lib/raid6/test/.gitignore
new file mode 100644
index 000000000..1b68a77f3
--- /dev/null
+++ b/lib/raid6/test/.gitignore
@@ -0,0 +1,3 @@
+/int.uc
+/neon.uc
+/raid6test
diff --git a/lib/raid6/test/Makefile b/lib/raid6/test/Makefile
new file mode 100644
index 000000000..2abe0076a
--- /dev/null
+++ b/lib/raid6/test/Makefile
@@ -0,0 +1,151 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# This is a simple Makefile to test some of the RAID-6 code
+# from userspace.
+#
+
+pound := \#
+
+# Adjust as desired
+CC = gcc
+OPTFLAGS = -O2
+CFLAGS = -I.. -I ../../../include -g $(OPTFLAGS)
+LD = ld
+AWK = awk -f
+AR = ar
+RANLIB = ranlib
+OBJS = int1.o int2.o int4.o int8.o int16.o int32.o recov.o algos.o tables.o
+
+ARCH := $(shell uname -m 2>/dev/null | sed -e s/i.86/i386/)
+ifeq ($(ARCH),i386)
+ CFLAGS += -DCONFIG_X86_32
+ IS_X86 = yes
+endif
+ifeq ($(ARCH),x86_64)
+ CFLAGS += -DCONFIG_X86_64
+ IS_X86 = yes
+endif
+
+ifeq ($(ARCH),arm)
+ CFLAGS += -I../../../arch/arm/include -mfpu=neon
+ HAS_NEON = yes
+endif
+ifeq ($(ARCH),aarch64)
+ CFLAGS += -I../../../arch/arm64/include
+ HAS_NEON = yes
+endif
+
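+# The $(shell ...) probes below try to compile a one-line snippet read
+# from stdin; if the toolchain accepts it they echo a marker (yes, or a
+# -DCONFIG_* flag) and clean up the ./-.o object that compiling stdin
+# leaves behind.
+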
+ifeq ($(findstring ppc,$(ARCH)),ppc)
+ CFLAGS += -I../../../arch/powerpc/include
+ HAS_ALTIVEC := $(shell printf '$(pound)include <altivec.h>\nvector int a;\n' |\
+ gcc -c -x c - >/dev/null && rm ./-.o && echo yes)
+endif
+
+ifeq ($(ARCH),loongarch64)
+ CFLAGS += -I../../../arch/loongarch/include -DCONFIG_LOONGARCH=1
+ CFLAGS += $(shell echo 'vld $$vr0, $$zero, 0' | \
+ gcc -c -x assembler - >/dev/null 2>&1 && \
+ rm ./-.o && echo -DCONFIG_CPU_HAS_LSX=1)
+ CFLAGS += $(shell echo 'xvld $$xr0, $$zero, 0' | \
+ gcc -c -x assembler - >/dev/null 2>&1 && \
+ rm ./-.o && echo -DCONFIG_CPU_HAS_LASX=1)
+endif
+
+ifeq ($(IS_X86),yes)
+ OBJS += mmx.o sse1.o sse2.o avx2.o recov_ssse3.o recov_avx2.o avx512.o recov_avx512.o
+ CFLAGS += -DCONFIG_X86
+ CFLAGS += $(shell echo "vpmovm2b %k1, %zmm5" | \
+ gcc -c -x assembler - >/dev/null 2>&1 && \
+ rm ./-.o && echo -DCONFIG_AS_AVX512=1)
+else ifeq ($(HAS_NEON),yes)
+ OBJS += neon.o neon1.o neon2.o neon4.o neon8.o recov_neon.o recov_neon_inner.o
+ CFLAGS += -DCONFIG_KERNEL_MODE_NEON=1
+else ifeq ($(HAS_ALTIVEC),yes)
+ CFLAGS += -DCONFIG_ALTIVEC
+ OBJS += altivec1.o altivec2.o altivec4.o altivec8.o \
+ vpermxor1.o vpermxor2.o vpermxor4.o vpermxor8.o
+else ifeq ($(ARCH),loongarch64)
+ OBJS += loongarch_simd.o recov_loongarch_simd.o
+endif
+
+.c.o:
+ $(CC) $(CFLAGS) -c -o $@ $<
+
+%.c: ../%.c
+ cp -f $< $@
+
+%.uc: ../%.uc
+ cp -f $< $@
+
+all: raid6.a raid6test
+
+raid6.a: $(OBJS)
+ rm -f $@
+ $(AR) cq $@ $^
+ $(RANLIB) $@
+
+raid6test: test.c raid6.a
+ $(CC) $(CFLAGS) -o raid6test $^
+
+neon1.c: neon.uc ../unroll.awk
+ $(AWK) ../unroll.awk -vN=1 < neon.uc > $@
+
+neon2.c: neon.uc ../unroll.awk
+ $(AWK) ../unroll.awk -vN=2 < neon.uc > $@
+
+neon4.c: neon.uc ../unroll.awk
+ $(AWK) ../unroll.awk -vN=4 < neon.uc > $@
+
+neon8.c: neon.uc ../unroll.awk
+ $(AWK) ../unroll.awk -vN=8 < neon.uc > $@
+
+altivec1.c: altivec.uc ../unroll.awk
+ $(AWK) ../unroll.awk -vN=1 < altivec.uc > $@
+
+altivec2.c: altivec.uc ../unroll.awk
+ $(AWK) ../unroll.awk -vN=2 < altivec.uc > $@
+
+altivec4.c: altivec.uc ../unroll.awk
+ $(AWK) ../unroll.awk -vN=4 < altivec.uc > $@
+
+altivec8.c: altivec.uc ../unroll.awk
+ $(AWK) ../unroll.awk -vN=8 < altivec.uc > $@
+
+vpermxor1.c: vpermxor.uc ../unroll.awk
+ $(AWK) ../unroll.awk -vN=1 < vpermxor.uc > $@
+
+vpermxor2.c: vpermxor.uc ../unroll.awk
+ $(AWK) ../unroll.awk -vN=2 < vpermxor.uc > $@
+
+vpermxor4.c: vpermxor.uc ../unroll.awk
+ $(AWK) ../unroll.awk -vN=4 < vpermxor.uc > $@
+
+vpermxor8.c: vpermxor.uc ../unroll.awk
+ $(AWK) ../unroll.awk -vN=8 < vpermxor.uc > $@
+
+int1.c: int.uc ../unroll.awk
+ $(AWK) ../unroll.awk -vN=1 < int.uc > $@
+
+int2.c: int.uc ../unroll.awk
+ $(AWK) ../unroll.awk -vN=2 < int.uc > $@
+
+int4.c: int.uc ../unroll.awk
+ $(AWK) ../unroll.awk -vN=4 < int.uc > $@
+
+int8.c: int.uc ../unroll.awk
+ $(AWK) ../unroll.awk -vN=8 < int.uc > $@
+
+int16.c: int.uc ../unroll.awk
+ $(AWK) ../unroll.awk -vN=16 < int.uc > $@
+
+int32.c: int.uc ../unroll.awk
+ $(AWK) ../unroll.awk -vN=32 < int.uc > $@
+
+tables.c: mktables
+ ./mktables > tables.c
+
+clean:
+ rm -f *.o *.a mktables mktables.c *.uc int*.c altivec*.c vpermxor*.c neon*.c tables.c raid6test
+
+spotless: clean
+ rm -f *~
diff --git a/lib/raid6/test/test.c b/lib/raid6/test/test.c
new file mode 100644
index 000000000..841a55242
--- /dev/null
+++ b/lib/raid6/test/test.c
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* -*- linux-c -*- ------------------------------------------------------- *
+ *
+ * Copyright 2002-2007 H. Peter Anvin - All Rights Reserved
+ *
+ * ----------------------------------------------------------------------- */
+
+/*
+ * raid6test.c
+ *
+ * Test RAID-6 recovery with various algorithms
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <linux/raid/pq.h>
+
+#define NDISKS 16 /* Including P and Q */
+
+const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
+
+char *dataptrs[NDISKS];
+char data[NDISKS][PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
+char recovi[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
+char recovj[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
+
+static void makedata(int start, int stop)
+{
+ int i, j;
+
+ for (i = start; i <= stop; i++) {
+ for (j = 0; j < PAGE_SIZE; j++)
+ data[i][j] = rand();
+
+ dataptrs[i] = data[i];
+ }
+}
+
+static char disk_type(int d)
+{
+ switch (d) {
+ case NDISKS-2:
+ return 'P';
+ case NDISKS-1:
+ return 'Q';
+ default:
+ return 'D';
+ }
+}
+
+static int test_disks(int i, int j)
+{
+ int erra, errb;
+
+ memset(recovi, 0xf0, PAGE_SIZE);
+ memset(recovj, 0xba, PAGE_SIZE);
+
+ dataptrs[i] = recovi;
+ dataptrs[j] = recovj;
+
+ raid6_dual_recov(NDISKS, PAGE_SIZE, i, j, (void **)&dataptrs);
+
+ erra = memcmp(data[i], recovi, PAGE_SIZE);
+ errb = memcmp(data[j], recovj, PAGE_SIZE);
+
+ if (i < NDISKS-2 && j == NDISKS-1) {
+		/*
+		 * We don't implement the DQ failure scenario, since it's
+		 * equivalent to a RAID-5 failure (XOR, then recompute Q).
+		 */
+ erra = errb = 0;
+ } else {
+ printf("algo=%-8s faila=%3d(%c) failb=%3d(%c) %s\n",
+ raid6_call.name,
+ i, disk_type(i),
+ j, disk_type(j),
+ (!erra && !errb) ? "OK" :
+ !erra ? "ERRB" :
+ !errb ? "ERRA" : "ERRAB");
+ }
+
+ dataptrs[i] = data[i];
+ dataptrs[j] = data[j];
+
+ return erra || errb;
+}
+
+int main(int argc, char *argv[])
+{
+ const struct raid6_calls *const *algo;
+ const struct raid6_recov_calls *const *ra;
+ int i, j, p1, p2;
+ int err = 0;
+
+ makedata(0, NDISKS-1);
+
+ for (ra = raid6_recov_algos; *ra; ra++) {
+ if ((*ra)->valid && !(*ra)->valid())
+ continue;
+
+ raid6_2data_recov = (*ra)->data2;
+ raid6_datap_recov = (*ra)->datap;
+
+ printf("using recovery %s\n", (*ra)->name);
+
+ for (algo = raid6_algos; *algo; algo++) {
+ if ((*algo)->valid && !(*algo)->valid())
+ continue;
+
+ raid6_call = **algo;
+
+			/* Nuke syndromes: P and Q occupy the last two (contiguous) pages */
+ memset(data[NDISKS-2], 0xee, 2*PAGE_SIZE);
+
+ /* Generate assumed good syndrome */
+ raid6_call.gen_syndrome(NDISKS, PAGE_SIZE,
+ (void **)&dataptrs);
+
+ for (i = 0; i < NDISKS-1; i++)
+ for (j = i+1; j < NDISKS; j++)
+ err += test_disks(i, j);
+
+ if (!raid6_call.xor_syndrome)
+ continue;
+
+ for (p1 = 0; p1 < NDISKS-2; p1++)
+ for (p2 = p1; p2 < NDISKS-2; p2++) {
+
+				/*
+				 * Simulate an rmw run: xor out the old
+				 * data, generate new data, then xor the
+				 * new data back in.
+				 */
+ raid6_call.xor_syndrome(NDISKS, p1, p2, PAGE_SIZE,
+ (void **)&dataptrs);
+ makedata(p1, p2);
+ raid6_call.xor_syndrome(NDISKS, p1, p2, PAGE_SIZE,
+ (void **)&dataptrs);
+
+ for (i = 0; i < NDISKS-1; i++)
+ for (j = i+1; j < NDISKS; j++)
+ err += test_disks(i, j);
+ }
+
+ }
+ printf("\n");
+ }
+
+ printf("\n");
+	/* Also exercise the "pick the best algorithm" code */
+ raid6_select_algo();
+
+ if (err)
+ printf("\n*** ERRORS FOUND ***\n");
+
+ return err;
+}
diff --git a/lib/raid6/unroll.awk b/lib/raid6/unroll.awk
new file mode 100644
index 000000000..0809805a7
--- /dev/null
+++ b/lib/raid6/unroll.awk
@@ -0,0 +1,20 @@
+
+# This filter requires one command line option of form -vN=n
+# where n must be a decimal number.
+#
+# Repeat each input line containing $$ n times, replacing $$ with 0...n-1.
+# Replace each $# with n, and each $* with a single $.
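+#
+# Example: with -vN=2, the input line
+#	u32 wp$$ = *(u32 *)&dptr[z0][d+$$*NSIZE];
+# is emitted twice, once with $$ = 0 and once with $$ = 1, and any
+# $# in the file becomes 2.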
+
+BEGIN {
+ n = N + 0
+}
+{
+ if (/\$\$/) { rep = n } else { rep = 1 }
+ for (i = 0; i < rep; ++i) {
+ tmp = $0
+ gsub(/\$\$/, i, tmp)
+ gsub(/\$#/, n, tmp)
+ gsub(/\$\*/, "$", tmp)
+ print tmp
+ }
+}
diff --git a/lib/raid6/vpermxor.uc b/lib/raid6/vpermxor.uc
new file mode 100644
index 000000000..1bfb127fb
--- /dev/null
+++ b/lib/raid6/vpermxor.uc
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2017, Matt Brown, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * vpermxor$#.c
+ *
+ * Based on H. Peter Anvin's paper - The mathematics of RAID-6
+ *
+ * $#-way unrolled AltiVec vpermxor implementation for RAID-6
+ * This file is postprocessed using unroll.awk
+ *
+ * vpermxor$#.c makes use of the vpermxor instruction to optimise the RAID6 Q
+ * syndrome calculations.
+ * This can be run on systems which support both AltiVec and the vpermxor
+ * instruction.
+ *
+ * This instruction was introduced in POWER8 - ISA v2.07.
+ */
+
+#include <linux/raid/pq.h>
+#ifdef CONFIG_ALTIVEC
+
+#include <altivec.h>
+#include <asm/ppc-opcode.h>
+#ifdef __KERNEL__
+#include <asm/cputable.h>
+#include <asm/switch_to.h>
+#endif
+
+typedef vector unsigned char unative_t;
+#define NSIZE sizeof(unative_t)
+
+static const vector unsigned char gf_low = {0x1e, 0x1c, 0x1a, 0x18, 0x16, 0x14,
+ 0x12, 0x10, 0x0e, 0x0c, 0x0a, 0x08,
+					    0x06, 0x04, 0x02, 0x00};
+static const vector unsigned char gf_high = {0xfd, 0xdd, 0xbd, 0x9d, 0x7d, 0x5d,
+ 0x3d, 0x1d, 0xe0, 0xc0, 0xa0, 0x80,
+ 0x60, 0x40, 0x20, 0x00};
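+
+/*
+ * gf_high and gf_low hold the GF(2^8) products of 0x02 with every
+ * possible high resp. low nibble value; vpermxor looks up both halves
+ * of each byte and xors the results, giving a full multiply-by-0x02 in
+ * a single instruction.  Note the entries run from nibble value 15
+ * down to 0, matching the lane numbering the instruction uses when it
+ * indexes the tables.
+ */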
+
+static void noinline raid6_vpermxor$#_gen_syndrome_real(int disks, size_t bytes,
+ void **ptrs)
+{
+ u8 **dptr = (u8 **)ptrs;
+ u8 *p, *q;
+ int d, z, z0;
+ unative_t wp$$, wq$$, wd$$;
+
+ z0 = disks - 3; /* Highest data disk */
+ p = dptr[z0+1]; /* XOR parity */
+ q = dptr[z0+2]; /* RS syndrome */
+
+ for (d = 0; d < bytes; d += NSIZE*$#) {
+ wp$$ = wq$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE];
+
+ for (z = z0-1; z>=0; z--) {
+ wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE];
+ /* P syndrome */
+ wp$$ = vec_xor(wp$$, wd$$);
+
+			/* Q syndrome: wq = wq * 0x02, then fold in the data */
+ asm(VPERMXOR(%0,%1,%2,%3):"=v"(wq$$):"v"(gf_high), "v"(gf_low), "v"(wq$$));
+ wq$$ = vec_xor(wq$$, wd$$);
+ }
+ *(unative_t *)&p[d+NSIZE*$$] = wp$$;
+ *(unative_t *)&q[d+NSIZE*$$] = wq$$;
+ }
+}
+
+static void raid6_vpermxor$#_gen_syndrome(int disks, size_t bytes, void **ptrs)
+{
+ preempt_disable();
+ enable_kernel_altivec();
+
+ raid6_vpermxor$#_gen_syndrome_real(disks, bytes, ptrs);
+
+ disable_kernel_altivec();
+ preempt_enable();
+}
+
+int raid6_have_altivec_vpermxor(void);
+#if $# == 1
+int raid6_have_altivec_vpermxor(void)
+{
+	/* Check that the arch has both AltiVec and the vpermxor instruction */
+# ifdef __KERNEL__
+	return (cpu_has_feature(CPU_FTR_ALTIVEC_COMP) &&
+		cpu_has_feature(CPU_FTR_ARCH_207S));
+# else
+	return 1;
+# endif
+}
+#endif
+
+const struct raid6_calls raid6_vpermxor$# = {
+ raid6_vpermxor$#_gen_syndrome,
+ NULL,
+ raid6_have_altivec_vpermxor,
+ "vpermxor$#",
+ 0
+};
+#endif
diff --git a/lib/raid6/x86.h b/lib/raid6/x86.h
new file mode 100644
index 000000000..9a6ff3711
--- /dev/null
+++ b/lib/raid6/x86.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* ----------------------------------------------------------------------- *
+ *
+ * Copyright 2002-2004 H. Peter Anvin - All Rights Reserved
+ *
+ * ----------------------------------------------------------------------- */
+
+/*
+ * raid6/x86.h
+ *
+ * Definitions common to x86 and x86-64 RAID-6 code only
+ */
+
+#ifndef LINUX_RAID_RAID6X86_H
+#define LINUX_RAID_RAID6X86_H
+
+#if (defined(__i386__) || defined(__x86_64__)) && !defined(__arch_um__)
+
+#ifdef __KERNEL__ /* Real code */
+
+#include <asm/fpu/api.h>
+
+#else /* Dummy code for user space testing */
+
+static inline void kernel_fpu_begin(void)
+{
+}
+
+static inline void kernel_fpu_end(void)
+{
+}
+
+#define __aligned(x) __attribute__((aligned(x)))
+
+#define X86_FEATURE_MMX (0*32+23) /* Multimedia Extensions */
+#define X86_FEATURE_FXSR (0*32+24) /* FXSAVE and FXRSTOR instructions
+ * (fast save and restore) */
+#define X86_FEATURE_XMM (0*32+25) /* Streaming SIMD Extensions */
+#define X86_FEATURE_XMM2 (0*32+26) /* Streaming SIMD Extensions-2 */
+#define X86_FEATURE_XMM3 (4*32+ 0) /* "pni" SSE-3 */
+#define X86_FEATURE_SSSE3 (4*32+ 9) /* Supplemental SSE-3 */
+#define X86_FEATURE_AVX (4*32+28) /* Advanced Vector Extensions */
+#define X86_FEATURE_AVX2 (9*32+ 5) /* AVX2 instructions */
+#define X86_FEATURE_AVX512F (9*32+16) /* AVX-512 Foundation */
+#define X86_FEATURE_AVX512DQ (9*32+17) /* AVX-512 DQ (Double/Quad granular)
+ * Instructions
+ */
+#define X86_FEATURE_AVX512BW (9*32+30) /* AVX-512 BW (Byte/Word granular)
+ * Instructions
+ */
+#define X86_FEATURE_AVX512VL (9*32+31) /* AVX-512 VL (128/256 Vector Length)
+ * Extensions
+ */
+#define X86_FEATURE_MMXEXT (1*32+22) /* AMD MMX extensions */
+
+/* Should work well enough on modern CPUs for testing */
+static inline int boot_cpu_has(int flag)
+{
+ u32 eax, ebx, ecx, edx;
+
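+	/*
+	 * flag encodes (cpuid word)*32 + bit: words 0 and 1 come from
+	 * the edx of leaves 1 and 0x80000001, word 4 from the ecx of
+	 * leaf 1, and word 9 from the ebx of leaf 7.  The bit tests
+	 * below pick the right leaf and output register accordingly.
+	 */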
+ eax = (flag & 0x100) ? 7 :
+ (flag & 0x20) ? 0x80000001 : 1;
+ ecx = 0;
+
+ asm volatile("cpuid"
+ : "+a" (eax), "=b" (ebx), "=d" (edx), "+c" (ecx));
+
+ return ((flag & 0x100 ? ebx :
+ (flag & 0x80) ? ecx : edx) >> (flag & 31)) & 1;
+}
+
+#endif /* ndef __KERNEL__ */
+
+#endif
+#endif
diff --git a/lib/random32.c b/lib/random32.c
new file mode 100644
index 000000000..32060b852
--- /dev/null
+++ b/lib/random32.c
@@ -0,0 +1,300 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This is a maximally equidistributed combined Tausworthe generator
+ * based on code from GNU Scientific Library 1.5 (30 Jun 2004)
+ *
+ * lfsr113 version:
+ *
+ * x_n = (s1_n ^ s2_n ^ s3_n ^ s4_n)
+ *
+ * s1_{n+1} = (((s1_n & 4294967294) << 18) ^ (((s1_n << 6) ^ s1_n) >> 13))
+ * s2_{n+1} = (((s2_n & 4294967288) << 2) ^ (((s2_n << 2) ^ s2_n) >> 27))
+ * s3_{n+1} = (((s3_n & 4294967280) << 7) ^ (((s3_n << 13) ^ s3_n) >> 21))
+ * s4_{n+1} = (((s4_n & 4294967168) << 13) ^ (((s4_n << 3) ^ s4_n) >> 12))
+ *
+ * The period of this generator is about 2^113 (see erratum paper).
+ *
+ * From: P. L'Ecuyer, "Maximally Equidistributed Combined Tausworthe
+ * Generators", Mathematics of Computation, 65, 213 (1996), 203--213:
+ * http://www.iro.umontreal.ca/~lecuyer/myftp/papers/tausme.ps
+ * ftp://ftp.iro.umontreal.ca/pub/simulation/lecuyer/papers/tausme.ps
+ *
+ * There is an erratum in the paper "Tables of Maximally Equidistributed
+ * Combined LFSR Generators", Mathematics of Computation, 68, 225 (1999),
+ * 261--269: http://www.iro.umontreal.ca/~lecuyer/myftp/papers/tausme2.ps
+ *
+ * ... the k_j most significant bits of z_j must be non-zero,
+ * for each j. (Note: this restriction also applies to the
+ * computer code given in [4], but was mistakenly not mentioned
+ * in that paper.)
+ *
+ * This affects the seeding procedure by imposing the requirement
+ * s1 > 1, s2 > 7, s3 > 15, s4 > 127.
+ */
+
+#include <linux/types.h>
+#include <linux/percpu.h>
+#include <linux/export.h>
+#include <linux/jiffies.h>
+#include <linux/random.h>
+#include <linux/sched.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <asm/unaligned.h>
+
+/**
+ * prandom_u32_state - seeded pseudo-random number generator.
+ * @state: pointer to state structure holding seeded state.
+ *
+ * This is used for pseudo-randomness with no outside seeding.
+ * For more random results, use get_random_u32().
+ */
+u32 prandom_u32_state(struct rnd_state *state)
+{
+#define TAUSWORTHE(s, a, b, c, d) ((s & c) << d) ^ (((s << a) ^ s) >> b)
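+	/*
+	 * Each call below performs one lfsr113 component step,
+	 * s' = ((s & c) << d) ^ (((s << a) ^ s) >> b), using the
+	 * constants from the recurrences quoted at the top of this file.
+	 */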
+ state->s1 = TAUSWORTHE(state->s1, 6U, 13U, 4294967294U, 18U);
+ state->s2 = TAUSWORTHE(state->s2, 2U, 27U, 4294967288U, 2U);
+ state->s3 = TAUSWORTHE(state->s3, 13U, 21U, 4294967280U, 7U);
+ state->s4 = TAUSWORTHE(state->s4, 3U, 12U, 4294967168U, 13U);
+
+ return (state->s1 ^ state->s2 ^ state->s3 ^ state->s4);
+}
+EXPORT_SYMBOL(prandom_u32_state);
+
+/**
+ * prandom_bytes_state - get the requested number of pseudo-random bytes
+ *
+ * @state: pointer to state structure holding seeded state.
+ * @buf: where to copy the pseudo-random bytes to
+ * @bytes: the requested number of bytes
+ *
+ * This is used for pseudo-randomness with no outside seeding.
+ * For more random results, use get_random_bytes().
+ */
+void prandom_bytes_state(struct rnd_state *state, void *buf, size_t bytes)
+{
+ u8 *ptr = buf;
+
+ while (bytes >= sizeof(u32)) {
+ put_unaligned(prandom_u32_state(state), (u32 *) ptr);
+ ptr += sizeof(u32);
+ bytes -= sizeof(u32);
+ }
+
+ if (bytes > 0) {
+ u32 rem = prandom_u32_state(state);
+ do {
+ *ptr++ = (u8) rem;
+ bytes--;
+ rem >>= BITS_PER_BYTE;
+ } while (bytes > 0);
+ }
+}
+EXPORT_SYMBOL(prandom_bytes_state);
+
+static void prandom_warmup(struct rnd_state *state)
+{
+ /* Calling RNG ten times to satisfy recurrence condition */
+ prandom_u32_state(state);
+ prandom_u32_state(state);
+ prandom_u32_state(state);
+ prandom_u32_state(state);
+ prandom_u32_state(state);
+ prandom_u32_state(state);
+ prandom_u32_state(state);
+ prandom_u32_state(state);
+ prandom_u32_state(state);
+ prandom_u32_state(state);
+}
+
+void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state)
+{
+ int i;
+
+ for_each_possible_cpu(i) {
+ struct rnd_state *state = per_cpu_ptr(pcpu_state, i);
+ u32 seeds[4];
+
+ get_random_bytes(&seeds, sizeof(seeds));
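+
+		/*
+		 * __seed() bumps any word below its minimum, enforcing
+		 * s1 > 1, s2 > 7, s3 > 15, s4 > 127 as required by the
+		 * erratum quoted at the top of this file.
+		 */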
+ state->s1 = __seed(seeds[0], 2U);
+ state->s2 = __seed(seeds[1], 8U);
+ state->s3 = __seed(seeds[2], 16U);
+ state->s4 = __seed(seeds[3], 128U);
+
+ prandom_warmup(state);
+ }
+}
+EXPORT_SYMBOL(prandom_seed_full_state);
+
+#ifdef CONFIG_RANDOM32_SELFTEST
+static struct prandom_test1 {
+ u32 seed;
+ u32 result;
+} test1[] = {
+ { 1U, 3484351685U },
+ { 2U, 2623130059U },
+ { 3U, 3125133893U },
+ { 4U, 984847254U },
+};
+
+static struct prandom_test2 {
+ u32 seed;
+ u32 iteration;
+ u32 result;
+} test2[] = {
+ /* Test cases against taus113 from GSL library. */
+ { 931557656U, 959U, 2975593782U },
+ { 1339693295U, 876U, 3887776532U },
+ { 1545556285U, 961U, 1615538833U },
+ { 601730776U, 723U, 1776162651U },
+ { 1027516047U, 687U, 511983079U },
+ { 416526298U, 700U, 916156552U },
+ { 1395522032U, 652U, 2222063676U },
+ { 366221443U, 617U, 2992857763U },
+ { 1539836965U, 714U, 3783265725U },
+ { 556206671U, 994U, 799626459U },
+ { 684907218U, 799U, 367789491U },
+ { 2121230701U, 931U, 2115467001U },
+ { 1668516451U, 644U, 3620590685U },
+ { 768046066U, 883U, 2034077390U },
+ { 1989159136U, 833U, 1195767305U },
+ { 536585145U, 996U, 3577259204U },
+ { 1008129373U, 642U, 1478080776U },
+ { 1740775604U, 939U, 1264980372U },
+ { 1967883163U, 508U, 10734624U },
+ { 1923019697U, 730U, 3821419629U },
+ { 442079932U, 560U, 3440032343U },
+ { 1961302714U, 845U, 841962572U },
+ { 2030205964U, 962U, 1325144227U },
+ { 1160407529U, 507U, 240940858U },
+ { 635482502U, 779U, 4200489746U },
+ { 1252788931U, 699U, 867195434U },
+ { 1961817131U, 719U, 668237657U },
+ { 1071468216U, 983U, 917876630U },
+ { 1281848367U, 932U, 1003100039U },
+ { 582537119U, 780U, 1127273778U },
+ { 1973672777U, 853U, 1071368872U },
+ { 1896756996U, 762U, 1127851055U },
+ { 847917054U, 500U, 1717499075U },
+ { 1240520510U, 951U, 2849576657U },
+ { 1685071682U, 567U, 1961810396U },
+ { 1516232129U, 557U, 3173877U },
+ { 1208118903U, 612U, 1613145022U },
+ { 1817269927U, 693U, 4279122573U },
+ { 1510091701U, 717U, 638191229U },
+ { 365916850U, 807U, 600424314U },
+ { 399324359U, 702U, 1803598116U },
+ { 1318480274U, 779U, 2074237022U },
+ { 697758115U, 840U, 1483639402U },
+ { 1696507773U, 840U, 577415447U },
+ { 2081979121U, 981U, 3041486449U },
+ { 955646687U, 742U, 3846494357U },
+ { 1250683506U, 749U, 836419859U },
+ { 595003102U, 534U, 366794109U },
+ { 47485338U, 558U, 3521120834U },
+ { 619433479U, 610U, 3991783875U },
+ { 704096520U, 518U, 4139493852U },
+ { 1712224984U, 606U, 2393312003U },
+ { 1318233152U, 922U, 3880361134U },
+ { 855572992U, 761U, 1472974787U },
+ { 64721421U, 703U, 683860550U },
+ { 678931758U, 840U, 380616043U },
+ { 692711973U, 778U, 1382361947U },
+ { 677703619U, 530U, 2826914161U },
+ { 92393223U, 586U, 1522128471U },
+ { 1222592920U, 743U, 3466726667U },
+ { 358288986U, 695U, 1091956998U },
+ { 1935056945U, 958U, 514864477U },
+ { 735675993U, 990U, 1294239989U },
+ { 1560089402U, 897U, 2238551287U },
+ { 70616361U, 829U, 22483098U },
+ { 368234700U, 731U, 2913875084U },
+ { 20221190U, 879U, 1564152970U },
+ { 539444654U, 682U, 1835141259U },
+ { 1314987297U, 840U, 1801114136U },
+ { 2019295544U, 645U, 3286438930U },
+ { 469023838U, 716U, 1637918202U },
+ { 1843754496U, 653U, 2562092152U },
+ { 400672036U, 809U, 4264212785U },
+ { 404722249U, 965U, 2704116999U },
+ { 600702209U, 758U, 584979986U },
+ { 519953954U, 667U, 2574436237U },
+ { 1658071126U, 694U, 2214569490U },
+ { 420480037U, 749U, 3430010866U },
+ { 690103647U, 969U, 3700758083U },
+ { 1029424799U, 937U, 3787746841U },
+ { 2012608669U, 506U, 3362628973U },
+ { 1535432887U, 998U, 42610943U },
+ { 1330635533U, 857U, 3040806504U },
+ { 1223800550U, 539U, 3954229517U },
+ { 1322411537U, 680U, 3223250324U },
+ { 1877847898U, 945U, 2915147143U },
+ { 1646356099U, 874U, 965988280U },
+ { 805687536U, 744U, 4032277920U },
+ { 1948093210U, 633U, 1346597684U },
+ { 392609744U, 783U, 1636083295U },
+ { 690241304U, 770U, 1201031298U },
+ { 1360302965U, 696U, 1665394461U },
+ { 1220090946U, 780U, 1316922812U },
+ { 447092251U, 500U, 3438743375U },
+ { 1613868791U, 592U, 828546883U },
+ { 523430951U, 548U, 2552392304U },
+ { 726692899U, 810U, 1656872867U },
+ { 1364340021U, 836U, 3710513486U },
+ { 1986257729U, 931U, 935013962U },
+ { 407983964U, 921U, 728767059U },
+};
+
+static void prandom_state_selftest_seed(struct rnd_state *state, u32 seed)
+{
+#define LCG(x) ((x) * 69069U) /* super-duper LCG */
+ state->s1 = __seed(LCG(seed), 2U);
+ state->s2 = __seed(LCG(state->s1), 8U);
+ state->s3 = __seed(LCG(state->s2), 16U);
+ state->s4 = __seed(LCG(state->s3), 128U);
+}
+
+static int __init prandom_state_selftest(void)
+{
+ int i, j, errors = 0, runs = 0;
+ bool error = false;
+
+ for (i = 0; i < ARRAY_SIZE(test1); i++) {
+ struct rnd_state state;
+
+ prandom_state_selftest_seed(&state, test1[i].seed);
+ prandom_warmup(&state);
+
+ if (test1[i].result != prandom_u32_state(&state))
+ error = true;
+ }
+
+ if (error)
+ pr_warn("prandom: seed boundary self test failed\n");
+ else
+ pr_info("prandom: seed boundary self test passed\n");
+
+ for (i = 0; i < ARRAY_SIZE(test2); i++) {
+ struct rnd_state state;
+
+ prandom_state_selftest_seed(&state, test2[i].seed);
+ prandom_warmup(&state);
+
+ for (j = 0; j < test2[i].iteration - 1; j++)
+ prandom_u32_state(&state);
+
+ if (test2[i].result != prandom_u32_state(&state))
+ errors++;
+
+ runs++;
+ cond_resched();
+ }
+
+ if (errors)
+ pr_warn("prandom: %d/%d self tests failed\n", errors, runs);
+ else
+ pr_info("prandom: %d self tests passed\n", runs);
+ return 0;
+}
+core_initcall(prandom_state_selftest);
+#endif
diff --git a/lib/ratelimit.c b/lib/ratelimit.c
new file mode 100644
index 000000000..ce945c179
--- /dev/null
+++ b/lib/ratelimit.c
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ratelimit.c - Generic rate-limiting helper.
+ *
+ * Isolated from kernel/printk.c by Dave Young <hidave.darkstar@gmail.com>
+ *
+ * 2008-05-01: rewrote the function to take a ratelimit_state data struct
+ * as a parameter, so every user can keep their own standalone
+ * ratelimit_state.
+ */
+
+#include <linux/ratelimit.h>
+#include <linux/jiffies.h>
+#include <linux/export.h>
+
+/*
+ * __ratelimit - rate limiting
+ * @rs: ratelimit_state data
+ * @func: name of calling function
+ *
+ * This enforces a rate limit: not more than @rs->burst callbacks
+ * in every @rs->interval
+ *
+ * RETURNS:
+ * 0 means callbacks will be suppressed.
+ * 1 means go ahead and do it.
+ */
+int ___ratelimit(struct ratelimit_state *rs, const char *func)
+{
+ /* Paired with WRITE_ONCE() in .proc_handler().
+ * Changing the two values separately could be inconsistent
+ * and some messages could be lost. (See: net_ratelimit_state).
+ */
+ int interval = READ_ONCE(rs->interval);
+ int burst = READ_ONCE(rs->burst);
+ unsigned long flags;
+ int ret;
+
+ if (!interval)
+ return 1;
+
+ /*
+ * If we contend on this state's lock then almost
+ * by definition we are too busy to print a message,
+ * in addition to the one that will be printed by
+ * the entity that is holding the lock already:
+ */
+ if (!raw_spin_trylock_irqsave(&rs->lock, flags))
+ return 0;
+
+ if (!rs->begin)
+ rs->begin = jiffies;
+
+ if (time_is_before_jiffies(rs->begin + interval)) {
+ if (rs->missed) {
+ if (!(rs->flags & RATELIMIT_MSG_ON_RELEASE)) {
+ printk_deferred(KERN_WARNING
+ "%s: %d callbacks suppressed\n",
+ func, rs->missed);
+ rs->missed = 0;
+ }
+ }
+ rs->begin = jiffies;
+ rs->printed = 0;
+ }
+ if (burst && burst > rs->printed) {
+ rs->printed++;
+ ret = 1;
+ } else {
+ rs->missed++;
+ ret = 0;
+ }
+ raw_spin_unlock_irqrestore(&rs->lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(___ratelimit);
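+
+/*
+ * A minimal usage sketch (helpers from <linux/ratelimit.h>): allow at
+ * most 10 messages every 5 seconds and silently drop the rest:
+ *
+ *     static DEFINE_RATELIMIT_STATE(my_rs, 5 * HZ, 10);
+ *
+ *     if (__ratelimit(&my_rs))
+ *             pr_warn("something noisy happened\n");
+ */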
diff --git a/lib/rbtree.c b/lib/rbtree.c
new file mode 100644
index 000000000..5114eda63
--- /dev/null
+++ b/lib/rbtree.c
@@ -0,0 +1,630 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ Red Black Trees
+ (C) 1999 Andrea Arcangeli <andrea@suse.de>
+ (C) 2002 David Woodhouse <dwmw2@infradead.org>
+ (C) 2012 Michel Lespinasse <walken@google.com>
+
+
+ linux/lib/rbtree.c
+*/
+
+#include <linux/rbtree_augmented.h>
+#include <linux/export.h>
+
+/*
+ * red-black tree properties: https://en.wikipedia.org/wiki/Rbtree
+ *
+ * 1) A node is either red or black
+ * 2) The root is black
+ * 3) All leaves (NULL) are black
+ * 4) Both children of every red node are black
+ * 5) Every simple path from root to leaves contains the same number
+ * of black nodes.
+ *
+ * 4 and 5 give the O(log n) guarantee, since 4 implies you cannot have two
+ * consecutive red nodes in a path and every red node is therefore followed by
+ * a black. So if B is the number of black nodes on every simple path (as per
+ * 5), then the longest possible path due to 4 is 2B.
+ *
+ * We shall indicate color with case, where black nodes are uppercase and red
+ * nodes will be lowercase. Unknown color nodes shall be drawn as red within
+ * parentheses and have some accompanying text comment.
+ */
+
+/*
+ * Notes on lockless lookups:
+ *
+ * All stores to the tree structure (rb_left and rb_right) must be done using
+ * WRITE_ONCE(). And we must not inadvertently cause (temporary) loops in the
+ * tree structure as seen in program order.
+ *
+ * These two requirements will allow lockless iteration of the tree -- not
+ * correct iteration mind you, tree rotations are not atomic so a lookup might
+ * miss entire subtrees.
+ *
+ * But they do guarantee that any such traversal will only see valid elements
+ * and that it will indeed complete -- does not get stuck in a loop.
+ *
+ * It also guarantees that if the lookup returns an element it is the 'correct'
+ * one. But not returning an element does _NOT_ mean it's not present.
+ *
+ * NOTE:
+ *
+ * Stores to __rb_parent_color are not important for simple lookups so those
+ * are left undone as of now. Nor did I check for loops involving parent
+ * pointers.
+ */
+
+static inline void rb_set_black(struct rb_node *rb)
+{
+ rb->__rb_parent_color += RB_BLACK;
+}
+
+static inline struct rb_node *rb_red_parent(struct rb_node *red)
+{
+ return (struct rb_node *)red->__rb_parent_color;
+}
+
+/*
+ * Helper function for rotations:
+ * - old's parent and color get assigned to new
+ * - old gets assigned new as a parent and 'color' as a color.
+ */
+static inline void
+__rb_rotate_set_parents(struct rb_node *old, struct rb_node *new,
+ struct rb_root *root, int color)
+{
+ struct rb_node *parent = rb_parent(old);
+ new->__rb_parent_color = old->__rb_parent_color;
+ rb_set_parent_color(old, new, color);
+ __rb_change_child(old, new, parent, root);
+}
+
+static __always_inline void
+__rb_insert(struct rb_node *node, struct rb_root *root,
+ void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
+{
+ struct rb_node *parent = rb_red_parent(node), *gparent, *tmp;
+
+ while (true) {
+ /*
+ * Loop invariant: node is red.
+ */
+ if (unlikely(!parent)) {
+ /*
+ * The inserted node is root. Either this is the
+ * first node, or we recursed at Case 1 below and
+ * are no longer violating 4).
+ */
+ rb_set_parent_color(node, NULL, RB_BLACK);
+ break;
+ }
+
+ /*
+ * If there is a black parent, we are done.
+ * Otherwise, take some corrective action as,
+ * per 4), we don't want a red root or two
+ * consecutive red nodes.
+ */
+ if (rb_is_black(parent))
+ break;
+
+ gparent = rb_red_parent(parent);
+
+ tmp = gparent->rb_right;
+ if (parent != tmp) { /* parent == gparent->rb_left */
+ if (tmp && rb_is_red(tmp)) {
+ /*
+ * Case 1 - node's uncle is red (color flips).
+ *
+ * G g
+ * / \ / \
+ * p u --> P U
+ * / /
+ * n n
+ *
+ * However, since g's parent might be red, and
+ * 4) does not allow this, we need to recurse
+ * at g.
+ */
+ rb_set_parent_color(tmp, gparent, RB_BLACK);
+ rb_set_parent_color(parent, gparent, RB_BLACK);
+ node = gparent;
+ parent = rb_parent(node);
+ rb_set_parent_color(node, parent, RB_RED);
+ continue;
+ }
+
+ tmp = parent->rb_right;
+ if (node == tmp) {
+ /*
+ * Case 2 - node's uncle is black and node is
+ * the parent's right child (left rotate at parent).
+ *
+ * G G
+ * / \ / \
+ * p U --> n U
+ * \ /
+ * n p
+ *
+ * This still leaves us in violation of 4), the
+ * continuation into Case 3 will fix that.
+ */
+ tmp = node->rb_left;
+ WRITE_ONCE(parent->rb_right, tmp);
+ WRITE_ONCE(node->rb_left, parent);
+ if (tmp)
+ rb_set_parent_color(tmp, parent,
+ RB_BLACK);
+ rb_set_parent_color(parent, node, RB_RED);
+ augment_rotate(parent, node);
+ parent = node;
+ tmp = node->rb_right;
+ }
+
+ /*
+ * Case 3 - node's uncle is black and node is
+ * the parent's left child (right rotate at gparent).
+ *
+ * G P
+ * / \ / \
+ * p U --> n g
+ * / \
+ * n U
+ */
+ WRITE_ONCE(gparent->rb_left, tmp); /* == parent->rb_right */
+ WRITE_ONCE(parent->rb_right, gparent);
+ if (tmp)
+ rb_set_parent_color(tmp, gparent, RB_BLACK);
+ __rb_rotate_set_parents(gparent, parent, root, RB_RED);
+ augment_rotate(gparent, parent);
+ break;
+ } else {
+ tmp = gparent->rb_left;
+ if (tmp && rb_is_red(tmp)) {
+ /* Case 1 - color flips */
+ rb_set_parent_color(tmp, gparent, RB_BLACK);
+ rb_set_parent_color(parent, gparent, RB_BLACK);
+ node = gparent;
+ parent = rb_parent(node);
+ rb_set_parent_color(node, parent, RB_RED);
+ continue;
+ }
+
+ tmp = parent->rb_left;
+ if (node == tmp) {
+ /* Case 2 - right rotate at parent */
+ tmp = node->rb_right;
+ WRITE_ONCE(parent->rb_left, tmp);
+ WRITE_ONCE(node->rb_right, parent);
+ if (tmp)
+ rb_set_parent_color(tmp, parent,
+ RB_BLACK);
+ rb_set_parent_color(parent, node, RB_RED);
+ augment_rotate(parent, node);
+ parent = node;
+ tmp = node->rb_left;
+ }
+
+ /* Case 3 - left rotate at gparent */
+ WRITE_ONCE(gparent->rb_right, tmp); /* == parent->rb_left */
+ WRITE_ONCE(parent->rb_left, gparent);
+ if (tmp)
+ rb_set_parent_color(tmp, gparent, RB_BLACK);
+ __rb_rotate_set_parents(gparent, parent, root, RB_RED);
+ augment_rotate(gparent, parent);
+ break;
+ }
+ }
+}
+
+/*
+ * Inline version for rb_erase() use - we want to be able to inline
+ * and eliminate the dummy_rotate callback there
+ */
+static __always_inline void
+____rb_erase_color(struct rb_node *parent, struct rb_root *root,
+ void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
+{
+ struct rb_node *node = NULL, *sibling, *tmp1, *tmp2;
+
+ while (true) {
+ /*
+ * Loop invariants:
+ * - node is black (or NULL on first iteration)
+ * - node is not the root (parent is not NULL)
+ * - All leaf paths going through parent and node have a
+ * black node count that is 1 lower than other leaf paths.
+ */
+ sibling = parent->rb_right;
+ if (node != sibling) { /* node == parent->rb_left */
+ if (rb_is_red(sibling)) {
+ /*
+ * Case 1 - left rotate at parent
+ *
+ * P S
+ * / \ / \
+ * N s --> p Sr
+ * / \ / \
+ * Sl Sr N Sl
+ */
+ tmp1 = sibling->rb_left;
+ WRITE_ONCE(parent->rb_right, tmp1);
+ WRITE_ONCE(sibling->rb_left, parent);
+ rb_set_parent_color(tmp1, parent, RB_BLACK);
+ __rb_rotate_set_parents(parent, sibling, root,
+ RB_RED);
+ augment_rotate(parent, sibling);
+ sibling = tmp1;
+ }
+ tmp1 = sibling->rb_right;
+ if (!tmp1 || rb_is_black(tmp1)) {
+ tmp2 = sibling->rb_left;
+ if (!tmp2 || rb_is_black(tmp2)) {
+ /*
+ * Case 2 - sibling color flip
+ * (p could be either color here)
+ *
+ * (p) (p)
+ * / \ / \
+ * N S --> N s
+ * / \ / \
+ * Sl Sr Sl Sr
+ *
+ * This leaves us violating 5) which
+ * can be fixed by flipping p to black
+ * if it was red, or by recursing at p.
+ * p is red when coming from Case 1.
+ */
+ rb_set_parent_color(sibling, parent,
+ RB_RED);
+ if (rb_is_red(parent))
+ rb_set_black(parent);
+ else {
+ node = parent;
+ parent = rb_parent(node);
+ if (parent)
+ continue;
+ }
+ break;
+ }
+ /*
+ * Case 3 - right rotate at sibling
+ * (p could be either color here)
+ *
+ * (p) (p)
+ * / \ / \
+ * N S --> N sl
+ * / \ \
+ * sl Sr S
+ * \
+ * Sr
+ *
+ * Note: p might be red, and then both
+ * p and sl are red after the rotation
+ * (which breaks property 4). This is
+ * fixed in Case 4 (in
+ * __rb_rotate_set_parents(), which sets
+ * sl to the color of p and sets p to
+ * RB_BLACK).
+ *
+ * (p) (sl)
+ * / \ / \
+ * N sl --> P S
+ * \ / \
+ * S N Sr
+ * \
+ * Sr
+ */
+ tmp1 = tmp2->rb_right;
+ WRITE_ONCE(sibling->rb_left, tmp1);
+ WRITE_ONCE(tmp2->rb_right, sibling);
+ WRITE_ONCE(parent->rb_right, tmp2);
+ if (tmp1)
+ rb_set_parent_color(tmp1, sibling,
+ RB_BLACK);
+ augment_rotate(sibling, tmp2);
+ tmp1 = sibling;
+ sibling = tmp2;
+ }
+ /*
+ * Case 4 - left rotate at parent + color flips
+ * (p and sl could be either color here.
+ * After rotation, p becomes black, s acquires
+ * p's color, and sl keeps its color)
+ *
+ * (p) (s)
+ * / \ / \
+ * N S --> P Sr
+ * / \ / \
+ * (sl) sr N (sl)
+ */
+ tmp2 = sibling->rb_left;
+ WRITE_ONCE(parent->rb_right, tmp2);
+ WRITE_ONCE(sibling->rb_left, parent);
+ rb_set_parent_color(tmp1, sibling, RB_BLACK);
+ if (tmp2)
+ rb_set_parent(tmp2, parent);
+ __rb_rotate_set_parents(parent, sibling, root,
+ RB_BLACK);
+ augment_rotate(parent, sibling);
+ break;
+ } else {
+ sibling = parent->rb_left;
+ if (rb_is_red(sibling)) {
+ /* Case 1 - right rotate at parent */
+ tmp1 = sibling->rb_right;
+ WRITE_ONCE(parent->rb_left, tmp1);
+ WRITE_ONCE(sibling->rb_right, parent);
+ rb_set_parent_color(tmp1, parent, RB_BLACK);
+ __rb_rotate_set_parents(parent, sibling, root,
+ RB_RED);
+ augment_rotate(parent, sibling);
+ sibling = tmp1;
+ }
+ tmp1 = sibling->rb_left;
+ if (!tmp1 || rb_is_black(tmp1)) {
+ tmp2 = sibling->rb_right;
+ if (!tmp2 || rb_is_black(tmp2)) {
+ /* Case 2 - sibling color flip */
+ rb_set_parent_color(sibling, parent,
+ RB_RED);
+ if (rb_is_red(parent))
+ rb_set_black(parent);
+ else {
+ node = parent;
+ parent = rb_parent(node);
+ if (parent)
+ continue;
+ }
+ break;
+ }
+ /* Case 3 - left rotate at sibling */
+ tmp1 = tmp2->rb_left;
+ WRITE_ONCE(sibling->rb_right, tmp1);
+ WRITE_ONCE(tmp2->rb_left, sibling);
+ WRITE_ONCE(parent->rb_left, tmp2);
+ if (tmp1)
+ rb_set_parent_color(tmp1, sibling,
+ RB_BLACK);
+ augment_rotate(sibling, tmp2);
+ tmp1 = sibling;
+ sibling = tmp2;
+ }
+ /* Case 4 - right rotate at parent + color flips */
+ tmp2 = sibling->rb_right;
+ WRITE_ONCE(parent->rb_left, tmp2);
+ WRITE_ONCE(sibling->rb_right, parent);
+ rb_set_parent_color(tmp1, sibling, RB_BLACK);
+ if (tmp2)
+ rb_set_parent(tmp2, parent);
+ __rb_rotate_set_parents(parent, sibling, root,
+ RB_BLACK);
+ augment_rotate(parent, sibling);
+ break;
+ }
+ }
+}
+
+/* Non-inline version for rb_erase_augmented() use */
+void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
+ void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
+{
+ ____rb_erase_color(parent, root, augment_rotate);
+}
+EXPORT_SYMBOL(__rb_erase_color);
+
+/*
+ * Non-augmented rbtree manipulation functions.
+ *
+ * We use dummy augmented callbacks here, and have the compiler optimize them
+ * out of the rb_insert_color() and rb_erase() function definitions.
+ */
+
+static inline void dummy_propagate(struct rb_node *node, struct rb_node *stop) {}
+static inline void dummy_copy(struct rb_node *old, struct rb_node *new) {}
+static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {}
+
+static const struct rb_augment_callbacks dummy_callbacks = {
+ .propagate = dummy_propagate,
+ .copy = dummy_copy,
+ .rotate = dummy_rotate
+};
+
+void rb_insert_color(struct rb_node *node, struct rb_root *root)
+{
+ __rb_insert(node, root, dummy_rotate);
+}
+EXPORT_SYMBOL(rb_insert_color);
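+
+/*
+ * A minimal insertion sketch (the container type is hypothetical,
+ * following the pattern in Documentation/core-api/rbtree.rst): descend
+ * to the NULL slot that keeps the tree sorted, link the node there,
+ * then rebalance:
+ *
+ *     struct mything { u32 key; struct rb_node node; };
+ *
+ *     struct rb_node **link = &root->rb_node, *parent = NULL;
+ *
+ *     while (*link) {
+ *             parent = *link;
+ *             if (key < rb_entry(parent, struct mything, node)->key)
+ *                     link = &parent->rb_left;
+ *             else
+ *                     link = &parent->rb_right;
+ *     }
+ *     rb_link_node(&new->node, parent, link);
+ *     rb_insert_color(&new->node, root);
+ */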
+
+void rb_erase(struct rb_node *node, struct rb_root *root)
+{
+ struct rb_node *rebalance;
+ rebalance = __rb_erase_augmented(node, root, &dummy_callbacks);
+ if (rebalance)
+ ____rb_erase_color(rebalance, root, dummy_rotate);
+}
+EXPORT_SYMBOL(rb_erase);
+
+/*
+ * Augmented rbtree manipulation functions.
+ *
+ * This instantiates the same __always_inline functions as in the non-augmented
+ * case, but this time with user-defined callbacks.
+ */
+
+void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
+ void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
+{
+ __rb_insert(node, root, augment_rotate);
+}
+EXPORT_SYMBOL(__rb_insert_augmented);
+
+/*
+ * This function returns the first node (in sort order) of the tree.
+ */
+struct rb_node *rb_first(const struct rb_root *root)
+{
+ struct rb_node *n;
+
+ n = root->rb_node;
+ if (!n)
+ return NULL;
+ while (n->rb_left)
+ n = n->rb_left;
+ return n;
+}
+EXPORT_SYMBOL(rb_first);
+
+struct rb_node *rb_last(const struct rb_root *root)
+{
+ struct rb_node *n;
+
+ n = root->rb_node;
+ if (!n)
+ return NULL;
+ while (n->rb_right)
+ n = n->rb_right;
+ return n;
+}
+EXPORT_SYMBOL(rb_last);
+
+struct rb_node *rb_next(const struct rb_node *node)
+{
+ struct rb_node *parent;
+
+ if (RB_EMPTY_NODE(node))
+ return NULL;
+
+ /*
+ * If we have a right-hand child, go down and then left as far
+ * as we can.
+ */
+ if (node->rb_right) {
+ node = node->rb_right;
+ while (node->rb_left)
+ node = node->rb_left;
+ return (struct rb_node *)node;
+ }
+
+ /*
+ * No right-hand children. Everything down and left is smaller than us,
+ * so any 'next' node must be in the general direction of our parent.
+ * Go up the tree; any time the ancestor is a right-hand child of its
+ * parent, keep going up. First time it's a left-hand child of its
+ * parent, said parent is our 'next' node.
+ */
+ while ((parent = rb_parent(node)) && node == parent->rb_right)
+ node = parent;
+
+ return parent;
+}
+EXPORT_SYMBOL(rb_next);
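+
+/*
+ * Paired with rb_first(), this yields a sorted in-order walk; a short
+ * sketch (visit() and the container type are hypothetical):
+ *
+ *     struct rb_node *n;
+ *
+ *     for (n = rb_first(root); n; n = rb_next(n))
+ *             visit(rb_entry(n, struct mything, node));
+ */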
+
+struct rb_node *rb_prev(const struct rb_node *node)
+{
+ struct rb_node *parent;
+
+ if (RB_EMPTY_NODE(node))
+ return NULL;
+
+ /*
+ * If we have a left-hand child, go down and then right as far
+ * as we can.
+ */
+ if (node->rb_left) {
+ node = node->rb_left;
+ while (node->rb_right)
+ node = node->rb_right;
+ return (struct rb_node *)node;
+ }
+
+ /*
+ * No left-hand children. Go up till we find an ancestor which
+ * is a right-hand child of its parent.
+ */
+ while ((parent = rb_parent(node)) && node == parent->rb_left)
+ node = parent;
+
+ return parent;
+}
+EXPORT_SYMBOL(rb_prev);
+
+void rb_replace_node(struct rb_node *victim, struct rb_node *new,
+ struct rb_root *root)
+{
+ struct rb_node *parent = rb_parent(victim);
+
+ /* Copy the pointers/colour from the victim to the replacement */
+ *new = *victim;
+
+ /* Set the surrounding nodes to point to the replacement */
+ if (victim->rb_left)
+ rb_set_parent(victim->rb_left, new);
+ if (victim->rb_right)
+ rb_set_parent(victim->rb_right, new);
+ __rb_change_child(victim, new, parent, root);
+}
+EXPORT_SYMBOL(rb_replace_node);
+
+void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new,
+ struct rb_root *root)
+{
+ struct rb_node *parent = rb_parent(victim);
+
+ /* Copy the pointers/colour from the victim to the replacement */
+ *new = *victim;
+
+ /* Set the surrounding nodes to point to the replacement */
+ if (victim->rb_left)
+ rb_set_parent(victim->rb_left, new);
+ if (victim->rb_right)
+ rb_set_parent(victim->rb_right, new);
+
+ /* Set the parent's pointer to the new node last after an RCU barrier
+ * so that the pointers onwards are seen to be set correctly when doing
+ * an RCU walk over the tree.
+ */
+ __rb_change_child_rcu(victim, new, parent, root);
+}
+EXPORT_SYMBOL(rb_replace_node_rcu);
+
+static struct rb_node *rb_left_deepest_node(const struct rb_node *node)
+{
+ for (;;) {
+ if (node->rb_left)
+ node = node->rb_left;
+ else if (node->rb_right)
+ node = node->rb_right;
+ else
+ return (struct rb_node *)node;
+ }
+}
+
+struct rb_node *rb_next_postorder(const struct rb_node *node)
+{
+ const struct rb_node *parent;
+ if (!node)
+ return NULL;
+ parent = rb_parent(node);
+
+ /* If we're sitting on node, we've already seen our children */
+ if (parent && node == parent->rb_left && parent->rb_right) {
+ /* If we are the parent's left node, go to the parent's right
+ * node then all the way down to the left */
+ return rb_left_deepest_node(parent->rb_right);
+ } else
+ /* Otherwise we are the parent's right node, and the parent
+ * should be next */
+ return (struct rb_node *)parent;
+}
+EXPORT_SYMBOL(rb_next_postorder);
+
+struct rb_node *rb_first_postorder(const struct rb_root *root)
+{
+ if (!root->rb_node)
+ return NULL;
+
+ return rb_left_deepest_node(root->rb_node);
+}
+EXPORT_SYMBOL(rb_first_postorder);
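+
+/*
+ * Postorder visits both children before their parent, which makes it
+ * the natural order for tearing a tree down; a sketch using the
+ * rbtree_postorder_for_each_entry_safe() helper from <linux/rbtree.h>
+ * (the container type is hypothetical):
+ *
+ *     struct mything *pos, *tmp;
+ *
+ *     rbtree_postorder_for_each_entry_safe(pos, tmp, root, node)
+ *             kfree(pos);
+ */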
diff --git a/lib/rbtree_test.c b/lib/rbtree_test.c
new file mode 100644
index 000000000..41ae3c757
--- /dev/null
+++ b/lib/rbtree_test.c
@@ -0,0 +1,408 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/rbtree_augmented.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <asm/timex.h>
+
+#define __param(type, name, init, msg) \
+ static type name = init; \
+ module_param(name, type, 0444); \
+ MODULE_PARM_DESC(name, msg);
+
+__param(int, nnodes, 100, "Number of nodes in the rb-tree");
+__param(int, perf_loops, 1000, "Number of iterations modifying the rb-tree");
+__param(int, check_loops, 100, "Number of iterations modifying and verifying the rb-tree");
+
+struct test_node {
+ u32 key;
+ struct rb_node rb;
+
+ /* following fields used for testing augmented rbtree functionality */
+ u32 val;
+ u32 augmented;
+};
+
+static struct rb_root_cached root = RB_ROOT_CACHED;
+static struct test_node *nodes = NULL;
+
+static struct rnd_state rnd;
+
+static void insert(struct test_node *node, struct rb_root_cached *root)
+{
+ struct rb_node **new = &root->rb_root.rb_node, *parent = NULL;
+ u32 key = node->key;
+
+ while (*new) {
+ parent = *new;
+ if (key < rb_entry(parent, struct test_node, rb)->key)
+ new = &parent->rb_left;
+ else
+ new = &parent->rb_right;
+ }
+
+ rb_link_node(&node->rb, parent, new);
+ rb_insert_color(&node->rb, &root->rb_root);
+}
+
+static void insert_cached(struct test_node *node, struct rb_root_cached *root)
+{
+ struct rb_node **new = &root->rb_root.rb_node, *parent = NULL;
+ u32 key = node->key;
+ bool leftmost = true;
+
+ while (*new) {
+ parent = *new;
+ if (key < rb_entry(parent, struct test_node, rb)->key)
+ new = &parent->rb_left;
+ else {
+ new = &parent->rb_right;
+ leftmost = false;
+ }
+ }
+
+ rb_link_node(&node->rb, parent, new);
+ rb_insert_color_cached(&node->rb, root, leftmost);
+}
+
+static inline void erase(struct test_node *node, struct rb_root_cached *root)
+{
+ rb_erase(&node->rb, &root->rb_root);
+}
+
+static inline void erase_cached(struct test_node *node, struct rb_root_cached *root)
+{
+ rb_erase_cached(&node->rb, root);
+}
+
+
+#define NODE_VAL(node) ((node)->val)
+
+RB_DECLARE_CALLBACKS_MAX(static, augment_callbacks,
+ struct test_node, rb, u32, augmented, NODE_VAL)
+
+static void insert_augmented(struct test_node *node,
+ struct rb_root_cached *root)
+{
+ struct rb_node **new = &root->rb_root.rb_node, *rb_parent = NULL;
+ u32 key = node->key;
+ u32 val = node->val;
+ struct test_node *parent;
+
+ while (*new) {
+ rb_parent = *new;
+ parent = rb_entry(rb_parent, struct test_node, rb);
+ if (parent->augmented < val)
+ parent->augmented = val;
+ if (key < parent->key)
+ new = &parent->rb.rb_left;
+ else
+ new = &parent->rb.rb_right;
+ }
+
+ node->augmented = val;
+ rb_link_node(&node->rb, rb_parent, new);
+ rb_insert_augmented(&node->rb, &root->rb_root, &augment_callbacks);
+}
+
+static void insert_augmented_cached(struct test_node *node,
+ struct rb_root_cached *root)
+{
+ struct rb_node **new = &root->rb_root.rb_node, *rb_parent = NULL;
+ u32 key = node->key;
+ u32 val = node->val;
+ struct test_node *parent;
+ bool leftmost = true;
+
+ while (*new) {
+ rb_parent = *new;
+ parent = rb_entry(rb_parent, struct test_node, rb);
+ if (parent->augmented < val)
+ parent->augmented = val;
+ if (key < parent->key)
+ new = &parent->rb.rb_left;
+ else {
+ new = &parent->rb.rb_right;
+ leftmost = false;
+ }
+ }
+
+ node->augmented = val;
+ rb_link_node(&node->rb, rb_parent, new);
+ rb_insert_augmented_cached(&node->rb, root,
+ leftmost, &augment_callbacks);
+}
+
+
+static void erase_augmented(struct test_node *node, struct rb_root_cached *root)
+{
+ rb_erase_augmented(&node->rb, &root->rb_root, &augment_callbacks);
+}
+
+static void erase_augmented_cached(struct test_node *node,
+ struct rb_root_cached *root)
+{
+ rb_erase_augmented_cached(&node->rb, root, &augment_callbacks);
+}
+
+static void init(void)
+{
+ int i;
+ for (i = 0; i < nnodes; i++) {
+ nodes[i].key = prandom_u32_state(&rnd);
+ nodes[i].val = prandom_u32_state(&rnd);
+ }
+}
+
+static bool is_red(struct rb_node *rb)
+{
+ return !(rb->__rb_parent_color & 1);
+}
+
+static int black_path_count(struct rb_node *rb)
+{
+ int count;
+ for (count = 0; rb; rb = rb_parent(rb))
+ count += !is_red(rb);
+ return count;
+}
+
+static void check_postorder_foreach(int nr_nodes)
+{
+ struct test_node *cur, *n;
+ int count = 0;
+ rbtree_postorder_for_each_entry_safe(cur, n, &root.rb_root, rb)
+ count++;
+
+ WARN_ON_ONCE(count != nr_nodes);
+}
+
+static void check_postorder(int nr_nodes)
+{
+ struct rb_node *rb;
+ int count = 0;
+ for (rb = rb_first_postorder(&root.rb_root); rb; rb = rb_next_postorder(rb))
+ count++;
+
+ WARN_ON_ONCE(count != nr_nodes);
+}
+
+static void check(int nr_nodes)
+{
+ struct rb_node *rb;
+ int count = 0, blacks = 0;
+ u32 prev_key = 0;
+
+ for (rb = rb_first(&root.rb_root); rb; rb = rb_next(rb)) {
+ struct test_node *node = rb_entry(rb, struct test_node, rb);
+ WARN_ON_ONCE(node->key < prev_key);
+ WARN_ON_ONCE(is_red(rb) &&
+ (!rb_parent(rb) || is_red(rb_parent(rb))));
+ if (!count)
+ blacks = black_path_count(rb);
+ else
+ WARN_ON_ONCE((!rb->rb_left || !rb->rb_right) &&
+ blacks != black_path_count(rb));
+ prev_key = node->key;
+ count++;
+ }
+
+ WARN_ON_ONCE(count != nr_nodes);
+ WARN_ON_ONCE(count < (1 << black_path_count(rb_last(&root.rb_root))) - 1);
+
+ check_postorder(nr_nodes);
+ check_postorder_foreach(nr_nodes);
+}
+
+static void check_augmented(int nr_nodes)
+{
+ struct rb_node *rb;
+
+ check(nr_nodes);
+ for (rb = rb_first(&root.rb_root); rb; rb = rb_next(rb)) {
+ struct test_node *node = rb_entry(rb, struct test_node, rb);
+ u32 subtree, max = node->val;
+ if (node->rb.rb_left) {
+ subtree = rb_entry(node->rb.rb_left, struct test_node,
+ rb)->augmented;
+ if (max < subtree)
+ max = subtree;
+ }
+ if (node->rb.rb_right) {
+ subtree = rb_entry(node->rb.rb_right, struct test_node,
+ rb)->augmented;
+ if (max < subtree)
+ max = subtree;
+ }
+ WARN_ON_ONCE(node->augmented != max);
+ }
+}
+
+static int __init rbtree_test_init(void)
+{
+ int i, j;
+ cycles_t time1, time2, time;
+ struct rb_node *node;
+
+ nodes = kmalloc_array(nnodes, sizeof(*nodes), GFP_KERNEL);
+ if (!nodes)
+ return -ENOMEM;
+
+ printk(KERN_ALERT "rbtree testing");
+
+ prandom_seed_state(&rnd, 3141592653589793238ULL);
+ init();
+
+ time1 = get_cycles();
+
+ for (i = 0; i < perf_loops; i++) {
+ for (j = 0; j < nnodes; j++)
+ insert(nodes + j, &root);
+ for (j = 0; j < nnodes; j++)
+ erase(nodes + j, &root);
+ }
+
+ time2 = get_cycles();
+ time = time2 - time1;
+
+ time = div_u64(time, perf_loops);
+ printk(" -> test 1 (latency of nnodes insert+delete): %llu cycles\n",
+ (unsigned long long)time);
+
+ time1 = get_cycles();
+
+ for (i = 0; i < perf_loops; i++) {
+ for (j = 0; j < nnodes; j++)
+ insert_cached(nodes + j, &root);
+ for (j = 0; j < nnodes; j++)
+ erase_cached(nodes + j, &root);
+ }
+
+ time2 = get_cycles();
+ time = time2 - time1;
+
+ time = div_u64(time, perf_loops);
+ printk(" -> test 2 (latency of nnodes cached insert+delete): %llu cycles\n",
+ (unsigned long long)time);
+
+ for (i = 0; i < nnodes; i++)
+ insert(nodes + i, &root);
+
+ time1 = get_cycles();
+
+ for (i = 0; i < perf_loops; i++) {
+ for (node = rb_first(&root.rb_root); node; node = rb_next(node))
+ ;
+ }
+
+ time2 = get_cycles();
+ time = time2 - time1;
+
+ time = div_u64(time, perf_loops);
+ printk(" -> test 3 (latency of inorder traversal): %llu cycles\n",
+ (unsigned long long)time);
+
+ time1 = get_cycles();
+
+ for (i = 0; i < perf_loops; i++)
+ node = rb_first(&root.rb_root);
+
+ time2 = get_cycles();
+ time = time2 - time1;
+
+ time = div_u64(time, perf_loops);
+ printk(" -> test 4 (latency to fetch first node)\n");
+ printk(" non-cached: %llu cycles\n", (unsigned long long)time);
+
+ time1 = get_cycles();
+
+ for (i = 0; i < perf_loops; i++)
+ node = rb_first_cached(&root);
+
+ time2 = get_cycles();
+ time = time2 - time1;
+
+ time = div_u64(time, perf_loops);
+ printk(" cached: %llu cycles\n", (unsigned long long)time);
+
+ for (i = 0; i < nnodes; i++)
+ erase(nodes + i, &root);
+
+ /* run checks */
+ for (i = 0; i < check_loops; i++) {
+ init();
+ for (j = 0; j < nnodes; j++) {
+ check(j);
+ insert(nodes + j, &root);
+ }
+ for (j = 0; j < nnodes; j++) {
+ check(nnodes - j);
+ erase(nodes + j, &root);
+ }
+ check(0);
+ }
+
+ printk(KERN_ALERT "augmented rbtree testing");
+
+ init();
+
+ time1 = get_cycles();
+
+ for (i = 0; i < perf_loops; i++) {
+ for (j = 0; j < nnodes; j++)
+ insert_augmented(nodes + j, &root);
+ for (j = 0; j < nnodes; j++)
+ erase_augmented(nodes + j, &root);
+ }
+
+ time2 = get_cycles();
+ time = time2 - time1;
+
+ time = div_u64(time, perf_loops);
+ printk(" -> test 1 (latency of nnodes insert+delete): %llu cycles\n", (unsigned long long)time);
+
+ time1 = get_cycles();
+
+ for (i = 0; i < perf_loops; i++) {
+ for (j = 0; j < nnodes; j++)
+ insert_augmented_cached(nodes + j, &root);
+ for (j = 0; j < nnodes; j++)
+ erase_augmented_cached(nodes + j, &root);
+ }
+
+ time2 = get_cycles();
+ time = time2 - time1;
+
+ time = div_u64(time, perf_loops);
+ printk(" -> test 2 (latency of nnodes cached insert+delete): %llu cycles\n", (unsigned long long)time);
+
+ for (i = 0; i < check_loops; i++) {
+ init();
+ for (j = 0; j < nnodes; j++) {
+ check_augmented(j);
+ insert_augmented(nodes + j, &root);
+ }
+ for (j = 0; j < nnodes; j++) {
+ check_augmented(nnodes - j);
+ erase_augmented(nodes + j, &root);
+ }
+ check_augmented(0);
+ }
+
+ kfree(nodes);
+
+ return -EAGAIN; /* Failing the init will directly unload the module */
+}
+
+static void __exit rbtree_test_exit(void)
+{
+ printk(KERN_ALERT "test exit\n");
+}
+
+module_init(rbtree_test_init)
+module_exit(rbtree_test_exit)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Michel Lespinasse");
+MODULE_DESCRIPTION("Red Black Tree test");
diff --git a/lib/rcuref.c b/lib/rcuref.c
new file mode 100644
index 000000000..5ec00a4a6
--- /dev/null
+++ b/lib/rcuref.c
@@ -0,0 +1,281 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/*
+ * rcuref - A scalable reference count implementation for RCU managed objects
+ *
+ * rcuref is provided to replace open coded reference count implementations
+ * based on atomic_t. It explicitly protects RCU managed objects which can
+ * be visible even after the last reference has been dropped and the object
+ * is heading towards destruction.
+ *
+ * A common usage pattern is:
+ *
+ * get()
+ * rcu_read_lock();
+ * p = get_ptr();
+ * if (p && !atomic_inc_not_zero(&p->refcnt))
+ * p = NULL;
+ * rcu_read_unlock();
+ * return p;
+ *
+ * put()
+ * if (!atomic_dec_return(&p->refcnt)) {
+ * remove_ptr(p);
+ * kfree_rcu(p, rcu);
+ * }
+ *
+ * atomic_inc_not_zero() is implemented with a try_cmpxchg() loop which has
+ * O(N^2) behaviour under contention with N concurrent operations.
+ *
+ * rcuref uses atomic_add_negative_relaxed() for the fast path, which scales
+ * better under contention.
+ *
+ * Why not refcount?
+ * =================
+ *
+ * In principle it should be possible to make refcount use the rcuref
+ * scheme, but the destruction race described below cannot be prevented
+ * unless the protected object is RCU managed.
+ *
+ * Theory of operation
+ * ===================
+ *
+ * rcuref uses an unsigned integer reference counter. As long as the
+ * counter value is greater than or equal to RCUREF_ONEREF and not larger
+ * than RCUREF_MAXREF the reference is alive:
+ *
+ * ONEREF       MAXREF       SATURATED    RELEASED     DEAD         NOREF
+ * 0x00000000   0x7FFFFFFF   0xA0000000   0xC0000000   0xE0000000   0xFFFFFFFF
+ *
+ * valid zone:      0x00000000 - 0x7FFFFFFF
+ * saturation zone: 0x80000000 - 0xBFFFFFFF (SATURATED marks its midpoint)
+ * dead zone:       0xC0000000 - 0xFFFFFFFF (DEAD marks its midpoint)
+ *
+ * The get() and put() operations do unconditional increments and
+ * decrements. The result is checked after the operation. This optimizes
+ * for the fast path.
+ *
+ * If the reference count is saturated or dead, then the increments and
+ * decrements are not harmful as the reference count still stays in the
+ * respective zones and is always set back to SATURATED resp. DEAD. The
+ * zones have room for 2^28 racing operations in each direction, which
+ * makes it practically impossible to escape the zones.
+ *
+ * Once the last reference is dropped the reference count becomes
+ * RCUREF_NOREF which forces rcuref_put() into the slowpath operation. The
+ * slowpath then tries to set the reference count from RCUREF_NOREF to
+ * RCUREF_DEAD via a cmpxchg(). This opens a small window where a
+ * concurrent rcuref_get() can acquire the reference count and bring it
+ * back to RCUREF_ONEREF or even drop the reference again and mark it DEAD.
+ *
+ * If the cmpxchg() succeeds then a concurrent rcuref_get() will result in
+ * DEAD + 1, which is inside the dead zone. If that happens the reference
+ * count is put back to DEAD.
+ *
+ * The actual race is possible due to the unconditional increment and
+ * decrements in rcuref_get() and rcuref_put():
+ *
+ * T1 T2
+ * get() put()
+ * if (atomic_add_negative(-1, &ref->refcnt))
+ * succeeds-> atomic_cmpxchg(&ref->refcnt, NOREF, DEAD);
+ *
+ * atomic_add_negative(1, &ref->refcnt); <- Elevates refcount to DEAD + 1
+ *
+ * As the result of T1's add is negative, the get() goes into the slow path
+ * and observes refcnt being in the dead zone which makes the operation fail.
+ *
+ * Possible critical states:
+ *
+ * Context Counter References Operation
+ * T1 0 1 init()
+ * T2 1 2 get()
+ * T1 0 1 put()
+ * T2 -1 0 put() tries to mark dead
+ * T1 0 1 get()
+ * T2 0 1 put() mark dead fails
+ * T1 -1 0 put() tries to mark dead
+ * T1 DEAD 0 put() mark dead succeeds
+ * T2 DEAD+1 0 get() fails and puts it back to DEAD
+ *
+ * Of course there are more complex scenarios, but the above illustrates
+ * the working principle. The rest is left to the imagination of the
+ * reader.
+ *
+ * Deconstruction race
+ * ===================
+ *
+ * The release operation must be protected by prohibiting a grace period in
+ * order to prevent a possible use after free:
+ *
+ * T1 T2
+ * put() get()
+ * // ref->refcnt = ONEREF
+ * if (!atomic_add_negative(-1, &ref->refcnt))
+ * return false; <- Not taken
+ *
+ * // ref->refcnt == NOREF
+ * --> preemption
+ * // Elevates ref->refcnt to ONEREF
+ * if (!atomic_add_negative(1, &ref->refcnt))
+ * return true; <- taken
+ *
+ * if (put(&p->ref)) { <-- Succeeds
+ * remove_pointer(p);
+ * kfree_rcu(p, rcu);
+ * }
+ *
+ * RCU grace period ends, object is freed
+ *
+ * atomic_cmpxchg(&ref->refcnt, NOREF, DEAD); <- UAF
+ *
+ * This is prevented by disabling preemption around the put() operation,
+ * which in most kernel configurations is cheaper than an rcu_read_lock() /
+ * rcu_read_unlock() pair and in many cases even a NOOP. In any case it
+ * prevents the grace period which keeps the object alive until all put()
+ * operations complete.
+ *
+ * Saturation protection
+ * =====================
+ *
+ * The reference count has a saturation limit RCUREF_MAXREF (INT_MAX).
+ * Once this is exceeded the reference count becomes stale by setting it
+ * to RCUREF_SATURATED, which will cause a memory leak, but it prevents
+ * wrap arounds which obviously cause worse problems than a memory
+ * leak. When saturation is reached a warning is emitted.
+ *
+ * Race conditions
+ * ===============
+ *
+ * All reference count increment/decrement operations are unconditional and
+ * only verified after the fact. This optimizes for the good case and takes
+ * the occasional race vs. a dead or already saturated refcount into
+ * account. The saturation and dead zones are large enough to
+ * accommodate that.
+ *
+ * Memory ordering
+ * ===============
+ *
+ * Memory ordering rules are slightly relaxed wrt regular atomic_t functions
+ * and provide only what is strictly required for refcounts.
+ *
+ * The increments are fully relaxed; these will not provide ordering. The
+ * rationale is that whatever is used to obtain the object to increase the
+ * reference count on will provide the ordering. For locked data
+ * structures, it's the lock acquire; for RCU/lockless data structures, it's
+ * the dependent load.
+ *
+ * rcuref_get() provides a control dependency ordering future stores which
+ * ensures that the object is not modified when acquiring a reference
+ * fails.
+ *
+ * rcuref_put() provides release order, i.e. all prior loads and stores
+ * will be issued before. It also provides a control dependency ordering
+ * against the subsequent destruction of the object.
+ *
+ * If rcuref_put() successfully dropped the last reference and marked the
+ * object DEAD it also provides acquire ordering.
+ */
+
+#include <linux/export.h>
+#include <linux/rcuref.h>
+
+/**
+ * rcuref_get_slowpath - Slowpath of rcuref_get()
+ * @ref: Pointer to the reference count
+ *
+ * Invoked when the reference count is outside of the valid zone.
+ *
+ * Return:
+ * False if the reference count was already marked dead
+ *
+ * True if the reference count is saturated, which prevents the
+ * object from being deconstructed ever.
+ */
+bool rcuref_get_slowpath(rcuref_t *ref)
+{
+ unsigned int cnt = atomic_read(&ref->refcnt);
+
+ /*
+ * If the reference count was already marked dead, undo the
+ * increment so it stays in the middle of the dead zone and return
+ * fail.
+ */
+ if (cnt >= RCUREF_RELEASED) {
+ atomic_set(&ref->refcnt, RCUREF_DEAD);
+ return false;
+ }
+
+ /*
+ * If it was saturated, warn and mark it so. In case the increment
+ * was already on a saturated value restore the saturation
+ * marker. This keeps it in the middle of the saturation zone and
+ * prevents the reference count from overflowing. This leaks the
+ * object memory, but prevents the obvious reference count overflow
+ * damage.
+ */
+ if (WARN_ONCE(cnt > RCUREF_MAXREF, "rcuref saturated - leaking memory"))
+ atomic_set(&ref->refcnt, RCUREF_SATURATED);
+ return true;
+}
+EXPORT_SYMBOL_GPL(rcuref_get_slowpath);
+
+/**
+ * rcuref_put_slowpath - Slowpath of __rcuref_put()
+ * @ref: Pointer to the reference count
+ *
+ * Invoked when the reference count is outside of the valid zone.
+ *
+ * Return:
+ * True if this was the last reference with no future references
+ * possible. This signals the caller that it can safely schedule the
+ * object, which is protected by the reference counter, for
+ * deconstruction.
+ *
+ * False if there are still active references or the put() raced
+ * with a concurrent get()/put() pair. Caller is not allowed to
+ * deconstruct the protected object.
+ */
+bool rcuref_put_slowpath(rcuref_t *ref)
+{
+ unsigned int cnt = atomic_read(&ref->refcnt);
+
+ /* Did this drop the last reference? */
+ if (likely(cnt == RCUREF_NOREF)) {
+ /*
+ * Carefully try to set the reference count to RCUREF_DEAD.
+ *
+ * This can fail if a concurrent get() operation has
+ * elevated it again or the corresponding put() even marked
+ * it dead already. Both are valid situations and do not
+ * require a retry. If this fails the caller is not
+ * allowed to deconstruct the object.
+ */
+ if (atomic_cmpxchg_release(&ref->refcnt, RCUREF_NOREF, RCUREF_DEAD) != RCUREF_NOREF)
+ return false;
+
+ /*
+ * The caller can safely schedule the object for
+ * deconstruction. Provide acquire ordering.
+ */
+ smp_acquire__after_ctrl_dep();
+ return true;
+ }
+
+ /*
+ * If the reference count was already in the dead zone, then this
+ * put() operation is imbalanced. Warn, put the reference count back to
+ * DEAD and tell the caller to not deconstruct the object.
+ */
+ if (WARN_ONCE(cnt >= RCUREF_RELEASED, "rcuref - imbalanced put()")) {
+ atomic_set(&ref->refcnt, RCUREF_DEAD);
+ return false;
+ }
+
+ /*
+ * This is a put() operation on a saturated refcount. Restore the
+ * mean saturation value and tell the caller to not deconstruct the
+ * object.
+ */
+ if (cnt > RCUREF_MAXREF)
+ atomic_set(&ref->refcnt, RCUREF_SATURATED);
+ return false;
+}
+EXPORT_SYMBOL_GPL(rcuref_put_slowpath);
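+
+/*
+ * A minimal usage sketch tying the above together (rcuref_init(),
+ * rcuref_get() and rcuref_put() are declared in <linux/rcuref.h>; the
+ * object layout and the lookup helpers are hypothetical):
+ *
+ *     struct obj { rcuref_t ref; struct rcu_head rcu; };
+ *
+ *     rcuref_init(&p->ref, 1);
+ *
+ *     rcu_read_lock();
+ *     p = lookup_ptr();
+ *     if (p && !rcuref_get(&p->ref))
+ *             p = NULL;
+ *     rcu_read_unlock();
+ *
+ *     if (p && rcuref_put(&p->ref)) {
+ *             remove_ptr(p);
+ *             kfree_rcu(p, rcu);
+ *     }
+ */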
diff --git a/lib/reed_solomon/Makefile b/lib/reed_solomon/Makefile
new file mode 100644
index 000000000..5d4fa68f2
--- /dev/null
+++ b/lib/reed_solomon/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# This is a modified version of the Reed-Solomon library.
+#
+
+obj-$(CONFIG_REED_SOLOMON) += reed_solomon.o
+obj-$(CONFIG_REED_SOLOMON_TEST) += test_rslib.o
diff --git a/lib/reed_solomon/decode_rs.c b/lib/reed_solomon/decode_rs.c
new file mode 100644
index 000000000..805de84ae
--- /dev/null
+++ b/lib/reed_solomon/decode_rs.c
@@ -0,0 +1,326 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Generic Reed Solomon encoder / decoder library
+ *
+ * Copyright 2002, Phil Karn, KA9Q
+ * May be used under the terms of the GNU General Public License (GPL)
+ *
+ * Adaptation to the kernel by Thomas Gleixner (tglx@linutronix.de)
+ *
+ * Generic data width independent code which is included by the wrappers.
+ */
+{
+ struct rs_codec *rs = rsc->codec;
+ int deg_lambda, el, deg_omega;
+ int i, j, r, k, pad;
+ int nn = rs->nn;
+ int nroots = rs->nroots;
+ int fcr = rs->fcr;
+ int prim = rs->prim;
+ int iprim = rs->iprim;
+ uint16_t *alpha_to = rs->alpha_to;
+ uint16_t *index_of = rs->index_of;
+ uint16_t u, q, tmp, num1, num2, den, discr_r, syn_error;
+ int count = 0;
+ int num_corrected;
+ uint16_t msk = (uint16_t) rs->nn;
+
+ /*
+ * The decoder buffers are in the rs control struct. They are
+ * arrays sized [nroots + 1]
+ */
+ uint16_t *lambda = rsc->buffers + RS_DECODE_LAMBDA * (nroots + 1);
+ uint16_t *syn = rsc->buffers + RS_DECODE_SYN * (nroots + 1);
+ uint16_t *b = rsc->buffers + RS_DECODE_B * (nroots + 1);
+ uint16_t *t = rsc->buffers + RS_DECODE_T * (nroots + 1);
+ uint16_t *omega = rsc->buffers + RS_DECODE_OMEGA * (nroots + 1);
+ uint16_t *root = rsc->buffers + RS_DECODE_ROOT * (nroots + 1);
+ uint16_t *reg = rsc->buffers + RS_DECODE_REG * (nroots + 1);
+ uint16_t *loc = rsc->buffers + RS_DECODE_LOC * (nroots + 1);
+
+ /* Check length parameter for validity */
+ pad = nn - nroots - len;
+ BUG_ON(pad < 0 || pad >= nn - nroots);
+
+ /* Does the caller provide the syndrome? */
+ if (s != NULL) {
+ for (i = 0; i < nroots; i++) {
+ /* The syndrome is in index form,
+ * so nn represents zero
+ */
+ if (s[i] != nn)
+ goto decode;
+ }
+
+ /* syndrome is zero, no errors to correct */
+ return 0;
+ }
+
+ /* form the syndromes; i.e., evaluate data(x) at roots of
+ * g(x) */
+ for (i = 0; i < nroots; i++)
+ syn[i] = (((uint16_t) data[0]) ^ invmsk) & msk;
+
+ for (j = 1; j < len; j++) {
+ for (i = 0; i < nroots; i++) {
+ if (syn[i] == 0) {
+ syn[i] = (((uint16_t) data[j]) ^
+ invmsk) & msk;
+ } else {
+ syn[i] = ((((uint16_t) data[j]) ^
+ invmsk) & msk) ^
+ alpha_to[rs_modnn(rs, index_of[syn[i]] +
+ (fcr + i) * prim)];
+ }
+ }
+ }
+
+ for (j = 0; j < nroots; j++) {
+ for (i = 0; i < nroots; i++) {
+ if (syn[i] == 0) {
+ syn[i] = ((uint16_t) par[j]) & msk;
+ } else {
+ syn[i] = (((uint16_t) par[j]) & msk) ^
+ alpha_to[rs_modnn(rs, index_of[syn[i]] +
+ (fcr+i)*prim)];
+ }
+ }
+ }
+ s = syn;
+
+ /* Convert syndromes to index form, checking for nonzero condition */
+ syn_error = 0;
+ for (i = 0; i < nroots; i++) {
+ syn_error |= s[i];
+ s[i] = index_of[s[i]];
+ }
+
+ if (!syn_error) {
+ /* if syndrome is zero, data[] is a codeword and there are no
+ * errors to correct. So return data[] unmodified
+ */
+ return 0;
+ }
+
+ decode:
+ memset(&lambda[1], 0, nroots * sizeof(lambda[0]));
+ lambda[0] = 1;
+
+ if (no_eras > 0) {
+ /* Init lambda to be the erasure locator polynomial */
+ lambda[1] = alpha_to[rs_modnn(rs,
+ prim * (nn - 1 - (eras_pos[0] + pad)))];
+ for (i = 1; i < no_eras; i++) {
+ u = rs_modnn(rs, prim * (nn - 1 - (eras_pos[i] + pad)));
+ for (j = i + 1; j > 0; j--) {
+ tmp = index_of[lambda[j - 1]];
+ if (tmp != nn) {
+ lambda[j] ^=
+ alpha_to[rs_modnn(rs, u + tmp)];
+ }
+ }
+ }
+ }
+
+ for (i = 0; i < nroots + 1; i++)
+ b[i] = index_of[lambda[i]];
+
+ /*
+ * Begin Berlekamp-Massey algorithm to determine error+erasure
+ * locator polynomial
+ */
+ r = no_eras;
+ el = no_eras;
+ while (++r <= nroots) { /* r is the step number */
+ /* Compute discrepancy at the r-th step in poly-form */
+ discr_r = 0;
+ for (i = 0; i < r; i++) {
+ if ((lambda[i] != 0) && (s[r - i - 1] != nn)) {
+ discr_r ^=
+ alpha_to[rs_modnn(rs,
+ index_of[lambda[i]] +
+ s[r - i - 1])];
+ }
+ }
+ discr_r = index_of[discr_r]; /* Index form */
+ if (discr_r == nn) {
+ /* 2 lines below: B(x) <-- x*B(x) */
+ memmove (&b[1], b, nroots * sizeof (b[0]));
+ b[0] = nn;
+ } else {
+ /* 7 lines below: T(x) <-- lambda(x)-discr_r*x*b(x) */
+ t[0] = lambda[0];
+ for (i = 0; i < nroots; i++) {
+ if (b[i] != nn) {
+ t[i + 1] = lambda[i + 1] ^
+ alpha_to[rs_modnn(rs, discr_r +
+ b[i])];
+ } else
+ t[i + 1] = lambda[i + 1];
+ }
+ if (2 * el <= r + no_eras - 1) {
+ el = r + no_eras - el;
+ /*
+ * 2 lines below: B(x) <-- inv(discr_r) *
+ * lambda(x)
+ */
+ for (i = 0; i <= nroots; i++) {
+ b[i] = (lambda[i] == 0) ? nn :
+ rs_modnn(rs, index_of[lambda[i]]
+ - discr_r + nn);
+ }
+ } else {
+ /* 2 lines below: B(x) <-- x*B(x) */
+ memmove(&b[1], b, nroots * sizeof(b[0]));
+ b[0] = nn;
+ }
+ memcpy(lambda, t, (nroots + 1) * sizeof(t[0]));
+ }
+ }
+
+ /* Convert lambda to index form and compute deg(lambda(x)) */
+ deg_lambda = 0;
+ for (i = 0; i < nroots + 1; i++) {
+ lambda[i] = index_of[lambda[i]];
+ if (lambda[i] != nn)
+ deg_lambda = i;
+ }
+
+ if (deg_lambda == 0) {
+ /*
+ * deg(lambda) is zero even though the syndrome is non-zero
+ * => uncorrectable error detected
+ */
+ return -EBADMSG;
+ }
+
+ /* Find roots of error+erasure locator polynomial by Chien search */
+ memcpy(&reg[1], &lambda[1], nroots * sizeof(reg[0]));
+ count = 0; /* Number of roots of lambda(x) */
+ for (i = 1, k = iprim - 1; i <= nn; i++, k = rs_modnn(rs, k + iprim)) {
+ q = 1; /* lambda[0] is always 0 (index form of 1) */
+ for (j = deg_lambda; j > 0; j--) {
+ if (reg[j] != nn) {
+ reg[j] = rs_modnn(rs, reg[j] + j);
+ q ^= alpha_to[reg[j]];
+ }
+ }
+ if (q != 0)
+ continue; /* Not a root */
+
+ if (k < pad) {
+ /* Impossible error location. Uncorrectable error. */
+ return -EBADMSG;
+ }
+
+ /* store root (index-form) and error location number */
+ root[count] = i;
+ loc[count] = k;
+ /* If we've already found max possible roots,
+ * abort the search to save time
+ */
+ if (++count == deg_lambda)
+ break;
+ }
+ if (deg_lambda != count) {
+ /*
+ * deg(lambda) unequal to number of roots => uncorrectable
+ * error detected
+ */
+ return -EBADMSG;
+ }
+ /*
+ * Compute err+eras evaluator poly omega(x) = s(x)*lambda(x) (modulo
+ * x**nroots). in index form. Also find deg(omega).
+ */
+ deg_omega = deg_lambda - 1;
+ for (i = 0; i <= deg_omega; i++) {
+ tmp = 0;
+ for (j = i; j >= 0; j--) {
+ if ((s[i - j] != nn) && (lambda[j] != nn))
+ tmp ^=
+ alpha_to[rs_modnn(rs, s[i - j] + lambda[j])];
+ }
+ omega[i] = index_of[tmp];
+ }
+
+ /*
+ * Compute error values in poly-form. num1 = omega(inv(X(l))), num2 =
+ * inv(X(l))**(fcr-1) and den = lambda_pr(inv(X(l))) all in poly-form
+ * Note: we reuse the buffer for b to store the correction pattern
+ */
+ num_corrected = 0;
+ for (j = count - 1; j >= 0; j--) {
+ num1 = 0;
+ for (i = deg_omega; i >= 0; i--) {
+ if (omega[i] != nn)
+ num1 ^= alpha_to[rs_modnn(rs, omega[i] +
+ i * root[j])];
+ }
+
+ if (num1 == 0) {
+ /* Nothing to correct at this position */
+ b[j] = 0;
+ continue;
+ }
+
+ num2 = alpha_to[rs_modnn(rs, root[j] * (fcr - 1) + nn)];
+ den = 0;
+
+ /* lambda[i+1] for i even is the formal derivative
+ * lambda_pr of lambda[i] */
+ for (i = min(deg_lambda, nroots - 1) & ~1; i >= 0; i -= 2) {
+ if (lambda[i + 1] != nn) {
+ den ^= alpha_to[rs_modnn(rs, lambda[i + 1] +
+ i * root[j])];
+ }
+ }
+
+ b[j] = alpha_to[rs_modnn(rs, index_of[num1] +
+ index_of[num2] +
+ nn - index_of[den])];
+ num_corrected++;
+ }
+
+ /*
+ * We compute the syndrome of the 'error' and check that it matches
+ * the syndrome of the received word
+ */
+ for (i = 0; i < nroots; i++) {
+ tmp = 0;
+ for (j = 0; j < count; j++) {
+ if (b[j] == 0)
+ continue;
+
+ k = (fcr + i) * prim * (nn-loc[j]-1);
+ tmp ^= alpha_to[rs_modnn(rs, index_of[b[j]] + k)];
+ }
+
+ if (tmp != alpha_to[s[i]])
+ return -EBADMSG;
+ }
+
+ /*
+ * Store the error correction pattern, if a
+ * correction buffer is available
+ */
+ if (corr && eras_pos) {
+ j = 0;
+ for (i = 0; i < count; i++) {
+ if (b[i]) {
+ corr[j] = b[i];
+ eras_pos[j++] = loc[i] - pad;
+ }
+ }
+ } else if (data && par) {
+ /* Apply error to data and parity */
+ for (i = 0; i < count; i++) {
+ if (loc[i] < (nn - nroots))
+ data[loc[i] - pad] ^= b[i];
+ else
+ par[loc[i] - pad - len] ^= b[i];
+ }
+ }
+
+ return num_corrected;
+}
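+
+/*
+ * This body is included by the width-specific wrappers such as
+ * decode_rs8(). A hedged caller-side sketch (wrapper prototype from
+ * <linux/rslib.h>): correct a 512 byte block in place, with no
+ * precomputed syndrome, no erasures and no correction buffer:
+ *
+ *     int numerr = decode_rs8(rs, data, par, 512, NULL, 0, NULL, 0, NULL);
+ *
+ *     if (numerr < 0)
+ *             pr_err("uncorrectable block\n");
+ */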
diff --git a/lib/reed_solomon/encode_rs.c b/lib/reed_solomon/encode_rs.c
new file mode 100644
index 000000000..9112d46e8
--- /dev/null
+++ b/lib/reed_solomon/encode_rs.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Generic Reed Solomon encoder / decoder library
+ *
+ * Copyright 2002, Phil Karn, KA9Q
+ * May be used under the terms of the GNU General Public License (GPL)
+ *
+ * Adaptation to the kernel by Thomas Gleixner (tglx@linutronix.de)
+ *
+ * Generic data width independent code which is included by the wrappers.
+ */
+{
+ struct rs_codec *rs = rsc->codec;
+ int i, j, pad;
+ int nn = rs->nn;
+ int nroots = rs->nroots;
+ uint16_t *alpha_to = rs->alpha_to;
+ uint16_t *index_of = rs->index_of;
+ uint16_t *genpoly = rs->genpoly;
+ uint16_t fb;
+ uint16_t msk = (uint16_t) rs->nn;
+
+ /* Check length parameter for validity */
+ pad = nn - nroots - len;
+ if (pad < 0 || pad >= nn)
+ return -ERANGE;
+
+ for (i = 0; i < len; i++) {
+ fb = index_of[((((uint16_t) data[i])^invmsk) & msk) ^ par[0]];
+ /* feedback term is non-zero */
+ if (fb != nn) {
+ for (j = 1; j < nroots; j++) {
+ par[j] ^= alpha_to[rs_modnn(rs, fb +
+ genpoly[nroots - j])];
+ }
+ }
+ /* Shift */
+ memmove(&par[0], &par[1], sizeof(uint16_t) * (nroots - 1));
+ if (fb != nn) {
+ par[nroots - 1] = alpha_to[rs_modnn(rs,
+ fb + genpoly[0])];
+ } else {
+ par[nroots - 1] = 0;
+ }
+ }
+ return 0;
+}
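+
+/*
+ * This body is included by the width-specific wrappers such as
+ * encode_rs8(). A hedged caller-side sketch (wrapper prototype from
+ * <linux/rslib.h>); note that par[] must be zeroed by the caller before
+ * encoding:
+ *
+ *     uint16_t par[6];
+ *
+ *     memset(par, 0, sizeof(par));
+ *     encode_rs8(rs, data, 512, par, 0);
+ */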
diff --git a/lib/reed_solomon/reed_solomon.c b/lib/reed_solomon/reed_solomon.c
new file mode 100644
index 000000000..bbc01bad3
--- /dev/null
+++ b/lib/reed_solomon/reed_solomon.c
@@ -0,0 +1,424 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Generic Reed Solomon encoder / decoder library
+ *
+ * Copyright (C) 2004 Thomas Gleixner (tglx@linutronix.de)
+ *
+ * Reed Solomon code lifted from reed solomon library written by Phil Karn
+ * Copyright 2002 Phil Karn, KA9Q
+ *
+ * Description:
+ *
+ * The generic Reed Solomon library provides runtime configurable
+ * encoding / decoding of RS codes.
+ *
+ * Each user must call init_rs to get a pointer to a rs_control structure
+ * for the given rs parameters. The control struct is unique per instance.
+ * It points to a codec which can be shared by multiple control structures.
+ * If a codec is newly allocated then the polynomial arrays for fast
+ * encoding / decoding are built. This can take some time so make sure not
+ * to call this function from a time critical path. Usually a module /
+ * driver should initialize the necessary rs_control structure on module /
+ * driver init and release it on exit.
+ *
+ * The encoding puts the calculated syndrome into a given syndrome buffer.
+ *
+ * The decoding is a two-step process. The first step calculates the
+ * syndrome over the received (data + syndrome) and calls the second stage,
+ * which does the decoding / error correction itself. Many hw encoders
+ * provide a syndrome calculation over the received data + syndrome and can
+ * call the second stage directly.
+ */
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/rslib.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+
+enum {
+ RS_DECODE_LAMBDA,
+ RS_DECODE_SYN,
+ RS_DECODE_B,
+ RS_DECODE_T,
+ RS_DECODE_OMEGA,
+ RS_DECODE_ROOT,
+ RS_DECODE_REG,
+ RS_DECODE_LOC,
+ RS_DECODE_NUM_BUFFERS
+};
+
+/* This list holds all currently allocated rs codec structures */
+static LIST_HEAD(codec_list);
+/* Protection for the list */
+static DEFINE_MUTEX(rslistlock);
+
+/**
+ * codec_init - Initialize a Reed-Solomon codec
+ * @symsize: symbol size, bits (1-8)
+ * @gfpoly: Field generator polynomial coefficients
+ * @gffunc: Field generator function
+ * @fcr: first root of RS code generator polynomial, index form
+ * @prim: primitive element to generate polynomial roots
+ * @nroots: RS code generator polynomial degree (number of roots)
+ * @gfp: GFP_ flags for allocations
+ *
+ * Allocate a codec structure and the polynomial arrays for faster
+ * en/decoding. Fill the arrays according to the given parameters.
+ */
+static struct rs_codec *codec_init(int symsize, int gfpoly, int (*gffunc)(int),
+ int fcr, int prim, int nroots, gfp_t gfp)
+{
+ int i, j, sr, root, iprim;
+ struct rs_codec *rs;
+
+ rs = kzalloc(sizeof(*rs), gfp);
+ if (!rs)
+ return NULL;
+
+ INIT_LIST_HEAD(&rs->list);
+
+ rs->mm = symsize;
+ rs->nn = (1 << symsize) - 1;
+ rs->fcr = fcr;
+ rs->prim = prim;
+ rs->nroots = nroots;
+ rs->gfpoly = gfpoly;
+ rs->gffunc = gffunc;
+
+ /* Allocate the arrays */
+ rs->alpha_to = kmalloc_array(rs->nn + 1, sizeof(uint16_t), gfp);
+ if (rs->alpha_to == NULL)
+ goto err;
+
+ rs->index_of = kmalloc_array(rs->nn + 1, sizeof(uint16_t), gfp);
+ if (rs->index_of == NULL)
+ goto err;
+
+ rs->genpoly = kmalloc_array(rs->nroots + 1, sizeof(uint16_t), gfp);
+ if (rs->genpoly == NULL)
+ goto err;
+
+ /* Generate Galois field lookup tables */
+ rs->index_of[0] = rs->nn; /* log(zero) = -inf */
+ rs->alpha_to[rs->nn] = 0; /* alpha**-inf = 0 */
+ if (gfpoly) {
+ sr = 1;
+ for (i = 0; i < rs->nn; i++) {
+ rs->index_of[sr] = i;
+ rs->alpha_to[i] = sr;
+ sr <<= 1;
+ if (sr & (1 << symsize))
+ sr ^= gfpoly;
+ sr &= rs->nn;
+ }
+ } else {
+ sr = gffunc(0);
+ for (i = 0; i < rs->nn; i++) {
+ rs->index_of[sr] = i;
+ rs->alpha_to[i] = sr;
+ sr = gffunc(sr);
+ }
+ }
+ /* If it's not primitive, exit */
+ if (sr != rs->alpha_to[0])
+ goto err;
+
+ /* Find prim-th root of 1, used in decoding */
+ for (iprim = 1; (iprim % prim) != 0; iprim += rs->nn);
+ /* prim-th root of 1, index form */
+ rs->iprim = iprim / prim;
+
+ /* Form RS code generator polynomial from its roots */
+ rs->genpoly[0] = 1;
+ for (i = 0, root = fcr * prim; i < nroots; i++, root += prim) {
+ rs->genpoly[i + 1] = 1;
+ /* Multiply rs->genpoly[] by @**(root + x) */
+ for (j = i; j > 0; j--) {
+ if (rs->genpoly[j] != 0) {
+ rs->genpoly[j] = rs->genpoly[j - 1] ^
+ rs->alpha_to[rs_modnn(rs,
+ rs->index_of[rs->genpoly[j]] + root)];
+ } else
+ rs->genpoly[j] = rs->genpoly[j - 1];
+ }
+ /* rs->genpoly[0] can never be zero */
+ rs->genpoly[0] =
+ rs->alpha_to[rs_modnn(rs,
+ rs->index_of[rs->genpoly[0]] + root)];
+ }
+ /* convert rs->genpoly[] to index form for quicker encoding */
+ for (i = 0; i <= nroots; i++)
+ rs->genpoly[i] = rs->index_of[rs->genpoly[i]];
+
+ rs->users = 1;
+ list_add(&rs->list, &codec_list);
+ return rs;
+
+err:
+ kfree(rs->genpoly);
+ kfree(rs->index_of);
+ kfree(rs->alpha_to);
+ kfree(rs);
+ return NULL;
+}
+
+/**
+ * free_rs - Free the rs control structure
+ * @rs: The control structure which is no longer used by the
+ * caller
+ *
+ * Free the control structure. If @rs is the last user of the associated
+ * codec, free the codec as well.
+ */
+void free_rs(struct rs_control *rs)
+{
+ struct rs_codec *cd;
+
+ if (!rs)
+ return;
+
+ cd = rs->codec;
+ mutex_lock(&rslistlock);
+ cd->users--;
+ if (!cd->users) {
+ list_del(&cd->list);
+ kfree(cd->alpha_to);
+ kfree(cd->index_of);
+ kfree(cd->genpoly);
+ kfree(cd);
+ }
+ mutex_unlock(&rslistlock);
+ kfree(rs);
+}
+EXPORT_SYMBOL_GPL(free_rs);
+
+/**
+ * init_rs_internal - Allocate rs control, find a matching codec or allocate a new one
+ * @symsize: the symbol size (number of bits)
+ * @gfpoly: the extended Galois field generator polynomial coefficients,
+ * with the 0th coefficient in the low order bit. The polynomial
+ * must be primitive.
+ * @gffunc: pointer to function to generate the next field element,
+ * or the multiplicative identity element if given 0. Used
+ * instead of gfpoly if gfpoly is 0
+ * @fcr: the first consecutive root of the rs code generator polynomial
+ * in index form
+ * @prim: primitive element to generate polynomial roots
+ * @nroots: RS code generator polynomial degree (number of roots)
+ * @gfp: GFP_ flags for allocations
+ */
+static struct rs_control *init_rs_internal(int symsize, int gfpoly,
+ int (*gffunc)(int), int fcr,
+ int prim, int nroots, gfp_t gfp)
+{
+ struct list_head *tmp;
+ struct rs_control *rs;
+ unsigned int bsize;
+
+ /* Sanity checks */
+ if (symsize < 1)
+ return NULL;
+ if (fcr < 0 || fcr >= (1 << symsize))
+ return NULL;
+ if (prim <= 0 || prim >= (1 << symsize))
+ return NULL;
+ if (nroots < 0 || nroots >= (1 << symsize))
+ return NULL;
+
+ /*
+ * The decoder needs buffers in each control struct instance to
+ * avoid variable size or large fixed size allocations on
+ * stack. Size the buffers to arrays of [nroots + 1].
+ */
+ bsize = sizeof(uint16_t) * RS_DECODE_NUM_BUFFERS * (nroots + 1);
+ rs = kzalloc(sizeof(*rs) + bsize, gfp);
+ if (!rs)
+ return NULL;
+
+ mutex_lock(&rslistlock);
+
+ /* Walk through the list and look for a matching entry */
+ list_for_each(tmp, &codec_list) {
+ struct rs_codec *cd = list_entry(tmp, struct rs_codec, list);
+
+ if (symsize != cd->mm)
+ continue;
+ if (gfpoly != cd->gfpoly)
+ continue;
+ if (gffunc != cd->gffunc)
+ continue;
+ if (fcr != cd->fcr)
+ continue;
+ if (prim != cd->prim)
+ continue;
+ if (nroots != cd->nroots)
+ continue;
+ /* We have a matching one already */
+ cd->users++;
+ rs->codec = cd;
+ goto out;
+ }
+
+ /* Create a new one */
+ rs->codec = codec_init(symsize, gfpoly, gffunc, fcr, prim, nroots, gfp);
+ if (!rs->codec) {
+ kfree(rs);
+ rs = NULL;
+ }
+out:
+ mutex_unlock(&rslistlock);
+ return rs;
+}
+
+/**
+ * init_rs_gfp - Create a RS control struct and initialize it
+ * @symsize: the symbol size (number of bits)
+ * @gfpoly: the extended Galois field generator polynomial coefficients,
+ * with the 0th coefficient in the low order bit. The polynomial
+ * must be primitive.
+ * @fcr: the first consecutive root of the rs code generator polynomial
+ * in index form
+ * @prim: primitive element to generate polynomial roots
+ * @nroots: RS code generator polynomial degree (number of roots)
+ * @gfp: Memory allocation flags.
+ */
+struct rs_control *init_rs_gfp(int symsize, int gfpoly, int fcr, int prim,
+ int nroots, gfp_t gfp)
+{
+ return init_rs_internal(symsize, gfpoly, NULL, fcr, prim, nroots, gfp);
+}
+EXPORT_SYMBOL_GPL(init_rs_gfp);
+
+/**
+ * init_rs_non_canonical - Allocate rs control struct for fields with
+ * non-canonical representation
+ * @symsize: the symbol size (number of bits)
+ * @gffunc: pointer to function to generate the next field element,
+ * or the multiplicative identity element if given 0. Used
+ * instead of gfpoly if gfpoly is 0
+ * @fcr: the first consecutive root of the rs code generator polynomial
+ * in index form
+ * @prim: primitive element to generate polynomial roots
+ * @nroots: RS code generator polynomial degree (number of roots)
+ */
+struct rs_control *init_rs_non_canonical(int symsize, int (*gffunc)(int),
+ int fcr, int prim, int nroots)
+{
+ return init_rs_internal(symsize, 0, gffunc, fcr, prim, nroots,
+ GFP_KERNEL);
+}
+EXPORT_SYMBOL_GPL(init_rs_non_canonical);
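+
+/*
+ * Illustrative sketch (not part of this file): for a field in non-canonical
+ * representation the caller supplies a walk function instead of a generator
+ * polynomial. my_gffunc() is a hypothetical example; it must return the
+ * multiplicative identity element when passed 0 and otherwise the field
+ * element following @x:
+ *
+ *	static int my_gffunc(int x)
+ *	{
+ *		return x ? my_next_element(x) : my_identity_element;
+ *	}
+ *
+ *	rs = init_rs_non_canonical(10, my_gffunc, 0, 1, 6);
+ */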
+
+#ifdef CONFIG_REED_SOLOMON_ENC8
+/**
+ * encode_rs8 - Calculate the parity for data values (8bit data width)
+ * @rsc: the rs control structure
+ * @data: data field of a given type
+ * @len: data length
+ * @par: parity data, must be initialized by caller (usually all 0)
+ * @invmsk: invert data mask (will be xored on data)
+ *
+ * The parity uses a uint16_t data type to enable
+ * symbol sizes > 8. The calling code must itself take care of encoding the
+ * syndrome result for storage.
+ */
+int encode_rs8(struct rs_control *rsc, uint8_t *data, int len, uint16_t *par,
+ uint16_t invmsk)
+{
+#include "encode_rs.c"
+}
+EXPORT_SYMBOL_GPL(encode_rs8);
+#endif
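+
+/*
+ * Illustrative sketch (not part of this file): the parity buffer needs one
+ * uint16_t entry per generator polynomial root (nroots) and must be zeroed
+ * by the caller before every encode, e.g. for a code with 6 roots:
+ *
+ *	uint16_t par[6];
+ *
+ *	memset(par, 0, sizeof(par));
+ *	encode_rs8(rs, data8, len, par, 0);
+ */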
+
+#ifdef CONFIG_REED_SOLOMON_DEC8
+/**
+ * decode_rs8 - Decode codeword (8bit data width)
+ * @rsc: the rs control structure
+ * @data: data field of a given type
+ * @par: received parity data field
+ * @len: data length
+ * @s: syndrome data field, must be in index form
+ * (if NULL, syndrome is calculated)
+ * @no_eras: number of erasures
+ * @eras_pos: position of erasures, can be NULL
+ * @invmsk: invert data mask (will be xored on data, not on parity!)
+ * @corr: buffer to store correction bitmask on eras_pos
+ *
+ * The syndrome and parity use a uint16_t data type to enable
+ * symbol sizes > 8. The calling code must itself take care of decoding the
+ * syndrome result and the received parity before calling this code.
+ *
+ * Note: The rs_control struct @rsc contains buffers which are used for
+ * decoding, so the caller has to ensure that decoder invocations are
+ * serialized.
+ *
+ * Returns the number of corrected symbols or -EBADMSG for uncorrectable
+ * errors. The count includes errors in the parity.
+ */
+int decode_rs8(struct rs_control *rsc, uint8_t *data, uint16_t *par, int len,
+ uint16_t *s, int no_eras, int *eras_pos, uint16_t invmsk,
+ uint16_t *corr)
+{
+#include "decode_rs.c"
+}
+EXPORT_SYMBOL_GPL(decode_rs8);
+#endif
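+
+/*
+ * Illustrative sketch (not part of this file): decoding in place, letting
+ * the library calculate the syndrome and apply the corrections directly to
+ * data and parity (corr == NULL). A negative return value means the
+ * codeword was uncorrectable:
+ *
+ *	numerr = decode_rs8(rs, data8, par, len, NULL, 0, NULL, 0, NULL);
+ *	if (numerr < 0)
+ *		pr_err("uncorrectable error\n");
+ */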
+
+#ifdef CONFIG_REED_SOLOMON_ENC16
+/**
+ * encode_rs16 - Calculate the parity for data values (16bit data width)
+ * @rsc: the rs control structure
+ * @data: data field of a given type
+ * @len: data length
+ * @par: parity data, must be initialized by caller (usually all 0)
+ * @invmsk: invert data mask (will be xored on data, not on parity!)
+ *
+ * Each field in the data array contains up to symbol size bits of valid data.
+ */
+int encode_rs16(struct rs_control *rsc, uint16_t *data, int len, uint16_t *par,
+ uint16_t invmsk)
+{
+#include "encode_rs.c"
+}
+EXPORT_SYMBOL_GPL(encode_rs16);
+#endif
+
+#ifdef CONFIG_REED_SOLOMON_DEC16
+/**
+ * decode_rs16 - Decode codeword (16bit data width)
+ * @rsc: the rs control structure
+ * @data: data field of a given type
+ * @par: received parity data field
+ * @len: data length
+ * @s: syndrome data field, must be in index form
+ * (if NULL, syndrome is calculated)
+ * @no_eras: number of erasures
+ * @eras_pos: position of erasures, can be NULL
+ * @invmsk: invert data mask (will be xored on data, not on parity!)
+ * @corr: buffer to store correction bitmask on eras_pos
+ *
+ * Each field in the data array contains up to symbol size bits of valid data.
+ *
+ * Note: The rs_control struct @rsc contains buffers which are used for
+ * decoding, so the caller has to ensure that decoder invocations are
+ * serialized.
+ *
+ * Returns the number of corrected symbols or -EBADMSG for uncorrectable
+ * errors. The count includes errors in the parity.
+ */
+int decode_rs16(struct rs_control *rsc, uint16_t *data, uint16_t *par, int len,
+ uint16_t *s, int no_eras, int *eras_pos, uint16_t invmsk,
+ uint16_t *corr)
+{
+#include "decode_rs.c"
+}
+EXPORT_SYMBOL_GPL(decode_rs16);
+#endif
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Reed Solomon encoder/decoder");
+MODULE_AUTHOR("Phil Karn, Thomas Gleixner");
+
diff --git a/lib/reed_solomon/test_rslib.c b/lib/reed_solomon/test_rslib.c
new file mode 100644
index 000000000..75cb1adac
--- /dev/null
+++ b/lib/reed_solomon/test_rslib.c
@@ -0,0 +1,518 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Tests for Generic Reed Solomon encoder / decoder library
+ *
+ * Written by Ferdinand Blomqvist
+ * Based on previous work by Phil Karn, KA9Q
+ */
+#include <linux/rslib.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+
+enum verbosity {
+ V_SILENT,
+ V_PROGRESS,
+ V_CSUMMARY
+};
+
+enum method {
+ CORR_BUFFER,
+ CALLER_SYNDROME,
+ IN_PLACE
+};
+
+#define __param(type, name, init, msg) \
+ static type name = init; \
+ module_param(name, type, 0444); \
+ MODULE_PARM_DESC(name, msg)
+
+__param(int, v, V_PROGRESS, "Verbosity level");
+__param(int, ewsc, 1, "Erasures without symbol corruption");
+__param(int, bc, 1, "Test for correct behaviour beyond error correction capacity");
+
+struct etab {
+ int symsize;
+ int genpoly;
+ int fcs;
+ int prim;
+ int nroots;
+ int ntrials;
+};
+
+/* List of codes to test */
+static struct etab Tab[] = {
+ {2, 0x7, 1, 1, 1, 100000 },
+ {3, 0xb, 1, 1, 2, 100000 },
+ {3, 0xb, 1, 1, 3, 100000 },
+ {3, 0xb, 2, 1, 4, 100000 },
+ {4, 0x13, 1, 1, 4, 10000 },
+ {5, 0x25, 1, 1, 6, 1000 },
+ {6, 0x43, 3, 1, 8, 1000 },
+ {7, 0x89, 1, 1, 14, 500 },
+ {8, 0x11d, 1, 1, 30, 100 },
+ {8, 0x187, 112, 11, 32, 100 },
+ {9, 0x211, 1, 1, 33, 80 },
+ {0, 0, 0, 0, 0, 0},
+};
+
+struct estat {
+ int dwrong;
+ int irv;
+ int wepos;
+ int nwords;
+};
+
+struct bcstat {
+ int rfail;
+ int rsuccess;
+ int noncw;
+ int nwords;
+};
+
+struct wspace {
+ uint16_t *c; /* sent codeword */
+ uint16_t *r; /* received word */
+ uint16_t *s; /* syndrome */
+ uint16_t *corr; /* correction buffer */
+ int *errlocs;
+ int *derrlocs;
+};
+
+struct pad {
+ int mult;
+ int shift;
+};
+
+static struct pad pad_coef[] = {
+ { 0, 0 },
+ { 1, 2 },
+ { 1, 1 },
+ { 3, 2 },
+ { 1, 0 },
+};
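+
+/*
+ * The mult/shift pairs above select shortened codes padded by 0, 1/4, 1/2,
+ * 3/4 and all of max_pad: pad = (mult * max_pad) >> shift.
+ */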
+
+static void free_ws(struct wspace *ws)
+{
+ if (!ws)
+ return;
+
+ kfree(ws->errlocs);
+ kfree(ws->c);
+ kfree(ws);
+}
+
+static struct wspace *alloc_ws(struct rs_codec *rs)
+{
+ int nroots = rs->nroots;
+ struct wspace *ws;
+ int nn = rs->nn;
+
+ ws = kzalloc(sizeof(*ws), GFP_KERNEL);
+ if (!ws)
+ return NULL;
+
+ ws->c = kmalloc_array(2 * (nn + nroots),
+ sizeof(uint16_t), GFP_KERNEL);
+ if (!ws->c)
+ goto err;
+
+ ws->r = ws->c + nn;
+ ws->s = ws->r + nn;
+ ws->corr = ws->s + nroots;
+
+ ws->errlocs = kmalloc_array(nn + nroots, sizeof(int), GFP_KERNEL);
+ if (!ws->errlocs)
+ goto err;
+
+ ws->derrlocs = ws->errlocs + nn;
+ return ws;
+
+err:
+ free_ws(ws);
+ return NULL;
+}
+
+/*
+ * Generates a random codeword and stores it in c. Generates random errors and
+ * erasures, and stores the random word with errors in r. Erasure positions are
+ * stored in derrlocs, while errlocs has one of three values in every position:
+ *
+ * 0 if there is no error in this position;
+ * 1 if there is a symbol error in this position;
+ * 2 if there is an erasure without symbol corruption.
+ *
+ * Returns the number of corrupted symbols.
+ */
+static int get_rcw_we(struct rs_control *rs, struct wspace *ws,
+ int len, int errs, int eras)
+{
+ int nroots = rs->codec->nroots;
+ int *derrlocs = ws->derrlocs;
+ int *errlocs = ws->errlocs;
+ int dlen = len - nroots;
+ int nn = rs->codec->nn;
+ uint16_t *c = ws->c;
+ uint16_t *r = ws->r;
+ int errval;
+ int errloc;
+ int i;
+
+ /* Load c with random data and encode */
+ for (i = 0; i < dlen; i++)
+ c[i] = get_random_u32() & nn;
+
+ memset(c + dlen, 0, nroots * sizeof(*c));
+ encode_rs16(rs, c, dlen, c + dlen, 0);
+
+ /* Make a copy and add errors and erasures */
+ memcpy(r, c, len * sizeof(*r));
+ memset(errlocs, 0, len * sizeof(*errlocs));
+ memset(derrlocs, 0, nroots * sizeof(*derrlocs));
+
+ /* Generating random errors */
+ for (i = 0; i < errs; i++) {
+ do {
+ /* Error value must be nonzero */
+ errval = get_random_u32() & nn;
+ } while (errval == 0);
+
+ do {
+ /* Must not choose the same location twice */
+ errloc = get_random_u32_below(len);
+ } while (errlocs[errloc] != 0);
+
+ errlocs[errloc] = 1;
+ r[errloc] ^= errval;
+ }
+
+ /* Generating random erasures */
+ for (i = 0; i < eras; i++) {
+ do {
+ /* Must not choose the same location twice */
+ errloc = get_random_u32_below(len);
+ } while (errlocs[errloc] != 0);
+
+ derrlocs[i] = errloc;
+
+ if (ewsc && get_random_u32_below(2)) {
+ /* Erasure with the symbol intact */
+ errlocs[errloc] = 2;
+ } else {
+ /* Erasure with corrupted symbol */
+ do {
+ /* Error value must be nonzero */
+ errval = get_random_u32() & nn;
+ } while (errval == 0);
+
+ errlocs[errloc] = 1;
+ r[errloc] ^= errval;
+ errs++;
+ }
+ }
+
+ return errs;
+}
+
+static void fix_err(uint16_t *data, int nerrs, uint16_t *corr, int *errlocs)
+{
+ int i;
+
+ for (i = 0; i < nerrs; i++)
+ data[errlocs[i]] ^= corr[i];
+}
+
+static void compute_syndrome(struct rs_control *rsc, uint16_t *data,
+ int len, uint16_t *syn)
+{
+ struct rs_codec *rs = rsc->codec;
+ uint16_t *alpha_to = rs->alpha_to;
+ uint16_t *index_of = rs->index_of;
+ int nroots = rs->nroots;
+ int prim = rs->prim;
+ int fcr = rs->fcr;
+ int i, j;
+
+ /* Calculating syndrome */
+ for (i = 0; i < nroots; i++) {
+ syn[i] = data[0];
+ for (j = 1; j < len; j++) {
+ if (syn[i] == 0) {
+ syn[i] = data[j];
+ } else {
+ syn[i] = data[j] ^
+ alpha_to[rs_modnn(rs, index_of[syn[i]]
+ + (fcr + i) * prim)];
+ }
+ }
+ }
+
+ /* Convert to index form */
+ for (i = 0; i < nroots; i++)
+ syn[i] = rs->index_of[syn[i]];
+}
+
+/* Test up to error correction capacity */
+static void test_uc(struct rs_control *rs, int len, int errs,
+ int eras, int trials, struct estat *stat,
+ struct wspace *ws, int method)
+{
+ int dlen = len - rs->codec->nroots;
+ int *derrlocs = ws->derrlocs;
+ int *errlocs = ws->errlocs;
+ uint16_t *corr = ws->corr;
+ uint16_t *c = ws->c;
+ uint16_t *r = ws->r;
+ uint16_t *s = ws->s;
+ int derrs, nerrs;
+ int i, j;
+
+ for (j = 0; j < trials; j++) {
+ nerrs = get_rcw_we(rs, ws, len, errs, eras);
+
+ switch (method) {
+ case CORR_BUFFER:
+ derrs = decode_rs16(rs, r, r + dlen, dlen,
+ NULL, eras, derrlocs, 0, corr);
+ fix_err(r, derrs, corr, derrlocs);
+ break;
+ case CALLER_SYNDROME:
+ compute_syndrome(rs, r, len, s);
+ derrs = decode_rs16(rs, NULL, NULL, dlen,
+ s, eras, derrlocs, 0, corr);
+ fix_err(r, derrs, corr, derrlocs);
+ break;
+ case IN_PLACE:
+ derrs = decode_rs16(rs, r, r + dlen, dlen,
+ NULL, eras, derrlocs, 0, NULL);
+ break;
+ default:
+ continue;
+ }
+
+ if (derrs != nerrs)
+ stat->irv++;
+
+ if (method != IN_PLACE) {
+ for (i = 0; i < derrs; i++) {
+ if (errlocs[derrlocs[i]] != 1)
+ stat->wepos++;
+ }
+ }
+
+ if (memcmp(r, c, len * sizeof(*r)))
+ stat->dwrong++;
+ }
+ stat->nwords += trials;
+}
+
+static int ex_rs_helper(struct rs_control *rs, struct wspace *ws,
+ int len, int trials, int method)
+{
+ static const char * const desc[] = {
+ "Testing correction buffer interface...",
+ "Testing with caller provided syndrome...",
+ "Testing in-place interface..."
+ };
+
+ struct estat stat = {0, 0, 0, 0};
+ int nroots = rs->codec->nroots;
+ int errs, eras, retval;
+
+ if (v >= V_PROGRESS)
+ pr_info(" %s\n", desc[method]);
+
+ for (errs = 0; errs <= nroots / 2; errs++)
+ for (eras = 0; eras <= nroots - 2 * errs; eras++)
+ test_uc(rs, len, errs, eras, trials, &stat, ws, method);
+
+ if (v >= V_CSUMMARY) {
+ pr_info(" Decodes wrong: %d / %d\n",
+ stat.dwrong, stat.nwords);
+ pr_info(" Wrong return value: %d / %d\n",
+ stat.irv, stat.nwords);
+ if (method != IN_PLACE)
+ pr_info(" Wrong error position: %d\n", stat.wepos);
+ }
+
+ retval = stat.dwrong + stat.wepos + stat.irv;
+ if (retval && v >= V_PROGRESS)
+ pr_warn(" FAIL: %d decoding failures!\n", retval);
+
+ return retval;
+}
+
+static int exercise_rs(struct rs_control *rs, struct wspace *ws,
+ int len, int trials)
+{
+ int retval = 0;
+ int i;
+
+ if (v >= V_PROGRESS)
+ pr_info("Testing up to error correction capacity...\n");
+
+ for (i = 0; i <= IN_PLACE; i++)
+ retval |= ex_rs_helper(rs, ws, len, trials, i);
+
+ return retval;
+}
+
+/* Tests for correct behaviour beyond error correction capacity */
+static void test_bc(struct rs_control *rs, int len, int errs,
+ int eras, int trials, struct bcstat *stat,
+ struct wspace *ws)
+{
+ int nroots = rs->codec->nroots;
+ int dlen = len - nroots;
+ int *derrlocs = ws->derrlocs;
+ uint16_t *corr = ws->corr;
+ uint16_t *r = ws->r;
+ int derrs, j;
+
+ for (j = 0; j < trials; j++) {
+ get_rcw_we(rs, ws, len, errs, eras);
+ derrs = decode_rs16(rs, r, r + dlen, dlen,
+ NULL, eras, derrlocs, 0, corr);
+ fix_err(r, derrs, corr, derrlocs);
+
+ if (derrs >= 0) {
+ stat->rsuccess++;
+
+ /*
+ * We check that the returned word is actually a
+ * codeword. The obvious way to do this would be to
+ * compute the syndrome, but we don't want to replicate
+ * that code here. However, all the codes are in
+ * systematic form, and therefore we can encode the
+ * returned word, and see whether the parity changes or
+ * not.
+ */
+ memset(corr, 0, nroots * sizeof(*corr));
+ encode_rs16(rs, r, dlen, corr, 0);
+
+ if (memcmp(r + dlen, corr, nroots * sizeof(*corr)))
+ stat->noncw++;
+ } else {
+ stat->rfail++;
+ }
+ }
+ stat->nwords += trials;
+}
+
+static int exercise_rs_bc(struct rs_control *rs, struct wspace *ws,
+ int len, int trials)
+{
+ struct bcstat stat = {0, 0, 0, 0};
+ int nroots = rs->codec->nroots;
+ int errs, eras, cutoff;
+
+ if (v >= V_PROGRESS)
+ pr_info("Testing beyond error correction capacity...\n");
+
+ for (errs = 1; errs <= nroots; errs++) {
+ eras = nroots - 2 * errs + 1;
+ if (eras < 0)
+ eras = 0;
+
+ cutoff = nroots <= len - errs ? nroots : len - errs;
+ for (; eras <= cutoff; eras++)
+ test_bc(rs, len, errs, eras, trials, &stat, ws);
+ }
+
+ if (v >= V_CSUMMARY) {
+ pr_info(" decoder gives up: %d / %d\n",
+ stat.rfail, stat.nwords);
+ pr_info(" decoder returns success: %d / %d\n",
+ stat.rsuccess, stat.nwords);
+ pr_info(" not a codeword: %d / %d\n",
+ stat.noncw, stat.rsuccess);
+ }
+
+ if (stat.noncw && v >= V_PROGRESS)
+ pr_warn(" FAIL: %d silent failures!\n", stat.noncw);
+
+ return stat.noncw;
+}
+
+static int run_exercise(struct etab *e)
+{
+ int nn = (1 << e->symsize) - 1;
+ int kk = nn - e->nroots;
+ struct rs_control *rsc;
+ int retval = -ENOMEM;
+ int max_pad = kk - 1;
+ int prev_pad = -1;
+ struct wspace *ws;
+ int i;
+
+ rsc = init_rs(e->symsize, e->genpoly, e->fcs, e->prim, e->nroots);
+ if (!rsc)
+ return retval;
+
+ ws = alloc_ws(rsc->codec);
+ if (!ws)
+ goto err;
+
+ retval = 0;
+ for (i = 0; i < ARRAY_SIZE(pad_coef); i++) {
+ int pad = (pad_coef[i].mult * max_pad) >> pad_coef[i].shift;
+ int len = nn - pad;
+
+ if (pad == prev_pad)
+ continue;
+
+ prev_pad = pad;
+ if (v >= V_PROGRESS) {
+ pr_info("Testing (%d,%d)_%d code...\n",
+ len, kk - pad, nn + 1);
+ }
+
+ retval |= exercise_rs(rsc, ws, len, e->ntrials);
+ if (bc)
+ retval |= exercise_rs_bc(rsc, ws, len, e->ntrials);
+ }
+
+ free_ws(ws);
+
+err:
+ free_rs(rsc);
+ return retval;
+}
+
+static int __init test_rslib_init(void)
+{
+ int i, fail = 0;
+
+ for (i = 0; Tab[i].symsize != 0 ; i++) {
+ int retval;
+
+ retval = run_exercise(Tab + i);
+ if (retval < 0)
+ return -ENOMEM;
+
+ fail |= retval;
+ }
+
+ if (fail)
+ pr_warn("rslib: test failed\n");
+ else
+ pr_info("rslib: test ok\n");
+
+ return -EAGAIN; /* Failure will directly unload the module */
+}
+
+static void __exit test_rslib_exit(void)
+{
+}
+
+module_init(test_rslib_init)
+module_exit(test_rslib_exit)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ferdinand Blomqvist");
+MODULE_DESCRIPTION("Reed-Solomon library test");
diff --git a/lib/ref_tracker.c b/lib/ref_tracker.c
new file mode 100644
index 000000000..cf5609b1c
--- /dev/null
+++ b/lib/ref_tracker.c
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#define pr_fmt(fmt) "ref_tracker: " fmt
+
+#include <linux/export.h>
+#include <linux/list_sort.h>
+#include <linux/ref_tracker.h>
+#include <linux/slab.h>
+#include <linux/stacktrace.h>
+#include <linux/stackdepot.h>
+
+#define REF_TRACKER_STACK_ENTRIES 16
+#define STACK_BUF_SIZE 1024
+
+struct ref_tracker {
+ struct list_head head; /* anchor into dir->list or dir->quarantine */
+ bool dead;
+ depot_stack_handle_t alloc_stack_handle;
+ depot_stack_handle_t free_stack_handle;
+};
+
+struct ref_tracker_dir_stats {
+ int total;
+ int count;
+ struct {
+ depot_stack_handle_t stack_handle;
+ unsigned int count;
+ } stacks[];
+};
+
+static struct ref_tracker_dir_stats *
+ref_tracker_get_stats(struct ref_tracker_dir *dir, unsigned int limit)
+{
+ struct ref_tracker_dir_stats *stats;
+ struct ref_tracker *tracker;
+
+ stats = kmalloc(struct_size(stats, stacks, limit),
+ GFP_NOWAIT | __GFP_NOWARN);
+ if (!stats)
+ return ERR_PTR(-ENOMEM);
+ stats->total = 0;
+ stats->count = 0;
+
+ list_for_each_entry(tracker, &dir->list, head) {
+ depot_stack_handle_t stack = tracker->alloc_stack_handle;
+ int i;
+
+ ++stats->total;
+ for (i = 0; i < stats->count; ++i)
+ if (stats->stacks[i].stack_handle == stack)
+ break;
+ if (i >= limit)
+ continue;
+ if (i >= stats->count) {
+ stats->stacks[i].stack_handle = stack;
+ stats->stacks[i].count = 0;
+ ++stats->count;
+ }
+ ++stats->stacks[i].count;
+ }
+
+ return stats;
+}
+
+struct ostream {
+ char *buf;
+ int size, used;
+};
+
+#define pr_ostream(stream, fmt, args...) \
+({ \
+ struct ostream *_s = (stream); \
+\
+ if (!_s->buf) { \
+ pr_err(fmt, ##args); \
+ } else { \
+ int ret, len = _s->size - _s->used; \
+ ret = snprintf(_s->buf + _s->used, len, pr_fmt(fmt), ##args); \
+ _s->used += min(ret, len); \
+ } \
+})
+
+static void
+__ref_tracker_dir_pr_ostream(struct ref_tracker_dir *dir,
+ unsigned int display_limit, struct ostream *s)
+{
+ struct ref_tracker_dir_stats *stats;
+ unsigned int i = 0, skipped;
+ depot_stack_handle_t stack;
+ char *sbuf;
+
+ lockdep_assert_held(&dir->lock);
+
+ if (list_empty(&dir->list))
+ return;
+
+ stats = ref_tracker_get_stats(dir, display_limit);
+ if (IS_ERR(stats)) {
+ pr_ostream(s, "%s@%pK: couldn't get stats, error %pe\n",
+ dir->name, dir, stats);
+ return;
+ }
+
+ sbuf = kmalloc(STACK_BUF_SIZE, GFP_NOWAIT | __GFP_NOWARN);
+
+ for (i = 0, skipped = stats->total; i < stats->count; ++i) {
+ stack = stats->stacks[i].stack_handle;
+ if (sbuf && !stack_depot_snprint(stack, sbuf, STACK_BUF_SIZE, 4))
+ sbuf[0] = 0;
+ pr_ostream(s, "%s@%pK has %d/%d users at\n%s\n", dir->name, dir,
+ stats->stacks[i].count, stats->total, sbuf);
+ skipped -= stats->stacks[i].count;
+ }
+
+ if (skipped)
+ pr_ostream(s, "%s@%pK skipped reports about %d/%d users.\n",
+ dir->name, dir, skipped, stats->total);
+
+ kfree(sbuf);
+
+ kfree(stats);
+}
+
+void ref_tracker_dir_print_locked(struct ref_tracker_dir *dir,
+ unsigned int display_limit)
+{
+ struct ostream os = {};
+
+ __ref_tracker_dir_pr_ostream(dir, display_limit, &os);
+}
+EXPORT_SYMBOL(ref_tracker_dir_print_locked);
+
+void ref_tracker_dir_print(struct ref_tracker_dir *dir,
+ unsigned int display_limit)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dir->lock, flags);
+ ref_tracker_dir_print_locked(dir, display_limit);
+ spin_unlock_irqrestore(&dir->lock, flags);
+}
+EXPORT_SYMBOL(ref_tracker_dir_print);
+
+int ref_tracker_dir_snprint(struct ref_tracker_dir *dir, char *buf, size_t size)
+{
+ struct ostream os = { .buf = buf, .size = size };
+ unsigned long flags;
+
+ spin_lock_irqsave(&dir->lock, flags);
+ __ref_tracker_dir_pr_ostream(dir, 16, &os);
+ spin_unlock_irqrestore(&dir->lock, flags);
+
+ return os.used;
+}
+EXPORT_SYMBOL(ref_tracker_dir_snprint);
+
+void ref_tracker_dir_exit(struct ref_tracker_dir *dir)
+{
+ struct ref_tracker *tracker, *n;
+ unsigned long flags;
+ bool leak = false;
+
+ dir->dead = true;
+ spin_lock_irqsave(&dir->lock, flags);
+ list_for_each_entry_safe(tracker, n, &dir->quarantine, head) {
+ list_del(&tracker->head);
+ kfree(tracker);
+ dir->quarantine_avail++;
+ }
+ if (!list_empty(&dir->list)) {
+ ref_tracker_dir_print_locked(dir, 16);
+ leak = true;
+ list_for_each_entry_safe(tracker, n, &dir->list, head) {
+ list_del(&tracker->head);
+ kfree(tracker);
+ }
+ }
+ spin_unlock_irqrestore(&dir->lock, flags);
+ WARN_ON_ONCE(leak);
+ WARN_ON_ONCE(refcount_read(&dir->untracked) != 1);
+ WARN_ON_ONCE(refcount_read(&dir->no_tracker) != 1);
+}
+EXPORT_SYMBOL(ref_tracker_dir_exit);
+
+int ref_tracker_alloc(struct ref_tracker_dir *dir,
+ struct ref_tracker **trackerp,
+ gfp_t gfp)
+{
+ unsigned long entries[REF_TRACKER_STACK_ENTRIES];
+ struct ref_tracker *tracker;
+ unsigned int nr_entries;
+ gfp_t gfp_mask = gfp | __GFP_NOWARN;
+ unsigned long flags;
+
+ WARN_ON_ONCE(dir->dead);
+
+ if (!trackerp) {
+ refcount_inc(&dir->no_tracker);
+ return 0;
+ }
+ if (gfp & __GFP_DIRECT_RECLAIM)
+ gfp_mask |= __GFP_NOFAIL;
+ *trackerp = tracker = kzalloc(sizeof(*tracker), gfp_mask);
+ if (unlikely(!tracker)) {
+ pr_err_once("memory allocation failure, unreliable refcount tracker.\n");
+ refcount_inc(&dir->untracked);
+ return -ENOMEM;
+ }
+ nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
+ tracker->alloc_stack_handle = stack_depot_save(entries, nr_entries, gfp);
+
+ spin_lock_irqsave(&dir->lock, flags);
+ list_add(&tracker->head, &dir->list);
+ spin_unlock_irqrestore(&dir->lock, flags);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ref_tracker_alloc);
+
+int ref_tracker_free(struct ref_tracker_dir *dir,
+ struct ref_tracker **trackerp)
+{
+ unsigned long entries[REF_TRACKER_STACK_ENTRIES];
+ depot_stack_handle_t stack_handle;
+ struct ref_tracker *tracker;
+ unsigned int nr_entries;
+ unsigned long flags;
+
+ WARN_ON_ONCE(dir->dead);
+
+ if (!trackerp) {
+ refcount_dec(&dir->no_tracker);
+ return 0;
+ }
+ tracker = *trackerp;
+ if (!tracker) {
+ refcount_dec(&dir->untracked);
+ return -EEXIST;
+ }
+ nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
+ stack_handle = stack_depot_save(entries, nr_entries,
+ GFP_NOWAIT | __GFP_NOWARN);
+
+ spin_lock_irqsave(&dir->lock, flags);
+ if (tracker->dead) {
+ pr_err("reference already released.\n");
+ if (tracker->alloc_stack_handle) {
+ pr_err("allocated in:\n");
+ stack_depot_print(tracker->alloc_stack_handle);
+ }
+ if (tracker->free_stack_handle) {
+ pr_err("freed in:\n");
+ stack_depot_print(tracker->free_stack_handle);
+ }
+ spin_unlock_irqrestore(&dir->lock, flags);
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+ tracker->dead = true;
+
+ tracker->free_stack_handle = stack_handle;
+
+ list_move_tail(&tracker->head, &dir->quarantine);
+ if (!dir->quarantine_avail) {
+ tracker = list_first_entry(&dir->quarantine, struct ref_tracker, head);
+ list_del(&tracker->head);
+ } else {
+ dir->quarantine_avail--;
+ tracker = NULL;
+ }
+ spin_unlock_irqrestore(&dir->lock, flags);
+
+ kfree(tracker);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ref_tracker_free);
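+
+/*
+ * Illustrative usage sketch (not part of this file; the init helper lives
+ * in include/linux/ref_tracker.h). Each acquired reference gets a tracker,
+ * which is handed back when the reference is dropped:
+ *
+ *	struct ref_tracker_dir dir;
+ *	struct ref_tracker *tracker;
+ *
+ *	ref_tracker_dir_init(&dir, 16, "example");
+ *	ref_tracker_alloc(&dir, &tracker, GFP_KERNEL);
+ *	...
+ *	ref_tracker_free(&dir, &tracker);
+ *	ref_tracker_dir_exit(&dir);
+ */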
diff --git a/lib/refcount.c b/lib/refcount.c
new file mode 100644
index 000000000..a207a8f22
--- /dev/null
+++ b/lib/refcount.c
@@ -0,0 +1,186 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Out-of-line refcount functions.
+ */
+
+#include <linux/mutex.h>
+#include <linux/refcount.h>
+#include <linux/spinlock.h>
+#include <linux/bug.h>
+
+#define REFCOUNT_WARN(str) WARN_ONCE(1, "refcount_t: " str ".\n")
+
+void refcount_warn_saturate(refcount_t *r, enum refcount_saturation_type t)
+{
+ refcount_set(r, REFCOUNT_SATURATED);
+
+ switch (t) {
+ case REFCOUNT_ADD_NOT_ZERO_OVF:
+ REFCOUNT_WARN("saturated; leaking memory");
+ break;
+ case REFCOUNT_ADD_OVF:
+ REFCOUNT_WARN("saturated; leaking memory");
+ break;
+ case REFCOUNT_ADD_UAF:
+ REFCOUNT_WARN("addition on 0; use-after-free");
+ break;
+ case REFCOUNT_SUB_UAF:
+ REFCOUNT_WARN("underflow; use-after-free");
+ break;
+ case REFCOUNT_DEC_LEAK:
+ REFCOUNT_WARN("decrement hit 0; leaking memory");
+ break;
+ default:
+ REFCOUNT_WARN("unknown saturation event!?");
+ }
+}
+EXPORT_SYMBOL(refcount_warn_saturate);
+
+/**
+ * refcount_dec_if_one - decrement a refcount if it is 1
+ * @r: the refcount
+ *
+ * No atomic_t counterpart, it attempts a 1 -> 0 transition and returns the
+ * success thereof.
+ *
+ * Like all decrement operations, it provides release memory order and provides
+ * a control dependency.
+ *
+ * It can be used like a try-delete operator; this explicit case is provided
+ * and not cmpxchg in generic, because that would allow implementing unsafe
+ * operations.
+ *
+ * Return: true if the resulting refcount is 0, false otherwise
+ */
+bool refcount_dec_if_one(refcount_t *r)
+{
+ int val = 1;
+
+ return atomic_try_cmpxchg_release(&r->refs, &val, 0);
+}
+EXPORT_SYMBOL(refcount_dec_if_one);
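+
+/*
+ * Illustrative sketch (not part of this file): the try-delete pattern with
+ * a hypothetical object. Only the caller that wins the 1 -> 0 transition
+ * tears the object down:
+ *
+ *	if (refcount_dec_if_one(&obj->ref)) {
+ *		list_del(&obj->node);
+ *		kfree(obj);
+ *	}
+ */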
+
+/**
+ * refcount_dec_not_one - decrement a refcount if it is not 1
+ * @r: the refcount
+ *
+ * No atomic_t counterpart, it decrements unless the value is 1, in which case
+ * it will return false.
+ *
+ * Was often done like: atomic_add_unless(&var, -1, 1)
+ *
+ * Return: true if the decrement operation was successful, false otherwise
+ */
+bool refcount_dec_not_one(refcount_t *r)
+{
+ unsigned int new, val = atomic_read(&r->refs);
+
+ do {
+ if (unlikely(val == REFCOUNT_SATURATED))
+ return true;
+
+ if (val == 1)
+ return false;
+
+ new = val - 1;
+ if (new > val) {
+ WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n");
+ return true;
+ }
+
+ } while (!atomic_try_cmpxchg_release(&r->refs, &val, new));
+
+ return true;
+}
+EXPORT_SYMBOL(refcount_dec_not_one);
+
+/**
+ * refcount_dec_and_mutex_lock - return holding mutex if able to decrement
+ * refcount to 0
+ * @r: the refcount
+ * @lock: the mutex to be locked
+ *
+ * Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail
+ * to decrement when saturated at REFCOUNT_SATURATED.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before, and provides a control dependency such that free() must come after.
+ * See the comment on top.
+ *
+ * Return: true and hold mutex if able to decrement refcount to 0, false
+ * otherwise
+ */
+bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
+{
+ if (refcount_dec_not_one(r))
+ return false;
+
+ mutex_lock(lock);
+ if (!refcount_dec_and_test(r)) {
+ mutex_unlock(lock);
+ return false;
+ }
+
+ return true;
+}
+EXPORT_SYMBOL(refcount_dec_and_mutex_lock);
+
+/**
+ * refcount_dec_and_lock - return holding spinlock if able to decrement
+ * refcount to 0
+ * @r: the refcount
+ * @lock: the spinlock to be locked
+ *
+ * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
+ * decrement when saturated at REFCOUNT_SATURATED.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before, and provides a control dependency such that free() must come after.
+ * See the comment on top.
+ *
+ * Return: true and hold spinlock if able to decrement refcount to 0, false
+ * otherwise
+ */
+bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
+{
+ if (refcount_dec_not_one(r))
+ return false;
+
+ spin_lock(lock);
+ if (!refcount_dec_and_test(r)) {
+ spin_unlock(lock);
+ return false;
+ }
+
+ return true;
+}
+EXPORT_SYMBOL(refcount_dec_and_lock);
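+
+/*
+ * Illustrative sketch (not part of this file): the lock is only taken when
+ * the final reference is dropped, so the common path stays lock free. On a
+ * true return the caller holds the lock and must release it after
+ * unlinking the hypothetical object:
+ *
+ *	if (refcount_dec_and_lock(&obj->ref, &table_lock)) {
+ *		list_del(&obj->node);
+ *		spin_unlock(&table_lock);
+ *		kfree(obj);
+ *	}
+ */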
+
+/**
+ * refcount_dec_and_lock_irqsave - return holding spinlock with disabled
+ * interrupts if able to decrement refcount to 0
+ * @r: the refcount
+ * @lock: the spinlock to be locked
+ * @flags: saved IRQ-flags if the lock is acquired
+ *
+ * Same as refcount_dec_and_lock() above except that the spinlock is acquired
+ * with disabled interrupts.
+ *
+ * Return: true and hold spinlock if able to decrement refcount to 0, false
+ * otherwise
+ */
+bool refcount_dec_and_lock_irqsave(refcount_t *r, spinlock_t *lock,
+ unsigned long *flags)
+{
+ if (refcount_dec_not_one(r))
+ return false;
+
+ spin_lock_irqsave(lock, *flags);
+ if (!refcount_dec_and_test(r)) {
+ spin_unlock_irqrestore(lock, *flags);
+ return false;
+ }
+
+ return true;
+}
+EXPORT_SYMBOL(refcount_dec_and_lock_irqsave);
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
new file mode 100644
index 000000000..6ae2ba8e0
--- /dev/null
+++ b/lib/rhashtable.c
@@ -0,0 +1,1245 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Resizable, Scalable, Concurrent Hash Table
+ *
+ * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
+ * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
+ * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
+ *
+ * Code partially derived from nft_hash
+ * Rewritten with rehash code from br_multicast plus single list
+ * pointer as suggested by Josh Triplett
+ */
+
+#include <linux/atomic.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/log2.h>
+#include <linux/sched.h>
+#include <linux/rculist.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <linux/jhash.h>
+#include <linux/random.h>
+#include <linux/rhashtable.h>
+#include <linux/err.h>
+#include <linux/export.h>
+
+#define HASH_DEFAULT_SIZE 64UL
+#define HASH_MIN_SIZE 4U
+
+union nested_table {
+ union nested_table __rcu *table;
+ struct rhash_lock_head __rcu *bucket;
+};
+
+static u32 head_hashfn(struct rhashtable *ht,
+ const struct bucket_table *tbl,
+ const struct rhash_head *he)
+{
+ return rht_head_hashfn(ht, tbl, he, ht->p);
+}
+
+#ifdef CONFIG_PROVE_LOCKING
+#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))
+
+int lockdep_rht_mutex_is_held(struct rhashtable *ht)
+{
+ return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
+}
+EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);
+
+int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
+{
+ if (!debug_locks)
+ return 1;
+ if (unlikely(tbl->nest))
+ return 1;
+ return bit_spin_is_locked(0, (unsigned long *)&tbl->buckets[hash]);
+}
+EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
+#else
+#define ASSERT_RHT_MUTEX(HT)
+#endif
+
+static inline union nested_table *nested_table_top(
+ const struct bucket_table *tbl)
+{
+ /* The top-level bucket entry does not need RCU protection
+ * because it's set at the same time as tbl->nest.
+ */
+ return (void *)rcu_dereference_protected(tbl->buckets[0], 1);
+}
+
+static void nested_table_free(union nested_table *ntbl, unsigned int size)
+{
+ const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
+ const unsigned int len = 1 << shift;
+ unsigned int i;
+
+ ntbl = rcu_dereference_protected(ntbl->table, 1);
+ if (!ntbl)
+ return;
+
+ if (size > len) {
+ size >>= shift;
+ for (i = 0; i < len; i++)
+ nested_table_free(ntbl + i, size);
+ }
+
+ kfree(ntbl);
+}
+
+static void nested_bucket_table_free(const struct bucket_table *tbl)
+{
+ unsigned int size = tbl->size >> tbl->nest;
+ unsigned int len = 1 << tbl->nest;
+ union nested_table *ntbl;
+ unsigned int i;
+
+ ntbl = nested_table_top(tbl);
+
+ for (i = 0; i < len; i++)
+ nested_table_free(ntbl + i, size);
+
+ kfree(ntbl);
+}
+
+static void bucket_table_free(const struct bucket_table *tbl)
+{
+ if (tbl->nest)
+ nested_bucket_table_free(tbl);
+
+ kvfree(tbl);
+}
+
+static void bucket_table_free_rcu(struct rcu_head *head)
+{
+ bucket_table_free(container_of(head, struct bucket_table, rcu));
+}
+
+static union nested_table *nested_table_alloc(struct rhashtable *ht,
+ union nested_table __rcu **prev,
+ bool leaf)
+{
+ union nested_table *ntbl;
+ int i;
+
+ ntbl = rcu_dereference(*prev);
+ if (ntbl)
+ return ntbl;
+
+ ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC);
+
+ if (ntbl && leaf) {
+ for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0]); i++)
+ INIT_RHT_NULLS_HEAD(ntbl[i].bucket);
+ }
+
+ if (cmpxchg((union nested_table **)prev, NULL, ntbl) == NULL)
+ return ntbl;
+ /* Raced with another thread. */
+ kfree(ntbl);
+ return rcu_dereference(*prev);
+}
+
+static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht,
+ size_t nbuckets,
+ gfp_t gfp)
+{
+ const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
+ struct bucket_table *tbl;
+ size_t size;
+
+ if (nbuckets < (1 << (shift + 1)))
+ return NULL;
+
+ size = sizeof(*tbl) + sizeof(tbl->buckets[0]);
+
+ tbl = kzalloc(size, gfp);
+ if (!tbl)
+ return NULL;
+
+ if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets,
+ false)) {
+ kfree(tbl);
+ return NULL;
+ }
+
+ tbl->nest = (ilog2(nbuckets) - 1) % shift + 1;
+
+ return tbl;
+}
+
+static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
+ size_t nbuckets,
+ gfp_t gfp)
+{
+ struct bucket_table *tbl = NULL;
+ size_t size;
+ int i;
+ static struct lock_class_key __key;
+
+ tbl = kvzalloc(struct_size(tbl, buckets, nbuckets), gfp);
+
+ size = nbuckets;
+
+ if (tbl == NULL && (gfp & ~__GFP_NOFAIL) != GFP_KERNEL) {
+ tbl = nested_bucket_table_alloc(ht, nbuckets, gfp);
+ nbuckets = 0;
+ }
+
+ if (tbl == NULL)
+ return NULL;
+
+ lockdep_init_map(&tbl->dep_map, "rhashtable_bucket", &__key, 0);
+
+ tbl->size = size;
+
+ rcu_head_init(&tbl->rcu);
+ INIT_LIST_HEAD(&tbl->walkers);
+
+ tbl->hash_rnd = get_random_u32();
+
+ for (i = 0; i < nbuckets; i++)
+ INIT_RHT_NULLS_HEAD(tbl->buckets[i]);
+
+ return tbl;
+}
+
+static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
+ struct bucket_table *tbl)
+{
+ struct bucket_table *new_tbl;
+
+ do {
+ new_tbl = tbl;
+ tbl = rht_dereference_rcu(tbl->future_tbl, ht);
+ } while (tbl);
+
+ return new_tbl;
+}
+
+static int rhashtable_rehash_one(struct rhashtable *ht,
+ struct rhash_lock_head __rcu **bkt,
+ unsigned int old_hash)
+{
+ struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
+ struct bucket_table *new_tbl = rhashtable_last_table(ht, old_tbl);
+ int err = -EAGAIN;
+ struct rhash_head *head, *next, *entry;
+ struct rhash_head __rcu **pprev = NULL;
+ unsigned int new_hash;
+ unsigned long flags;
+
+ if (new_tbl->nest)
+ goto out;
+
+ err = -ENOENT;
+
+ rht_for_each_from(entry, rht_ptr(bkt, old_tbl, old_hash),
+ old_tbl, old_hash) {
+ err = 0;
+ next = rht_dereference_bucket(entry->next, old_tbl, old_hash);
+
+ if (rht_is_a_nulls(next))
+ break;
+
+ pprev = &entry->next;
+ }
+
+ if (err)
+ goto out;
+
+ new_hash = head_hashfn(ht, new_tbl, entry);
+
+ flags = rht_lock_nested(new_tbl, &new_tbl->buckets[new_hash],
+ SINGLE_DEPTH_NESTING);
+
+ head = rht_ptr(new_tbl->buckets + new_hash, new_tbl, new_hash);
+
+ RCU_INIT_POINTER(entry->next, head);
+
+ rht_assign_unlock(new_tbl, &new_tbl->buckets[new_hash], entry, flags);
+
+ if (pprev)
+ rcu_assign_pointer(*pprev, next);
+ else
+ /* Need to preserve the bit lock. */
+ rht_assign_locked(bkt, next);
+
+out:
+ return err;
+}
+
+static int rhashtable_rehash_chain(struct rhashtable *ht,
+ unsigned int old_hash)
+{
+ struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
+ struct rhash_lock_head __rcu **bkt = rht_bucket_var(old_tbl, old_hash);
+ unsigned long flags;
+ int err;
+
+ if (!bkt)
+ return 0;
+ flags = rht_lock(old_tbl, bkt);
+
+ while (!(err = rhashtable_rehash_one(ht, bkt, old_hash)))
+ ;
+
+ if (err == -ENOENT)
+ err = 0;
+ rht_unlock(old_tbl, bkt, flags);
+
+ return err;
+}
+
+static int rhashtable_rehash_attach(struct rhashtable *ht,
+ struct bucket_table *old_tbl,
+ struct bucket_table *new_tbl)
+{
+ /* Make insertions go into the new, empty table right away. Deletions
+ * and lookups will be attempted in both tables until we synchronize.
+ * As cmpxchg() provides strong barriers, we do not need
+ * rcu_assign_pointer().
+ */
+
+ if (cmpxchg((struct bucket_table **)&old_tbl->future_tbl, NULL,
+ new_tbl) != NULL)
+ return -EEXIST;
+
+ return 0;
+}
+
+static int rhashtable_rehash_table(struct rhashtable *ht)
+{
+ struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
+ struct bucket_table *new_tbl;
+ struct rhashtable_walker *walker;
+ unsigned int old_hash;
+ int err;
+
+ new_tbl = rht_dereference(old_tbl->future_tbl, ht);
+ if (!new_tbl)
+ return 0;
+
+ for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
+ err = rhashtable_rehash_chain(ht, old_hash);
+ if (err)
+ return err;
+ cond_resched();
+ }
+
+ /* Publish the new table pointer. */
+ rcu_assign_pointer(ht->tbl, new_tbl);
+
+ spin_lock(&ht->lock);
+ list_for_each_entry(walker, &old_tbl->walkers, list)
+ walker->tbl = NULL;
+
+ /* Wait for readers. All new readers will see the new
+ * table, and thus no references to the old table will
+ * remain.
+ * We do this inside the locked region so that
+ * rhashtable_walk_stop() can use rcu_head_after_call_rcu()
+ * to check if it should not re-link the table.
+ */
+ call_rcu(&old_tbl->rcu, bucket_table_free_rcu);
+ spin_unlock(&ht->lock);
+
+ return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
+}
+
+static int rhashtable_rehash_alloc(struct rhashtable *ht,
+ struct bucket_table *old_tbl,
+ unsigned int size)
+{
+ struct bucket_table *new_tbl;
+ int err;
+
+ ASSERT_RHT_MUTEX(ht);
+
+ new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
+ if (new_tbl == NULL)
+ return -ENOMEM;
+
+ err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
+ if (err)
+ bucket_table_free(new_tbl);
+
+ return err;
+}
+
+/**
+ * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
+ * @ht: the hash table to shrink
+ *
+ * This function shrinks the hash table to fit, i.e., to the smallest
+ * size that would not cause it to expand right away automatically.
+ *
+ * The caller must ensure that no concurrent resizing occurs by holding
+ * ht->mutex.
+ *
+ * The caller must ensure that no concurrent table mutations take place.
+ * It is however valid to have concurrent lookups if they are RCU protected.
+ *
+ * It is valid to have concurrent insertions and deletions protected by per
+ * bucket locks or concurrent RCU protected lookups and traversals.
+ */
+static int rhashtable_shrink(struct rhashtable *ht)
+{
+ struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
+ unsigned int nelems = atomic_read(&ht->nelems);
+ unsigned int size = 0;
+
+ if (nelems)
+ size = roundup_pow_of_two(nelems * 3 / 2);
+ if (size < ht->p.min_size)
+ size = ht->p.min_size;
+
+ if (old_tbl->size <= size)
+ return 0;
+
+ if (rht_dereference(old_tbl->future_tbl, ht))
+ return -EEXIST;
+
+ return rhashtable_rehash_alloc(ht, old_tbl, size);
+}
+
+static void rht_deferred_worker(struct work_struct *work)
+{
+ struct rhashtable *ht;
+ struct bucket_table *tbl;
+ int err = 0;
+
+ ht = container_of(work, struct rhashtable, run_work);
+ mutex_lock(&ht->mutex);
+
+ tbl = rht_dereference(ht->tbl, ht);
+ tbl = rhashtable_last_table(ht, tbl);
+
+ if (rht_grow_above_75(ht, tbl))
+ err = rhashtable_rehash_alloc(ht, tbl, tbl->size * 2);
+ else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
+ err = rhashtable_shrink(ht);
+ else if (tbl->nest)
+ err = rhashtable_rehash_alloc(ht, tbl, tbl->size);
+
+ if (!err || err == -EEXIST) {
+ int nerr;
+
+ nerr = rhashtable_rehash_table(ht);
+ err = err ?: nerr;
+ }
+
+ mutex_unlock(&ht->mutex);
+
+ if (err)
+ schedule_work(&ht->run_work);
+}
+
+static int rhashtable_insert_rehash(struct rhashtable *ht,
+ struct bucket_table *tbl)
+{
+ struct bucket_table *old_tbl;
+ struct bucket_table *new_tbl;
+ unsigned int size;
+ int err;
+
+ old_tbl = rht_dereference_rcu(ht->tbl, ht);
+
+ size = tbl->size;
+
+ err = -EBUSY;
+
+ if (rht_grow_above_75(ht, tbl))
+ size *= 2;
+ /* Do not schedule more than one rehash */
+ else if (old_tbl != tbl)
+ goto fail;
+
+ err = -ENOMEM;
+
+ new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC | __GFP_NOWARN);
+ if (new_tbl == NULL)
+ goto fail;
+
+ err = rhashtable_rehash_attach(ht, tbl, new_tbl);
+ if (err) {
+ bucket_table_free(new_tbl);
+ if (err == -EEXIST)
+ err = 0;
+ } else
+ schedule_work(&ht->run_work);
+
+ return err;
+
+fail:
+ /* Do not fail the insert if someone else did a rehash. */
+ if (likely(rcu_access_pointer(tbl->future_tbl)))
+ return 0;
+
+ /* Schedule async rehash to retry allocation in process context. */
+ if (err == -ENOMEM)
+ schedule_work(&ht->run_work);
+
+ return err;
+}
+
+static void *rhashtable_lookup_one(struct rhashtable *ht,
+ struct rhash_lock_head __rcu **bkt,
+ struct bucket_table *tbl, unsigned int hash,
+ const void *key, struct rhash_head *obj)
+{
+ struct rhashtable_compare_arg arg = {
+ .ht = ht,
+ .key = key,
+ };
+ struct rhash_head __rcu **pprev = NULL;
+ struct rhash_head *head;
+ int elasticity;
+
+ elasticity = RHT_ELASTICITY;
+ rht_for_each_from(head, rht_ptr(bkt, tbl, hash), tbl, hash) {
+ struct rhlist_head *list;
+ struct rhlist_head *plist;
+
+ elasticity--;
+ if (!key ||
+ (ht->p.obj_cmpfn ?
+ ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) :
+ rhashtable_compare(&arg, rht_obj(ht, head)))) {
+ pprev = &head->next;
+ continue;
+ }
+
+ if (!ht->rhlist)
+ return rht_obj(ht, head);
+
+ list = container_of(obj, struct rhlist_head, rhead);
+ plist = container_of(head, struct rhlist_head, rhead);
+
+ RCU_INIT_POINTER(list->next, plist);
+ head = rht_dereference_bucket(head->next, tbl, hash);
+ RCU_INIT_POINTER(list->rhead.next, head);
+ if (pprev)
+ rcu_assign_pointer(*pprev, obj);
+ else
+ /* Need to preserve the bit lock */
+ rht_assign_locked(bkt, obj);
+
+ return NULL;
+ }
+
+ if (elasticity <= 0)
+ return ERR_PTR(-EAGAIN);
+
+ return ERR_PTR(-ENOENT);
+}
+
+static struct bucket_table *rhashtable_insert_one(
+ struct rhashtable *ht, struct rhash_lock_head __rcu **bkt,
+ struct bucket_table *tbl, unsigned int hash, struct rhash_head *obj,
+ void *data)
+{
+ struct bucket_table *new_tbl;
+ struct rhash_head *head;
+
+ if (!IS_ERR_OR_NULL(data))
+ return ERR_PTR(-EEXIST);
+
+ if (PTR_ERR(data) != -EAGAIN && PTR_ERR(data) != -ENOENT)
+ return ERR_CAST(data);
+
+ new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
+ if (new_tbl)
+ return new_tbl;
+
+ if (PTR_ERR(data) != -ENOENT)
+ return ERR_CAST(data);
+
+ if (unlikely(rht_grow_above_max(ht, tbl)))
+ return ERR_PTR(-E2BIG);
+
+ if (unlikely(rht_grow_above_100(ht, tbl)))
+ return ERR_PTR(-EAGAIN);
+
+ head = rht_ptr(bkt, tbl, hash);
+
+ RCU_INIT_POINTER(obj->next, head);
+ if (ht->rhlist) {
+ struct rhlist_head *list;
+
+ list = container_of(obj, struct rhlist_head, rhead);
+ RCU_INIT_POINTER(list->next, NULL);
+ }
+
+ /* bkt is always the head of the list, so it holds
+ * the lock, which we need to preserve
+ */
+ rht_assign_locked(bkt, obj);
+
+ atomic_inc(&ht->nelems);
+ if (rht_grow_above_75(ht, tbl))
+ schedule_work(&ht->run_work);
+
+ return NULL;
+}
+
+static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
+ struct rhash_head *obj)
+{
+ struct bucket_table *new_tbl;
+ struct bucket_table *tbl;
+ struct rhash_lock_head __rcu **bkt;
+ unsigned long flags;
+ unsigned int hash;
+ void *data;
+
+ new_tbl = rcu_dereference(ht->tbl);
+
+ do {
+ tbl = new_tbl;
+ hash = rht_head_hashfn(ht, tbl, obj, ht->p);
+ if (rcu_access_pointer(tbl->future_tbl))
+ /* Failure is OK */
+ bkt = rht_bucket_var(tbl, hash);
+ else
+ bkt = rht_bucket_insert(ht, tbl, hash);
+ if (bkt == NULL) {
+ new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
+ data = ERR_PTR(-EAGAIN);
+ } else {
+ flags = rht_lock(tbl, bkt);
+ data = rhashtable_lookup_one(ht, bkt, tbl,
+ hash, key, obj);
+ new_tbl = rhashtable_insert_one(ht, bkt, tbl,
+ hash, obj, data);
+ if (PTR_ERR(new_tbl) != -EEXIST)
+ data = ERR_CAST(new_tbl);
+
+ rht_unlock(tbl, bkt, flags);
+ }
+ } while (!IS_ERR_OR_NULL(new_tbl));
+
+ if (PTR_ERR(data) == -EAGAIN)
+ data = ERR_PTR(rhashtable_insert_rehash(ht, tbl) ?:
+ -EAGAIN);
+
+ return data;
+}
+
+void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
+ struct rhash_head *obj)
+{
+ void *data;
+
+ do {
+ rcu_read_lock();
+ data = rhashtable_try_insert(ht, key, obj);
+ rcu_read_unlock();
+ } while (PTR_ERR(data) == -EAGAIN);
+
+ return data;
+}
+EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
+
+/**
+ * rhashtable_walk_enter - Initialise an iterator
+ * @ht: Table to walk over
+ * @iter: Hash table iterator
+ *
+ * This function prepares a hash table walk.
+ *
+ * Note that if you restart a walk after rhashtable_walk_stop you
+ * may see the same object twice. Also, you may miss objects if
+ * there are removals in between rhashtable_walk_stop and the next
+ * call to rhashtable_walk_start.
+ *
+ * For a completely stable walk you should construct your own data
+ * structure outside the hash table.
+ *
+ * This function may be called from any process context, including
+ * non-preemptable context, but cannot be called from softirq or
+ * hardirq context.
+ *
+ * You must call rhashtable_walk_exit after this function returns.
+ */
+void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter)
+{
+ iter->ht = ht;
+ iter->p = NULL;
+ iter->slot = 0;
+ iter->skip = 0;
+ iter->end_of_table = 0;
+
+ spin_lock(&ht->lock);
+ iter->walker.tbl =
+ rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock));
+ list_add(&iter->walker.list, &iter->walker.tbl->walkers);
+ spin_unlock(&ht->lock);
+}
+EXPORT_SYMBOL_GPL(rhashtable_walk_enter);
+
+/**
+ * rhashtable_walk_exit - Free an iterator
+ * @iter: Hash table iterator
+ *
+ * This function frees resources allocated by rhashtable_walk_enter.
+ */
+void rhashtable_walk_exit(struct rhashtable_iter *iter)
+{
+ spin_lock(&iter->ht->lock);
+ if (iter->walker.tbl)
+ list_del(&iter->walker.list);
+ spin_unlock(&iter->ht->lock);
+}
+EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
+
+/**
+ * rhashtable_walk_start_check - Start a hash table walk
+ * @iter: Hash table iterator
+ *
+ * Start a hash table walk at the current iterator position. Note that we take
+ * the RCU lock in all cases including when we return an error. So you must
+ * always call rhashtable_walk_stop to clean up.
+ *
+ * Returns zero if successful.
+ *
+ * Returns -EAGAIN if resize event occurred. Note that the iterator
+ * will rewind back to the beginning and you may use it immediately
+ * by calling rhashtable_walk_next.
+ *
+ * rhashtable_walk_start is defined as an inline variant that returns
+ * void. This is preferred in cases where the caller would ignore
+ * resize events and always continue.
+ */
+int rhashtable_walk_start_check(struct rhashtable_iter *iter)
+ __acquires(RCU)
+{
+ struct rhashtable *ht = iter->ht;
+ bool rhlist = ht->rhlist;
+
+ rcu_read_lock();
+
+ spin_lock(&ht->lock);
+ if (iter->walker.tbl)
+ list_del(&iter->walker.list);
+ spin_unlock(&ht->lock);
+
+ if (iter->end_of_table)
+ return 0;
+ if (!iter->walker.tbl) {
+ iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
+ iter->slot = 0;
+ iter->skip = 0;
+ return -EAGAIN;
+ }
+
+ if (iter->p && !rhlist) {
+ /*
+ * We need to validate that 'p' is still in the table, and
+ * if so, update 'skip'
+ */
+ struct rhash_head *p;
+ int skip = 0;
+ rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
+ skip++;
+ if (p == iter->p) {
+ iter->skip = skip;
+ goto found;
+ }
+ }
+ iter->p = NULL;
+ } else if (iter->p && rhlist) {
+ /* Need to validate that 'list' is still in the table, and
+ * if so, update 'skip' and 'p'.
+ */
+ struct rhash_head *p;
+ struct rhlist_head *list;
+ int skip = 0;
+ rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
+ for (list = container_of(p, struct rhlist_head, rhead);
+ list;
+ list = rcu_dereference(list->next)) {
+ skip++;
+ if (list == iter->list) {
+ iter->p = p;
+ iter->skip = skip;
+ goto found;
+ }
+ }
+ }
+ iter->p = NULL;
+ }
+found:
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rhashtable_walk_start_check);
+
+/**
+ * __rhashtable_walk_find_next - Find the next element in a table (or the first
+ * one in case of a new walk).
+ *
+ * @iter: Hash table iterator
+ *
+ * Returns the found object or NULL when the end of the table is reached.
+ *
+ * Returns -EAGAIN if resize event occurred.
+ */
+static void *__rhashtable_walk_find_next(struct rhashtable_iter *iter)
+{
+ struct bucket_table *tbl = iter->walker.tbl;
+ struct rhlist_head *list = iter->list;
+ struct rhashtable *ht = iter->ht;
+ struct rhash_head *p = iter->p;
+ bool rhlist = ht->rhlist;
+
+ if (!tbl)
+ return NULL;
+
+ for (; iter->slot < tbl->size; iter->slot++) {
+ int skip = iter->skip;
+
+ rht_for_each_rcu(p, tbl, iter->slot) {
+ if (rhlist) {
+ list = container_of(p, struct rhlist_head,
+ rhead);
+ do {
+ if (!skip)
+ goto next;
+ skip--;
+ list = rcu_dereference(list->next);
+ } while (list);
+
+ continue;
+ }
+ if (!skip)
+ break;
+ skip--;
+ }
+
+next:
+ if (!rht_is_a_nulls(p)) {
+ iter->skip++;
+ iter->p = p;
+ iter->list = list;
+ return rht_obj(ht, rhlist ? &list->rhead : p);
+ }
+
+ iter->skip = 0;
+ }
+
+ iter->p = NULL;
+
+ /* Ensure we see any new tables. */
+ smp_rmb();
+
+ iter->walker.tbl = rht_dereference_rcu(tbl->future_tbl, ht);
+ if (iter->walker.tbl) {
+ iter->slot = 0;
+ iter->skip = 0;
+ return ERR_PTR(-EAGAIN);
+ } else {
+ iter->end_of_table = true;
+ }
+
+ return NULL;
+}
+
+/**
+ * rhashtable_walk_next - Return the next object and advance the iterator
+ * @iter: Hash table iterator
+ *
+ * Note that you must call rhashtable_walk_stop when you are finished
+ * with the walk.
+ *
+ * Returns the next object or NULL when the end of the table is reached.
+ *
+ * Returns -EAGAIN if resize event occurred. Note that the iterator
+ * will rewind back to the beginning and you may continue to use it.
+ */
+void *rhashtable_walk_next(struct rhashtable_iter *iter)
+{
+ struct rhlist_head *list = iter->list;
+ struct rhashtable *ht = iter->ht;
+ struct rhash_head *p = iter->p;
+ bool rhlist = ht->rhlist;
+
+ if (p) {
+ if (!rhlist || !(list = rcu_dereference(list->next))) {
+ p = rcu_dereference(p->next);
+ list = container_of(p, struct rhlist_head, rhead);
+ }
+ if (!rht_is_a_nulls(p)) {
+ iter->skip++;
+ iter->p = p;
+ iter->list = list;
+ return rht_obj(ht, rhlist ? &list->rhead : p);
+ }
+
+ /* At the end of this slot, switch to next one and then find
+ * next entry from that point.
+ */
+ iter->skip = 0;
+ iter->slot++;
+ }
+
+ return __rhashtable_walk_find_next(iter);
+}
+EXPORT_SYMBOL_GPL(rhashtable_walk_next);
+
+/**
+ * rhashtable_walk_peek - Return the next object but don't advance the iterator
+ * @iter: Hash table iterator
+ *
+ * Returns the next object or NULL when the end of the table is reached.
+ *
+ * Returns -EAGAIN if resize event occurred. Note that the iterator
+ * will rewind back to the beginning and you may continue to use it.
+ */
+void *rhashtable_walk_peek(struct rhashtable_iter *iter)
+{
+ struct rhlist_head *list = iter->list;
+ struct rhashtable *ht = iter->ht;
+ struct rhash_head *p = iter->p;
+
+ if (p)
+ return rht_obj(ht, ht->rhlist ? &list->rhead : p);
+
+ /* No object found in current iter, find next one in the table. */
+
+ if (iter->skip) {
+ /* A nonzero skip value points to the next entry in the table
+ * beyond the last one that was found. Decrement skip so
+ * we find the current value. __rhashtable_walk_find_next
+ * will restore the original value of skip assuming that
+ * the table hasn't changed.
+ */
+ iter->skip--;
+ }
+
+ return __rhashtable_walk_find_next(iter);
+}
+EXPORT_SYMBOL_GPL(rhashtable_walk_peek);
+
+/**
+ * rhashtable_walk_stop - Finish a hash table walk
+ * @iter: Hash table iterator
+ *
+ * Finish a hash table walk. Does not reset the iterator to the start of the
+ * hash table.
+ */
+void rhashtable_walk_stop(struct rhashtable_iter *iter)
+ __releases(RCU)
+{
+ struct rhashtable *ht;
+ struct bucket_table *tbl = iter->walker.tbl;
+
+ if (!tbl)
+ goto out;
+
+ ht = iter->ht;
+
+ spin_lock(&ht->lock);
+ if (rcu_head_after_call_rcu(&tbl->rcu, bucket_table_free_rcu))
+ /* This bucket table is being freed, don't re-link it. */
+ iter->walker.tbl = NULL;
+ else
+ list_add(&iter->walker.list, &tbl->walkers);
+ spin_unlock(&ht->lock);
+
+out:
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
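+
+/*
+ * Illustrative sketch (not part of this file): a complete walk. The only
+ * error rhashtable_walk_next() can return is -EAGAIN, after which the
+ * rewound iterator may simply be used again, as the kernel-doc above
+ * states; use_object() is a hypothetical consumer:
+ *
+ *	rhashtable_walk_enter(ht, &iter);
+ *	rhashtable_walk_start(&iter);
+ *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
+ *		if (IS_ERR(obj))
+ *			continue;
+ *		use_object(obj);
+ *	}
+ *	rhashtable_walk_stop(&iter);
+ *	rhashtable_walk_exit(&iter);
+ */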
+
+static size_t rounded_hashtable_size(const struct rhashtable_params *params)
+{
+ size_t retsize;
+
+ if (params->nelem_hint)
+ retsize = max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
+ (unsigned long)params->min_size);
+ else
+ retsize = max(HASH_DEFAULT_SIZE,
+ (unsigned long)params->min_size);
+
+ return retsize;
+}
+
+static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
+{
+ return jhash2(key, length, seed);
+}
+
+/**
+ * rhashtable_init - initialize a new hash table
+ * @ht: hash table to be initialized
+ * @params: configuration parameters
+ *
+ * Initializes a new hash table based on the provided configuration
+ * parameters. A table can be configured either with a variable or
+ * fixed length key:
+ *
+ * Configuration Example 1: Fixed length keys
+ * struct test_obj {
+ * int key;
+ * void * my_member;
+ * struct rhash_head node;
+ * };
+ *
+ * struct rhashtable_params params = {
+ * .head_offset = offsetof(struct test_obj, node),
+ * .key_offset = offsetof(struct test_obj, key),
+ * .key_len = sizeof(int),
+ * .hashfn = jhash,
+ * };
+ *
+ * Configuration Example 2: Variable length keys
+ * struct test_obj {
+ * [...]
+ * struct rhash_head node;
+ * };
+ *
+ * u32 my_hash_fn(const void *data, u32 len, u32 seed)
+ * {
+ * struct test_obj *obj = data;
+ *
+ * return [... hash ...];
+ * }
+ *
+ * struct rhashtable_params params = {
+ * .head_offset = offsetof(struct test_obj, node),
+ * .hashfn = jhash,
+ * .obj_hashfn = my_hash_fn,
+ * };
+ */
+int rhashtable_init(struct rhashtable *ht,
+ const struct rhashtable_params *params)
+{
+ struct bucket_table *tbl;
+ size_t size;
+
+ if ((!params->key_len && !params->obj_hashfn) ||
+ (params->obj_hashfn && !params->obj_cmpfn))
+ return -EINVAL;
+
+ memset(ht, 0, sizeof(*ht));
+ mutex_init(&ht->mutex);
+ spin_lock_init(&ht->lock);
+ memcpy(&ht->p, params, sizeof(*params));
+
+ if (params->min_size)
+ ht->p.min_size = roundup_pow_of_two(params->min_size);
+
+ /* Cap total entries at 2^31 to avoid nelems overflow. */
+ ht->max_elems = 1u << 31;
+
+ if (params->max_size) {
+ ht->p.max_size = rounddown_pow_of_two(params->max_size);
+ if (ht->p.max_size < ht->max_elems / 2)
+ ht->max_elems = ht->p.max_size * 2;
+ }
+
+ ht->p.min_size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);
+
+ size = rounded_hashtable_size(&ht->p);
+
+ ht->key_len = ht->p.key_len;
+ if (!params->hashfn) {
+ ht->p.hashfn = jhash;
+
+ if (!(ht->key_len & (sizeof(u32) - 1))) {
+ ht->key_len /= sizeof(u32);
+ ht->p.hashfn = rhashtable_jhash2;
+ }
+ }
+
+ /*
+ * This is API initialization and thus we need to guarantee the
+ * initial rhashtable allocation. Upon failure, retry with the
+ * smallest possible size with __GFP_NOFAIL semantics.
+ */
+ tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
+ if (unlikely(tbl == NULL)) {
+ size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);
+ tbl = bucket_table_alloc(ht, size, GFP_KERNEL | __GFP_NOFAIL);
+ }
+
+ atomic_set(&ht->nelems, 0);
+
+ RCU_INIT_POINTER(ht->tbl, tbl);
+
+ INIT_WORK(&ht->run_work, rht_deferred_worker);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rhashtable_init);
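+
+/*
+ * Illustrative sketch (an editor's example, not part of the upstream file)
+ * of initializing and using a table with the fixed-length-key parameters
+ * from Configuration Example 1 above; "obj" is a hypothetical, already
+ * allocated struct test_obj, and lookups must run under rcu_read_lock().
+ *
+ *	static const struct rhashtable_params test_rht_params = {
+ *		.head_offset = offsetof(struct test_obj, node),
+ *		.key_offset = offsetof(struct test_obj, key),
+ *		.key_len = sizeof(int),
+ *	};
+ *	struct rhashtable ht;
+ *	int key = 1, err;
+ *
+ *	err = rhashtable_init(&ht, &test_rht_params);
+ *	if (!err)
+ *		err = rhashtable_insert_fast(&ht, &obj->node, test_rht_params);
+ *	obj = rhashtable_lookup_fast(&ht, &key, test_rht_params);
+ */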
+
+/**
+ * rhltable_init - initialize a new hash list table
+ * @hlt: hash list table to be initialized
+ * @params: configuration parameters
+ *
+ * Initializes a new hash list table.
+ *
+ * See documentation for rhashtable_init.
+ */
+int rhltable_init(struct rhltable *hlt, const struct rhashtable_params *params)
+{
+ int err;
+
+ err = rhashtable_init(&hlt->ht, params);
+ hlt->ht.rhlist = true;
+ return err;
+}
+EXPORT_SYMBOL_GPL(rhltable_init);
+
+static void rhashtable_free_one(struct rhashtable *ht, struct rhash_head *obj,
+ void (*free_fn)(void *ptr, void *arg),
+ void *arg)
+{
+ struct rhlist_head *list;
+
+ if (!ht->rhlist) {
+ free_fn(rht_obj(ht, obj), arg);
+ return;
+ }
+
+ list = container_of(obj, struct rhlist_head, rhead);
+ do {
+ obj = &list->rhead;
+ list = rht_dereference(list->next, ht);
+ free_fn(rht_obj(ht, obj), arg);
+ } while (list);
+}
+
+/**
+ * rhashtable_free_and_destroy - free elements and destroy hash table
+ * @ht: the hash table to destroy
+ * @free_fn: callback to release resources of element
+ * @arg: pointer passed to free_fn
+ *
+ * Stops an eventual async resize. If defined, invokes free_fn for each
+ * element to release its resources. Please note that RCU protected
+ * readers may still be accessing the elements, so resources must be
+ * released in a compatible manner. Then frees the bucket array.
+ *
+ * This function will eventually sleep to wait for an async resize
+ * to complete. The caller is responsible for ensuring that no further
+ * write operations occur in parallel.
+ */
+void rhashtable_free_and_destroy(struct rhashtable *ht,
+ void (*free_fn)(void *ptr, void *arg),
+ void *arg)
+{
+ struct bucket_table *tbl, *next_tbl;
+ unsigned int i;
+
+ cancel_work_sync(&ht->run_work);
+
+ mutex_lock(&ht->mutex);
+ tbl = rht_dereference(ht->tbl, ht);
+restart:
+ if (free_fn) {
+ for (i = 0; i < tbl->size; i++) {
+ struct rhash_head *pos, *next;
+
+ cond_resched();
+ for (pos = rht_ptr_exclusive(rht_bucket(tbl, i)),
+ next = !rht_is_a_nulls(pos) ?
+ rht_dereference(pos->next, ht) : NULL;
+ !rht_is_a_nulls(pos);
+ pos = next,
+ next = !rht_is_a_nulls(pos) ?
+ rht_dereference(pos->next, ht) : NULL)
+ rhashtable_free_one(ht, pos, free_fn, arg);
+ }
+ }
+
+ next_tbl = rht_dereference(tbl->future_tbl, ht);
+ bucket_table_free(tbl);
+ if (next_tbl) {
+ tbl = next_tbl;
+ goto restart;
+ }
+ mutex_unlock(&ht->mutex);
+}
+EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);
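+
+/*
+ * Illustrative sketch (an editor's example, not part of the upstream file):
+ * a minimal free_fn that releases each element with kfree(), assuming the
+ * elements were allocated with kmalloc().
+ *
+ *	static void test_free_fn(void *ptr, void *arg)
+ *	{
+ *		kfree(ptr);
+ *	}
+ *
+ *	rhashtable_free_and_destroy(&ht, test_free_fn, NULL);
+ */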
+
+void rhashtable_destroy(struct rhashtable *ht)
+{
+ return rhashtable_free_and_destroy(ht, NULL, NULL);
+}
+EXPORT_SYMBOL_GPL(rhashtable_destroy);
+
+struct rhash_lock_head __rcu **__rht_bucket_nested(
+ const struct bucket_table *tbl, unsigned int hash)
+{
+ const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
+ unsigned int index = hash & ((1 << tbl->nest) - 1);
+ unsigned int size = tbl->size >> tbl->nest;
+ unsigned int subhash = hash;
+ union nested_table *ntbl;
+
+ ntbl = nested_table_top(tbl);
+ ntbl = rht_dereference_bucket_rcu(ntbl[index].table, tbl, hash);
+ subhash >>= tbl->nest;
+
+ while (ntbl && size > (1 << shift)) {
+ index = subhash & ((1 << shift) - 1);
+ ntbl = rht_dereference_bucket_rcu(ntbl[index].table,
+ tbl, hash);
+ size >>= shift;
+ subhash >>= shift;
+ }
+
+ if (!ntbl)
+ return NULL;
+
+ return &ntbl[subhash].bucket;
+}
+EXPORT_SYMBOL_GPL(__rht_bucket_nested);
+
+struct rhash_lock_head __rcu **rht_bucket_nested(
+ const struct bucket_table *tbl, unsigned int hash)
+{
+ static struct rhash_lock_head __rcu *rhnull;
+
+ if (!rhnull)
+ INIT_RHT_NULLS_HEAD(rhnull);
+ return __rht_bucket_nested(tbl, hash) ?: &rhnull;
+}
+EXPORT_SYMBOL_GPL(rht_bucket_nested);
+
+struct rhash_lock_head __rcu **rht_bucket_nested_insert(
+ struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash)
+{
+ const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
+ unsigned int index = hash & ((1 << tbl->nest) - 1);
+ unsigned int size = tbl->size >> tbl->nest;
+ union nested_table *ntbl;
+
+ ntbl = nested_table_top(tbl);
+ hash >>= tbl->nest;
+ ntbl = nested_table_alloc(ht, &ntbl[index].table,
+ size <= (1 << shift));
+
+ while (ntbl && size > (1 << shift)) {
+ index = hash & ((1 << shift) - 1);
+ size >>= shift;
+ hash >>= shift;
+ ntbl = nested_table_alloc(ht, &ntbl[index].table,
+ size <= (1 << shift));
+ }
+
+ if (!ntbl)
+ return NULL;
+
+ return &ntbl[hash].bucket;
+}
+EXPORT_SYMBOL_GPL(rht_bucket_nested_insert);
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
new file mode 100644
index 000000000..d0a5081df
--- /dev/null
+++ b/lib/sbitmap.c
@@ -0,0 +1,769 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2016 Facebook
+ * Copyright (C) 2013-2014 Jens Axboe
+ */
+
+#include <linux/sched.h>
+#include <linux/random.h>
+#include <linux/sbitmap.h>
+#include <linux/seq_file.h>
+
+static int init_alloc_hint(struct sbitmap *sb, gfp_t flags)
+{
+ unsigned depth = sb->depth;
+
+ sb->alloc_hint = alloc_percpu_gfp(unsigned int, flags);
+ if (!sb->alloc_hint)
+ return -ENOMEM;
+
+ if (depth && !sb->round_robin) {
+ int i;
+
+ for_each_possible_cpu(i)
+ *per_cpu_ptr(sb->alloc_hint, i) = get_random_u32_below(depth);
+ }
+ return 0;
+}
+
+static inline unsigned update_alloc_hint_before_get(struct sbitmap *sb,
+ unsigned int depth)
+{
+ unsigned hint;
+
+ hint = this_cpu_read(*sb->alloc_hint);
+ if (unlikely(hint >= depth)) {
+ hint = depth ? get_random_u32_below(depth) : 0;
+ this_cpu_write(*sb->alloc_hint, hint);
+ }
+
+ return hint;
+}
+
+static inline void update_alloc_hint_after_get(struct sbitmap *sb,
+ unsigned int depth,
+ unsigned int hint,
+ unsigned int nr)
+{
+ if (nr == -1) {
+ /* If the map is full, a hint won't do us much good. */
+ this_cpu_write(*sb->alloc_hint, 0);
+ } else if (nr == hint || unlikely(sb->round_robin)) {
+ /* Only update the hint if we used it. */
+ hint = nr + 1;
+ if (hint >= depth - 1)
+ hint = 0;
+ this_cpu_write(*sb->alloc_hint, hint);
+ }
+}
+
+/*
+ * See if we have deferred clears that we can batch move
+ */
+static inline bool sbitmap_deferred_clear(struct sbitmap_word *map)
+{
+ unsigned long mask;
+
+ if (!READ_ONCE(map->cleared))
+ return false;
+
+ /*
+ * First get a stable cleared mask, setting the old mask to 0.
+ */
+ mask = xchg(&map->cleared, 0);
+
+ /*
+ * Now clear the masked bits in our free word
+ */
+ atomic_long_andnot(mask, (atomic_long_t *)&map->word);
+ BUILD_BUG_ON(sizeof(atomic_long_t) != sizeof(map->word));
+ return true;
+}
+
+int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
+ gfp_t flags, int node, bool round_robin,
+ bool alloc_hint)
+{
+ unsigned int bits_per_word;
+
+ if (shift < 0)
+ shift = sbitmap_calculate_shift(depth);
+
+ bits_per_word = 1U << shift;
+ if (bits_per_word > BITS_PER_LONG)
+ return -EINVAL;
+
+ sb->shift = shift;
+ sb->depth = depth;
+ sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);
+ sb->round_robin = round_robin;
+
+ if (depth == 0) {
+ sb->map = NULL;
+ return 0;
+ }
+
+ if (alloc_hint) {
+ if (init_alloc_hint(sb, flags))
+ return -ENOMEM;
+ } else {
+ sb->alloc_hint = NULL;
+ }
+
+ sb->map = kvzalloc_node(sb->map_nr * sizeof(*sb->map), flags, node);
+ if (!sb->map) {
+ free_percpu(sb->alloc_hint);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sbitmap_init_node);
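+
+/*
+ * Illustrative sketch (an editor's example, not part of the upstream file):
+ * set up a 128-bit sbitmap with an automatically chosen shift, allocate one
+ * bit and free it again; sbitmap_get() returns -1 when the map is full.
+ *
+ *	struct sbitmap sb;
+ *	int nr;
+ *
+ *	if (sbitmap_init_node(&sb, 128, -1, GFP_KERNEL, NUMA_NO_NODE,
+ *			      false, true))
+ *		return -ENOMEM;
+ *	nr = sbitmap_get(&sb);
+ *	if (nr >= 0)
+ *		sbitmap_clear_bit(&sb, nr);
+ *	sbitmap_free(&sb);
+ */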
+
+void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
+{
+ unsigned int bits_per_word = 1U << sb->shift;
+ unsigned int i;
+
+ for (i = 0; i < sb->map_nr; i++)
+ sbitmap_deferred_clear(&sb->map[i]);
+
+ sb->depth = depth;
+ sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);
+}
+EXPORT_SYMBOL_GPL(sbitmap_resize);
+
+static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
+ unsigned int hint, bool wrap)
+{
+ int nr;
+
+ /* don't wrap if starting from 0 */
+ wrap = wrap && hint;
+
+ while (1) {
+ nr = find_next_zero_bit(word, depth, hint);
+ if (unlikely(nr >= depth)) {
+ /*
+ * We started with an offset, and we didn't reset the
+ * offset to 0 in a failure case, so start from 0 to
+ * exhaust the map.
+ */
+ if (hint && wrap) {
+ hint = 0;
+ continue;
+ }
+ return -1;
+ }
+
+ if (!test_and_set_bit_lock(nr, word))
+ break;
+
+ hint = nr + 1;
+ if (hint >= depth - 1)
+ hint = 0;
+ }
+
+ return nr;
+}
+
+static int sbitmap_find_bit_in_word(struct sbitmap_word *map,
+ unsigned int depth,
+ unsigned int alloc_hint,
+ bool wrap)
+{
+ int nr;
+
+ do {
+ nr = __sbitmap_get_word(&map->word, depth,
+ alloc_hint, wrap);
+ if (nr != -1)
+ break;
+ if (!sbitmap_deferred_clear(map))
+ break;
+ } while (1);
+
+ return nr;
+}
+
+static int sbitmap_find_bit(struct sbitmap *sb,
+ unsigned int depth,
+ unsigned int index,
+ unsigned int alloc_hint,
+ bool wrap)
+{
+ unsigned int i;
+ int nr = -1;
+
+ for (i = 0; i < sb->map_nr; i++) {
+ nr = sbitmap_find_bit_in_word(&sb->map[index],
+ min_t(unsigned int,
+ __map_depth(sb, index),
+ depth),
+ alloc_hint, wrap);
+
+ if (nr != -1) {
+ nr += index << sb->shift;
+ break;
+ }
+
+ /* Jump to next index. */
+ alloc_hint = 0;
+ if (++index >= sb->map_nr)
+ index = 0;
+ }
+
+ return nr;
+}
+
+static int __sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint)
+{
+ unsigned int index;
+
+ index = SB_NR_TO_INDEX(sb, alloc_hint);
+
+ /*
+ * Unless we're doing round robin tag allocation, just use the
+ * alloc_hint to find the right word index. No point in looping
+ * twice in find_next_zero_bit() for that case.
+ */
+ if (sb->round_robin)
+ alloc_hint = SB_NR_TO_BIT(sb, alloc_hint);
+ else
+ alloc_hint = 0;
+
+ return sbitmap_find_bit(sb, UINT_MAX, index, alloc_hint,
+ !sb->round_robin);
+}
+
+int sbitmap_get(struct sbitmap *sb)
+{
+ int nr;
+ unsigned int hint, depth;
+
+ if (WARN_ON_ONCE(unlikely(!sb->alloc_hint)))
+ return -1;
+
+ depth = READ_ONCE(sb->depth);
+ hint = update_alloc_hint_before_get(sb, depth);
+ nr = __sbitmap_get(sb, hint);
+ update_alloc_hint_after_get(sb, depth, hint, nr);
+
+ return nr;
+}
+EXPORT_SYMBOL_GPL(sbitmap_get);
+
+static int __sbitmap_get_shallow(struct sbitmap *sb,
+ unsigned int alloc_hint,
+ unsigned long shallow_depth)
+{
+ unsigned int index;
+
+ index = SB_NR_TO_INDEX(sb, alloc_hint);
+ alloc_hint = SB_NR_TO_BIT(sb, alloc_hint);
+
+ return sbitmap_find_bit(sb, shallow_depth, index, alloc_hint, true);
+}
+
+int sbitmap_get_shallow(struct sbitmap *sb, unsigned long shallow_depth)
+{
+ int nr;
+ unsigned int hint, depth;
+
+ if (WARN_ON_ONCE(unlikely(!sb->alloc_hint)))
+ return -1;
+
+ depth = READ_ONCE(sb->depth);
+ hint = update_alloc_hint_before_get(sb, depth);
+ nr = __sbitmap_get_shallow(sb, hint, shallow_depth);
+ update_alloc_hint_after_get(sb, depth, hint, nr);
+
+ return nr;
+}
+EXPORT_SYMBOL_GPL(sbitmap_get_shallow);
+
+bool sbitmap_any_bit_set(const struct sbitmap *sb)
+{
+ unsigned int i;
+
+ for (i = 0; i < sb->map_nr; i++) {
+ if (sb->map[i].word & ~sb->map[i].cleared)
+ return true;
+ }
+ return false;
+}
+EXPORT_SYMBOL_GPL(sbitmap_any_bit_set);
+
+static unsigned int __sbitmap_weight(const struct sbitmap *sb, bool set)
+{
+ unsigned int i, weight = 0;
+
+ for (i = 0; i < sb->map_nr; i++) {
+ const struct sbitmap_word *word = &sb->map[i];
+ unsigned int word_depth = __map_depth(sb, i);
+
+ if (set)
+ weight += bitmap_weight(&word->word, word_depth);
+ else
+ weight += bitmap_weight(&word->cleared, word_depth);
+ }
+ return weight;
+}
+
+static unsigned int sbitmap_cleared(const struct sbitmap *sb)
+{
+ return __sbitmap_weight(sb, false);
+}
+
+unsigned int sbitmap_weight(const struct sbitmap *sb)
+{
+ return __sbitmap_weight(sb, true) - sbitmap_cleared(sb);
+}
+EXPORT_SYMBOL_GPL(sbitmap_weight);
+
+void sbitmap_show(struct sbitmap *sb, struct seq_file *m)
+{
+ seq_printf(m, "depth=%u\n", sb->depth);
+ seq_printf(m, "busy=%u\n", sbitmap_weight(sb));
+ seq_printf(m, "cleared=%u\n", sbitmap_cleared(sb));
+ seq_printf(m, "bits_per_word=%u\n", 1U << sb->shift);
+ seq_printf(m, "map_nr=%u\n", sb->map_nr);
+}
+EXPORT_SYMBOL_GPL(sbitmap_show);
+
+static inline void emit_byte(struct seq_file *m, unsigned int offset, u8 byte)
+{
+ if ((offset & 0xf) == 0) {
+ if (offset != 0)
+ seq_putc(m, '\n');
+ seq_printf(m, "%08x:", offset);
+ }
+ if ((offset & 0x1) == 0)
+ seq_putc(m, ' ');
+ seq_printf(m, "%02x", byte);
+}
+
+void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m)
+{
+ u8 byte = 0;
+ unsigned int byte_bits = 0;
+ unsigned int offset = 0;
+ int i;
+
+ for (i = 0; i < sb->map_nr; i++) {
+ unsigned long word = READ_ONCE(sb->map[i].word);
+ unsigned long cleared = READ_ONCE(sb->map[i].cleared);
+ unsigned int word_bits = __map_depth(sb, i);
+
+ word &= ~cleared;
+
+ while (word_bits > 0) {
+ unsigned int bits = min(8 - byte_bits, word_bits);
+
+ byte |= (word & (BIT(bits) - 1)) << byte_bits;
+ byte_bits += bits;
+ if (byte_bits == 8) {
+ emit_byte(m, offset, byte);
+ byte = 0;
+ byte_bits = 0;
+ offset++;
+ }
+ word >>= bits;
+ word_bits -= bits;
+ }
+ }
+ if (byte_bits) {
+ emit_byte(m, offset, byte);
+ offset++;
+ }
+ if (offset)
+ seq_putc(m, '\n');
+}
+EXPORT_SYMBOL_GPL(sbitmap_bitmap_show);
+
+static unsigned int sbq_calc_wake_batch(struct sbitmap_queue *sbq,
+ unsigned int depth)
+{
+ unsigned int wake_batch;
+ unsigned int shallow_depth;
+
+ /*
+ * For each batch, we wake up one queue. We need to make sure that our
+ * batch size is small enough that the full depth of the bitmap,
+ * potentially limited by a shallow depth, is enough to wake up all of
+ * the queues.
+ *
+ * Each full word of the bitmap has bits_per_word bits, and there might
+ * be a partial word. There are depth / bits_per_word full words and
+ * depth % bits_per_word bits left over. In bitwise arithmetic:
+ *
+ * bits_per_word = 1 << shift
+ * depth / bits_per_word = depth >> shift
+ * depth % bits_per_word = depth & ((1 << shift) - 1)
+ *
+ * Each word can be limited to sbq->min_shallow_depth bits.
+ */
+ shallow_depth = min(1U << sbq->sb.shift, sbq->min_shallow_depth);
+ depth = ((depth >> sbq->sb.shift) * shallow_depth +
+ min(depth & ((1U << sbq->sb.shift) - 1), shallow_depth));
+ wake_batch = clamp_t(unsigned int, depth / SBQ_WAIT_QUEUES, 1,
+ SBQ_WAKE_BATCH);
+
+ return wake_batch;
+}
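+
+/*
+ * Worked example of the calculation above (an editor's illustration,
+ * assuming shift = 6, i.e. 64-bit words, and the defaults
+ * SBQ_WAIT_QUEUES == 8 and SBQ_WAKE_BATCH == 8): for depth = 128 and
+ * min_shallow_depth = UINT_MAX, shallow_depth = min(64, UINT_MAX) = 64,
+ * the recomputed depth = (128 >> 6) * 64 + min(128 & 63, 64) = 128, and
+ * wake_batch = clamp(128 / 8, 1, 8) = 8.
+ */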
+
+int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
+ int shift, bool round_robin, gfp_t flags, int node)
+{
+ int ret;
+ int i;
+
+ ret = sbitmap_init_node(&sbq->sb, depth, shift, flags, node,
+ round_robin, true);
+ if (ret)
+ return ret;
+
+ sbq->min_shallow_depth = UINT_MAX;
+ sbq->wake_batch = sbq_calc_wake_batch(sbq, depth);
+ atomic_set(&sbq->wake_index, 0);
+ atomic_set(&sbq->ws_active, 0);
+ atomic_set(&sbq->completion_cnt, 0);
+ atomic_set(&sbq->wakeup_cnt, 0);
+
+ sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
+ if (!sbq->ws) {
+ sbitmap_free(&sbq->sb);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < SBQ_WAIT_QUEUES; i++)
+ init_waitqueue_head(&sbq->ws[i].wait);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sbitmap_queue_init_node);
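+
+/*
+ * Illustrative sketch (an editor's example, not part of the upstream file):
+ * a blk-mq-style tag allocate/release cycle on an sbitmap_queue; waiting on
+ * a full queue is omitted, and __sbitmap_queue_get() returns -1 when no tag
+ * is free.
+ *
+ *	struct sbitmap_queue sbq;
+ *	int tag;
+ *
+ *	if (sbitmap_queue_init_node(&sbq, 64, -1, false, GFP_KERNEL,
+ *				    NUMA_NO_NODE))
+ *		return -ENOMEM;
+ *	tag = __sbitmap_queue_get(&sbq);
+ *	if (tag >= 0)
+ *		sbitmap_queue_clear(&sbq, tag, raw_smp_processor_id());
+ *	sbitmap_queue_free(&sbq);
+ */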
+
+static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
+ unsigned int depth)
+{
+ unsigned int wake_batch;
+
+ wake_batch = sbq_calc_wake_batch(sbq, depth);
+ if (sbq->wake_batch != wake_batch)
+ WRITE_ONCE(sbq->wake_batch, wake_batch);
+}
+
+void sbitmap_queue_recalculate_wake_batch(struct sbitmap_queue *sbq,
+ unsigned int users)
+{
+ unsigned int wake_batch;
+ unsigned int depth = (sbq->sb.depth + users - 1) / users;
+
+ wake_batch = clamp_val(depth / SBQ_WAIT_QUEUES,
+ 1, SBQ_WAKE_BATCH);
+
+ WRITE_ONCE(sbq->wake_batch, wake_batch);
+}
+EXPORT_SYMBOL_GPL(sbitmap_queue_recalculate_wake_batch);
+
+void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth)
+{
+ sbitmap_queue_update_wake_batch(sbq, depth);
+ sbitmap_resize(&sbq->sb, depth);
+}
+EXPORT_SYMBOL_GPL(sbitmap_queue_resize);
+
+int __sbitmap_queue_get(struct sbitmap_queue *sbq)
+{
+ return sbitmap_get(&sbq->sb);
+}
+EXPORT_SYMBOL_GPL(__sbitmap_queue_get);
+
+unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
+ unsigned int *offset)
+{
+ struct sbitmap *sb = &sbq->sb;
+ unsigned int hint, depth;
+ unsigned long index, nr;
+ int i;
+
+ if (unlikely(sb->round_robin))
+ return 0;
+
+ depth = READ_ONCE(sb->depth);
+ hint = update_alloc_hint_before_get(sb, depth);
+
+ index = SB_NR_TO_INDEX(sb, hint);
+
+ for (i = 0; i < sb->map_nr; i++) {
+ struct sbitmap_word *map = &sb->map[index];
+ unsigned long get_mask;
+ unsigned int map_depth = __map_depth(sb, index);
+
+ sbitmap_deferred_clear(map);
+ if (map->word == (1UL << (map_depth - 1)) - 1)
+ goto next;
+
+ nr = find_first_zero_bit(&map->word, map_depth);
+ if (nr + nr_tags <= map_depth) {
+ atomic_long_t *ptr = (atomic_long_t *) &map->word;
+ unsigned long val;
+
+ get_mask = ((1UL << nr_tags) - 1) << nr;
+ val = READ_ONCE(map->word);
+ while (!atomic_long_try_cmpxchg(ptr, &val,
+ get_mask | val))
+ ;
+ get_mask = (get_mask & ~val) >> nr;
+ if (get_mask) {
+ *offset = nr + (index << sb->shift);
+ update_alloc_hint_after_get(sb, depth, hint,
+ *offset + nr_tags - 1);
+ return get_mask;
+ }
+ }
+next:
+ /* Jump to next index. */
+ if (++index >= sb->map_nr)
+ index = 0;
+ }
+
+ return 0;
+}
+
+int sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
+ unsigned int shallow_depth)
+{
+ WARN_ON_ONCE(shallow_depth < sbq->min_shallow_depth);
+
+ return sbitmap_get_shallow(&sbq->sb, shallow_depth);
+}
+EXPORT_SYMBOL_GPL(sbitmap_queue_get_shallow);
+
+void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
+ unsigned int min_shallow_depth)
+{
+ sbq->min_shallow_depth = min_shallow_depth;
+ sbitmap_queue_update_wake_batch(sbq, sbq->sb.depth);
+}
+EXPORT_SYMBOL_GPL(sbitmap_queue_min_shallow_depth);
+
+static void __sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr)
+{
+ int i, wake_index, woken;
+
+ if (!atomic_read(&sbq->ws_active))
+ return;
+
+ wake_index = atomic_read(&sbq->wake_index);
+ for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
+ struct sbq_wait_state *ws = &sbq->ws[wake_index];
+
+ /*
+ * Advance the index before checking the current queue.
+ * It improves fairness, by ensuring the queue doesn't
+ * need to be fully emptied before trying to wake up
+ * from the next one.
+ */
+ wake_index = sbq_index_inc(wake_index);
+
+ if (waitqueue_active(&ws->wait)) {
+ woken = wake_up_nr(&ws->wait, nr);
+ if (woken == nr)
+ break;
+ nr -= woken;
+ }
+ }
+
+ if (wake_index != atomic_read(&sbq->wake_index))
+ atomic_set(&sbq->wake_index, wake_index);
+}
+
+void sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr)
+{
+ unsigned int wake_batch = READ_ONCE(sbq->wake_batch);
+ unsigned int wakeups;
+
+ if (!atomic_read(&sbq->ws_active))
+ return;
+
+ atomic_add(nr, &sbq->completion_cnt);
+ wakeups = atomic_read(&sbq->wakeup_cnt);
+
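+ /*
+ * Claim a whole wake_batch of completions at once: only the caller
+ * that advances wakeup_cnt past a batch boundary performs the wakeup;
+ * losers of the cmpxchg re-check against the refreshed count.
+ */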
+ do {
+ if (atomic_read(&sbq->completion_cnt) - wakeups < wake_batch)
+ return;
+ } while (!atomic_try_cmpxchg(&sbq->wakeup_cnt,
+ &wakeups, wakeups + wake_batch));
+
+ __sbitmap_queue_wake_up(sbq, wake_batch);
+}
+EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);
+
+static inline void sbitmap_update_cpu_hint(struct sbitmap *sb, int cpu, int tag)
+{
+ if (likely(!sb->round_robin && tag < sb->depth))
+ data_race(*per_cpu_ptr(sb->alloc_hint, cpu) = tag);
+}
+
+void sbitmap_queue_clear_batch(struct sbitmap_queue *sbq, int offset,
+ int *tags, int nr_tags)
+{
+ struct sbitmap *sb = &sbq->sb;
+ unsigned long *addr = NULL;
+ unsigned long mask = 0;
+ int i;
+
+ smp_mb__before_atomic();
+ for (i = 0; i < nr_tags; i++) {
+ const int tag = tags[i] - offset;
+ unsigned long *this_addr;
+
+ /* since we're clearing a batch, skip the deferred map */
+ this_addr = &sb->map[SB_NR_TO_INDEX(sb, tag)].word;
+ if (!addr) {
+ addr = this_addr;
+ } else if (addr != this_addr) {
+ atomic_long_andnot(mask, (atomic_long_t *) addr);
+ mask = 0;
+ addr = this_addr;
+ }
+ mask |= (1UL << SB_NR_TO_BIT(sb, tag));
+ }
+
+ if (mask)
+ atomic_long_andnot(mask, (atomic_long_t *) addr);
+
+ smp_mb__after_atomic();
+ sbitmap_queue_wake_up(sbq, nr_tags);
+ sbitmap_update_cpu_hint(&sbq->sb, raw_smp_processor_id(),
+ tags[nr_tags - 1] - offset);
+}
+
+void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
+ unsigned int cpu)
+{
+ /*
+ * Once the cleared bit is set, the bit may be reallocated.
+ *
+ * This orders READ/WRITE on the associated instance (such as a
+ * blk-mq request) against this bit, avoiding races with
+ * re-allocation; its pair is the memory barrier implied in
+ * __sbitmap_get_word.
+ *
+ * One invariant is that the cleared bit has to be zero when the
+ * bit is in use.
+ */
+ smp_mb__before_atomic();
+ sbitmap_deferred_clear_bit(&sbq->sb, nr);
+
+ /*
+ * Pairs with the memory barrier in set_current_state() to ensure the
+ * proper ordering of clear_bit_unlock()/waitqueue_active() in the waker
+ * and test_and_set_bit_lock()/prepare_to_wait()/finish_wait() in the
+ * waiter. See the comment on waitqueue_active().
+ */
+ smp_mb__after_atomic();
+ sbitmap_queue_wake_up(sbq, 1);
+ sbitmap_update_cpu_hint(&sbq->sb, cpu, nr);
+}
+EXPORT_SYMBOL_GPL(sbitmap_queue_clear);
+
+void sbitmap_queue_wake_all(struct sbitmap_queue *sbq)
+{
+ int i, wake_index;
+
+ /*
+ * Pairs with the memory barrier in set_current_state() like in
+ * sbitmap_queue_wake_up().
+ */
+ smp_mb();
+ wake_index = atomic_read(&sbq->wake_index);
+ for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
+ struct sbq_wait_state *ws = &sbq->ws[wake_index];
+
+ if (waitqueue_active(&ws->wait))
+ wake_up(&ws->wait);
+
+ wake_index = sbq_index_inc(wake_index);
+ }
+}
+EXPORT_SYMBOL_GPL(sbitmap_queue_wake_all);
+
+void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m)
+{
+ bool first;
+ int i;
+
+ sbitmap_show(&sbq->sb, m);
+
+ seq_puts(m, "alloc_hint={");
+ first = true;
+ for_each_possible_cpu(i) {
+ if (!first)
+ seq_puts(m, ", ");
+ first = false;
+ seq_printf(m, "%u", *per_cpu_ptr(sbq->sb.alloc_hint, i));
+ }
+ seq_puts(m, "}\n");
+
+ seq_printf(m, "wake_batch=%u\n", sbq->wake_batch);
+ seq_printf(m, "wake_index=%d\n", atomic_read(&sbq->wake_index));
+ seq_printf(m, "ws_active=%d\n", atomic_read(&sbq->ws_active));
+
+ seq_puts(m, "ws={\n");
+ for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
+ struct sbq_wait_state *ws = &sbq->ws[i];
+ seq_printf(m, "\t{.wait=%s},\n",
+ waitqueue_active(&ws->wait) ? "active" : "inactive");
+ }
+ seq_puts(m, "}\n");
+
+ seq_printf(m, "round_robin=%d\n", sbq->sb.round_robin);
+ seq_printf(m, "min_shallow_depth=%u\n", sbq->min_shallow_depth);
+}
+EXPORT_SYMBOL_GPL(sbitmap_queue_show);
+
+void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
+ struct sbq_wait_state *ws,
+ struct sbq_wait *sbq_wait)
+{
+ if (!sbq_wait->sbq) {
+ sbq_wait->sbq = sbq;
+ atomic_inc(&sbq->ws_active);
+ add_wait_queue(&ws->wait, &sbq_wait->wait);
+ }
+}
+EXPORT_SYMBOL_GPL(sbitmap_add_wait_queue);
+
+void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait)
+{
+ list_del_init(&sbq_wait->wait.entry);
+ if (sbq_wait->sbq) {
+ atomic_dec(&sbq_wait->sbq->ws_active);
+ sbq_wait->sbq = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(sbitmap_del_wait_queue);
+
+void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
+ struct sbq_wait_state *ws,
+ struct sbq_wait *sbq_wait, int state)
+{
+ if (!sbq_wait->sbq) {
+ atomic_inc(&sbq->ws_active);
+ sbq_wait->sbq = sbq;
+ }
+ prepare_to_wait_exclusive(&ws->wait, &sbq_wait->wait, state);
+}
+EXPORT_SYMBOL_GPL(sbitmap_prepare_to_wait);
+
+void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
+ struct sbq_wait *sbq_wait)
+{
+ finish_wait(&ws->wait, &sbq_wait->wait);
+ if (sbq_wait->sbq) {
+ atomic_dec(&sbq->ws_active);
+ sbq_wait->sbq = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(sbitmap_finish_wait);
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
new file mode 100644
index 000000000..68b45c82c
--- /dev/null
+++ b/lib/scatterlist.c
@@ -0,0 +1,1368 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2007 Jens Axboe <jens.axboe@oracle.com>
+ *
+ * Scatterlist handling helpers.
+ */
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/scatterlist.h>
+#include <linux/highmem.h>
+#include <linux/kmemleak.h>
+#include <linux/bvec.h>
+#include <linux/uio.h>
+
+/**
+ * sg_next - return the next scatterlist entry in a list
+ * @sg: The current sg entry
+ *
+ * Description:
+ * Usually the next entry will be @sg@ + 1, but if this sg element is part
+ * of a chained scatterlist, it could jump to the start of a new
+ * scatterlist array.
+ *
+ **/
+struct scatterlist *sg_next(struct scatterlist *sg)
+{
+ if (sg_is_last(sg))
+ return NULL;
+
+ sg++;
+ if (unlikely(sg_is_chain(sg)))
+ sg = sg_chain_ptr(sg);
+
+ return sg;
+}
+EXPORT_SYMBOL(sg_next);
+
+/**
+ * sg_nents - return total count of entries in scatterlist
+ * @sg: The scatterlist
+ *
+ * Description:
+ * Returns the total number of entries in @sg, taking chaining
+ * into account.
+ *
+ **/
+int sg_nents(struct scatterlist *sg)
+{
+ int nents;
+ for (nents = 0; sg; sg = sg_next(sg))
+ nents++;
+ return nents;
+}
+EXPORT_SYMBOL(sg_nents);
+
+/**
+ * sg_nents_for_len - return total count of entries in scatterlist
+ * needed to satisfy the supplied length
+ * @sg: The scatterlist
+ * @len: The total required length
+ *
+ * Description:
+ * Determines the number of entries in sg that are required to meet
+ * the supplied length, taking into account chaining as well
+ *
+ * Returns:
+ * the number of sg entries needed, negative error on failure
+ *
+ **/
+int sg_nents_for_len(struct scatterlist *sg, u64 len)
+{
+ int nents;
+ u64 total;
+
+ if (!len)
+ return 0;
+
+ for (nents = 0, total = 0; sg; sg = sg_next(sg)) {
+ nents++;
+ total += sg->length;
+ if (total >= len)
+ return nents;
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL(sg_nents_for_len);
+
+/**
+ * sg_last - return the last scatterlist entry in a list
+ * @sgl: First entry in the scatterlist
+ * @nents: Number of entries in the scatterlist
+ *
+ * Description:
+ * Should be used sparingly; it (currently) scans the entire list
+ * to get the last entry.
+ *
+ * Note that the @sgl@ pointer passed in need not be the first one,
+ * the important bit is that @nents@ denotes the number of entries that
+ * exist from @sgl@.
+ *
+ **/
+struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
+{
+ struct scatterlist *sg, *ret = NULL;
+ unsigned int i;
+
+ for_each_sg(sgl, sg, nents, i)
+ ret = sg;
+
+ BUG_ON(!sg_is_last(ret));
+ return ret;
+}
+EXPORT_SYMBOL(sg_last);
+
+/**
+ * sg_init_table - Initialize SG table
+ * @sgl: The SG table
+ * @nents: Number of entries in table
+ *
+ * Notes:
+ * If this is part of a chained sg table, sg_mark_end() should be
+ * used only on the last table part.
+ *
+ **/
+void sg_init_table(struct scatterlist *sgl, unsigned int nents)
+{
+ memset(sgl, 0, sizeof(*sgl) * nents);
+ sg_init_marker(sgl, nents);
+}
+EXPORT_SYMBOL(sg_init_table);
+
+/**
+ * sg_init_one - Initialize a single entry sg list
+ * @sg: SG entry
+ * @buf: Virtual address for IO
+ * @buflen: IO length
+ *
+ **/
+void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
+{
+ sg_init_table(sg, 1);
+ sg_set_buf(sg, buf, buflen);
+}
+EXPORT_SYMBOL(sg_init_one);
+
+/*
+ * The default behaviour of sg_alloc_table() is to use these kmalloc/kfree
+ * helpers.
+ */
+static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
+{
+ if (nents == SG_MAX_SINGLE_ALLOC) {
+ /*
+ * Kmemleak doesn't track page allocations as they are not
+ * commonly used (in a raw form) for kernel data structures.
+ * As we chain together a list of pages and then a normal
+ * kmalloc (tracked by kmemleak), in order to for that last
+ * allocation not to become decoupled (and thus a
+ * false-positive) we need to inform kmemleak of all the
+ * intermediate allocations.
+ */
+ void *ptr = (void *) __get_free_page(gfp_mask);
+ kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);
+ return ptr;
+ } else
+ return kmalloc_array(nents, sizeof(struct scatterlist),
+ gfp_mask);
+}
+
+static void sg_kfree(struct scatterlist *sg, unsigned int nents)
+{
+ if (nents == SG_MAX_SINGLE_ALLOC) {
+ kmemleak_free(sg);
+ free_page((unsigned long) sg);
+ } else
+ kfree(sg);
+}
+
+/**
+ * __sg_free_table - Free a previously mapped sg table
+ * @table: The sg table header to use
+ * @max_ents: The maximum number of entries per single scatterlist
+ * @nents_first_chunk: Number of entries in the (preallocated) first
+ * scatterlist chunk, 0 means no such preallocated first chunk
+ * @free_fn: Free function
+ * @num_ents: Number of entries in the table
+ *
+ * Description:
+ * Free an sg table previously allocated and setup with
+ * __sg_alloc_table(). The @max_ents value must be identical to
+ * that previously used with __sg_alloc_table().
+ *
+ **/
+void __sg_free_table(struct sg_table *table, unsigned int max_ents,
+ unsigned int nents_first_chunk, sg_free_fn *free_fn,
+ unsigned int num_ents)
+{
+ struct scatterlist *sgl, *next;
+ unsigned curr_max_ents = nents_first_chunk ?: max_ents;
+
+ if (unlikely(!table->sgl))
+ return;
+
+ sgl = table->sgl;
+ while (num_ents) {
+ unsigned int alloc_size = num_ents;
+ unsigned int sg_size;
+
+ /*
+ * If we have more than max_ents segments left,
+ * then assign 'next' to the sg table after the current one.
+ * sg_size is then one less than alloc size, since the last
+ * element is the chain pointer.
+ */
+ if (alloc_size > curr_max_ents) {
+ next = sg_chain_ptr(&sgl[curr_max_ents - 1]);
+ alloc_size = curr_max_ents;
+ sg_size = alloc_size - 1;
+ } else {
+ sg_size = alloc_size;
+ next = NULL;
+ }
+
+ num_ents -= sg_size;
+ if (nents_first_chunk)
+ nents_first_chunk = 0;
+ else
+ free_fn(sgl, alloc_size);
+ sgl = next;
+ curr_max_ents = max_ents;
+ }
+
+ table->sgl = NULL;
+}
+EXPORT_SYMBOL(__sg_free_table);
+
+/**
+ * sg_free_append_table - Free a previously allocated append sg table.
+ * @table: The mapped sg append table header
+ *
+ **/
+void sg_free_append_table(struct sg_append_table *table)
+{
+ __sg_free_table(&table->sgt, SG_MAX_SINGLE_ALLOC, 0, sg_kfree,
+ table->total_nents);
+}
+EXPORT_SYMBOL(sg_free_append_table);
+
+/**
+ * sg_free_table - Free a previously allocated sg table
+ * @table: The mapped sg table header
+ *
+ **/
+void sg_free_table(struct sg_table *table)
+{
+ __sg_free_table(table, SG_MAX_SINGLE_ALLOC, 0, sg_kfree,
+ table->orig_nents);
+}
+EXPORT_SYMBOL(sg_free_table);
+
+/**
+ * __sg_alloc_table - Allocate and initialize an sg table with given allocator
+ * @table: The sg table header to use
+ * @nents: Number of entries in sg list
+ * @max_ents: The maximum number of entries the allocator returns per call
+ * @first_chunk: first SGL if preallocated (may be %NULL)
+ * @nents_first_chunk: Number of entries in the (preallocated) first
+ * scatterlist chunk, 0 means no such preallocated chunk provided by user
+ * @gfp_mask: GFP allocation mask
+ * @alloc_fn: Allocator to use
+ *
+ * Description:
+ * This function allocates a @table @nents entries long. The allocator is
+ * defined to return scatterlist chunks of maximum size @max_ents.
+ * Thus if @nents is bigger than @max_ents, the scatterlists will be
+ * chained in units of @max_ents.
+ *
+ * Notes:
+ * If this function returns non-0 (eg failure), the caller must call
+ * __sg_free_table() to clean up any leftover allocations.
+ *
+ **/
+int __sg_alloc_table(struct sg_table *table, unsigned int nents,
+ unsigned int max_ents, struct scatterlist *first_chunk,
+ unsigned int nents_first_chunk, gfp_t gfp_mask,
+ sg_alloc_fn *alloc_fn)
+{
+ struct scatterlist *sg, *prv;
+ unsigned int left;
+ unsigned curr_max_ents = nents_first_chunk ?: max_ents;
+ unsigned prv_max_ents;
+
+ memset(table, 0, sizeof(*table));
+
+ if (nents == 0)
+ return -EINVAL;
+#ifdef CONFIG_ARCH_NO_SG_CHAIN
+ if (WARN_ON_ONCE(nents > max_ents))
+ return -EINVAL;
+#endif
+
+ left = nents;
+ prv = NULL;
+ do {
+ unsigned int sg_size, alloc_size = left;
+
+ if (alloc_size > curr_max_ents) {
+ alloc_size = curr_max_ents;
+ sg_size = alloc_size - 1;
+ } else
+ sg_size = alloc_size;
+
+ left -= sg_size;
+
+ if (first_chunk) {
+ sg = first_chunk;
+ first_chunk = NULL;
+ } else {
+ sg = alloc_fn(alloc_size, gfp_mask);
+ }
+ if (unlikely(!sg)) {
+ /*
+ * Adjust entry count to reflect that the last
+ * entry of the previous table won't be used for
+ * linkage. Without this, sg_kfree() may get
+ * confused.
+ */
+ if (prv)
+ table->nents = ++table->orig_nents;
+
+ return -ENOMEM;
+ }
+
+ sg_init_table(sg, alloc_size);
+ table->nents = table->orig_nents += sg_size;
+
+ /*
+ * If this is the first mapping, assign the sg table header.
+ * If this is not the first mapping, chain previous part.
+ */
+ if (prv)
+ sg_chain(prv, prv_max_ents, sg);
+ else
+ table->sgl = sg;
+
+ /*
+ * If no more entries after this one, mark the end
+ */
+ if (!left)
+ sg_mark_end(&sg[sg_size - 1]);
+
+ prv = sg;
+ prv_max_ents = curr_max_ents;
+ curr_max_ents = max_ents;
+ } while (left);
+
+ return 0;
+}
+EXPORT_SYMBOL(__sg_alloc_table);
+
+/**
+ * sg_alloc_table - Allocate and initialize an sg table
+ * @table: The sg table header to use
+ * @nents: Number of entries in sg list
+ * @gfp_mask: GFP allocation mask
+ *
+ * Description:
+ * Allocate and initialize an sg table. If @nents@ is larger than
+ * SG_MAX_SINGLE_ALLOC a chained sg table will be setup.
+ *
+ **/
+int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
+{
+ int ret;
+
+ ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
+ NULL, 0, gfp_mask, sg_kmalloc);
+ if (unlikely(ret))
+ sg_free_table(table);
+ return ret;
+}
+EXPORT_SYMBOL(sg_alloc_table);
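+
+/*
+ * Illustrative sketch (an editor's example, not part of the upstream file):
+ * allocate a two-entry table, point the entries at two hypothetical pages
+ * "page0" and "page1", and release the table again.
+ *
+ *	struct sg_table sgt;
+ *	struct scatterlist *sg;
+ *
+ *	if (sg_alloc_table(&sgt, 2, GFP_KERNEL))
+ *		return -ENOMEM;
+ *	sg = sgt.sgl;
+ *	sg_set_page(sg, page0, PAGE_SIZE, 0);
+ *	sg_set_page(sg_next(sg), page1, PAGE_SIZE, 0);
+ *	sg_free_table(&sgt);
+ */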
+
+static struct scatterlist *get_next_sg(struct sg_append_table *table,
+ struct scatterlist *cur,
+ unsigned long needed_sges,
+ gfp_t gfp_mask)
+{
+ struct scatterlist *new_sg, *next_sg;
+ unsigned int alloc_size;
+
+ if (cur) {
+ next_sg = sg_next(cur);
+ /* Check if the last entry should be kept for chaining */
+ if (!sg_is_last(next_sg) || needed_sges == 1)
+ return next_sg;
+ }
+
+ alloc_size = min_t(unsigned long, needed_sges, SG_MAX_SINGLE_ALLOC);
+ new_sg = sg_kmalloc(alloc_size, gfp_mask);
+ if (!new_sg)
+ return ERR_PTR(-ENOMEM);
+ sg_init_table(new_sg, alloc_size);
+ if (cur) {
+ table->total_nents += alloc_size - 1;
+ __sg_chain(next_sg, new_sg);
+ } else {
+ table->sgt.sgl = new_sg;
+ table->total_nents = alloc_size;
+ }
+ return new_sg;
+}
+
+static bool pages_are_mergeable(struct page *a, struct page *b)
+{
+ if (page_to_pfn(a) != page_to_pfn(b) + 1)
+ return false;
+ if (!zone_device_pages_have_same_pgmap(a, b))
+ return false;
+ return true;
+}
+
+/**
+ * sg_alloc_append_table_from_pages - Allocate and initialize an append sg
+ * table from an array of pages
+ * @sgt_append: The sg append table to use
+ * @pages: Pointer to an array of page pointers
+ * @n_pages: Number of pages in the pages array
+ * @offset: Offset from start of the first page to the start of a buffer
+ * @size: Number of valid bytes in the buffer (after offset)
+ * @max_segment: Maximum size of a scatterlist element in bytes
+ * @left_pages: Number of pages left to append after this call
+ * @gfp_mask: GFP allocation mask
+ *
+ * Description:
+ * On the first call, allocates and initializes an sg table from a list of
+ * pages; on subsequent calls, reuses the scatterlist from @sgt_append.
+ * Contiguous ranges of the pages are squashed into a single scatterlist
+ * entry up to the maximum size specified in @max_segment. A user may
+ * provide an offset at a start and a size of valid data in a buffer
+ * specified by the page array. The returned sg table is released by
+ * sg_free_append_table().
+ *
+ * Returns:
+ * 0 on success, negative error on failure
+ *
+ * Notes:
+ * If this function returns non-0 (eg failure), the caller must call
+ * sg_free_append_table() to clean up any leftover allocations.
+ *
+ * Before the first call, @sgt_append must be initialized.
+ */
+int sg_alloc_append_table_from_pages(struct sg_append_table *sgt_append,
+ struct page **pages, unsigned int n_pages, unsigned int offset,
+ unsigned long size, unsigned int max_segment,
+ unsigned int left_pages, gfp_t gfp_mask)
+{
+ unsigned int chunks, cur_page, seg_len, i, prv_len = 0;
+ unsigned int added_nents = 0;
+ struct scatterlist *s = sgt_append->prv;
+ struct page *last_pg;
+
+ /*
+ * The algorithm below requires max_segment to be aligned to PAGE_SIZE
+ * otherwise it can overshoot.
+ */
+ max_segment = ALIGN_DOWN(max_segment, PAGE_SIZE);
+ if (WARN_ON(max_segment < PAGE_SIZE))
+ return -EINVAL;
+
+ if (IS_ENABLED(CONFIG_ARCH_NO_SG_CHAIN) && sgt_append->prv)
+ return -EOPNOTSUPP;
+
+ if (sgt_append->prv) {
+ unsigned long next_pfn = (page_to_phys(sg_page(sgt_append->prv)) +
+ sgt_append->prv->offset + sgt_append->prv->length) / PAGE_SIZE;
+
+ if (WARN_ON(offset))
+ return -EINVAL;
+
+ /* Merge contiguous pages into the last SG */
+ prv_len = sgt_append->prv->length;
+ if (page_to_pfn(pages[0]) == next_pfn) {
+ last_pg = pfn_to_page(next_pfn - 1);
+ while (n_pages && pages_are_mergeable(pages[0], last_pg)) {
+ if (sgt_append->prv->length + PAGE_SIZE > max_segment)
+ break;
+ sgt_append->prv->length += PAGE_SIZE;
+ last_pg = pages[0];
+ pages++;
+ n_pages--;
+ }
+ if (!n_pages)
+ goto out;
+ }
+ }
+
+ /* compute number of contiguous chunks */
+ chunks = 1;
+ seg_len = 0;
+ for (i = 1; i < n_pages; i++) {
+ seg_len += PAGE_SIZE;
+ if (seg_len >= max_segment ||
+ !pages_are_mergeable(pages[i], pages[i - 1])) {
+ chunks++;
+ seg_len = 0;
+ }
+ }
+
+ /* merging chunks and putting them into the scatterlist */
+ cur_page = 0;
+ for (i = 0; i < chunks; i++) {
+ unsigned int j, chunk_size;
+
+ /* look for the end of the current chunk */
+ seg_len = 0;
+ for (j = cur_page + 1; j < n_pages; j++) {
+ seg_len += PAGE_SIZE;
+ if (seg_len >= max_segment ||
+ !pages_are_mergeable(pages[j], pages[j - 1]))
+ break;
+ }
+
+ /* Pass how many chunks might be left */
+ s = get_next_sg(sgt_append, s, chunks - i + left_pages,
+ gfp_mask);
+ if (IS_ERR(s)) {
+ /*
+ * Adjust entry length to be as before function was
+ * called.
+ */
+ if (sgt_append->prv)
+ sgt_append->prv->length = prv_len;
+ return PTR_ERR(s);
+ }
+ chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
+ sg_set_page(s, pages[cur_page],
+ min_t(unsigned long, size, chunk_size), offset);
+ added_nents++;
+ size -= chunk_size;
+ offset = 0;
+ cur_page = j;
+ }
+ sgt_append->sgt.nents += added_nents;
+ sgt_append->sgt.orig_nents = sgt_append->sgt.nents;
+ sgt_append->prv = s;
+out:
+ if (!left_pages)
+ sg_mark_end(s);
+ return 0;
+}
+EXPORT_SYMBOL(sg_alloc_append_table_from_pages);
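+
+/*
+ * Illustrative sketch (an editor's example, not part of the upstream file):
+ * build one table from two batches of pages, appending the second batch to
+ * the first; "pages_a" and "pages_b" are hypothetical arrays of 8 page
+ * pointers each, so left_pages is 8 on the first call and 0 on the last.
+ *
+ *	struct sg_append_table app = {};
+ *
+ *	if (sg_alloc_append_table_from_pages(&app, pages_a, 8, 0,
+ *			8 * PAGE_SIZE, UINT_MAX, 8, GFP_KERNEL) ||
+ *	    sg_alloc_append_table_from_pages(&app, pages_b, 8, 0,
+ *			8 * PAGE_SIZE, UINT_MAX, 0, GFP_KERNEL)) {
+ *		sg_free_append_table(&app);
+ *		return -ENOMEM;
+ *	}
+ */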
+
+/**
+ * sg_alloc_table_from_pages_segment - Allocate and initialize an sg table from
+ * an array of pages and given maximum
+ * segment.
+ * @sgt: The sg table header to use
+ * @pages: Pointer to an array of page pointers
+ * @n_pages: Number of pages in the pages array
+ * @offset: Offset from start of the first page to the start of a buffer
+ * @size: Number of valid bytes in the buffer (after offset)
+ * @max_segment: Maximum size of a scatterlist element in bytes
+ * @gfp_mask: GFP allocation mask
+ *
+ * Description:
+ * Allocate and initialize an sg table from a list of pages. Contiguous
+ * ranges of the pages are squashed into a single scatterlist node up to the
+ * maximum size specified in @max_segment. A user may provide an offset at a
+ * start and a size of valid data in a buffer specified by the page array.
+ *
+ * The returned sg table is released by sg_free_table.
+ *
+ * Returns:
+ * 0 on success, negative error on failure
+ */
+int sg_alloc_table_from_pages_segment(struct sg_table *sgt, struct page **pages,
+ unsigned int n_pages, unsigned int offset,
+ unsigned long size, unsigned int max_segment,
+ gfp_t gfp_mask)
+{
+ struct sg_append_table append = {};
+ int err;
+
+ err = sg_alloc_append_table_from_pages(&append, pages, n_pages, offset,
+ size, max_segment, 0, gfp_mask);
+ if (err) {
+ sg_free_append_table(&append);
+ return err;
+ }
+ memcpy(sgt, &append.sgt, sizeof(*sgt));
+ WARN_ON(append.total_nents != sgt->orig_nents);
+ return 0;
+}
+EXPORT_SYMBOL(sg_alloc_table_from_pages_segment);
+
+#ifdef CONFIG_SGL_ALLOC
+
+/**
+ * sgl_alloc_order - allocate a scatterlist and its pages
+ * @length: Length in bytes of the scatterlist. Must be at least one
+ * @order: Second argument for alloc_pages()
+ * @chainable: Whether or not to allocate an extra element in the scatterlist
+ * for scatterlist chaining purposes
+ * @gfp: Memory allocation flags
+ * @nent_p: [out] Number of entries in the scatterlist that have pages
+ *
+ * Returns: A pointer to an initialized scatterlist or %NULL upon failure.
+ */
+struct scatterlist *sgl_alloc_order(unsigned long long length,
+ unsigned int order, bool chainable,
+ gfp_t gfp, unsigned int *nent_p)
+{
+ struct scatterlist *sgl, *sg;
+ struct page *page;
+ unsigned int nent, nalloc;
+ u32 elem_len;
+
+ nent = round_up(length, PAGE_SIZE << order) >> (PAGE_SHIFT + order);
+ /* Check for integer overflow */
+ if (length > (nent << (PAGE_SHIFT + order)))
+ return NULL;
+ nalloc = nent;
+ if (chainable) {
+ /* Check for integer overflow */
+ if (nalloc + 1 < nalloc)
+ return NULL;
+ nalloc++;
+ }
+ sgl = kmalloc_array(nalloc, sizeof(struct scatterlist),
+ gfp & ~GFP_DMA);
+ if (!sgl)
+ return NULL;
+
+ sg_init_table(sgl, nalloc);
+ sg = sgl;
+ while (length) {
+ elem_len = min_t(u64, length, PAGE_SIZE << order);
+ page = alloc_pages(gfp, order);
+ if (!page) {
+ sgl_free_order(sgl, order);
+ return NULL;
+ }
+
+ sg_set_page(sg, page, elem_len, 0);
+ length -= elem_len;
+ sg = sg_next(sg);
+ }
+ WARN_ONCE(length, "length = %llu\n", length);
+ if (nent_p)
+ *nent_p = nent;
+ return sgl;
+}
+EXPORT_SYMBOL(sgl_alloc_order);
+
+/**
+ * sgl_alloc - allocate a scatterlist and its pages
+ * @length: Length in bytes of the scatterlist
+ * @gfp: Memory allocation flags
+ * @nent_p: [out] Number of entries in the scatterlist
+ *
+ * Returns: A pointer to an initialized scatterlist or %NULL upon failure.
+ */
+struct scatterlist *sgl_alloc(unsigned long long length, gfp_t gfp,
+ unsigned int *nent_p)
+{
+ return sgl_alloc_order(length, 0, false, gfp, nent_p);
+}
+EXPORT_SYMBOL(sgl_alloc);
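+
+/*
+ * Illustrative sketch (an editor's example, not part of the upstream file):
+ * allocate a 16 KiB scatterlist together with its backing pages and free
+ * both again.
+ *
+ *	unsigned int nents;
+ *	struct scatterlist *sgl;
+ *
+ *	sgl = sgl_alloc(16 * 1024, GFP_KERNEL, &nents);
+ *	if (!sgl)
+ *		return -ENOMEM;
+ *	sgl_free(sgl);
+ */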
+
+/**
+ * sgl_free_n_order - free a scatterlist and its pages
+ * @sgl: Scatterlist with one or more elements
+ * @nents: Maximum number of elements to free
+ * @order: Second argument for __free_pages()
+ *
+ * Notes:
+ * - If several scatterlists have been chained and each chain element is
+ * freed separately then it's essential to set nents correctly to avoid that a
+ * page would get freed twice.
+ * - All pages in a chained scatterlist can be freed at once by setting @nents
+ * to a high number.
+ */
+void sgl_free_n_order(struct scatterlist *sgl, int nents, int order)
+{
+ struct scatterlist *sg;
+ struct page *page;
+ int i;
+
+ for_each_sg(sgl, sg, nents, i) {
+ if (!sg)
+ break;
+ page = sg_page(sg);
+ if (page)
+ __free_pages(page, order);
+ }
+ kfree(sgl);
+}
+EXPORT_SYMBOL(sgl_free_n_order);
+
+/**
+ * sgl_free_order - free a scatterlist and its pages
+ * @sgl: Scatterlist with one or more elements
+ * @order: Second argument for __free_pages()
+ */
+void sgl_free_order(struct scatterlist *sgl, int order)
+{
+ sgl_free_n_order(sgl, INT_MAX, order);
+}
+EXPORT_SYMBOL(sgl_free_order);
+
+/**
+ * sgl_free - free a scatterlist and its pages
+ * @sgl: Scatterlist with one or more elements
+ */
+void sgl_free(struct scatterlist *sgl)
+{
+ sgl_free_order(sgl, 0);
+}
+EXPORT_SYMBOL(sgl_free);
+
+#endif /* CONFIG_SGL_ALLOC */
+
+void __sg_page_iter_start(struct sg_page_iter *piter,
+ struct scatterlist *sglist, unsigned int nents,
+ unsigned long pgoffset)
+{
+ piter->__pg_advance = 0;
+ piter->__nents = nents;
+
+ piter->sg = sglist;
+ piter->sg_pgoffset = pgoffset;
+}
+EXPORT_SYMBOL(__sg_page_iter_start);
+
+static int sg_page_count(struct scatterlist *sg)
+{
+ return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
+}
+
+bool __sg_page_iter_next(struct sg_page_iter *piter)
+{
+ if (!piter->__nents || !piter->sg)
+ return false;
+
+ piter->sg_pgoffset += piter->__pg_advance;
+ piter->__pg_advance = 1;
+
+ while (piter->sg_pgoffset >= sg_page_count(piter->sg)) {
+ piter->sg_pgoffset -= sg_page_count(piter->sg);
+ piter->sg = sg_next(piter->sg);
+ if (!--piter->__nents || !piter->sg)
+ return false;
+ }
+
+ return true;
+}
+EXPORT_SYMBOL(__sg_page_iter_next);
+
+static int sg_dma_page_count(struct scatterlist *sg)
+{
+ return PAGE_ALIGN(sg->offset + sg_dma_len(sg)) >> PAGE_SHIFT;
+}
+
+bool __sg_page_iter_dma_next(struct sg_dma_page_iter *dma_iter)
+{
+ struct sg_page_iter *piter = &dma_iter->base;
+
+ if (!piter->__nents || !piter->sg)
+ return false;
+
+ piter->sg_pgoffset += piter->__pg_advance;
+ piter->__pg_advance = 1;
+
+ while (piter->sg_pgoffset >= sg_dma_page_count(piter->sg)) {
+ piter->sg_pgoffset -= sg_dma_page_count(piter->sg);
+ piter->sg = sg_next(piter->sg);
+ if (!--piter->__nents || !piter->sg)
+ return false;
+ }
+
+ return true;
+}
+EXPORT_SYMBOL(__sg_page_iter_dma_next);
+
+/**
+ * sg_miter_start - start mapping iteration over a sg list
+ * @miter: sg mapping iter to be started
+ * @sgl: sg list to iterate over
+ * @nents: number of sg entries
+ * @flags: sg iterator flags
+ *
+ * Description:
+ * Starts mapping iterator @miter.
+ *
+ * Context:
+ * Don't care.
+ */
+void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
+ unsigned int nents, unsigned int flags)
+{
+ memset(miter, 0, sizeof(struct sg_mapping_iter));
+
+ __sg_page_iter_start(&miter->piter, sgl, nents, 0);
+ WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
+ miter->__flags = flags;
+}
+EXPORT_SYMBOL(sg_miter_start);
+
+static bool sg_miter_get_next_page(struct sg_mapping_iter *miter)
+{
+ if (!miter->__remaining) {
+ struct scatterlist *sg;
+
+ if (!__sg_page_iter_next(&miter->piter))
+ return false;
+
+ sg = miter->piter.sg;
+
+ miter->__offset = miter->piter.sg_pgoffset ? 0 : sg->offset;
+ miter->piter.sg_pgoffset += miter->__offset >> PAGE_SHIFT;
+ miter->__offset &= PAGE_SIZE - 1;
+ miter->__remaining = sg->offset + sg->length -
+ (miter->piter.sg_pgoffset << PAGE_SHIFT) -
+ miter->__offset;
+ miter->__remaining = min_t(unsigned long, miter->__remaining,
+ PAGE_SIZE - miter->__offset);
+ }
+
+ return true;
+}
+
+/**
+ * sg_miter_skip - reposition mapping iterator
+ * @miter: sg mapping iter to be skipped
+ * @offset: number of bytes to advance from the current location
+ *
+ * Description:
+ * Sets the offset of @miter to its current location plus @offset bytes.
+ * If the mapping iterator @miter has already been advanced by
+ * sg_miter_next(), this stops @miter first.
+ *
+ * Context:
+ * Don't care.
+ *
+ * Returns:
+ * true if @miter contains the valid mapping. false if end of sg
+ * list is reached.
+ */
+bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset)
+{
+ sg_miter_stop(miter);
+
+ while (offset) {
+ off_t consumed;
+
+ if (!sg_miter_get_next_page(miter))
+ return false;
+
+ consumed = min_t(off_t, offset, miter->__remaining);
+ miter->__offset += consumed;
+ miter->__remaining -= consumed;
+ offset -= consumed;
+ }
+
+ return true;
+}
+EXPORT_SYMBOL(sg_miter_skip);
+
+/**
+ * sg_miter_next - proceed mapping iterator to the next mapping
+ * @miter: sg mapping iter to proceed
+ *
+ * Description:
+ * Proceeds @miter to the next mapping. @miter should have been started
+ * using sg_miter_start(). On successful return, @miter->page,
+ * @miter->addr and @miter->length point to the current mapping.
+ *
+ * Context:
+ * May sleep if !SG_MITER_ATOMIC.
+ *
+ * Returns:
+ * true if @miter contains the next mapping. false if end of sg
+ * list is reached.
+ */
+bool sg_miter_next(struct sg_mapping_iter *miter)
+{
+ sg_miter_stop(miter);
+
+ /*
+ * Get to the next page if necessary.
+ * __remaining, __offset is adjusted by sg_miter_stop
+ */
+ if (!sg_miter_get_next_page(miter))
+ return false;
+
+ miter->page = sg_page_iter_page(&miter->piter);
+ miter->consumed = miter->length = miter->__remaining;
+
+ if (miter->__flags & SG_MITER_ATOMIC)
+ miter->addr = kmap_atomic(miter->page) + miter->__offset;
+ else
+ miter->addr = kmap(miter->page) + miter->__offset;
+
+ return true;
+}
+EXPORT_SYMBOL(sg_miter_next);
+
+/**
+ * sg_miter_stop - stop mapping iteration
+ * @miter: sg mapping iter to be stopped
+ *
+ * Description:
+ * Stops mapping iterator @miter. @miter should have been started
+ * using sg_miter_start(). A stopped iteration can be resumed by
+ * calling sg_miter_next() on it. This is useful when resources (kmap)
+ * need to be released during iteration.
+ *
+ * Context:
+ * Don't care.
+ */
+void sg_miter_stop(struct sg_mapping_iter *miter)
+{
+ WARN_ON(miter->consumed > miter->length);
+
+ /* drop resources from the last iteration */
+ if (miter->addr) {
+ miter->__offset += miter->consumed;
+ miter->__remaining -= miter->consumed;
+
+ if (miter->__flags & SG_MITER_TO_SG)
+ flush_dcache_page(miter->page);
+
+ if (miter->__flags & SG_MITER_ATOMIC) {
+ WARN_ON_ONCE(!pagefault_disabled());
+ kunmap_atomic(miter->addr);
+ } else
+ kunmap(miter->page);
+
+ miter->page = NULL;
+ miter->addr = NULL;
+ miter->length = 0;
+ miter->consumed = 0;
+ }
+}
+EXPORT_SYMBOL(sg_miter_stop);
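+
+/*
+ * Illustrative sketch (an editor's example, not part of the upstream file):
+ * the canonical start/next/stop loop, here filling a list with a byte
+ * pattern; sg_copy_buffer() below follows the same shape.
+ *
+ *	struct sg_mapping_iter miter;
+ *
+ *	sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC | SG_MITER_TO_SG);
+ *	while (sg_miter_next(&miter))
+ *		memset(miter.addr, 0xa5, miter.length);
+ *	sg_miter_stop(&miter);
+ */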
+
+/**
+ * sg_copy_buffer - Copy data between a linear buffer and an SG list
+ * @sgl: The SG list
+ * @nents: Number of SG entries
+ * @buf: Where to copy from
+ * @buflen: The number of bytes to copy
+ * @skip: Number of bytes to skip before copying
+ * @to_buffer: transfer direction (true == from an sg list to a
+ * buffer, false == from a buffer to an sg list)
+ *
+ * Returns the number of copied bytes.
+ *
+ **/
+size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
+ size_t buflen, off_t skip, bool to_buffer)
+{
+ unsigned int offset = 0;
+ struct sg_mapping_iter miter;
+ unsigned int sg_flags = SG_MITER_ATOMIC;
+
+ if (to_buffer)
+ sg_flags |= SG_MITER_FROM_SG;
+ else
+ sg_flags |= SG_MITER_TO_SG;
+
+ sg_miter_start(&miter, sgl, nents, sg_flags);
+
+ if (!sg_miter_skip(&miter, skip))
+ return 0;
+
+ while ((offset < buflen) && sg_miter_next(&miter)) {
+ unsigned int len;
+
+ len = min(miter.length, buflen - offset);
+
+ if (to_buffer)
+ memcpy(buf + offset, miter.addr, len);
+ else
+ memcpy(miter.addr, buf + offset, len);
+
+ offset += len;
+ }
+
+ sg_miter_stop(&miter);
+
+ return offset;
+}
+EXPORT_SYMBOL(sg_copy_buffer);
+
+/**
+ * sg_copy_from_buffer - Copy from a linear buffer to an SG list
+ * @sgl: The SG list
+ * @nents: Number of SG entries
+ * @buf: Where to copy from
+ * @buflen: The number of bytes to copy
+ *
+ * Returns the number of copied bytes.
+ *
+ **/
+size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
+ const void *buf, size_t buflen)
+{
+ return sg_copy_buffer(sgl, nents, (void *)buf, buflen, 0, false);
+}
+EXPORT_SYMBOL(sg_copy_from_buffer);
+
+/**
+ * sg_copy_to_buffer - Copy from an SG list to a linear buffer
+ * @sgl: The SG list
+ * @nents: Number of SG entries
+ * @buf: Where to copy to
+ * @buflen: The number of bytes to copy
+ *
+ * Returns the number of copied bytes.
+ *
+ **/
+size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
+ void *buf, size_t buflen)
+{
+ return sg_copy_buffer(sgl, nents, buf, buflen, 0, true);
+}
+EXPORT_SYMBOL(sg_copy_to_buffer);
+
+/**
+ * sg_pcopy_from_buffer - Copy from a linear buffer to an SG list
+ * @sgl: The SG list
+ * @nents: Number of SG entries
+ * @buf: Where to copy from
+ * @buflen: The number of bytes to copy
+ * @skip: Number of bytes to skip before copying
+ *
+ * Returns the number of copied bytes.
+ *
+ **/
+size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
+ const void *buf, size_t buflen, off_t skip)
+{
+ return sg_copy_buffer(sgl, nents, (void *)buf, buflen, skip, false);
+}
+EXPORT_SYMBOL(sg_pcopy_from_buffer);
+
+/**
+ * sg_pcopy_to_buffer - Copy from an SG list to a linear buffer
+ * @sgl: The SG list
+ * @nents: Number of SG entries
+ * @buf: Where to copy to
+ * @buflen: The number of bytes to copy
+ * @skip: Number of bytes to skip before copying
+ *
+ * Returns the number of copied bytes.
+ *
+ **/
+size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
+ void *buf, size_t buflen, off_t skip)
+{
+ return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
+}
+EXPORT_SYMBOL(sg_pcopy_to_buffer);
+
+/**
+ * sg_zero_buffer - Zero-out a part of a SG list
+ * @sgl: The SG list
+ * @nents: Number of SG entries
+ * @buflen: The number of bytes to zero out
+ * @skip: Number of bytes to skip before zeroing
+ *
+ * Returns the number of bytes zeroed.
+ **/
+size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents,
+ size_t buflen, off_t skip)
+{
+ unsigned int offset = 0;
+ struct sg_mapping_iter miter;
+ unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;
+
+ sg_miter_start(&miter, sgl, nents, sg_flags);
+
+ if (!sg_miter_skip(&miter, skip))
+ return 0;
+
+ while (offset < buflen && sg_miter_next(&miter)) {
+ unsigned int len;
+
+ len = min(miter.length, buflen - offset);
+ memset(miter.addr, 0, len);
+
+ offset += len;
+ }
+
+ sg_miter_stop(&miter);
+ return offset;
+}
+EXPORT_SYMBOL(sg_zero_buffer);
+
+/*
+ * Extract and pin a list of up to sg_max pages from UBUF- or IOVEC-class
+ * iterators, and add them to the scatterlist.
+ */
+static ssize_t extract_user_to_sg(struct iov_iter *iter,
+ ssize_t maxsize,
+ struct sg_table *sgtable,
+ unsigned int sg_max,
+ iov_iter_extraction_t extraction_flags)
+{
+ struct scatterlist *sg = sgtable->sgl + sgtable->nents;
+ struct page **pages;
+ unsigned int npages;
+ ssize_t ret = 0, res;
+ size_t len, off;
+
+ /* We decant the page list into the tail of the scatterlist */
+ pages = (void *)sgtable->sgl +
+ array_size(sg_max, sizeof(struct scatterlist));
+ pages -= sg_max;
+
+ do {
+ res = iov_iter_extract_pages(iter, &pages, maxsize, sg_max,
+ extraction_flags, &off);
+ if (res < 0)
+ goto failed;
+
+ len = res;
+ maxsize -= len;
+ ret += len;
+ npages = DIV_ROUND_UP(off + len, PAGE_SIZE);
+ sg_max -= npages;
+
+ for (; npages > 0; npages--) {
+ struct page *page = *pages;
+ size_t seg = min_t(size_t, PAGE_SIZE - off, len);
+
+ *pages++ = NULL;
+ sg_set_page(sg, page, seg, off);
+ sgtable->nents++;
+ sg++;
+ len -= seg;
+ off = 0;
+ }
+ } while (maxsize > 0 && sg_max > 0);
+
+ return ret;
+
+failed:
+ while (sgtable->nents > sgtable->orig_nents)
+ unpin_user_page(sg_page(&sgtable->sgl[--sgtable->nents]));
+ return res;
+}
+
+/*
+ * Extract up to sg_max pages from a BVEC-type iterator and add them to the
+ * scatterlist. The pages are not pinned.
+ */
+static ssize_t extract_bvec_to_sg(struct iov_iter *iter,
+ ssize_t maxsize,
+ struct sg_table *sgtable,
+ unsigned int sg_max,
+ iov_iter_extraction_t extraction_flags)
+{
+ const struct bio_vec *bv = iter->bvec;
+ struct scatterlist *sg = sgtable->sgl + sgtable->nents;
+ unsigned long start = iter->iov_offset;
+ unsigned int i;
+ ssize_t ret = 0;
+
+ for (i = 0; i < iter->nr_segs; i++) {
+ size_t off, len;
+
+ len = bv[i].bv_len;
+ if (start >= len) {
+ start -= len;
+ continue;
+ }
+
+ len = min_t(size_t, maxsize, len - start);
+ off = bv[i].bv_offset + start;
+
+ sg_set_page(sg, bv[i].bv_page, len, off);
+ sgtable->nents++;
+ sg++;
+ sg_max--;
+
+ ret += len;
+ maxsize -= len;
+ if (maxsize <= 0 || sg_max == 0)
+ break;
+ start = 0;
+ }
+
+ if (ret > 0)
+ iov_iter_advance(iter, ret);
+ return ret;
+}
+
+/*
+ * Extract up to sg_max pages from a KVEC-type iterator and add them to the
+ * scatterlist. This can deal with vmalloc'd buffers as well as kmalloc'd or
+ * static buffers. The pages are not pinned.
+ */
+static ssize_t extract_kvec_to_sg(struct iov_iter *iter,
+ ssize_t maxsize,
+ struct sg_table *sgtable,
+ unsigned int sg_max,
+ iov_iter_extraction_t extraction_flags)
+{
+ const struct kvec *kv = iter->kvec;
+ struct scatterlist *sg = sgtable->sgl + sgtable->nents;
+ unsigned long start = iter->iov_offset;
+ unsigned int i;
+ ssize_t ret = 0;
+
+ for (i = 0; i < iter->nr_segs; i++) {
+ struct page *page;
+ unsigned long kaddr;
+ size_t off, len, seg;
+
+ len = kv[i].iov_len;
+ if (start >= len) {
+ start -= len;
+ continue;
+ }
+
+ kaddr = (unsigned long)kv[i].iov_base + start;
+ off = kaddr & ~PAGE_MASK;
+ len = min_t(size_t, maxsize, len - start);
+ kaddr &= PAGE_MASK;
+
+ maxsize -= len;
+ ret += len;
+ do {
+ seg = min_t(size_t, len, PAGE_SIZE - off);
+ if (is_vmalloc_or_module_addr((void *)kaddr))
+ page = vmalloc_to_page((void *)kaddr);
+ else
+ page = virt_to_page((void *)kaddr);
+
+			sg_set_page(sg, page, seg, off);
+ sgtable->nents++;
+ sg++;
+ sg_max--;
+
+ len -= seg;
+ kaddr += PAGE_SIZE;
+ off = 0;
+ } while (len > 0 && sg_max > 0);
+
+ if (maxsize <= 0 || sg_max == 0)
+ break;
+ start = 0;
+ }
+
+ if (ret > 0)
+ iov_iter_advance(iter, ret);
+ return ret;
+}
+
+/*
+ * Extract up to sg_max folios from an XARRAY-type iterator and add them to
+ * the scatterlist. The pages are not pinned.
+ */
+static ssize_t extract_xarray_to_sg(struct iov_iter *iter,
+ ssize_t maxsize,
+ struct sg_table *sgtable,
+ unsigned int sg_max,
+ iov_iter_extraction_t extraction_flags)
+{
+ struct scatterlist *sg = sgtable->sgl + sgtable->nents;
+ struct xarray *xa = iter->xarray;
+ struct folio *folio;
+ loff_t start = iter->xarray_start + iter->iov_offset;
+ pgoff_t index = start / PAGE_SIZE;
+ ssize_t ret = 0;
+ size_t offset, len;
+ XA_STATE(xas, xa, index);
+
+ rcu_read_lock();
+
+ xas_for_each(&xas, folio, ULONG_MAX) {
+ if (xas_retry(&xas, folio))
+ continue;
+ if (WARN_ON(xa_is_value(folio)))
+ break;
+ if (WARN_ON(folio_test_hugetlb(folio)))
+ break;
+
+ offset = offset_in_folio(folio, start);
+ len = min_t(size_t, maxsize, folio_size(folio) - offset);
+
+ sg_set_page(sg, folio_page(folio, 0), len, offset);
+ sgtable->nents++;
+ sg++;
+ sg_max--;
+
+ maxsize -= len;
+ ret += len;
+ if (maxsize <= 0 || sg_max == 0)
+ break;
+ }
+
+ rcu_read_unlock();
+ if (ret > 0)
+ iov_iter_advance(iter, ret);
+ return ret;
+}
+
+/**
+ * extract_iter_to_sg - Extract pages from an iterator and add to an sglist
+ * @iter: The iterator to extract from
+ * @maxsize: The amount of iterator to copy
+ * @sgtable: The scatterlist table to fill in
+ * @sg_max: Maximum number of elements in @sgtable that may be filled
+ * @extraction_flags: Flags to qualify the request
+ *
+ * Extract the page fragments from the given amount of the source iterator and
+ * add them to a scatterlist that refers to all of those bits, to a maximum
+ * addition of @sg_max elements.
+ *
+ * The pages referred to by UBUF- and IOVEC-type iterators are extracted and
+ * pinned; BVEC-, KVEC- and XARRAY-type are extracted but aren't pinned; PIPE-
+ * and DISCARD-type are not supported.
+ *
+ * No end mark is placed on the scatterlist; that's left to the caller.
+ *
+ * @extraction_flags can have ITER_ALLOW_P2PDMA set to request peer-to-peer DMA
+ * be allowed on the pages extracted.
+ *
+ * If successful, @sgtable->nents is updated to include the number of elements
+ * added and the number of bytes added is returned. @sgtable->orig_nents is
+ * left unaltered.
+ *
+ * The iov_iter_extract_mode() function should be used to query how cleanup
+ * should be performed.
+ */
+ssize_t extract_iter_to_sg(struct iov_iter *iter, size_t maxsize,
+ struct sg_table *sgtable, unsigned int sg_max,
+ iov_iter_extraction_t extraction_flags)
+{
+ if (maxsize == 0)
+ return 0;
+
+ switch (iov_iter_type(iter)) {
+ case ITER_UBUF:
+ case ITER_IOVEC:
+ return extract_user_to_sg(iter, maxsize, sgtable, sg_max,
+ extraction_flags);
+ case ITER_BVEC:
+ return extract_bvec_to_sg(iter, maxsize, sgtable, sg_max,
+ extraction_flags);
+ case ITER_KVEC:
+ return extract_kvec_to_sg(iter, maxsize, sgtable, sg_max,
+ extraction_flags);
+ case ITER_XARRAY:
+ return extract_xarray_to_sg(iter, maxsize, sgtable, sg_max,
+ extraction_flags);
+ default:
+ pr_err("%s(%u) unsupported\n", __func__, iov_iter_type(iter));
+ WARN_ON_ONCE(1);
+ return -EIO;
+ }
+}
+EXPORT_SYMBOL_GPL(extract_iter_to_sg);
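+
+/*
+ * Usage sketch (illustrative; the 16-entry array and error handling are
+ * arbitrary): building an SG list over the next portion of an iterator
+ * might look like:
+ *
+ *	struct scatterlist sgl[16];
+ *	struct sg_table sgt = { .sgl = sgl };
+ *	ssize_t n;
+ *
+ *	sg_init_table(sgl, ARRAY_SIZE(sgl));
+ *	n = extract_iter_to_sg(iter, 16 * PAGE_SIZE, &sgt, ARRAY_SIZE(sgl), 0);
+ *	if (n > 0)
+ *		sg_mark_end(&sgt.sgl[sgt.nents - 1]);
+ *
+ * with the extracted user pages released later according to
+ * iov_iter_extract_mode(), as noted above.
+ */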
diff --git a/lib/seq_buf.c b/lib/seq_buf.c
new file mode 100644
index 000000000..45c450f42
--- /dev/null
+++ b/lib/seq_buf.c
@@ -0,0 +1,429 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * seq_buf.c
+ *
+ * Copyright (C) 2014 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
+ *
+ * The seq_buf is a handy tool that allows you to pass a descriptor around
+ * to a buffer that other functions can write to. It is similar to the
+ * seq_file functionality but has some differences.
+ *
+ * To use it, the seq_buf must be initialized with seq_buf_init().
+ * This will set up the counters within the descriptor. You can call
+ * seq_buf_init() more than once to reset the seq_buf to start
+ * from scratch.
+ */
+#include <linux/uaccess.h>
+#include <linux/seq_file.h>
+#include <linux/seq_buf.h>
+
+/**
+ * seq_buf_can_fit - can the new data fit in the current buffer?
+ * @s: the seq_buf descriptor
+ * @len: The length to see if it can fit in the current buffer
+ *
+ * Returns true if there's enough unused space in the seq_buf buffer
+ * to fit the amount of new data according to @len.
+ */
+static bool seq_buf_can_fit(struct seq_buf *s, size_t len)
+{
+ return s->len + len <= s->size;
+}
+
+/**
+ * seq_buf_print_seq - move the contents of seq_buf into a seq_file
+ * @m: the seq_file descriptor that is the destination
+ * @s: the seq_buf descriptor that is the source.
+ *
+ * Returns zero on success, non-zero otherwise
+ */
+int seq_buf_print_seq(struct seq_file *m, struct seq_buf *s)
+{
+ unsigned int len = seq_buf_used(s);
+
+ return seq_write(m, s->buffer, len);
+}
+
+/**
+ * seq_buf_vprintf - sequence printing of information.
+ * @s: seq_buf descriptor
+ * @fmt: printf format string
+ * @args: va_list of arguments from a printf() type function
+ *
+ * Writes a vsnprintf() format into the sequence buffer.
+ *
+ * Returns zero on success, -1 on overflow.
+ */
+int seq_buf_vprintf(struct seq_buf *s, const char *fmt, va_list args)
+{
+ int len;
+
+ WARN_ON(s->size == 0);
+
+ if (s->len < s->size) {
+ len = vsnprintf(s->buffer + s->len, s->size - s->len, fmt, args);
+ if (s->len + len < s->size) {
+ s->len += len;
+ return 0;
+ }
+ }
+ seq_buf_set_overflow(s);
+ return -1;
+}
+
+/**
+ * seq_buf_printf - sequence printing of information
+ * @s: seq_buf descriptor
+ * @fmt: printf format string
+ *
+ * Writes a printf() format into the sequence buffer.
+ *
+ * Returns zero on success, -1 on overflow.
+ */
+int seq_buf_printf(struct seq_buf *s, const char *fmt, ...)
+{
+ va_list ap;
+ int ret;
+
+ va_start(ap, fmt);
+ ret = seq_buf_vprintf(s, fmt, ap);
+ va_end(ap);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(seq_buf_printf);
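+
+/*
+ * Usage sketch (illustrative; "cpu" and "state" are hypothetical values):
+ * wrap a local buffer in a descriptor, print into it, then check for
+ * truncation once at the end:
+ *
+ *	char buf[128];
+ *	struct seq_buf s;
+ *
+ *	seq_buf_init(&s, buf, sizeof(buf));
+ *	seq_buf_printf(&s, "cpu=%d ", cpu);
+ *	seq_buf_printf(&s, "state=%s", state);
+ *	if (seq_buf_has_overflowed(&s))
+ *		pr_warn("output truncated\n");
+ */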
+
+/**
+ * seq_buf_do_printk - printk seq_buf line by line
+ * @s: seq_buf descriptor
+ * @lvl: printk level
+ *
+ * printk()-s a multi-line sequential buffer line by line. The function
+ * makes sure that the buffer in @s is nul terminated and safe to read
+ * as a string.
+ */
+void seq_buf_do_printk(struct seq_buf *s, const char *lvl)
+{
+ const char *start, *lf;
+
+ if (s->size == 0 || s->len == 0)
+ return;
+
+ seq_buf_terminate(s);
+
+ start = s->buffer;
+ while ((lf = strchr(start, '\n'))) {
+ int len = lf - start + 1;
+
+ printk("%s%.*s", lvl, len, start);
+ start = ++lf;
+ }
+
+ /* No trailing LF */
+ if (start < s->buffer + s->len)
+ printk("%s%s\n", lvl, start);
+}
+EXPORT_SYMBOL_GPL(seq_buf_do_printk);
+
+#ifdef CONFIG_BINARY_PRINTF
+/**
+ * seq_buf_bprintf - Write the printf string from binary arguments
+ * @s: seq_buf descriptor
+ * @fmt: The format string for the @binary arguments
+ * @binary: The binary arguments for @fmt.
+ *
+ * When recording in a fast path, a printf may be recorded by just
+ * saving the format and the arguments as they were passed to the
+ * function, instead of wasting cycles converting the arguments into
+ * ASCII characters. The arguments are saved in a 32-bit word array
+ * whose layout is defined by the format string.
+ *
+ * This function will take the format and the binary array and finish
+ * the conversion into the ASCII string within the buffer.
+ *
+ * Returns zero on success, -1 on overflow.
+ */
+int seq_buf_bprintf(struct seq_buf *s, const char *fmt, const u32 *binary)
+{
+ unsigned int len = seq_buf_buffer_left(s);
+ int ret;
+
+ WARN_ON(s->size == 0);
+
+ if (s->len < s->size) {
+ ret = bstr_printf(s->buffer + s->len, len, fmt, binary);
+ if (s->len + ret < s->size) {
+ s->len += ret;
+ return 0;
+ }
+ }
+ seq_buf_set_overflow(s);
+ return -1;
+}
+#endif /* CONFIG_BINARY_PRINTF */
+
+/**
+ * seq_buf_puts - sequence printing of simple string
+ * @s: seq_buf descriptor
+ * @str: simple string to record
+ *
+ * Copy a simple string into the sequence buffer.
+ *
+ * Returns zero on success, -1 on overflow
+ */
+int seq_buf_puts(struct seq_buf *s, const char *str)
+{
+ size_t len = strlen(str);
+
+ WARN_ON(s->size == 0);
+
+ /* Add 1 to len for the trailing null byte which must be there */
+ len += 1;
+
+ if (seq_buf_can_fit(s, len)) {
+ memcpy(s->buffer + s->len, str, len);
+ /* Don't count the trailing null byte against the capacity */
+ s->len += len - 1;
+ return 0;
+ }
+ seq_buf_set_overflow(s);
+ return -1;
+}
+
+/**
+ * seq_buf_putc - sequence printing of simple character
+ * @s: seq_buf descriptor
+ * @c: simple character to record
+ *
+ * Copy a single character into the sequence buffer.
+ *
+ * Returns zero on success, -1 on overflow
+ */
+int seq_buf_putc(struct seq_buf *s, unsigned char c)
+{
+ WARN_ON(s->size == 0);
+
+ if (seq_buf_can_fit(s, 1)) {
+ s->buffer[s->len++] = c;
+ return 0;
+ }
+ seq_buf_set_overflow(s);
+ return -1;
+}
+
+/**
+ * seq_buf_putmem - write raw data into the sequence buffer
+ * @s: seq_buf descriptor
+ * @mem: The raw memory to copy into the buffer
+ * @len: The length of the raw memory to copy (in bytes)
+ *
+ * There may be cases where raw memory needs to be written into the
+ * buffer and a strcpy() would not work. Using this function allows
+ * for such cases.
+ *
+ * Returns zero on success, -1 on overflow
+ */
+int seq_buf_putmem(struct seq_buf *s, const void *mem, unsigned int len)
+{
+ WARN_ON(s->size == 0);
+
+ if (seq_buf_can_fit(s, len)) {
+ memcpy(s->buffer + s->len, mem, len);
+ s->len += len;
+ return 0;
+ }
+ seq_buf_set_overflow(s);
+ return -1;
+}
+
+#define MAX_MEMHEX_BYTES 8U
+#define HEX_CHARS (MAX_MEMHEX_BYTES*2 + 1)
+
+/**
+ * seq_buf_putmem_hex - write raw memory into the buffer in ASCII hex
+ * @s: seq_buf descriptor
+ * @mem: The raw memory to write its hex ASCII representation of
+ * @len: The length of the raw memory to copy (in bytes)
+ *
+ * This is similar to seq_buf_putmem() except instead of just copying the
+ * raw memory into the buffer it writes its ASCII representation of it
+ * in hex characters.
+ *
+ * Returns zero on success, -1 on overflow
+ */
+int seq_buf_putmem_hex(struct seq_buf *s, const void *mem,
+ unsigned int len)
+{
+ unsigned char hex[HEX_CHARS];
+ const unsigned char *data = mem;
+ unsigned int start_len;
+ int i, j;
+
+ WARN_ON(s->size == 0);
+
+ BUILD_BUG_ON(MAX_MEMHEX_BYTES * 2 >= HEX_CHARS);
+
+ while (len) {
+ start_len = min(len, MAX_MEMHEX_BYTES);
+#ifdef __BIG_ENDIAN
+ for (i = 0, j = 0; i < start_len; i++) {
+#else
+ for (i = start_len-1, j = 0; i >= 0; i--) {
+#endif
+ hex[j++] = hex_asc_hi(data[i]);
+ hex[j++] = hex_asc_lo(data[i]);
+ }
+ if (WARN_ON_ONCE(j == 0 || j/2 > len))
+ break;
+
+ /* j increments twice per loop */
+ hex[j++] = ' ';
+
+ seq_buf_putmem(s, hex, j);
+ if (seq_buf_has_overflowed(s))
+ return -1;
+
+ len -= start_len;
+ data += start_len;
+ }
+ return 0;
+}
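+
+/*
+ * For example, on a little-endian machine dumping the three bytes
+ * { 0x12, 0x34, 0x56 } emits "563412 ": within each group of up to
+ * MAX_MEMHEX_BYTES the bytes are written most-significant first,
+ * followed by a single separating space.
+ */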
+
+/**
+ * seq_buf_path - copy a path into the sequence buffer
+ * @s: seq_buf descriptor
+ * @path: path to write into the sequence buffer.
+ * @esc: set of characters to escape in the output
+ *
+ * Write a path name into the sequence buffer.
+ *
+ * Returns the number of written bytes on success, -1 on overflow
+ */
+int seq_buf_path(struct seq_buf *s, const struct path *path, const char *esc)
+{
+ char *buf;
+ size_t size = seq_buf_get_buf(s, &buf);
+ int res = -1;
+
+ WARN_ON(s->size == 0);
+
+ if (size) {
+ char *p = d_path(path, buf, size);
+ if (!IS_ERR(p)) {
+ char *end = mangle_path(buf, p, esc);
+ if (end)
+ res = end - buf;
+ }
+ }
+ seq_buf_commit(s, res);
+
+ return res;
+}
+
+/**
+ * seq_buf_to_user - copy the sequence buffer to user space
+ * @s: seq_buf descriptor
+ * @ubuf: The userspace memory location to copy to
+ * @cnt: The amount to copy
+ *
+ * Copies the sequence buffer into the userspace memory pointed to
+ * by @ubuf. It starts from the last read position (@s->readpos)
+ * and writes up to @cnt characters or until it reaches the end of
+ * the content in the buffer (@s->len), whichever comes first.
+ *
+ * On success, it returns the number of bytes copied.
+ *
+ * It returns -EBUSY if all of the content in the sequence has
+ * already been read, which includes the case of an empty sequence
+ * (@s->len == @s->readpos).
+ *
+ * Returns -EFAULT if the copy to userspace fails.
+ */
+int seq_buf_to_user(struct seq_buf *s, char __user *ubuf, int cnt)
+{
+ int len;
+ int ret;
+
+ if (!cnt)
+ return 0;
+
+ len = seq_buf_used(s);
+
+ if (len <= s->readpos)
+ return -EBUSY;
+
+ len -= s->readpos;
+ if (cnt > len)
+ cnt = len;
+ ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
+ if (ret == cnt)
+ return -EFAULT;
+
+ cnt -= ret;
+
+ s->readpos += cnt;
+ return cnt;
+}
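+
+/*
+ * Usage sketch (illustrative; "foo_seq_buf" is a hypothetical, already
+ * filled descriptor): a read() handler draining a seq_buf might do:
+ *
+ *	static ssize_t foo_read(struct file *file, char __user *ubuf,
+ *				size_t count, loff_t *ppos)
+ *	{
+ *		int ret = seq_buf_to_user(&foo_seq_buf, ubuf, count);
+ *
+ *		return ret == -EBUSY ? 0 : ret;
+ *	}
+ *
+ * mapping -EBUSY to 0 so that user space sees EOF once everything has
+ * been read.
+ */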
+
+/**
+ * seq_buf_hex_dump - print formatted hex dump into the sequence buffer
+ * @s: seq_buf descriptor
+ * @prefix_str: string to prefix each line with;
+ * caller supplies trailing spaces for alignment if desired
+ * @prefix_type: controls whether prefix of an offset, address, or none
+ * is printed (%DUMP_PREFIX_OFFSET, %DUMP_PREFIX_ADDRESS, %DUMP_PREFIX_NONE)
+ * @rowsize: number of bytes to print per line; must be 16 or 32
+ * @groupsize: number of bytes to print at a time (1, 2, 4, 8; default = 1)
+ * @buf: data blob to dump
+ * @len: number of bytes in the @buf
+ * @ascii: include ASCII after the hex output
+ *
+ * This function is an analogue of print_hex_dump() and thus has a similar interface.
+ *
+ * linebuf size is maximal length for one line.
+ * 32 * 3 - maximum bytes per line, each printed into 2 chars + 1 for
+ * separating space
+ * 2 - spaces separating hex dump and ascii representation
+ * 32 - ascii representation
+ * 1 - terminating '\0'
+ *
+ * Returns zero on success, -1 on overflow
+ */
+int seq_buf_hex_dump(struct seq_buf *s, const char *prefix_str, int prefix_type,
+ int rowsize, int groupsize,
+ const void *buf, size_t len, bool ascii)
+{
+ const u8 *ptr = buf;
+ int i, linelen, remaining = len;
+ unsigned char linebuf[32 * 3 + 2 + 32 + 1];
+ int ret;
+
+ if (rowsize != 16 && rowsize != 32)
+ rowsize = 16;
+
+ for (i = 0; i < len; i += rowsize) {
+ linelen = min(remaining, rowsize);
+ remaining -= rowsize;
+
+ hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize,
+ linebuf, sizeof(linebuf), ascii);
+
+ switch (prefix_type) {
+ case DUMP_PREFIX_ADDRESS:
+ ret = seq_buf_printf(s, "%s%p: %s\n",
+ prefix_str, ptr + i, linebuf);
+ break;
+ case DUMP_PREFIX_OFFSET:
+ ret = seq_buf_printf(s, "%s%.8x: %s\n",
+ prefix_str, i, linebuf);
+ break;
+ default:
+ ret = seq_buf_printf(s, "%s%s\n", prefix_str, linebuf);
+ break;
+ }
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
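+
+/*
+ * Usage sketch (illustrative): dumping a blob with offset prefixes,
+ * 16 bytes per line, byte-sized groups and the ASCII column enabled:
+ *
+ *	seq_buf_hex_dump(&s, "raw: ", DUMP_PREFIX_OFFSET, 16, 1,
+ *			 data, data_len, true);
+ */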
diff --git a/lib/sg_pool.c b/lib/sg_pool.c
new file mode 100644
index 000000000..9bfe60ca3
--- /dev/null
+++ b/lib/sg_pool.c
@@ -0,0 +1,180 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/init.h>
+#include <linux/scatterlist.h>
+#include <linux/mempool.h>
+#include <linux/slab.h>
+
+#define SG_MEMPOOL_NR ARRAY_SIZE(sg_pools)
+#define SG_MEMPOOL_SIZE 2
+
+struct sg_pool {
+ size_t size;
+ char *name;
+ struct kmem_cache *slab;
+ mempool_t *pool;
+};
+
+#define SP(x) { .size = x, "sgpool-" __stringify(x) }
+#if (SG_CHUNK_SIZE < 32)
+#error SG_CHUNK_SIZE is too small (must be 32 or greater)
+#endif
+static struct sg_pool sg_pools[] = {
+ SP(8),
+ SP(16),
+#if (SG_CHUNK_SIZE > 32)
+ SP(32),
+#if (SG_CHUNK_SIZE > 64)
+ SP(64),
+#if (SG_CHUNK_SIZE > 128)
+ SP(128),
+#if (SG_CHUNK_SIZE > 256)
+#error SG_CHUNK_SIZE is too large (256 MAX)
+#endif
+#endif
+#endif
+#endif
+ SP(SG_CHUNK_SIZE)
+};
+#undef SP
+
+static inline unsigned int sg_pool_index(unsigned short nents)
+{
+ unsigned int index;
+
+ BUG_ON(nents > SG_CHUNK_SIZE);
+
+ if (nents <= 8)
+ index = 0;
+ else
+ index = get_count_order(nents) - 3;
+
+ return index;
+}
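+
+/*
+ * For example, with SG_CHUNK_SIZE == 128 the mapping is: nents 1..8 ->
+ * sgpool-8, 9..16 -> sgpool-16, 17..32 -> sgpool-32, 33..64 -> sgpool-64
+ * and 65..128 -> sgpool-128; get_count_order(nents) - 3 selects the
+ * smallest power-of-two pool that fits.
+ */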
+
+static void sg_pool_free(struct scatterlist *sgl, unsigned int nents)
+{
+ struct sg_pool *sgp;
+
+ sgp = sg_pools + sg_pool_index(nents);
+ mempool_free(sgl, sgp->pool);
+}
+
+static struct scatterlist *sg_pool_alloc(unsigned int nents, gfp_t gfp_mask)
+{
+ struct sg_pool *sgp;
+
+ sgp = sg_pools + sg_pool_index(nents);
+ return mempool_alloc(sgp->pool, gfp_mask);
+}
+
+/**
+ * sg_free_table_chained - Free a previously mapped sg table
+ * @table: The sg table header to use
+ * @nents_first_chunk: size of the first_chunk SGL passed to
+ * sg_alloc_table_chained
+ *
+ * Description:
+ * Free an sg table previously allocated and setup with
+ * sg_alloc_table_chained().
+ *
+ * @nents_first_chunk must be the same value that was passed to
+ * sg_alloc_table_chained().
+ *
+ **/
+void sg_free_table_chained(struct sg_table *table,
+ unsigned nents_first_chunk)
+{
+ if (table->orig_nents <= nents_first_chunk)
+ return;
+
+ if (nents_first_chunk == 1)
+ nents_first_chunk = 0;
+
+ __sg_free_table(table, SG_CHUNK_SIZE, nents_first_chunk, sg_pool_free,
+ table->orig_nents);
+}
+EXPORT_SYMBOL_GPL(sg_free_table_chained);
+
+/**
+ * sg_alloc_table_chained - Allocate and chain SGLs in an sg table
+ * @table: The sg table header to use
+ * @nents: Number of entries in sg list
+ * @first_chunk: first SGL
+ * @nents_first_chunk: number of entries in @first_chunk
+ *
+ * Description:
+ * Allocate and chain SGLs in an sg table. If @nents is larger than
+ * @nents_first_chunk a chained sg table will be set up. @first_chunk
+ * is ignored if @nents_first_chunk <= 1 because the caller expects
+ * the returned SGL to point at a non-chained SGL.
+ *
+ **/
+int sg_alloc_table_chained(struct sg_table *table, int nents,
+ struct scatterlist *first_chunk, unsigned nents_first_chunk)
+{
+ int ret;
+
+ BUG_ON(!nents);
+
+ if (first_chunk && nents_first_chunk) {
+ if (nents <= nents_first_chunk) {
+ table->nents = table->orig_nents = nents;
+ sg_init_table(table->sgl, nents);
+ return 0;
+ }
+ }
+
+	/* The caller expects the first SGL to contain a real entry */
+ if (nents_first_chunk <= 1) {
+ first_chunk = NULL;
+ nents_first_chunk = 0;
+ }
+
+ ret = __sg_alloc_table(table, nents, SG_CHUNK_SIZE,
+ first_chunk, nents_first_chunk,
+ GFP_ATOMIC, sg_pool_alloc);
+ if (unlikely(ret))
+ sg_free_table_chained(table, nents_first_chunk);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(sg_alloc_table_chained);
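+
+/*
+ * Usage sketch (illustrative; an inline first chunk of SG_CHUNK_SIZE
+ * entries is a common pattern): embed a small array and fall back to the
+ * mempools only for larger lists:
+ *
+ *	struct scatterlist first_chunk[SG_CHUNK_SIZE];
+ *	struct sg_table table;
+ *
+ *	if (sg_alloc_table_chained(&table, nents, first_chunk,
+ *				   SG_CHUNK_SIZE))
+ *		return -ENOMEM;
+ *	...
+ *	sg_free_table_chained(&table, SG_CHUNK_SIZE);
+ */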
+
+static __init int sg_pool_init(void)
+{
+ int i;
+
+ for (i = 0; i < SG_MEMPOOL_NR; i++) {
+ struct sg_pool *sgp = sg_pools + i;
+ int size = sgp->size * sizeof(struct scatterlist);
+
+ sgp->slab = kmem_cache_create(sgp->name, size, 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!sgp->slab) {
+ printk(KERN_ERR "SG_POOL: can't init sg slab %s\n",
+ sgp->name);
+ goto cleanup_sdb;
+ }
+
+ sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,
+ sgp->slab);
+ if (!sgp->pool) {
+ printk(KERN_ERR "SG_POOL: can't init sg mempool %s\n",
+ sgp->name);
+ goto cleanup_sdb;
+ }
+ }
+
+ return 0;
+
+cleanup_sdb:
+ for (i = 0; i < SG_MEMPOOL_NR; i++) {
+ struct sg_pool *sgp = sg_pools + i;
+
+ mempool_destroy(sgp->pool);
+ kmem_cache_destroy(sgp->slab);
+ }
+
+ return -ENOMEM;
+}
+
+subsys_initcall(sg_pool_init);
diff --git a/lib/sg_split.c b/lib/sg_split.c
new file mode 100644
index 000000000..60a0babeb
--- /dev/null
+++ b/lib/sg_split.c
@@ -0,0 +1,202 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2015 Robert Jarzmik <robert.jarzmik@free.fr>
+ *
+ * Scatterlist splitting helpers.
+ */
+
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+
+struct sg_splitter {
+ struct scatterlist *in_sg0;
+ int nents;
+ off_t skip_sg0;
+ unsigned int length_last_sg;
+
+ struct scatterlist *out_sg;
+};
+
+static int sg_calculate_split(struct scatterlist *in, int nents, int nb_splits,
+ off_t skip, const size_t *sizes,
+ struct sg_splitter *splitters, bool mapped)
+{
+ int i;
+ unsigned int sglen;
+ size_t size = sizes[0], len;
+ struct sg_splitter *curr = splitters;
+ struct scatterlist *sg;
+
+ for (i = 0; i < nb_splits; i++) {
+ splitters[i].in_sg0 = NULL;
+ splitters[i].nents = 0;
+ }
+
+ for_each_sg(in, sg, nents, i) {
+ sglen = mapped ? sg_dma_len(sg) : sg->length;
+ if (skip > sglen) {
+ skip -= sglen;
+ continue;
+ }
+
+ len = min_t(size_t, size, sglen - skip);
+ if (!curr->in_sg0) {
+ curr->in_sg0 = sg;
+ curr->skip_sg0 = skip;
+ }
+ size -= len;
+ curr->nents++;
+ curr->length_last_sg = len;
+
+ while (!size && (skip + len < sglen) && (--nb_splits > 0)) {
+ curr++;
+ size = *(++sizes);
+ skip += len;
+ len = min_t(size_t, size, sglen - skip);
+
+ curr->in_sg0 = sg;
+ curr->skip_sg0 = skip;
+ curr->nents = 1;
+ curr->length_last_sg = len;
+ size -= len;
+ }
+ skip = 0;
+
+ if (!size && --nb_splits > 0) {
+ curr++;
+ size = *(++sizes);
+ }
+
+ if (!nb_splits)
+ break;
+ }
+
+ return (size || !splitters[0].in_sg0) ? -EINVAL : 0;
+}
+
+static void sg_split_phys(struct sg_splitter *splitters, const int nb_splits)
+{
+ int i, j;
+ struct scatterlist *in_sg, *out_sg;
+ struct sg_splitter *split;
+
+ for (i = 0, split = splitters; i < nb_splits; i++, split++) {
+ in_sg = split->in_sg0;
+ out_sg = split->out_sg;
+ for (j = 0; j < split->nents; j++, out_sg++) {
+ *out_sg = *in_sg;
+ if (!j) {
+ out_sg->offset += split->skip_sg0;
+ out_sg->length -= split->skip_sg0;
+ } else {
+ out_sg->offset = 0;
+ }
+ sg_dma_address(out_sg) = 0;
+ sg_dma_len(out_sg) = 0;
+ in_sg = sg_next(in_sg);
+ }
+ out_sg[-1].length = split->length_last_sg;
+ sg_mark_end(out_sg - 1);
+ }
+}
+
+static void sg_split_mapped(struct sg_splitter *splitters, const int nb_splits)
+{
+ int i, j;
+ struct scatterlist *in_sg, *out_sg;
+ struct sg_splitter *split;
+
+ for (i = 0, split = splitters; i < nb_splits; i++, split++) {
+ in_sg = split->in_sg0;
+ out_sg = split->out_sg;
+ for (j = 0; j < split->nents; j++, out_sg++) {
+ sg_dma_address(out_sg) = sg_dma_address(in_sg);
+ sg_dma_len(out_sg) = sg_dma_len(in_sg);
+ if (!j) {
+ sg_dma_address(out_sg) += split->skip_sg0;
+ sg_dma_len(out_sg) -= split->skip_sg0;
+ }
+ in_sg = sg_next(in_sg);
+ }
+ sg_dma_len(--out_sg) = split->length_last_sg;
+ }
+}
+
+/**
+ * sg_split - split a scatterlist into several scatterlists
+ * @in: the input sg list
+ * @in_mapped_nents: the result of a dma_map_sg(in, ...), or 0 if not mapped.
+ * @skip: the number of bytes to skip in the input sg list
+ * @nb_splits: the number of desired sg outputs
+ * @split_sizes: the respective size of each output sg list in bytes
+ * @out: an array where to store the allocated output sg lists
+ * @out_mapped_nents: the number of mapped sg entries of each resulting sg
+ * list. May be NULL if the sg list is not mapped (@in_mapped_nents == 0)
+ * @gfp_mask: the allocation flag
+ *
+ * This function splits the input sg list into @nb_splits sg lists, which are
+ * allocated and stored into @out.
+ * @in is split into:
+ * - @out[0], which covers bytes [@skip .. @skip + @split_sizes[0] - 1] of @in
+ * - @out[1], which covers bytes [@skip + @split_sizes[0] ..
+ *   @skip + @split_sizes[0] + @split_sizes[1] - 1]
+ * - etc.
+ * It is the caller's responsibility to kfree() the @out array members.
+ *
+ * Returns 0 upon success, or error code
+ */
+int sg_split(struct scatterlist *in, const int in_mapped_nents,
+ const off_t skip, const int nb_splits,
+ const size_t *split_sizes,
+ struct scatterlist **out, int *out_mapped_nents,
+ gfp_t gfp_mask)
+{
+ int i, ret;
+ struct sg_splitter *splitters;
+
+ splitters = kcalloc(nb_splits, sizeof(*splitters), gfp_mask);
+ if (!splitters)
+ return -ENOMEM;
+
+ ret = sg_calculate_split(in, sg_nents(in), nb_splits, skip, split_sizes,
+ splitters, false);
+ if (ret < 0)
+ goto err;
+
+ ret = -ENOMEM;
+ for (i = 0; i < nb_splits; i++) {
+ splitters[i].out_sg = kmalloc_array(splitters[i].nents,
+ sizeof(struct scatterlist),
+ gfp_mask);
+ if (!splitters[i].out_sg)
+ goto err;
+ }
+
+ /*
+ * The order of these 3 calls is important and should be kept.
+ */
+ sg_split_phys(splitters, nb_splits);
+ if (in_mapped_nents) {
+ ret = sg_calculate_split(in, in_mapped_nents, nb_splits, skip,
+ split_sizes, splitters, true);
+ if (ret < 0)
+ goto err;
+ sg_split_mapped(splitters, nb_splits);
+ }
+
+ for (i = 0; i < nb_splits; i++) {
+ out[i] = splitters[i].out_sg;
+ if (out_mapped_nents)
+ out_mapped_nents[i] = splitters[i].nents;
+ }
+
+ kfree(splitters);
+ return 0;
+
+err:
+ for (i = 0; i < nb_splits; i++)
+ kfree(splitters[i].out_sg);
+ kfree(splitters);
+ return ret;
+}
+EXPORT_SYMBOL(sg_split);
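+
+/*
+ * Usage sketch (illustrative; the 64-byte skip and 512/1024-byte sizes
+ * are arbitrary): splitting an unmapped sg list in two:
+ *
+ *	static const size_t sizes[2] = { 512, 1024 };
+ *	struct scatterlist *out[2];
+ *	int ret;
+ *
+ *	ret = sg_split(in, 0, 64, 2, sizes, out, NULL, GFP_KERNEL);
+ *	if (ret)
+ *		return ret;
+ *	...
+ *	kfree(out[0]);
+ *	kfree(out[1]);
+ */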
diff --git a/lib/siphash.c b/lib/siphash.c
new file mode 100644
index 000000000..15bc5b6f3
--- /dev/null
+++ b/lib/siphash.c
@@ -0,0 +1,538 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/* Copyright (C) 2016-2022 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ *
+ * SipHash: a fast short-input PRF
+ * https://131002.net/siphash/
+ *
+ * This implementation is specifically for SipHash2-4 for a secure PRF
+ * and HalfSipHash1-3/SipHash1-3 for an insecure PRF only suitable for
+ * hashtables.
+ */
+
+#include <linux/siphash.h>
+#include <asm/unaligned.h>
+
+#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64
+#include <linux/dcache.h>
+#include <asm/word-at-a-time.h>
+#endif
+
+#define SIPROUND SIPHASH_PERMUTATION(v0, v1, v2, v3)
+
+#define PREAMBLE(len) \
+ u64 v0 = SIPHASH_CONST_0; \
+ u64 v1 = SIPHASH_CONST_1; \
+ u64 v2 = SIPHASH_CONST_2; \
+ u64 v3 = SIPHASH_CONST_3; \
+ u64 b = ((u64)(len)) << 56; \
+ v3 ^= key->key[1]; \
+ v2 ^= key->key[0]; \
+ v1 ^= key->key[1]; \
+ v0 ^= key->key[0];
+
+#define POSTAMBLE \
+ v3 ^= b; \
+ SIPROUND; \
+ SIPROUND; \
+ v0 ^= b; \
+ v2 ^= 0xff; \
+ SIPROUND; \
+ SIPROUND; \
+ SIPROUND; \
+ SIPROUND; \
+ return (v0 ^ v1) ^ (v2 ^ v3);
+
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key)
+{
+ const u8 *end = data + len - (len % sizeof(u64));
+ const u8 left = len & (sizeof(u64) - 1);
+ u64 m;
+ PREAMBLE(len)
+ for (; data != end; data += sizeof(u64)) {
+ m = le64_to_cpup(data);
+ v3 ^= m;
+ SIPROUND;
+ SIPROUND;
+ v0 ^= m;
+ }
+#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64
+ if (left)
+ b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) &
+ bytemask_from_count(left)));
+#else
+ switch (left) {
+ case 7: b |= ((u64)end[6]) << 48; fallthrough;
+ case 6: b |= ((u64)end[5]) << 40; fallthrough;
+ case 5: b |= ((u64)end[4]) << 32; fallthrough;
+ case 4: b |= le32_to_cpup(data); break;
+ case 3: b |= ((u64)end[2]) << 16; fallthrough;
+ case 2: b |= le16_to_cpup(data); break;
+ case 1: b |= end[0];
+ }
+#endif
+ POSTAMBLE
+}
+EXPORT_SYMBOL(__siphash_aligned);
+#endif
+
+u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key)
+{
+ const u8 *end = data + len - (len % sizeof(u64));
+ const u8 left = len & (sizeof(u64) - 1);
+ u64 m;
+ PREAMBLE(len)
+ for (; data != end; data += sizeof(u64)) {
+ m = get_unaligned_le64(data);
+ v3 ^= m;
+ SIPROUND;
+ SIPROUND;
+ v0 ^= m;
+ }
+#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64
+ if (left)
+ b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) &
+ bytemask_from_count(left)));
+#else
+ switch (left) {
+ case 7: b |= ((u64)end[6]) << 48; fallthrough;
+ case 6: b |= ((u64)end[5]) << 40; fallthrough;
+ case 5: b |= ((u64)end[4]) << 32; fallthrough;
+ case 4: b |= get_unaligned_le32(end); break;
+ case 3: b |= ((u64)end[2]) << 16; fallthrough;
+ case 2: b |= get_unaligned_le16(end); break;
+ case 1: b |= end[0];
+ }
+#endif
+ POSTAMBLE
+}
+EXPORT_SYMBOL(__siphash_unaligned);
+
+/**
+ * siphash_1u64 - compute 64-bit siphash PRF value of a u64
+ * @first: first u64
+ * @key: the siphash key
+ */
+u64 siphash_1u64(const u64 first, const siphash_key_t *key)
+{
+ PREAMBLE(8)
+ v3 ^= first;
+ SIPROUND;
+ SIPROUND;
+ v0 ^= first;
+ POSTAMBLE
+}
+EXPORT_SYMBOL(siphash_1u64);
+
+/**
+ * siphash_2u64 - compute 64-bit siphash PRF value of 2 u64
+ * @first: first u64
+ * @second: second u64
+ * @key: the siphash key
+ */
+u64 siphash_2u64(const u64 first, const u64 second, const siphash_key_t *key)
+{
+ PREAMBLE(16)
+ v3 ^= first;
+ SIPROUND;
+ SIPROUND;
+ v0 ^= first;
+ v3 ^= second;
+ SIPROUND;
+ SIPROUND;
+ v0 ^= second;
+ POSTAMBLE
+}
+EXPORT_SYMBOL(siphash_2u64);
+
+/**
+ * siphash_3u64 - compute 64-bit siphash PRF value of 3 u64
+ * @first: first u64
+ * @second: second u64
+ * @third: third u64
+ * @key: the siphash key
+ */
+u64 siphash_3u64(const u64 first, const u64 second, const u64 third,
+ const siphash_key_t *key)
+{
+ PREAMBLE(24)
+ v3 ^= first;
+ SIPROUND;
+ SIPROUND;
+ v0 ^= first;
+ v3 ^= second;
+ SIPROUND;
+ SIPROUND;
+ v0 ^= second;
+ v3 ^= third;
+ SIPROUND;
+ SIPROUND;
+ v0 ^= third;
+ POSTAMBLE
+}
+EXPORT_SYMBOL(siphash_3u64);
+
+/**
+ * siphash_4u64 - compute 64-bit siphash PRF value of 4 u64
+ * @first: first u64
+ * @second: second u64
+ * @third: third u64
+ * @forth: fourth u64
+ * @key: the siphash key
+ */
+u64 siphash_4u64(const u64 first, const u64 second, const u64 third,
+ const u64 forth, const siphash_key_t *key)
+{
+ PREAMBLE(32)
+ v3 ^= first;
+ SIPROUND;
+ SIPROUND;
+ v0 ^= first;
+ v3 ^= second;
+ SIPROUND;
+ SIPROUND;
+ v0 ^= second;
+ v3 ^= third;
+ SIPROUND;
+ SIPROUND;
+ v0 ^= third;
+ v3 ^= forth;
+ SIPROUND;
+ SIPROUND;
+ v0 ^= forth;
+ POSTAMBLE
+}
+EXPORT_SYMBOL(siphash_4u64);
+
+u64 siphash_1u32(const u32 first, const siphash_key_t *key)
+{
+ PREAMBLE(4)
+ b |= first;
+ POSTAMBLE
+}
+EXPORT_SYMBOL(siphash_1u32);
+
+u64 siphash_3u32(const u32 first, const u32 second, const u32 third,
+ const siphash_key_t *key)
+{
+ u64 combined = (u64)second << 32 | first;
+ PREAMBLE(12)
+ v3 ^= combined;
+ SIPROUND;
+ SIPROUND;
+ v0 ^= combined;
+ b |= third;
+ POSTAMBLE
+}
+EXPORT_SYMBOL(siphash_3u32);
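+
+/*
+ * Usage sketch (illustrative; "flow_hash_key" and the key variable are
+ * hypothetical): hashing a fixed-size structure with a random key, e.g.
+ * for a hashtable index:
+ *
+ *	static siphash_key_t flow_hash_key __read_mostly;
+ *
+ *	get_random_bytes(&flow_hash_key, sizeof(flow_hash_key));
+ *	...
+ *	u64 hash = siphash(&key, sizeof(key), &flow_hash_key);
+ *
+ * siphash() picks the aligned or unaligned implementation automatically,
+ * and the siphash_1u64() .. siphash_4u64() helpers above avoid the length
+ * loop entirely for constant-size inputs.
+ */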
+
+#if BITS_PER_LONG == 64
+/* Note that on 64-bit, we make HalfSipHash1-3 actually be SipHash1-3, for
+ * performance reasons. On 32-bit, below, we actually implement HalfSipHash1-3.
+ */
+
+#define HSIPROUND SIPROUND
+#define HPREAMBLE(len) PREAMBLE(len)
+#define HPOSTAMBLE \
+ v3 ^= b; \
+ HSIPROUND; \
+ v0 ^= b; \
+ v2 ^= 0xff; \
+ HSIPROUND; \
+ HSIPROUND; \
+ HSIPROUND; \
+ return (v0 ^ v1) ^ (v2 ^ v3);
+
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
+{
+ const u8 *end = data + len - (len % sizeof(u64));
+ const u8 left = len & (sizeof(u64) - 1);
+ u64 m;
+ HPREAMBLE(len)
+ for (; data != end; data += sizeof(u64)) {
+ m = le64_to_cpup(data);
+ v3 ^= m;
+ HSIPROUND;
+ v0 ^= m;
+ }
+#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64
+ if (left)
+ b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) &
+ bytemask_from_count(left)));
+#else
+ switch (left) {
+ case 7: b |= ((u64)end[6]) << 48; fallthrough;
+ case 6: b |= ((u64)end[5]) << 40; fallthrough;
+ case 5: b |= ((u64)end[4]) << 32; fallthrough;
+ case 4: b |= le32_to_cpup(data); break;
+ case 3: b |= ((u64)end[2]) << 16; fallthrough;
+ case 2: b |= le16_to_cpup(data); break;
+ case 1: b |= end[0];
+ }
+#endif
+ HPOSTAMBLE
+}
+EXPORT_SYMBOL(__hsiphash_aligned);
+#endif
+
+u32 __hsiphash_unaligned(const void *data, size_t len,
+ const hsiphash_key_t *key)
+{
+ const u8 *end = data + len - (len % sizeof(u64));
+ const u8 left = len & (sizeof(u64) - 1);
+ u64 m;
+ HPREAMBLE(len)
+ for (; data != end; data += sizeof(u64)) {
+ m = get_unaligned_le64(data);
+ v3 ^= m;
+ HSIPROUND;
+ v0 ^= m;
+ }
+#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64
+ if (left)
+ b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) &
+ bytemask_from_count(left)));
+#else
+ switch (left) {
+ case 7: b |= ((u64)end[6]) << 48; fallthrough;
+ case 6: b |= ((u64)end[5]) << 40; fallthrough;
+ case 5: b |= ((u64)end[4]) << 32; fallthrough;
+ case 4: b |= get_unaligned_le32(end); break;
+ case 3: b |= ((u64)end[2]) << 16; fallthrough;
+ case 2: b |= get_unaligned_le16(end); break;
+ case 1: b |= end[0];
+ }
+#endif
+ HPOSTAMBLE
+}
+EXPORT_SYMBOL(__hsiphash_unaligned);
+
+/**
+ * hsiphash_1u32 - compute 32-bit hsiphash PRF value of a u32
+ * @first: first u32
+ * @key: the hsiphash key
+ */
+u32 hsiphash_1u32(const u32 first, const hsiphash_key_t *key)
+{
+ HPREAMBLE(4)
+ b |= first;
+ HPOSTAMBLE
+}
+EXPORT_SYMBOL(hsiphash_1u32);
+
+/**
+ * hsiphash_2u32 - compute 32-bit hsiphash PRF value of 2 u32
+ * @first: first u32
+ * @second: second u32
+ * @key: the hsiphash key
+ */
+u32 hsiphash_2u32(const u32 first, const u32 second, const hsiphash_key_t *key)
+{
+ u64 combined = (u64)second << 32 | first;
+ HPREAMBLE(8)
+ v3 ^= combined;
+ HSIPROUND;
+ v0 ^= combined;
+ HPOSTAMBLE
+}
+EXPORT_SYMBOL(hsiphash_2u32);
+
+/**
+ * hsiphash_3u32 - compute 32-bit hsiphash PRF value of 3 u32
+ * @first: first u32
+ * @second: second u32
+ * @third: third u32
+ * @key: the hsiphash key
+ */
+u32 hsiphash_3u32(const u32 first, const u32 second, const u32 third,
+ const hsiphash_key_t *key)
+{
+ u64 combined = (u64)second << 32 | first;
+ HPREAMBLE(12)
+ v3 ^= combined;
+ HSIPROUND;
+ v0 ^= combined;
+ b |= third;
+ HPOSTAMBLE
+}
+EXPORT_SYMBOL(hsiphash_3u32);
+
+/**
+ * hsiphash_4u32 - compute 32-bit hsiphash PRF value of 4 u32
+ * @first: first u32
+ * @second: second u32
+ * @third: third u32
+ * @forth: fourth u32
+ * @key: the hsiphash key
+ */
+u32 hsiphash_4u32(const u32 first, const u32 second, const u32 third,
+ const u32 forth, const hsiphash_key_t *key)
+{
+ u64 combined = (u64)second << 32 | first;
+ HPREAMBLE(16)
+ v3 ^= combined;
+ HSIPROUND;
+ v0 ^= combined;
+ combined = (u64)forth << 32 | third;
+ v3 ^= combined;
+ HSIPROUND;
+ v0 ^= combined;
+ HPOSTAMBLE
+}
+EXPORT_SYMBOL(hsiphash_4u32);
+#else
+#define HSIPROUND HSIPHASH_PERMUTATION(v0, v1, v2, v3)
+
+#define HPREAMBLE(len) \
+ u32 v0 = HSIPHASH_CONST_0; \
+ u32 v1 = HSIPHASH_CONST_1; \
+ u32 v2 = HSIPHASH_CONST_2; \
+ u32 v3 = HSIPHASH_CONST_3; \
+ u32 b = ((u32)(len)) << 24; \
+ v3 ^= key->key[1]; \
+ v2 ^= key->key[0]; \
+ v1 ^= key->key[1]; \
+ v0 ^= key->key[0];
+
+#define HPOSTAMBLE \
+ v3 ^= b; \
+ HSIPROUND; \
+ v0 ^= b; \
+ v2 ^= 0xff; \
+ HSIPROUND; \
+ HSIPROUND; \
+ HSIPROUND; \
+ return v1 ^ v3;
+
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
+{
+ const u8 *end = data + len - (len % sizeof(u32));
+ const u8 left = len & (sizeof(u32) - 1);
+ u32 m;
+ HPREAMBLE(len)
+ for (; data != end; data += sizeof(u32)) {
+ m = le32_to_cpup(data);
+ v3 ^= m;
+ HSIPROUND;
+ v0 ^= m;
+ }
+ switch (left) {
+ case 3: b |= ((u32)end[2]) << 16; fallthrough;
+ case 2: b |= le16_to_cpup(data); break;
+ case 1: b |= end[0];
+ }
+ HPOSTAMBLE
+}
+EXPORT_SYMBOL(__hsiphash_aligned);
+#endif
+
+u32 __hsiphash_unaligned(const void *data, size_t len,
+ const hsiphash_key_t *key)
+{
+ const u8 *end = data + len - (len % sizeof(u32));
+ const u8 left = len & (sizeof(u32) - 1);
+ u32 m;
+ HPREAMBLE(len)
+ for (; data != end; data += sizeof(u32)) {
+ m = get_unaligned_le32(data);
+ v3 ^= m;
+ HSIPROUND;
+ v0 ^= m;
+ }
+ switch (left) {
+ case 3: b |= ((u32)end[2]) << 16; fallthrough;
+ case 2: b |= get_unaligned_le16(end); break;
+ case 1: b |= end[0];
+ }
+ HPOSTAMBLE
+}
+EXPORT_SYMBOL(__hsiphash_unaligned);
+
+/**
+ * hsiphash_1u32 - compute 32-bit hsiphash PRF value of a u32
+ * @first: first u32
+ * @key: the hsiphash key
+ */
+u32 hsiphash_1u32(const u32 first, const hsiphash_key_t *key)
+{
+ HPREAMBLE(4)
+ v3 ^= first;
+ HSIPROUND;
+ v0 ^= first;
+ HPOSTAMBLE
+}
+EXPORT_SYMBOL(hsiphash_1u32);
+
+/**
+ * hsiphash_2u32 - compute 32-bit hsiphash PRF value of 2 u32
+ * @first: first u32
+ * @second: second u32
+ * @key: the hsiphash key
+ */
+u32 hsiphash_2u32(const u32 first, const u32 second, const hsiphash_key_t *key)
+{
+ HPREAMBLE(8)
+ v3 ^= first;
+ HSIPROUND;
+ v0 ^= first;
+ v3 ^= second;
+ HSIPROUND;
+ v0 ^= second;
+ HPOSTAMBLE
+}
+EXPORT_SYMBOL(hsiphash_2u32);
+
+/**
+ * hsiphash_3u32 - compute 32-bit hsiphash PRF value of 3 u32
+ * @first: first u32
+ * @second: second u32
+ * @third: third u32
+ * @key: the hsiphash key
+ */
+u32 hsiphash_3u32(const u32 first, const u32 second, const u32 third,
+ const hsiphash_key_t *key)
+{
+ HPREAMBLE(12)
+ v3 ^= first;
+ HSIPROUND;
+ v0 ^= first;
+ v3 ^= second;
+ HSIPROUND;
+ v0 ^= second;
+ v3 ^= third;
+ HSIPROUND;
+ v0 ^= third;
+ HPOSTAMBLE
+}
+EXPORT_SYMBOL(hsiphash_3u32);
+
+/**
+ * hsiphash_4u32 - compute 32-bit hsiphash PRF value of 4 u32
+ * @first: first u32
+ * @second: second u32
+ * @third: third u32
+ * @forth: fourth u32
+ * @key: the hsiphash key
+ */
+u32 hsiphash_4u32(const u32 first, const u32 second, const u32 third,
+ const u32 forth, const hsiphash_key_t *key)
+{
+ HPREAMBLE(16)
+ v3 ^= first;
+ HSIPROUND;
+ v0 ^= first;
+ v3 ^= second;
+ HSIPROUND;
+ v0 ^= second;
+ v3 ^= third;
+ HSIPROUND;
+ v0 ^= third;
+ v3 ^= forth;
+ HSIPROUND;
+ v0 ^= forth;
+ HPOSTAMBLE
+}
+EXPORT_SYMBOL(hsiphash_4u32);
+#endif
diff --git a/lib/siphash_kunit.c b/lib/siphash_kunit.c
new file mode 100644
index 000000000..a3c697e8b
--- /dev/null
+++ b/lib/siphash_kunit.c
@@ -0,0 +1,197 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/* Copyright (C) 2016-2022 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ *
+ * Test cases for siphash.c
+ *
+ * SipHash: a fast short-input PRF
+ * https://131002.net/siphash/
+ *
+ * This implementation is specifically for SipHash2-4 for a secure PRF
+ * and HalfSipHash1-3/SipHash1-3 for an insecure PRF only suitable for
+ * hashtables.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <kunit/test.h>
+#include <linux/siphash.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+
+/* Test vectors taken from reference source available at:
+ * https://github.com/veorq/SipHash
+ */
+
+static const siphash_key_t test_key_siphash =
+ {{ 0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL }};
+
+static const u64 test_vectors_siphash[64] = {
+ 0x726fdb47dd0e0e31ULL, 0x74f839c593dc67fdULL, 0x0d6c8009d9a94f5aULL,
+ 0x85676696d7fb7e2dULL, 0xcf2794e0277187b7ULL, 0x18765564cd99a68dULL,
+ 0xcbc9466e58fee3ceULL, 0xab0200f58b01d137ULL, 0x93f5f5799a932462ULL,
+ 0x9e0082df0ba9e4b0ULL, 0x7a5dbbc594ddb9f3ULL, 0xf4b32f46226bada7ULL,
+ 0x751e8fbc860ee5fbULL, 0x14ea5627c0843d90ULL, 0xf723ca908e7af2eeULL,
+ 0xa129ca6149be45e5ULL, 0x3f2acc7f57c29bdbULL, 0x699ae9f52cbe4794ULL,
+ 0x4bc1b3f0968dd39cULL, 0xbb6dc91da77961bdULL, 0xbed65cf21aa2ee98ULL,
+ 0xd0f2cbb02e3b67c7ULL, 0x93536795e3a33e88ULL, 0xa80c038ccd5ccec8ULL,
+ 0xb8ad50c6f649af94ULL, 0xbce192de8a85b8eaULL, 0x17d835b85bbb15f3ULL,
+ 0x2f2e6163076bcfadULL, 0xde4daaaca71dc9a5ULL, 0xa6a2506687956571ULL,
+ 0xad87a3535c49ef28ULL, 0x32d892fad841c342ULL, 0x7127512f72f27cceULL,
+ 0xa7f32346f95978e3ULL, 0x12e0b01abb051238ULL, 0x15e034d40fa197aeULL,
+ 0x314dffbe0815a3b4ULL, 0x027990f029623981ULL, 0xcadcd4e59ef40c4dULL,
+ 0x9abfd8766a33735cULL, 0x0e3ea96b5304a7d0ULL, 0xad0c42d6fc585992ULL,
+ 0x187306c89bc215a9ULL, 0xd4a60abcf3792b95ULL, 0xf935451de4f21df2ULL,
+ 0xa9538f0419755787ULL, 0xdb9acddff56ca510ULL, 0xd06c98cd5c0975ebULL,
+ 0xe612a3cb9ecba951ULL, 0xc766e62cfcadaf96ULL, 0xee64435a9752fe72ULL,
+ 0xa192d576b245165aULL, 0x0a8787bf8ecb74b2ULL, 0x81b3e73d20b49b6fULL,
+ 0x7fa8220ba3b2eceaULL, 0x245731c13ca42499ULL, 0xb78dbfaf3a8d83bdULL,
+ 0xea1ad565322a1a0bULL, 0x60e61c23a3795013ULL, 0x6606d7e446282b93ULL,
+ 0x6ca4ecb15c5f91e1ULL, 0x9f626da15c9625f3ULL, 0xe51b38608ef25f57ULL,
+ 0x958a324ceb064572ULL
+};
+
+#if BITS_PER_LONG == 64
+static const hsiphash_key_t test_key_hsiphash =
+ {{ 0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL }};
+
+static const u32 test_vectors_hsiphash[64] = {
+ 0x050fc4dcU, 0x7d57ca93U, 0x4dc7d44dU,
+ 0xe7ddf7fbU, 0x88d38328U, 0x49533b67U,
+ 0xc59f22a7U, 0x9bb11140U, 0x8d299a8eU,
+ 0x6c063de4U, 0x92ff097fU, 0xf94dc352U,
+ 0x57b4d9a2U, 0x1229ffa7U, 0xc0f95d34U,
+ 0x2a519956U, 0x7d908b66U, 0x63dbd80cU,
+ 0xb473e63eU, 0x8d297d1cU, 0xa6cce040U,
+ 0x2b45f844U, 0xa320872eU, 0xdae6c123U,
+ 0x67349c8cU, 0x705b0979U, 0xca9913a5U,
+ 0x4ade3b35U, 0xef6cd00dU, 0x4ab1e1f4U,
+ 0x43c5e663U, 0x8c21d1bcU, 0x16a7b60dU,
+ 0x7a8ff9bfU, 0x1f2a753eU, 0xbf186b91U,
+ 0xada26206U, 0xa3c33057U, 0xae3a36a1U,
+ 0x7b108392U, 0x99e41531U, 0x3f1ad944U,
+ 0xc8138825U, 0xc28949a6U, 0xfaf8876bU,
+ 0x9f042196U, 0x68b1d623U, 0x8b5114fdU,
+ 0xdf074c46U, 0x12cc86b3U, 0x0a52098fU,
+ 0x9d292f9aU, 0xa2f41f12U, 0x43a71ed0U,
+ 0x73f0bce6U, 0x70a7e980U, 0x243c6d75U,
+ 0xfdb71513U, 0xa67d8a08U, 0xb7e8f148U,
+ 0xf7a644eeU, 0x0f1837f2U, 0x4b6694e0U,
+ 0xb7bbb3a8U
+};
+#else
+static const hsiphash_key_t test_key_hsiphash =
+ {{ 0x03020100U, 0x07060504U }};
+
+static const u32 test_vectors_hsiphash[64] = {
+ 0x5814c896U, 0xe7e864caU, 0xbc4b0e30U,
+ 0x01539939U, 0x7e059ea6U, 0x88e3d89bU,
+ 0xa0080b65U, 0x9d38d9d6U, 0x577999b1U,
+ 0xc839caedU, 0xe4fa32cfU, 0x959246eeU,
+ 0x6b28096cU, 0x66dd9cd6U, 0x16658a7cU,
+ 0xd0257b04U, 0x8b31d501U, 0x2b1cd04bU,
+ 0x06712339U, 0x522aca67U, 0x911bb605U,
+ 0x90a65f0eU, 0xf826ef7bU, 0x62512debU,
+ 0x57150ad7U, 0x5d473507U, 0x1ec47442U,
+ 0xab64afd3U, 0x0a4100d0U, 0x6d2ce652U,
+ 0x2331b6a3U, 0x08d8791aU, 0xbc6dda8dU,
+ 0xe0f6c934U, 0xb0652033U, 0x9b9851ccU,
+ 0x7c46fb7fU, 0x732ba8cbU, 0xf142997aU,
+ 0xfcc9aa1bU, 0x05327eb2U, 0xe110131cU,
+ 0xf9e5e7c0U, 0xa7d708a6U, 0x11795ab1U,
+ 0x65671619U, 0x9f5fff91U, 0xd89c5267U,
+ 0x007783ebU, 0x95766243U, 0xab639262U,
+ 0x9c7e1390U, 0xc368dda6U, 0x38ddc455U,
+ 0xfa13d379U, 0x979ea4e8U, 0x53ecd77eU,
+ 0x2ee80657U, 0x33dbb66aU, 0xae3f0577U,
+ 0x88b4c4ccU, 0x3e7f480bU, 0x74c1ebf8U,
+ 0x87178304U
+};
+#endif
+
+#define chk(hash, vector, fmt...) \
+ KUNIT_EXPECT_EQ_MSG(test, hash, vector, fmt)
+
+static void siphash_test(struct kunit *test)
+{
+ u8 in[64] __aligned(SIPHASH_ALIGNMENT);
+ u8 in_unaligned[65] __aligned(SIPHASH_ALIGNMENT);
+ u8 i;
+
+ for (i = 0; i < 64; ++i) {
+ in[i] = i;
+ in_unaligned[i + 1] = i;
+ chk(siphash(in, i, &test_key_siphash),
+ test_vectors_siphash[i],
+ "siphash self-test aligned %u: FAIL", i + 1);
+ chk(siphash(in_unaligned + 1, i, &test_key_siphash),
+ test_vectors_siphash[i],
+ "siphash self-test unaligned %u: FAIL", i + 1);
+ chk(hsiphash(in, i, &test_key_hsiphash),
+ test_vectors_hsiphash[i],
+ "hsiphash self-test aligned %u: FAIL", i + 1);
+ chk(hsiphash(in_unaligned + 1, i, &test_key_hsiphash),
+ test_vectors_hsiphash[i],
+ "hsiphash self-test unaligned %u: FAIL", i + 1);
+ }
+ chk(siphash_1u64(0x0706050403020100ULL, &test_key_siphash),
+ test_vectors_siphash[8],
+ "siphash self-test 1u64: FAIL");
+ chk(siphash_2u64(0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL,
+ &test_key_siphash),
+ test_vectors_siphash[16],
+ "siphash self-test 2u64: FAIL");
+ chk(siphash_3u64(0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL,
+ 0x1716151413121110ULL, &test_key_siphash),
+ test_vectors_siphash[24],
+ "siphash self-test 3u64: FAIL");
+ chk(siphash_4u64(0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL,
+ 0x1716151413121110ULL, 0x1f1e1d1c1b1a1918ULL,
+ &test_key_siphash),
+ test_vectors_siphash[32],
+ "siphash self-test 4u64: FAIL");
+ chk(siphash_1u32(0x03020100U, &test_key_siphash),
+ test_vectors_siphash[4],
+ "siphash self-test 1u32: FAIL");
+ chk(siphash_2u32(0x03020100U, 0x07060504U, &test_key_siphash),
+ test_vectors_siphash[8],
+ "siphash self-test 2u32: FAIL");
+ chk(siphash_3u32(0x03020100U, 0x07060504U,
+ 0x0b0a0908U, &test_key_siphash),
+ test_vectors_siphash[12],
+ "siphash self-test 3u32: FAIL");
+ chk(siphash_4u32(0x03020100U, 0x07060504U,
+ 0x0b0a0908U, 0x0f0e0d0cU, &test_key_siphash),
+ test_vectors_siphash[16],
+ "siphash self-test 4u32: FAIL");
+ chk(hsiphash_1u32(0x03020100U, &test_key_hsiphash),
+ test_vectors_hsiphash[4],
+ "hsiphash self-test 1u32: FAIL");
+ chk(hsiphash_2u32(0x03020100U, 0x07060504U, &test_key_hsiphash),
+ test_vectors_hsiphash[8],
+ "hsiphash self-test 2u32: FAIL");
+ chk(hsiphash_3u32(0x03020100U, 0x07060504U,
+ 0x0b0a0908U, &test_key_hsiphash),
+ test_vectors_hsiphash[12],
+ "hsiphash self-test 3u32: FAIL");
+ chk(hsiphash_4u32(0x03020100U, 0x07060504U,
+ 0x0b0a0908U, 0x0f0e0d0cU, &test_key_hsiphash),
+ test_vectors_hsiphash[16],
+ "hsiphash self-test 4u32: FAIL");
+}
+
+static struct kunit_case siphash_test_cases[] = {
+ KUNIT_CASE(siphash_test),
+ {}
+};
+
+static struct kunit_suite siphash_test_suite = {
+ .name = "siphash",
+ .test_cases = siphash_test_cases,
+};
+
+kunit_test_suite(siphash_test_suite);
+
+MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/lib/slub_kunit.c b/lib/slub_kunit.c
new file mode 100644
index 000000000..d4a3730b0
--- /dev/null
+++ b/lib/slub_kunit.c
@@ -0,0 +1,190 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <kunit/test.h>
+#include <kunit/test-bug.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include "../mm/slab.h"
+
+static struct kunit_resource resource;
+static int slab_errors;
+
+/*
+ * A wrapper for kmem_cache_create() that drops the 'align' and 'ctor'
+ * parameters and sets the SLAB_SKIP_KFENCE flag, to avoid getting an
+ * object from the kfence pool, where the operation could be caught by
+ * both our test and the kfence sanity check.
+ */
+static struct kmem_cache *test_kmem_cache_create(const char *name,
+ unsigned int size, slab_flags_t flags)
+{
+ struct kmem_cache *s = kmem_cache_create(name, size, 0,
+ (flags | SLAB_NO_USER_FLAGS), NULL);
+ s->flags |= SLAB_SKIP_KFENCE;
+ return s;
+}
+
+static void test_clobber_zone(struct kunit *test)
+{
+ struct kmem_cache *s = test_kmem_cache_create("TestSlub_RZ_alloc", 64,
+ SLAB_RED_ZONE);
+ u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
+
+ kasan_disable_current();
+ p[64] = 0x12;
+
+ validate_slab_cache(s);
+ KUNIT_EXPECT_EQ(test, 2, slab_errors);
+
+ kasan_enable_current();
+ kmem_cache_free(s, p);
+ kmem_cache_destroy(s);
+}
+
+#ifndef CONFIG_KASAN
+static void test_next_pointer(struct kunit *test)
+{
+ struct kmem_cache *s = test_kmem_cache_create("TestSlub_next_ptr_free",
+ 64, SLAB_POISON);
+ u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
+ unsigned long tmp;
+ unsigned long *ptr_addr;
+
+ kmem_cache_free(s, p);
+
+ ptr_addr = (unsigned long *)(p + s->offset);
+ tmp = *ptr_addr;
+ p[s->offset] = 0x12;
+
+	/*
+	 * Expecting three errors: one for the corrupted freechain, one for
+	 * the wrong count of objects in use, and a third for fixing the
+	 * broken cache.
+	 */
+ validate_slab_cache(s);
+ KUNIT_EXPECT_EQ(test, 3, slab_errors);
+
+	/*
+	 * Try to repair the corrupted freepointer. Still expecting two
+	 * errors: the first for the wrong count of objects in use, the
+	 * second for fixing the broken cache.
+	 */
+ *ptr_addr = tmp;
+ slab_errors = 0;
+
+ validate_slab_cache(s);
+ KUNIT_EXPECT_EQ(test, 2, slab_errors);
+
+ /*
+ * Previous validation repaired the count of objects in use.
+ * Now expecting no error.
+ */
+ slab_errors = 0;
+ validate_slab_cache(s);
+ KUNIT_EXPECT_EQ(test, 0, slab_errors);
+
+ kmem_cache_destroy(s);
+}
+
+static void test_first_word(struct kunit *test)
+{
+ struct kmem_cache *s = test_kmem_cache_create("TestSlub_1th_word_free",
+ 64, SLAB_POISON);
+ u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
+
+ kmem_cache_free(s, p);
+ *p = 0x78;
+
+ validate_slab_cache(s);
+ KUNIT_EXPECT_EQ(test, 2, slab_errors);
+
+ kmem_cache_destroy(s);
+}
+
+static void test_clobber_50th_byte(struct kunit *test)
+{
+ struct kmem_cache *s = test_kmem_cache_create("TestSlub_50th_word_free",
+ 64, SLAB_POISON);
+ u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
+
+ kmem_cache_free(s, p);
+ p[50] = 0x9a;
+
+ validate_slab_cache(s);
+ KUNIT_EXPECT_EQ(test, 2, slab_errors);
+
+ kmem_cache_destroy(s);
+}
+#endif
+
+static void test_clobber_redzone_free(struct kunit *test)
+{
+ struct kmem_cache *s = test_kmem_cache_create("TestSlub_RZ_free", 64,
+ SLAB_RED_ZONE);
+ u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
+
+ kasan_disable_current();
+ kmem_cache_free(s, p);
+ p[64] = 0xab;
+
+ validate_slab_cache(s);
+ KUNIT_EXPECT_EQ(test, 2, slab_errors);
+
+ kasan_enable_current();
+ kmem_cache_destroy(s);
+}
+
+static void test_kmalloc_redzone_access(struct kunit *test)
+{
+ struct kmem_cache *s = test_kmem_cache_create("TestSlub_RZ_kmalloc", 32,
+ SLAB_KMALLOC|SLAB_STORE_USER|SLAB_RED_ZONE);
+ u8 *p = kmalloc_trace(s, GFP_KERNEL, 18);
+
+ kasan_disable_current();
+
+ /* Suppress the -Warray-bounds warning */
+ OPTIMIZER_HIDE_VAR(p);
+ p[18] = 0xab;
+ p[19] = 0xab;
+
+ validate_slab_cache(s);
+ KUNIT_EXPECT_EQ(test, 2, slab_errors);
+
+ kasan_enable_current();
+ kmem_cache_free(s, p);
+ kmem_cache_destroy(s);
+}
+
+static int test_init(struct kunit *test)
+{
+ slab_errors = 0;
+
+ kunit_add_named_resource(test, NULL, NULL, &resource,
+ "slab_errors", &slab_errors);
+ return 0;
+}
+
+static struct kunit_case test_cases[] = {
+ KUNIT_CASE(test_clobber_zone),
+
+#ifndef CONFIG_KASAN
+ KUNIT_CASE(test_next_pointer),
+ KUNIT_CASE(test_first_word),
+ KUNIT_CASE(test_clobber_50th_byte),
+#endif
+
+ KUNIT_CASE(test_clobber_redzone_free),
+ KUNIT_CASE(test_kmalloc_redzone_access),
+ {}
+};
+
+static struct kunit_suite test_suite = {
+ .name = "slub_test",
+ .init = test_init,
+ .test_cases = test_cases,
+};
+kunit_test_suite(test_suite);
+
+MODULE_LICENSE("GPL");
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
new file mode 100644
index 000000000..a2bb7738c
--- /dev/null
+++ b/lib/smp_processor_id.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * lib/smp_processor_id.c
+ *
+ * DEBUG_PREEMPT variant of smp_processor_id().
+ */
+#include <linux/export.h>
+#include <linux/kprobes.h>
+#include <linux/sched.h>
+
+noinstr static
+unsigned int check_preemption_disabled(const char *what1, const char *what2)
+{
+ int this_cpu = raw_smp_processor_id();
+
+ if (likely(preempt_count()))
+ goto out;
+
+ if (irqs_disabled())
+ goto out;
+
+ if (is_percpu_thread())
+ goto out;
+
+#ifdef CONFIG_SMP
+ if (current->migration_disabled)
+ goto out;
+#endif
+
+ /*
+ * It is valid to assume CPU-locality during early bootup:
+ */
+ if (system_state < SYSTEM_SCHEDULING)
+ goto out;
+
+ /*
+ * Avoid recursion:
+ */
+ preempt_disable_notrace();
+
+ instrumentation_begin();
+ if (!printk_ratelimit())
+ goto out_enable;
+
+ printk(KERN_ERR "BUG: using %s%s() in preemptible [%08x] code: %s/%d\n",
+ what1, what2, preempt_count() - 1, current->comm, current->pid);
+
+ printk("caller is %pS\n", __builtin_return_address(0));
+ dump_stack();
+
+out_enable:
+ instrumentation_end();
+ preempt_enable_no_resched_notrace();
+out:
+ return this_cpu;
+}
+
+noinstr unsigned int debug_smp_processor_id(void)
+{
+ return check_preemption_disabled("smp_processor_id", "");
+}
+EXPORT_SYMBOL(debug_smp_processor_id);
+
+noinstr void __this_cpu_preempt_check(const char *op)
+{
+ check_preemption_disabled("__this_cpu_", op);
+}
+EXPORT_SYMBOL(__this_cpu_preempt_check);
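+
+/*
+ * Example of what this catches (illustrative): calling
+ *
+ *	int cpu = smp_processor_id();
+ *
+ * from preemptible context prints "BUG: using smp_processor_id() in
+ * preemptible" along with the caller; such code should use
+ * get_cpu()/put_cpu(), or raw_smp_processor_id() when a stale CPU
+ * number is acceptable.
+ */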
diff --git a/lib/sort.c b/lib/sort.c
new file mode 100644
index 000000000..b399bf10d
--- /dev/null
+++ b/lib/sort.c
@@ -0,0 +1,292 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * A fast, small, non-recursive O(n log n) sort for the Linux kernel
+ *
+ * This performs n*log2(n) + 0.37*n + o(n) comparisons on average,
+ * and 1.5*n*log2(n) + O(n) in the (very contrived) worst case.
+ *
+ * Glibc qsort() manages n*log2(n) - 1.26*n for random inputs (1.63*n
+ * better) at the expense of stack usage and much larger code to avoid
+ * quicksort's O(n^2) worst case.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/types.h>
+#include <linux/export.h>
+#include <linux/sort.h>
+
+/**
+ * is_aligned - is this pointer & size okay for word-wide copying?
+ * @base: pointer to data
+ * @size: size of each element
+ * @align: required alignment (typically 4 or 8)
+ *
+ * Returns true if elements can be copied using word loads and stores.
+ * The size must be a multiple of the alignment, and the base address
+ * must be aligned as well unless CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ * is set.
+ *
+ * For some reason, gcc doesn't know to optimize "if (a & mask || b & mask)"
+ * to "if ((a | b) & mask)", so we do that by hand.
+ */
+__attribute_const__ __always_inline
+static bool is_aligned(const void *base, size_t size, unsigned char align)
+{
+ unsigned char lsbits = (unsigned char)size;
+
+ (void)base;
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ lsbits |= (unsigned char)(uintptr_t)base;
+#endif
+ return (lsbits & (align - 1)) == 0;
+}
+
+/**
+ * swap_words_32 - swap two elements in 32-bit chunks
+ * @a: pointer to the first element to swap
+ * @b: pointer to the second element to swap
+ * @n: element size (must be a multiple of 4)
+ *
+ * Exchange the two objects in memory. This exploits base+index addressing,
+ * which basically all CPUs have, to minimize loop overhead computations.
+ *
+ * For some reason, on x86 gcc 7.3.0 adds a redundant test of n at the
+ * bottom of the loop, even though the zero flag is still valid from the
+ * subtract (since the intervening mov instructions don't alter the flags).
+ * Gcc 8.1.0 doesn't have that problem.
+ */
+static void swap_words_32(void *a, void *b, size_t n)
+{
+ do {
+ u32 t = *(u32 *)(a + (n -= 4));
+ *(u32 *)(a + n) = *(u32 *)(b + n);
+ *(u32 *)(b + n) = t;
+ } while (n);
+}
+
+/**
+ * swap_words_64 - swap two elements in 64-bit chunks
+ * @a: pointer to the first element to swap
+ * @b: pointer to the second element to swap
+ * @n: element size (must be a multiple of 8)
+ *
+ * Exchange the two objects in memory. This exploits base+index
+ * addressing, which basically all CPUs have, to minimize loop overhead
+ * computations.
+ *
+ * We'd like to use 64-bit loads if possible. If they're not, emulating
+ * one requires base+index+4 addressing which x86 has but most other
+ * processors do not. If CONFIG_64BIT, we definitely have 64-bit loads,
+ * but it's possible to have 64-bit loads without 64-bit pointers (e.g.
+ * x32 ABI). Are there any cases the kernel needs to worry about?
+ */
+static void swap_words_64(void *a, void *b, size_t n)
+{
+ do {
+#ifdef CONFIG_64BIT
+ u64 t = *(u64 *)(a + (n -= 8));
+ *(u64 *)(a + n) = *(u64 *)(b + n);
+ *(u64 *)(b + n) = t;
+#else
+ /* Use two 32-bit transfers to avoid base+index+4 addressing */
+ u32 t = *(u32 *)(a + (n -= 4));
+ *(u32 *)(a + n) = *(u32 *)(b + n);
+ *(u32 *)(b + n) = t;
+
+ t = *(u32 *)(a + (n -= 4));
+ *(u32 *)(a + n) = *(u32 *)(b + n);
+ *(u32 *)(b + n) = t;
+#endif
+ } while (n);
+}
+
+/**
+ * swap_bytes - swap two elements a byte at a time
+ * @a: pointer to the first element to swap
+ * @b: pointer to the second element to swap
+ * @n: element size
+ *
+ * This is the fallback if alignment doesn't allow using larger chunks.
+ */
+static void swap_bytes(void *a, void *b, size_t n)
+{
+ do {
+ char t = ((char *)a)[--n];
+ ((char *)a)[n] = ((char *)b)[n];
+ ((char *)b)[n] = t;
+ } while (n);
+}
+
+/*
+ * The values are arbitrary as long as they can't be confused with
+ * a pointer, but small integers make for the smallest compare
+ * instructions.
+ */
+#define SWAP_WORDS_64 (swap_r_func_t)0
+#define SWAP_WORDS_32 (swap_r_func_t)1
+#define SWAP_BYTES (swap_r_func_t)2
+#define SWAP_WRAPPER (swap_r_func_t)3
+
+struct wrapper {
+ cmp_func_t cmp;
+ swap_func_t swap;
+};
+
+/*
+ * The function pointer is last to make tail calls most efficient if the
+ * compiler decides not to inline this function.
+ */
+static void do_swap(void *a, void *b, size_t size, swap_r_func_t swap_func, const void *priv)
+{
+ if (swap_func == SWAP_WRAPPER) {
+ ((const struct wrapper *)priv)->swap(a, b, (int)size);
+ return;
+ }
+
+ if (swap_func == SWAP_WORDS_64)
+ swap_words_64(a, b, size);
+ else if (swap_func == SWAP_WORDS_32)
+ swap_words_32(a, b, size);
+ else if (swap_func == SWAP_BYTES)
+ swap_bytes(a, b, size);
+ else
+ swap_func(a, b, (int)size, priv);
+}
+
+#define _CMP_WRAPPER ((cmp_r_func_t)0L)
+
+static int do_cmp(const void *a, const void *b, cmp_r_func_t cmp, const void *priv)
+{
+ if (cmp == _CMP_WRAPPER)
+ return ((const struct wrapper *)priv)->cmp(a, b);
+ return cmp(a, b, priv);
+}
+
+/**
+ * parent - given the offset of the child, find the offset of the parent.
+ * @i: the offset of the heap element whose parent is sought. Non-zero.
+ * @lsbit: a precomputed 1-bit mask, equal to "size & -size"
+ * @size: size of each element
+ *
+ * In terms of array indexes, the parent of element j = @i/@size is simply
+ * (j-1)/2. But when working in byte offsets, we can't use implicit
+ * truncation of integer divides.
+ *
+ * Fortunately, we only need one bit of the quotient, not the full divide.
+ * @size has a least significant bit. That bit will be clear if @i is
+ * an even multiple of @size, and set if it's an odd multiple.
+ *
+ * Logically, we're doing "if (i & lsbit) i -= size;", but since the
+ * branch is unpredictable, it's done with a bit of clever branch-free
+ * code instead.
+ */
+__attribute_const__ __always_inline
+static size_t parent(size_t i, unsigned int lsbit, size_t size)
+{
+ i -= size;
+ i -= size & -(i & lsbit);
+ return i / 2;
+}
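+
+/*
+ * An illustrative walk-through of parent() (assuming size = 12, so
+ * lsbit = 4): element j lives at byte offset 12*j and its parent is
+ * element (j-1)/2. The name example is hypothetical.
+ */
+#if 0	/* example only -- not compiled */
+static void example(void)
+{
+ /* Offsets 12 and 24 (j = 1, 2) both resolve to the root at offset 0. */
+ WARN_ON(parent(12, 4, 12) != 0);
+ WARN_ON(parent(24, 4, 12) != 0);
+ /* Offsets 36 and 48 (j = 3, 4) resolve to j = 1, i.e. offset 12. */
+ WARN_ON(parent(36, 4, 12) != 12);
+ WARN_ON(parent(48, 4, 12) != 12);
+}
+#endif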
+
+/**
+ * sort_r - sort an array of elements
+ * @base: pointer to data to sort
+ * @num: number of elements
+ * @size: size of each element
+ * @cmp_func: pointer to comparison function
+ * @swap_func: pointer to swap function or NULL
+ * @priv: third argument passed to comparison function
+ *
+ * This function does a heapsort on the given array. You may provide
+ * a swap_func function if you need to do something more than a memory
+ * copy (e.g. fix up pointers or auxiliary data), but the built-in swap
+ * avoids a slow retpoline and so is significantly faster.
+ *
+ * Sorting time is O(n log n) both on average and worst-case. While
+ * quicksort is slightly faster on average, it suffers from exploitable
+ * O(n*n) worst-case behavior and extra memory requirements that make
+ * it less suitable for kernel use.
+ */
+void sort_r(void *base, size_t num, size_t size,
+ cmp_r_func_t cmp_func,
+ swap_r_func_t swap_func,
+ const void *priv)
+{
+ /* pre-scale counters for performance */
+ size_t n = num * size, a = (num/2) * size;
+ const unsigned int lsbit = size & -size; /* Used to find parent */
+
+ if (!a) /* num < 2 || size == 0 */
+ return;
+
+ /* called from 'sort' without swap function, let's pick the default */
+ if (swap_func == SWAP_WRAPPER && !((struct wrapper *)priv)->swap)
+ swap_func = NULL;
+
+ if (!swap_func) {
+ if (is_aligned(base, size, 8))
+ swap_func = SWAP_WORDS_64;
+ else if (is_aligned(base, size, 4))
+ swap_func = SWAP_WORDS_32;
+ else
+ swap_func = SWAP_BYTES;
+ }
+
+ /*
+ * Loop invariants:
+ * 1. elements [a,n) satisfy the heap property (compare greater than
+ * all of their children),
+ * 2. elements [n,num*size) are sorted, and
+ * 3. a <= b <= c <= d <= n (whenever they are valid).
+ */
+ for (;;) {
+ size_t b, c, d;
+
+ if (a) /* Building heap: sift down --a */
+ a -= size;
+ else if (n -= size) /* Sorting: Extract root to --n */
+ do_swap(base, base + n, size, swap_func, priv);
+ else /* Sort complete */
+ break;
+
+ /*
+ * Sift element at "a" down into heap. This is the
+ * "bottom-up" variant, which significantly reduces
+ * calls to cmp_func(): we find the sift-down path all
+ * the way to the leaves (one compare per level), then
+ * backtrack to find where to insert the target element.
+ *
+ * Because elements tend to sift down close to the leaves,
+ * this uses fewer compares than doing two per level
+ * on the way down. (A bit more than half as many on
+ * average, 3/4 worst-case.)
+ */
+ for (b = a; c = 2*b + size, (d = c + size) < n;)
+ b = do_cmp(base + c, base + d, cmp_func, priv) >= 0 ? c : d;
+ if (d == n) /* Special case last leaf with no sibling */
+ b = c;
+
+ /* Now backtrack from "b" to the correct location for "a" */
+ while (b != a && do_cmp(base + a, base + b, cmp_func, priv) >= 0)
+ b = parent(b, lsbit, size);
+ c = b; /* Where "a" belongs */
+ while (b != a) { /* Shift it into place */
+ b = parent(b, lsbit, size);
+ do_swap(base + b, base + c, size, swap_func, priv);
+ }
+ }
+}
+EXPORT_SYMBOL(sort_r);
+
+void sort(void *base, size_t num, size_t size,
+ cmp_func_t cmp_func,
+ swap_func_t swap_func)
+{
+ struct wrapper w = {
+ .cmp = cmp_func,
+ .swap = swap_func,
+ };
+
+ return sort_r(base, num, size, _CMP_WRAPPER, SWAP_WRAPPER, &w);
+}
+EXPORT_SYMBOL(sort);
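+
+/*
+ * An illustrative sketch of a typical caller (assuming an int array; the
+ * names cmp_int and example are hypothetical):
+ */
+#if 0	/* example only -- not compiled */
+static int cmp_int(const void *a, const void *b)
+{
+ const int x = *(const int *)a, y = *(const int *)b;
+
+ return x < y ? -1 : (x > y ? 1 : 0);
+}
+
+static void example(void)
+{
+ int v[] = { 3, 1, 2 };
+
+ /* A NULL swap_func selects the built-in word-sized swap. */
+ sort(v, ARRAY_SIZE(v), sizeof(v[0]), cmp_int, NULL);
+}
+#endif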
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
new file mode 100644
index 000000000..2f5aa8518
--- /dev/null
+++ b/lib/stackdepot.c
@@ -0,0 +1,541 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Stack depot - a stack trace storage that avoids duplication.
+ *
+ * Internally, stack depot maintains a hash table of unique stacktraces. The
+ * stack traces themselves are stored contiguously one after another in a set
+ * of separate page allocations.
+ *
+ * Author: Alexander Potapenko <glider@google.com>
+ * Copyright (C) 2016 Google, Inc.
+ *
+ * Based on the code by Dmitry Chernenkov.
+ */
+
+#define pr_fmt(fmt) "stackdepot: " fmt
+
+#include <linux/gfp.h>
+#include <linux/jhash.h>
+#include <linux/kernel.h>
+#include <linux/kmsan.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/percpu.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/stacktrace.h>
+#include <linux/stackdepot.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/memblock.h>
+#include <linux/kasan-enabled.h>
+
+#define DEPOT_HANDLE_BITS (sizeof(depot_stack_handle_t) * 8)
+
+#define DEPOT_VALID_BITS 1
+#define DEPOT_POOL_ORDER 2 /* Pool size order, 4 pages */
+#define DEPOT_POOL_SIZE (1LL << (PAGE_SHIFT + DEPOT_POOL_ORDER))
+#define DEPOT_STACK_ALIGN 4
+#define DEPOT_OFFSET_BITS (DEPOT_POOL_ORDER + PAGE_SHIFT - DEPOT_STACK_ALIGN)
+#define DEPOT_POOL_INDEX_BITS (DEPOT_HANDLE_BITS - DEPOT_VALID_BITS - \
+ DEPOT_OFFSET_BITS - STACK_DEPOT_EXTRA_BITS)
+#define DEPOT_POOLS_CAP 8192
+#define DEPOT_MAX_POOLS \
+ (((1LL << (DEPOT_POOL_INDEX_BITS)) < DEPOT_POOLS_CAP) ? \
+ (1LL << (DEPOT_POOL_INDEX_BITS)) : DEPOT_POOLS_CAP)
+
+/* Compact structure that stores a reference to a stack. */
+union handle_parts {
+ depot_stack_handle_t handle;
+ struct {
+ u32 pool_index : DEPOT_POOL_INDEX_BITS;
+ u32 offset : DEPOT_OFFSET_BITS;
+ u32 valid : DEPOT_VALID_BITS;
+ u32 extra : STACK_DEPOT_EXTRA_BITS;
+ };
+};
+
+struct stack_record {
+ struct stack_record *next; /* Link in the hash table */
+ u32 hash; /* Hash in the hash table */
+ u32 size; /* Number of stored frames */
+ union handle_parts handle;
+ unsigned long entries[]; /* Variable-sized array of frames */
+};
+
+static bool stack_depot_disabled;
+static bool __stack_depot_early_init_requested __initdata = IS_ENABLED(CONFIG_STACKDEPOT_ALWAYS_INIT);
+static bool __stack_depot_early_init_passed __initdata;
+
+/* Use one hash table bucket per 16 KB of memory. */
+#define STACK_HASH_TABLE_SCALE 14
+/* Limit the number of buckets between 4K and 1M. */
+#define STACK_BUCKET_NUMBER_ORDER_MIN 12
+#define STACK_BUCKET_NUMBER_ORDER_MAX 20
+/* Initial seed for jhash2. */
+#define STACK_HASH_SEED 0x9747b28c
+
+/* Hash table of pointers to stored stack traces. */
+static struct stack_record **stack_table;
+/* Fixed order of the number of table buckets. Used when KASAN is enabled. */
+static unsigned int stack_bucket_number_order;
+/* Hash mask for indexing the table. */
+static unsigned int stack_hash_mask;
+
+/* Array of memory regions that store stack traces. */
+static void *stack_pools[DEPOT_MAX_POOLS];
+/* Currently used pool in stack_pools. */
+static int pool_index;
+/* Offset to the unused space in the currently used pool. */
+static size_t pool_offset;
+/* Lock that protects the variables above. */
+static DEFINE_RAW_SPINLOCK(pool_lock);
+/*
+ * Stack depot tries to keep an extra pool allocated even before it runs out
+ * of space in the currently used pool.
+ * This flag marks that this next extra pool needs to be allocated and
+ * initialized. It has the value 0 when either the next pool is not yet
+ * initialized or the limit on the number of pools is reached.
+ */
+static int next_pool_required = 1;
+
+static int __init disable_stack_depot(char *str)
+{
+ int ret;
+
+ ret = kstrtobool(str, &stack_depot_disabled);
+ if (!ret && stack_depot_disabled) {
+ pr_info("disabled\n");
+ stack_table = NULL;
+ }
+ return 0;
+}
+early_param("stack_depot_disable", disable_stack_depot);
+
+void __init stack_depot_request_early_init(void)
+{
+ /* Too late to request early init now. */
+ WARN_ON(__stack_depot_early_init_passed);
+
+ __stack_depot_early_init_requested = true;
+}
+
+/* Allocates a hash table via memblock. Can only be used during early boot. */
+int __init stack_depot_early_init(void)
+{
+ unsigned long entries = 0;
+
+ /* This function must be called only once, from mm_init(). */
+ if (WARN_ON(__stack_depot_early_init_passed))
+ return 0;
+ __stack_depot_early_init_passed = true;
+
+ /*
+ * If KASAN is enabled, use the maximum order: KASAN is frequently used
+ * in fuzzing scenarios, which leads to a large number of different
+ * stack traces being stored in stack depot.
+ */
+ if (kasan_enabled() && !stack_bucket_number_order)
+ stack_bucket_number_order = STACK_BUCKET_NUMBER_ORDER_MAX;
+
+ if (!__stack_depot_early_init_requested || stack_depot_disabled)
+ return 0;
+
+ /*
+ * If stack_bucket_number_order is not set, leave entries as 0 to rely
+ * on the automatic calculations performed by alloc_large_system_hash.
+ */
+ if (stack_bucket_number_order)
+ entries = 1UL << stack_bucket_number_order;
+ pr_info("allocating hash table via alloc_large_system_hash\n");
+ stack_table = alloc_large_system_hash("stackdepot",
+ sizeof(struct stack_record *),
+ entries,
+ STACK_HASH_TABLE_SCALE,
+ HASH_EARLY | HASH_ZERO,
+ NULL,
+ &stack_hash_mask,
+ 1UL << STACK_BUCKET_NUMBER_ORDER_MIN,
+ 1UL << STACK_BUCKET_NUMBER_ORDER_MAX);
+ if (!stack_table) {
+ pr_err("hash table allocation failed, disabling\n");
+ stack_depot_disabled = true;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/* Allocates a hash table via kvcalloc. Can be used after boot. */
+int stack_depot_init(void)
+{
+ static DEFINE_MUTEX(stack_depot_init_mutex);
+ unsigned long entries;
+ int ret = 0;
+
+ mutex_lock(&stack_depot_init_mutex);
+
+ if (stack_depot_disabled || stack_table)
+ goto out_unlock;
+
+ /*
+ * Similarly to stack_depot_early_init, use stack_bucket_number_order
+ * if assigned, and rely on automatic scaling otherwise.
+ */
+ if (stack_bucket_number_order) {
+ entries = 1UL << stack_bucket_number_order;
+ } else {
+ int scale = STACK_HASH_TABLE_SCALE;
+
+ entries = nr_free_buffer_pages();
+ entries = roundup_pow_of_two(entries);
+
+ if (scale > PAGE_SHIFT)
+ entries >>= (scale - PAGE_SHIFT);
+ else
+ entries <<= (PAGE_SHIFT - scale);
+ }
+
+ if (entries < 1UL << STACK_BUCKET_NUMBER_ORDER_MIN)
+ entries = 1UL << STACK_BUCKET_NUMBER_ORDER_MIN;
+ if (entries > 1UL << STACK_BUCKET_NUMBER_ORDER_MAX)
+ entries = 1UL << STACK_BUCKET_NUMBER_ORDER_MAX;
+
+ pr_info("allocating hash table of %lu entries via kvcalloc\n", entries);
+ stack_table = kvcalloc(entries, sizeof(struct stack_record *), GFP_KERNEL);
+ if (!stack_table) {
+ pr_err("hash table allocation failed, disabling\n");
+ stack_depot_disabled = true;
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+ stack_hash_mask = entries - 1;
+
+out_unlock:
+ mutex_unlock(&stack_depot_init_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(stack_depot_init);
+
+/* Uses preallocated memory to initialize a new stack depot pool. */
+static void depot_init_pool(void **prealloc)
+{
+ /*
+ * If the next pool is already initialized or the maximum number of
+ * pools is reached, do not use the preallocated memory.
+ * smp_load_acquire() here pairs with smp_store_release() below and
+ * in depot_alloc_stack().
+ */
+ if (!smp_load_acquire(&next_pool_required))
+ return;
+
+ /* Check if the current pool is not yet allocated. */
+ if (stack_pools[pool_index] == NULL) {
+ /* Use the preallocated memory for the current pool. */
+ stack_pools[pool_index] = *prealloc;
+ *prealloc = NULL;
+ } else {
+ /*
+ * Otherwise, use the preallocated memory for the next pool
+ * as long as we do not exceed the maximum number of pools.
+ */
+ if (pool_index + 1 < DEPOT_MAX_POOLS) {
+ stack_pools[pool_index + 1] = *prealloc;
+ *prealloc = NULL;
+ }
+ /*
+ * At this point, either the next pool is initialized or the
+ * maximum number of pools is reached. In either case, take
+ * note that initializing another pool is not required.
+ * This smp_store_release pairs with smp_load_acquire() above
+ * and in stack_depot_save().
+ */
+ smp_store_release(&next_pool_required, 0);
+ }
+}
+
+/* Allocates a new stack in a stack depot pool. */
+static struct stack_record *
+depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
+{
+ struct stack_record *stack;
+ size_t required_size = struct_size(stack, entries, size);
+
+ required_size = ALIGN(required_size, 1 << DEPOT_STACK_ALIGN);
+
+ /* Check if there is not enough space in the current pool. */
+ if (unlikely(pool_offset + required_size > DEPOT_POOL_SIZE)) {
+ /* Bail out if we reached the pool limit. */
+ if (unlikely(pool_index + 1 >= DEPOT_MAX_POOLS)) {
+ WARN_ONCE(1, "Stack depot reached limit capacity");
+ return NULL;
+ }
+
+ /*
+ * Move on to the next pool.
+ * WRITE_ONCE pairs with potential concurrent read in
+ * stack_depot_fetch().
+ */
+ WRITE_ONCE(pool_index, pool_index + 1);
+ pool_offset = 0;
+ /*
+ * If the maximum number of pools is not reached, take note
+ * that the next pool needs to be initialized.
+ * smp_store_release() here pairs with smp_load_acquire() in
+ * stack_depot_save() and depot_init_pool().
+ */
+ if (pool_index + 1 < DEPOT_MAX_POOLS)
+ smp_store_release(&next_pool_required, 1);
+ }
+
+ /* Assign the preallocated memory to a pool if required. */
+ if (*prealloc)
+ depot_init_pool(prealloc);
+
+ /* Check if we have a pool to save the stack trace. */
+ if (stack_pools[pool_index] == NULL)
+ return NULL;
+
+ /* Save the stack trace. */
+ stack = stack_pools[pool_index] + pool_offset;
+ stack->hash = hash;
+ stack->size = size;
+ stack->handle.pool_index = pool_index;
+ stack->handle.offset = pool_offset >> DEPOT_STACK_ALIGN;
+ stack->handle.valid = 1;
+ stack->handle.extra = 0;
+ memcpy(stack->entries, entries, flex_array_size(stack, entries, size));
+ pool_offset += required_size;
+ /*
+ * Let KMSAN know the stored stack record is initialized. This shall
+ * prevent false positive reports if instrumented code accesses it.
+ */
+ kmsan_unpoison_memory(stack, required_size);
+
+ return stack;
+}
+
+/* Calculates the hash for a stack. */
+static inline u32 hash_stack(unsigned long *entries, unsigned int size)
+{
+ return jhash2((u32 *)entries,
+ array_size(size, sizeof(*entries)) / sizeof(u32),
+ STACK_HASH_SEED);
+}
+
+/*
+ * Non-instrumented version of memcmp().
+ * Checks only for equality, not for lexicographical order.
+ */
+static inline
+int stackdepot_memcmp(const unsigned long *u1, const unsigned long *u2,
+ unsigned int n)
+{
+ for ( ; n-- ; u1++, u2++) {
+ if (*u1 != *u2)
+ return 1;
+ }
+ return 0;
+}
+
+/* Finds a stack in a bucket of the hash table. */
+static inline struct stack_record *find_stack(struct stack_record *bucket,
+ unsigned long *entries, int size,
+ u32 hash)
+{
+ struct stack_record *found;
+
+ for (found = bucket; found; found = found->next) {
+ if (found->hash == hash &&
+ found->size == size &&
+ !stackdepot_memcmp(entries, found->entries, size))
+ return found;
+ }
+ return NULL;
+}
+
+depot_stack_handle_t __stack_depot_save(unsigned long *entries,
+ unsigned int nr_entries,
+ gfp_t alloc_flags, bool can_alloc)
+{
+ struct stack_record *found = NULL, **bucket;
+ union handle_parts retval = { .handle = 0 };
+ struct page *page = NULL;
+ void *prealloc = NULL;
+ unsigned long flags;
+ u32 hash;
+
+ /*
+ * If this stack trace is from an interrupt, including anything before
+ * interrupt entry usually leads to unbounded stack depot growth.
+ *
+ * Since use of filter_irq_stacks() is a requirement to ensure stack
+ * depot can efficiently deduplicate interrupt stacks, always apply
+ * filter_irq_stacks() here to simplify all callers' use of stack depot.
+ */
+ nr_entries = filter_irq_stacks(entries, nr_entries);
+
+ if (unlikely(nr_entries == 0) || stack_depot_disabled)
+ goto fast_exit;
+
+ hash = hash_stack(entries, nr_entries);
+ bucket = &stack_table[hash & stack_hash_mask];
+
+ /*
+ * Fast path: look the stack trace up without locking.
+ * The smp_load_acquire() here pairs with smp_store_release() to
+ * |bucket| below.
+ */
+ found = find_stack(smp_load_acquire(bucket), entries, nr_entries, hash);
+ if (found)
+ goto exit;
+
+ /*
+ * Check if another stack pool needs to be initialized. If so, allocate
+ * the memory now - we won't be able to do that under the lock.
+ *
+ * The smp_load_acquire() here pairs with smp_store_release() to
+ * |next_pool_required| in depot_alloc_stack() and depot_init_pool().
+ */
+ if (unlikely(can_alloc && smp_load_acquire(&next_pool_required))) {
+ /*
+ * Zero out zone modifiers, as we don't have specific zone
+ * requirements. Keep the flags related to allocation in atomic
+ * contexts and I/O.
+ */
+ alloc_flags &= ~GFP_ZONEMASK;
+ alloc_flags &= (GFP_ATOMIC | GFP_KERNEL);
+ alloc_flags |= __GFP_NOWARN;
+ page = alloc_pages(alloc_flags, DEPOT_POOL_ORDER);
+ if (page)
+ prealloc = page_address(page);
+ }
+
+ raw_spin_lock_irqsave(&pool_lock, flags);
+
+ found = find_stack(*bucket, entries, nr_entries, hash);
+ if (!found) {
+ struct stack_record *new =
+ depot_alloc_stack(entries, nr_entries, hash, &prealloc);
+
+ if (new) {
+ new->next = *bucket;
+ /*
+ * This smp_store_release() pairs with
+ * smp_load_acquire() from |bucket| above.
+ */
+ smp_store_release(bucket, new);
+ found = new;
+ }
+ } else if (prealloc) {
+ /*
+ * Stack depot already contains this stack trace, but let's
+ * keep the preallocated memory for the future.
+ */
+ depot_init_pool(&prealloc);
+ }
+
+ raw_spin_unlock_irqrestore(&pool_lock, flags);
+exit:
+ if (prealloc) {
+ /* Stack depot didn't use this memory, free it. */
+ free_pages((unsigned long)prealloc, DEPOT_POOL_ORDER);
+ }
+ if (found)
+ retval.handle = found->handle.handle;
+fast_exit:
+ return retval.handle;
+}
+EXPORT_SYMBOL_GPL(__stack_depot_save);
+
+depot_stack_handle_t stack_depot_save(unsigned long *entries,
+ unsigned int nr_entries,
+ gfp_t alloc_flags)
+{
+ return __stack_depot_save(entries, nr_entries, alloc_flags, true);
+}
+EXPORT_SYMBOL_GPL(stack_depot_save);
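+
+/*
+ * An illustrative sketch of the save/print round trip (assuming process
+ * context where GFP_KERNEL is usable; the name example is hypothetical):
+ */
+#if 0	/* example only -- not compiled */
+static void example(void)
+{
+ unsigned long entries[16];
+ unsigned int nr_entries;
+ depot_stack_handle_t handle;
+
+ nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
+ handle = stack_depot_save(entries, nr_entries, GFP_KERNEL);
+ if (handle) /* 0 means the trace could not be stored */
+ stack_depot_print(handle);
+}
+#endif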
+
+unsigned int stack_depot_fetch(depot_stack_handle_t handle,
+ unsigned long **entries)
+{
+ union handle_parts parts = { .handle = handle };
+ /*
+ * READ_ONCE pairs with potential concurrent write in
+ * depot_alloc_stack.
+ */
+ int pool_index_cached = READ_ONCE(pool_index);
+ void *pool;
+ size_t offset = parts.offset << DEPOT_STACK_ALIGN;
+ struct stack_record *stack;
+
+ *entries = NULL;
+ /*
+ * Let KMSAN know *entries is initialized. This shall prevent false
+ * positive reports if instrumented code accesses it.
+ */
+ kmsan_unpoison_memory(entries, sizeof(*entries));
+
+ if (!handle)
+ return 0;
+
+ if (parts.pool_index > pool_index_cached) {
+ WARN(1, "pool index %d out of bounds (%d) for stack id %08x\n",
+ parts.pool_index, pool_index_cached, handle);
+ return 0;
+ }
+ pool = stack_pools[parts.pool_index];
+ if (!pool)
+ return 0;
+ stack = pool + offset;
+
+ *entries = stack->entries;
+ return stack->size;
+}
+EXPORT_SYMBOL_GPL(stack_depot_fetch);
+
+void stack_depot_print(depot_stack_handle_t stack)
+{
+ unsigned long *entries;
+ unsigned int nr_entries;
+
+ nr_entries = stack_depot_fetch(stack, &entries);
+ if (nr_entries > 0)
+ stack_trace_print(entries, nr_entries, 0);
+}
+EXPORT_SYMBOL_GPL(stack_depot_print);
+
+int stack_depot_snprint(depot_stack_handle_t handle, char *buf, size_t size,
+ int spaces)
+{
+ unsigned long *entries;
+ unsigned int nr_entries;
+
+ nr_entries = stack_depot_fetch(handle, &entries);
+ return nr_entries ? stack_trace_snprint(buf, size, entries, nr_entries,
+ spaces) : 0;
+}
+EXPORT_SYMBOL_GPL(stack_depot_snprint);
+
+depot_stack_handle_t __must_check stack_depot_set_extra_bits(
+ depot_stack_handle_t handle, unsigned int extra_bits)
+{
+ union handle_parts parts = { .handle = handle };
+
+ /* Don't set extra bits on empty handles. */
+ if (!handle)
+ return 0;
+
+ parts.extra = extra_bits;
+ return parts.handle;
+}
+EXPORT_SYMBOL(stack_depot_set_extra_bits);
+
+unsigned int stack_depot_get_extra_bits(depot_stack_handle_t handle)
+{
+ union handle_parts parts = { .handle = handle };
+
+ return parts.extra;
+}
+EXPORT_SYMBOL(stack_depot_get_extra_bits);
diff --git a/lib/stackinit_kunit.c b/lib/stackinit_kunit.c
new file mode 100644
index 000000000..05947a2fe
--- /dev/null
+++ b/lib/stackinit_kunit.c
@@ -0,0 +1,461 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Test cases for compiler-based stack variable zeroing via
+ * -ftrivial-auto-var-init={zero,pattern} or CONFIG_GCC_PLUGIN_STRUCTLEAK*.
+ * For example, see:
+ * "Running tests with kunit_tool" at Documentation/dev-tools/kunit/start.rst
+ * ./tools/testing/kunit/kunit.py run stackinit [--raw_output] \
+ * --make_option LLVM=1 \
+ * --kconfig_add CONFIG_INIT_STACK_ALL_ZERO=y
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <kunit/test.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+
+/* Exfiltration buffer. */
+#define MAX_VAR_SIZE 128
+static u8 check_buf[MAX_VAR_SIZE];
+
+/* Character array to trigger stack protector in all functions. */
+#define VAR_BUFFER 32
+
+/* Volatile mask to convince compiler to copy memory with 0xff. */
+static volatile u8 forced_mask = 0xff;
+
+/* Location and size tracking to validate fill and test are colocated. */
+static void *fill_start, *target_start;
+static size_t fill_size, target_size;
+
+static bool stackinit_range_contains(char *haystack_start, size_t haystack_size,
+ char *needle_start, size_t needle_size)
+{
+ if (needle_start >= haystack_start &&
+ needle_start + needle_size <= haystack_start + haystack_size)
+ return true;
+ return false;
+}
+
+/* Whether the test is expected to fail. */
+#define WANT_SUCCESS 0
+#define XFAIL 1
+
+#define DO_NOTHING_TYPE_SCALAR(var_type) var_type
+#define DO_NOTHING_TYPE_STRING(var_type) void
+#define DO_NOTHING_TYPE_STRUCT(var_type) void
+
+#define DO_NOTHING_RETURN_SCALAR(ptr) *(ptr)
+#define DO_NOTHING_RETURN_STRING(ptr) /**/
+#define DO_NOTHING_RETURN_STRUCT(ptr) /**/
+
+#define DO_NOTHING_CALL_SCALAR(var, name) \
+ (var) = do_nothing_ ## name(&(var))
+#define DO_NOTHING_CALL_STRING(var, name) \
+ do_nothing_ ## name(var)
+#define DO_NOTHING_CALL_STRUCT(var, name) \
+ do_nothing_ ## name(&(var))
+
+#define FETCH_ARG_SCALAR(var) &var
+#define FETCH_ARG_STRING(var) var
+#define FETCH_ARG_STRUCT(var) &var
+
+#define FILL_SIZE_STRING 16
+
+#define INIT_CLONE_SCALAR /**/
+#define INIT_CLONE_STRING [FILL_SIZE_STRING]
+#define INIT_CLONE_STRUCT /**/
+
+#define ZERO_CLONE_SCALAR(zero) memset(&(zero), 0x00, sizeof(zero))
+#define ZERO_CLONE_STRING(zero) memset(&(zero), 0x00, sizeof(zero))
+/*
+ * For the struct, intentionally poison padding to see if it gets
+ * copied out in direct assignments.
+ */
+#define ZERO_CLONE_STRUCT(zero) \
+ do { \
+ memset(&(zero), 0xFF, sizeof(zero)); \
+ zero.one = 0; \
+ zero.two = 0; \
+ zero.three = 0; \
+ zero.four = 0; \
+ } while (0)
+
+#define INIT_SCALAR_none(var_type) /**/
+#define INIT_SCALAR_zero(var_type) = 0
+
+#define INIT_STRING_none(var_type) [FILL_SIZE_STRING] /**/
+#define INIT_STRING_zero(var_type) [FILL_SIZE_STRING] = { }
+
+#define INIT_STRUCT_none(var_type) /**/
+#define INIT_STRUCT_zero(var_type) = { }
+
+
+#define __static_partial { .two = 0, }
+#define __static_all { .one = 0, \
+ .two = 0, \
+ .three = 0, \
+ .four = 0, \
+ }
+#define __dynamic_partial { .two = arg->two, }
+#define __dynamic_all { .one = arg->one, \
+ .two = arg->two, \
+ .three = arg->three, \
+ .four = arg->four, \
+ }
+#define __runtime_partial var.two = 0
+#define __runtime_all var.one = 0; \
+ var.two = 0; \
+ var.three = 0; \
+ var.four = 0
+
+#define INIT_STRUCT_static_partial(var_type) \
+ = __static_partial
+#define INIT_STRUCT_static_all(var_type) \
+ = __static_all
+#define INIT_STRUCT_dynamic_partial(var_type) \
+ = __dynamic_partial
+#define INIT_STRUCT_dynamic_all(var_type) \
+ = __dynamic_all
+#define INIT_STRUCT_runtime_partial(var_type) \
+ ; __runtime_partial
+#define INIT_STRUCT_runtime_all(var_type) \
+ ; __runtime_all
+
+#define INIT_STRUCT_assigned_static_partial(var_type) \
+ ; var = (var_type)__static_partial
+#define INIT_STRUCT_assigned_static_all(var_type) \
+ ; var = (var_type)__static_all
+#define INIT_STRUCT_assigned_dynamic_partial(var_type) \
+ ; var = (var_type)__dynamic_partial
+#define INIT_STRUCT_assigned_dynamic_all(var_type) \
+ ; var = (var_type)__dynamic_all
+
+#define INIT_STRUCT_assigned_copy(var_type) \
+ ; var = *(arg)
+
+/*
+ * @name: unique string name for the test
+ * @var_type: type to be tested for zeroing initialization
+ * @which: is this a SCALAR, STRING, or STRUCT type?
+ * @init_level: what kind of initialization is performed
+ * @xfail: is this test expected to fail?
+ */
+#define DEFINE_TEST_DRIVER(name, var_type, which, xfail) \
+/* Reports failures via KUnit assertions. */ \
+static noinline void test_ ## name (struct kunit *test) \
+{ \
+ var_type zero INIT_CLONE_ ## which; \
+ int ignored; \
+ u8 sum = 0, i; \
+ \
+ /* Notice when a new test is larger than expected. */ \
+ BUILD_BUG_ON(sizeof(zero) > MAX_VAR_SIZE); \
+ \
+ /* Fill clone type with zero for per-field init. */ \
+ ZERO_CLONE_ ## which(zero); \
+ /* Clear entire check buffer for 0xFF overlap test. */ \
+ memset(check_buf, 0x00, sizeof(check_buf)); \
+ /* Fill stack with 0xFF. */ \
+ ignored = leaf_ ##name((unsigned long)&ignored, 1, \
+ FETCH_ARG_ ## which(zero)); \
+ /* Verify all bytes overwritten with 0xFF. */ \
+ for (sum = 0, i = 0; i < target_size; i++) \
+ sum += (check_buf[i] != 0xFF); \
+ KUNIT_ASSERT_EQ_MSG(test, sum, 0, \
+ "leaf fill was not 0xFF!?\n"); \
+ /* Clear entire check buffer for later bit tests. */ \
+ memset(check_buf, 0x00, sizeof(check_buf)); \
+ /* Extract stack-defined variable contents. */ \
+ ignored = leaf_ ##name((unsigned long)&ignored, 0, \
+ FETCH_ARG_ ## which(zero)); \
+ \
+ /* Validate that compiler lined up fill and target. */ \
+ KUNIT_ASSERT_TRUE_MSG(test, \
+ stackinit_range_contains(fill_start, fill_size, \
+ target_start, target_size), \
+ "stack fill missed target!? " \
+ "(fill %zu wide, target offset by %d)\n", \
+ fill_size, \
+ (int)((ssize_t)(uintptr_t)fill_start - \
+ (ssize_t)(uintptr_t)target_start)); \
+ \
+ /* Look for any bytes still 0xFF in check region. */ \
+ for (sum = 0, i = 0; i < target_size; i++) \
+ sum += (check_buf[i] == 0xFF); \
+ \
+ if (sum != 0 && xfail) \
+ kunit_skip(test, \
+ "XFAIL uninit bytes: %d\n", \
+ sum); \
+ KUNIT_ASSERT_EQ_MSG(test, sum, 0, \
+ "uninit bytes: %d\n", sum); \
+}
+#define DEFINE_TEST(name, var_type, which, init_level, xfail) \
+/* no-op to force compiler into ignoring "uninitialized" vars */\
+static noinline DO_NOTHING_TYPE_ ## which(var_type) \
+do_nothing_ ## name(var_type *ptr) \
+{ \
+ /* Will always be true, but compiler doesn't know. */ \
+ if ((unsigned long)ptr > 0x2) \
+ return DO_NOTHING_RETURN_ ## which(ptr); \
+ else \
+ return DO_NOTHING_RETURN_ ## which(ptr + 1); \
+} \
+static noinline int leaf_ ## name(unsigned long sp, bool fill, \
+ var_type *arg) \
+{ \
+ char buf[VAR_BUFFER]; \
+ var_type var \
+ INIT_ ## which ## _ ## init_level(var_type); \
+ \
+ target_start = &var; \
+ target_size = sizeof(var); \
+ /* \
+ * Keep this buffer around to make sure we've got a \
+ * stack frame of SOME kind... \
+ */ \
+ memset(buf, (char)(sp & 0xff), sizeof(buf)); \
+ /* Fill variable with 0xFF. */ \
+ if (fill) { \
+ fill_start = &var; \
+ fill_size = sizeof(var); \
+ memset(fill_start, \
+ (char)((sp & 0xff) | forced_mask), \
+ fill_size); \
+ } \
+ \
+ /* Silence "never initialized" warnings. */ \
+ DO_NOTHING_CALL_ ## which(var, name); \
+ \
+ /* Exfiltrate "var". */ \
+ memcpy(check_buf, target_start, target_size); \
+ \
+ return (int)buf[0] | (int)buf[sizeof(buf) - 1]; \
+} \
+DEFINE_TEST_DRIVER(name, var_type, which, xfail)
+
+/* Structure with no padding. */
+struct test_packed {
+ unsigned long one;
+ unsigned long two;
+ unsigned long three;
+ unsigned long four;
+};
+
+/* Simple structure with padding likely to be covered by compiler. */
+struct test_small_hole {
+ size_t one;
+ char two;
+ /* 3 byte padding hole here. */
+ int three;
+ unsigned long four;
+};
+
+/* Trigger unhandled padding in a structure. */
+struct test_big_hole {
+ u8 one;
+ u8 two;
+ u8 three;
+ /* 61 byte padding hole here. */
+ u8 four __aligned(64);
+} __aligned(64);
+
+struct test_trailing_hole {
+ char *one;
+ char *two;
+ char *three;
+ char four;
+ /* "sizeof(unsigned long) - 1" byte padding hole here. */
+};
+
+/* Test if STRUCTLEAK is clearing structs with __user fields. */
+struct test_user {
+ u8 one;
+ unsigned long two;
+ char __user *three;
+ unsigned long four;
+};
+
+#define ALWAYS_PASS WANT_SUCCESS
+#define ALWAYS_FAIL XFAIL
+
+#ifdef CONFIG_INIT_STACK_NONE
+# define USER_PASS XFAIL
+# define BYREF_PASS XFAIL
+# define STRONG_PASS XFAIL
+#elif defined(CONFIG_GCC_PLUGIN_STRUCTLEAK_USER)
+# define USER_PASS WANT_SUCCESS
+# define BYREF_PASS XFAIL
+# define STRONG_PASS XFAIL
+#elif defined(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF)
+# define USER_PASS WANT_SUCCESS
+# define BYREF_PASS WANT_SUCCESS
+# define STRONG_PASS XFAIL
+#else
+# define USER_PASS WANT_SUCCESS
+# define BYREF_PASS WANT_SUCCESS
+# define STRONG_PASS WANT_SUCCESS
+#endif
+
+#define DEFINE_SCALAR_TEST(name, init, xfail) \
+ DEFINE_TEST(name ## _ ## init, name, SCALAR, \
+ init, xfail)
+
+#define DEFINE_SCALAR_TESTS(init, xfail) \
+ DEFINE_SCALAR_TEST(u8, init, xfail); \
+ DEFINE_SCALAR_TEST(u16, init, xfail); \
+ DEFINE_SCALAR_TEST(u32, init, xfail); \
+ DEFINE_SCALAR_TEST(u64, init, xfail); \
+ DEFINE_TEST(char_array_ ## init, unsigned char, \
+ STRING, init, xfail)
+
+#define DEFINE_STRUCT_TEST(name, init, xfail) \
+ DEFINE_TEST(name ## _ ## init, \
+ struct test_ ## name, STRUCT, init, \
+ xfail)
+
+#define DEFINE_STRUCT_TESTS(init, xfail) \
+ DEFINE_STRUCT_TEST(small_hole, init, xfail); \
+ DEFINE_STRUCT_TEST(big_hole, init, xfail); \
+ DEFINE_STRUCT_TEST(trailing_hole, init, xfail); \
+ DEFINE_STRUCT_TEST(packed, init, xfail)
+
+#define DEFINE_STRUCT_INITIALIZER_TESTS(base, xfail) \
+ DEFINE_STRUCT_TESTS(base ## _ ## partial, \
+ xfail); \
+ DEFINE_STRUCT_TESTS(base ## _ ## all, xfail)
+
+/* These should be fully initialized all the time! */
+DEFINE_SCALAR_TESTS(zero, ALWAYS_PASS);
+DEFINE_STRUCT_TESTS(zero, ALWAYS_PASS);
+/* Struct initializers: padding may be left uninitialized. */
+DEFINE_STRUCT_INITIALIZER_TESTS(static, STRONG_PASS);
+DEFINE_STRUCT_INITIALIZER_TESTS(dynamic, STRONG_PASS);
+DEFINE_STRUCT_INITIALIZER_TESTS(runtime, STRONG_PASS);
+DEFINE_STRUCT_INITIALIZER_TESTS(assigned_static, STRONG_PASS);
+DEFINE_STRUCT_INITIALIZER_TESTS(assigned_dynamic, STRONG_PASS);
+DEFINE_STRUCT_TESTS(assigned_copy, ALWAYS_FAIL);
+/* No initialization without compiler instrumentation. */
+DEFINE_SCALAR_TESTS(none, STRONG_PASS);
+DEFINE_STRUCT_TESTS(none, BYREF_PASS);
+/* Initialization of members with __user attribute. */
+DEFINE_TEST(user, struct test_user, STRUCT, none, USER_PASS);
+
+/*
+ * Check two uses through a variable declaration outside either path,
+ * which was noticed as a special case in porting earlier stack init
+ * compiler logic.
+ */
+static noinline int __leaf_switch_none(int path, bool fill)
+{
+ switch (path) {
+ /*
+ * This is intentionally unreachable. To silence the
+ * warning, build with -Wno-switch-unreachable.
+ */
+ uint64_t var[10];
+
+ case 1:
+ target_start = &var;
+ target_size = sizeof(var);
+ if (fill) {
+ fill_start = &var;
+ fill_size = sizeof(var);
+
+ memset(fill_start, forced_mask | 0x55, fill_size);
+ }
+ memcpy(check_buf, target_start, target_size);
+ break;
+ case 2:
+ target_start = &var;
+ target_size = sizeof(var);
+ if (fill) {
+ fill_start = &var;
+ fill_size = sizeof(var);
+
+ memset(fill_start, forced_mask | 0xaa, fill_size);
+ }
+ memcpy(check_buf, target_start, target_size);
+ break;
+ default:
+ var[1] = 5;
+ return var[1] & forced_mask;
+ }
+ return 0;
+}
+
+static noinline int leaf_switch_1_none(unsigned long sp, bool fill,
+ uint64_t *arg)
+{
+ return __leaf_switch_none(1, fill);
+}
+
+static noinline int leaf_switch_2_none(unsigned long sp, bool fill,
+ uint64_t *arg)
+{
+ return __leaf_switch_none(2, fill);
+}
+
+/*
+ * These are expected to fail for most configurations because neither
+ * GCC nor Clang have a way to perform initialization of variables in
+ * non-code areas (i.e. in a switch statement before the first "case").
+ * https://bugs.llvm.org/show_bug.cgi?id=44916
+ */
+DEFINE_TEST_DRIVER(switch_1_none, uint64_t, SCALAR, ALWAYS_FAIL);
+DEFINE_TEST_DRIVER(switch_2_none, uint64_t, SCALAR, ALWAYS_FAIL);
+
+#define KUNIT_test_scalars(init) \
+ KUNIT_CASE(test_u8_ ## init), \
+ KUNIT_CASE(test_u16_ ## init), \
+ KUNIT_CASE(test_u32_ ## init), \
+ KUNIT_CASE(test_u64_ ## init), \
+ KUNIT_CASE(test_char_array_ ## init)
+
+#define KUNIT_test_structs(init) \
+ KUNIT_CASE(test_small_hole_ ## init), \
+ KUNIT_CASE(test_big_hole_ ## init), \
+ KUNIT_CASE(test_trailing_hole_ ## init),\
+ KUNIT_CASE(test_packed_ ## init) \
+
+static struct kunit_case stackinit_test_cases[] = {
+ /* These are explicitly initialized and should always pass. */
+ KUNIT_test_scalars(zero),
+ KUNIT_test_structs(zero),
+ /* Padding here appears to be accidentally always initialized? */
+ KUNIT_test_structs(dynamic_partial),
+ KUNIT_test_structs(assigned_dynamic_partial),
+ /* Padding initialization depends on compiler behaviors. */
+ KUNIT_test_structs(static_partial),
+ KUNIT_test_structs(static_all),
+ KUNIT_test_structs(dynamic_all),
+ KUNIT_test_structs(runtime_partial),
+ KUNIT_test_structs(runtime_all),
+ KUNIT_test_structs(assigned_static_partial),
+ KUNIT_test_structs(assigned_static_all),
+ KUNIT_test_structs(assigned_dynamic_all),
+ /* Everything fails this since it effectively performs a memcpy(). */
+ KUNIT_test_structs(assigned_copy),
+ /* STRUCTLEAK_BYREF_ALL should cover everything from here down. */
+ KUNIT_test_scalars(none),
+ KUNIT_CASE(test_switch_1_none),
+ KUNIT_CASE(test_switch_2_none),
+ /* STRUCTLEAK_BYREF should cover from here down. */
+ KUNIT_test_structs(none),
+ /* STRUCTLEAK will only cover this. */
+ KUNIT_CASE(test_user),
+ {}
+};
+
+static struct kunit_suite stackinit_test_suite = {
+ .name = "stackinit",
+ .test_cases = stackinit_test_cases,
+};
+
+kunit_test_suites(&stackinit_test_suite);
+
+MODULE_LICENSE("GPL");
diff --git a/lib/stmp_device.c b/lib/stmp_device.c
new file mode 100644
index 000000000..a4f77b6a9
--- /dev/null
+++ b/lib/stmp_device.c
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 1999 ARM Limited
+ * Copyright (C) 2000 Deep Blue Solutions Ltd
+ * Copyright 2006-2007,2010 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright 2008 Juergen Beisert, kernel@pengutronix.de
+ * Copyright 2009 Ilya Yanok, Emcraft Systems Ltd, yanok@emcraft.com
+ * Copyright (C) 2011 Wolfram Sang, Pengutronix e.K.
+ */
+
+#include <linux/io.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/compiler.h>
+#include <linux/export.h>
+#include <linux/stmp_device.h>
+
+#define STMP_MODULE_CLKGATE (1 << 30)
+#define STMP_MODULE_SFTRST (1 << 31)
+
+/*
+ * Clear the bit and poll it cleared. This is usually called with
+ * a reset address and mask being either SFTRST (bit 31) or CLKGATE
+ * (bit 30).
+ */
+static int stmp_clear_poll_bit(void __iomem *addr, u32 mask)
+{
+ int timeout = 0x400;
+
+ writel(mask, addr + STMP_OFFSET_REG_CLR);
+ udelay(1);
+ while ((readl(addr) & mask) && --timeout)
+ /* nothing */;
+
+ return !timeout;
+}
+
+int stmp_reset_block(void __iomem *reset_addr)
+{
+ int ret;
+ int timeout = 0x400;
+
+ /* clear and poll SFTRST */
+ ret = stmp_clear_poll_bit(reset_addr, STMP_MODULE_SFTRST);
+ if (unlikely(ret))
+ goto error;
+
+ /* clear CLKGATE */
+ writel(STMP_MODULE_CLKGATE, reset_addr + STMP_OFFSET_REG_CLR);
+
+ /* set SFTRST to reset the block */
+ writel(STMP_MODULE_SFTRST, reset_addr + STMP_OFFSET_REG_SET);
+ udelay(1);
+
+ /* poll CLKGATE becoming set */
+ while ((!(readl(reset_addr) & STMP_MODULE_CLKGATE)) && --timeout)
+ /* nothing */;
+ if (unlikely(!timeout))
+ goto error;
+
+ /* clear and poll SFTRST */
+ ret = stmp_clear_poll_bit(reset_addr, STMP_MODULE_SFTRST);
+ if (unlikely(ret))
+ goto error;
+
+ /* clear and poll CLKGATE */
+ ret = stmp_clear_poll_bit(reset_addr, STMP_MODULE_CLKGATE);
+ if (unlikely(ret))
+ goto error;
+
+ return 0;
+
+error:
+ pr_err("%s(%p): module reset timeout\n", __func__, reset_addr);
+ return -ETIMEDOUT;
+}
+EXPORT_SYMBOL(stmp_reset_block);
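+
+/*
+ * An illustrative sketch of a typical caller during driver probe (the
+ * function name and the ioremapped base address are hypothetical):
+ */
+#if 0	/* example only -- not compiled */
+static int example_reset(void __iomem *base)
+{
+ /* Soft-reset the block before programming it. */
+ int ret = stmp_reset_block(base);
+
+ if (ret) /* -ETIMEDOUT if SFTRST/CLKGATE never settled */
+ return ret;
+ return 0;
+}
+#endif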
diff --git a/lib/strcat_kunit.c b/lib/strcat_kunit.c
new file mode 100644
index 000000000..e21be9551
--- /dev/null
+++ b/lib/strcat_kunit.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Kernel module for testing 'strcat' family of functions.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <kunit/test.h>
+#include <linux/string.h>
+
+static volatile int unconst;
+
+static void strcat_test(struct kunit *test)
+{
+ char dest[8];
+
+ /* Destination is terminated. */
+ memset(dest, 0, sizeof(dest));
+ KUNIT_EXPECT_EQ(test, strlen(dest), 0);
+ /* Empty copy does nothing. */
+ KUNIT_EXPECT_TRUE(test, strcat(dest, "") == dest);
+ KUNIT_EXPECT_STREQ(test, dest, "");
+ /* 4 characters copied in, stops at %NUL. */
+ KUNIT_EXPECT_TRUE(test, strcat(dest, "four\000123") == dest);
+ KUNIT_EXPECT_STREQ(test, dest, "four");
+ KUNIT_EXPECT_EQ(test, dest[5], '\0');
+ /* 2 more characters copied in okay. */
+ KUNIT_EXPECT_TRUE(test, strcat(dest, "AB") == dest);
+ KUNIT_EXPECT_STREQ(test, dest, "fourAB");
+}
+
+static void strncat_test(struct kunit *test)
+{
+ char dest[8];
+
+ /* Destination is terminated. */
+ memset(dest, 0, sizeof(dest));
+ KUNIT_EXPECT_EQ(test, strlen(dest), 0);
+ /* Empty copy of size 0 does nothing. */
+ KUNIT_EXPECT_TRUE(test, strncat(dest, "", 0 + unconst) == dest);
+ KUNIT_EXPECT_STREQ(test, dest, "");
+ /* Empty copy of size 1 does nothing too. */
+ KUNIT_EXPECT_TRUE(test, strncat(dest, "", 1 + unconst) == dest);
+ KUNIT_EXPECT_STREQ(test, dest, "");
+ /* Copy of max 0 characters should do nothing. */
+ KUNIT_EXPECT_TRUE(test, strncat(dest, "asdf", 0 + unconst) == dest);
+ KUNIT_EXPECT_STREQ(test, dest, "");
+
+ /* 4 characters copied in, even if max is 8. */
+ KUNIT_EXPECT_TRUE(test, strncat(dest, "four\000123", 8 + unconst) == dest);
+ KUNIT_EXPECT_STREQ(test, dest, "four");
+ KUNIT_EXPECT_EQ(test, dest[5], '\0');
+ KUNIT_EXPECT_EQ(test, dest[6], '\0');
+ /* 2 characters copied in okay, 2 ignored. */
+ KUNIT_EXPECT_TRUE(test, strncat(dest, "ABCD", 2 + unconst) == dest);
+ KUNIT_EXPECT_STREQ(test, dest, "fourAB");
+}
+
+static void strlcat_test(struct kunit *test)
+{
+ char dest[8] = "";
+ int len = sizeof(dest) + unconst;
+
+ /* Destination is terminated. */
+ KUNIT_EXPECT_EQ(test, strlen(dest), 0);
+ /* Empty copy is size 0. */
+ KUNIT_EXPECT_EQ(test, strlcat(dest, "", len), 0);
+ KUNIT_EXPECT_STREQ(test, dest, "");
+ /* Size 1 should keep buffer terminated, report size of source only. */
+ KUNIT_EXPECT_EQ(test, strlcat(dest, "four", 1 + unconst), 4);
+ KUNIT_EXPECT_STREQ(test, dest, "");
+
+ /* 4 characters copied in. */
+ KUNIT_EXPECT_EQ(test, strlcat(dest, "four", len), 4);
+ KUNIT_EXPECT_STREQ(test, dest, "four");
+ /* 2 characters copied in okay, gets to 6 total. */
+ KUNIT_EXPECT_EQ(test, strlcat(dest, "AB", len), 6);
+ KUNIT_EXPECT_STREQ(test, dest, "fourAB");
+ /* 2 characters ignored if max size (7) reached. */
+ KUNIT_EXPECT_EQ(test, strlcat(dest, "CD", 7 + unconst), 8);
+ KUNIT_EXPECT_STREQ(test, dest, "fourAB");
+ /* Only 1 of 3 characters fits, now at true max size. */
+ KUNIT_EXPECT_EQ(test, strlcat(dest, "EFG", len), 9);
+ KUNIT_EXPECT_STREQ(test, dest, "fourABE");
+ /* Everything else ignored, now at full size. */
+ KUNIT_EXPECT_EQ(test, strlcat(dest, "1234", len), 11);
+ KUNIT_EXPECT_STREQ(test, dest, "fourABE");
+}
+
+static struct kunit_case strcat_test_cases[] = {
+ KUNIT_CASE(strcat_test),
+ KUNIT_CASE(strncat_test),
+ KUNIT_CASE(strlcat_test),
+ {}
+};
+
+static struct kunit_suite strcat_test_suite = {
+ .name = "strcat",
+ .test_cases = strcat_test_cases,
+};
+
+kunit_test_suite(strcat_test_suite);
+
+MODULE_LICENSE("GPL");
diff --git a/lib/string.c b/lib/string.c
new file mode 100644
index 000000000..be2662395
--- /dev/null
+++ b/lib/string.c
@@ -0,0 +1,880 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * linux/lib/string.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
+/*
+ * This file should be used only for "library" routines that may have
+ * alternative implementations on specific architectures (generally
+ * found in <asm-xx/string.h>), or get overloaded by FORTIFY_SOURCE.
+ * (Specifically, this file is built with __NO_FORTIFY.)
+ *
+ * Other helper functions should live in string_helpers.c.
+ */
+
+#define __NO_FORTIFY
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/bug.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+
+#include <asm/unaligned.h>
+#include <asm/byteorder.h>
+#include <asm/word-at-a-time.h>
+#include <asm/page.h>
+
+#ifndef __HAVE_ARCH_STRNCASECMP
+/**
+ * strncasecmp - Case insensitive, length-limited string comparison
+ * @s1: One string
+ * @s2: The other string
+ * @len: the maximum number of characters to compare
+ */
+int strncasecmp(const char *s1, const char *s2, size_t len)
+{
+ /* Yes, Virginia, it had better be unsigned */
+ unsigned char c1, c2;
+
+ if (!len)
+ return 0;
+
+ do {
+ c1 = *s1++;
+ c2 = *s2++;
+ if (!c1 || !c2)
+ break;
+ if (c1 == c2)
+ continue;
+ c1 = tolower(c1);
+ c2 = tolower(c2);
+ if (c1 != c2)
+ break;
+ } while (--len);
+ return (int)c1 - (int)c2;
+}
+EXPORT_SYMBOL(strncasecmp);
+#endif
+
+#ifndef __HAVE_ARCH_STRCASECMP
+int strcasecmp(const char *s1, const char *s2)
+{
+ int c1, c2;
+
+ do {
+ c1 = tolower(*s1++);
+ c2 = tolower(*s2++);
+ } while (c1 == c2 && c1 != 0);
+ return c1 - c2;
+}
+EXPORT_SYMBOL(strcasecmp);
+#endif
+
+#ifndef __HAVE_ARCH_STRCPY
+char *strcpy(char *dest, const char *src)
+{
+ char *tmp = dest;
+
+ while ((*dest++ = *src++) != '\0')
+ /* nothing */;
+ return tmp;
+}
+EXPORT_SYMBOL(strcpy);
+#endif
+
+#ifndef __HAVE_ARCH_STRNCPY
+char *strncpy(char *dest, const char *src, size_t count)
+{
+ char *tmp = dest;
+
+ while (count) {
+ if ((*tmp = *src) != 0)
+ src++;
+ tmp++;
+ count--;
+ }
+ return dest;
+}
+EXPORT_SYMBOL(strncpy);
+#endif
+
+#ifndef __HAVE_ARCH_STRLCPY
+size_t strlcpy(char *dest, const char *src, size_t size)
+{
+ size_t ret = strlen(src);
+
+ if (size) {
+ size_t len = (ret >= size) ? size - 1 : ret;
+ __builtin_memcpy(dest, src, len);
+ dest[len] = '\0';
+ }
+ return ret;
+}
+EXPORT_SYMBOL(strlcpy);
+#endif
+
+#ifndef __HAVE_ARCH_STRSCPY
+ssize_t strscpy(char *dest, const char *src, size_t count)
+{
+ const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
+ size_t max = count;
+ long res = 0;
+
+ if (count == 0 || WARN_ON_ONCE(count > INT_MAX))
+ return -E2BIG;
+
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ /*
+ * If src is unaligned, don't cross a page boundary,
+ * since we don't know if the next page is mapped.
+ */
+ if ((long)src & (sizeof(long) - 1)) {
+ size_t limit = PAGE_SIZE - ((long)src & (PAGE_SIZE - 1));
+ if (limit < max)
+ max = limit;
+ }
+#else
+ /* If src or dest is unaligned, don't do word-at-a-time. */
+ if (((long) dest | (long) src) & (sizeof(long) - 1))
+ max = 0;
+#endif
+
+ /*
+ * read_word_at_a_time() below may read uninitialized bytes after the
+ * trailing zero and use them in comparisons. Disable this optimization
+ * under KMSAN to prevent false positive reports.
+ */
+ if (IS_ENABLED(CONFIG_KMSAN))
+ max = 0;
+
+ while (max >= sizeof(unsigned long)) {
+ unsigned long c, data;
+
+ c = read_word_at_a_time(src+res);
+ if (has_zero(c, &data, &constants)) {
+ data = prep_zero_mask(c, data, &constants);
+ data = create_zero_mask(data);
+ *(unsigned long *)(dest+res) = c & zero_bytemask(data);
+ return res + find_zero(data);
+ }
+ *(unsigned long *)(dest+res) = c;
+ res += sizeof(unsigned long);
+ count -= sizeof(unsigned long);
+ max -= sizeof(unsigned long);
+ }
+
+ while (count) {
+ char c;
+
+ c = src[res];
+ dest[res] = c;
+ if (!c)
+ return res;
+ res++;
+ count--;
+ }
+
+ /* Hit buffer length without finding a NUL; force NUL-termination. */
+ if (res)
+ dest[res-1] = '\0';
+
+ return -E2BIG;
+}
+EXPORT_SYMBOL(strscpy);
+#endif
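+
+/*
+ * An illustrative sketch of the strscpy() return contract (assuming a
+ * small local buffer; the name example is hypothetical): unlike
+ * strlcpy(), which always returns strlen(src), strscpy() returns the
+ * number of characters copied, or -E2BIG if the source was truncated.
+ */
+#if 0	/* example only -- not compiled */
+static void example(void)
+{
+ char buf[4];
+
+ /* Fits: returns 2 and buf is "hi". */
+ WARN_ON(strscpy(buf, "hi", sizeof(buf)) != 2);
+ /* Truncated: returns -E2BIG and buf is "too", still NUL-terminated. */
+ WARN_ON(strscpy(buf, "too long", sizeof(buf)) != -E2BIG);
+}
+#endif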
+
+/**
+ * stpcpy - copy a string from src to dest returning a pointer to the new end
+ * of dest, including src's %NUL-terminator. May overrun dest.
+ * @dest: pointer to end of string being copied into. Must be large enough
+ * to receive copy.
+ * @src: pointer to the beginning of string being copied from. Must not overlap
+ * dest.
+ *
+ * stpcpy differs from strcpy in a key way: the return value is a pointer
+ * to the new %NUL-terminating character in @dest. (For strcpy, the return
+ * value is a pointer to the start of @dest). This interface is considered
+ * unsafe as it doesn't perform bounds checking of the inputs. As such it's
+ * not recommended for usage. Instead, its definition is provided in case
+ * the compiler lowers other libcalls to stpcpy.
+ */
+char *stpcpy(char *__restrict__ dest, const char *__restrict__ src);
+char *stpcpy(char *__restrict__ dest, const char *__restrict__ src)
+{
+ while ((*dest++ = *src++) != '\0')
+ /* nothing */;
+ return --dest;
+}
+EXPORT_SYMBOL(stpcpy);
+
+#ifndef __HAVE_ARCH_STRCAT
+char *strcat(char *dest, const char *src)
+{
+ char *tmp = dest;
+
+ while (*dest)
+ dest++;
+ while ((*dest++ = *src++) != '\0')
+ ;
+ return tmp;
+}
+EXPORT_SYMBOL(strcat);
+#endif
+
+#ifndef __HAVE_ARCH_STRNCAT
+char *strncat(char *dest, const char *src, size_t count)
+{
+ char *tmp = dest;
+
+ if (count) {
+ while (*dest)
+ dest++;
+ while ((*dest++ = *src++) != 0) {
+ if (--count == 0) {
+ *dest = '\0';
+ break;
+ }
+ }
+ }
+ return tmp;
+}
+EXPORT_SYMBOL(strncat);
+#endif
+
+#ifndef __HAVE_ARCH_STRLCAT
+size_t strlcat(char *dest, const char *src, size_t count)
+{
+ size_t dsize = strlen(dest);
+ size_t len = strlen(src);
+ size_t res = dsize + len;
+
+ /* This would be a bug */
+ BUG_ON(dsize >= count);
+
+ dest += dsize;
+ count -= dsize;
+ if (len >= count)
+ len = count-1;
+ __builtin_memcpy(dest, src, len);
+ dest[len] = 0;
+ return res;
+}
+EXPORT_SYMBOL(strlcat);
+#endif
+
+#ifndef __HAVE_ARCH_STRCMP
+/**
+ * strcmp - Compare two strings
+ * @cs: One string
+ * @ct: Another string
+ */
+int strcmp(const char *cs, const char *ct)
+{
+ unsigned char c1, c2;
+
+ while (1) {
+ c1 = *cs++;
+ c2 = *ct++;
+ if (c1 != c2)
+ return c1 < c2 ? -1 : 1;
+ if (!c1)
+ break;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(strcmp);
+#endif
+
+#ifndef __HAVE_ARCH_STRNCMP
+/**
+ * strncmp - Compare two length-limited strings
+ * @cs: One string
+ * @ct: Another string
+ * @count: The maximum number of bytes to compare
+ */
+int strncmp(const char *cs, const char *ct, size_t count)
+{
+ unsigned char c1, c2;
+
+ while (count) {
+ c1 = *cs++;
+ c2 = *ct++;
+ if (c1 != c2)
+ return c1 < c2 ? -1 : 1;
+ if (!c1)
+ break;
+ count--;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(strncmp);
+#endif
+
+#ifndef __HAVE_ARCH_STRCHR
+/**
+ * strchr - Find the first occurrence of a character in a string
+ * @s: The string to be searched
+ * @c: The character to search for
+ *
+ * Note that the %NUL-terminator is considered part of the string, and can
+ * be searched for.
+ */
+char *strchr(const char *s, int c)
+{
+ for (; *s != (char)c; ++s)
+ if (*s == '\0')
+ return NULL;
+ return (char *)s;
+}
+EXPORT_SYMBOL(strchr);
+#endif
+
+#ifndef __HAVE_ARCH_STRCHRNUL
+/**
+ * strchrnul - Find and return a character in a string, or end of string
+ * @s: The string to be searched
+ * @c: The character to search for
+ *
+ * Returns a pointer to the first occurrence of 'c' in s, or a pointer to
+ * the null byte at the end of s if c is not found.
+ */
+char *strchrnul(const char *s, int c)
+{
+ while (*s && *s != (char)c)
+ s++;
+ return (char *)s;
+}
+EXPORT_SYMBOL(strchrnul);
+#endif
+
+/**
+ * strnchrnul - Find and return a character in a length limited string,
+ * or end of string
+ * @s: The string to be searched
+ * @count: The number of characters to be searched
+ * @c: The character to search for
+ *
+ * Returns a pointer to the first occurrence of 'c' in s, or a pointer to
+ * the last character of the string if c is not found.
+ */
+char *strnchrnul(const char *s, size_t count, int c)
+{
+ while (count-- && *s && *s != (char)c)
+ s++;
+ return (char *)s;
+}
+
+#ifndef __HAVE_ARCH_STRRCHR
+/**
+ * strrchr - Find the last occurrence of a character in a string
+ * @s: The string to be searched
+ * @c: The character to search for
+ */
+char *strrchr(const char *s, int c)
+{
+ const char *last = NULL;
+ do {
+ if (*s == (char)c)
+ last = s;
+ } while (*s++);
+ return (char *)last;
+}
+EXPORT_SYMBOL(strrchr);
+#endif
+
+#ifndef __HAVE_ARCH_STRNCHR
+/**
+ * strnchr - Find a character in a length limited string
+ * @s: The string to be searched
+ * @count: The number of characters to be searched
+ * @c: The character to search for
+ *
+ * Note that the %NUL-terminator is considered part of the string, and can
+ * be searched for.
+ */
+char *strnchr(const char *s, size_t count, int c)
+{
+ while (count--) {
+ if (*s == (char)c)
+ return (char *)s;
+ if (*s++ == '\0')
+ break;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(strnchr);
+#endif
+
+#ifndef __HAVE_ARCH_STRLEN
+size_t strlen(const char *s)
+{
+ const char *sc;
+
+ for (sc = s; *sc != '\0'; ++sc)
+ /* nothing */;
+ return sc - s;
+}
+EXPORT_SYMBOL(strlen);
+#endif
+
+#ifndef __HAVE_ARCH_STRNLEN
+size_t strnlen(const char *s, size_t count)
+{
+ const char *sc;
+
+ for (sc = s; count-- && *sc != '\0'; ++sc)
+ /* nothing */;
+ return sc - s;
+}
+EXPORT_SYMBOL(strnlen);
+#endif
+
+#ifndef __HAVE_ARCH_STRSPN
+/**
+ * strspn - Calculate the length of the initial substring of @s which only contains letters in @accept
+ * @s: The string to be searched
+ * @accept: The string to search for
+ */
+size_t strspn(const char *s, const char *accept)
+{
+ const char *p;
+
+ for (p = s; *p != '\0'; ++p) {
+ if (!strchr(accept, *p))
+ break;
+ }
+ return p - s;
+}
+EXPORT_SYMBOL(strspn);
+#endif
+
+#ifndef __HAVE_ARCH_STRCSPN
+/**
+ * strcspn - Calculate the length of the initial substring of @s which does not contain letters in @reject
+ * @s: The string to be searched
+ * @reject: The string to avoid
+ */
+size_t strcspn(const char *s, const char *reject)
+{
+ const char *p;
+
+ for (p = s; *p != '\0'; ++p) {
+ if (strchr(reject, *p))
+ break;
+ }
+ return p - s;
+}
+EXPORT_SYMBOL(strcspn);
+#endif
+
+#ifndef __HAVE_ARCH_STRPBRK
+/**
+ * strpbrk - Find the first occurrence of a set of characters
+ * @cs: The string to be searched
+ * @ct: The characters to search for
+ */
+char *strpbrk(const char *cs, const char *ct)
+{
+ const char *sc;
+
+ for (sc = cs; *sc != '\0'; ++sc) {
+ if (strchr(ct, *sc))
+ return (char *)sc;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(strpbrk);
+#endif
+
+#ifndef __HAVE_ARCH_STRSEP
+/**
+ * strsep - Split a string into tokens
+ * @s: The string to be searched
+ * @ct: The characters to search for
+ *
+ * strsep() updates @s to point after the token, ready for the next call.
+ *
+ * It returns empty tokens, too, behaving exactly like the libc function
+ * of that name. In fact, it was stolen from glibc2 and de-fancy-fied.
+ * Same semantics, slimmer shape. ;)
+ */
+char *strsep(char **s, const char *ct)
+{
+ char *sbegin = *s;
+ char *end;
+
+ if (sbegin == NULL)
+ return NULL;
+
+ end = strpbrk(sbegin, ct);
+ if (end)
+ *end++ = '\0';
+ *s = end;
+ return sbegin;
+}
+EXPORT_SYMBOL(strsep);
+#endif
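+
+/*
+ * An illustrative sketch of strsep() iteration (assuming a writable
+ * buffer, since strsep() modifies the string in place; the name example
+ * is hypothetical):
+ */
+#if 0	/* example only -- not compiled */
+static void example(void)
+{
+ char buf[] = "a,b,,c";
+ char *s = buf, *tok;
+
+ /* Yields "a", "b", "" (an empty token), then "c". */
+ while ((tok = strsep(&s, ",")) != NULL)
+ pr_info("token: '%s'\n", tok);
+}
+#endif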
+
+#ifndef __HAVE_ARCH_MEMSET
+/**
+ * memset - Fill a region of memory with the given value
+ * @s: Pointer to the start of the area.
+ * @c: The byte to fill the area with
+ * @count: The size of the area.
+ *
+ * Do not use memset() to access IO space, use memset_io() instead.
+ */
+void *memset(void *s, int c, size_t count)
+{
+ char *xs = s;
+
+ while (count--)
+ *xs++ = c;
+ return s;
+}
+EXPORT_SYMBOL(memset);
+#endif
+
+#ifndef __HAVE_ARCH_MEMSET16
+/**
+ * memset16() - Fill a memory area with a uint16_t
+ * @s: Pointer to the start of the area.
+ * @v: The value to fill the area with
+ * @count: The number of values to store
+ *
+ * Differs from memset() in that it fills with a uint16_t instead
+ * of a byte. Remember that @count is the number of uint16_ts to
+ * store, not the number of bytes.
+ */
+void *memset16(uint16_t *s, uint16_t v, size_t count)
+{
+ uint16_t *xs = s;
+
+ while (count--)
+ *xs++ = v;
+ return s;
+}
+EXPORT_SYMBOL(memset16);
+#endif
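+
+/*
+ * An illustrative sketch (assuming a 16bpp, RGB565-style scanline; the
+ * name example is hypothetical) showing that @count is in u16s:
+ */
+#if 0	/* example only -- not compiled */
+static void example(void)
+{
+ u16 line[320];
+
+ /* Fill 320 pixels with red, not 320 bytes. */
+ memset16(line, 0xf800, ARRAY_SIZE(line));
+}
+#endif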
+
+#ifndef __HAVE_ARCH_MEMSET32
+/**
+ * memset32() - Fill a memory area with a uint32_t
+ * @s: Pointer to the start of the area.
+ * @v: The value to fill the area with
+ * @count: The number of values to store
+ *
+ * Differs from memset() in that it fills with a uint32_t instead
+ * of a byte. Remember that @count is the number of uint32_ts to
+ * store, not the number of bytes.
+ */
+void *memset32(uint32_t *s, uint32_t v, size_t count)
+{
+ uint32_t *xs = s;
+
+ while (count--)
+ *xs++ = v;
+ return s;
+}
+EXPORT_SYMBOL(memset32);
+#endif
+
+#ifndef __HAVE_ARCH_MEMSET64
+/**
+ * memset64() - Fill a memory area with a uint64_t
+ * @s: Pointer to the start of the area.
+ * @v: The value to fill the area with
+ * @count: The number of values to store
+ *
+ * Differs from memset() in that it fills with a uint64_t instead
+ * of a byte. Remember that @count is the number of uint64_ts to
+ * store, not the number of bytes.
+ */
+void *memset64(uint64_t *s, uint64_t v, size_t count)
+{
+ uint64_t *xs = s;
+
+ while (count--)
+ *xs++ = v;
+ return s;
+}
+EXPORT_SYMBOL(memset64);
+#endif
+
+#ifndef __HAVE_ARCH_MEMCPY
+/**
+ * memcpy - Copy one area of memory to another
+ * @dest: Where to copy to
+ * @src: Where to copy from
+ * @count: The size of the area.
+ *
+ * You should not use this function to access IO space, use memcpy_toio()
+ * or memcpy_fromio() instead.
+ */
+void *memcpy(void *dest, const void *src, size_t count)
+{
+ char *tmp = dest;
+ const char *s = src;
+
+ while (count--)
+ *tmp++ = *s++;
+ return dest;
+}
+EXPORT_SYMBOL(memcpy);
+#endif
+
+#ifndef __HAVE_ARCH_MEMMOVE
+/**
+ * memmove - Copy one area of memory to another
+ * @dest: Where to copy to
+ * @src: Where to copy from
+ * @count: The size of the area.
+ *
+ * Unlike memcpy(), memmove() copes with overlapping areas.
+ */
+void *memmove(void *dest, const void *src, size_t count)
+{
+ char *tmp;
+ const char *s;
+
+ if (dest <= src) {
+ tmp = dest;
+ s = src;
+ while (count--)
+ *tmp++ = *s++;
+ } else {
+ tmp = dest;
+ tmp += count;
+ s = src;
+ s += count;
+ while (count--)
+ *--tmp = *--s;
+ }
+ return dest;
+}
+EXPORT_SYMBOL(memmove);
+#endif
+
+#ifndef __HAVE_ARCH_MEMCMP
+/**
+ * memcmp - Compare two areas of memory
+ * @cs: One area of memory
+ * @ct: Another area of memory
+ * @count: The size of the area.
+ */
+#undef memcmp
+__visible int memcmp(const void *cs, const void *ct, size_t count)
+{
+ const unsigned char *su1, *su2;
+ int res = 0;
+
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ if (count >= sizeof(unsigned long)) {
+ const unsigned long *u1 = cs;
+ const unsigned long *u2 = ct;
+ do {
+ if (get_unaligned(u1) != get_unaligned(u2))
+ break;
+ u1++;
+ u2++;
+ count -= sizeof(unsigned long);
+ } while (count >= sizeof(unsigned long));
+ cs = u1;
+ ct = u2;
+ }
+#endif
+ for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--)
+ if ((res = *su1 - *su2) != 0)
+ break;
+ return res;
+}
+EXPORT_SYMBOL(memcmp);
+#endif
+
+#ifndef __HAVE_ARCH_BCMP
+/**
+ * bcmp - returns 0 if and only if the buffers have identical contents.
+ * @a: pointer to first buffer.
+ * @b: pointer to second buffer.
+ * @len: size of buffers.
+ *
+ * The sign or magnitude of a non-zero return value has no particular
+ * meaning, and architectures may implement their own more efficient bcmp(). So
+ * while this particular implementation is a simple (tail) call to memcmp, do
+ * not rely on anything but whether the return value is zero or non-zero.
+ */
+int bcmp(const void *a, const void *b, size_t len)
+{
+ return memcmp(a, b, len);
+}
+EXPORT_SYMBOL(bcmp);
+#endif
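+
+/*
+ * Example (illustrative sketch): only the zero/non-zero result of bcmp()
+ * is meaningful, so use it as
+ *
+ *	if (!bcmp(a, b, len))
+ *		handle_match();
+ *
+ * and never compare the result against < 0 or > 0. "handle_match" is a
+ * hypothetical callee.
+ */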
+
+#ifndef __HAVE_ARCH_MEMSCAN
+/**
+ * memscan - Find a character in an area of memory.
+ * @addr: The memory area
+ * @c: The byte to search for
+ * @size: The size of the area.
+ *
+ * returns the address of the first occurrence of @c, or 1 byte past
+ * the area if @c is not found
+ */
+void *memscan(void *addr, int c, size_t size)
+{
+ unsigned char *p = addr;
+
+ while (size) {
+ if (*p == (unsigned char)c)
+ return (void *)p;
+ p++;
+ size--;
+ }
+ return (void *)p;
+}
+EXPORT_SYMBOL(memscan);
+#endif
+
+#ifndef __HAVE_ARCH_STRSTR
+/**
+ * strstr - Find the first substring in a %NUL terminated string
+ * @s1: The string to be searched
+ * @s2: The string to search for
+ */
+char *strstr(const char *s1, const char *s2)
+{
+ size_t l1, l2;
+
+ l2 = strlen(s2);
+ if (!l2)
+ return (char *)s1;
+ l1 = strlen(s1);
+ while (l1 >= l2) {
+ l1--;
+ if (!memcmp(s1, s2, l2))
+ return (char *)s1;
+ s1++;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(strstr);
+#endif
+
+#ifndef __HAVE_ARCH_STRNSTR
+/**
+ * strnstr - Find the first substring in a length-limited string
+ * @s1: The string to be searched
+ * @s2: The string to search for
+ * @len: the maximum number of characters to search
+ */
+char *strnstr(const char *s1, const char *s2, size_t len)
+{
+ size_t l2;
+
+ l2 = strlen(s2);
+ if (!l2)
+ return (char *)s1;
+ while (len >= l2) {
+ len--;
+ if (!memcmp(s1, s2, l2))
+ return (char *)s1;
+ s1++;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(strnstr);
+#endif
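+
+/*
+ * Example (illustrative sketch): strnstr() only considers the first @len
+ * characters of the searched string:
+ *
+ *	strnstr("foobar", "bar", 6);	returns a pointer to "bar"
+ *	strnstr("foobar", "bar", 4);	returns NULL
+ */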
+
+#ifndef __HAVE_ARCH_MEMCHR
+/**
+ * memchr - Find a character in an area of memory.
+ * @s: The memory area
+ * @c: The byte to search for
+ * @n: The size of the area.
+ *
+ * returns the address of the first occurrence of @c, or %NULL
+ * if @c is not found
+ */
+void *memchr(const void *s, int c, size_t n)
+{
+ const unsigned char *p = s;
+ while (n-- != 0) {
+ if ((unsigned char)c == *p++) {
+ return (void *)(p - 1);
+ }
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(memchr);
+#endif
+
+static void *check_bytes8(const u8 *start, u8 value, unsigned int bytes)
+{
+ while (bytes) {
+ if (*start != value)
+ return (void *)start;
+ start++;
+ bytes--;
+ }
+ return NULL;
+}
+
+/**
+ * memchr_inv - Find a character other than @c in an area of memory.
+ * @start: The memory area
+ * @c: The character to skip over
+ * @bytes: The size of the area.
+ *
+ * returns the address of the first character other than @c, or %NULL
+ * if the whole buffer contains just @c.
+ */
+void *memchr_inv(const void *start, int c, size_t bytes)
+{
+ u8 value = c;
+ u64 value64;
+ unsigned int words, prefix;
+
+ if (bytes <= 16)
+ return check_bytes8(start, value, bytes);
+
+ value64 = value;
+#if defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
+ value64 *= 0x0101010101010101ULL;
+#elif defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER)
+ value64 *= 0x01010101;
+ value64 |= value64 << 32;
+#else
+ value64 |= value64 << 8;
+ value64 |= value64 << 16;
+ value64 |= value64 << 32;
+#endif
+
+ prefix = (unsigned long)start % 8;
+ if (prefix) {
+ u8 *r;
+
+ prefix = 8 - prefix;
+ r = check_bytes8(start, value, prefix);
+ if (r)
+ return r;
+ start += prefix;
+ bytes -= prefix;
+ }
+
+ words = bytes / 8;
+
+ while (words) {
+ if (*(u64 *)start != value64)
+ return check_bytes8(start, value, 8);
+ start += 8;
+ words--;
+ }
+
+ return check_bytes8(start, value, bytes % 8);
+}
+EXPORT_SYMBOL(memchr_inv);
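+
+/*
+ * Example (illustrative sketch): memchr_inv() is the idiomatic way to
+ * verify that a buffer holds a single value throughout, e.g. that a
+ * reserved field is all zeroes; "hdr" is a hypothetical structure:
+ *
+ *	if (memchr_inv(hdr->reserved, 0, sizeof(hdr->reserved)))
+ *		return -EINVAL;
+ */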
diff --git a/lib/string_helpers.c b/lib/string_helpers.c
new file mode 100644
index 000000000..9982344cc
--- /dev/null
+++ b/lib/string_helpers.c
@@ -0,0 +1,1049 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Helpers for formatting and printing strings
+ *
+ * Copyright 31 August 2008 James Bottomley
+ * Copyright (C) 2013, Intel Corporation
+ */
+#include <linux/bug.h>
+#include <linux/kernel.h>
+#include <linux/math64.h>
+#include <linux/export.h>
+#include <linux/ctype.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/limits.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/string_helpers.h>
+
+/**
+ * string_get_size - get the size in the specified units
+ * @size: The size to be converted in blocks
+ * @blk_size: Size of the block (use 1 for size in bytes)
+ * @units: units to use (powers of 1000 or 1024)
+ * @buf: buffer to format to
+ * @len: length of buffer
+ *
+ * This function returns a string formatted to 3 significant figures
+ * giving the size in the required units. @buf should have room for
+ * at least 9 bytes and will always be zero terminated.
+ *
+ */
+void string_get_size(u64 size, u64 blk_size, const enum string_size_units units,
+ char *buf, int len)
+{
+ static const char *const units_10[] = {
+ "B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"
+ };
+ static const char *const units_2[] = {
+ "B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"
+ };
+ static const char *const *const units_str[] = {
+ [STRING_UNITS_10] = units_10,
+ [STRING_UNITS_2] = units_2,
+ };
+ static const unsigned int divisor[] = {
+ [STRING_UNITS_10] = 1000,
+ [STRING_UNITS_2] = 1024,
+ };
+ static const unsigned int rounding[] = { 500, 50, 5 };
+ int i = 0, j;
+ u32 remainder = 0, sf_cap;
+ char tmp[8];
+ const char *unit;
+
+ tmp[0] = '\0';
+
+ if (blk_size == 0)
+ size = 0;
+ if (size == 0)
+ goto out;
+
+ /* This is Napier's algorithm. Reduce the original block size to
+ *
+ * coefficient * divisor[units]^i
+ *
+ * we do the reduction so both coefficients are just under 32 bits so
+ * that multiplying them together won't overflow 64 bits and we keep
+ * as much precision as possible in the numbers.
+ *
+ * Note: it's safe to throw away the remainders here because all the
+ * precision is in the coefficients.
+ */
+ while (blk_size >> 32) {
+ do_div(blk_size, divisor[units]);
+ i++;
+ }
+
+ while (size >> 32) {
+ do_div(size, divisor[units]);
+ i++;
+ }
+
+ /* now perform the actual multiplication keeping i as the sum of the
+ * two logarithms */
+ size *= blk_size;
+
+ /* and logarithmically reduce it until it's just under the divisor */
+ while (size >= divisor[units]) {
+ remainder = do_div(size, divisor[units]);
+ i++;
+ }
+
+ /* work out in j how many digits of precision we need from the
+ * remainder */
+ sf_cap = size;
+ for (j = 0; sf_cap*10 < 1000; j++)
+ sf_cap *= 10;
+
+ if (units == STRING_UNITS_2) {
+ /* express the remainder as a decimal. It's currently the
+ * numerator of a fraction whose denominator is
+ * divisor[units], which is 1 << 10 for STRING_UNITS_2 */
+ remainder *= 1000;
+ remainder >>= 10;
+ }
+
+ /* add a 5 to the digit below what will be printed to ensure
+ * an arithmetical round up and carry it through to size */
+ remainder += rounding[j];
+ if (remainder >= 1000) {
+ remainder -= 1000;
+ size += 1;
+ }
+
+ if (j) {
+ snprintf(tmp, sizeof(tmp), ".%03u", remainder);
+ tmp[j+1] = '\0';
+ }
+
+ out:
+ if (i >= ARRAY_SIZE(units_2))
+ unit = "UNK";
+ else
+ unit = units_str[units][i];
+
+ snprintf(buf, len, "%u%s %s", (u32)size,
+ tmp, unit);
+}
+EXPORT_SYMBOL(string_get_size);
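+
+/*
+ * Example (illustrative sketch): 8192 blocks of 512 bytes is 4 MiB, so
+ * with binary units the buffer receives "4.00 MiB":
+ *
+ *	char buf[10];
+ *
+ *	string_get_size(8192, 512, STRING_UNITS_2, buf, sizeof(buf));
+ */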
+
+/**
+ * parse_int_array_user - Split string into a sequence of integers
+ * @from: The user space buffer to read from
+ * @count: The maximum number of bytes to read
+ * @array: Returned pointer to sequence of integers
+ *
+ * On success @array is allocated and initialized with a sequence of
+ * integers extracted from @from, plus a leading element that holds the
+ * count of the integers that follow.
+ *
+ * Caller takes responsibility for freeing @array when it is no longer
+ * needed.
+ */
+int parse_int_array_user(const char __user *from, size_t count, int **array)
+{
+ int *ints, nints;
+ char *buf;
+ int ret = 0;
+
+ buf = memdup_user_nul(from, count);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ get_options(buf, 0, &nints);
+ if (!nints) {
+ ret = -ENOENT;
+ goto free_buf;
+ }
+
+ ints = kcalloc(nints + 1, sizeof(*ints), GFP_KERNEL);
+ if (!ints) {
+ ret = -ENOMEM;
+ goto free_buf;
+ }
+
+ get_options(buf, nints + 1, ints);
+ *array = ints;
+
+free_buf:
+ kfree(buf);
+ return ret;
+}
+EXPORT_SYMBOL(parse_int_array_user);
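+
+/*
+ * Example (illustrative sketch): for the user input "1,2,3",
+ * parse_int_array_user() returns 0 and sets *array to {3, 1, 2, 3}:
+ * element 0 holds the count and the parsed values follow it.
+ */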
+
+static bool unescape_space(char **src, char **dst)
+{
+ char *p = *dst, *q = *src;
+
+ switch (*q) {
+ case 'n':
+ *p = '\n';
+ break;
+ case 'r':
+ *p = '\r';
+ break;
+ case 't':
+ *p = '\t';
+ break;
+ case 'v':
+ *p = '\v';
+ break;
+ case 'f':
+ *p = '\f';
+ break;
+ default:
+ return false;
+ }
+ *dst += 1;
+ *src += 1;
+ return true;
+}
+
+static bool unescape_octal(char **src, char **dst)
+{
+ char *p = *dst, *q = *src;
+ u8 num;
+
+ if (isodigit(*q) == 0)
+ return false;
+
+ num = (*q++) & 7;
+ while (num < 32 && isodigit(*q) && (q - *src < 3)) {
+ num <<= 3;
+ num += (*q++) & 7;
+ }
+ *p = num;
+ *dst += 1;
+ *src = q;
+ return true;
+}
+
+static bool unescape_hex(char **src, char **dst)
+{
+ char *p = *dst, *q = *src;
+ int digit;
+ u8 num;
+
+ if (*q++ != 'x')
+ return false;
+
+ num = digit = hex_to_bin(*q++);
+ if (digit < 0)
+ return false;
+
+ digit = hex_to_bin(*q);
+ if (digit >= 0) {
+ q++;
+ num = (num << 4) | digit;
+ }
+ *p = num;
+ *dst += 1;
+ *src = q;
+ return true;
+}
+
+static bool unescape_special(char **src, char **dst)
+{
+ char *p = *dst, *q = *src;
+
+ switch (*q) {
+ case '\"':
+ *p = '\"';
+ break;
+ case '\\':
+ *p = '\\';
+ break;
+ case 'a':
+ *p = '\a';
+ break;
+ case 'e':
+ *p = '\e';
+ break;
+ default:
+ return false;
+ }
+ *dst += 1;
+ *src += 1;
+ return true;
+}
+
+/**
+ * string_unescape - unquote characters in the given string
+ * @src: source buffer (escaped)
+ * @dst: destination buffer (unescaped)
+ * @size: size of the destination buffer (0 for no limit)
+ * @flags: combination of the flags.
+ *
+ * Description:
+ * The function unquotes characters in the given string.
+ *
+ * Because the size of the output will be the same as or less than the size of
+ * the input, the transformation may be performed in place.
+ *
+ * Caller must provide valid source and destination pointers. Be aware that
+ * destination buffer will always be NULL-terminated. Source string must be
+ * NULL-terminated as well. The supported flags are::
+ *
+ * UNESCAPE_SPACE:
+ * '\f' - form feed
+ * '\n' - new line
+ * '\r' - carriage return
+ * '\t' - horizontal tab
+ * '\v' - vertical tab
+ * UNESCAPE_OCTAL:
+ * '\NNN' - byte with octal value NNN (1 to 3 digits)
+ * UNESCAPE_HEX:
+ * '\xHH' - byte with hexadecimal value HH (1 to 2 digits)
+ * UNESCAPE_SPECIAL:
+ * '\"' - double quote
+ * '\\' - backslash
+ * '\a' - alert (BEL)
+ * '\e' - escape
+ * UNESCAPE_ANY:
+ * all previous together
+ *
+ * Return:
+ * The amount of the characters processed to the destination buffer excluding
+ * trailing '\0' is returned.
+ */
+int string_unescape(char *src, char *dst, size_t size, unsigned int flags)
+{
+ char *out = dst;
+
+ while (*src && --size) {
+ if (src[0] == '\\' && src[1] != '\0' && size > 1) {
+ src++;
+ size--;
+
+ if (flags & UNESCAPE_SPACE &&
+ unescape_space(&src, &out))
+ continue;
+
+ if (flags & UNESCAPE_OCTAL &&
+ unescape_octal(&src, &out))
+ continue;
+
+ if (flags & UNESCAPE_HEX &&
+ unescape_hex(&src, &out))
+ continue;
+
+ if (flags & UNESCAPE_SPECIAL &&
+ unescape_special(&src, &out))
+ continue;
+
+ *out++ = '\\';
+ }
+ *out++ = *src++;
+ }
+ *out = '\0';
+
+ return out - dst;
+}
+EXPORT_SYMBOL(string_unescape);
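+
+/*
+ * Example (illustrative sketch): since the output never grows, the
+ * transformation can be done in place:
+ *
+ *	char s[] = "a\\tb";
+ *
+ *	string_unescape(s, s, 0, UNESCAPE_SPACE);
+ *
+ * s now holds "a", a tab and "b"; the function returns 3.
+ */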
+
+static bool escape_passthrough(unsigned char c, char **dst, char *end)
+{
+ char *out = *dst;
+
+ if (out < end)
+ *out = c;
+ *dst = out + 1;
+ return true;
+}
+
+static bool escape_space(unsigned char c, char **dst, char *end)
+{
+ char *out = *dst;
+ unsigned char to;
+
+ switch (c) {
+ case '\n':
+ to = 'n';
+ break;
+ case '\r':
+ to = 'r';
+ break;
+ case '\t':
+ to = 't';
+ break;
+ case '\v':
+ to = 'v';
+ break;
+ case '\f':
+ to = 'f';
+ break;
+ default:
+ return false;
+ }
+
+ if (out < end)
+ *out = '\\';
+ ++out;
+ if (out < end)
+ *out = to;
+ ++out;
+
+ *dst = out;
+ return true;
+}
+
+static bool escape_special(unsigned char c, char **dst, char *end)
+{
+ char *out = *dst;
+ unsigned char to;
+
+ switch (c) {
+ case '\\':
+ to = '\\';
+ break;
+ case '\a':
+ to = 'a';
+ break;
+ case '\e':
+ to = 'e';
+ break;
+ case '"':
+ to = '"';
+ break;
+ default:
+ return false;
+ }
+
+ if (out < end)
+ *out = '\\';
+ ++out;
+ if (out < end)
+ *out = to;
+ ++out;
+
+ *dst = out;
+ return true;
+}
+
+static bool escape_null(unsigned char c, char **dst, char *end)
+{
+ char *out = *dst;
+
+ if (c)
+ return false;
+
+ if (out < end)
+ *out = '\\';
+ ++out;
+ if (out < end)
+ *out = '0';
+ ++out;
+
+ *dst = out;
+ return true;
+}
+
+static bool escape_octal(unsigned char c, char **dst, char *end)
+{
+ char *out = *dst;
+
+ if (out < end)
+ *out = '\\';
+ ++out;
+ if (out < end)
+ *out = ((c >> 6) & 0x07) + '0';
+ ++out;
+ if (out < end)
+ *out = ((c >> 3) & 0x07) + '0';
+ ++out;
+ if (out < end)
+ *out = ((c >> 0) & 0x07) + '0';
+ ++out;
+
+ *dst = out;
+ return true;
+}
+
+static bool escape_hex(unsigned char c, char **dst, char *end)
+{
+ char *out = *dst;
+
+ if (out < end)
+ *out = '\\';
+ ++out;
+ if (out < end)
+ *out = 'x';
+ ++out;
+ if (out < end)
+ *out = hex_asc_hi(c);
+ ++out;
+ if (out < end)
+ *out = hex_asc_lo(c);
+ ++out;
+
+ *dst = out;
+ return true;
+}
+
+/**
+ * string_escape_mem - quote characters in the given memory buffer
+ * @src: source buffer (unescaped)
+ * @isz: source buffer size
+ * @dst: destination buffer (escaped)
+ * @osz: destination buffer size
+ * @flags: combination of the flags
+ * @only: NULL-terminated string containing characters used to limit
+ * the selected escape class. If characters are included in @only
+ * that would not normally be escaped by the classes selected
+ * in @flags, they will be copied to @dst unescaped.
+ *
+ * Description:
+ * The process of escaping a byte buffer includes several parts. They are
+ * applied in the following sequence.
+ *
+ * 1. The character is not matched to the one from @only string and thus
+ * must go as-is to the output.
+ * 2. The character is matched to the printable and ASCII classes, if asked,
+ * and in case of match it passes through to the output.
+ * 3. The character is matched to the printable or ASCII class, if asked,
+ * and in case of match it passes through to the output.
+ * 4. The character is checked if it falls into the class given by @flags.
+ * %ESCAPE_OCTAL and %ESCAPE_HEX are going last since they cover any
+ * character. Note that they actually can't go together, otherwise
+ * %ESCAPE_HEX will be ignored.
+ *
+ * Caller must provide valid source and destination pointers. Be aware that
+ * the destination buffer will not be NULL-terminated, thus the caller has
+ * to append it if needed. The supported flags are::
+ *
+ * %ESCAPE_SPACE: (special white space, not space itself)
+ * '\f' - form feed
+ * '\n' - new line
+ * '\r' - carriage return
+ * '\t' - horizontal tab
+ * '\v' - vertical tab
+ * %ESCAPE_SPECIAL:
+ * '\"' - double quote
+ * '\\' - backslash
+ * '\a' - alert (BEL)
+ * '\e' - escape
+ * %ESCAPE_NULL:
+ * '\0' - null
+ * %ESCAPE_OCTAL:
+ * '\NNN' - byte with octal value NNN (3 digits)
+ * %ESCAPE_ANY:
+ * all previous together
+ * %ESCAPE_NP:
+ * escape only non-printable characters, checked by isprint()
+ * %ESCAPE_ANY_NP:
+ * all previous together
+ * %ESCAPE_HEX:
+ * '\xHH' - byte with hexadecimal value HH (2 digits)
+ * %ESCAPE_NA:
+ * escape only non-ascii characters, checked by isascii()
+ * %ESCAPE_NAP:
+ * escape only non-printable or non-ascii characters
+ * %ESCAPE_APPEND:
+ * append characters from @only to be escaped by the given classes
+ *
+ * %ESCAPE_APPEND would help to pass additional characters to be escaped,
+ * when one of %ESCAPE_NP, %ESCAPE_NA, or %ESCAPE_NAP is provided.
+ *
+ * One notable caveat: %ESCAPE_NAP, %ESCAPE_NP and %ESCAPE_NA have higher
+ * priority than the rest of the flags (%ESCAPE_NAP is the highest).
+ * It doesn't make much sense to use either of them without %ESCAPE_OCTAL
+ * or %ESCAPE_HEX, because they cover most of the other character classes.
+ * %ESCAPE_NAP can utilize %ESCAPE_SPACE or %ESCAPE_SPECIAL in addition to
+ * the above.
+ *
+ * Return:
+ * The total size of the escaped output that would be generated for
+ * the given input and flags. To check whether the output was
+ * truncated, compare the return value to osz. There is room left in
+ * dst for a '\0' terminator if and only if ret < osz.
+ */
+int string_escape_mem(const char *src, size_t isz, char *dst, size_t osz,
+ unsigned int flags, const char *only)
+{
+ char *p = dst;
+ char *end = p + osz;
+ bool is_dict = only && *only;
+ bool is_append = flags & ESCAPE_APPEND;
+
+ while (isz--) {
+ unsigned char c = *src++;
+ bool in_dict = is_dict && strchr(only, c);
+
+ /*
+ * Apply rules in the following sequence:
+ * - the @only string is supplied and does not contain a
+ * character under question
+ * - the character is printable and ASCII, when @flags has
+ * %ESCAPE_NAP bit set
+ * - the character is printable, when @flags has
+ * %ESCAPE_NP bit set
+ * - the character is ASCII, when @flags has
+ * %ESCAPE_NA bit set
+ * - the character doesn't fall into a class of symbols
+ * defined by given @flags
+ * In these cases we just pass through a character to the
+ * output buffer.
+ *
+ * When %ESCAPE_APPEND is passed, the characters from @only
+ * have been excluded from the %ESCAPE_NAP, %ESCAPE_NP, and
+ * %ESCAPE_NA cases.
+ */
+ if (!(is_append || in_dict) && is_dict &&
+ escape_passthrough(c, &p, end))
+ continue;
+
+ if (!(is_append && in_dict) && isascii(c) && isprint(c) &&
+ flags & ESCAPE_NAP && escape_passthrough(c, &p, end))
+ continue;
+
+ if (!(is_append && in_dict) && isprint(c) &&
+ flags & ESCAPE_NP && escape_passthrough(c, &p, end))
+ continue;
+
+ if (!(is_append && in_dict) && isascii(c) &&
+ flags & ESCAPE_NA && escape_passthrough(c, &p, end))
+ continue;
+
+ if (flags & ESCAPE_SPACE && escape_space(c, &p, end))
+ continue;
+
+ if (flags & ESCAPE_SPECIAL && escape_special(c, &p, end))
+ continue;
+
+ if (flags & ESCAPE_NULL && escape_null(c, &p, end))
+ continue;
+
+ /* ESCAPE_OCTAL and ESCAPE_HEX always go last */
+ if (flags & ESCAPE_OCTAL && escape_octal(c, &p, end))
+ continue;
+
+ if (flags & ESCAPE_HEX && escape_hex(c, &p, end))
+ continue;
+
+ escape_passthrough(c, &p, end);
+ }
+
+ return p - dst;
+}
+EXPORT_SYMBOL(string_escape_mem);
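+
+/*
+ * Example (illustrative sketch): the return value is the full size the
+ * escaped output needs, so a first call with a zero-sized buffer can
+ * size an allocation; kstrdup_quotable() below uses exactly this
+ * two-pass pattern:
+ *
+ *	len = string_escape_mem(src, slen, NULL, 0, flags, only);
+ *	dst = kmalloc(len + 1, GFP_KERNEL);
+ */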
+
+/*
+ * Return an allocated string that has been escaped of special characters
+ * and double quotes, making it safe to log in quotes.
+ */
+char *kstrdup_quotable(const char *src, gfp_t gfp)
+{
+ size_t slen, dlen;
+ char *dst;
+ const int flags = ESCAPE_HEX;
+ const char esc[] = "\f\n\r\t\v\a\e\\\"";
+
+ if (!src)
+ return NULL;
+ slen = strlen(src);
+
+ dlen = string_escape_mem(src, slen, NULL, 0, flags, esc);
+ dst = kmalloc(dlen + 1, gfp);
+ if (!dst)
+ return NULL;
+
+ WARN_ON(string_escape_mem(src, slen, dst, dlen, flags, esc) != dlen);
+ dst[dlen] = '\0';
+
+ return dst;
+}
+EXPORT_SYMBOL_GPL(kstrdup_quotable);
+
+/*
+ * Returns an allocated NULL-terminated string containing the process
+ * command line, with inter-argument NULLs replaced with spaces,
+ * and other special characters escaped.
+ */
+char *kstrdup_quotable_cmdline(struct task_struct *task, gfp_t gfp)
+{
+ char *buffer, *quoted;
+ int i, res;
+
+ buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!buffer)
+ return NULL;
+
+ res = get_cmdline(task, buffer, PAGE_SIZE - 1);
+ buffer[res] = '\0';
+
+ /* Collapse trailing NULLs, leave res pointing to last non-NULL. */
+ while (--res >= 0 && buffer[res] == '\0')
+ ;
+
+ /* Replace inter-argument NULLs. */
+ for (i = 0; i <= res; i++)
+ if (buffer[i] == '\0')
+ buffer[i] = ' ';
+
+ /* Make sure result is printable. */
+ quoted = kstrdup_quotable(buffer, gfp);
+ kfree(buffer);
+ return quoted;
+}
+EXPORT_SYMBOL_GPL(kstrdup_quotable_cmdline);
+
+/*
+ * Returns an allocated NULL-terminated string containing the pathname,
+ * with special characters escaped, so it can be safely logged. If
+ * there is an error, the leading character will be "<".
+ */
+char *kstrdup_quotable_file(struct file *file, gfp_t gfp)
+{
+ char *temp, *pathname;
+
+ if (!file)
+ return kstrdup("<unknown>", gfp);
+
+	/* We add 11 bytes so ' (deleted)' and a trailing NUL can be appended */
+ temp = kmalloc(PATH_MAX + 11, GFP_KERNEL);
+ if (!temp)
+ return kstrdup("<no_memory>", gfp);
+
+ pathname = file_path(file, temp, PATH_MAX + 11);
+ if (IS_ERR(pathname))
+ pathname = kstrdup("<too_long>", gfp);
+ else
+ pathname = kstrdup_quotable(pathname, gfp);
+
+ kfree(temp);
+ return pathname;
+}
+EXPORT_SYMBOL_GPL(kstrdup_quotable_file);
+
+/*
+ * Returns duplicate string in which the @old characters are replaced by @new.
+ */
+char *kstrdup_and_replace(const char *src, char old, char new, gfp_t gfp)
+{
+ char *dst;
+
+ dst = kstrdup(src, gfp);
+ if (!dst)
+ return NULL;
+
+ return strreplace(dst, old, new);
+}
+EXPORT_SYMBOL_GPL(kstrdup_and_replace);
+
+/**
+ * kasprintf_strarray - allocate and fill array of sequential strings
+ * @gfp: flags for the slab allocator
+ * @prefix: prefix to be used
+ * @n: amount of lines to be allocated and filled
+ *
+ * Allocates and fills @n strings using pattern "%s-%zu", where prefix
+ * is provided by the caller. The caller is responsible for freeing them
+ * with kfree_strarray() after use.
+ *
+ * Returns array of strings or NULL when memory can't be allocated.
+ */
+char **kasprintf_strarray(gfp_t gfp, const char *prefix, size_t n)
+{
+ char **names;
+ size_t i;
+
+ names = kcalloc(n + 1, sizeof(char *), gfp);
+ if (!names)
+ return NULL;
+
+ for (i = 0; i < n; i++) {
+ names[i] = kasprintf(gfp, "%s-%zu", prefix, i);
+ if (!names[i]) {
+ kfree_strarray(names, i);
+ return NULL;
+ }
+ }
+
+ return names;
+}
+EXPORT_SYMBOL_GPL(kasprintf_strarray);
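+
+/*
+ * Example (illustrative sketch): kasprintf_strarray(GFP_KERNEL, "irq", 3)
+ * yields the NULL-terminated array {"irq-0", "irq-1", "irq-2", NULL},
+ * released with kfree_strarray(names, 3).
+ */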
+
+/**
+ * kfree_strarray - free a number of dynamically allocated strings contained
+ * in an array and the array itself
+ *
+ * @array: Dynamically allocated array of strings to free.
+ * @n: Number of strings (starting from the beginning of the array) to free.
+ *
+ * Passing a non-NULL @array and @n == 0 as well as NULL @array are valid
+ * use-cases. If @array is NULL, the function does nothing.
+ */
+void kfree_strarray(char **array, size_t n)
+{
+ unsigned int i;
+
+ if (!array)
+ return;
+
+ for (i = 0; i < n; i++)
+ kfree(array[i]);
+ kfree(array);
+}
+EXPORT_SYMBOL_GPL(kfree_strarray);
+
+struct strarray {
+ char **array;
+ size_t n;
+};
+
+static void devm_kfree_strarray(struct device *dev, void *res)
+{
+ struct strarray *array = res;
+
+ kfree_strarray(array->array, array->n);
+}
+
+char **devm_kasprintf_strarray(struct device *dev, const char *prefix, size_t n)
+{
+ struct strarray *ptr;
+
+ ptr = devres_alloc(devm_kfree_strarray, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ ptr->array = kasprintf_strarray(GFP_KERNEL, prefix, n);
+ if (!ptr->array) {
+ devres_free(ptr);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ptr->n = n;
+ devres_add(dev, ptr);
+
+ return ptr->array;
+}
+EXPORT_SYMBOL_GPL(devm_kasprintf_strarray);
+
+/**
+ * strscpy_pad() - Copy a C-string into a sized buffer
+ * @dest: Where to copy the string to
+ * @src: Where to copy the string from
+ * @count: Size of destination buffer
+ *
+ * Copy the string, or as much of it as fits, into the dest buffer. The
+ * behavior is undefined if the string buffers overlap. The destination
+ * buffer is always %NUL terminated, unless it's zero-sized.
+ *
+ * If the source string is shorter than the destination buffer, zeros
+ * the tail of the destination buffer.
+ *
+ * For a full explanation of why you may want to consider using the
+ * 'strscpy' functions, please see the function docstring for strscpy().
+ *
+ * Returns:
+ * * The number of characters copied (not including the trailing %NUL)
+ * * -E2BIG if count is 0 or @src was truncated.
+ */
+ssize_t strscpy_pad(char *dest, const char *src, size_t count)
+{
+ ssize_t written;
+
+ written = strscpy(dest, src, count);
+ if (written < 0 || written == count - 1)
+ return written;
+
+ memset(dest + written + 1, 0, count - written - 1);
+
+ return written;
+}
+EXPORT_SYMBOL(strscpy_pad);
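+
+/*
+ * Example (illustrative sketch): copying "hi" into an 8-byte buffer
+ * returns 2 and zeroes everything after the terminator:
+ *
+ *	char buf[8];
+ *
+ *	strscpy_pad(buf, "hi", sizeof(buf));
+ *
+ * buf now holds 'h', 'i' and six NUL bytes.
+ */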
+
+/**
+ * skip_spaces - Removes leading whitespace from @str.
+ * @str: The string to be stripped.
+ *
+ * Returns a pointer to the first non-whitespace character in @str.
+ */
+char *skip_spaces(const char *str)
+{
+ while (isspace(*str))
+ ++str;
+ return (char *)str;
+}
+EXPORT_SYMBOL(skip_spaces);
+
+/**
+ * strim - Removes leading and trailing whitespace from @s.
+ * @s: The string to be stripped.
+ *
+ * Note that the first trailing whitespace is replaced with a %NUL-terminator
+ * in the given string @s. Returns a pointer to the first non-whitespace
+ * character in @s.
+ */
+char *strim(char *s)
+{
+ size_t size;
+ char *end;
+
+ size = strlen(s);
+ if (!size)
+ return s;
+
+ end = s + size - 1;
+ while (end >= s && isspace(*end))
+ end--;
+ *(end + 1) = '\0';
+
+ return skip_spaces(s);
+}
+EXPORT_SYMBOL(strim);
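+
+/*
+ * Example (illustrative sketch): strim() edits the buffer in place and
+ * returns a pointer into it:
+ *
+ *	char s[] = "  hello \n";
+ *	char *t = strim(s);
+ *
+ * t points at "hello" inside s; the trailing whitespace has been
+ * replaced with a NUL.
+ */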
+
+/**
+ * sysfs_streq - return true if strings are equal, modulo trailing newline
+ * @s1: one string
+ * @s2: another string
+ *
+ * This routine returns true iff two strings are equal, treating both
+ * NUL and newline-then-NUL as equivalent string terminations. It's
+ * geared for use with sysfs input strings, which generally terminate
+ * with newlines but are compared against values without newlines.
+ */
+bool sysfs_streq(const char *s1, const char *s2)
+{
+ while (*s1 && *s1 == *s2) {
+ s1++;
+ s2++;
+ }
+
+ if (*s1 == *s2)
+ return true;
+ if (!*s1 && *s2 == '\n' && !s2[1])
+ return true;
+ if (*s1 == '\n' && !s1[1] && !*s2)
+ return true;
+ return false;
+}
+EXPORT_SYMBOL(sysfs_streq);
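+
+/*
+ * Example (illustrative sketch): sysfs store() input usually arrives
+ * with a trailing newline, which sysfs_streq() tolerates:
+ *
+ *	sysfs_streq("enable\n", "enable");	true
+ *	sysfs_streq("enabled", "enable");	false
+ */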
+
+/**
+ * match_string - matches given string in an array
+ * @array: array of strings
+ * @n: number of strings in the array or -1 for NULL terminated arrays
+ * @string: string to match with
+ *
+ * This routine will look for a string in an array of strings up to the
+ * n-th element in the array or until the first NULL element.
+ *
+ * Historically the value of -1 for @n was used to search in arrays that
+ * are NULL terminated. However, the function does not make a distinction
+ * when finishing the search: either @n elements have been compared OR
+ * the first NULL element was found.
+ *
+ * Return:
+ * index of a @string in the @array if matches, or %-EINVAL otherwise.
+ */
+int match_string(const char * const *array, size_t n, const char *string)
+{
+ int index;
+ const char *item;
+
+ for (index = 0; index < n; index++) {
+ item = array[index];
+ if (!item)
+ break;
+ if (!strcmp(item, string))
+ return index;
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL(match_string);
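+
+/*
+ * Example (illustrative sketch): mapping a mode name to its index in a
+ * NULL-terminated table:
+ *
+ *	static const char * const modes[] = { "off", "on", "auto", NULL };
+ *
+ *	match_string(modes, -1, "auto");	returns 2
+ *	match_string(modes, -1, "none");	returns -EINVAL
+ */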
+
+/**
+ * __sysfs_match_string - matches given string in an array
+ * @array: array of strings
+ * @n: number of strings in the array or -1 for NULL terminated arrays
+ * @str: string to match with
+ *
+ * Returns index of @str in the @array or -EINVAL, just like match_string().
+ * Uses sysfs_streq instead of strcmp for matching.
+ *
+ * This routine will look for a string in an array of strings up to the
+ * n-th element in the array or until the first NULL element.
+ *
+ * Historically the value of -1 for @n was used to search in arrays that
+ * are NULL terminated. However, the function does not make a distinction
+ * when finishing the search: either @n elements have been compared OR
+ * the first NULL element was found.
+ */
+int __sysfs_match_string(const char * const *array, size_t n, const char *str)
+{
+ const char *item;
+ int index;
+
+ for (index = 0; index < n; index++) {
+ item = array[index];
+ if (!item)
+ break;
+ if (sysfs_streq(item, str))
+ return index;
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL(__sysfs_match_string);
+
+/**
+ * strreplace - Replace all occurrences of character in string.
+ * @str: The string to operate on.
+ * @old: The character being replaced.
+ * @new: The character @old is replaced with.
+ *
+ * Replaces each @old character with a @new one in the given string @str.
+ *
+ * Return: pointer to the string @str itself.
+ */
+char *strreplace(char *str, char old, char new)
+{
+ char *s = str;
+
+ for (; *s; ++s)
+ if (*s == old)
+ *s = new;
+ return str;
+}
+EXPORT_SYMBOL(strreplace);
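+
+/*
+ * Example (illustrative sketch): device node names may not contain '/',
+ * so a common use is strreplace(name, '/', '!') on a hypothetical "name"
+ * buffer before registering it.
+ */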
+
+/**
+ * memcpy_and_pad - Copy one buffer to another with padding
+ * @dest: Where to copy to
+ * @dest_len: The destination buffer size
+ * @src: Where to copy from
+ * @count: The number of bytes to copy
+ * @pad: Character to use for padding if space is left in destination.
+ */
+void memcpy_and_pad(void *dest, size_t dest_len, const void *src, size_t count,
+ int pad)
+{
+ if (dest_len > count) {
+ memcpy(dest, src, count);
+ memset(dest + count, pad, dest_len - count);
+ } else {
+ memcpy(dest, src, dest_len);
+ }
+}
+EXPORT_SYMBOL(memcpy_and_pad);
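+
+/*
+ * Example (illustrative sketch): fixed-width identification fields are
+ * often space-padded rather than NUL-terminated; "field" and "model" are
+ * hypothetical names:
+ *
+ *	memcpy_and_pad(field, sizeof(field), model, strlen(model), ' ');
+ */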
+
+#ifdef CONFIG_FORTIFY_SOURCE
+/* These are placeholders for fortify compile-time warnings. */
+void __read_overflow2_field(size_t avail, size_t wanted) { }
+EXPORT_SYMBOL(__read_overflow2_field);
+void __write_overflow_field(size_t avail, size_t wanted) { }
+EXPORT_SYMBOL(__write_overflow_field);
+
+void fortify_panic(const char *name)
+{
+ pr_emerg("detected buffer overflow in %s\n", name);
+ BUG();
+}
+EXPORT_SYMBOL(fortify_panic);
+#endif /* CONFIG_FORTIFY_SOURCE */
diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
new file mode 100644
index 000000000..6432b8c3e
--- /dev/null
+++ b/lib/strncpy_from_user.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/compiler.h>
+#include <linux/export.h>
+#include <linux/fault-inject-usercopy.h>
+#include <linux/kasan-checks.h>
+#include <linux/thread_info.h>
+#include <linux/uaccess.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+
+#include <asm/byteorder.h>
+#include <asm/word-at-a-time.h>
+
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+#define IS_UNALIGNED(src, dst) 0
+#else
+#define IS_UNALIGNED(src, dst) \
+ (((long) dst | (long) src) & (sizeof(long) - 1))
+#endif
+
+/*
+ * Do a strncpy, return length of string without final '\0'.
+ * 'count' is the user-supplied count (return 'count' if we
+ * hit it), 'max' is the address space maximum (and we return
+ * -EFAULT if we hit it).
+ */
+static __always_inline long do_strncpy_from_user(char *dst, const char __user *src,
+ unsigned long count, unsigned long max)
+{
+ const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
+ unsigned long res = 0;
+
+ if (IS_UNALIGNED(src, dst))
+ goto byte_at_a_time;
+
+ while (max >= sizeof(unsigned long)) {
+ unsigned long c, data, mask;
+
+ /* Fall back to byte-at-a-time if we get a page fault */
+ unsafe_get_user(c, (unsigned long __user *)(src+res), byte_at_a_time);
+
+ /*
+ * Note that we mask out the bytes following the NUL. This is
+ * important to do because string oblivious code may read past
+ * the NUL. For those routines, we don't want to give them
+ * potentially random bytes after the NUL in `src`.
+ *
+ * One example of such code is BPF map keys. BPF treats map keys
+ * as an opaque set of bytes. Without the post-NUL mask, any BPF
+ * maps keyed by strings returned from strncpy_from_user() may
+ * have multiple entries for semantically identical strings.
+ */
+ if (has_zero(c, &data, &constants)) {
+ data = prep_zero_mask(c, data, &constants);
+ data = create_zero_mask(data);
+ mask = zero_bytemask(data);
+ *(unsigned long *)(dst+res) = c & mask;
+ return res + find_zero(data);
+ }
+
+ *(unsigned long *)(dst+res) = c;
+
+ res += sizeof(unsigned long);
+ max -= sizeof(unsigned long);
+ }
+
+byte_at_a_time:
+ while (max) {
+ char c;
+
+		unsafe_get_user(c, src+res, efault);
+ dst[res] = c;
+ if (!c)
+ return res;
+ res++;
+ max--;
+ }
+
+ /*
+ * Uhhuh. We hit 'max'. But was that the user-specified maximum
+ * too? If so, that's ok - we got as much as the user asked for.
+ */
+ if (res >= count)
+ return res;
+
+ /*
+ * Nope: we hit the address space limit, and we still had more
+ * characters the caller would have wanted. That's an EFAULT.
+ */
+efault:
+ return -EFAULT;
+}
+
+/**
+ * strncpy_from_user - Copy a NUL-terminated string from userspace.
+ * @dst: Destination address, in kernel space. This buffer must be at
+ * least @count bytes long.
+ * @src: Source address, in user space.
+ * @count: Maximum number of bytes to copy, including the trailing NUL.
+ *
+ * Copies a NUL-terminated string from userspace to kernel space.
+ *
+ * On success, returns the length of the string (not including the trailing
+ * NUL).
+ *
+ * If access to userspace fails, returns -EFAULT (some data may have been
+ * copied).
+ *
+ * If @count is smaller than the length of the string, copies @count bytes
+ * and returns @count.
+ */
+long strncpy_from_user(char *dst, const char __user *src, long count)
+{
+ unsigned long max_addr, src_addr;
+
+ might_fault();
+ if (should_fail_usercopy())
+ return -EFAULT;
+ if (unlikely(count <= 0))
+ return 0;
+
+ max_addr = TASK_SIZE_MAX;
+ src_addr = (unsigned long)untagged_addr(src);
+ if (likely(src_addr < max_addr)) {
+ unsigned long max = max_addr - src_addr;
+ long retval;
+
+ /*
+ * Truncate 'max' to the user-specified limit, so that
+ * we only have one limit we need to check in the loop
+ */
+ if (max > count)
+ max = count;
+
+ kasan_check_write(dst, count);
+ check_object_size(dst, count, false);
+ if (user_read_access_begin(src, max)) {
+ retval = do_strncpy_from_user(dst, src, count, max);
+ user_read_access_end();
+ return retval;
+ }
+ }
+ return -EFAULT;
+}
+EXPORT_SYMBOL(strncpy_from_user);
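+
+/*
+ * Example (illustrative sketch): the result must be checked for both a
+ * fault and truncation before use; "kbuf" and "ubuf" are hypothetical:
+ *
+ *	len = strncpy_from_user(kbuf, ubuf, sizeof(kbuf));
+ *	if (len < 0)
+ *		return len;
+ *	if (len == sizeof(kbuf))
+ *		return -ENAMETOOLONG;	no room for the trailing NUL
+ */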
diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
new file mode 100644
index 000000000..feeb935a2
--- /dev/null
+++ b/lib/strnlen_user.c
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/uaccess.h>
+#include <linux/mm.h>
+#include <linux/bitops.h>
+
+#include <asm/word-at-a-time.h>
+
+/*
+ * Do a strnlen, return length of string *with* final '\0'.
+ * 'count' is the user-supplied count, while 'max' is the
+ * address space maximum.
+ *
+ * Return 0 for exceptions (which includes hitting the address
+ * space maximum), or 'count+1' if hitting the user-supplied
+ * maximum count.
+ *
+ * NOTE! We can sometimes overshoot the user-supplied maximum
+ * if it fits in an aligned 'long'. The caller needs to check
+ * the return value against "> max".
+ */
+static __always_inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
+{
+ const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
+ unsigned long align, res = 0;
+ unsigned long c;
+
+ /*
+ * Do everything aligned. But that means that we
+ * need to also expand the maximum..
+ */
+ align = (sizeof(unsigned long) - 1) & (unsigned long)src;
+ src -= align;
+ max += align;
+
+ unsafe_get_user(c, (unsigned long __user *)src, efault);
+ c |= aligned_byte_mask(align);
+
+ for (;;) {
+ unsigned long data;
+ if (has_zero(c, &data, &constants)) {
+ data = prep_zero_mask(c, data, &constants);
+ data = create_zero_mask(data);
+ return res + find_zero(data) + 1 - align;
+ }
+ res += sizeof(unsigned long);
+		/* We already handled 'unsigned long' bytes. Did we do it all? */
+ if (unlikely(max <= sizeof(unsigned long)))
+ break;
+ max -= sizeof(unsigned long);
+ unsafe_get_user(c, (unsigned long __user *)(src+res), efault);
+ }
+ res -= align;
+
+ /*
+ * Uhhuh. We hit 'max'. But was that the user-specified maximum
+ * too? If so, return the marker for "too long".
+ */
+ if (res >= count)
+ return count+1;
+
+ /*
+ * Nope: we hit the address space limit, and we still had more
+ * characters the caller would have wanted. That's 0.
+ */
+efault:
+ return 0;
+}
+
+/**
+ * strnlen_user - Get the size of a user string INCLUDING final NUL.
+ * @str: The string to measure.
+ * @count: Maximum count (including NUL character)
+ *
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
+ *
+ * Get the size of a NUL-terminated string in user space.
+ *
+ * Returns the size of the string INCLUDING the terminating NUL.
+ * If the string is too long, returns a number larger than @count. User
+ * has to check the return value against "> count".
+ * On exception (or invalid count), returns 0.
+ *
+ * NOTE! You should basically never use this function. There is
+ * almost never any valid case for using the length of a user space
+ * string, since the string can be changed at any time by other
+ * threads. Use "strncpy_from_user()" instead to get a stable copy
+ * of the string.
+ */
+long strnlen_user(const char __user *str, long count)
+{
+ unsigned long max_addr, src_addr;
+
+ if (unlikely(count <= 0))
+ return 0;
+
+ max_addr = TASK_SIZE_MAX;
+ src_addr = (unsigned long)untagged_addr(str);
+ if (likely(src_addr < max_addr)) {
+ unsigned long max = max_addr - src_addr;
+ long retval;
+
+ /*
+ * Truncate 'max' to the user-specified limit, so that
+ * we only have one limit we need to check in the loop
+ */
+ if (max > count)
+ max = count;
+
+ if (user_read_access_begin(str, max)) {
+ retval = do_strnlen_user(str, count, max);
+ user_read_access_end();
+ return retval;
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL(strnlen_user);
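+
+/*
+ * Example (illustrative sketch): a return value above the limit means
+ * the user string did not fit; zero means a fault:
+ *
+ *	len = strnlen_user(ustr, PATH_MAX);
+ *	if (!len)
+ *		return -EFAULT;
+ *	if (len > PATH_MAX)
+ *		return -ENAMETOOLONG;
+ */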
diff --git a/lib/strscpy_kunit.c b/lib/strscpy_kunit.c
new file mode 100644
index 000000000..a6b634435
--- /dev/null
+++ b/lib/strscpy_kunit.c
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Kernel module for testing 'strscpy' family of functions.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <kunit/test.h>
+#include <linux/string.h>
+
+/*
+ * tc() - Run a specific test case.
+ * @src: Source string, argument to strscpy_pad()
+ * @count: Size of destination buffer, argument to strscpy_pad()
+ * @expected: Expected return value from call to strscpy_pad()
+ * @chars: Number of characters from the src string expected to be
+ *         written to the dst buffer.
+ * @terminator: 1 if there should be a terminating null byte, 0 otherwise.
+ * @pad: Number of pad characters expected (in the tail of dst buffer).
+ * (@pad does not include the null terminator byte.)
+ *
+ * Calls strscpy_pad() and verifies the return value and state of the
+ * destination buffer after the call returns.
+ */
+static void tc(struct kunit *test, char *src, int count, int expected,
+ int chars, int terminator, int pad)
+{
+ int nr_bytes_poison;
+ int max_expected;
+ int max_count;
+ int written;
+ char buf[6];
+ int index, i;
+ const char POISON = 'z';
+
+ KUNIT_ASSERT_TRUE_MSG(test, src != NULL,
+ "null source string not supported");
+
+ memset(buf, POISON, sizeof(buf));
+ /* Future proofing test suite, validate args */
+ max_count = sizeof(buf) - 2; /* Space for null and to verify overflow */
+ max_expected = count - 1; /* Space for the null */
+
+ KUNIT_ASSERT_LE_MSG(test, count, max_count,
+ "count (%d) is too big (%d) ... aborting", count, max_count);
+ KUNIT_EXPECT_LE_MSG(test, expected, max_expected,
+ "expected (%d) is bigger than can possibly be returned (%d)",
+ expected, max_expected);
+
+ written = strscpy_pad(buf, src, count);
+ KUNIT_ASSERT_EQ(test, written, expected);
+
+ if (count && written == -E2BIG) {
+ KUNIT_ASSERT_EQ_MSG(test, 0, strncmp(buf, src, count - 1),
+ "buffer state invalid for -E2BIG");
+ KUNIT_ASSERT_EQ_MSG(test, buf[count - 1], '\0',
+ "too big string is not null terminated correctly");
+ }
+
+ for (i = 0; i < chars; i++)
+ KUNIT_ASSERT_EQ_MSG(test, buf[i], src[i],
+ "buf[i]==%c != src[i]==%c", buf[i], src[i]);
+
+ if (terminator)
+ KUNIT_ASSERT_EQ_MSG(test, buf[count - 1], '\0',
+ "string is not null terminated correctly");
+
+ for (i = 0; i < pad; i++) {
+ index = chars + terminator + i;
+ KUNIT_ASSERT_EQ_MSG(test, buf[index], '\0',
+ "padding missing at index: %d", i);
+ }
+
+ nr_bytes_poison = sizeof(buf) - chars - terminator - pad;
+ for (i = 0; i < nr_bytes_poison; i++) {
+ index = sizeof(buf) - 1 - i; /* Check from the end back */
+ KUNIT_ASSERT_EQ_MSG(test, buf[index], POISON,
+ "poison value missing at index: %d", i);
+ }
+}
+
+static void strscpy_test(struct kunit *test)
+{
+ char dest[8];
+
+ /*
+ * tc() uses a destination buffer of size 6 and needs at
+ * least 2 characters spare (one for null and one to check for
+ * overflow). This means we should only call tc() with
+ * strings up to a maximum of 4 characters long and 'count'
+ * should not exceed 4. To test with longer strings increase
+ * the buffer size in tc().
+ */
+
+ /* tc(test, src, count, expected, chars, terminator, pad) */
+ tc(test, "a", 0, -E2BIG, 0, 0, 0);
+ tc(test, "", 0, -E2BIG, 0, 0, 0);
+
+ tc(test, "a", 1, -E2BIG, 0, 1, 0);
+ tc(test, "", 1, 0, 0, 1, 0);
+
+ tc(test, "ab", 2, -E2BIG, 1, 1, 0);
+ tc(test, "a", 2, 1, 1, 1, 0);
+ tc(test, "", 2, 0, 0, 1, 1);
+
+ tc(test, "abc", 3, -E2BIG, 2, 1, 0);
+ tc(test, "ab", 3, 2, 2, 1, 0);
+ tc(test, "a", 3, 1, 1, 1, 1);
+ tc(test, "", 3, 0, 0, 1, 2);
+
+ tc(test, "abcd", 4, -E2BIG, 3, 1, 0);
+ tc(test, "abc", 4, 3, 3, 1, 0);
+ tc(test, "ab", 4, 2, 2, 1, 1);
+ tc(test, "a", 4, 1, 1, 1, 2);
+ tc(test, "", 4, 0, 0, 1, 3);
+
+ /* Compile-time-known source strings. */
+ KUNIT_EXPECT_EQ(test, strscpy(dest, "", ARRAY_SIZE(dest)), 0);
+ KUNIT_EXPECT_EQ(test, strscpy(dest, "", 3), 0);
+ KUNIT_EXPECT_EQ(test, strscpy(dest, "", 1), 0);
+ KUNIT_EXPECT_EQ(test, strscpy(dest, "", 0), -E2BIG);
+ KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", ARRAY_SIZE(dest)), 5);
+ KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", 3), -E2BIG);
+ KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", 1), -E2BIG);
+ KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", 0), -E2BIG);
+ KUNIT_EXPECT_EQ(test, strscpy(dest, "This is too long", ARRAY_SIZE(dest)), -E2BIG);
+}
+
+static struct kunit_case strscpy_test_cases[] = {
+ KUNIT_CASE(strscpy_test),
+ {}
+};
+
+static struct kunit_suite strscpy_test_suite = {
+ .name = "strscpy",
+ .test_cases = strscpy_test_cases,
+};
+
+kunit_test_suite(strscpy_test_suite);
+
+MODULE_AUTHOR("Tobin C. Harding <tobin@kernel.org>");
+MODULE_LICENSE("GPL");
diff --git a/lib/syscall.c b/lib/syscall.c
new file mode 100644
index 000000000..006e256d2
--- /dev/null
+++ b/lib/syscall.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/ptrace.h>
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+#include <linux/export.h>
+#include <asm/syscall.h>
+
+static int collect_syscall(struct task_struct *target, struct syscall_info *info)
+{
+ unsigned long args[6] = { };
+ struct pt_regs *regs;
+
+ if (!try_get_task_stack(target)) {
+ /* Task has no stack, so the task isn't in a syscall. */
+ memset(info, 0, sizeof(*info));
+ info->data.nr = -1;
+ return 0;
+ }
+
+ regs = task_pt_regs(target);
+ if (unlikely(!regs)) {
+ put_task_stack(target);
+ return -EAGAIN;
+ }
+
+ info->sp = user_stack_pointer(regs);
+ info->data.instruction_pointer = instruction_pointer(regs);
+
+ info->data.nr = syscall_get_nr(target, regs);
+ if (info->data.nr != -1L)
+ syscall_get_arguments(target, regs, args);
+
+ info->data.args[0] = args[0];
+ info->data.args[1] = args[1];
+ info->data.args[2] = args[2];
+ info->data.args[3] = args[3];
+ info->data.args[4] = args[4];
+ info->data.args[5] = args[5];
+
+ put_task_stack(target);
+ return 0;
+}
+
+/**
+ * task_current_syscall - Discover what a blocked task is doing.
+ * @target: thread to examine
+ * @info: structure with the following fields:
+ * .sp - filled with user stack pointer
+ * .data.nr - filled with system call number or -1
+ *	.data.args - filled with the six system call arguments
+ * .data.instruction_pointer - filled with user PC
+ *
+ * If @target is blocked in a system call, returns zero with @info.data.nr
+ * set to the call's number and @info.data.args filled in with its
+ * arguments. Registers not used for system call arguments may not be available
+ * and it is not kosher to use &struct user_regset calls while the system
+ * call is still in progress. Note we may get this result if @target
+ * has finished its system call but not yet returned to user mode, such
+ * as when it's stopped for signal handling or syscall exit tracing.
+ *
+ * If @target is blocked in the kernel during a fault or exception,
+ * returns zero with @info.data.nr set to -1 and does not fill in
+ * @info.data.args. If so, it's now safe to examine @target using
+ * &struct user_regset get() calls as long as we're sure @target won't return
+ * to user mode.
+ *
+ * Returns -%EAGAIN if @target does not remain blocked.
+ */
+int task_current_syscall(struct task_struct *target, struct syscall_info *info)
+{
+ unsigned long ncsw;
+ unsigned int state;
+
+ if (target == current)
+ return collect_syscall(target, info);
+
+ state = READ_ONCE(target->__state);
+ if (unlikely(!state))
+ return -EAGAIN;
+
+ ncsw = wait_task_inactive(target, state);
+ if (unlikely(!ncsw) ||
+ unlikely(collect_syscall(target, info)) ||
+ unlikely(wait_task_inactive(target, state) != ncsw))
+ return -EAGAIN;
+
+ return 0;
+}
diff --git a/lib/test-kstrtox.c b/lib/test-kstrtox.c
new file mode 100644
index 000000000..f355f6716
--- /dev/null
+++ b/lib/test-kstrtox.c
@@ -0,0 +1,735 @@
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#define for_each_test(i, test) \
+ for (i = 0; i < ARRAY_SIZE(test); i++)
+
+struct test_fail {
+ const char *str;
+ unsigned int base;
+};
+
+#define DEFINE_TEST_FAIL(test) \
+ const struct test_fail test[] __initconst
+
+#define DECLARE_TEST_OK(type, test_type) \
+ test_type { \
+ const char *str; \
+ unsigned int base; \
+ type expected_res; \
+ }
+
+#define DEFINE_TEST_OK(type, test) \
+ const type test[] __initconst
+
+#define TEST_FAIL(fn, type, fmt, test) \
+{ \
+ unsigned int i; \
+ \
+ for_each_test(i, test) { \
+ const struct test_fail *t = &test[i]; \
+ type tmp; \
+ int rv; \
+ \
+ tmp = 0; \
+ rv = fn(t->str, t->base, &tmp); \
+ if (rv >= 0) { \
+ WARN(1, "str '%s', base %u, expected -E, got %d/" fmt "\n", \
+ t->str, t->base, rv, tmp); \
+ continue; \
+ } \
+ } \
+}
+
+#define TEST_OK(fn, type, fmt, test) \
+{ \
+ unsigned int i; \
+ \
+ for_each_test(i, test) { \
+ const typeof(test[0]) *t = &test[i]; \
+ type res; \
+ int rv; \
+ \
+ rv = fn(t->str, t->base, &res); \
+ if (rv != 0) { \
+ WARN(1, "str '%s', base %u, expected 0/" fmt ", got %d\n", \
+ t->str, t->base, t->expected_res, rv); \
+ continue; \
+ } \
+ if (res != t->expected_res) { \
+ WARN(1, "str '%s', base %u, expected " fmt ", got " fmt "\n", \
+ t->str, t->base, t->expected_res, res); \
+ continue; \
+ } \
+ } \
+}
+
+static void __init test_kstrtoull_ok(void)
+{
+ DECLARE_TEST_OK(unsigned long long, struct test_ull);
+ static DEFINE_TEST_OK(struct test_ull, test_ull_ok) = {
+ {"0", 10, 0ULL},
+ {"1", 10, 1ULL},
+ {"127", 10, 127ULL},
+ {"128", 10, 128ULL},
+ {"129", 10, 129ULL},
+ {"255", 10, 255ULL},
+ {"256", 10, 256ULL},
+ {"257", 10, 257ULL},
+ {"32767", 10, 32767ULL},
+ {"32768", 10, 32768ULL},
+ {"32769", 10, 32769ULL},
+ {"65535", 10, 65535ULL},
+ {"65536", 10, 65536ULL},
+ {"65537", 10, 65537ULL},
+ {"2147483647", 10, 2147483647ULL},
+ {"2147483648", 10, 2147483648ULL},
+ {"2147483649", 10, 2147483649ULL},
+ {"4294967295", 10, 4294967295ULL},
+ {"4294967296", 10, 4294967296ULL},
+ {"4294967297", 10, 4294967297ULL},
+ {"9223372036854775807", 10, 9223372036854775807ULL},
+ {"9223372036854775808", 10, 9223372036854775808ULL},
+ {"9223372036854775809", 10, 9223372036854775809ULL},
+ {"18446744073709551614", 10, 18446744073709551614ULL},
+ {"18446744073709551615", 10, 18446744073709551615ULL},
+
+ {"00", 8, 00ULL},
+ {"01", 8, 01ULL},
+ {"0177", 8, 0177ULL},
+ {"0200", 8, 0200ULL},
+ {"0201", 8, 0201ULL},
+ {"0377", 8, 0377ULL},
+ {"0400", 8, 0400ULL},
+ {"0401", 8, 0401ULL},
+ {"077777", 8, 077777ULL},
+ {"0100000", 8, 0100000ULL},
+ {"0100001", 8, 0100001ULL},
+ {"0177777", 8, 0177777ULL},
+ {"0200000", 8, 0200000ULL},
+ {"0200001", 8, 0200001ULL},
+ {"017777777777", 8, 017777777777ULL},
+ {"020000000000", 8, 020000000000ULL},
+ {"020000000001", 8, 020000000001ULL},
+ {"037777777777", 8, 037777777777ULL},
+ {"040000000000", 8, 040000000000ULL},
+ {"040000000001", 8, 040000000001ULL},
+ {"0777777777777777777777", 8, 0777777777777777777777ULL},
+ {"01000000000000000000000", 8, 01000000000000000000000ULL},
+ {"01000000000000000000001", 8, 01000000000000000000001ULL},
+ {"01777777777777777777776", 8, 01777777777777777777776ULL},
+ {"01777777777777777777777", 8, 01777777777777777777777ULL},
+
+ {"0x0", 16, 0x0ULL},
+ {"0x1", 16, 0x1ULL},
+ {"0x7f", 16, 0x7fULL},
+ {"0x80", 16, 0x80ULL},
+ {"0x81", 16, 0x81ULL},
+ {"0xff", 16, 0xffULL},
+ {"0x100", 16, 0x100ULL},
+ {"0x101", 16, 0x101ULL},
+ {"0x7fff", 16, 0x7fffULL},
+ {"0x8000", 16, 0x8000ULL},
+ {"0x8001", 16, 0x8001ULL},
+ {"0xffff", 16, 0xffffULL},
+ {"0x10000", 16, 0x10000ULL},
+ {"0x10001", 16, 0x10001ULL},
+ {"0x7fffffff", 16, 0x7fffffffULL},
+ {"0x80000000", 16, 0x80000000ULL},
+ {"0x80000001", 16, 0x80000001ULL},
+ {"0xffffffff", 16, 0xffffffffULL},
+ {"0x100000000", 16, 0x100000000ULL},
+ {"0x100000001", 16, 0x100000001ULL},
+ {"0x7fffffffffffffff", 16, 0x7fffffffffffffffULL},
+ {"0x8000000000000000", 16, 0x8000000000000000ULL},
+ {"0x8000000000000001", 16, 0x8000000000000001ULL},
+ {"0xfffffffffffffffe", 16, 0xfffffffffffffffeULL},
+ {"0xffffffffffffffff", 16, 0xffffffffffffffffULL},
+
+ {"0\n", 0, 0ULL},
+ };
+ TEST_OK(kstrtoull, unsigned long long, "%llu", test_ull_ok);
+}
+
+static void __init test_kstrtoull_fail(void)
+{
+ static DEFINE_TEST_FAIL(test_ull_fail) = {
+ {"", 0},
+ {"", 8},
+ {"", 10},
+ {"", 16},
+ {"\n", 0},
+ {"\n", 8},
+ {"\n", 10},
+ {"\n", 16},
+ {"\n0", 0},
+ {"\n0", 8},
+ {"\n0", 10},
+ {"\n0", 16},
+ {"+", 0},
+ {"+", 8},
+ {"+", 10},
+ {"+", 16},
+ {"-", 0},
+ {"-", 8},
+ {"-", 10},
+ {"-", 16},
+ {"0x", 0},
+ {"0x", 16},
+ {"0X", 0},
+ {"0X", 16},
+ {"0 ", 0},
+ {"1+", 0},
+ {"1-", 0},
+ {" 2", 0},
+ /* base autodetection */
+ {"0x0z", 0},
+ {"0z", 0},
+ {"a", 0},
+ /* digit >= base */
+ {"2", 2},
+ {"8", 8},
+ {"a", 10},
+ {"A", 10},
+ {"g", 16},
+ {"G", 16},
+ /* overflow */
+ {"10000000000000000000000000000000000000000000000000000000000000000", 2},
+ {"2000000000000000000000", 8},
+ {"18446744073709551616", 10},
+ {"10000000000000000", 16},
+ /* negative */
+ {"-0", 0},
+ {"-0", 8},
+ {"-0", 10},
+ {"-0", 16},
+ {"-1", 0},
+ {"-1", 8},
+ {"-1", 10},
+ {"-1", 16},
+ /* sign is first character if any */
+ {"-+1", 0},
+ {"-+1", 8},
+ {"-+1", 10},
+ {"-+1", 16},
+ /* nothing after \n */
+ {"0\n0", 0},
+ {"0\n0", 8},
+ {"0\n0", 10},
+ {"0\n0", 16},
+ {"0\n+", 0},
+ {"0\n+", 8},
+ {"0\n+", 10},
+ {"0\n+", 16},
+ {"0\n-", 0},
+ {"0\n-", 8},
+ {"0\n-", 10},
+ {"0\n-", 16},
+ {"0\n ", 0},
+ {"0\n ", 8},
+ {"0\n ", 10},
+ {"0\n ", 16},
+ };
+ TEST_FAIL(kstrtoull, unsigned long long, "%llu", test_ull_fail);
+}
+
+static void __init test_kstrtoll_ok(void)
+{
+ DECLARE_TEST_OK(long long, struct test_ll);
+ static DEFINE_TEST_OK(struct test_ll, test_ll_ok) = {
+ {"0", 10, 0LL},
+ {"1", 10, 1LL},
+ {"127", 10, 127LL},
+ {"128", 10, 128LL},
+ {"129", 10, 129LL},
+ {"255", 10, 255LL},
+ {"256", 10, 256LL},
+ {"257", 10, 257LL},
+ {"32767", 10, 32767LL},
+ {"32768", 10, 32768LL},
+ {"32769", 10, 32769LL},
+ {"65535", 10, 65535LL},
+ {"65536", 10, 65536LL},
+ {"65537", 10, 65537LL},
+ {"2147483647", 10, 2147483647LL},
+ {"2147483648", 10, 2147483648LL},
+ {"2147483649", 10, 2147483649LL},
+ {"4294967295", 10, 4294967295LL},
+ {"4294967296", 10, 4294967296LL},
+ {"4294967297", 10, 4294967297LL},
+ {"9223372036854775807", 10, 9223372036854775807LL},
+
+ {"-0", 10, 0LL},
+ {"-1", 10, -1LL},
+ {"-2", 10, -2LL},
+ {"-9223372036854775808", 10, LLONG_MIN},
+ };
+ TEST_OK(kstrtoll, long long, "%lld", test_ll_ok);
+}
+
+static void __init test_kstrtoll_fail(void)
+{
+ static DEFINE_TEST_FAIL(test_ll_fail) = {
+ {"9223372036854775808", 10},
+ {"9223372036854775809", 10},
+ {"18446744073709551614", 10},
+ {"18446744073709551615", 10},
+ {"-9223372036854775809", 10},
+ {"-18446744073709551614", 10},
+ {"-18446744073709551615", 10},
+ /* sign is first character if any */
+ {"-+1", 0},
+ {"-+1", 8},
+ {"-+1", 10},
+ {"-+1", 16},
+ };
+ TEST_FAIL(kstrtoll, long long, "%lld", test_ll_fail);
+}
+
+static void __init test_kstrtou64_ok(void)
+{
+ DECLARE_TEST_OK(u64, struct test_u64);
+ static DEFINE_TEST_OK(struct test_u64, test_u64_ok) = {
+ {"0", 10, 0},
+ {"1", 10, 1},
+ {"126", 10, 126},
+ {"127", 10, 127},
+ {"128", 10, 128},
+ {"129", 10, 129},
+ {"254", 10, 254},
+ {"255", 10, 255},
+ {"256", 10, 256},
+ {"257", 10, 257},
+ {"32766", 10, 32766},
+ {"32767", 10, 32767},
+ {"32768", 10, 32768},
+ {"32769", 10, 32769},
+ {"65534", 10, 65534},
+ {"65535", 10, 65535},
+ {"65536", 10, 65536},
+ {"65537", 10, 65537},
+ {"2147483646", 10, 2147483646},
+ {"2147483647", 10, 2147483647},
+ {"2147483648", 10, 2147483648ULL},
+ {"2147483649", 10, 2147483649ULL},
+ {"4294967294", 10, 4294967294ULL},
+ {"4294967295", 10, 4294967295ULL},
+ {"4294967296", 10, 4294967296ULL},
+ {"4294967297", 10, 4294967297ULL},
+ {"9223372036854775806", 10, 9223372036854775806ULL},
+ {"9223372036854775807", 10, 9223372036854775807ULL},
+ {"9223372036854775808", 10, 9223372036854775808ULL},
+ {"9223372036854775809", 10, 9223372036854775809ULL},
+ {"18446744073709551614", 10, 18446744073709551614ULL},
+ {"18446744073709551615", 10, 18446744073709551615ULL},
+ };
+ TEST_OK(kstrtou64, u64, "%llu", test_u64_ok);
+}
+
+static void __init test_kstrtou64_fail(void)
+{
+ static DEFINE_TEST_FAIL(test_u64_fail) = {
+ {"-2", 10},
+ {"-1", 10},
+ {"18446744073709551616", 10},
+ {"18446744073709551617", 10},
+ };
+ TEST_FAIL(kstrtou64, u64, "%llu", test_u64_fail);
+}
+
+static void __init test_kstrtos64_ok(void)
+{
+ DECLARE_TEST_OK(s64, struct test_s64);
+ static DEFINE_TEST_OK(struct test_s64, test_s64_ok) = {
+ {"-128", 10, -128},
+ {"-127", 10, -127},
+ {"-1", 10, -1},
+ {"0", 10, 0},
+ {"1", 10, 1},
+ {"126", 10, 126},
+ {"127", 10, 127},
+ {"128", 10, 128},
+ {"129", 10, 129},
+ {"254", 10, 254},
+ {"255", 10, 255},
+ {"256", 10, 256},
+ {"257", 10, 257},
+ {"32766", 10, 32766},
+ {"32767", 10, 32767},
+ {"32768", 10, 32768},
+ {"32769", 10, 32769},
+ {"65534", 10, 65534},
+ {"65535", 10, 65535},
+ {"65536", 10, 65536},
+ {"65537", 10, 65537},
+ {"2147483646", 10, 2147483646},
+ {"2147483647", 10, 2147483647},
+ {"2147483648", 10, 2147483648LL},
+ {"2147483649", 10, 2147483649LL},
+ {"4294967294", 10, 4294967294LL},
+ {"4294967295", 10, 4294967295LL},
+ {"4294967296", 10, 4294967296LL},
+ {"4294967297", 10, 4294967297LL},
+ {"9223372036854775806", 10, 9223372036854775806LL},
+ {"9223372036854775807", 10, 9223372036854775807LL},
+ };
+ TEST_OK(kstrtos64, s64, "%lld", test_s64_ok);
+}
+
+static void __init test_kstrtos64_fail(void)
+{
+ static DEFINE_TEST_FAIL(test_s64_fail) = {
+ {"9223372036854775808", 10},
+ {"9223372036854775809", 10},
+ {"18446744073709551614", 10},
+ {"18446744073709551615", 10},
+ {"18446744073709551616", 10},
+ {"18446744073709551617", 10},
+ };
+ TEST_FAIL(kstrtos64, s64, "%lld", test_s64_fail);
+}
+
+static void __init test_kstrtou32_ok(void)
+{
+ DECLARE_TEST_OK(u32, struct test_u32);
+ static DEFINE_TEST_OK(struct test_u32, test_u32_ok) = {
+ {"0", 10, 0},
+ {"1", 10, 1},
+ {"126", 10, 126},
+ {"127", 10, 127},
+ {"128", 10, 128},
+ {"129", 10, 129},
+ {"254", 10, 254},
+ {"255", 10, 255},
+ {"256", 10, 256},
+ {"257", 10, 257},
+ {"32766", 10, 32766},
+ {"32767", 10, 32767},
+ {"32768", 10, 32768},
+ {"32769", 10, 32769},
+ {"65534", 10, 65534},
+ {"65535", 10, 65535},
+ {"65536", 10, 65536},
+ {"65537", 10, 65537},
+ {"2147483646", 10, 2147483646},
+ {"2147483647", 10, 2147483647},
+ {"2147483648", 10, 2147483648U},
+ {"2147483649", 10, 2147483649U},
+ {"4294967294", 10, 4294967294U},
+ {"4294967295", 10, 4294967295U},
+ };
+ TEST_OK(kstrtou32, u32, "%u", test_u32_ok);
+}
+
+static void __init test_kstrtou32_fail(void)
+{
+ static DEFINE_TEST_FAIL(test_u32_fail) = {
+ {"-2", 10},
+ {"-1", 10},
+ {"4294967296", 10},
+ {"4294967297", 10},
+ {"9223372036854775806", 10},
+ {"9223372036854775807", 10},
+ {"9223372036854775808", 10},
+ {"9223372036854775809", 10},
+ {"18446744073709551614", 10},
+ {"18446744073709551615", 10},
+ {"18446744073709551616", 10},
+ {"18446744073709551617", 10},
+ };
+ TEST_FAIL(kstrtou32, u32, "%u", test_u32_fail);
+}
+
+static void __init test_kstrtos32_ok(void)
+{
+ DECLARE_TEST_OK(s32, struct test_s32);
+ static DEFINE_TEST_OK(struct test_s32, test_s32_ok) = {
+ {"-128", 10, -128},
+ {"-127", 10, -127},
+ {"-1", 10, -1},
+ {"0", 10, 0},
+ {"1", 10, 1},
+ {"126", 10, 126},
+ {"127", 10, 127},
+ {"128", 10, 128},
+ {"129", 10, 129},
+ {"254", 10, 254},
+ {"255", 10, 255},
+ {"256", 10, 256},
+ {"257", 10, 257},
+ {"32766", 10, 32766},
+ {"32767", 10, 32767},
+ {"32768", 10, 32768},
+ {"32769", 10, 32769},
+ {"65534", 10, 65534},
+ {"65535", 10, 65535},
+ {"65536", 10, 65536},
+ {"65537", 10, 65537},
+ {"2147483646", 10, 2147483646},
+ {"2147483647", 10, 2147483647},
+ };
+ TEST_OK(kstrtos32, s32, "%d", test_s32_ok);
+}
+
+static void __init test_kstrtos32_fail(void)
+{
+ static DEFINE_TEST_FAIL(test_s32_fail) = {
+ {"2147483648", 10},
+ {"2147483649", 10},
+ {"4294967294", 10},
+ {"4294967295", 10},
+ {"4294967296", 10},
+ {"4294967297", 10},
+ {"9223372036854775806", 10},
+ {"9223372036854775807", 10},
+ {"9223372036854775808", 10},
+ {"9223372036854775809", 10},
+ {"18446744073709551614", 10},
+ {"18446744073709551615", 10},
+ {"18446744073709551616", 10},
+ {"18446744073709551617", 10},
+ };
+ TEST_FAIL(kstrtos32, s32, "%d", test_s32_fail);
+}
+
+static void __init test_kstrtou16_ok(void)
+{
+ DECLARE_TEST_OK(u16, struct test_u16);
+ static DEFINE_TEST_OK(struct test_u16, test_u16_ok) = {
+ {"0", 10, 0},
+ {"1", 10, 1},
+ {"126", 10, 126},
+ {"127", 10, 127},
+ {"128", 10, 128},
+ {"129", 10, 129},
+ {"254", 10, 254},
+ {"255", 10, 255},
+ {"256", 10, 256},
+ {"257", 10, 257},
+ {"32766", 10, 32766},
+ {"32767", 10, 32767},
+ {"32768", 10, 32768},
+ {"32769", 10, 32769},
+ {"65534", 10, 65534},
+ {"65535", 10, 65535},
+ };
+ TEST_OK(kstrtou16, u16, "%hu", test_u16_ok);
+}
+
+static void __init test_kstrtou16_fail(void)
+{
+ static DEFINE_TEST_FAIL(test_u16_fail) = {
+ {"-2", 10},
+ {"-1", 10},
+ {"65536", 10},
+ {"65537", 10},
+ {"2147483646", 10},
+ {"2147483647", 10},
+ {"2147483648", 10},
+ {"2147483649", 10},
+ {"4294967294", 10},
+ {"4294967295", 10},
+ {"4294967296", 10},
+ {"4294967297", 10},
+ {"9223372036854775806", 10},
+ {"9223372036854775807", 10},
+ {"9223372036854775808", 10},
+ {"9223372036854775809", 10},
+ {"18446744073709551614", 10},
+ {"18446744073709551615", 10},
+ {"18446744073709551616", 10},
+ {"18446744073709551617", 10},
+ };
+ TEST_FAIL(kstrtou16, u16, "%hu", test_u16_fail);
+}
+
+static void __init test_kstrtos16_ok(void)
+{
+ DECLARE_TEST_OK(s16, struct test_s16);
+ static DEFINE_TEST_OK(struct test_s16, test_s16_ok) = {
+ {"-130", 10, -130},
+ {"-129", 10, -129},
+ {"-128", 10, -128},
+ {"-127", 10, -127},
+ {"-1", 10, -1},
+ {"0", 10, 0},
+ {"1", 10, 1},
+ {"126", 10, 126},
+ {"127", 10, 127},
+ {"128", 10, 128},
+ {"129", 10, 129},
+ {"254", 10, 254},
+ {"255", 10, 255},
+ {"256", 10, 256},
+ {"257", 10, 257},
+ {"32766", 10, 32766},
+ {"32767", 10, 32767},
+ };
+ TEST_OK(kstrtos16, s16, "%hd", test_s16_ok);
+}
+
+static void __init test_kstrtos16_fail(void)
+{
+ static DEFINE_TEST_FAIL(test_s16_fail) = {
+ {"32768", 10},
+ {"32769", 10},
+ {"65534", 10},
+ {"65535", 10},
+ {"65536", 10},
+ {"65537", 10},
+ {"2147483646", 10},
+ {"2147483647", 10},
+ {"2147483648", 10},
+ {"2147483649", 10},
+ {"4294967294", 10},
+ {"4294967295", 10},
+ {"4294967296", 10},
+ {"4294967297", 10},
+ {"9223372036854775806", 10},
+ {"9223372036854775807", 10},
+ {"9223372036854775808", 10},
+ {"9223372036854775809", 10},
+ {"18446744073709551614", 10},
+ {"18446744073709551615", 10},
+ {"18446744073709551616", 10},
+ {"18446744073709551617", 10},
+ };
+ TEST_FAIL(kstrtos16, s16, "%hd", test_s16_fail);
+}
+
+static void __init test_kstrtou8_ok(void)
+{
+ DECLARE_TEST_OK(u8, struct test_u8);
+ static DEFINE_TEST_OK(struct test_u8, test_u8_ok) = {
+ {"0", 10, 0},
+ {"1", 10, 1},
+ {"126", 10, 126},
+ {"127", 10, 127},
+ {"128", 10, 128},
+ {"129", 10, 129},
+ {"254", 10, 254},
+ {"255", 10, 255},
+ };
+ TEST_OK(kstrtou8, u8, "%hhu", test_u8_ok);
+}
+
+static void __init test_kstrtou8_fail(void)
+{
+ static DEFINE_TEST_FAIL(test_u8_fail) = {
+ {"-2", 10},
+ {"-1", 10},
+ {"256", 10},
+ {"257", 10},
+ {"32766", 10},
+ {"32767", 10},
+ {"32768", 10},
+ {"32769", 10},
+ {"65534", 10},
+ {"65535", 10},
+ {"65536", 10},
+ {"65537", 10},
+ {"2147483646", 10},
+ {"2147483647", 10},
+ {"2147483648", 10},
+ {"2147483649", 10},
+ {"4294967294", 10},
+ {"4294967295", 10},
+ {"4294967296", 10},
+ {"4294967297", 10},
+ {"9223372036854775806", 10},
+ {"9223372036854775807", 10},
+ {"9223372036854775808", 10},
+ {"9223372036854775809", 10},
+ {"18446744073709551614", 10},
+ {"18446744073709551615", 10},
+ {"18446744073709551616", 10},
+ {"18446744073709551617", 10},
+ };
+ TEST_FAIL(kstrtou8, u8, "%hhu", test_u8_fail);
+}
+
+static void __init test_kstrtos8_ok(void)
+{
+ DECLARE_TEST_OK(s8, struct test_s8);
+ static DEFINE_TEST_OK(struct test_s8, test_s8_ok) = {
+ {"-128", 10, -128},
+ {"-127", 10, -127},
+ {"-1", 10, -1},
+ {"0", 10, 0},
+ {"1", 10, 1},
+ {"126", 10, 126},
+ {"127", 10, 127},
+ };
+ TEST_OK(kstrtos8, s8, "%hhd", test_s8_ok);
+}
+
+static void __init test_kstrtos8_fail(void)
+{
+ static DEFINE_TEST_FAIL(test_s8_fail) = {
+ {"-130", 10},
+ {"-129", 10},
+ {"128", 10},
+ {"129", 10},
+ {"254", 10},
+ {"255", 10},
+ {"256", 10},
+ {"257", 10},
+ {"32766", 10},
+ {"32767", 10},
+ {"32768", 10},
+ {"32769", 10},
+ {"65534", 10},
+ {"65535", 10},
+ {"65536", 10},
+ {"65537", 10},
+ {"2147483646", 10},
+ {"2147483647", 10},
+ {"2147483648", 10},
+ {"2147483649", 10},
+ {"4294967294", 10},
+ {"4294967295", 10},
+ {"4294967296", 10},
+ {"4294967297", 10},
+ {"9223372036854775806", 10},
+ {"9223372036854775807", 10},
+ {"9223372036854775808", 10},
+ {"9223372036854775809", 10},
+ {"18446744073709551614", 10},
+ {"18446744073709551615", 10},
+ {"18446744073709551616", 10},
+ {"18446744073709551617", 10},
+ };
+ TEST_FAIL(kstrtos8, s8, "%hhd", test_s8_fail);
+}
+
+static int __init test_kstrtox_init(void)
+{
+ test_kstrtoull_ok();
+ test_kstrtoull_fail();
+ test_kstrtoll_ok();
+ test_kstrtoll_fail();
+
+ test_kstrtou64_ok();
+ test_kstrtou64_fail();
+ test_kstrtos64_ok();
+ test_kstrtos64_fail();
+
+ test_kstrtou32_ok();
+ test_kstrtou32_fail();
+ test_kstrtos32_ok();
+ test_kstrtos32_fail();
+
+ test_kstrtou16_ok();
+ test_kstrtou16_fail();
+ test_kstrtos16_ok();
+ test_kstrtos16_fail();
+
+ test_kstrtou8_ok();
+ test_kstrtou8_fail();
+ test_kstrtos8_ok();
+ test_kstrtos8_fail();
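+
+ /*
+ * A nonzero return makes the module load fail on purpose, so nothing
+ * stays resident; the results above are reported via the kernel log.
+ */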
+ return -EINVAL;
+}
+module_init(test_kstrtox_init);
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/lib/test-string_helpers.c b/lib/test-string_helpers.c
new file mode 100644
index 000000000..9a68849a5
--- /dev/null
+++ b/lib/test-string_helpers.c
@@ -0,0 +1,609 @@
+/*
+ * Test cases for lib/string_helpers.c module.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/random.h>
+#include <linux/string.h>
+#include <linux/string_helpers.h>
+
+static __init bool test_string_check_buf(const char *name, unsigned int flags,
+ char *in, size_t p,
+ char *out_real, size_t q_real,
+ char *out_test, size_t q_test)
+{
+ if (q_real == q_test && !memcmp(out_test, out_real, q_test))
+ return true;
+
+ pr_warn("Test '%s' failed: flags = %#x\n", name, flags);
+
+ print_hex_dump(KERN_WARNING, "Input: ", DUMP_PREFIX_NONE, 16, 1,
+ in, p, true);
+ print_hex_dump(KERN_WARNING, "Expected: ", DUMP_PREFIX_NONE, 16, 1,
+ out_test, q_test, true);
+ print_hex_dump(KERN_WARNING, "Got: ", DUMP_PREFIX_NONE, 16, 1,
+ out_real, q_real, true);
+
+ return false;
+}
+
+struct test_string {
+ const char *in;
+ const char *out;
+ unsigned int flags;
+};
+
+static const struct test_string strings[] __initconst = {
+ {
+ .in = "\\f\\ \\n\\r\\t\\v",
+ .out = "\f\\ \n\r\t\v",
+ .flags = UNESCAPE_SPACE,
+ },
+ {
+ .in = "\\40\\1\\387\\0064\\05\\040\\8a\\110\\777",
+ .out = " \001\00387\0064\005 \\8aH?7",
+ .flags = UNESCAPE_OCTAL,
+ },
+ {
+ .in = "\\xv\\xa\\x2c\\xD\\x6f2",
+ .out = "\\xv\n,\ro2",
+ .flags = UNESCAPE_HEX,
+ },
+ {
+ .in = "\\h\\\\\\\"\\a\\e\\",
+ .out = "\\h\\\"\a\e\\",
+ .flags = UNESCAPE_SPECIAL,
+ },
+};
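+
+/*
+ * Note on the UNESCAPE_OCTAL vector above: up to three octal digits are
+ * consumed while the value still fits in a byte, so "\40" becomes ' '
+ * while "\777" stops after "\77" (0x3f, '?') and the final '7' stays
+ * literal; "\8a" has no octal digit and is passed through unchanged.
+ */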
+
+static void __init test_string_unescape(const char *name, unsigned int flags,
+ bool inplace)
+{
+ int q_real = 256;
+ char *in = kmalloc(q_real, GFP_KERNEL);
+ char *out_test = kmalloc(q_real, GFP_KERNEL);
+ char *out_real = kmalloc(q_real, GFP_KERNEL);
+ int i, p = 0, q_test = 0;
+
+ if (!in || !out_test || !out_real)
+ goto out;
+
+ for (i = 0; i < ARRAY_SIZE(strings); i++) {
+ const char *s = strings[i].in;
+ int len = strlen(strings[i].in);
+
+ /* Copy string to in buffer */
+ memcpy(&in[p], s, len);
+ p += len;
+
+ /* Copy expected result for given flags */
+ if (flags & strings[i].flags) {
+ s = strings[i].out;
+ len = strlen(strings[i].out);
+ }
+ memcpy(&out_test[q_test], s, len);
+ q_test += len;
+ }
+ in[p++] = '\0';
+
+ /* Call string_unescape and compare result */
+ if (inplace) {
+ memcpy(out_real, in, p);
+ if (flags == UNESCAPE_ANY)
+ q_real = string_unescape_any_inplace(out_real);
+ else
+ q_real = string_unescape_inplace(out_real, flags);
+ } else if (flags == UNESCAPE_ANY) {
+ q_real = string_unescape_any(in, out_real, q_real);
+ } else {
+ q_real = string_unescape(in, out_real, q_real, flags);
+ }
+
+ test_string_check_buf(name, flags, in, p - 1, out_real, q_real,
+ out_test, q_test);
+out:
+ kfree(out_real);
+ kfree(out_test);
+ kfree(in);
+}
+
+struct test_string_1 {
+ const char *out;
+ unsigned int flags;
+};
+
+#define TEST_STRING_2_MAX_S1 32
+struct test_string_2 {
+ const char *in;
+ struct test_string_1 s1[TEST_STRING_2_MAX_S1];
+};
+
+#define TEST_STRING_2_DICT_0 NULL
+static const struct test_string_2 escape0[] __initconst = {{
+ .in = "\f\\ \n\r\t\v",
+ .s1 = {{
+ .out = "\\f\\ \\n\\r\\t\\v",
+ .flags = ESCAPE_SPACE,
+ },{
+ .out = "\\f\\134\\040\\n\\r\\t\\v",
+ .flags = ESCAPE_SPACE | ESCAPE_OCTAL,
+ },{
+ .out = "\\f\\x5c\\x20\\n\\r\\t\\v",
+ .flags = ESCAPE_SPACE | ESCAPE_HEX,
+ },{
+ /* terminator */
+ }}
+},{
+ .in = "\\h\\\"\a\e\\",
+ .s1 = {{
+ .out = "\\\\h\\\\\\\"\\a\\e\\\\",
+ .flags = ESCAPE_SPECIAL,
+ },{
+ .out = "\\\\\\150\\\\\\\"\\a\\e\\\\",
+ .flags = ESCAPE_SPECIAL | ESCAPE_OCTAL,
+ },{
+ .out = "\\\\\\x68\\\\\\\"\\a\\e\\\\",
+ .flags = ESCAPE_SPECIAL | ESCAPE_HEX,
+ },{
+ /* terminator */
+ }}
+},{
+ .in = "\eb \\C\007\"\x90\r]",
+ .s1 = {{
+ .out = "\eb \\C\007\"\x90\\r]",
+ .flags = ESCAPE_SPACE,
+ },{
+ .out = "\\eb \\\\C\\a\\\"\x90\r]",
+ .flags = ESCAPE_SPECIAL,
+ },{
+ .out = "\\eb \\\\C\\a\\\"\x90\\r]",
+ .flags = ESCAPE_SPACE | ESCAPE_SPECIAL,
+ },{
+ .out = "\\033\\142\\040\\134\\103\\007\\042\\220\\015\\135",
+ .flags = ESCAPE_OCTAL,
+ },{
+ .out = "\\033\\142\\040\\134\\103\\007\\042\\220\\r\\135",
+ .flags = ESCAPE_SPACE | ESCAPE_OCTAL,
+ },{
+ .out = "\\e\\142\\040\\\\\\103\\a\\\"\\220\\015\\135",
+ .flags = ESCAPE_SPECIAL | ESCAPE_OCTAL,
+ },{
+ .out = "\\e\\142\\040\\\\\\103\\a\\\"\\220\\r\\135",
+ .flags = ESCAPE_SPACE | ESCAPE_SPECIAL | ESCAPE_OCTAL,
+ },{
+ .out = "\eb \\C\007\"\x90\r]",
+ .flags = ESCAPE_NP,
+ },{
+ .out = "\eb \\C\007\"\x90\\r]",
+ .flags = ESCAPE_SPACE | ESCAPE_NP,
+ },{
+ .out = "\\eb \\C\\a\"\x90\r]",
+ .flags = ESCAPE_SPECIAL | ESCAPE_NP,
+ },{
+ .out = "\\eb \\C\\a\"\x90\\r]",
+ .flags = ESCAPE_SPACE | ESCAPE_SPECIAL | ESCAPE_NP,
+ },{
+ .out = "\\033b \\C\\007\"\\220\\015]",
+ .flags = ESCAPE_OCTAL | ESCAPE_NP,
+ },{
+ .out = "\\033b \\C\\007\"\\220\\r]",
+ .flags = ESCAPE_SPACE | ESCAPE_OCTAL | ESCAPE_NP,
+ },{
+ .out = "\\eb \\C\\a\"\\220\\r]",
+ .flags = ESCAPE_SPECIAL | ESCAPE_SPACE | ESCAPE_OCTAL |
+ ESCAPE_NP,
+ },{
+ .out = "\\x1bb \\C\\x07\"\\x90\\x0d]",
+ .flags = ESCAPE_NP | ESCAPE_HEX,
+ },{
+ /* terminator */
+ }}
+},{
+ .in = "\007 \eb\"\x90\xCF\r",
+ .s1 = {{
+ .out = "\007 \eb\"\\220\\317\r",
+ .flags = ESCAPE_OCTAL | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\\x90\\xcf\r",
+ .flags = ESCAPE_HEX | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\xCF\r",
+ .flags = ESCAPE_NA,
+ },{
+ /* terminator */
+ }}
+},{
+ /* terminator */
+}};
+
+#define TEST_STRING_2_DICT_1 "b\\ \t\r\xCF"
+static const struct test_string_2 escape1[] __initconst = {{
+ .in = "\f\\ \n\r\t\v",
+ .s1 = {{
+ .out = "\f\\134\\040\n\\015\\011\v",
+ .flags = ESCAPE_OCTAL,
+ },{
+ .out = "\f\\x5c\\x20\n\\x0d\\x09\v",
+ .flags = ESCAPE_HEX,
+ },{
+ .out = "\f\\134\\040\n\\015\\011\v",
+ .flags = ESCAPE_ANY | ESCAPE_APPEND,
+ },{
+ .out = "\\014\\134\\040\\012\\015\\011\\013",
+ .flags = ESCAPE_OCTAL | ESCAPE_APPEND | ESCAPE_NAP,
+ },{
+ .out = "\\x0c\\x5c\\x20\\x0a\\x0d\\x09\\x0b",
+ .flags = ESCAPE_HEX | ESCAPE_APPEND | ESCAPE_NAP,
+ },{
+ .out = "\f\\134\\040\n\\015\\011\v",
+ .flags = ESCAPE_OCTAL | ESCAPE_APPEND | ESCAPE_NA,
+ },{
+ .out = "\f\\x5c\\x20\n\\x0d\\x09\v",
+ .flags = ESCAPE_HEX | ESCAPE_APPEND | ESCAPE_NA,
+ },{
+ /* terminator */
+ }}
+},{
+ .in = "\\h\\\"\a\xCF\e\\",
+ .s1 = {{
+ .out = "\\134h\\134\"\a\\317\e\\134",
+ .flags = ESCAPE_OCTAL,
+ },{
+ .out = "\\134h\\134\"\a\\317\e\\134",
+ .flags = ESCAPE_ANY | ESCAPE_APPEND,
+ },{
+ .out = "\\134h\\134\"\\007\\317\\033\\134",
+ .flags = ESCAPE_OCTAL | ESCAPE_APPEND | ESCAPE_NAP,
+ },{
+ .out = "\\134h\\134\"\a\\317\e\\134",
+ .flags = ESCAPE_OCTAL | ESCAPE_APPEND | ESCAPE_NA,
+ },{
+ /* terminator */
+ }}
+},{
+ .in = "\eb \\C\007\"\x90\r]",
+ .s1 = {{
+ .out = "\e\\142\\040\\134C\007\"\x90\\015]",
+ .flags = ESCAPE_OCTAL,
+ },{
+ /* terminator */
+ }}
+},{
+ .in = "\007 \eb\"\x90\xCF\r",
+ .s1 = {{
+ .out = "\007 \eb\"\x90\xCF\r",
+ .flags = ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\xCF\r",
+ .flags = ESCAPE_SPACE | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\xCF\r",
+ .flags = ESCAPE_SPECIAL | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\xCF\r",
+ .flags = ESCAPE_SPACE | ESCAPE_SPECIAL | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\\317\r",
+ .flags = ESCAPE_OCTAL | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\\317\r",
+ .flags = ESCAPE_SPACE | ESCAPE_OCTAL | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\\317\r",
+ .flags = ESCAPE_SPECIAL | ESCAPE_OCTAL | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\\317\r",
+ .flags = ESCAPE_ANY | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\\xcf\r",
+ .flags = ESCAPE_HEX | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\\xcf\r",
+ .flags = ESCAPE_SPACE | ESCAPE_HEX | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\\xcf\r",
+ .flags = ESCAPE_SPECIAL | ESCAPE_HEX | ESCAPE_NA,
+ },{
+ .out = "\007 \eb\"\x90\\xcf\r",
+ .flags = ESCAPE_SPACE | ESCAPE_SPECIAL | ESCAPE_HEX | ESCAPE_NA,
+ },{
+ /* terminator */
+ }}
+},{
+ .in = "\007 \eb\"\x90\xCF\r",
+ .s1 = {{
+ .out = "\007 \eb\"\x90\xCF\r",
+ .flags = ESCAPE_NAP,
+ },{
+ .out = "\007 \eb\"\x90\xCF\\r",
+ .flags = ESCAPE_SPACE | ESCAPE_NAP,
+ },{
+ .out = "\007 \eb\"\x90\xCF\r",
+ .flags = ESCAPE_SPECIAL | ESCAPE_NAP,
+ },{
+ .out = "\007 \eb\"\x90\xCF\\r",
+ .flags = ESCAPE_SPACE | ESCAPE_SPECIAL | ESCAPE_NAP,
+ },{
+ .out = "\007 \eb\"\x90\\317\\015",
+ .flags = ESCAPE_OCTAL | ESCAPE_NAP,
+ },{
+ .out = "\007 \eb\"\x90\\317\\r",
+ .flags = ESCAPE_SPACE | ESCAPE_OCTAL | ESCAPE_NAP,
+ },{
+ .out = "\007 \eb\"\x90\\317\\015",
+ .flags = ESCAPE_SPECIAL | ESCAPE_OCTAL | ESCAPE_NAP,
+ },{
+ .out = "\007 \eb\"\x90\\317\r",
+ .flags = ESCAPE_ANY | ESCAPE_NAP,
+ },{
+ .out = "\007 \eb\"\x90\\xcf\\x0d",
+ .flags = ESCAPE_HEX | ESCAPE_NAP,
+ },{
+ .out = "\007 \eb\"\x90\\xcf\\r",
+ .flags = ESCAPE_SPACE | ESCAPE_HEX | ESCAPE_NAP,
+ },{
+ .out = "\007 \eb\"\x90\\xcf\\x0d",
+ .flags = ESCAPE_SPECIAL | ESCAPE_HEX | ESCAPE_NAP,
+ },{
+ .out = "\007 \eb\"\x90\\xcf\\r",
+ .flags = ESCAPE_SPACE | ESCAPE_SPECIAL | ESCAPE_HEX | ESCAPE_NAP,
+ },{
+ /* terminator */
+ }}
+},{
+ /* terminator */
+}};
+
+static const struct test_string strings_upper[] __initconst = {
+ {
+ .in = "abcdefgh1234567890test",
+ .out = "ABCDEFGH1234567890TEST",
+ },
+ {
+ .in = "abCdeFgH1234567890TesT",
+ .out = "ABCDEFGH1234567890TEST",
+ },
+};
+
+static const struct test_string strings_lower[] __initconst = {
+ {
+ .in = "ABCDEFGH1234567890TEST",
+ .out = "abcdefgh1234567890test",
+ },
+ {
+ .in = "abCdeFgH1234567890TesT",
+ .out = "abcdefgh1234567890test",
+ },
+};
+
+static __init const char *test_string_find_match(const struct test_string_2 *s2,
+ unsigned int flags)
+{
+ const struct test_string_1 *s1 = s2->s1;
+ unsigned int i;
+
+ if (!flags)
+ return s2->in;
+
+ /* Test cases are NULL-aware */
+ flags &= ~ESCAPE_NULL;
+
+ /* ESCAPE_OCTAL has a higher priority */
+ if (flags & ESCAPE_OCTAL)
+ flags &= ~ESCAPE_HEX;
+
+ for (i = 0; i < TEST_STRING_2_MAX_S1 && s1->out; i++, s1++)
+ if (s1->flags == flags)
+ return s1->out;
+ return NULL;
+}
+
+static __init void
+test_string_escape_overflow(const char *in, int p, unsigned int flags, const char *esc,
+ int q_test, const char *name)
+{
+ int q_real;
+
+ q_real = string_escape_mem(in, p, NULL, 0, flags, esc);
+ if (q_real != q_test)
+ pr_warn("Test '%s' failed: flags = %#x, osz = 0, expected %d, got %d\n",
+ name, flags, q_test, q_real);
+}
+
+static __init void test_string_escape(const char *name,
+ const struct test_string_2 *s2,
+ unsigned int flags, const char *esc)
+{
+ size_t out_size = 512;
+ char *out_test = kmalloc(out_size, GFP_KERNEL);
+ char *out_real = kmalloc(out_size, GFP_KERNEL);
+ char *in = kmalloc(256, GFP_KERNEL);
+ int p = 0, q_test = 0;
+ int q_real;
+
+ if (!out_test || !out_real || !in)
+ goto out;
+
+ for (; s2->in; s2++) {
+ const char *out;
+ int len;
+
+ /* NULL injection */
+ if (flags & ESCAPE_NULL) {
+ in[p++] = '\0';
+ /* '\0' passes isascii() test */
+ if (flags & ESCAPE_NA && !(flags & ESCAPE_APPEND && esc)) {
+ out_test[q_test++] = '\0';
+ } else {
+ out_test[q_test++] = '\\';
+ out_test[q_test++] = '0';
+ }
+ }
+
+ /* Don't try strings that have no output */
+ out = test_string_find_match(s2, flags);
+ if (!out)
+ continue;
+
+ /* Copy string to in buffer */
+ len = strlen(s2->in);
+ memcpy(&in[p], s2->in, len);
+ p += len;
+
+ /* Copy expected result for given flags */
+ len = strlen(out);
+ memcpy(&out_test[q_test], out, len);
+ q_test += len;
+ }
+
+ q_real = string_escape_mem(in, p, out_real, out_size, flags, esc);
+
+ test_string_check_buf(name, flags, in, p, out_real, q_real, out_test,
+ q_test);
+
+ test_string_escape_overflow(in, p, flags, esc, q_test, name);
+
+out:
+ kfree(in);
+ kfree(out_real);
+ kfree(out_test);
+}
+
+#define string_get_size_maxbuf 16
+#define test_string_get_size_one(size, blk_size, exp_result10, exp_result2) \
+ do { \
+ BUILD_BUG_ON(sizeof(exp_result10) >= string_get_size_maxbuf); \
+ BUILD_BUG_ON(sizeof(exp_result2) >= string_get_size_maxbuf); \
+ __test_string_get_size((size), (blk_size), (exp_result10), \
+ (exp_result2)); \
+ } while (0)
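+
+/*
+ * sizeof() on a string literal counts the trailing NUL, so the
+ * BUILD_BUG_ON()s above fire at compile time once an expected literal,
+ * NUL included, reaches the 16-byte result buffer size.
+ */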
+
+static __init void test_string_get_size_check(const char *units,
+ const char *exp,
+ char *res,
+ const u64 size,
+ const u64 blk_size)
+{
+ if (!memcmp(res, exp, strlen(exp) + 1))
+ return;
+
+ res[string_get_size_maxbuf - 1] = '\0';
+
+ pr_warn("Test 'test_string_get_size' failed!\n");
+ pr_warn("string_get_size(size = %llu, blk_size = %llu, units = %s)\n",
+ size, blk_size, units);
+ pr_warn("expected: '%s', got '%s'\n", exp, res);
+}
+
+static __init void __test_string_get_size(const u64 size, const u64 blk_size,
+ const char *exp_result10,
+ const char *exp_result2)
+{
+ char buf10[string_get_size_maxbuf];
+ char buf2[string_get_size_maxbuf];
+
+ string_get_size(size, blk_size, STRING_UNITS_10, buf10, sizeof(buf10));
+ string_get_size(size, blk_size, STRING_UNITS_2, buf2, sizeof(buf2));
+
+ test_string_get_size_check("STRING_UNITS_10", exp_result10, buf10,
+ size, blk_size);
+
+ test_string_get_size_check("STRING_UNITS_2", exp_result2, buf2,
+ size, blk_size);
+}
+
+static __init void test_string_get_size(void)
+{
+ /* small values */
+ test_string_get_size_one(0, 512, "0 B", "0 B");
+ test_string_get_size_one(1, 512, "512 B", "512 B");
+ test_string_get_size_one(1100, 1, "1.10 kB", "1.07 KiB");
+
+ /* normal values */
+ test_string_get_size_one(16384, 512, "8.39 MB", "8.00 MiB");
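+ /*
+ * 500118192 blocks of 512 bytes = 256,060,514,304 bytes, which is
+ * ~256 * 10^9 ("256 GB") and ~238.47 * 2^30 ("238 GiB").
+ */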
+ test_string_get_size_one(500118192, 512, "256 GB", "238 GiB");
+ test_string_get_size_one(8192, 4096, "33.6 MB", "32.0 MiB");
+
+ /* weird block sizes */
+ test_string_get_size_one(3000, 1900, "5.70 MB", "5.44 MiB");
+
+ /* huge values */
+ test_string_get_size_one(U64_MAX, 4096, "75.6 ZB", "64.0 ZiB");
+ test_string_get_size_one(4096, U64_MAX, "75.6 ZB", "64.0 ZiB");
+}
+
+static void __init test_string_upper_lower(void)
+{
+ char *dst;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(strings_upper); i++) {
+ const char *s = strings_upper[i].in;
+ int len = strlen(strings_upper[i].in) + 1;
+
+ dst = kmalloc(len, GFP_KERNEL);
+ if (!dst)
+ return;
+
+ string_upper(dst, s);
+ if (memcmp(dst, strings_upper[i].out, len)) {
+ pr_warn("Test 'string_upper' failed : expected %s, got %s!\n",
+ strings_upper[i].out, dst);
+ kfree(dst);
+ return;
+ }
+ kfree(dst);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(strings_lower); i++) {
+ const char *s = strings_lower[i].in;
+ int len = strlen(strings_lower[i].in) + 1;
+
+ dst = kmalloc(len, GFP_KERNEL);
+ if (!dst)
+ return;
+
+ string_lower(dst, s);
+ if (memcmp(dst, strings_lower[i].out, len)) {
+ pr_warn("Test 'string_lower failed : : expected %s, got %s!\n",
+ strings_lower[i].out, dst);
+ kfree(dst);
+ return;
+ }
+ kfree(dst);
+ }
+}
+
+static int __init test_string_helpers_init(void)
+{
+ unsigned int i;
+
+ pr_info("Running tests...\n");
+ for (i = 0; i < UNESCAPE_ALL_MASK + 1; i++)
+ test_string_unescape("unescape", i, false);
+ test_string_unescape("unescape inplace",
+ get_random_u32_below(UNESCAPE_ALL_MASK + 1), true);
+
+ /* Without dictionary */
+ for (i = 0; i < ESCAPE_ALL_MASK + 1; i++)
+ test_string_escape("escape 0", escape0, i, TEST_STRING_2_DICT_0);
+
+ /* With dictionary */
+ for (i = 0; i < ESCAPE_ALL_MASK + 1; i++)
+ test_string_escape("escape 1", escape1, i, TEST_STRING_2_DICT_1);
+
+ /* Test string_get_size() */
+ test_string_get_size();
+
+ /* Test string upper(), string_lower() */
+ test_string_upper_lower();
+
+ return -EINVAL;
+}
+module_init(test_string_helpers_init);
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c
new file mode 100644
index 000000000..f2ea9f30c
--- /dev/null
+++ b/lib/test_bitmap.c
@@ -0,0 +1,1256 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Test cases for bitmap API.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/bitmap.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+
+#include "../tools/testing/selftests/kselftest_module.h"
+
+#define EXP1_IN_BITS (sizeof(exp1) * 8)
+
+KSTM_MODULE_GLOBALS();
+
+static char pbl_buffer[PAGE_SIZE] __initdata;
+static char print_buf[PAGE_SIZE * 2] __initdata;
+
+static const unsigned long exp1[] __initconst = {
+ BITMAP_FROM_U64(1),
+ BITMAP_FROM_U64(2),
+ BITMAP_FROM_U64(0x0000ffff),
+ BITMAP_FROM_U64(0xffff0000),
+ BITMAP_FROM_U64(0x55555555),
+ BITMAP_FROM_U64(0xaaaaaaaa),
+ BITMAP_FROM_U64(0x11111111),
+ BITMAP_FROM_U64(0x22222222),
+ BITMAP_FROM_U64(0xffffffff),
+ BITMAP_FROM_U64(0xfffffffe),
+ BITMAP_FROM_U64(0x3333333311111111ULL),
+ BITMAP_FROM_U64(0xffffffff77777777ULL),
+ BITMAP_FROM_U64(0),
+ BITMAP_FROM_U64(0x00008000),
+ BITMAP_FROM_U64(0x80000000),
+};
+
+static const unsigned long exp2[] __initconst = {
+ BITMAP_FROM_U64(0x3333333311111111ULL),
+ BITMAP_FROM_U64(0xffffffff77777777ULL),
+};
+
+/* Fibonacci sequence */
+static const unsigned long exp2_to_exp3_mask[] __initconst = {
+ BITMAP_FROM_U64(0x008000020020212eULL),
+};
+/* exp3_0_1 = (exp2[0] & ~exp2_to_exp3_mask) | (exp2[1] & exp2_to_exp3_mask) */
+static const unsigned long exp3_0_1[] __initconst = {
+ BITMAP_FROM_U64(0x33b3333311313137ULL),
+};
+/* exp3_1_0 = (exp2[1] & ~exp2_to_exp3_mask) | (exp2[0] & exp2_to_exp3_mask) */
+static const unsigned long exp3_1_0[] __initconst = {
+ BITMAP_FROM_U64(0xff7fffff77575751ULL),
+};
+
+static bool __init
+__check_eq_uint(const char *srcfile, unsigned int line,
+ const unsigned int exp_uint, unsigned int x)
+{
+ if (exp_uint != x) {
+ pr_err("[%s:%u] expected %u, got %u\n",
+ srcfile, line, exp_uint, x);
+ return false;
+ }
+ return true;
+}
+
+static bool __init
+__check_eq_bitmap(const char *srcfile, unsigned int line,
+ const unsigned long *exp_bmap, const unsigned long *bmap,
+ unsigned int nbits)
+{
+ if (!bitmap_equal(exp_bmap, bmap, nbits)) {
+ pr_warn("[%s:%u] bitmaps contents differ: expected \"%*pbl\", got \"%*pbl\"\n",
+ srcfile, line,
+ nbits, exp_bmap, nbits, bmap);
+ return false;
+ }
+ return true;
+}
+
+static bool __init
+__check_eq_pbl(const char *srcfile, unsigned int line,
+ const char *expected_pbl,
+ const unsigned long *bitmap, unsigned int nbits)
+{
+ snprintf(pbl_buffer, sizeof(pbl_buffer), "%*pbl", nbits, bitmap);
+ if (strcmp(expected_pbl, pbl_buffer)) {
+ pr_warn("[%s:%u] expected \"%s\", got \"%s\"\n",
+ srcfile, line,
+ expected_pbl, pbl_buffer);
+ return false;
+ }
+ return true;
+}
+
+static bool __init
+__check_eq_u32_array(const char *srcfile, unsigned int line,
+ const u32 *exp_arr, unsigned int exp_len,
+ const u32 *arr, unsigned int len) __used;
+static bool __init
+__check_eq_u32_array(const char *srcfile, unsigned int line,
+ const u32 *exp_arr, unsigned int exp_len,
+ const u32 *arr, unsigned int len)
+{
+ if (exp_len != len) {
+ pr_warn("[%s:%u] array length differ: expected %u, got %u\n",
+ srcfile, line,
+ exp_len, len);
+ return false;
+ }
+
+ if (memcmp(exp_arr, arr, len*sizeof(*arr))) {
+ pr_warn("[%s:%u] array contents differ\n", srcfile, line);
+ print_hex_dump(KERN_WARNING, " exp: ", DUMP_PREFIX_OFFSET,
+ 32, 4, exp_arr, exp_len*sizeof(*exp_arr), false);
+ print_hex_dump(KERN_WARNING, " got: ", DUMP_PREFIX_OFFSET,
+ 32, 4, arr, len*sizeof(*arr), false);
+ return false;
+ }
+
+ return true;
+}
+
+static bool __init __check_eq_clump8(const char *srcfile, unsigned int line,
+ const unsigned int offset,
+ const unsigned int size,
+ const unsigned char *const clump_exp,
+ const unsigned long *const clump)
+{
+ unsigned long exp;
+
+ if (offset >= size) {
+ pr_warn("[%s:%u] bit offset for clump out-of-bounds: expected less than %u, got %u\n",
+ srcfile, line, size, offset);
+ return false;
+ }
+
+ exp = clump_exp[offset / 8];
+ if (!exp) {
+ pr_warn("[%s:%u] bit offset for zero clump: expected nonzero clump, got bit offset %u with clump value 0",
+ srcfile, line, offset);
+ return false;
+ }
+
+ if (*clump != exp) {
+ pr_warn("[%s:%u] expected clump value of 0x%lX, got clump value of 0x%lX",
+ srcfile, line, exp, *clump);
+ return false;
+ }
+
+ return true;
+}
+
+static bool __init
+__check_eq_str(const char *srcfile, unsigned int line,
+ const char *exp_str, const char *str,
+ unsigned int len)
+{
+ bool eq;
+
+ eq = strncmp(exp_str, str, len) == 0;
+ if (!eq)
+ pr_err("[%s:%u] expected %s, got %s\n", srcfile, line, exp_str, str);
+
+ return eq;
+}
+
+#define __expect_eq(suffix, ...) \
+ ({ \
+ int result = 0; \
+ total_tests++; \
+ if (!__check_eq_ ## suffix(__FILE__, __LINE__, \
+ ##__VA_ARGS__)) { \
+ failed_tests++; \
+ result = 1; \
+ } \
+ result; \
+ })
+
+#define expect_eq_uint(...) __expect_eq(uint, ##__VA_ARGS__)
+#define expect_eq_bitmap(...) __expect_eq(bitmap, ##__VA_ARGS__)
+#define expect_eq_pbl(...) __expect_eq(pbl, ##__VA_ARGS__)
+#define expect_eq_u32_array(...) __expect_eq(u32_array, ##__VA_ARGS__)
+#define expect_eq_clump8(...) __expect_eq(clump8, ##__VA_ARGS__)
+#define expect_eq_str(...) __expect_eq(str, ##__VA_ARGS__)
+
+static void __init test_zero_clear(void)
+{
+ DECLARE_BITMAP(bmap, 1024);
+
+ /* Known way to set all bits */
+ memset(bmap, 0xff, 128);
+
+ expect_eq_pbl("0-22", bmap, 23);
+ expect_eq_pbl("0-1023", bmap, 1024);
+
+ /* single-word bitmaps */
+ bitmap_clear(bmap, 0, 9);
+ expect_eq_pbl("9-1023", bmap, 1024);
+
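+ /*
+ * bitmap_zero() clears BITS_TO_LONGS(len) whole longs, so zeroing
+ * 35 bits wipes bits 0-63 on both 32- and 64-bit machines.
+ */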
+ bitmap_zero(bmap, 35);
+ expect_eq_pbl("64-1023", bmap, 1024);
+
+ /* cross boundaries operations */
+ bitmap_clear(bmap, 79, 19);
+ expect_eq_pbl("64-78,98-1023", bmap, 1024);
+
+ bitmap_zero(bmap, 115);
+ expect_eq_pbl("128-1023", bmap, 1024);
+
+ /* Zeroing entire area */
+ bitmap_zero(bmap, 1024);
+ expect_eq_pbl("", bmap, 1024);
+}
+
+static void __init test_find_nth_bit(void)
+{
+ unsigned long b, bit, cnt = 0;
+ DECLARE_BITMAP(bmap, 64 * 3);
+
+ bitmap_zero(bmap, 64 * 3);
+ __set_bit(10, bmap);
+ __set_bit(20, bmap);
+ __set_bit(30, bmap);
+ __set_bit(40, bmap);
+ __set_bit(50, bmap);
+ __set_bit(60, bmap);
+ __set_bit(80, bmap);
+ __set_bit(123, bmap);
+
+ expect_eq_uint(10, find_nth_bit(bmap, 64 * 3, 0));
+ expect_eq_uint(20, find_nth_bit(bmap, 64 * 3, 1));
+ expect_eq_uint(30, find_nth_bit(bmap, 64 * 3, 2));
+ expect_eq_uint(40, find_nth_bit(bmap, 64 * 3, 3));
+ expect_eq_uint(50, find_nth_bit(bmap, 64 * 3, 4));
+ expect_eq_uint(60, find_nth_bit(bmap, 64 * 3, 5));
+ expect_eq_uint(80, find_nth_bit(bmap, 64 * 3, 6));
+ expect_eq_uint(123, find_nth_bit(bmap, 64 * 3, 7));
+ expect_eq_uint(64 * 3, find_nth_bit(bmap, 64 * 3, 8));
+
+ expect_eq_uint(10, find_nth_bit(bmap, 64 * 3 - 1, 0));
+ expect_eq_uint(20, find_nth_bit(bmap, 64 * 3 - 1, 1));
+ expect_eq_uint(30, find_nth_bit(bmap, 64 * 3 - 1, 2));
+ expect_eq_uint(40, find_nth_bit(bmap, 64 * 3 - 1, 3));
+ expect_eq_uint(50, find_nth_bit(bmap, 64 * 3 - 1, 4));
+ expect_eq_uint(60, find_nth_bit(bmap, 64 * 3 - 1, 5));
+ expect_eq_uint(80, find_nth_bit(bmap, 64 * 3 - 1, 6));
+ expect_eq_uint(123, find_nth_bit(bmap, 64 * 3 - 1, 7));
+ expect_eq_uint(64 * 3 - 1, find_nth_bit(bmap, 64 * 3 - 1, 8));
+
+ for_each_set_bit(bit, exp1, EXP1_IN_BITS) {
+ b = find_nth_bit(exp1, EXP1_IN_BITS, cnt++);
+ expect_eq_uint(b, bit);
+ }
+}
+
+static void __init test_fill_set(void)
+{
+ DECLARE_BITMAP(bmap, 1024);
+
+ /* Known way to clear all bits */
+ memset(bmap, 0x00, 128);
+
+ expect_eq_pbl("", bmap, 23);
+ expect_eq_pbl("", bmap, 1024);
+
+ /* single-word bitmaps */
+ bitmap_set(bmap, 0, 9);
+ expect_eq_pbl("0-8", bmap, 1024);
+
+ bitmap_fill(bmap, 35);
+ expect_eq_pbl("0-63", bmap, 1024);
+
+ /* cross boundaries operations */
+ bitmap_set(bmap, 79, 19);
+ expect_eq_pbl("0-63,79-97", bmap, 1024);
+
+ bitmap_fill(bmap, 115);
+ expect_eq_pbl("0-127", bmap, 1024);
+
+ /* Zeroing entire area */
+ bitmap_fill(bmap, 1024);
+ expect_eq_pbl("0-1023", bmap, 1024);
+}
+
+static void __init test_copy(void)
+{
+ DECLARE_BITMAP(bmap1, 1024);
+ DECLARE_BITMAP(bmap2, 1024);
+
+ bitmap_zero(bmap1, 1024);
+ bitmap_zero(bmap2, 1024);
+
+ /* single-word bitmaps */
+ bitmap_set(bmap1, 0, 19);
+ bitmap_copy(bmap2, bmap1, 23);
+ expect_eq_pbl("0-18", bmap2, 1024);
+
+ bitmap_set(bmap2, 0, 23);
+ bitmap_copy(bmap2, bmap1, 23);
+ expect_eq_pbl("0-18", bmap2, 1024);
+
+ /* multi-word bitmaps */
+ bitmap_set(bmap1, 0, 109);
+ bitmap_copy(bmap2, bmap1, 1024);
+ expect_eq_pbl("0-108", bmap2, 1024);
+
+ bitmap_fill(bmap2, 1024);
+ bitmap_copy(bmap2, bmap1, 1024);
+ expect_eq_pbl("0-108", bmap2, 1024);
+
+ /* the following tests assume a 32- or 64-bit arch (even 128-bit
+ * if we ever care)
+ */
+
+ bitmap_fill(bmap2, 1024);
+ bitmap_copy(bmap2, bmap1, 109); /* ... but 0-padded until word length */
+ expect_eq_pbl("0-108,128-1023", bmap2, 1024);
+
+ bitmap_fill(bmap2, 1024);
+ bitmap_copy(bmap2, bmap1, 97); /* ... but aligned on word length */
+ expect_eq_pbl("0-108,128-1023", bmap2, 1024);
+}
+
+#define EXP2_IN_BITS (sizeof(exp2) * 8)
+
+static void __init test_replace(void)
+{
+ unsigned int nbits = 64;
+ unsigned int nlongs = DIV_ROUND_UP(nbits, BITS_PER_LONG);
+ DECLARE_BITMAP(bmap, 1024);
+
+ BUILD_BUG_ON(EXP2_IN_BITS < nbits * 2);
+
+ bitmap_zero(bmap, 1024);
+ bitmap_replace(bmap, &exp2[0 * nlongs], &exp2[1 * nlongs], exp2_to_exp3_mask, nbits);
+ expect_eq_bitmap(bmap, exp3_0_1, nbits);
+
+ bitmap_zero(bmap, 1024);
+ bitmap_replace(bmap, &exp2[1 * nlongs], &exp2[0 * nlongs], exp2_to_exp3_mask, nbits);
+ expect_eq_bitmap(bmap, exp3_1_0, nbits);
+
+ bitmap_fill(bmap, 1024);
+ bitmap_replace(bmap, &exp2[0 * nlongs], &exp2[1 * nlongs], exp2_to_exp3_mask, nbits);
+ expect_eq_bitmap(bmap, exp3_0_1, nbits);
+
+ bitmap_fill(bmap, 1024);
+ bitmap_replace(bmap, &exp2[1 * nlongs], &exp2[0 * nlongs], exp2_to_exp3_mask, nbits);
+ expect_eq_bitmap(bmap, exp3_1_0, nbits);
+}
+
+#define PARSE_TIME 0x1
+#define NO_LEN 0x2
+
+struct test_bitmap_parselist {
+ const int errno;
+ const char *in;
+ const unsigned long *expected;
+ const int nbits;
+ const int flags;
+};
+
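+/*
+ * A region reads "start[-end][:used/group]": set the first 'used' bits
+ * of every 'group'-sized block in the range; "N" stands for the last
+ * valid bit index, nbits - 1. E.g. "0-31:1/2" yields 0x55555555, and
+ * "N-N" sets only the top bit.
+ */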
+static const struct test_bitmap_parselist parselist_tests[] __initconst = {
+#define step (sizeof(u64) / sizeof(unsigned long))
+
+ {0, "0", &exp1[0], 8, 0},
+ {0, "1", &exp1[1 * step], 8, 0},
+ {0, "0-15", &exp1[2 * step], 32, 0},
+ {0, "16-31", &exp1[3 * step], 32, 0},
+ {0, "0-31:1/2", &exp1[4 * step], 32, 0},
+ {0, "1-31:1/2", &exp1[5 * step], 32, 0},
+ {0, "0-31:1/4", &exp1[6 * step], 32, 0},
+ {0, "1-31:1/4", &exp1[7 * step], 32, 0},
+ {0, "0-31:4/4", &exp1[8 * step], 32, 0},
+ {0, "1-31:4/4", &exp1[9 * step], 32, 0},
+ {0, "0-31:1/4,32-63:2/4", &exp1[10 * step], 64, 0},
+ {0, "0-31:3/4,32-63:4/4", &exp1[11 * step], 64, 0},
+ {0, " ,, 0-31:3/4 ,, 32-63:4/4 ,, ", &exp1[11 * step], 64, 0},
+
+ {0, "0-31:1/4,32-63:2/4,64-95:3/4,96-127:4/4", exp2, 128, 0},
+
+ {0, "0-2047:128/256", NULL, 2048, PARSE_TIME},
+
+ {0, "", &exp1[12 * step], 8, 0},
+ {0, "\n", &exp1[12 * step], 8, 0},
+ {0, ",, ,, , , ,", &exp1[12 * step], 8, 0},
+ {0, " , ,, , , ", &exp1[12 * step], 8, 0},
+ {0, " , ,, , , \n", &exp1[12 * step], 8, 0},
+
+ {0, "0-0", &exp1[0], 32, 0},
+ {0, "1-1", &exp1[1 * step], 32, 0},
+ {0, "15-15", &exp1[13 * step], 32, 0},
+ {0, "31-31", &exp1[14 * step], 32, 0},
+
+ {0, "0-0:0/1", &exp1[12 * step], 32, 0},
+ {0, "0-0:1/1", &exp1[0], 32, 0},
+ {0, "0-0:1/31", &exp1[0], 32, 0},
+ {0, "0-0:31/31", &exp1[0], 32, 0},
+ {0, "1-1:1/1", &exp1[1 * step], 32, 0},
+ {0, "0-15:16/31", &exp1[2 * step], 32, 0},
+ {0, "15-15:1/2", &exp1[13 * step], 32, 0},
+ {0, "15-15:31/31", &exp1[13 * step], 32, 0},
+ {0, "15-31:1/31", &exp1[13 * step], 32, 0},
+ {0, "16-31:16/31", &exp1[3 * step], 32, 0},
+ {0, "31-31:31/31", &exp1[14 * step], 32, 0},
+
+ {0, "N-N", &exp1[14 * step], 32, 0},
+ {0, "0-0:1/N", &exp1[0], 32, 0},
+ {0, "0-0:N/N", &exp1[0], 32, 0},
+ {0, "0-15:16/N", &exp1[2 * step], 32, 0},
+ {0, "15-15:N/N", &exp1[13 * step], 32, 0},
+ {0, "15-N:1/N", &exp1[13 * step], 32, 0},
+ {0, "16-N:16/N", &exp1[3 * step], 32, 0},
+ {0, "N-N:N/N", &exp1[14 * step], 32, 0},
+
+ {0, "0-N:1/3,1-N:1/3,2-N:1/3", &exp1[8 * step], 32, 0},
+ {0, "0-31:1/3,1-31:1/3,2-31:1/3", &exp1[8 * step], 32, 0},
+ {0, "1-10:8/12,8-31:24/29,0-31:0/3", &exp1[9 * step], 32, 0},
+
+ {0, "all", &exp1[8 * step], 32, 0},
+ {0, "0, 1, all, ", &exp1[8 * step], 32, 0},
+ {0, "all:1/2", &exp1[4 * step], 32, 0},
+ {0, "ALL:1/2", &exp1[4 * step], 32, 0},
+ {-EINVAL, "al", NULL, 8, 0},
+ {-EINVAL, "alll", NULL, 8, 0},
+
+ {-EINVAL, "-1", NULL, 8, 0},
+ {-EINVAL, "-0", NULL, 8, 0},
+ {-EINVAL, "10-1", NULL, 8, 0},
+ {-ERANGE, "8-8", NULL, 8, 0},
+ {-ERANGE, "0-31", NULL, 8, 0},
+ {-EINVAL, "0-31:", NULL, 32, 0},
+ {-EINVAL, "0-31:0", NULL, 32, 0},
+ {-EINVAL, "0-31:0/", NULL, 32, 0},
+ {-EINVAL, "0-31:0/0", NULL, 32, 0},
+ {-EINVAL, "0-31:1/0", NULL, 32, 0},
+ {-EINVAL, "0-31:10/1", NULL, 32, 0},
+ {-EOVERFLOW, "0-98765432123456789:10/1", NULL, 8, 0},
+
+ {-EINVAL, "a-31", NULL, 8, 0},
+ {-EINVAL, "0-a1", NULL, 8, 0},
+ {-EINVAL, "a-31:10/1", NULL, 8, 0},
+ {-EINVAL, "0-31:a/1", NULL, 8, 0},
+ {-EINVAL, "0-\n", NULL, 8, 0},
+
+};
+
+static void __init test_bitmap_parselist(void)
+{
+ int i;
+ int err;
+ ktime_t time;
+ DECLARE_BITMAP(bmap, 2048);
+
+ for (i = 0; i < ARRAY_SIZE(parselist_tests); i++) {
+#define ptest parselist_tests[i]
+
+ time = ktime_get();
+ err = bitmap_parselist(ptest.in, bmap, ptest.nbits);
+ time = ktime_get() - time;
+
+ if (err != ptest.errno) {
+ pr_err("parselist: %d: input is %s, errno is %d, expected %d\n",
+ i, ptest.in, err, ptest.errno);
+ failed_tests++;
+ continue;
+ }
+
+ if (!err && ptest.expected
+ && !__bitmap_equal(bmap, ptest.expected, ptest.nbits)) {
+ pr_err("parselist: %d: input is %s, result is 0x%lx, expected 0x%lx\n",
+ i, ptest.in, bmap[0],
+ *ptest.expected);
+ failed_tests++;
+ continue;
+ }
+
+ if (ptest.flags & PARSE_TIME)
+ pr_err("parselist: %d: input is '%s' OK, Time: %llu\n",
+ i, ptest.in, time);
+
+#undef ptest
+ }
+}
+
+static void __init test_bitmap_printlist(void)
+{
+ unsigned long *bmap = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ char expected[256];
+ int ret, slen;
+ ktime_t time;
+
+ if (!buf || !bmap)
+ goto out;
+
+ memset(bmap, -1, PAGE_SIZE);
+ slen = snprintf(expected, 256, "0-%ld", PAGE_SIZE * 8 - 1);
+ if (slen < 0)
+ goto out;
+
+ time = ktime_get();
+ ret = bitmap_print_to_pagebuf(true, buf, bmap, PAGE_SIZE * 8);
+ time = ktime_get() - time;
+
+ if (ret != slen + 1) {
+ pr_err("bitmap_print_to_pagebuf: result is %d, expected %d\n", ret, slen);
+ failed_tests++;
+ goto out;
+ }
+
+ if (strncmp(buf, expected, slen)) {
+ pr_err("bitmap_print_to_pagebuf: result is %s, expected %s\n", buf, expected);
+ failed_tests++;
+ goto out;
+ }
+
+ pr_err("bitmap_print_to_pagebuf: input is '%s', Time: %llu\n", buf, time);
+out:
+ kfree(buf);
+ kfree(bmap);
+}
+
+static const unsigned long parse_test[] __initconst = {
+ BITMAP_FROM_U64(0),
+ BITMAP_FROM_U64(1),
+ BITMAP_FROM_U64(0xdeadbeef),
+ BITMAP_FROM_U64(0x100000000ULL),
+};
+
+static const unsigned long parse_test2[] __initconst = {
+ BITMAP_FROM_U64(0x100000000ULL), BITMAP_FROM_U64(0xdeadbeef),
+ BITMAP_FROM_U64(0x100000000ULL), BITMAP_FROM_U64(0xbaadf00ddeadbeef),
+ BITMAP_FROM_U64(0x100000000ULL), BITMAP_FROM_U64(0x0badf00ddeadbeef),
+};
+
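+/*
+ * bitmap_parse() input is comma-separated 32-bit hex chunks, most
+ * significant chunk first: "1,0" is bit 32 set, i.e. 0x100000000.
+ */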
+static const struct test_bitmap_parselist parse_tests[] __initconst = {
+ {0, "", &parse_test[0 * step], 32, 0},
+ {0, " ", &parse_test[0 * step], 32, 0},
+ {0, "0", &parse_test[0 * step], 32, 0},
+ {0, "0\n", &parse_test[0 * step], 32, 0},
+ {0, "1", &parse_test[1 * step], 32, 0},
+ {0, "deadbeef", &parse_test[2 * step], 32, 0},
+ {0, "1,0", &parse_test[3 * step], 33, 0},
+ {0, "deadbeef,\n,0,1", &parse_test[2 * step], 96, 0},
+
+ {0, "deadbeef,1,0", &parse_test2[0 * 2 * step], 96, 0},
+ {0, "baadf00d,deadbeef,1,0", &parse_test2[1 * 2 * step], 128, 0},
+ {0, "badf00d,deadbeef,1,0", &parse_test2[2 * 2 * step], 124, 0},
+ {0, "badf00d,deadbeef,1,0", &parse_test2[2 * 2 * step], 124, NO_LEN},
+ {0, " badf00d,deadbeef,1,0 ", &parse_test2[2 * 2 * step], 124, 0},
+ {0, " , badf00d,deadbeef,1,0 , ", &parse_test2[2 * 2 * step], 124, 0},
+ {0, " , badf00d, ,, ,,deadbeef,1,0 , ", &parse_test2[2 * 2 * step], 124, 0},
+
+ {-EINVAL, "goodfood,deadbeef,1,0", NULL, 128, 0},
+ {-EOVERFLOW, "3,0", NULL, 33, 0},
+ {-EOVERFLOW, "123badf00d,deadbeef,1,0", NULL, 128, 0},
+ {-EOVERFLOW, "badf00d,deadbeef,1,0", NULL, 90, 0},
+ {-EOVERFLOW, "fbadf00d,deadbeef,1,0", NULL, 95, 0},
+ {-EOVERFLOW, "badf00d,deadbeef,1,0", NULL, 100, 0},
+#undef step
+};
+
+static void __init test_bitmap_parse(void)
+{
+ int i;
+ int err;
+ ktime_t time;
+ DECLARE_BITMAP(bmap, 2048);
+
+ for (i = 0; i < ARRAY_SIZE(parse_tests); i++) {
+ struct test_bitmap_parselist test = parse_tests[i];
+ size_t len = test.flags & NO_LEN ? UINT_MAX : strlen(test.in);
+
+ time = ktime_get();
+ err = bitmap_parse(test.in, len, bmap, test.nbits);
+ time = ktime_get() - time;
+
+ if (err != test.errno) {
+ pr_err("parse: %d: input is %s, errno is %d, expected %d\n",
+ i, test.in, err, test.errno);
+ failed_tests++;
+ continue;
+ }
+
+ if (!err && test.expected
+ && !__bitmap_equal(bmap, test.expected, test.nbits)) {
+ pr_err("parse: %d: input is %s, result is 0x%lx, expected 0x%lx\n",
+ i, test.in, bmap[0],
+ *test.expected);
+ failed_tests++;
+ continue;
+ }
+
+ if (test.flags & PARSE_TIME)
+ pr_err("parse: %d: input is '%s' OK, Time: %llu\n",
+ i, test.in, time);
+ }
+}
+
+static void __init test_bitmap_arr32(void)
+{
+ unsigned int nbits, next_bit;
+ u32 arr[EXP1_IN_BITS / 32];
+ DECLARE_BITMAP(bmap2, EXP1_IN_BITS);
+
+ memset(arr, 0xa5, sizeof(arr));
+
+ for (nbits = 0; nbits < EXP1_IN_BITS; ++nbits) {
+ bitmap_to_arr32(arr, exp1, nbits);
+ bitmap_from_arr32(bmap2, arr, nbits);
+ expect_eq_bitmap(bmap2, exp1, nbits);
+
+ next_bit = find_next_bit(bmap2,
+ round_up(nbits, BITS_PER_LONG), nbits);
+ if (next_bit < round_up(nbits, BITS_PER_LONG)) {
+ pr_err("bitmap_copy_arr32(nbits == %d:"
+ " tail is not safely cleared: %d\n",
+ nbits, next_bit);
+ failed_tests++;
+ }
+
+ if (nbits < EXP1_IN_BITS - 32)
+ expect_eq_uint(arr[DIV_ROUND_UP(nbits, 32)],
+ 0xa5a5a5a5);
+ }
+}
+
+static void __init test_bitmap_arr64(void)
+{
+ unsigned int nbits, next_bit;
+ u64 arr[EXP1_IN_BITS / 64];
+ DECLARE_BITMAP(bmap2, EXP1_IN_BITS);
+
+ memset(arr, 0xa5, sizeof(arr));
+
+ for (nbits = 0; nbits < EXP1_IN_BITS; ++nbits) {
+ memset(bmap2, 0xff, sizeof(arr));
+ bitmap_to_arr64(arr, exp1, nbits);
+ bitmap_from_arr64(bmap2, arr, nbits);
+ expect_eq_bitmap(bmap2, exp1, nbits);
+
+ next_bit = find_next_bit(bmap2, round_up(nbits, BITS_PER_LONG), nbits);
+ if (next_bit < round_up(nbits, BITS_PER_LONG)) {
+ pr_err("bitmap_copy_arr64(nbits == %d:"
+ " tail is not safely cleared: %d\n", nbits, next_bit);
+ failed_tests++;
+ }
+
+ if ((nbits % 64) &&
+ (arr[(nbits - 1) / 64] & ~GENMASK_ULL((nbits - 1) % 64, 0))) {
+ pr_err("bitmap_to_arr64(nbits == %d): tail is not safely cleared: 0x%016llx (must be 0x%016llx)\n",
+ nbits, arr[(nbits - 1) / 64],
+ GENMASK_ULL((nbits - 1) % 64, 0));
+ failed_tests++;
+ }
+
+ if (nbits < EXP1_IN_BITS - 64)
+ expect_eq_uint(arr[DIV_ROUND_UP(nbits, 64)], 0xa5a5a5a5);
+ }
+}
+
+static void noinline __init test_mem_optimisations(void)
+{
+ DECLARE_BITMAP(bmap1, 1024);
+ DECLARE_BITMAP(bmap2, 1024);
+ unsigned int start, nbits;
+
+ for (start = 0; start < 1024; start += 8) {
+ for (nbits = 0; nbits < 1024 - start; nbits += 8) {
+ memset(bmap1, 0x5a, sizeof(bmap1));
+ memset(bmap2, 0x5a, sizeof(bmap2));
+
+ bitmap_set(bmap1, start, nbits);
+ __bitmap_set(bmap2, start, nbits);
+ if (!bitmap_equal(bmap1, bmap2, 1024)) {
+ printk("set not equal %d %d\n", start, nbits);
+ failed_tests++;
+ }
+ if (!__bitmap_equal(bmap1, bmap2, 1024)) {
+ printk("set not __equal %d %d\n", start, nbits);
+ failed_tests++;
+ }
+
+ bitmap_clear(bmap1, start, nbits);
+ __bitmap_clear(bmap2, start, nbits);
+ if (!bitmap_equal(bmap1, bmap2, 1024)) {
+ printk("clear not equal %d %d\n", start, nbits);
+ failed_tests++;
+ }
+ if (!__bitmap_equal(bmap1, bmap2, 1024)) {
+ printk("clear not __equal %d %d\n", start,
+ nbits);
+ failed_tests++;
+ }
+ }
+ }
+}
+
+static const unsigned char clump_exp[] __initconst = {
+ 0x01, /* 1 bit set */
+ 0x02, /* non-edge 1 bit set */
+ 0x00, /* zero bits set */
+ 0x38, /* 3 bits set across 4-bit boundary */
+ 0x38, /* Repeated clump */
+ 0x0F, /* 4 bits set */
+ 0xFF, /* all bits set */
+ 0x05, /* non-adjacent 2 bits set */
+};
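+
+/*
+ * Each value above is one byte-aligned 8-bit clump: setting bits 27-29,
+ * for instance, puts 0b00111000 = 0x38 into the clump at offset 24.
+ */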
+
+static void __init test_for_each_set_clump8(void)
+{
+#define CLUMP_EXP_NUMBITS 64
+ DECLARE_BITMAP(bits, CLUMP_EXP_NUMBITS);
+ unsigned int start;
+ unsigned long clump;
+
+ /* set bitmap to test case */
+ bitmap_zero(bits, CLUMP_EXP_NUMBITS);
+ bitmap_set(bits, 0, 1); /* 0x01 */
+ bitmap_set(bits, 9, 1); /* 0x02 */
+ bitmap_set(bits, 27, 3); /* 0x38 */
+ bitmap_set(bits, 35, 3); /* 0x38 */
+ bitmap_set(bits, 40, 4); /* 0x0F */
+ bitmap_set(bits, 48, 8); /* 0xFF */
+ bitmap_set(bits, 56, 1); /* 0x05 - part 1 */
+ bitmap_set(bits, 58, 1); /* 0x05 - part 2 */
+
+ for_each_set_clump8(start, clump, bits, CLUMP_EXP_NUMBITS)
+ expect_eq_clump8(start, CLUMP_EXP_NUMBITS, clump_exp, &clump);
+}
+
+static void __init test_for_each_set_bit_wrap(void)
+{
+ DECLARE_BITMAP(orig, 500);
+ DECLARE_BITMAP(copy, 500);
+ unsigned int wr, bit;
+
+ bitmap_zero(orig, 500);
+
+ /* Set individual bits */
+ for (bit = 0; bit < 500; bit += 10)
+ bitmap_set(orig, bit, 1);
+
+ /* Set range of bits */
+ bitmap_set(orig, 100, 50);
+
+ for (wr = 0; wr < 500; wr++) {
+ bitmap_zero(copy, 500);
+
+ for_each_set_bit_wrap(bit, orig, 500, wr)
+ bitmap_set(copy, bit, 1);
+
+ expect_eq_bitmap(orig, copy, 500);
+ }
+}
+
+static void __init test_for_each_set_bit(void)
+{
+ DECLARE_BITMAP(orig, 500);
+ DECLARE_BITMAP(copy, 500);
+ unsigned int bit;
+
+ bitmap_zero(orig, 500);
+ bitmap_zero(copy, 500);
+
+ /* Set individual bits */
+ for (bit = 0; bit < 500; bit += 10)
+ bitmap_set(orig, bit, 1);
+
+ /* Set range of bits */
+ bitmap_set(orig, 100, 50);
+
+ for_each_set_bit(bit, orig, 500)
+ bitmap_set(copy, bit, 1);
+
+ expect_eq_bitmap(orig, copy, 500);
+}
+
+static void __init test_for_each_set_bit_from(void)
+{
+ DECLARE_BITMAP(orig, 500);
+ DECLARE_BITMAP(copy, 500);
+ unsigned int wr, bit;
+
+ bitmap_zero(orig, 500);
+
+ /* Set individual bits */
+ for (bit = 0; bit < 500; bit += 10)
+ bitmap_set(orig, bit, 1);
+
+ /* Set range of bits */
+ bitmap_set(orig, 100, 50);
+
+ for (wr = 0; wr < 500; wr++) {
+ DECLARE_BITMAP(tmp, 500);
+
+ bitmap_zero(copy, 500);
+ bit = wr;
+
+ for_each_set_bit_from(bit, orig, 500)
+ bitmap_set(copy, bit, 1);
+
+ bitmap_copy(tmp, orig, 500);
+ bitmap_clear(tmp, 0, wr);
+ expect_eq_bitmap(tmp, copy, 500);
+ }
+}
+
+static void __init test_for_each_clear_bit(void)
+{
+ DECLARE_BITMAP(orig, 500);
+ DECLARE_BITMAP(copy, 500);
+ unsigned int bit;
+
+ bitmap_fill(orig, 500);
+ bitmap_fill(copy, 500);
+
+ /* Clear individual bits */
+ for (bit = 0; bit < 500; bit += 10)
+ bitmap_clear(orig, bit, 1);
+
+ /* Clear range of bits */
+ bitmap_clear(orig, 100, 50);
+
+ for_each_clear_bit(bit, orig, 500)
+ bitmap_clear(copy, bit, 1);
+
+ expect_eq_bitmap(orig, copy, 500);
+}
+
+static void __init test_for_each_clear_bit_from(void)
+{
+ DECLARE_BITMAP(orig, 500);
+ DECLARE_BITMAP(copy, 500);
+ unsigned int wr, bit;
+
+ bitmap_fill(orig, 500);
+
+ /* Clear individual bits */
+ for (bit = 0; bit < 500; bit += 10)
+ bitmap_clear(orig, bit, 1);
+
+ /* Clear range of bits */
+ bitmap_clear(orig, 100, 50);
+
+ for (wr = 0; wr < 500; wr++) {
+ DECLARE_BITMAP(tmp, 500);
+
+ bitmap_fill(copy, 500);
+ bit = wr;
+
+ for_each_clear_bit_from(bit, orig, 500)
+ bitmap_clear(copy, bit, 1);
+
+ bitmap_copy(tmp, orig, 500);
+ bitmap_set(tmp, 0, wr);
+ expect_eq_bitmap(tmp, copy, 500);
+ }
+}
+
+static void __init test_for_each_set_bitrange(void)
+{
+ DECLARE_BITMAP(orig, 500);
+ DECLARE_BITMAP(copy, 500);
+ unsigned int s, e;
+
+ bitmap_zero(orig, 500);
+ bitmap_zero(copy, 500);
+
+ /* Set individual bits */
+ for (s = 0; s < 500; s += 10)
+ bitmap_set(orig, s, 1);
+
+ /* Set range of bits */
+ bitmap_set(orig, 100, 50);
+
+ for_each_set_bitrange(s, e, orig, 500)
+ bitmap_set(copy, s, e-s);
+
+ expect_eq_bitmap(orig, copy, 500);
+}
+
+static void __init test_for_each_clear_bitrange(void)
+{
+ DECLARE_BITMAP(orig, 500);
+ DECLARE_BITMAP(copy, 500);
+ unsigned int s, e;
+
+ bitmap_fill(orig, 500);
+ bitmap_fill(copy, 500);
+
+ /* Clear individual bits */
+ for (s = 0; s < 500; s += 10)
+ bitmap_clear(orig, s, 1);
+
+ /* Clear range of bits */
+ bitmap_clear(orig, 100, 50);
+
+ for_each_clear_bitrange(s, e, orig, 500)
+ bitmap_clear(copy, s, e-s);
+
+ expect_eq_bitmap(orig, copy, 500);
+}
+
+static void __init test_for_each_set_bitrange_from(void)
+{
+ DECLARE_BITMAP(orig, 500);
+ DECLARE_BITMAP(copy, 500);
+ unsigned int wr, s, e;
+
+ bitmap_zero(orig, 500);
+
+ /* Set individual bits */
+ for (s = 0; s < 500; s += 10)
+ bitmap_set(orig, s, 1);
+
+ /* Set range of bits */
+ bitmap_set(orig, 100, 50);
+
+ for (wr = 0; wr < 500; wr++) {
+ DECLARE_BITMAP(tmp, 500);
+
+ bitmap_zero(copy, 500);
+ s = wr;
+
+ for_each_set_bitrange_from(s, e, orig, 500)
+ bitmap_set(copy, s, e - s);
+
+ bitmap_copy(tmp, orig, 500);
+ bitmap_clear(tmp, 0, wr);
+ expect_eq_bitmap(tmp, copy, 500);
+ }
+}
+
+static void __init test_for_each_clear_bitrange_from(void)
+{
+ DECLARE_BITMAP(orig, 500);
+ DECLARE_BITMAP(copy, 500);
+ unsigned int wr, s, e;
+
+ bitmap_fill(orig, 500);
+
+ /* Clear individual bits */
+ for (s = 0; s < 500; s += 10)
+ bitmap_clear(orig, s, 1);
+
+ /* Set range of bits */
+ bitmap_set(orig, 100, 50);
+
+ for (wr = 0; wr < 500; wr++) {
+ DECLARE_BITMAP(tmp, 500);
+
+ bitmap_fill(copy, 500);
+ s = wr;
+
+ for_each_clear_bitrange_from(s, e, orig, 500)
+ bitmap_clear(copy, s, e - s);
+
+ bitmap_copy(tmp, orig, 500);
+ bitmap_set(tmp, 0, wr);
+ expect_eq_bitmap(tmp, copy, 500);
+ }
+}
+
+struct test_bitmap_cut {
+ unsigned int first;
+ unsigned int cut;
+ unsigned int nbits;
+ unsigned long in[4];
+ unsigned long expected[4];
+};
+
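+/*
+ * bitmap_cut() removes 'cut' bits starting at bit 'first' and shifts
+ * the higher bits down. E.g. { 3, 3, 8, 0xaa -> 0x12 }: bits 0-2
+ * (0b010) stay, bits 3-5 are dropped, and bits 6-7 (0b10) land on
+ * bits 3-4, giving 0b10010.
+ */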
+static struct test_bitmap_cut test_cut[] = {
+ { 0, 0, 8, { 0x0000000aUL, }, { 0x0000000aUL, }, },
+ { 0, 0, 32, { 0xdadadeadUL, }, { 0xdadadeadUL, }, },
+ { 0, 3, 8, { 0x000000aaUL, }, { 0x00000015UL, }, },
+ { 3, 3, 8, { 0x000000aaUL, }, { 0x00000012UL, }, },
+ { 0, 1, 32, { 0xa5a5a5a5UL, }, { 0x52d2d2d2UL, }, },
+ { 0, 8, 32, { 0xdeadc0deUL, }, { 0x00deadc0UL, }, },
+ { 1, 1, 32, { 0x5a5a5a5aUL, }, { 0x2d2d2d2cUL, }, },
+ { 0, 15, 32, { 0xa5a5a5a5UL, }, { 0x00014b4bUL, }, },
+ { 0, 16, 32, { 0xa5a5a5a5UL, }, { 0x0000a5a5UL, }, },
+ { 15, 15, 32, { 0xa5a5a5a5UL, }, { 0x000125a5UL, }, },
+ { 15, 16, 32, { 0xa5a5a5a5UL, }, { 0x0000a5a5UL, }, },
+ { 16, 15, 32, { 0xa5a5a5a5UL, }, { 0x0001a5a5UL, }, },
+
+ { BITS_PER_LONG, BITS_PER_LONG, BITS_PER_LONG,
+ { 0xa5a5a5a5UL, 0xa5a5a5a5UL, },
+ { 0xa5a5a5a5UL, 0xa5a5a5a5UL, },
+ },
+ { 1, BITS_PER_LONG - 1, BITS_PER_LONG,
+ { 0xa5a5a5a5UL, 0xa5a5a5a5UL, },
+ { 0x00000001UL, 0x00000001UL, },
+ },
+
+ { 0, BITS_PER_LONG * 2, BITS_PER_LONG * 2 + 1,
+ { 0xa5a5a5a5UL, 0x00000001UL, 0x00000001UL, 0x00000001UL },
+ { 0x00000001UL, },
+ },
+ { 16, BITS_PER_LONG * 2 + 1, BITS_PER_LONG * 2 + 1 + 16,
+ { 0x0000ffffUL, 0x5a5a5a5aUL, 0x5a5a5a5aUL, 0x5a5a5a5aUL },
+ { 0x2d2dffffUL, },
+ },
+};
+
+static void __init test_bitmap_cut(void)
+{
+ unsigned long b[5], *in = &b[1], *out = &b[0]; /* Partial overlap: exercises in-place use */
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(test_cut); i++) {
+ struct test_bitmap_cut *t = &test_cut[i];
+
+ memcpy(in, t->in, sizeof(t->in));
+
+ bitmap_cut(out, in, t->first, t->cut, t->nbits);
+
+ expect_eq_bitmap(t->expected, out, t->nbits);
+ }
+}
+
+struct test_bitmap_print {
+ const unsigned long *bitmap;
+ unsigned long nbits;
+ const char *mask;
+ const char *list;
+};
+
+static const unsigned long small_bitmap[] __initconst = {
+ BITMAP_FROM_U64(0x3333333311111111ULL),
+};
+
+static const char small_mask[] __initconst = "33333333,11111111\n";
+static const char small_list[] __initconst = "0,4,8,12,16,20,24,28,32-33,36-37,40-41,44-45,48-49,52-53,56-57,60-61\n";
+
+static const unsigned long large_bitmap[] __initconst = {
+ BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL),
+ BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL),
+ BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL),
+ BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL),
+ BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL),
+ BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL),
+ BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL),
+ BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL),
+ BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL),
+ BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL),
+ BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL),
+ BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL),
+ BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL),
+ BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL),
+ BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL),
+ BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL),
+ BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL),
+ BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL),
+ BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL),
+ BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL),
+};
+
+static const char large_mask[] __initconst = "33333333,11111111,33333333,11111111,"
+ "33333333,11111111,33333333,11111111,"
+ "33333333,11111111,33333333,11111111,"
+ "33333333,11111111,33333333,11111111,"
+ "33333333,11111111,33333333,11111111,"
+ "33333333,11111111,33333333,11111111,"
+ "33333333,11111111,33333333,11111111,"
+ "33333333,11111111,33333333,11111111,"
+ "33333333,11111111,33333333,11111111,"
+ "33333333,11111111,33333333,11111111,"
+ "33333333,11111111,33333333,11111111,"
+ "33333333,11111111,33333333,11111111,"
+ "33333333,11111111,33333333,11111111,"
+ "33333333,11111111,33333333,11111111,"
+ "33333333,11111111,33333333,11111111,"
+ "33333333,11111111,33333333,11111111,"
+ "33333333,11111111,33333333,11111111,"
+ "33333333,11111111,33333333,11111111,"
+ "33333333,11111111,33333333,11111111,"
+ "33333333,11111111,33333333,11111111\n";
+
+static const char large_list[] __initconst = /* more than 4KB */
+ "0,4,8,12,16,20,24,28,32-33,36-37,40-41,44-45,48-49,52-53,56-57,60-61,64,68,72,76,80,84,88,92,96-97,100-101,104-1"
+ "05,108-109,112-113,116-117,120-121,124-125,128,132,136,140,144,148,152,156,160-161,164-165,168-169,172-173,176-1"
+ "77,180-181,184-185,188-189,192,196,200,204,208,212,216,220,224-225,228-229,232-233,236-237,240-241,244-245,248-2"
+ "49,252-253,256,260,264,268,272,276,280,284,288-289,292-293,296-297,300-301,304-305,308-309,312-313,316-317,320,3"
+ "24,328,332,336,340,344,348,352-353,356-357,360-361,364-365,368-369,372-373,376-377,380-381,384,388,392,396,400,4"
+ "04,408,412,416-417,420-421,424-425,428-429,432-433,436-437,440-441,444-445,448,452,456,460,464,468,472,476,480-4"
+ "81,484-485,488-489,492-493,496-497,500-501,504-505,508-509,512,516,520,524,528,532,536,540,544-545,548-549,552-5"
+ "53,556-557,560-561,564-565,568-569,572-573,576,580,584,588,592,596,600,604,608-609,612-613,616-617,620-621,624-6"
+ "25,628-629,632-633,636-637,640,644,648,652,656,660,664,668,672-673,676-677,680-681,684-685,688-689,692-693,696-6"
+ "97,700-701,704,708,712,716,720,724,728,732,736-737,740-741,744-745,748-749,752-753,756-757,760-761,764-765,768,7"
+ "72,776,780,784,788,792,796,800-801,804-805,808-809,812-813,816-817,820-821,824-825,828-829,832,836,840,844,848,8"
+ "52,856,860,864-865,868-869,872-873,876-877,880-881,884-885,888-889,892-893,896,900,904,908,912,916,920,924,928-9"
+ "29,932-933,936-937,940-941,944-945,948-949,952-953,956-957,960,964,968,972,976,980,984,988,992-993,996-997,1000-"
+ "1001,1004-1005,1008-1009,1012-1013,1016-1017,1020-1021,1024,1028,1032,1036,1040,1044,1048,1052,1056-1057,1060-10"
+ "61,1064-1065,1068-1069,1072-1073,1076-1077,1080-1081,1084-1085,1088,1092,1096,1100,1104,1108,1112,1116,1120-1121"
+ ",1124-1125,1128-1129,1132-1133,1136-1137,1140-1141,1144-1145,1148-1149,1152,1156,1160,1164,1168,1172,1176,1180,1"
+ "184-1185,1188-1189,1192-1193,1196-1197,1200-1201,1204-1205,1208-1209,1212-1213,1216,1220,1224,1228,1232,1236,124"
+ "0,1244,1248-1249,1252-1253,1256-1257,1260-1261,1264-1265,1268-1269,1272-1273,1276-1277,1280,1284,1288,1292,1296,"
+ "1300,1304,1308,1312-1313,1316-1317,1320-1321,1324-1325,1328-1329,1332-1333,1336-1337,1340-1341,1344,1348,1352,13"
+ "56,1360,1364,1368,1372,1376-1377,1380-1381,1384-1385,1388-1389,1392-1393,1396-1397,1400-1401,1404-1405,1408,1412"
+ ",1416,1420,1424,1428,1432,1436,1440-1441,1444-1445,1448-1449,1452-1453,1456-1457,1460-1461,1464-1465,1468-1469,1"
+ "472,1476,1480,1484,1488,1492,1496,1500,1504-1505,1508-1509,1512-1513,1516-1517,1520-1521,1524-1525,1528-1529,153"
+ "2-1533,1536,1540,1544,1548,1552,1556,1560,1564,1568-1569,1572-1573,1576-1577,1580-1581,1584-1585,1588-1589,1592-"
+ "1593,1596-1597,1600,1604,1608,1612,1616,1620,1624,1628,1632-1633,1636-1637,1640-1641,1644-1645,1648-1649,1652-16"
+ "53,1656-1657,1660-1661,1664,1668,1672,1676,1680,1684,1688,1692,1696-1697,1700-1701,1704-1705,1708-1709,1712-1713"
+ ",1716-1717,1720-1721,1724-1725,1728,1732,1736,1740,1744,1748,1752,1756,1760-1761,1764-1765,1768-1769,1772-1773,1"
+ "776-1777,1780-1781,1784-1785,1788-1789,1792,1796,1800,1804,1808,1812,1816,1820,1824-1825,1828-1829,1832-1833,183"
+ "6-1837,1840-1841,1844-1845,1848-1849,1852-1853,1856,1860,1864,1868,1872,1876,1880,1884,1888-1889,1892-1893,1896-"
+ "1897,1900-1901,1904-1905,1908-1909,1912-1913,1916-1917,1920,1924,1928,1932,1936,1940,1944,1948,1952-1953,1956-19"
+ "57,1960-1961,1964-1965,1968-1969,1972-1973,1976-1977,1980-1981,1984,1988,1992,1996,2000,2004,2008,2012,2016-2017"
+ ",2020-2021,2024-2025,2028-2029,2032-2033,2036-2037,2040-2041,2044-2045,2048,2052,2056,2060,2064,2068,2072,2076,2"
+ "080-2081,2084-2085,2088-2089,2092-2093,2096-2097,2100-2101,2104-2105,2108-2109,2112,2116,2120,2124,2128,2132,213"
+ "6,2140,2144-2145,2148-2149,2152-2153,2156-2157,2160-2161,2164-2165,2168-2169,2172-2173,2176,2180,2184,2188,2192,"
+ "2196,2200,2204,2208-2209,2212-2213,2216-2217,2220-2221,2224-2225,2228-2229,2232-2233,2236-2237,2240,2244,2248,22"
+ "52,2256,2260,2264,2268,2272-2273,2276-2277,2280-2281,2284-2285,2288-2289,2292-2293,2296-2297,2300-2301,2304,2308"
+ ",2312,2316,2320,2324,2328,2332,2336-2337,2340-2341,2344-2345,2348-2349,2352-2353,2356-2357,2360-2361,2364-2365,2"
+ "368,2372,2376,2380,2384,2388,2392,2396,2400-2401,2404-2405,2408-2409,2412-2413,2416-2417,2420-2421,2424-2425,242"
+ "8-2429,2432,2436,2440,2444,2448,2452,2456,2460,2464-2465,2468-2469,2472-2473,2476-2477,2480-2481,2484-2485,2488-"
+ "2489,2492-2493,2496,2500,2504,2508,2512,2516,2520,2524,2528-2529,2532-2533,2536-2537,2540-2541,2544-2545,2548-25"
+ "49,2552-2553,2556-2557\n";
+
+static const struct test_bitmap_print test_print[] __initconst = {
+ { small_bitmap, sizeof(small_bitmap) * BITS_PER_BYTE, small_mask, small_list },
+ { large_bitmap, sizeof(large_bitmap) * BITS_PER_BYTE, large_mask, large_list },
+};
+
+static void __init test_bitmap_print_buf(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(test_print); i++) {
+ const struct test_bitmap_print *t = &test_print[i];
+ int n;
+
+ n = bitmap_print_bitmask_to_buf(print_buf, t->bitmap, t->nbits,
+ 0, 2 * PAGE_SIZE);
+ expect_eq_uint(strlen(t->mask) + 1, n);
+ expect_eq_str(t->mask, print_buf, n);
+
+ n = bitmap_print_list_to_buf(print_buf, t->bitmap, t->nbits,
+ 0, 2 * PAGE_SIZE);
+ expect_eq_uint(strlen(t->list) + 1, n);
+ expect_eq_str(t->list, print_buf, n);
+
+		/* test with a non-zero offset */
+ if (strlen(t->list) > PAGE_SIZE) {
+ n = bitmap_print_list_to_buf(print_buf, t->bitmap, t->nbits,
+ PAGE_SIZE, PAGE_SIZE);
+ expect_eq_uint(strlen(t->list) + 1 - PAGE_SIZE, n);
+ expect_eq_str(t->list + PAGE_SIZE, print_buf, n);
+ }
+ }
+}
+
+/*
+ * FIXME: Clang breaks compile-time evaluations when KASAN and GCOV are enabled.
+ * To work around it, GCOV is force-disabled in the Makefile for this configuration.
+ */
+static void __init test_bitmap_const_eval(void)
+{
+ DECLARE_BITMAP(bitmap, BITS_PER_LONG);
+ unsigned long initvar = BIT(2);
+ unsigned long bitopvar = 0;
+ unsigned long var = 0;
+ int res;
+
+ /*
+ * Compilers must be able to optimize all of those to compile-time
+ * constants on any supported optimization level (-O2, -Os) and any
+	 * architecture. Otherwise, a build bug is triggered.
+	 * The whole function is then optimized out, so there is nothing to
+	 * do at runtime.
+ */
+
+ /*
+	 * Equivalent to `unsigned long bitmap[1] = { GENMASK(6, 5), }`.
+ * Clang on s390 optimizes bitops at compile-time as intended, but at
+ * the same time stops treating @bitmap and @bitopvar as compile-time
+ * constants after regular test_bit() is executed, thus triggering the
+ * build bugs below. So, call const_test_bit() there directly until
+ * the compiler is fixed.
+ */
+ bitmap_clear(bitmap, 0, BITS_PER_LONG);
+ if (!test_bit(7, bitmap))
+ bitmap_set(bitmap, 5, 2);
+
+	/* Equivalent to `unsigned long bitopvar = BIT(20)` */
+ __change_bit(31, &bitopvar);
+ bitmap_shift_right(&bitopvar, &bitopvar, 11, BITS_PER_LONG);
+
+	/* Equivalent to `unsigned long var = BIT(25)` */
+ var |= BIT(25);
+ if (var & BIT(0))
+ var ^= GENMASK(9, 6);
+
+ /* __const_hweight<32|64>(GENMASK(6, 5)) == 2 */
+ res = bitmap_weight(bitmap, 20);
+ BUILD_BUG_ON(!__builtin_constant_p(res));
+ BUILD_BUG_ON(res != 2);
+
+	/* !(BIT(20) & BIT(18)) == 1 */
+ res = !test_bit(18, &bitopvar);
+ BUILD_BUG_ON(!__builtin_constant_p(res));
+ BUILD_BUG_ON(!res);
+
+ /* BIT(2) & GENMASK(14, 8) == 0 */
+ res = initvar & GENMASK(14, 8);
+ BUILD_BUG_ON(!__builtin_constant_p(res));
+ BUILD_BUG_ON(res);
+
+ /* ~BIT(25) */
+ BUILD_BUG_ON(!__builtin_constant_p(~var));
+ BUILD_BUG_ON(~var != ~BIT(25));
+}
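+
+/*
+ * The pattern used above boils down to (a sketch of the trick, assuming
+ * the optimizer folds the expression):
+ *
+ *	res = <expression the compiler should fold>;
+ *	BUILD_BUG_ON(!__builtin_constant_p(res));
+ *
+ * BUILD_BUG_ON() only compiles away when its condition is a
+ * compile-time zero, so a compiler that fails to fold the expression
+ * breaks the build instead of silently pessimizing the code.
+ */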
+
+static void __init selftest(void)
+{
+ test_zero_clear();
+ test_fill_set();
+ test_copy();
+ test_replace();
+ test_bitmap_arr32();
+ test_bitmap_arr64();
+ test_bitmap_parse();
+ test_bitmap_parselist();
+ test_bitmap_printlist();
+ test_mem_optimisations();
+ test_bitmap_cut();
+ test_bitmap_print_buf();
+ test_bitmap_const_eval();
+
+ test_find_nth_bit();
+ test_for_each_set_bit();
+ test_for_each_set_bit_from();
+ test_for_each_clear_bit();
+ test_for_each_clear_bit_from();
+ test_for_each_set_bitrange();
+ test_for_each_clear_bitrange();
+ test_for_each_set_bitrange_from();
+ test_for_each_clear_bitrange_from();
+ test_for_each_set_clump8();
+ test_for_each_set_bit_wrap();
+}
+
+KSTM_MODULE_LOADERS(test_bitmap);
+MODULE_AUTHOR("david decotigny <david.decotigny@googlers.com>");
+MODULE_LICENSE("GPL");
diff --git a/lib/test_bitops.c b/lib/test_bitops.c
new file mode 100644
index 000000000..3b7bcbee8
--- /dev/null
+++ b/lib/test_bitops.c
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 Intel Corporation
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+
+/* a tiny module only meant to test
+ *
+ * set/clear_bit
+ * get_count_order/long
+ */
+
+/* use an enum because that's the most common BITMAP usage */
+enum bitops_fun {
+ BITOPS_4 = 4,
+ BITOPS_7 = 7,
+ BITOPS_11 = 11,
+ BITOPS_31 = 31,
+ BITOPS_88 = 88,
+ BITOPS_LAST = 255,
+ BITOPS_LENGTH = 256
+};
+
+static DECLARE_BITMAP(g_bitmap, BITOPS_LENGTH);
+
+static unsigned int order_comb[][2] = {
+ {0x00000003, 2},
+ {0x00000004, 2},
+ {0x00001fff, 13},
+ {0x00002000, 13},
+ {0x50000000, 31},
+ {0x80000000, 31},
+ {0x80003000, 32},
+};
+
+#ifdef CONFIG_64BIT
+static unsigned long order_comb_long[][2] = {
+ {0x0000000300000000, 34},
+ {0x0000000400000000, 34},
+ {0x00001fff00000000, 45},
+ {0x0000200000000000, 45},
+ {0x5000000000000000, 63},
+ {0x8000000000000000, 63},
+ {0x8000300000000000, 64},
+};
+#endif
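+
+/*
+ * For reference (a sketch of the expected semantics, not the kernel's
+ * implementation): get_count_order(n) returns the order of the smallest
+ * power of two that is >= n, i.e. roughly fls(n - 1). Hence the table
+ * entries above: 0x00000003 and 0x00000004 both map to order 2
+ * (1 << 2 == 4 covers both), while 0x80003000 needs order 32.
+ */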
+
+static int __init test_bitops_startup(void)
+{
+ int i, bit_set;
+
+ pr_info("Starting bitops test\n");
+ set_bit(BITOPS_4, g_bitmap);
+ set_bit(BITOPS_7, g_bitmap);
+ set_bit(BITOPS_11, g_bitmap);
+ set_bit(BITOPS_31, g_bitmap);
+ set_bit(BITOPS_88, g_bitmap);
+
+ for (i = 0; i < ARRAY_SIZE(order_comb); i++) {
+ if (order_comb[i][1] != get_count_order(order_comb[i][0]))
+ pr_warn("get_count_order wrong for %x\n",
+ order_comb[i][0]);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(order_comb); i++) {
+ if (order_comb[i][1] != get_count_order_long(order_comb[i][0]))
+ pr_warn("get_count_order_long wrong for %x\n",
+ order_comb[i][0]);
+ }
+
+#ifdef CONFIG_64BIT
+ for (i = 0; i < ARRAY_SIZE(order_comb_long); i++) {
+ if (order_comb_long[i][1] !=
+ get_count_order_long(order_comb_long[i][0]))
+ pr_warn("get_count_order_long wrong for %lx\n",
+ order_comb_long[i][0]);
+ }
+#endif
+
+ barrier();
+
+ clear_bit(BITOPS_4, g_bitmap);
+ clear_bit(BITOPS_7, g_bitmap);
+ clear_bit(BITOPS_11, g_bitmap);
+ clear_bit(BITOPS_31, g_bitmap);
+ clear_bit(BITOPS_88, g_bitmap);
+
+ bit_set = find_first_bit(g_bitmap, BITOPS_LAST);
+ if (bit_set != BITOPS_LAST)
+ pr_err("ERROR: FOUND SET BIT %d\n", bit_set);
+
+ pr_info("Completed bitops test\n");
+
+ return 0;
+}
+
+static void __exit test_bitops_unstartup(void)
+{
+}
+
+module_init(test_bitops_startup);
+module_exit(test_bitops_unstartup);
+
+MODULE_AUTHOR("Jesse Brandeburg <jesse.brandeburg@intel.com>, Wei Yang <richard.weiyang@gmail.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Bit testing module");
diff --git a/lib/test_bits.c b/lib/test_bits.c
new file mode 100644
index 000000000..c9368a231
--- /dev/null
+++ b/lib/test_bits.c
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Test cases for functions and macros in bits.h
+ */
+
+#include <kunit/test.h>
+#include <linux/bits.h>
+
+
+static void genmask_test(struct kunit *test)
+{
+ KUNIT_EXPECT_EQ(test, 1ul, GENMASK(0, 0));
+ KUNIT_EXPECT_EQ(test, 3ul, GENMASK(1, 0));
+ KUNIT_EXPECT_EQ(test, 6ul, GENMASK(2, 1));
+ KUNIT_EXPECT_EQ(test, 0xFFFFFFFFul, GENMASK(31, 0));
+
+#ifdef TEST_GENMASK_FAILURES
+ /* these should fail compilation */
+ GENMASK(0, 1);
+ GENMASK(0, 10);
+ GENMASK(9, 10);
+#endif
+}
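+
+/*
+ * As a reminder (informal sketch, not the exact header definition):
+ * GENMASK(h, l) produces a mask with bits l..h set inclusive, i.e.
+ * roughly
+ *
+ *	(~0UL << l) & (~0UL >> (BITS_PER_LONG - 1 - h))
+ *
+ * so GENMASK(2, 1) == 0b110 == 6, matching the expectation above.
+ */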
+
+static void genmask_ull_test(struct kunit *test)
+{
+ KUNIT_EXPECT_EQ(test, 1ull, GENMASK_ULL(0, 0));
+ KUNIT_EXPECT_EQ(test, 3ull, GENMASK_ULL(1, 0));
+ KUNIT_EXPECT_EQ(test, 0x000000ffffe00000ull, GENMASK_ULL(39, 21));
+ KUNIT_EXPECT_EQ(test, 0xffffffffffffffffull, GENMASK_ULL(63, 0));
+
+#ifdef TEST_GENMASK_FAILURES
+ /* these should fail compilation */
+ GENMASK_ULL(0, 1);
+ GENMASK_ULL(0, 10);
+ GENMASK_ULL(9, 10);
+#endif
+}
+
+static void genmask_input_check_test(struct kunit *test)
+{
+ unsigned int x, y;
+ int z, w;
+
+ /* Unknown input */
+ KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(x, 0));
+ KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(0, x));
+ KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(x, y));
+
+ KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(z, 0));
+ KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(0, z));
+ KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(z, w));
+
+ /* Valid input */
+ KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(1, 1));
+ KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(39, 21));
+}
+
+static struct kunit_case bits_test_cases[] = {
+ KUNIT_CASE(genmask_test),
+ KUNIT_CASE(genmask_ull_test),
+ KUNIT_CASE(genmask_input_check_test),
+ {}
+};
+
+static struct kunit_suite bits_test_suite = {
+ .name = "bits-test",
+ .test_cases = bits_test_cases,
+};
+kunit_test_suite(bits_test_suite);
+
+MODULE_LICENSE("GPL");
diff --git a/lib/test_blackhole_dev.c b/lib/test_blackhole_dev.c
new file mode 100644
index 000000000..4c40580a9
--- /dev/null
+++ b/lib/test_blackhole_dev.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This module tests the blackhole_dev that is created during the
+ * net subsystem initialization. The test injects an skb into the
+ * stack with skb->dev set to the blackhole_dev and expects the
+ * kernel to behave in a sane manner (in other words, *not crash*)!
+ *
+ * Copyright (c) 2018, Mahesh Bandewar <maheshb@google.com>
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/udp.h>
+#include <linux/ipv6.h>
+
+#include <net/dst.h>
+
+#define SKB_SIZE 256
+#define HEAD_SIZE (14+40+8) /* Ether + IPv6 + UDP */
+#define TAIL_SIZE 32 /* random tail-room */
+
+#define UDP_PORT 1234
+
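+/*
+ * Sketch of the buffer layout built below, with the sizes defined
+ * above (HEAD_SIZE == 62, TAIL_SIZE == 32, so data_len == 162):
+ *
+ *	alloc_skb(SKB_SIZE)		256-byte buffer
+ *	skb_reserve(HEAD_SIZE)		headroom for Ether+IPv6+UDP
+ *	__skb_put(data_len)		162 bytes of 0xf payload
+ *	skb_push() x3			UDP, IPv6, Ether, innermost first
+ */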
+static int __init test_blackholedev_init(void)
+{
+ struct ipv6hdr *ip6h;
+ struct sk_buff *skb;
+ struct ethhdr *ethh;
+ struct udphdr *uh;
+ int data_len;
+ int ret;
+
+ skb = alloc_skb(SKB_SIZE, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ /* Reserve head-room for the headers */
+ skb_reserve(skb, HEAD_SIZE);
+
+ /* Add data to the skb */
+ data_len = SKB_SIZE - (HEAD_SIZE + TAIL_SIZE);
+ memset(__skb_put(skb, data_len), 0xf, data_len);
+
+ /* Add protocol data */
+ /* (Transport) UDP */
+ uh = (struct udphdr *)skb_push(skb, sizeof(struct udphdr));
+ skb_set_transport_header(skb, 0);
+ uh->source = uh->dest = htons(UDP_PORT);
+	uh->len = htons(data_len + sizeof(struct udphdr));
+ uh->check = 0;
+ /* (Network) IPv6 */
+ ip6h = (struct ipv6hdr *)skb_push(skb, sizeof(struct ipv6hdr));
+ skb_set_network_header(skb, 0);
+ ip6h->hop_limit = 32;
+	ip6h->payload_len = htons(data_len + sizeof(struct udphdr));
+ ip6h->nexthdr = IPPROTO_UDP;
+ ip6h->saddr = in6addr_loopback;
+ ip6h->daddr = in6addr_loopback;
+ /* Ether */
+ ethh = (struct ethhdr *)skb_push(skb, sizeof(struct ethhdr));
+ skb_set_mac_header(skb, 0);
+
+ skb->protocol = htons(ETH_P_IPV6);
+ skb->pkt_type = PACKET_HOST;
+ skb->dev = blackhole_netdev;
+
+ /* Now attempt to send the packet */
+ ret = dev_queue_xmit(skb);
+
+ switch (ret) {
+ case NET_XMIT_SUCCESS:
+ pr_warn("dev_queue_xmit() returned NET_XMIT_SUCCESS\n");
+ break;
+ case NET_XMIT_DROP:
+ pr_warn("dev_queue_xmit() returned NET_XMIT_DROP\n");
+ break;
+ case NET_XMIT_CN:
+ pr_warn("dev_queue_xmit() returned NET_XMIT_CN\n");
+ break;
+ default:
+ pr_err("dev_queue_xmit() returned UNKNOWN(%d)\n", ret);
+ }
+
+ return 0;
+}
+
+static void __exit test_blackholedev_exit(void)
+{
+ pr_warn("test_blackholedev module terminating.\n");
+}
+
+module_init(test_blackholedev_init);
+module_exit(test_blackholedev_exit);
+
+MODULE_AUTHOR("Mahesh Bandewar <maheshb@google.com>");
+MODULE_LICENSE("GPL");
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
new file mode 100644
index 000000000..ecde42162
--- /dev/null
+++ b/lib/test_bpf.c
@@ -0,0 +1,15356 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Testsuite for BPF interpreter and BPF JIT compiler
+ *
+ * Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/filter.h>
+#include <linux/bpf.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/random.h>
+#include <linux/highmem.h>
+#include <linux/sched.h>
+
+/* General test specific settings */
+#define MAX_SUBTESTS 3
+#define MAX_TESTRUNS 1000
+#define MAX_DATA 128
+#define MAX_INSNS 512
+#define MAX_K 0xffffFFFF
+
+/* A few constants used to init the test 'skb' */
+#define SKB_TYPE 3
+#define SKB_MARK 0x1234aaaa
+#define SKB_HASH 0x1234aaab
+#define SKB_QUEUE_MAP 123
+#define SKB_VLAN_TCI 0xffff
+#define SKB_VLAN_PRESENT 1
+#define SKB_DEV_IFINDEX 577
+#define SKB_DEV_TYPE 588
+
+/* Redefine REGs to make tests less verbose */
+#define R0 BPF_REG_0
+#define R1 BPF_REG_1
+#define R2 BPF_REG_2
+#define R3 BPF_REG_3
+#define R4 BPF_REG_4
+#define R5 BPF_REG_5
+#define R6 BPF_REG_6
+#define R7 BPF_REG_7
+#define R8 BPF_REG_8
+#define R9 BPF_REG_9
+#define R10 BPF_REG_10
+
+/* Flags that can be passed to test cases */
+#define FLAG_NO_DATA BIT(0)
+#define FLAG_EXPECTED_FAIL BIT(1)
+#define FLAG_SKB_FRAG BIT(2)
+#define FLAG_VERIFIER_ZEXT BIT(3)
+#define FLAG_LARGE_MEM BIT(4)
+
+enum {
+ CLASSIC = BIT(6), /* Old BPF instructions only. */
+ INTERNAL = BIT(7), /* Extended instruction set. */
+};
+
+#define TEST_TYPE_MASK (CLASSIC | INTERNAL)
+
+struct bpf_test {
+ const char *descr;
+ union {
+ struct sock_filter insns[MAX_INSNS];
+ struct bpf_insn insns_int[MAX_INSNS];
+ struct {
+ void *insns;
+ unsigned int len;
+ } ptr;
+ } u;
+ __u8 aux;
+ __u8 data[MAX_DATA];
+ struct {
+ int data_size;
+ __u32 result;
+ } test[MAX_SUBTESTS];
+ int (*fill_helper)(struct bpf_test *self);
+ int expected_errcode; /* used when FLAG_EXPECTED_FAIL is set in the aux */
+ __u8 frag_data[MAX_DATA];
+ int stack_depth; /* for eBPF only, since tests don't call verifier */
+ int nr_testruns; /* Custom run count, defaults to MAX_TESTRUNS if 0 */
+};
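+
+/*
+ * Informal summary of the descriptor above: u holds either inline
+ * classic/eBPF instructions or, for generated programs, a ptr/len pair
+ * filled in by ->fill_helper(); each test[] entry pairs an input data
+ * size with the expected return value in R0.
+ */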
+
+/* Large test cases need separate allocation and fill handler. */
+
+static int bpf_fill_maxinsns1(struct bpf_test *self)
+{
+ unsigned int len = BPF_MAXINSNS;
+ struct sock_filter *insn;
+ __u32 k = ~0;
+ int i;
+
+ insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ if (!insn)
+ return -ENOMEM;
+
+ for (i = 0; i < len; i++, k--)
+ insn[i] = __BPF_STMT(BPF_RET | BPF_K, k);
+
+ self->u.ptr.insns = insn;
+ self->u.ptr.len = len;
+
+ return 0;
+}
+
+static int bpf_fill_maxinsns2(struct bpf_test *self)
+{
+ unsigned int len = BPF_MAXINSNS;
+ struct sock_filter *insn;
+ int i;
+
+ insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ if (!insn)
+ return -ENOMEM;
+
+ for (i = 0; i < len; i++)
+ insn[i] = __BPF_STMT(BPF_RET | BPF_K, 0xfefefefe);
+
+ self->u.ptr.insns = insn;
+ self->u.ptr.len = len;
+
+ return 0;
+}
+
+static int bpf_fill_maxinsns3(struct bpf_test *self)
+{
+ unsigned int len = BPF_MAXINSNS;
+ struct sock_filter *insn;
+ struct rnd_state rnd;
+ int i;
+
+ insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ if (!insn)
+ return -ENOMEM;
+
+ prandom_seed_state(&rnd, 3141592653589793238ULL);
+
+ for (i = 0; i < len - 1; i++) {
+ __u32 k = prandom_u32_state(&rnd);
+
+ insn[i] = __BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, k);
+ }
+
+ insn[len - 1] = __BPF_STMT(BPF_RET | BPF_A, 0);
+
+ self->u.ptr.insns = insn;
+ self->u.ptr.len = len;
+
+ return 0;
+}
+
+static int bpf_fill_maxinsns4(struct bpf_test *self)
+{
+ unsigned int len = BPF_MAXINSNS + 1;
+ struct sock_filter *insn;
+ int i;
+
+ insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ if (!insn)
+ return -ENOMEM;
+
+ for (i = 0; i < len; i++)
+ insn[i] = __BPF_STMT(BPF_RET | BPF_K, 0xfefefefe);
+
+ self->u.ptr.insns = insn;
+ self->u.ptr.len = len;
+
+ return 0;
+}
+
+static int bpf_fill_maxinsns5(struct bpf_test *self)
+{
+ unsigned int len = BPF_MAXINSNS;
+ struct sock_filter *insn;
+ int i;
+
+ insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ if (!insn)
+ return -ENOMEM;
+
+ insn[0] = __BPF_JUMP(BPF_JMP | BPF_JA, len - 2, 0, 0);
+
+ for (i = 1; i < len - 1; i++)
+ insn[i] = __BPF_STMT(BPF_RET | BPF_K, 0xfefefefe);
+
+ insn[len - 1] = __BPF_STMT(BPF_RET | BPF_K, 0xabababab);
+
+ self->u.ptr.insns = insn;
+ self->u.ptr.len = len;
+
+ return 0;
+}
+
+static int bpf_fill_maxinsns6(struct bpf_test *self)
+{
+ unsigned int len = BPF_MAXINSNS;
+ struct sock_filter *insn;
+ int i;
+
+ insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ if (!insn)
+ return -ENOMEM;
+
+ for (i = 0; i < len - 1; i++)
+ insn[i] = __BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF +
+ SKF_AD_VLAN_TAG_PRESENT);
+
+ insn[len - 1] = __BPF_STMT(BPF_RET | BPF_A, 0);
+
+ self->u.ptr.insns = insn;
+ self->u.ptr.len = len;
+
+ return 0;
+}
+
+static int bpf_fill_maxinsns7(struct bpf_test *self)
+{
+ unsigned int len = BPF_MAXINSNS;
+ struct sock_filter *insn;
+ int i;
+
+ insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ if (!insn)
+ return -ENOMEM;
+
+ for (i = 0; i < len - 4; i++)
+ insn[i] = __BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF +
+ SKF_AD_CPU);
+
+ insn[len - 4] = __BPF_STMT(BPF_MISC | BPF_TAX, 0);
+ insn[len - 3] = __BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF +
+ SKF_AD_CPU);
+ insn[len - 2] = __BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0);
+ insn[len - 1] = __BPF_STMT(BPF_RET | BPF_A, 0);
+
+ self->u.ptr.insns = insn;
+ self->u.ptr.len = len;
+
+ return 0;
+}
+
+static int bpf_fill_maxinsns8(struct bpf_test *self)
+{
+ unsigned int len = BPF_MAXINSNS;
+ struct sock_filter *insn;
+ int i, jmp_off = len - 3;
+
+ insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ if (!insn)
+ return -ENOMEM;
+
+ insn[0] = __BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff);
+
+ for (i = 1; i < len - 1; i++)
+ insn[i] = __BPF_JUMP(BPF_JMP | BPF_JGT, 0xffffffff, jmp_off--, 0);
+
+ insn[len - 1] = __BPF_STMT(BPF_RET | BPF_A, 0);
+
+ self->u.ptr.insns = insn;
+ self->u.ptr.len = len;
+
+ return 0;
+}
+
+static int bpf_fill_maxinsns9(struct bpf_test *self)
+{
+ unsigned int len = BPF_MAXINSNS;
+ struct bpf_insn *insn;
+ int i;
+
+ insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ if (!insn)
+ return -ENOMEM;
+
+ insn[0] = BPF_JMP_IMM(BPF_JA, 0, 0, len - 2);
+ insn[1] = BPF_ALU32_IMM(BPF_MOV, R0, 0xcbababab);
+ insn[2] = BPF_EXIT_INSN();
+
+ for (i = 3; i < len - 2; i++)
+ insn[i] = BPF_ALU32_IMM(BPF_MOV, R0, 0xfefefefe);
+
+ insn[len - 2] = BPF_EXIT_INSN();
+ insn[len - 1] = BPF_JMP_IMM(BPF_JA, 0, 0, -(len - 1));
+
+ self->u.ptr.insns = insn;
+ self->u.ptr.len = len;
+
+ return 0;
+}
+
+static int bpf_fill_maxinsns10(struct bpf_test *self)
+{
+ unsigned int len = BPF_MAXINSNS, hlen = len - 2;
+ struct bpf_insn *insn;
+ int i;
+
+ insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ if (!insn)
+ return -ENOMEM;
+
+ for (i = 0; i < hlen / 2; i++)
+ insn[i] = BPF_JMP_IMM(BPF_JA, 0, 0, hlen - 2 - 2 * i);
+ for (i = hlen - 1; i > hlen / 2; i--)
+ insn[i] = BPF_JMP_IMM(BPF_JA, 0, 0, hlen - 1 - 2 * i);
+
+ insn[hlen / 2] = BPF_JMP_IMM(BPF_JA, 0, 0, hlen / 2 - 1);
+ insn[hlen] = BPF_ALU32_IMM(BPF_MOV, R0, 0xabababac);
+ insn[hlen + 1] = BPF_EXIT_INSN();
+
+ self->u.ptr.insns = insn;
+ self->u.ptr.len = len;
+
+ return 0;
+}
+
+static int __bpf_fill_ja(struct bpf_test *self, unsigned int len,
+ unsigned int plen)
+{
+ struct sock_filter *insn;
+ unsigned int rlen;
+ int i, j;
+
+ insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ if (!insn)
+ return -ENOMEM;
+
+ rlen = (len % plen) - 1;
+
+ for (i = 0; i + plen < len; i += plen)
+ for (j = 0; j < plen; j++)
+ insn[i + j] = __BPF_JUMP(BPF_JMP | BPF_JA,
+ plen - 1 - j, 0, 0);
+ for (j = 0; j < rlen; j++)
+ insn[i + j] = __BPF_JUMP(BPF_JMP | BPF_JA, rlen - 1 - j,
+ 0, 0);
+
+ insn[len - 1] = __BPF_STMT(BPF_RET | BPF_K, 0xababcbac);
+
+ self->u.ptr.insns = insn;
+ self->u.ptr.len = len;
+
+ return 0;
+}
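+
+/*
+ * Illustration: each plen-sized block above is a chain of forward jumps
+ * that all land on the first insn of the following block, e.g. for
+ * plen == 3:
+ *
+ *	0: ja +2 --.
+ *	1: ja +1 --+
+ *	2: ja +0 --+--> 3: ...
+ *
+ * which exercises JIT passes that iteratively shrink jump offsets.
+ */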
+
+static int bpf_fill_maxinsns11(struct bpf_test *self)
+{
+ /* Hits 70 passes on x86_64 and triggers NOPs padding. */
+ return __bpf_fill_ja(self, BPF_MAXINSNS, 68);
+}
+
+static int bpf_fill_maxinsns12(struct bpf_test *self)
+{
+ unsigned int len = BPF_MAXINSNS;
+ struct sock_filter *insn;
+ int i = 0;
+
+ insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ if (!insn)
+ return -ENOMEM;
+
+ insn[0] = __BPF_JUMP(BPF_JMP | BPF_JA, len - 2, 0, 0);
+
+ for (i = 1; i < len - 1; i++)
+ insn[i] = __BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0);
+
+ insn[len - 1] = __BPF_STMT(BPF_RET | BPF_K, 0xabababab);
+
+ self->u.ptr.insns = insn;
+ self->u.ptr.len = len;
+
+ return 0;
+}
+
+static int bpf_fill_maxinsns13(struct bpf_test *self)
+{
+ unsigned int len = BPF_MAXINSNS;
+ struct sock_filter *insn;
+ int i = 0;
+
+ insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ if (!insn)
+ return -ENOMEM;
+
+ for (i = 0; i < len - 3; i++)
+ insn[i] = __BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0);
+
+ insn[len - 3] = __BPF_STMT(BPF_LD | BPF_IMM, 0xabababab);
+ insn[len - 2] = __BPF_STMT(BPF_ALU | BPF_XOR | BPF_X, 0);
+ insn[len - 1] = __BPF_STMT(BPF_RET | BPF_A, 0);
+
+ self->u.ptr.insns = insn;
+ self->u.ptr.len = len;
+
+ return 0;
+}
+
+static int bpf_fill_ja(struct bpf_test *self)
+{
+ /* Hits exactly 11 passes on x86_64 JIT. */
+ return __bpf_fill_ja(self, 12, 9);
+}
+
+static int bpf_fill_ld_abs_get_processor_id(struct bpf_test *self)
+{
+ unsigned int len = BPF_MAXINSNS;
+ struct sock_filter *insn;
+ int i;
+
+ insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ if (!insn)
+ return -ENOMEM;
+
+ for (i = 0; i < len - 1; i += 2) {
+ insn[i] = __BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 0);
+ insn[i + 1] = __BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_CPU);
+ }
+
+ insn[len - 1] = __BPF_STMT(BPF_RET | BPF_K, 0xbee);
+
+ self->u.ptr.insns = insn;
+ self->u.ptr.len = len;
+
+ return 0;
+}
+
+static int __bpf_fill_stxdw(struct bpf_test *self, int size)
+{
+ unsigned int len = BPF_MAXINSNS;
+ struct bpf_insn *insn;
+ int i;
+
+ insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ if (!insn)
+ return -ENOMEM;
+
+ insn[0] = BPF_ALU32_IMM(BPF_MOV, R0, 1);
+ insn[1] = BPF_ST_MEM(size, R10, -40, 42);
+
+ for (i = 2; i < len - 2; i++)
+ insn[i] = BPF_STX_XADD(size, R10, R0, -40);
+
+ insn[len - 2] = BPF_LDX_MEM(size, R0, R10, -40);
+ insn[len - 1] = BPF_EXIT_INSN();
+
+ self->u.ptr.insns = insn;
+ self->u.ptr.len = len;
+ self->stack_depth = 40;
+
+ return 0;
+}
+
+static int bpf_fill_stxw(struct bpf_test *self)
+{
+ return __bpf_fill_stxdw(self, BPF_W);
+}
+
+static int bpf_fill_stxdw(struct bpf_test *self)
+{
+ return __bpf_fill_stxdw(self, BPF_DW);
+}
+
+static int __bpf_ld_imm64(struct bpf_insn insns[2], u8 reg, s64 imm64)
+{
+ struct bpf_insn tmp[] = {BPF_LD_IMM64(reg, imm64)};
+
+ memcpy(insns, tmp, sizeof(tmp));
+ return 2;
+}
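+
+/*
+ * BPF_LD_IMM64() is a pseudo instruction occupying two struct bpf_insn
+ * slots, with the 64-bit immediate split across the two imm fields.
+ * That is why the helper above always returns 2 and why callers
+ * advance their insn index by the return value:
+ *
+ *	i += __bpf_ld_imm64(&insn[i], R3, reg);
+ */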
+
+/*
+ * Branch conversion tests. Complex operations can expand to a lot
+ * of instructions when JITed. This in turn may cause jump offsets
+ * to overflow the field size of the native instruction, triggering
+ * a branch conversion mechanism in some JITs.
+ */
+static int __bpf_fill_max_jmp(struct bpf_test *self, int jmp, int imm)
+{
+ struct bpf_insn *insns;
+ int len = S16_MAX + 5;
+ int i;
+
+ insns = kmalloc_array(len, sizeof(*insns), GFP_KERNEL);
+ if (!insns)
+ return -ENOMEM;
+
+ i = __bpf_ld_imm64(insns, R1, 0x0123456789abcdefULL);
+ insns[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 1);
+ insns[i++] = BPF_JMP_IMM(jmp, R0, imm, S16_MAX);
+ insns[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 2);
+ insns[i++] = BPF_EXIT_INSN();
+
+ while (i < len - 1) {
+ static const int ops[] = {
+ BPF_LSH, BPF_RSH, BPF_ARSH, BPF_ADD,
+ BPF_SUB, BPF_MUL, BPF_DIV, BPF_MOD,
+ };
+ int op = ops[(i >> 1) % ARRAY_SIZE(ops)];
+
+ if (i & 1)
+ insns[i++] = BPF_ALU32_REG(op, R0, R1);
+ else
+ insns[i++] = BPF_ALU64_REG(op, R0, R1);
+ }
+
+ insns[i++] = BPF_EXIT_INSN();
+ self->u.ptr.insns = insns;
+ self->u.ptr.len = len;
+ BUG_ON(i != len);
+
+ return 0;
+}
+
+/* Branch taken by runtime decision */
+static int bpf_fill_max_jmp_taken(struct bpf_test *self)
+{
+ return __bpf_fill_max_jmp(self, BPF_JEQ, 1);
+}
+
+/* Branch not taken by runtime decision */
+static int bpf_fill_max_jmp_not_taken(struct bpf_test *self)
+{
+ return __bpf_fill_max_jmp(self, BPF_JEQ, 0);
+}
+
+/* Branch always taken, known at JIT time */
+static int bpf_fill_max_jmp_always_taken(struct bpf_test *self)
+{
+ return __bpf_fill_max_jmp(self, BPF_JGE, 0);
+}
+
+/* Branch never taken, known at JIT time */
+static int bpf_fill_max_jmp_never_taken(struct bpf_test *self)
+{
+ return __bpf_fill_max_jmp(self, BPF_JLT, 0);
+}
+
+/* ALU result computation used in tests */
+static bool __bpf_alu_result(u64 *res, u64 v1, u64 v2, u8 op)
+{
+ *res = 0;
+ switch (op) {
+ case BPF_MOV:
+ *res = v2;
+ break;
+ case BPF_AND:
+ *res = v1 & v2;
+ break;
+ case BPF_OR:
+ *res = v1 | v2;
+ break;
+ case BPF_XOR:
+ *res = v1 ^ v2;
+ break;
+ case BPF_LSH:
+ *res = v1 << v2;
+ break;
+ case BPF_RSH:
+ *res = v1 >> v2;
+ break;
+ case BPF_ARSH:
+ *res = v1 >> v2;
+ if (v2 > 0 && v1 > S64_MAX)
+ *res |= ~0ULL << (64 - v2);
+ break;
+ case BPF_ADD:
+ *res = v1 + v2;
+ break;
+ case BPF_SUB:
+ *res = v1 - v2;
+ break;
+ case BPF_MUL:
+ *res = v1 * v2;
+ break;
+ case BPF_DIV:
+ if (v2 == 0)
+ return false;
+ *res = div64_u64(v1, v2);
+ break;
+ case BPF_MOD:
+ if (v2 == 0)
+ return false;
+ div64_u64_rem(v1, v2, res);
+ break;
+ }
+ return true;
+}
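+
+/*
+ * Note on the BPF_ARSH case above: C's >> on the unsigned u64 operands
+ * is a logical shift, so the sign bits are re-inserted by hand when v1
+ * is negative as an s64. Worked example for v1 = 0xffffffff00000000,
+ * v2 = 8:
+ *
+ *	v1 >> 8			== 0x00ffffffff000000
+ *	OR ~0ULL << (64 - 8)	== 0xffffffffff000000
+ *
+ * which matches an arithmetic right shift of the signed value.
+ */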
+
+/* Test an ALU shift operation for all valid shift values */
+static int __bpf_fill_alu_shift(struct bpf_test *self, u8 op,
+ u8 mode, bool alu32)
+{
+ static const s64 regs[] = {
+ 0x0123456789abcdefLL, /* dword > 0, word < 0 */
+ 0xfedcba9876543210LL, /* dword < 0, word > 0 */
+ 0xfedcba0198765432LL, /* dword < 0, word < 0 */
+ 0x0123458967abcdefLL, /* dword > 0, word > 0 */
+ };
+ int bits = alu32 ? 32 : 64;
+ int len = (2 + 7 * bits) * ARRAY_SIZE(regs) + 3;
+ struct bpf_insn *insn;
+ int imm, k;
+ int i = 0;
+
+ insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ if (!insn)
+ return -ENOMEM;
+
+ insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 0);
+
+ for (k = 0; k < ARRAY_SIZE(regs); k++) {
+ s64 reg = regs[k];
+
+ i += __bpf_ld_imm64(&insn[i], R3, reg);
+
+ for (imm = 0; imm < bits; imm++) {
+ u64 val;
+
+ /* Perform operation */
+ insn[i++] = BPF_ALU64_REG(BPF_MOV, R1, R3);
+ insn[i++] = BPF_ALU64_IMM(BPF_MOV, R2, imm);
+ if (alu32) {
+ if (mode == BPF_K)
+ insn[i++] = BPF_ALU32_IMM(op, R1, imm);
+ else
+ insn[i++] = BPF_ALU32_REG(op, R1, R2);
+
+ if (op == BPF_ARSH)
+ reg = (s32)reg;
+ else
+ reg = (u32)reg;
+ __bpf_alu_result(&val, reg, imm, op);
+ val = (u32)val;
+ } else {
+ if (mode == BPF_K)
+ insn[i++] = BPF_ALU64_IMM(op, R1, imm);
+ else
+ insn[i++] = BPF_ALU64_REG(op, R1, R2);
+ __bpf_alu_result(&val, reg, imm, op);
+ }
+
+ /*
+ * When debugging a JIT that fails this test, one
+ * can write the immediate value to R0 here to find
+ * out which operand values that fail.
+ */
+
+ /* Load reference and check the result */
+ i += __bpf_ld_imm64(&insn[i], R4, val);
+ insn[i++] = BPF_JMP_REG(BPF_JEQ, R1, R4, 1);
+ insn[i++] = BPF_EXIT_INSN();
+ }
+ }
+
+ insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 1);
+ insn[i++] = BPF_EXIT_INSN();
+
+ self->u.ptr.insns = insn;
+ self->u.ptr.len = len;
+ BUG_ON(i != len);
+
+ return 0;
+}
+
+static int bpf_fill_alu64_lsh_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu_shift(self, BPF_LSH, BPF_K, false);
+}
+
+static int bpf_fill_alu64_rsh_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu_shift(self, BPF_RSH, BPF_K, false);
+}
+
+static int bpf_fill_alu64_arsh_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu_shift(self, BPF_ARSH, BPF_K, false);
+}
+
+static int bpf_fill_alu64_lsh_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu_shift(self, BPF_LSH, BPF_X, false);
+}
+
+static int bpf_fill_alu64_rsh_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu_shift(self, BPF_RSH, BPF_X, false);
+}
+
+static int bpf_fill_alu64_arsh_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu_shift(self, BPF_ARSH, BPF_X, false);
+}
+
+static int bpf_fill_alu32_lsh_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu_shift(self, BPF_LSH, BPF_K, true);
+}
+
+static int bpf_fill_alu32_rsh_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu_shift(self, BPF_RSH, BPF_K, true);
+}
+
+static int bpf_fill_alu32_arsh_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu_shift(self, BPF_ARSH, BPF_K, true);
+}
+
+static int bpf_fill_alu32_lsh_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu_shift(self, BPF_LSH, BPF_X, true);
+}
+
+static int bpf_fill_alu32_rsh_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu_shift(self, BPF_RSH, BPF_X, true);
+}
+
+static int bpf_fill_alu32_arsh_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu_shift(self, BPF_ARSH, BPF_X, true);
+}
+
+/*
+ * Test an ALU register shift operation for all valid shift values
+ * for the case when the source and destination are the same.
+ */
+static int __bpf_fill_alu_shift_same_reg(struct bpf_test *self, u8 op,
+ bool alu32)
+{
+ int bits = alu32 ? 32 : 64;
+ int len = 3 + 6 * bits;
+ struct bpf_insn *insn;
+ int i = 0;
+ u64 val;
+
+ insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ if (!insn)
+ return -ENOMEM;
+
+ insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 0);
+
+ for (val = 0; val < bits; val++) {
+ u64 res;
+
+ /* Perform operation */
+ insn[i++] = BPF_ALU64_IMM(BPF_MOV, R1, val);
+ if (alu32)
+ insn[i++] = BPF_ALU32_REG(op, R1, R1);
+ else
+ insn[i++] = BPF_ALU64_REG(op, R1, R1);
+
+ /* Compute the reference result */
+ __bpf_alu_result(&res, val, val, op);
+ if (alu32)
+ res = (u32)res;
+ i += __bpf_ld_imm64(&insn[i], R2, res);
+
+ /* Check the actual result */
+ insn[i++] = BPF_JMP_REG(BPF_JEQ, R1, R2, 1);
+ insn[i++] = BPF_EXIT_INSN();
+ }
+
+ insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 1);
+ insn[i++] = BPF_EXIT_INSN();
+
+ self->u.ptr.insns = insn;
+ self->u.ptr.len = len;
+ BUG_ON(i != len);
+
+ return 0;
+}
+
+static int bpf_fill_alu64_lsh_same_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu_shift_same_reg(self, BPF_LSH, false);
+}
+
+static int bpf_fill_alu64_rsh_same_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu_shift_same_reg(self, BPF_RSH, false);
+}
+
+static int bpf_fill_alu64_arsh_same_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu_shift_same_reg(self, BPF_ARSH, false);
+}
+
+static int bpf_fill_alu32_lsh_same_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu_shift_same_reg(self, BPF_LSH, true);
+}
+
+static int bpf_fill_alu32_rsh_same_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu_shift_same_reg(self, BPF_RSH, true);
+}
+
+static int bpf_fill_alu32_arsh_same_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu_shift_same_reg(self, BPF_ARSH, true);
+}
+
+/*
+ * Common operand pattern generator for exhaustive power-of-two magnitudes
+ * tests. The block size parameters can be adjusted to increase/reduce the
+ * number of combinatons tested and thereby execution speed and memory
+ * footprint.
+ */
+
+static inline s64 value(int msb, int delta, int sign)
+{
+ return sign * (1LL << msb) + delta;
+}
+
+static int __bpf_fill_pattern(struct bpf_test *self, void *arg,
+ int dbits, int sbits, int block1, int block2,
+ int (*emit)(struct bpf_test*, void*,
+ struct bpf_insn*, s64, s64))
+{
+ static const int sgn[][2] = {{1, 1}, {1, -1}, {-1, 1}, {-1, -1}};
+ struct bpf_insn *insns;
+ int di, si, bt, db, sb;
+ int count, len, k;
+ int extra = 1 + 2;
+ int i = 0;
+
+	/* Total number of iterations for the two patterns */
+ count = (dbits - 1) * (sbits - 1) * block1 * block1 * ARRAY_SIZE(sgn);
+ count += (max(dbits, sbits) - 1) * block2 * block2 * ARRAY_SIZE(sgn);
+
+ /* Compute the maximum number of insns and allocate the buffer */
+ len = extra + count * (*emit)(self, arg, NULL, 0, 0);
+ insns = kmalloc_array(len, sizeof(*insns), GFP_KERNEL);
+ if (!insns)
+ return -ENOMEM;
+
+ /* Add head instruction(s) */
+ insns[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 0);
+
+ /*
+ * Pattern 1: all combinations of power-of-two magnitudes and sign,
+ * and with a block of contiguous values around each magnitude.
+ */
+ for (di = 0; di < dbits - 1; di++) /* Dst magnitudes */
+ for (si = 0; si < sbits - 1; si++) /* Src magnitudes */
+ for (k = 0; k < ARRAY_SIZE(sgn); k++) /* Sign combos */
+ for (db = -(block1 / 2);
+ db < (block1 + 1) / 2; db++)
+ for (sb = -(block1 / 2);
+ sb < (block1 + 1) / 2; sb++) {
+ s64 dst, src;
+
+ dst = value(di, db, sgn[k][0]);
+ src = value(si, sb, sgn[k][1]);
+ i += (*emit)(self, arg,
+ &insns[i],
+ dst, src);
+ }
+ /*
+ * Pattern 2: all combinations for a larger block of values
+ * for each power-of-two magnitude and sign, where the magnitude is
+ * the same for both operands.
+ */
+ for (bt = 0; bt < max(dbits, sbits) - 1; bt++) /* Magnitude */
+ for (k = 0; k < ARRAY_SIZE(sgn); k++) /* Sign combos */
+ for (db = -(block2 / 2); db < (block2 + 1) / 2; db++)
+ for (sb = -(block2 / 2);
+ sb < (block2 + 1) / 2; sb++) {
+ s64 dst, src;
+
+ dst = value(bt % dbits, db, sgn[k][0]);
+ src = value(bt % sbits, sb, sgn[k][1]);
+ i += (*emit)(self, arg, &insns[i],
+ dst, src);
+ }
+
+ /* Append tail instructions */
+ insns[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 1);
+ insns[i++] = BPF_EXIT_INSN();
+ BUG_ON(i > len);
+
+ self->u.ptr.insns = insns;
+ self->u.ptr.len = i;
+
+ return 0;
+}
+
+/*
+ * Block size parameters used in pattern tests below. Tune as needed to
+ * increase/reduce the number of combinations tested; see the following
+ * examples.
+ * block values per operand MSB
+ * ----------------------------------------
+ * 0 none
+ * 1 (1 << MSB)
+ * 2 (1 << MSB) + [-1, 0]
+ * 3 (1 << MSB) + [-1, 0, 1]
+ */
+#define PATTERN_BLOCK1 1
+#define PATTERN_BLOCK2 5
+
+/* Number of test runs for a pattern test */
+#define NR_PATTERN_RUNS 1
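+
+/*
+ * With the settings above, pattern 1 probes exactly +/-(1 << msb) for
+ * each operand (PATTERN_BLOCK1 == 1 contributes only a zero delta),
+ * while pattern 2 probes +/-((1 << msb) + d) for d in [-2, 2]
+ * (PATTERN_BLOCK2 == 5), with both operands at the same magnitude.
+ */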
+
+/*
+ * Exhaustive tests of ALU operations for all combinations of power-of-two
+ * magnitudes of the operands, both for positive and negative values. The
+ * test is designed to verify e.g. the ALU and ALU64 operations for JITs that
+ * emit different code depending on the magnitude of the immediate value.
+ */
+static int __bpf_emit_alu64_imm(struct bpf_test *self, void *arg,
+ struct bpf_insn *insns, s64 dst, s64 imm)
+{
+ int op = *(int *)arg;
+ int i = 0;
+ u64 res;
+
+ if (!insns)
+ return 7;
+
+ if (__bpf_alu_result(&res, dst, (s32)imm, op)) {
+ i += __bpf_ld_imm64(&insns[i], R1, dst);
+ i += __bpf_ld_imm64(&insns[i], R3, res);
+ insns[i++] = BPF_ALU64_IMM(op, R1, imm);
+ insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 1);
+ insns[i++] = BPF_EXIT_INSN();
+ }
+
+ return i;
+}
+
+static int __bpf_emit_alu32_imm(struct bpf_test *self, void *arg,
+ struct bpf_insn *insns, s64 dst, s64 imm)
+{
+ int op = *(int *)arg;
+ int i = 0;
+ u64 res;
+
+ if (!insns)
+ return 7;
+
+ if (__bpf_alu_result(&res, (u32)dst, (u32)imm, op)) {
+ i += __bpf_ld_imm64(&insns[i], R1, dst);
+ i += __bpf_ld_imm64(&insns[i], R3, (u32)res);
+ insns[i++] = BPF_ALU32_IMM(op, R1, imm);
+ insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 1);
+ insns[i++] = BPF_EXIT_INSN();
+ }
+
+ return i;
+}
+
+static int __bpf_emit_alu64_reg(struct bpf_test *self, void *arg,
+ struct bpf_insn *insns, s64 dst, s64 src)
+{
+ int op = *(int *)arg;
+ int i = 0;
+ u64 res;
+
+ if (!insns)
+ return 9;
+
+ if (__bpf_alu_result(&res, dst, src, op)) {
+ i += __bpf_ld_imm64(&insns[i], R1, dst);
+ i += __bpf_ld_imm64(&insns[i], R2, src);
+ i += __bpf_ld_imm64(&insns[i], R3, res);
+ insns[i++] = BPF_ALU64_REG(op, R1, R2);
+ insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 1);
+ insns[i++] = BPF_EXIT_INSN();
+ }
+
+ return i;
+}
+
+static int __bpf_emit_alu32_reg(struct bpf_test *self, void *arg,
+ struct bpf_insn *insns, s64 dst, s64 src)
+{
+ int op = *(int *)arg;
+ int i = 0;
+ u64 res;
+
+ if (!insns)
+ return 9;
+
+ if (__bpf_alu_result(&res, (u32)dst, (u32)src, op)) {
+ i += __bpf_ld_imm64(&insns[i], R1, dst);
+ i += __bpf_ld_imm64(&insns[i], R2, src);
+ i += __bpf_ld_imm64(&insns[i], R3, (u32)res);
+ insns[i++] = BPF_ALU32_REG(op, R1, R2);
+ insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 1);
+ insns[i++] = BPF_EXIT_INSN();
+ }
+
+ return i;
+}
+
+static int __bpf_fill_alu64_imm(struct bpf_test *self, int op)
+{
+ return __bpf_fill_pattern(self, &op, 64, 32,
+ PATTERN_BLOCK1, PATTERN_BLOCK2,
+ &__bpf_emit_alu64_imm);
+}
+
+static int __bpf_fill_alu32_imm(struct bpf_test *self, int op)
+{
+ return __bpf_fill_pattern(self, &op, 64, 32,
+ PATTERN_BLOCK1, PATTERN_BLOCK2,
+ &__bpf_emit_alu32_imm);
+}
+
+static int __bpf_fill_alu64_reg(struct bpf_test *self, int op)
+{
+ return __bpf_fill_pattern(self, &op, 64, 64,
+ PATTERN_BLOCK1, PATTERN_BLOCK2,
+ &__bpf_emit_alu64_reg);
+}
+
+static int __bpf_fill_alu32_reg(struct bpf_test *self, int op)
+{
+ return __bpf_fill_pattern(self, &op, 64, 64,
+ PATTERN_BLOCK1, PATTERN_BLOCK2,
+ &__bpf_emit_alu32_reg);
+}
+
+/* ALU64 immediate operations */
+static int bpf_fill_alu64_mov_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_imm(self, BPF_MOV);
+}
+
+static int bpf_fill_alu64_and_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_imm(self, BPF_AND);
+}
+
+static int bpf_fill_alu64_or_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_imm(self, BPF_OR);
+}
+
+static int bpf_fill_alu64_xor_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_imm(self, BPF_XOR);
+}
+
+static int bpf_fill_alu64_add_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_imm(self, BPF_ADD);
+}
+
+static int bpf_fill_alu64_sub_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_imm(self, BPF_SUB);
+}
+
+static int bpf_fill_alu64_mul_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_imm(self, BPF_MUL);
+}
+
+static int bpf_fill_alu64_div_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_imm(self, BPF_DIV);
+}
+
+static int bpf_fill_alu64_mod_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_imm(self, BPF_MOD);
+}
+
+/* ALU32 immediate operations */
+static int bpf_fill_alu32_mov_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_imm(self, BPF_MOV);
+}
+
+static int bpf_fill_alu32_and_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_imm(self, BPF_AND);
+}
+
+static int bpf_fill_alu32_or_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_imm(self, BPF_OR);
+}
+
+static int bpf_fill_alu32_xor_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_imm(self, BPF_XOR);
+}
+
+static int bpf_fill_alu32_add_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_imm(self, BPF_ADD);
+}
+
+static int bpf_fill_alu32_sub_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_imm(self, BPF_SUB);
+}
+
+static int bpf_fill_alu32_mul_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_imm(self, BPF_MUL);
+}
+
+static int bpf_fill_alu32_div_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_imm(self, BPF_DIV);
+}
+
+static int bpf_fill_alu32_mod_imm(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_imm(self, BPF_MOD);
+}
+
+/* ALU64 register operations */
+static int bpf_fill_alu64_mov_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_reg(self, BPF_MOV);
+}
+
+static int bpf_fill_alu64_and_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_reg(self, BPF_AND);
+}
+
+static int bpf_fill_alu64_or_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_reg(self, BPF_OR);
+}
+
+static int bpf_fill_alu64_xor_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_reg(self, BPF_XOR);
+}
+
+static int bpf_fill_alu64_add_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_reg(self, BPF_ADD);
+}
+
+static int bpf_fill_alu64_sub_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_reg(self, BPF_SUB);
+}
+
+static int bpf_fill_alu64_mul_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_reg(self, BPF_MUL);
+}
+
+static int bpf_fill_alu64_div_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_reg(self, BPF_DIV);
+}
+
+static int bpf_fill_alu64_mod_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu64_reg(self, BPF_MOD);
+}
+
+/* ALU32 register operations */
+static int bpf_fill_alu32_mov_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_reg(self, BPF_MOV);
+}
+
+static int bpf_fill_alu32_and_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_reg(self, BPF_AND);
+}
+
+static int bpf_fill_alu32_or_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_reg(self, BPF_OR);
+}
+
+static int bpf_fill_alu32_xor_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_reg(self, BPF_XOR);
+}
+
+static int bpf_fill_alu32_add_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_reg(self, BPF_ADD);
+}
+
+static int bpf_fill_alu32_sub_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_reg(self, BPF_SUB);
+}
+
+static int bpf_fill_alu32_mul_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_reg(self, BPF_MUL);
+}
+
+static int bpf_fill_alu32_div_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_reg(self, BPF_DIV);
+}
+
+static int bpf_fill_alu32_mod_reg(struct bpf_test *self)
+{
+ return __bpf_fill_alu32_reg(self, BPF_MOD);
+}
+
+/*
+ * Test JITs that implement complex ALU operations as function
+ * calls, and must re-arrange operands for argument passing.
+ */
+static int __bpf_fill_alu_imm_regs(struct bpf_test *self, u8 op, bool alu32)
+{
+ int len = 2 + 10 * 10;
+ struct bpf_insn *insns;
+ u64 dst, res;
+ int i = 0;
+ u32 imm;
+ int rd;
+
+ insns = kmalloc_array(len, sizeof(*insns), GFP_KERNEL);
+ if (!insns)
+ return -ENOMEM;
+
+ /* Operand and result values according to operation */
+ if (alu32)
+ dst = 0x76543210U;
+ else
+ dst = 0x7edcba9876543210ULL;
+ imm = 0x01234567U;
+
+ if (op == BPF_LSH || op == BPF_RSH || op == BPF_ARSH)
+ imm &= 31;
+
+ __bpf_alu_result(&res, dst, imm, op);
+
+ if (alu32)
+ res = (u32)res;
+
+ /* Check all operand registers */
+ for (rd = R0; rd <= R9; rd++) {
+ i += __bpf_ld_imm64(&insns[i], rd, dst);
+
+ if (alu32)
+ insns[i++] = BPF_ALU32_IMM(op, rd, imm);
+ else
+ insns[i++] = BPF_ALU64_IMM(op, rd, imm);
+
+ insns[i++] = BPF_JMP32_IMM(BPF_JEQ, rd, res, 2);
+ insns[i++] = BPF_MOV64_IMM(R0, __LINE__);
+ insns[i++] = BPF_EXIT_INSN();
+
+ insns[i++] = BPF_ALU64_IMM(BPF_RSH, rd, 32);
+ insns[i++] = BPF_JMP32_IMM(BPF_JEQ, rd, res >> 32, 2);
+ insns[i++] = BPF_MOV64_IMM(R0, __LINE__);
+ insns[i++] = BPF_EXIT_INSN();
+ }
+
+ insns[i++] = BPF_MOV64_IMM(R0, 1);
+ insns[i++] = BPF_EXIT_INSN();
+
+ self->u.ptr.insns = insns;
+ self->u.ptr.len = len;
+ BUG_ON(i != len);
+
+ return 0;
+}
+
+/* ALU64 K registers */
+static int bpf_fill_alu64_mov_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_MOV, false);
+}
+
+static int bpf_fill_alu64_and_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_AND, false);
+}
+
+static int bpf_fill_alu64_or_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_OR, false);
+}
+
+static int bpf_fill_alu64_xor_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_XOR, false);
+}
+
+static int bpf_fill_alu64_lsh_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_LSH, false);
+}
+
+static int bpf_fill_alu64_rsh_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_RSH, false);
+}
+
+static int bpf_fill_alu64_arsh_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_ARSH, false);
+}
+
+static int bpf_fill_alu64_add_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_ADD, false);
+}
+
+static int bpf_fill_alu64_sub_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_SUB, false);
+}
+
+static int bpf_fill_alu64_mul_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_MUL, false);
+}
+
+static int bpf_fill_alu64_div_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_DIV, false);
+}
+
+static int bpf_fill_alu64_mod_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_MOD, false);
+}
+
+/* ALU32 K registers */
+static int bpf_fill_alu32_mov_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_MOV, true);
+}
+
+static int bpf_fill_alu32_and_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_AND, true);
+}
+
+static int bpf_fill_alu32_or_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_OR, true);
+}
+
+static int bpf_fill_alu32_xor_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_XOR, true);
+}
+
+static int bpf_fill_alu32_lsh_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_LSH, true);
+}
+
+static int bpf_fill_alu32_rsh_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_RSH, true);
+}
+
+static int bpf_fill_alu32_arsh_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_ARSH, true);
+}
+
+static int bpf_fill_alu32_add_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_ADD, true);
+}
+
+static int bpf_fill_alu32_sub_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_SUB, true);
+}
+
+static int bpf_fill_alu32_mul_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_MUL, true);
+}
+
+static int bpf_fill_alu32_div_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_DIV, true);
+}
+
+static int bpf_fill_alu32_mod_imm_regs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_imm_regs(self, BPF_MOD, true);
+}
+
+/*
+ * Test JITs that implement complex ALU operations as function
+ * calls, and must re-arrange operands for argument passing.
+ */
+static int __bpf_fill_alu_reg_pairs(struct bpf_test *self, u8 op, bool alu32)
+{
+ int len = 2 + 10 * 10 * 12;
+ u64 dst, src, res, same;
+ struct bpf_insn *insns;
+ int rd, rs;
+ int i = 0;
+
+ insns = kmalloc_array(len, sizeof(*insns), GFP_KERNEL);
+ if (!insns)
+ return -ENOMEM;
+
+ /* Operand and result values according to operation */
+ if (alu32) {
+ dst = 0x76543210U;
+ src = 0x01234567U;
+ } else {
+ dst = 0x7edcba9876543210ULL;
+ src = 0x0123456789abcdefULL;
+ }
+
+ if (op == BPF_LSH || op == BPF_RSH || op == BPF_ARSH)
+ src &= 31;
+
+ __bpf_alu_result(&res, dst, src, op);
+ __bpf_alu_result(&same, src, src, op);
+
+ if (alu32) {
+ res = (u32)res;
+ same = (u32)same;
+ }
+
+ /* Check all combinations of operand registers */
+ for (rd = R0; rd <= R9; rd++) {
+ for (rs = R0; rs <= R9; rs++) {
+ u64 val = rd == rs ? same : res;
+
+ i += __bpf_ld_imm64(&insns[i], rd, dst);
+ i += __bpf_ld_imm64(&insns[i], rs, src);
+
+ if (alu32)
+ insns[i++] = BPF_ALU32_REG(op, rd, rs);
+ else
+ insns[i++] = BPF_ALU64_REG(op, rd, rs);
+
+ insns[i++] = BPF_JMP32_IMM(BPF_JEQ, rd, val, 2);
+ insns[i++] = BPF_MOV64_IMM(R0, __LINE__);
+ insns[i++] = BPF_EXIT_INSN();
+
+ insns[i++] = BPF_ALU64_IMM(BPF_RSH, rd, 32);
+ insns[i++] = BPF_JMP32_IMM(BPF_JEQ, rd, val >> 32, 2);
+ insns[i++] = BPF_MOV64_IMM(R0, __LINE__);
+ insns[i++] = BPF_EXIT_INSN();
+ }
+ }
+
+ insns[i++] = BPF_MOV64_IMM(R0, 1);
+ insns[i++] = BPF_EXIT_INSN();
+
+ self->u.ptr.insns = insns;
+ self->u.ptr.len = len;
+ BUG_ON(i != len);
+
+ return 0;
+}
+
+/* ALU64 X register combinations */
+static int bpf_fill_alu64_mov_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_MOV, false);
+}
+
+static int bpf_fill_alu64_and_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_AND, false);
+}
+
+static int bpf_fill_alu64_or_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_OR, false);
+}
+
+static int bpf_fill_alu64_xor_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_XOR, false);
+}
+
+static int bpf_fill_alu64_lsh_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_LSH, false);
+}
+
+static int bpf_fill_alu64_rsh_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_RSH, false);
+}
+
+static int bpf_fill_alu64_arsh_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_ARSH, false);
+}
+
+static int bpf_fill_alu64_add_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_ADD, false);
+}
+
+static int bpf_fill_alu64_sub_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_SUB, false);
+}
+
+static int bpf_fill_alu64_mul_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_MUL, false);
+}
+
+static int bpf_fill_alu64_div_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_DIV, false);
+}
+
+static int bpf_fill_alu64_mod_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_MOD, false);
+}
+
+/* ALU32 X register combinations */
+static int bpf_fill_alu32_mov_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_MOV, true);
+}
+
+static int bpf_fill_alu32_and_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_AND, true);
+}
+
+static int bpf_fill_alu32_or_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_OR, true);
+}
+
+static int bpf_fill_alu32_xor_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_XOR, true);
+}
+
+static int bpf_fill_alu32_lsh_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_LSH, true);
+}
+
+static int bpf_fill_alu32_rsh_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_RSH, true);
+}
+
+static int bpf_fill_alu32_arsh_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_ARSH, true);
+}
+
+static int bpf_fill_alu32_add_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_ADD, true);
+}
+
+static int bpf_fill_alu32_sub_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_SUB, true);
+}
+
+static int bpf_fill_alu32_mul_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_MUL, true);
+}
+
+static int bpf_fill_alu32_div_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_DIV, true);
+}
+
+static int bpf_fill_alu32_mod_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_alu_reg_pairs(self, BPF_MOD, true);
+}
+
+/*
+ * Exhaustive tests of atomic operations for all power-of-two operand
+ * magnitudes, both for positive and negative values.
+ */
+
+static int __bpf_emit_atomic64(struct bpf_test *self, void *arg,
+ struct bpf_insn *insns, s64 dst, s64 src)
+{
+ int op = *(int *)arg;
+ u64 keep, fetch, res;
+ int i = 0;
+
+ if (!insns)
+ return 21;
+
+ switch (op) {
+ case BPF_XCHG:
+ res = src;
+ break;
+ default:
+ __bpf_alu_result(&res, dst, src, BPF_OP(op));
+ }
+
+ keep = 0x0123456789abcdefULL;
+ if (op & BPF_FETCH)
+ fetch = dst;
+ else
+ fetch = src;
+
+ i += __bpf_ld_imm64(&insns[i], R0, keep);
+ i += __bpf_ld_imm64(&insns[i], R1, dst);
+ i += __bpf_ld_imm64(&insns[i], R2, src);
+ i += __bpf_ld_imm64(&insns[i], R3, res);
+ i += __bpf_ld_imm64(&insns[i], R4, fetch);
+ i += __bpf_ld_imm64(&insns[i], R5, keep);
+
+ insns[i++] = BPF_STX_MEM(BPF_DW, R10, R1, -8);
+ insns[i++] = BPF_ATOMIC_OP(BPF_DW, op, R10, R2, -8);
+ insns[i++] = BPF_LDX_MEM(BPF_DW, R1, R10, -8);
+
+ insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 1);
+ insns[i++] = BPF_EXIT_INSN();
+
+ insns[i++] = BPF_JMP_REG(BPF_JEQ, R2, R4, 1);
+ insns[i++] = BPF_EXIT_INSN();
+
+ insns[i++] = BPF_JMP_REG(BPF_JEQ, R0, R5, 1);
+ insns[i++] = BPF_EXIT_INSN();
+
+ return i;
+}
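+
+/*
+ * In C terms, the emitted sequence above is roughly:
+ *
+ *	u64 mem = dst;
+ *	atomic_op(&mem, src);		// e.g. mem += src for BPF_ADD
+ *	check(mem == res);		// memory got the computed result
+ *	check(R2 == fetch);		// src reg clobbered only by BPF_FETCH
+ *	check(R0 == keep);		// R0 must be left untouched
+ */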
+
+static int __bpf_emit_atomic32(struct bpf_test *self, void *arg,
+ struct bpf_insn *insns, s64 dst, s64 src)
+{
+ int op = *(int *)arg;
+ u64 keep, fetch, res;
+ int i = 0;
+
+ if (!insns)
+ return 21;
+
+ switch (op) {
+ case BPF_XCHG:
+ res = src;
+ break;
+ default:
+ __bpf_alu_result(&res, (u32)dst, (u32)src, BPF_OP(op));
+ }
+
+ keep = 0x0123456789abcdefULL;
+ if (op & BPF_FETCH)
+ fetch = (u32)dst;
+ else
+ fetch = src;
+
+ i += __bpf_ld_imm64(&insns[i], R0, keep);
+ i += __bpf_ld_imm64(&insns[i], R1, (u32)dst);
+ i += __bpf_ld_imm64(&insns[i], R2, src);
+ i += __bpf_ld_imm64(&insns[i], R3, (u32)res);
+ i += __bpf_ld_imm64(&insns[i], R4, fetch);
+ i += __bpf_ld_imm64(&insns[i], R5, keep);
+
+ insns[i++] = BPF_STX_MEM(BPF_W, R10, R1, -4);
+ insns[i++] = BPF_ATOMIC_OP(BPF_W, op, R10, R2, -4);
+ insns[i++] = BPF_LDX_MEM(BPF_W, R1, R10, -4);
+
+ insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 1);
+ insns[i++] = BPF_EXIT_INSN();
+
+ insns[i++] = BPF_JMP_REG(BPF_JEQ, R2, R4, 1);
+ insns[i++] = BPF_EXIT_INSN();
+
+ insns[i++] = BPF_JMP_REG(BPF_JEQ, R0, R5, 1);
+ insns[i++] = BPF_EXIT_INSN();
+
+ return i;
+}
+
+static int __bpf_emit_cmpxchg64(struct bpf_test *self, void *arg,
+ struct bpf_insn *insns, s64 dst, s64 src)
+{
+ int i = 0;
+
+ if (!insns)
+ return 23;
+
+ i += __bpf_ld_imm64(&insns[i], R0, ~dst);
+ i += __bpf_ld_imm64(&insns[i], R1, dst);
+ i += __bpf_ld_imm64(&insns[i], R2, src);
+
+ /* Result unsuccessful */
+ insns[i++] = BPF_STX_MEM(BPF_DW, R10, R1, -8);
+ insns[i++] = BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -8);
+ insns[i++] = BPF_LDX_MEM(BPF_DW, R3, R10, -8);
+
+ insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 2);
+ insns[i++] = BPF_MOV64_IMM(R0, __LINE__);
+ insns[i++] = BPF_EXIT_INSN();
+
+ insns[i++] = BPF_JMP_REG(BPF_JEQ, R0, R3, 2);
+ insns[i++] = BPF_MOV64_IMM(R0, __LINE__);
+ insns[i++] = BPF_EXIT_INSN();
+
+ /* Result successful */
+ insns[i++] = BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -8);
+ insns[i++] = BPF_LDX_MEM(BPF_DW, R3, R10, -8);
+
+ insns[i++] = BPF_JMP_REG(BPF_JEQ, R2, R3, 2);
+ insns[i++] = BPF_MOV64_IMM(R0, __LINE__);
+ insns[i++] = BPF_EXIT_INSN();
+
+ insns[i++] = BPF_JMP_REG(BPF_JEQ, R0, R1, 2);
+ insns[i++] = BPF_MOV64_IMM(R0, __LINE__);
+ insns[i++] = BPF_EXIT_INSN();
+
+ return i;
+}
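+
+/*
+ * Reminder of the BPF_CMPXCHG semantics relied on above: R0 is compared
+ * with the memory operand; on a match, memory is replaced by the source
+ * register, and in either case the old memory value lands in R0. The
+ * first attempt seeds R0 with ~dst, so it fails (memory keeps dst, R0
+ * becomes dst); R0 then equals the memory value, so the retry succeeds
+ * and memory becomes src.
+ */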
+
+static int __bpf_emit_cmpxchg32(struct bpf_test *self, void *arg,
+ struct bpf_insn *insns, s64 dst, s64 src)
+{
+ int i = 0;
+
+ if (!insns)
+ return 27;
+
+ i += __bpf_ld_imm64(&insns[i], R0, ~dst);
+ i += __bpf_ld_imm64(&insns[i], R1, (u32)dst);
+ i += __bpf_ld_imm64(&insns[i], R2, src);
+
+ /* Result unsuccessful */
+ insns[i++] = BPF_STX_MEM(BPF_W, R10, R1, -4);
+ insns[i++] = BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R2, -4);
+	insns[i++] = BPF_ZEXT_REG(R0); /* Zext always inserted by verifier */
+ insns[i++] = BPF_LDX_MEM(BPF_W, R3, R10, -4);
+
+ insns[i++] = BPF_JMP32_REG(BPF_JEQ, R1, R3, 2);
+ insns[i++] = BPF_MOV32_IMM(R0, __LINE__);
+ insns[i++] = BPF_EXIT_INSN();
+
+ insns[i++] = BPF_JMP_REG(BPF_JEQ, R0, R3, 2);
+ insns[i++] = BPF_MOV32_IMM(R0, __LINE__);
+ insns[i++] = BPF_EXIT_INSN();
+
+ /* Result successful */
+ i += __bpf_ld_imm64(&insns[i], R0, dst);
+ insns[i++] = BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R2, -4);
+	insns[i++] = BPF_ZEXT_REG(R0); /* Zext always inserted by verifier */
+ insns[i++] = BPF_LDX_MEM(BPF_W, R3, R10, -4);
+
+ insns[i++] = BPF_JMP32_REG(BPF_JEQ, R2, R3, 2);
+ insns[i++] = BPF_MOV32_IMM(R0, __LINE__);
+ insns[i++] = BPF_EXIT_INSN();
+
+ insns[i++] = BPF_JMP_REG(BPF_JEQ, R0, R1, 2);
+ insns[i++] = BPF_MOV32_IMM(R0, __LINE__);
+ insns[i++] = BPF_EXIT_INSN();
+
+ return i;
+}
+
+static int __bpf_fill_atomic64(struct bpf_test *self, int op)
+{
+ return __bpf_fill_pattern(self, &op, 64, 64,
+ 0, PATTERN_BLOCK2,
+ &__bpf_emit_atomic64);
+}
+
+static int __bpf_fill_atomic32(struct bpf_test *self, int op)
+{
+ return __bpf_fill_pattern(self, &op, 64, 64,
+ 0, PATTERN_BLOCK2,
+ &__bpf_emit_atomic32);
+}
+
+/* 64-bit atomic operations */
+static int bpf_fill_atomic64_add(struct bpf_test *self)
+{
+ return __bpf_fill_atomic64(self, BPF_ADD);
+}
+
+static int bpf_fill_atomic64_and(struct bpf_test *self)
+{
+ return __bpf_fill_atomic64(self, BPF_AND);
+}
+
+static int bpf_fill_atomic64_or(struct bpf_test *self)
+{
+ return __bpf_fill_atomic64(self, BPF_OR);
+}
+
+static int bpf_fill_atomic64_xor(struct bpf_test *self)
+{
+ return __bpf_fill_atomic64(self, BPF_XOR);
+}
+
+static int bpf_fill_atomic64_add_fetch(struct bpf_test *self)
+{
+ return __bpf_fill_atomic64(self, BPF_ADD | BPF_FETCH);
+}
+
+static int bpf_fill_atomic64_and_fetch(struct bpf_test *self)
+{
+ return __bpf_fill_atomic64(self, BPF_AND | BPF_FETCH);
+}
+
+static int bpf_fill_atomic64_or_fetch(struct bpf_test *self)
+{
+ return __bpf_fill_atomic64(self, BPF_OR | BPF_FETCH);
+}
+
+static int bpf_fill_atomic64_xor_fetch(struct bpf_test *self)
+{
+ return __bpf_fill_atomic64(self, BPF_XOR | BPF_FETCH);
+}
+
+static int bpf_fill_atomic64_xchg(struct bpf_test *self)
+{
+ return __bpf_fill_atomic64(self, BPF_XCHG);
+}
+
+static int bpf_fill_cmpxchg64(struct bpf_test *self)
+{
+ return __bpf_fill_pattern(self, NULL, 64, 64, 0, PATTERN_BLOCK2,
+ &__bpf_emit_cmpxchg64);
+}
+
+/* 32-bit atomic operations */
+static int bpf_fill_atomic32_add(struct bpf_test *self)
+{
+ return __bpf_fill_atomic32(self, BPF_ADD);
+}
+
+static int bpf_fill_atomic32_and(struct bpf_test *self)
+{
+ return __bpf_fill_atomic32(self, BPF_AND);
+}
+
+static int bpf_fill_atomic32_or(struct bpf_test *self)
+{
+ return __bpf_fill_atomic32(self, BPF_OR);
+}
+
+static int bpf_fill_atomic32_xor(struct bpf_test *self)
+{
+ return __bpf_fill_atomic32(self, BPF_XOR);
+}
+
+static int bpf_fill_atomic32_add_fetch(struct bpf_test *self)
+{
+ return __bpf_fill_atomic32(self, BPF_ADD | BPF_FETCH);
+}
+
+static int bpf_fill_atomic32_and_fetch(struct bpf_test *self)
+{
+ return __bpf_fill_atomic32(self, BPF_AND | BPF_FETCH);
+}
+
+static int bpf_fill_atomic32_or_fetch(struct bpf_test *self)
+{
+ return __bpf_fill_atomic32(self, BPF_OR | BPF_FETCH);
+}
+
+static int bpf_fill_atomic32_xor_fetch(struct bpf_test *self)
+{
+ return __bpf_fill_atomic32(self, BPF_XOR | BPF_FETCH);
+}
+
+static int bpf_fill_atomic32_xchg(struct bpf_test *self)
+{
+ return __bpf_fill_atomic32(self, BPF_XCHG);
+}
+
+static int bpf_fill_cmpxchg32(struct bpf_test *self)
+{
+ return __bpf_fill_pattern(self, NULL, 64, 64, 0, PATTERN_BLOCK2,
+ &__bpf_emit_cmpxchg32);
+}
+
+/*
+ * Test JITs that implement ATOMIC operations as function calls or other
+ * primitives, and therefore must re-arrange operands for argument passing.
+ * Every (rd, rs) register pair is exercised, including aliased pairs,
+ * since operand shuffling is where such JITs typically go wrong.
+ */
+static int __bpf_fill_atomic_reg_pairs(struct bpf_test *self, u8 width, u8 op)
+{
+ struct bpf_insn *insn;
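+	/*
+	 * Worst case: 34 insns for each of the 10 x 10 register pairs,
+	 * plus the final MOV and EXIT; the BUG_ON below checks the bound.
+	 */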
+ int len = 2 + 34 * 10 * 10;
+ u64 mem, upd, res;
+ int rd, rs, i = 0;
+
+ insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ if (!insn)
+ return -ENOMEM;
+
+ /* Operand and memory values */
+ if (width == BPF_DW) {
+ mem = 0x0123456789abcdefULL;
+ upd = 0xfedcba9876543210ULL;
+ } else { /* BPF_W */
+ mem = 0x01234567U;
+ upd = 0x76543210U;
+ }
+
+ /* Memory updated according to operation */
+ switch (op) {
+ case BPF_XCHG:
+ res = upd;
+ break;
+ case BPF_CMPXCHG:
+ res = mem;
+ break;
+ default:
+ __bpf_alu_result(&res, mem, upd, BPF_OP(op));
+ }
+
+ /* Test all operand registers */
+ for (rd = R0; rd <= R9; rd++) {
+ for (rs = R0; rs <= R9; rs++) {
+ u64 cmp, src;
+
+ /* Initialize value in memory */
+ i += __bpf_ld_imm64(&insn[i], R0, mem);
+ insn[i++] = BPF_STX_MEM(width, R10, R0, -8);
+
+ /* Initialize registers in order */
+ i += __bpf_ld_imm64(&insn[i], R0, ~mem);
+ i += __bpf_ld_imm64(&insn[i], rs, upd);
+ insn[i++] = BPF_MOV64_REG(rd, R10);
+
+ /* Perform atomic operation */
+ insn[i++] = BPF_ATOMIC_OP(width, op, rd, rs, -8);
+ if (op == BPF_CMPXCHG && width == BPF_W)
+ insn[i++] = BPF_ZEXT_REG(R0);
+
+ /* Check R0 register value */
+ if (op == BPF_CMPXCHG)
+ cmp = mem; /* Expect value from memory */
+ else if (R0 == rd || R0 == rs)
+ cmp = 0; /* Aliased, checked below */
+ else
+ cmp = ~mem; /* Expect value to be preserved */
+ if (cmp) {
+ insn[i++] = BPF_JMP32_IMM(BPF_JEQ, R0,
+ (u32)cmp, 2);
+ insn[i++] = BPF_MOV32_IMM(R0, __LINE__);
+ insn[i++] = BPF_EXIT_INSN();
+ insn[i++] = BPF_ALU64_IMM(BPF_RSH, R0, 32);
+ insn[i++] = BPF_JMP32_IMM(BPF_JEQ, R0,
+ cmp >> 32, 2);
+ insn[i++] = BPF_MOV32_IMM(R0, __LINE__);
+ insn[i++] = BPF_EXIT_INSN();
+ }
+
+ /* Check source register value */
+ if (rs == R0 && op == BPF_CMPXCHG)
+ src = 0; /* Aliased with R0, checked above */
+ else if (rs == rd && (op == BPF_CMPXCHG ||
+ !(op & BPF_FETCH)))
+ src = 0; /* Aliased with rd, checked below */
+ else if (op == BPF_CMPXCHG)
+ src = upd; /* Expect value to be preserved */
+ else if (op & BPF_FETCH)
+ src = mem; /* Expect fetched value from mem */
+ else /* no fetch */
+ src = upd; /* Expect value to be preserved */
+ if (src) {
+ insn[i++] = BPF_JMP32_IMM(BPF_JEQ, rs,
+ (u32)src, 2);
+ insn[i++] = BPF_MOV32_IMM(R0, __LINE__);
+ insn[i++] = BPF_EXIT_INSN();
+ insn[i++] = BPF_ALU64_IMM(BPF_RSH, rs, 32);
+ insn[i++] = BPF_JMP32_IMM(BPF_JEQ, rs,
+ src >> 32, 2);
+ insn[i++] = BPF_MOV32_IMM(R0, __LINE__);
+ insn[i++] = BPF_EXIT_INSN();
+ }
+
+ /* Check destination register value */
+ if (!(rd == R0 && op == BPF_CMPXCHG) &&
+ !(rd == rs && (op & BPF_FETCH))) {
+ insn[i++] = BPF_JMP_REG(BPF_JEQ, rd, R10, 2);
+ insn[i++] = BPF_MOV32_IMM(R0, __LINE__);
+ insn[i++] = BPF_EXIT_INSN();
+ }
+
+ /* Check value in memory */
+ if (rs != rd) { /* No aliasing */
+ i += __bpf_ld_imm64(&insn[i], R1, res);
+ } else if (op == BPF_XCHG) { /* Aliased, XCHG */
+ insn[i++] = BPF_MOV64_REG(R1, R10);
+ } else if (op == BPF_CMPXCHG) { /* Aliased, CMPXCHG */
+ i += __bpf_ld_imm64(&insn[i], R1, mem);
+ } else { /* Aliased, ALU oper */
+ i += __bpf_ld_imm64(&insn[i], R1, mem);
+ insn[i++] = BPF_ALU64_REG(BPF_OP(op), R1, R10);
+ }
+
+ insn[i++] = BPF_LDX_MEM(width, R0, R10, -8);
+ if (width == BPF_DW)
+ insn[i++] = BPF_JMP_REG(BPF_JEQ, R0, R1, 2);
+ else /* width == BPF_W */
+ insn[i++] = BPF_JMP32_REG(BPF_JEQ, R0, R1, 2);
+ insn[i++] = BPF_MOV32_IMM(R0, __LINE__);
+ insn[i++] = BPF_EXIT_INSN();
+ }
+ }
+
+ insn[i++] = BPF_MOV64_IMM(R0, 1);
+ insn[i++] = BPF_EXIT_INSN();
+
+ self->u.ptr.insns = insn;
+ self->u.ptr.len = i;
+ BUG_ON(i > len);
+
+ return 0;
+}
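+
+/*
+ * For each (rd, rs) pair above, the emitted code is roughly:
+ *
+ *	R0 = mem; *(R10 - 8) = R0;	seed the memory word
+ *	R0 = ~mem; rs = upd; rd = R10;	seed the registers
+ *	atomic op on (rd, rs);		operation under test
+ *	check R0, rs, rd and memory;	exit with __LINE__ on mismatch
+ */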
+
+/* 64-bit atomic register tests */
+static int bpf_fill_atomic64_add_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_ADD);
+}
+
+static int bpf_fill_atomic64_and_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_AND);
+}
+
+static int bpf_fill_atomic64_or_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_OR);
+}
+
+static int bpf_fill_atomic64_xor_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_XOR);
+}
+
+static int bpf_fill_atomic64_add_fetch_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_ADD | BPF_FETCH);
+}
+
+static int bpf_fill_atomic64_and_fetch_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_AND | BPF_FETCH);
+}
+
+static int bpf_fill_atomic64_or_fetch_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_OR | BPF_FETCH);
+}
+
+static int bpf_fill_atomic64_xor_fetch_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_XOR | BPF_FETCH);
+}
+
+static int bpf_fill_atomic64_xchg_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_XCHG);
+}
+
+static int bpf_fill_atomic64_cmpxchg_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_CMPXCHG);
+}
+
+/* 32-bit atomic register tests */
+static int bpf_fill_atomic32_add_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_ADD);
+}
+
+static int bpf_fill_atomic32_and_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_AND);
+}
+
+static int bpf_fill_atomic32_or_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_OR);
+}
+
+static int bpf_fill_atomic32_xor_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_XOR);
+}
+
+static int bpf_fill_atomic32_add_fetch_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_ADD | BPF_FETCH);
+}
+
+static int bpf_fill_atomic32_and_fetch_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_AND | BPF_FETCH);
+}
+
+static int bpf_fill_atomic32_or_fetch_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_OR | BPF_FETCH);
+}
+
+static int bpf_fill_atomic32_xor_fetch_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_XOR | BPF_FETCH);
+}
+
+static int bpf_fill_atomic32_xchg_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_XCHG);
+}
+
+static int bpf_fill_atomic32_cmpxchg_reg_pairs(struct bpf_test *self)
+{
+ return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_CMPXCHG);
+}
+
+/*
+ * Test the two-instruction 64-bit immediate load operation for all
+ * power-of-two magnitudes of the immediate operand. For each MSB, a block
+ * of immediate values centered around the power-of-two MSB is tested,
+ * both for positive and negative values. The test is designed to verify
+ * the operation for JITs that emit different code depending on the magnitude
+ * of the immediate value. This is often the case if the native instruction
+ * immediate field width is narrower than 32 bits.
+ */
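+/*
+ * For example, with bit = 4 and block = 64, the immediates
+ * +/-((1 << 4) + adj) for adj in [-32, 31] are all covered.
+ */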
+static int bpf_fill_ld_imm64_magn(struct bpf_test *self)
+{
+ int block = 64; /* Increase for more tests per MSB position */
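+	/*
+	 * Each value takes 8 insns: ld_imm64 (2), reference load (4) and
+	 * check (2), for 63 MSB positions and both signs, plus 3 insns
+	 * of prologue and epilogue.
+	 */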
+ int len = 3 + 8 * 63 * block * 2;
+ struct bpf_insn *insn;
+ int bit, adj, sign;
+ int i = 0;
+
+ insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ if (!insn)
+ return -ENOMEM;
+
+ insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 0);
+
+ for (bit = 0; bit <= 62; bit++) {
+ for (adj = -block / 2; adj < block / 2; adj++) {
+ for (sign = -1; sign <= 1; sign += 2) {
+ s64 imm = sign * ((1LL << bit) + adj);
+
+ /* Perform operation */
+ i += __bpf_ld_imm64(&insn[i], R1, imm);
+
+ /* Load reference */
+ insn[i++] = BPF_ALU32_IMM(BPF_MOV, R2, imm);
+ insn[i++] = BPF_ALU32_IMM(BPF_MOV, R3,
+ (u32)(imm >> 32));
+ insn[i++] = BPF_ALU64_IMM(BPF_LSH, R3, 32);
+ insn[i++] = BPF_ALU64_REG(BPF_OR, R2, R3);
+
+ /* Check result */
+ insn[i++] = BPF_JMP_REG(BPF_JEQ, R1, R2, 1);
+ insn[i++] = BPF_EXIT_INSN();
+ }
+ }
+ }
+
+ insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 1);
+ insn[i++] = BPF_EXIT_INSN();
+
+ self->u.ptr.insns = insn;
+ self->u.ptr.len = len;
+ BUG_ON(i != len);
+
+ return 0;
+}
+
+/*
+ * Test the two-instruction 64-bit immediate load operation for different
+ * combinations of bytes. Each byte in the 64-bit word is constructed as
+ * (base & mask) | (rand() & ~mask), where rand() is a deterministic LCG.
+ * All 2^8 ways of assigning the (base1, mask1) and (base2, mask2)
+ * patterns to the eight bytes are tested.
+ */
+static int __bpf_fill_ld_imm64_bytes(struct bpf_test *self,
+ u8 base1, u8 mask1,
+ u8 base2, u8 mask2)
+{
+ struct bpf_insn *insn;
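+	/* 8 insns for each of the 2^8 byte-selection patterns, plus 3 */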
+ int len = 3 + 8 * BIT(8);
+ int pattern, index;
+ u32 rand = 1;
+ int i = 0;
+
+ insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ if (!insn)
+ return -ENOMEM;
+
+ insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 0);
+
+ for (pattern = 0; pattern < BIT(8); pattern++) {
+ u64 imm = 0;
+
+ for (index = 0; index < 8; index++) {
+ int byte;
+
+ if (pattern & BIT(index))
+ byte = (base1 & mask1) | (rand & ~mask1);
+ else
+ byte = (base2 & mask2) | (rand & ~mask2);
+ imm = (imm << 8) | byte;
+ }
+
+		/* Advance the LCG (Numerical Recipes constants) */
+ rand = rand * 1664525 + 1013904223;
+
+ /* Perform operation */
+ i += __bpf_ld_imm64(&insn[i], R1, imm);
+
+ /* Load reference */
+ insn[i++] = BPF_ALU32_IMM(BPF_MOV, R2, imm);
+ insn[i++] = BPF_ALU32_IMM(BPF_MOV, R3, (u32)(imm >> 32));
+ insn[i++] = BPF_ALU64_IMM(BPF_LSH, R3, 32);
+ insn[i++] = BPF_ALU64_REG(BPF_OR, R2, R3);
+
+ /* Check result */
+ insn[i++] = BPF_JMP_REG(BPF_JEQ, R1, R2, 1);
+ insn[i++] = BPF_EXIT_INSN();
+ }
+
+ insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 1);
+ insn[i++] = BPF_EXIT_INSN();
+
+ self->u.ptr.insns = insn;
+ self->u.ptr.len = len;
+ BUG_ON(i != len);
+
+ return 0;
+}
+
+static int bpf_fill_ld_imm64_checker(struct bpf_test *self)
+{
+ return __bpf_fill_ld_imm64_bytes(self, 0, 0xff, 0xff, 0xff);
+}
+
+static int bpf_fill_ld_imm64_pos_neg(struct bpf_test *self)
+{
+ return __bpf_fill_ld_imm64_bytes(self, 1, 0x81, 0x80, 0x80);
+}
+
+static int bpf_fill_ld_imm64_pos_zero(struct bpf_test *self)
+{
+ return __bpf_fill_ld_imm64_bytes(self, 1, 0x81, 0, 0xff);
+}
+
+static int bpf_fill_ld_imm64_neg_zero(struct bpf_test *self)
+{
+ return __bpf_fill_ld_imm64_bytes(self, 0x80, 0x80, 0, 0xff);
+}
+
+/*
+ * Exhaustive tests of JMP operations for all combinations of power-of-two
+ * magnitudes of the operands, both for positive and negative values. The
+ * test is designed to verify e.g. the JMP and JMP32 operations for JITs that
+ * emit different code depending on the magnitude of the immediate value.
+ */
+
+static bool __bpf_match_jmp_cond(s64 v1, s64 v2, u8 op)
+{
+ switch (op) {
+ case BPF_JSET:
+ return !!(v1 & v2);
+ case BPF_JEQ:
+ return v1 == v2;
+ case BPF_JNE:
+ return v1 != v2;
+ case BPF_JGT:
+ return (u64)v1 > (u64)v2;
+ case BPF_JGE:
+ return (u64)v1 >= (u64)v2;
+ case BPF_JLT:
+ return (u64)v1 < (u64)v2;
+ case BPF_JLE:
+ return (u64)v1 <= (u64)v2;
+ case BPF_JSGT:
+ return v1 > v2;
+ case BPF_JSGE:
+ return v1 >= v2;
+ case BPF_JSLT:
+ return v1 < v2;
+ case BPF_JSLE:
+ return v1 <= v2;
+ }
+ return false;
+}
+
+static int __bpf_emit_jmp_imm(struct bpf_test *self, void *arg,
+ struct bpf_insn *insns, s64 dst, s64 imm)
+{
+ int op = *(int *)arg;
+
+ if (insns) {
+ bool match = __bpf_match_jmp_cond(dst, (s32)imm, op);
+ int i = 0;
+
+ insns[i++] = BPF_ALU32_IMM(BPF_MOV, R0, match);
+
+ i += __bpf_ld_imm64(&insns[i], R1, dst);
+ insns[i++] = BPF_JMP_IMM(op, R1, imm, 1);
+ if (!match)
+ insns[i++] = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
+ insns[i++] = BPF_EXIT_INSN();
+
+ return i;
+ }
+
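+	/* Worst case: MOV + ld_imm64 (2 insns) + JMP + JA + EXIT */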
+ return 5 + 1;
+}
+
+static int __bpf_emit_jmp32_imm(struct bpf_test *self, void *arg,
+ struct bpf_insn *insns, s64 dst, s64 imm)
+{
+ int op = *(int *)arg;
+
+ if (insns) {
+ bool match = __bpf_match_jmp_cond((s32)dst, (s32)imm, op);
+ int i = 0;
+
+ i += __bpf_ld_imm64(&insns[i], R1, dst);
+ insns[i++] = BPF_JMP32_IMM(op, R1, imm, 1);
+ if (!match)
+ insns[i++] = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
+ insns[i++] = BPF_EXIT_INSN();
+
+ return i;
+ }
+
+ return 5;
+}
+
+static int __bpf_emit_jmp_reg(struct bpf_test *self, void *arg,
+ struct bpf_insn *insns, s64 dst, s64 src)
+{
+ int op = *(int *)arg;
+
+ if (insns) {
+ bool match = __bpf_match_jmp_cond(dst, src, op);
+ int i = 0;
+
+ i += __bpf_ld_imm64(&insns[i], R1, dst);
+ i += __bpf_ld_imm64(&insns[i], R2, src);
+ insns[i++] = BPF_JMP_REG(op, R1, R2, 1);
+ if (!match)
+ insns[i++] = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
+ insns[i++] = BPF_EXIT_INSN();
+
+ return i;
+ }
+
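+	/* Worst case: 2 x ld_imm64 (4 insns) + JMP + JA + EXIT */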
+ return 7;
+}
+
+static int __bpf_emit_jmp32_reg(struct bpf_test *self, void *arg,
+ struct bpf_insn *insns, s64 dst, s64 src)
+{
+ int op = *(int *)arg;
+
+ if (insns) {
+ bool match = __bpf_match_jmp_cond((s32)dst, (s32)src, op);
+ int i = 0;
+
+ i += __bpf_ld_imm64(&insns[i], R1, dst);
+ i += __bpf_ld_imm64(&insns[i], R2, src);
+ insns[i++] = BPF_JMP32_REG(op, R1, R2, 1);
+ if (!match)
+ insns[i++] = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
+ insns[i++] = BPF_EXIT_INSN();
+
+ return i;
+ }
+
+ return 7;
+}
+
+static int __bpf_fill_jmp_imm(struct bpf_test *self, int op)
+{
+ return __bpf_fill_pattern(self, &op, 64, 32,
+ PATTERN_BLOCK1, PATTERN_BLOCK2,
+ &__bpf_emit_jmp_imm);
+}
+
+static int __bpf_fill_jmp32_imm(struct bpf_test *self, int op)
+{
+ return __bpf_fill_pattern(self, &op, 64, 32,
+ PATTERN_BLOCK1, PATTERN_BLOCK2,
+ &__bpf_emit_jmp32_imm);
+}
+
+static int __bpf_fill_jmp_reg(struct bpf_test *self, int op)
+{
+ return __bpf_fill_pattern(self, &op, 64, 64,
+ PATTERN_BLOCK1, PATTERN_BLOCK2,
+ &__bpf_emit_jmp_reg);
+}
+
+static int __bpf_fill_jmp32_reg(struct bpf_test *self, int op)
+{
+ return __bpf_fill_pattern(self, &op, 64, 64,
+ PATTERN_BLOCK1, PATTERN_BLOCK2,
+ &__bpf_emit_jmp32_reg);
+}
+
+/* JMP immediate tests */
+static int bpf_fill_jmp_jset_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_imm(self, BPF_JSET);
+}
+
+static int bpf_fill_jmp_jeq_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_imm(self, BPF_JEQ);
+}
+
+static int bpf_fill_jmp_jne_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_imm(self, BPF_JNE);
+}
+
+static int bpf_fill_jmp_jgt_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_imm(self, BPF_JGT);
+}
+
+static int bpf_fill_jmp_jge_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_imm(self, BPF_JGE);
+}
+
+static int bpf_fill_jmp_jlt_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_imm(self, BPF_JLT);
+}
+
+static int bpf_fill_jmp_jle_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_imm(self, BPF_JLE);
+}
+
+static int bpf_fill_jmp_jsgt_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_imm(self, BPF_JSGT);
+}
+
+static int bpf_fill_jmp_jsge_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_imm(self, BPF_JSGE);
+}
+
+static int bpf_fill_jmp_jslt_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_imm(self, BPF_JSLT);
+}
+
+static int bpf_fill_jmp_jsle_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_imm(self, BPF_JSLE);
+}
+
+/* JMP32 immediate tests */
+static int bpf_fill_jmp32_jset_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_imm(self, BPF_JSET);
+}
+
+static int bpf_fill_jmp32_jeq_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_imm(self, BPF_JEQ);
+}
+
+static int bpf_fill_jmp32_jne_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_imm(self, BPF_JNE);
+}
+
+static int bpf_fill_jmp32_jgt_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_imm(self, BPF_JGT);
+}
+
+static int bpf_fill_jmp32_jge_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_imm(self, BPF_JGE);
+}
+
+static int bpf_fill_jmp32_jlt_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_imm(self, BPF_JLT);
+}
+
+static int bpf_fill_jmp32_jle_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_imm(self, BPF_JLE);
+}
+
+static int bpf_fill_jmp32_jsgt_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_imm(self, BPF_JSGT);
+}
+
+static int bpf_fill_jmp32_jsge_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_imm(self, BPF_JSGE);
+}
+
+static int bpf_fill_jmp32_jslt_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_imm(self, BPF_JSLT);
+}
+
+static int bpf_fill_jmp32_jsle_imm(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_imm(self, BPF_JSLE);
+}
+
+/* JMP register tests */
+static int bpf_fill_jmp_jset_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_reg(self, BPF_JSET);
+}
+
+static int bpf_fill_jmp_jeq_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_reg(self, BPF_JEQ);
+}
+
+static int bpf_fill_jmp_jne_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_reg(self, BPF_JNE);
+}
+
+static int bpf_fill_jmp_jgt_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_reg(self, BPF_JGT);
+}
+
+static int bpf_fill_jmp_jge_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_reg(self, BPF_JGE);
+}
+
+static int bpf_fill_jmp_jlt_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_reg(self, BPF_JLT);
+}
+
+static int bpf_fill_jmp_jle_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_reg(self, BPF_JLE);
+}
+
+static int bpf_fill_jmp_jsgt_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_reg(self, BPF_JSGT);
+}
+
+static int bpf_fill_jmp_jsge_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_reg(self, BPF_JSGE);
+}
+
+static int bpf_fill_jmp_jslt_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_reg(self, BPF_JSLT);
+}
+
+static int bpf_fill_jmp_jsle_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp_reg(self, BPF_JSLE);
+}
+
+/* JMP32 register tests */
+static int bpf_fill_jmp32_jset_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_reg(self, BPF_JSET);
+}
+
+static int bpf_fill_jmp32_jeq_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_reg(self, BPF_JEQ);
+}
+
+static int bpf_fill_jmp32_jne_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_reg(self, BPF_JNE);
+}
+
+static int bpf_fill_jmp32_jgt_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_reg(self, BPF_JGT);
+}
+
+static int bpf_fill_jmp32_jge_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_reg(self, BPF_JGE);
+}
+
+static int bpf_fill_jmp32_jlt_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_reg(self, BPF_JLT);
+}
+
+static int bpf_fill_jmp32_jle_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_reg(self, BPF_JLE);
+}
+
+static int bpf_fill_jmp32_jsgt_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_reg(self, BPF_JSGT);
+}
+
+static int bpf_fill_jmp32_jsge_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_reg(self, BPF_JSGE);
+}
+
+static int bpf_fill_jmp32_jslt_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_reg(self, BPF_JSLT);
+}
+
+static int bpf_fill_jmp32_jsle_reg(struct bpf_test *self)
+{
+ return __bpf_fill_jmp32_reg(self, BPF_JSLE);
+}
+
+/*
+ * Set up a sequence of staggered jumps, forwards and backwards with
+ * increasing offset. This tests the conversion of relative jumps to
+ * JITed native jumps. On some architectures, for example MIPS, a large
+ * PC-relative jump offset may overflow the immediate field of the native
+ * conditional branch instruction, triggering a conversion to use an
+ * absolute jump instead. Since this changes the jump offsets, another
+ * offset computation pass is necessary, and that may in turn trigger
+ * another branch conversion. This jump sequence is particularly nasty
+ * in that regard.
+ *
+ * The sequence generation is parameterized by size and jump type.
+ * The size must be even, and the expected result is always size + 1.
+ * Below is an example with size=8 and result=9.
+ *
+ * ________________________Start
+ * R0 = 0
+ * R1 = r1
+ * R2 = r2
+ * ,------- JMP +4 * 3______________Preamble: 4 insns
+ * ,----------|-ind 0- if R0 != 7 JMP 8 * 3 + 1 <--------------------.
+ * | | R0 = 8 |
+ * | | JMP +7 * 3 ------------------------.
+ * | ,--------|-----1- if R0 != 5 JMP 7 * 3 + 1 <--------------. | |
+ * | | | R0 = 6 | | |
+ * | | | JMP +5 * 3 ------------------. | |
+ * | | ,------|-----2- if R0 != 3 JMP 6 * 3 + 1 <--------. | | | |
+ * | | | | R0 = 4 | | | | |
+ * | | | | JMP +3 * 3 ------------. | | | |
+ * | | | ,----|-----3- if R0 != 1 JMP 5 * 3 + 1 <--. | | | | | |
+ * | | | | | R0 = 2 | | | | | | |
+ * | | | | | JMP +1 * 3 ------. | | | | | |
+ * | | | | ,--t=====4> if R0 != 0 JMP 4 * 3 + 1 1 2 3 4 5 6 7 8 loc
+ * | | | | | R0 = 1 -1 +2 -3 +4 -5 +6 -7 +8 off
+ * | | | | | JMP -2 * 3 ---' | | | | | | |
+ * | | | | | ,------5- if R0 != 2 JMP 3 * 3 + 1 <-----' | | | | | |
+ * | | | | | | R0 = 3 | | | | | |
+ * | | | | | | JMP -4 * 3 ---------' | | | | |
+ * | | | | | | ,----6- if R0 != 4 JMP 2 * 3 + 1 <-----------' | | | |
+ * | | | | | | | R0 = 5 | | | |
+ * | | | | | | | JMP -6 * 3 ---------------' | | |
+ * | | | | | | | ,--7- if R0 != 6 JMP 1 * 3 + 1 <-----------------' | |
+ * | | | | | | | | R0 = 7 | |
+ * | | Error | | | JMP -8 * 3 ---------------------' |
+ * | | paths | | | ,8- if R0 != 8 JMP 0 * 3 + 1 <-----------------------'
+ * | | | | | | | | | R0 = 9__________________Sequence: 3 * size - 1 insns
+ * `-+-+-+-+-+-+-+-+-> EXIT____________________Return: 1 insn
+ *
+ */
+
+/*
+ * The maximum size parameter: the branch offset field of a BPF insn is
+ * a signed 16-bit value, each sequence step emits 3 insns, and the
+ * size must be even.
+ */
+#define MAX_STAGGERED_JMP_SIZE ((0x7fff / 3) & ~1)
+
+/* We use a reduced number of iterations to get a reasonable execution time */
+#define NR_STAGGERED_JMP_RUNS 10
+
+static int __bpf_fill_staggered_jumps(struct bpf_test *self,
+ const struct bpf_insn *jmp,
+ u64 r1, u64 r2)
+{
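+	/* The expected result doubles as the size parameter (result = size + 1) */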
+ int size = self->test[0].result - 1;
+ int len = 4 + 3 * (size + 1);
+ struct bpf_insn *insns;
+ int off, ind;
+
+ insns = kmalloc_array(len, sizeof(*insns), GFP_KERNEL);
+ if (!insns)
+ return -ENOMEM;
+
+ /* Preamble */
+ insns[0] = BPF_ALU64_IMM(BPF_MOV, R0, 0);
+ insns[1] = BPF_ALU64_IMM(BPF_MOV, R1, r1);
+ insns[2] = BPF_ALU64_IMM(BPF_MOV, R2, r2);
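+	/* Enter the sequence at its middle entry (index size / 2) */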
+ insns[3] = BPF_JMP_IMM(BPF_JA, 0, 0, 3 * size / 2);
+
+ /* Sequence */
+ for (ind = 0, off = size; ind <= size; ind++, off -= 2) {
+ struct bpf_insn *ins = &insns[4 + 3 * ind];
+ int loc;
+
+ if (off == 0)
+ off--;
+
+ loc = abs(off);
+ ins[0] = BPF_JMP_IMM(BPF_JNE, R0, loc - 1,
+ 3 * (size - ind) + 1);
+ ins[1] = BPF_ALU64_IMM(BPF_MOV, R0, loc);
+ ins[2] = *jmp;
+ ins[2].off = 3 * (off - 1);
+ }
+
+ /* Return */
+ insns[len - 1] = BPF_EXIT_INSN();
+
+ self->u.ptr.insns = insns;
+ self->u.ptr.len = len;
+
+ return 0;
+}
+
+/* 64-bit unconditional jump */
+static int bpf_fill_staggered_ja(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 0, 0);
+}
+
+/* 64-bit immediate jumps */
+static int bpf_fill_staggered_jeq_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_IMM(BPF_JEQ, R1, 1234, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
+}
+
+static int bpf_fill_staggered_jne_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_IMM(BPF_JNE, R1, 1234, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 4321, 0);
+}
+
+static int bpf_fill_staggered_jset_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_IMM(BPF_JSET, R1, 0x82, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 0x86, 0);
+}
+
+static int bpf_fill_staggered_jgt_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_IMM(BPF_JGT, R1, 1234, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 0x80000000, 0);
+}
+
+static int bpf_fill_staggered_jge_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_IMM(BPF_JGE, R1, 1234, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
+}
+
+static int bpf_fill_staggered_jlt_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_IMM(BPF_JLT, R1, 0x80000000, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
+}
+
+static int bpf_fill_staggered_jle_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_IMM(BPF_JLE, R1, 1234, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
+}
+
+static int bpf_fill_staggered_jsgt_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_IMM(BPF_JSGT, R1, -2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, -1, 0);
+}
+
+static int bpf_fill_staggered_jsge_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_IMM(BPF_JSGE, R1, -2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, -2, 0);
+}
+
+static int bpf_fill_staggered_jslt_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_IMM(BPF_JSLT, R1, -1, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, -2, 0);
+}
+
+static int bpf_fill_staggered_jsle_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_IMM(BPF_JSLE, R1, -1, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, -1, 0);
+}
+
+/* 64-bit register jumps */
+static int bpf_fill_staggered_jeq_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_REG(BPF_JEQ, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 1234, 1234);
+}
+
+static int bpf_fill_staggered_jne_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_REG(BPF_JNE, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 4321, 1234);
+}
+
+static int bpf_fill_staggered_jset_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_REG(BPF_JSET, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 0x86, 0x82);
+}
+
+static int bpf_fill_staggered_jgt_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_REG(BPF_JGT, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 0x80000000, 1234);
+}
+
+static int bpf_fill_staggered_jge_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_REG(BPF_JGE, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 1234, 1234);
+}
+
+static int bpf_fill_staggered_jlt_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_REG(BPF_JLT, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0x80000000);
+}
+
+static int bpf_fill_staggered_jle_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_REG(BPF_JLE, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 1234, 1234);
+}
+
+static int bpf_fill_staggered_jsgt_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_REG(BPF_JSGT, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, -1, -2);
+}
+
+static int bpf_fill_staggered_jsge_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_REG(BPF_JSGE, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, -2, -2);
+}
+
+static int bpf_fill_staggered_jslt_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_REG(BPF_JSLT, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, -2, -1);
+}
+
+static int bpf_fill_staggered_jsle_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP_REG(BPF_JSLE, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, -1, -1);
+}
+
+/* 32-bit immediate jumps */
+static int bpf_fill_staggered_jeq32_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JEQ, R1, 1234, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
+}
+
+static int bpf_fill_staggered_jne32_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JNE, R1, 1234, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 4321, 0);
+}
+
+static int bpf_fill_staggered_jset32_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JSET, R1, 0x82, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 0x86, 0);
+}
+
+static int bpf_fill_staggered_jgt32_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JGT, R1, 1234, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 0x80000000, 0);
+}
+
+static int bpf_fill_staggered_jge32_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JGE, R1, 1234, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
+}
+
+static int bpf_fill_staggered_jlt32_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JLT, R1, 0x80000000, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
+}
+
+static int bpf_fill_staggered_jle32_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JLE, R1, 1234, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
+}
+
+static int bpf_fill_staggered_jsgt32_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JSGT, R1, -2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, -1, 0);
+}
+
+static int bpf_fill_staggered_jsge32_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JSGE, R1, -2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, -2, 0);
+}
+
+static int bpf_fill_staggered_jslt32_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JSLT, R1, -1, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, -2, 0);
+}
+
+static int bpf_fill_staggered_jsle32_imm(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JSLE, R1, -1, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, -1, 0);
+}
+
+/* 32-bit register jumps */
+static int bpf_fill_staggered_jeq32_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_REG(BPF_JEQ, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 1234, 1234);
+}
+
+static int bpf_fill_staggered_jne32_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_REG(BPF_JNE, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 4321, 1234);
+}
+
+static int bpf_fill_staggered_jset32_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_REG(BPF_JSET, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 0x86, 0x82);
+}
+
+static int bpf_fill_staggered_jgt32_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_REG(BPF_JGT, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 0x80000000, 1234);
+}
+
+static int bpf_fill_staggered_jge32_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_REG(BPF_JGE, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 1234, 1234);
+}
+
+static int bpf_fill_staggered_jlt32_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_REG(BPF_JLT, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0x80000000);
+}
+
+static int bpf_fill_staggered_jle32_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_REG(BPF_JLE, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, 1234, 1234);
+}
+
+static int bpf_fill_staggered_jsgt32_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_REG(BPF_JSGT, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, -1, -2);
+}
+
+static int bpf_fill_staggered_jsge32_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_REG(BPF_JSGE, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, -2, -2);
+}
+
+static int bpf_fill_staggered_jslt32_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_REG(BPF_JSLT, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, -2, -1);
+}
+
+static int bpf_fill_staggered_jsle32_reg(struct bpf_test *self)
+{
+ struct bpf_insn jmp = BPF_JMP32_REG(BPF_JSLE, R1, R2, 0);
+
+ return __bpf_fill_staggered_jumps(self, &jmp, -1, -1);
+}
+
+static struct bpf_test tests[] = {
+ {
+ "TAX",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_IMM, 1),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_LD | BPF_IMM, 2),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_ALU | BPF_NEG, 0), /* A == -3 */
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_LD | BPF_LEN, 0),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0), /* X == len - 3 */
+ BPF_STMT(BPF_LD | BPF_B | BPF_IND, 1),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC,
+ { 10, 20, 30, 40, 50 },
+ { { 2, 10 }, { 3, 20 }, { 4, 30 } },
+ },
+ {
+ "TXA",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_LEN, 0),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0) /* A == len * 2 */
+ },
+ CLASSIC,
+ { 10, 20, 30, 40, 50 },
+ { { 1, 2 }, { 3, 6 }, { 4, 8 } },
+ },
+ {
+ "ADD_SUB_MUL_K",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_IMM, 1),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 2),
+ BPF_STMT(BPF_LDX | BPF_IMM, 3),
+ BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 0xffffffff),
+ BPF_STMT(BPF_ALU | BPF_MUL | BPF_K, 3),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC | FLAG_NO_DATA,
+ { },
+ { { 0, 0xfffffffd } }
+ },
+ {
+ "DIV_MOD_KX",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_IMM, 8),
+ BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 2),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff),
+ BPF_STMT(BPF_ALU | BPF_DIV | BPF_X, 0),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff),
+ BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 0x70000000),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff),
+ BPF_STMT(BPF_ALU | BPF_MOD | BPF_X, 0),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff),
+ BPF_STMT(BPF_ALU | BPF_MOD | BPF_K, 0x70000000),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC | FLAG_NO_DATA,
+ { },
+ { { 0, 0x20000000 } }
+ },
+ {
+ "AND_OR_LSH_K",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_IMM, 0xff),
+ BPF_STMT(BPF_ALU | BPF_AND | BPF_K, 0xf0),
+ BPF_STMT(BPF_ALU | BPF_LSH | BPF_K, 27),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_LD | BPF_IMM, 0xf),
+ BPF_STMT(BPF_ALU | BPF_OR | BPF_K, 0xf0),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC | FLAG_NO_DATA,
+ { },
+ { { 0, 0x800000ff }, { 1, 0x800000ff } },
+ },
+ {
+ "LD_IMM_0",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_IMM, 0), /* ld #0 */
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0, 1, 0),
+ BPF_STMT(BPF_RET | BPF_K, 0),
+ BPF_STMT(BPF_RET | BPF_K, 1),
+ },
+ CLASSIC,
+ { },
+ { { 1, 1 } },
+ },
+ {
+ "LD_IND",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_LEN, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_IND, MAX_K),
+ BPF_STMT(BPF_RET | BPF_K, 1)
+ },
+ CLASSIC,
+ { },
+ { { 1, 0 }, { 10, 0 }, { 60, 0 } },
+ },
+ {
+ "LD_ABS",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS, 1000),
+ BPF_STMT(BPF_RET | BPF_K, 1)
+ },
+ CLASSIC,
+ { },
+ { { 1, 0 }, { 10, 0 }, { 60, 0 } },
+ },
+ {
+ "LD_ABS_LL",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_LL_OFF),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_LL_OFF + 1),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC,
+ { 1, 2, 3 },
+ { { 1, 0 }, { 2, 3 } },
+ },
+ {
+ "LD_IND_LL",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_IMM, SKF_LL_OFF - 1),
+ BPF_STMT(BPF_LDX | BPF_LEN, 0),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_LD | BPF_B | BPF_IND, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC,
+ { 1, 2, 3, 0xff },
+ { { 1, 1 }, { 3, 3 }, { 4, 0xff } },
+ },
+ {
+ "LD_ABS_NET",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF + 1),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC,
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3 },
+ { { 15, 0 }, { 16, 3 } },
+ },
+ {
+ "LD_IND_NET",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_IMM, SKF_NET_OFF - 15),
+ BPF_STMT(BPF_LDX | BPF_LEN, 0),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_LD | BPF_B | BPF_IND, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC,
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3 },
+ { { 14, 0 }, { 15, 1 }, { 17, 3 } },
+ },
+ {
+ "LD_PKTTYPE",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_PKTTYPE),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, SKB_TYPE, 1, 0),
+ BPF_STMT(BPF_RET | BPF_K, 1),
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_PKTTYPE),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, SKB_TYPE, 1, 0),
+ BPF_STMT(BPF_RET | BPF_K, 1),
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_PKTTYPE),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, SKB_TYPE, 1, 0),
+ BPF_STMT(BPF_RET | BPF_K, 1),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC,
+ { },
+ { { 1, 3 }, { 10, 3 } },
+ },
+ {
+ "LD_MARK",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_MARK),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC,
+ { },
+ { { 1, SKB_MARK}, { 10, SKB_MARK} },
+ },
+ {
+ "LD_RXHASH",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_RXHASH),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC,
+ { },
+ { { 1, SKB_HASH}, { 10, SKB_HASH} },
+ },
+ {
+ "LD_QUEUE",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_QUEUE),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC,
+ { },
+ { { 1, SKB_QUEUE_MAP }, { 10, SKB_QUEUE_MAP } },
+ },
+ {
+ "LD_PROTOCOL",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 1),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 20, 1, 0),
+ BPF_STMT(BPF_RET | BPF_K, 0),
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_PROTOCOL),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 30, 1, 0),
+ BPF_STMT(BPF_RET | BPF_K, 0),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC,
+ { 10, 20, 30 },
+ { { 10, ETH_P_IP }, { 100, ETH_P_IP } },
+ },
+ {
+ "LD_VLAN_TAG",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_VLAN_TAG),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC,
+ { },
+ {
+ { 1, SKB_VLAN_TCI },
+ { 10, SKB_VLAN_TCI }
+ },
+ },
+ {
+ "LD_VLAN_TAG_PRESENT",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC,
+ { },
+ {
+ { 1, SKB_VLAN_PRESENT },
+ { 10, SKB_VLAN_PRESENT }
+ },
+ },
+ {
+ "LD_IFINDEX",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_IFINDEX),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC,
+ { },
+ { { 1, SKB_DEV_IFINDEX }, { 10, SKB_DEV_IFINDEX } },
+ },
+ {
+ "LD_HATYPE",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_HATYPE),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC,
+ { },
+ { { 1, SKB_DEV_TYPE }, { 10, SKB_DEV_TYPE } },
+ },
+ {
+ "LD_CPU",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_CPU),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_CPU),
+ BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC,
+ { },
+ { { 1, 0 }, { 10, 0 } },
+ },
+ {
+ "LD_NLATTR",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 2),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_LDX | BPF_IMM, 3),
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_NLATTR),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC,
+#ifdef __BIG_ENDIAN
+ { 0xff, 0xff, 0, 4, 0, 2, 0, 4, 0, 3 },
+#else
+ { 0xff, 0xff, 4, 0, 2, 0, 4, 0, 3, 0 },
+#endif
+ { { 4, 0 }, { 20, 6 } },
+ },
+ {
+ "LD_NLATTR_NEST",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_IMM, 2),
+ BPF_STMT(BPF_LDX | BPF_IMM, 3),
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_NLATTR_NEST),
+ BPF_STMT(BPF_LD | BPF_IMM, 2),
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_NLATTR_NEST),
+ BPF_STMT(BPF_LD | BPF_IMM, 2),
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_NLATTR_NEST),
+ BPF_STMT(BPF_LD | BPF_IMM, 2),
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_NLATTR_NEST),
+ BPF_STMT(BPF_LD | BPF_IMM, 2),
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_NLATTR_NEST),
+ BPF_STMT(BPF_LD | BPF_IMM, 2),
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_NLATTR_NEST),
+ BPF_STMT(BPF_LD | BPF_IMM, 2),
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_NLATTR_NEST),
+ BPF_STMT(BPF_LD | BPF_IMM, 2),
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_NLATTR_NEST),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC,
+#ifdef __BIG_ENDIAN
+ { 0xff, 0xff, 0, 12, 0, 1, 0, 4, 0, 2, 0, 4, 0, 3 },
+#else
+ { 0xff, 0xff, 12, 0, 1, 0, 4, 0, 2, 0, 4, 0, 3, 0 },
+#endif
+ { { 4, 0 }, { 20, 10 } },
+ },
+ {
+ "LD_PAYLOAD_OFF",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_PAY_OFFSET),
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_PAY_OFFSET),
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_PAY_OFFSET),
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_PAY_OFFSET),
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_PAY_OFFSET),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC,
+		/* 00:00:00:00:00:00 > 00:00:00:00:00:00, ethertype IPv4 (0x0800),
+ * length 98: 127.0.0.1 > 127.0.0.1: ICMP echo request,
+ * id 9737, seq 1, length 64
+ */
+ { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00,
+ 0x45, 0x00, 0x00, 0x54, 0xac, 0x8b, 0x40, 0x00, 0x40,
+ 0x01, 0x90, 0x1b, 0x7f, 0x00, 0x00, 0x01 },
+ { { 30, 0 }, { 100, 42 } },
+ },
+ {
+ "LD_ANC_XOR",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_IMM, 10),
+ BPF_STMT(BPF_LDX | BPF_IMM, 300),
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_ALU_XOR_X),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC,
+ { },
+ { { 4, 0xA ^ 300 }, { 20, 0xA ^ 300 } },
+ },
+ {
+ "SPILL_FILL",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_LEN, 0),
+ BPF_STMT(BPF_LD | BPF_IMM, 2),
+ BPF_STMT(BPF_ALU | BPF_RSH, 1),
+ BPF_STMT(BPF_ALU | BPF_XOR | BPF_X, 0),
+ BPF_STMT(BPF_ST, 1), /* M1 = 1 ^ len */
+ BPF_STMT(BPF_ALU | BPF_XOR | BPF_K, 0x80000000),
+ BPF_STMT(BPF_ST, 2), /* M2 = 1 ^ len ^ 0x80000000 */
+ BPF_STMT(BPF_STX, 15), /* M3 = len */
+ BPF_STMT(BPF_LDX | BPF_MEM, 1),
+ BPF_STMT(BPF_LD | BPF_MEM, 2),
+ BPF_STMT(BPF_ALU | BPF_XOR | BPF_X, 0),
+ BPF_STMT(BPF_LDX | BPF_MEM, 15),
+ BPF_STMT(BPF_ALU | BPF_XOR | BPF_X, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC,
+ { },
+ { { 1, 0x80000001 }, { 2, 0x80000002 }, { 60, 0x80000000 ^ 60 } }
+ },
+ {
+ "JEQ",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_LEN, 0),
+ BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_X, 0, 0, 1),
+ BPF_STMT(BPF_RET | BPF_K, 1),
+ BPF_STMT(BPF_RET | BPF_K, MAX_K)
+ },
+ CLASSIC,
+ { 3, 3, 3, 3, 3 },
+ { { 1, 0 }, { 3, 1 }, { 4, MAX_K } },
+ },
+ {
+ "JGT",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_LEN, 0),
+ BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2),
+ BPF_JUMP(BPF_JMP | BPF_JGT | BPF_X, 0, 0, 1),
+ BPF_STMT(BPF_RET | BPF_K, 1),
+ BPF_STMT(BPF_RET | BPF_K, MAX_K)
+ },
+ CLASSIC,
+ { 4, 4, 4, 3, 3 },
+ { { 2, 0 }, { 3, 1 }, { 4, MAX_K } },
+ },
+ {
+ "JGE (jt 0), test 1",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_LEN, 0),
+ BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2),
+ BPF_JUMP(BPF_JMP | BPF_JGE | BPF_X, 0, 0, 1),
+ BPF_STMT(BPF_RET | BPF_K, 1),
+ BPF_STMT(BPF_RET | BPF_K, MAX_K)
+ },
+ CLASSIC,
+ { 4, 4, 4, 3, 3 },
+ { { 2, 0 }, { 3, 1 }, { 4, 1 } },
+ },
+ {
+ "JGE (jt 0), test 2",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_LEN, 0),
+ BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2),
+ BPF_JUMP(BPF_JMP | BPF_JGE | BPF_X, 0, 0, 1),
+ BPF_STMT(BPF_RET | BPF_K, 1),
+ BPF_STMT(BPF_RET | BPF_K, MAX_K)
+ },
+ CLASSIC,
+ { 4, 4, 5, 3, 3 },
+ { { 4, 1 }, { 5, 1 }, { 6, MAX_K } },
+ },
+ {
+ "JGE",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_LEN, 0),
+ BPF_STMT(BPF_LD | BPF_B | BPF_IND, MAX_K),
+ BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 1, 1, 0),
+ BPF_STMT(BPF_RET | BPF_K, 10),
+ BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 2, 1, 0),
+ BPF_STMT(BPF_RET | BPF_K, 20),
+ BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 3, 1, 0),
+ BPF_STMT(BPF_RET | BPF_K, 30),
+ BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 4, 1, 0),
+ BPF_STMT(BPF_RET | BPF_K, 40),
+ BPF_STMT(BPF_RET | BPF_K, MAX_K)
+ },
+ CLASSIC,
+ { 1, 2, 3, 4, 5 },
+ { { 1, 20 }, { 3, 40 }, { 5, MAX_K } },
+ },
+ {
+ "JSET",
+ .u.insns = {
+ BPF_JUMP(BPF_JMP | BPF_JA, 0, 0, 0),
+ BPF_JUMP(BPF_JMP | BPF_JA, 1, 1, 1),
+ BPF_JUMP(BPF_JMP | BPF_JA, 0, 0, 0),
+ BPF_JUMP(BPF_JMP | BPF_JA, 0, 0, 0),
+ BPF_STMT(BPF_LDX | BPF_LEN, 0),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_ALU | BPF_SUB | BPF_K, 4),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_LD | BPF_W | BPF_IND, 0),
+ BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 1, 0, 1),
+ BPF_STMT(BPF_RET | BPF_K, 10),
+ BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0x80000000, 0, 1),
+ BPF_STMT(BPF_RET | BPF_K, 20),
+ BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
+ BPF_STMT(BPF_RET | BPF_K, 30),
+ BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
+ BPF_STMT(BPF_RET | BPF_K, 30),
+ BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
+ BPF_STMT(BPF_RET | BPF_K, 30),
+ BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
+ BPF_STMT(BPF_RET | BPF_K, 30),
+ BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
+ BPF_STMT(BPF_RET | BPF_K, 30),
+ BPF_STMT(BPF_RET | BPF_K, MAX_K)
+ },
+ CLASSIC,
+ { 0, 0xAA, 0x55, 1 },
+ { { 4, 10 }, { 5, 20 }, { 6, MAX_K } },
+ },
+ {
+ "tcpdump port 22",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x86dd, 0, 8), /* IPv6 */
+ BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 20),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x84, 2, 0),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x6, 1, 0),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x11, 0, 17),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 54),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 14, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 56),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 12, 13),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x0800, 0, 12), /* IPv4 */
+ BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 23),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x84, 2, 0),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x6, 1, 0),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x11, 0, 8),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 20),
+ BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0x1fff, 6, 0),
+ BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 14),
+ BPF_STMT(BPF_LD | BPF_H | BPF_IND, 14),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 2, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_IND, 16),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 0, 1),
+ BPF_STMT(BPF_RET | BPF_K, 0xffff),
+ BPF_STMT(BPF_RET | BPF_K, 0),
+ },
+ CLASSIC,
+		/* 3c:07:54:43:e5:76 > 10:bf:48:d6:43:d6, ethertype IPv4 (0x0800)
+ * length 114: 10.1.1.149.49700 > 10.1.2.10.22: Flags [P.],
+ * seq 1305692979:1305693027, ack 3650467037, win 65535,
+ * options [nop,nop,TS val 2502645400 ecr 3971138], length 48
+ */
+ { 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6,
+ 0x3c, 0x07, 0x54, 0x43, 0xe5, 0x76,
+ 0x08, 0x00,
+ 0x45, 0x10, 0x00, 0x64, 0x75, 0xb5,
+ 0x40, 0x00, 0x40, 0x06, 0xad, 0x2e, /* IP header */
+ 0x0a, 0x01, 0x01, 0x95, /* ip src */
+ 0x0a, 0x01, 0x02, 0x0a, /* ip dst */
+ 0xc2, 0x24,
+ 0x00, 0x16 /* dst port */ },
+ { { 10, 0 }, { 30, 0 }, { 100, 65535 } },
+ },
+ {
+ "tcpdump complex",
+ .u.insns = {
+ /* tcpdump -nei eth0 'tcp port 22 and (((ip[2:2] -
+ * ((ip[0]&0xf)<<2)) - ((tcp[12]&0xf0)>>2)) != 0) and
+ * (len > 115 or len < 30000000000)' -d
+ */
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x86dd, 30, 0),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x800, 0, 29),
+ BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 23),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x6, 0, 27),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 20),
+ BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0x1fff, 25, 0),
+ BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 14),
+ BPF_STMT(BPF_LD | BPF_H | BPF_IND, 14),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 2, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_IND, 16),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 0, 20),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 16),
+ BPF_STMT(BPF_ST, 1),
+ BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 14),
+ BPF_STMT(BPF_ALU | BPF_AND | BPF_K, 0xf),
+ BPF_STMT(BPF_ALU | BPF_LSH | BPF_K, 2),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0x5), /* libpcap emits K on TAX */
+ BPF_STMT(BPF_LD | BPF_MEM, 1),
+ BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0),
+ BPF_STMT(BPF_ST, 5),
+ BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 14),
+ BPF_STMT(BPF_LD | BPF_B | BPF_IND, 26),
+ BPF_STMT(BPF_ALU | BPF_AND | BPF_K, 0xf0),
+ BPF_STMT(BPF_ALU | BPF_RSH | BPF_K, 2),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0x9), /* libpcap emits K on TAX */
+ BPF_STMT(BPF_LD | BPF_MEM, 5),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_X, 0, 4, 0),
+ BPF_STMT(BPF_LD | BPF_LEN, 0),
+ BPF_JUMP(BPF_JMP | BPF_JGT | BPF_K, 0x73, 1, 0),
+ BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 0xfc23ac00, 1, 0),
+ BPF_STMT(BPF_RET | BPF_K, 0xffff),
+ BPF_STMT(BPF_RET | BPF_K, 0),
+ },
+ CLASSIC,
+ { 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6,
+ 0x3c, 0x07, 0x54, 0x43, 0xe5, 0x76,
+ 0x08, 0x00,
+ 0x45, 0x10, 0x00, 0x64, 0x75, 0xb5,
+ 0x40, 0x00, 0x40, 0x06, 0xad, 0x2e, /* IP header */
+ 0x0a, 0x01, 0x01, 0x95, /* ip src */
+ 0x0a, 0x01, 0x02, 0x0a, /* ip dst */
+ 0xc2, 0x24,
+ 0x00, 0x16 /* dst port */ },
+ { { 10, 0 }, { 30, 0 }, { 100, 65535 } },
+ },
+ {
+ "RET_A",
+ .u.insns = {
+ /* check that uninitialized X and A contain zeros */
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0)
+ },
+ CLASSIC,
+ { },
+ { {1, 0}, {2, 0} },
+ },
+ {
+ "INT: ADD trivial",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R1, 1),
+ BPF_ALU64_IMM(BPF_ADD, R1, 2),
+ BPF_ALU64_IMM(BPF_MOV, R2, 3),
+ BPF_ALU64_REG(BPF_SUB, R1, R2),
+ BPF_ALU64_IMM(BPF_ADD, R1, -1),
+ BPF_ALU64_IMM(BPF_MUL, R1, 3),
+ BPF_ALU64_REG(BPF_MOV, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xfffffffd } }
+ },
+ {
+ "INT: MUL_X",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, -1),
+ BPF_ALU64_IMM(BPF_MOV, R1, -1),
+ BPF_ALU64_IMM(BPF_MOV, R2, 3),
+ BPF_ALU64_REG(BPF_MUL, R1, R2),
+ BPF_JMP_IMM(BPF_JEQ, R1, 0xfffffffd, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } }
+ },
+ {
+ "INT: MUL_X2",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -1),
+ BPF_ALU32_IMM(BPF_MOV, R1, -1),
+ BPF_ALU32_IMM(BPF_MOV, R2, 3),
+ BPF_ALU64_REG(BPF_MUL, R1, R2),
+ BPF_ALU64_IMM(BPF_RSH, R1, 8),
+ BPF_JMP_IMM(BPF_JEQ, R1, 0x2ffffff, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } }
+ },
+ {
+ "INT: MUL32_X",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -1),
+ BPF_ALU64_IMM(BPF_MOV, R1, -1),
+ BPF_ALU32_IMM(BPF_MOV, R2, 3),
+ BPF_ALU32_REG(BPF_MUL, R1, R2),
+ BPF_ALU64_IMM(BPF_RSH, R1, 8),
+ BPF_JMP_IMM(BPF_JEQ, R1, 0xffffff, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } }
+ },
+ {
+ /* Have to test all register combinations, since
+ * JITing of different registers will produce
+ * different asm code.
+ */
+ "INT: ADD 64-bit",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_ALU64_IMM(BPF_MOV, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R2, 2),
+ BPF_ALU64_IMM(BPF_MOV, R3, 3),
+ BPF_ALU64_IMM(BPF_MOV, R4, 4),
+ BPF_ALU64_IMM(BPF_MOV, R5, 5),
+ BPF_ALU64_IMM(BPF_MOV, R6, 6),
+ BPF_ALU64_IMM(BPF_MOV, R7, 7),
+ BPF_ALU64_IMM(BPF_MOV, R8, 8),
+ BPF_ALU64_IMM(BPF_MOV, R9, 9),
+ BPF_ALU64_IMM(BPF_ADD, R0, 20),
+ BPF_ALU64_IMM(BPF_ADD, R1, 20),
+ BPF_ALU64_IMM(BPF_ADD, R2, 20),
+ BPF_ALU64_IMM(BPF_ADD, R3, 20),
+ BPF_ALU64_IMM(BPF_ADD, R4, 20),
+ BPF_ALU64_IMM(BPF_ADD, R5, 20),
+ BPF_ALU64_IMM(BPF_ADD, R6, 20),
+ BPF_ALU64_IMM(BPF_ADD, R7, 20),
+ BPF_ALU64_IMM(BPF_ADD, R8, 20),
+ BPF_ALU64_IMM(BPF_ADD, R9, 20),
+ BPF_ALU64_IMM(BPF_SUB, R0, 10),
+ BPF_ALU64_IMM(BPF_SUB, R1, 10),
+ BPF_ALU64_IMM(BPF_SUB, R2, 10),
+ BPF_ALU64_IMM(BPF_SUB, R3, 10),
+ BPF_ALU64_IMM(BPF_SUB, R4, 10),
+ BPF_ALU64_IMM(BPF_SUB, R5, 10),
+ BPF_ALU64_IMM(BPF_SUB, R6, 10),
+ BPF_ALU64_IMM(BPF_SUB, R7, 10),
+ BPF_ALU64_IMM(BPF_SUB, R8, 10),
+ BPF_ALU64_IMM(BPF_SUB, R9, 10),
+ BPF_ALU64_REG(BPF_ADD, R0, R0),
+ BPF_ALU64_REG(BPF_ADD, R0, R1),
+ BPF_ALU64_REG(BPF_ADD, R0, R2),
+ BPF_ALU64_REG(BPF_ADD, R0, R3),
+ BPF_ALU64_REG(BPF_ADD, R0, R4),
+ BPF_ALU64_REG(BPF_ADD, R0, R5),
+ BPF_ALU64_REG(BPF_ADD, R0, R6),
+ BPF_ALU64_REG(BPF_ADD, R0, R7),
+ BPF_ALU64_REG(BPF_ADD, R0, R8),
+ BPF_ALU64_REG(BPF_ADD, R0, R9), /* R0 == 155 */
+ BPF_JMP_IMM(BPF_JEQ, R0, 155, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_ADD, R1, R0),
+ BPF_ALU64_REG(BPF_ADD, R1, R1),
+ BPF_ALU64_REG(BPF_ADD, R1, R2),
+ BPF_ALU64_REG(BPF_ADD, R1, R3),
+ BPF_ALU64_REG(BPF_ADD, R1, R4),
+ BPF_ALU64_REG(BPF_ADD, R1, R5),
+ BPF_ALU64_REG(BPF_ADD, R1, R6),
+ BPF_ALU64_REG(BPF_ADD, R1, R7),
+ BPF_ALU64_REG(BPF_ADD, R1, R8),
+ BPF_ALU64_REG(BPF_ADD, R1, R9), /* R1 == 456 */
+ BPF_JMP_IMM(BPF_JEQ, R1, 456, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_ADD, R2, R0),
+ BPF_ALU64_REG(BPF_ADD, R2, R1),
+ BPF_ALU64_REG(BPF_ADD, R2, R2),
+ BPF_ALU64_REG(BPF_ADD, R2, R3),
+ BPF_ALU64_REG(BPF_ADD, R2, R4),
+ BPF_ALU64_REG(BPF_ADD, R2, R5),
+ BPF_ALU64_REG(BPF_ADD, R2, R6),
+ BPF_ALU64_REG(BPF_ADD, R2, R7),
+ BPF_ALU64_REG(BPF_ADD, R2, R8),
+ BPF_ALU64_REG(BPF_ADD, R2, R9), /* R2 == 1358 */
+ BPF_JMP_IMM(BPF_JEQ, R2, 1358, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_ADD, R3, R0),
+ BPF_ALU64_REG(BPF_ADD, R3, R1),
+ BPF_ALU64_REG(BPF_ADD, R3, R2),
+ BPF_ALU64_REG(BPF_ADD, R3, R3),
+ BPF_ALU64_REG(BPF_ADD, R3, R4),
+ BPF_ALU64_REG(BPF_ADD, R3, R5),
+ BPF_ALU64_REG(BPF_ADD, R3, R6),
+ BPF_ALU64_REG(BPF_ADD, R3, R7),
+ BPF_ALU64_REG(BPF_ADD, R3, R8),
+ BPF_ALU64_REG(BPF_ADD, R3, R9), /* R3 == 4063 */
+ BPF_JMP_IMM(BPF_JEQ, R3, 4063, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_ADD, R4, R0),
+ BPF_ALU64_REG(BPF_ADD, R4, R1),
+ BPF_ALU64_REG(BPF_ADD, R4, R2),
+ BPF_ALU64_REG(BPF_ADD, R4, R3),
+ BPF_ALU64_REG(BPF_ADD, R4, R4),
+ BPF_ALU64_REG(BPF_ADD, R4, R5),
+ BPF_ALU64_REG(BPF_ADD, R4, R6),
+ BPF_ALU64_REG(BPF_ADD, R4, R7),
+ BPF_ALU64_REG(BPF_ADD, R4, R8),
+ BPF_ALU64_REG(BPF_ADD, R4, R9), /* R4 == 12177 */
+ BPF_JMP_IMM(BPF_JEQ, R4, 12177, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_ADD, R5, R0),
+ BPF_ALU64_REG(BPF_ADD, R5, R1),
+ BPF_ALU64_REG(BPF_ADD, R5, R2),
+ BPF_ALU64_REG(BPF_ADD, R5, R3),
+ BPF_ALU64_REG(BPF_ADD, R5, R4),
+ BPF_ALU64_REG(BPF_ADD, R5, R5),
+ BPF_ALU64_REG(BPF_ADD, R5, R6),
+ BPF_ALU64_REG(BPF_ADD, R5, R7),
+ BPF_ALU64_REG(BPF_ADD, R5, R8),
+ BPF_ALU64_REG(BPF_ADD, R5, R9), /* R5 == 36518 */
+ BPF_JMP_IMM(BPF_JEQ, R5, 36518, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_ADD, R6, R0),
+ BPF_ALU64_REG(BPF_ADD, R6, R1),
+ BPF_ALU64_REG(BPF_ADD, R6, R2),
+ BPF_ALU64_REG(BPF_ADD, R6, R3),
+ BPF_ALU64_REG(BPF_ADD, R6, R4),
+ BPF_ALU64_REG(BPF_ADD, R6, R5),
+ BPF_ALU64_REG(BPF_ADD, R6, R6),
+ BPF_ALU64_REG(BPF_ADD, R6, R7),
+ BPF_ALU64_REG(BPF_ADD, R6, R8),
+ BPF_ALU64_REG(BPF_ADD, R6, R9), /* R6 == 109540 */
+ BPF_JMP_IMM(BPF_JEQ, R6, 109540, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_ADD, R7, R0),
+ BPF_ALU64_REG(BPF_ADD, R7, R1),
+ BPF_ALU64_REG(BPF_ADD, R7, R2),
+ BPF_ALU64_REG(BPF_ADD, R7, R3),
+ BPF_ALU64_REG(BPF_ADD, R7, R4),
+ BPF_ALU64_REG(BPF_ADD, R7, R5),
+ BPF_ALU64_REG(BPF_ADD, R7, R6),
+ BPF_ALU64_REG(BPF_ADD, R7, R7),
+ BPF_ALU64_REG(BPF_ADD, R7, R8),
+ BPF_ALU64_REG(BPF_ADD, R7, R9), /* R7 == 328605 */
+ BPF_JMP_IMM(BPF_JEQ, R7, 328605, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_ADD, R8, R0),
+ BPF_ALU64_REG(BPF_ADD, R8, R1),
+ BPF_ALU64_REG(BPF_ADD, R8, R2),
+ BPF_ALU64_REG(BPF_ADD, R8, R3),
+ BPF_ALU64_REG(BPF_ADD, R8, R4),
+ BPF_ALU64_REG(BPF_ADD, R8, R5),
+ BPF_ALU64_REG(BPF_ADD, R8, R6),
+ BPF_ALU64_REG(BPF_ADD, R8, R7),
+ BPF_ALU64_REG(BPF_ADD, R8, R8),
+ BPF_ALU64_REG(BPF_ADD, R8, R9), /* R8 == 985799 */
+ BPF_JMP_IMM(BPF_JEQ, R8, 985799, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_ADD, R9, R0),
+ BPF_ALU64_REG(BPF_ADD, R9, R1),
+ BPF_ALU64_REG(BPF_ADD, R9, R2),
+ BPF_ALU64_REG(BPF_ADD, R9, R3),
+ BPF_ALU64_REG(BPF_ADD, R9, R4),
+ BPF_ALU64_REG(BPF_ADD, R9, R5),
+ BPF_ALU64_REG(BPF_ADD, R9, R6),
+ BPF_ALU64_REG(BPF_ADD, R9, R7),
+ BPF_ALU64_REG(BPF_ADD, R9, R8),
+ BPF_ALU64_REG(BPF_ADD, R9, R9), /* R9 == 2957380 */
+ BPF_ALU64_REG(BPF_MOV, R0, R9),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 2957380 } }
+ },
+ {
+ "INT: ADD 32-bit",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 20),
+ BPF_ALU32_IMM(BPF_MOV, R1, 1),
+ BPF_ALU32_IMM(BPF_MOV, R2, 2),
+ BPF_ALU32_IMM(BPF_MOV, R3, 3),
+ BPF_ALU32_IMM(BPF_MOV, R4, 4),
+ BPF_ALU32_IMM(BPF_MOV, R5, 5),
+ BPF_ALU32_IMM(BPF_MOV, R6, 6),
+ BPF_ALU32_IMM(BPF_MOV, R7, 7),
+ BPF_ALU32_IMM(BPF_MOV, R8, 8),
+ BPF_ALU32_IMM(BPF_MOV, R9, 9),
+ BPF_ALU64_IMM(BPF_ADD, R1, 10),
+ BPF_ALU64_IMM(BPF_ADD, R2, 10),
+ BPF_ALU64_IMM(BPF_ADD, R3, 10),
+ BPF_ALU64_IMM(BPF_ADD, R4, 10),
+ BPF_ALU64_IMM(BPF_ADD, R5, 10),
+ BPF_ALU64_IMM(BPF_ADD, R6, 10),
+ BPF_ALU64_IMM(BPF_ADD, R7, 10),
+ BPF_ALU64_IMM(BPF_ADD, R8, 10),
+ BPF_ALU64_IMM(BPF_ADD, R9, 10),
+ BPF_ALU32_REG(BPF_ADD, R0, R1),
+ BPF_ALU32_REG(BPF_ADD, R0, R2),
+ BPF_ALU32_REG(BPF_ADD, R0, R3),
+ BPF_ALU32_REG(BPF_ADD, R0, R4),
+ BPF_ALU32_REG(BPF_ADD, R0, R5),
+ BPF_ALU32_REG(BPF_ADD, R0, R6),
+ BPF_ALU32_REG(BPF_ADD, R0, R7),
+ BPF_ALU32_REG(BPF_ADD, R0, R8),
+ BPF_ALU32_REG(BPF_ADD, R0, R9), /* R0 == 155 */
+ BPF_JMP_IMM(BPF_JEQ, R0, 155, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_REG(BPF_ADD, R1, R0),
+ BPF_ALU32_REG(BPF_ADD, R1, R1),
+ BPF_ALU32_REG(BPF_ADD, R1, R2),
+ BPF_ALU32_REG(BPF_ADD, R1, R3),
+ BPF_ALU32_REG(BPF_ADD, R1, R4),
+ BPF_ALU32_REG(BPF_ADD, R1, R5),
+ BPF_ALU32_REG(BPF_ADD, R1, R6),
+ BPF_ALU32_REG(BPF_ADD, R1, R7),
+ BPF_ALU32_REG(BPF_ADD, R1, R8),
+ BPF_ALU32_REG(BPF_ADD, R1, R9), /* R1 == 456 */
+ BPF_JMP_IMM(BPF_JEQ, R1, 456, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_REG(BPF_ADD, R2, R0),
+ BPF_ALU32_REG(BPF_ADD, R2, R1),
+ BPF_ALU32_REG(BPF_ADD, R2, R2),
+ BPF_ALU32_REG(BPF_ADD, R2, R3),
+ BPF_ALU32_REG(BPF_ADD, R2, R4),
+ BPF_ALU32_REG(BPF_ADD, R2, R5),
+ BPF_ALU32_REG(BPF_ADD, R2, R6),
+ BPF_ALU32_REG(BPF_ADD, R2, R7),
+ BPF_ALU32_REG(BPF_ADD, R2, R8),
+ BPF_ALU32_REG(BPF_ADD, R2, R9), /* R2 == 1358 */
+ BPF_JMP_IMM(BPF_JEQ, R2, 1358, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_REG(BPF_ADD, R3, R0),
+ BPF_ALU32_REG(BPF_ADD, R3, R1),
+ BPF_ALU32_REG(BPF_ADD, R3, R2),
+ BPF_ALU32_REG(BPF_ADD, R3, R3),
+ BPF_ALU32_REG(BPF_ADD, R3, R4),
+ BPF_ALU32_REG(BPF_ADD, R3, R5),
+ BPF_ALU32_REG(BPF_ADD, R3, R6),
+ BPF_ALU32_REG(BPF_ADD, R3, R7),
+ BPF_ALU32_REG(BPF_ADD, R3, R8),
+ BPF_ALU32_REG(BPF_ADD, R3, R9), /* R3 == 4063 */
+ BPF_JMP_IMM(BPF_JEQ, R3, 4063, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_REG(BPF_ADD, R4, R0),
+ BPF_ALU32_REG(BPF_ADD, R4, R1),
+ BPF_ALU32_REG(BPF_ADD, R4, R2),
+ BPF_ALU32_REG(BPF_ADD, R4, R3),
+ BPF_ALU32_REG(BPF_ADD, R4, R4),
+ BPF_ALU32_REG(BPF_ADD, R4, R5),
+ BPF_ALU32_REG(BPF_ADD, R4, R6),
+ BPF_ALU32_REG(BPF_ADD, R4, R7),
+ BPF_ALU32_REG(BPF_ADD, R4, R8),
+ BPF_ALU32_REG(BPF_ADD, R4, R9), /* R4 == 12177 */
+ BPF_JMP_IMM(BPF_JEQ, R4, 12177, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_REG(BPF_ADD, R5, R0),
+ BPF_ALU32_REG(BPF_ADD, R5, R1),
+ BPF_ALU32_REG(BPF_ADD, R5, R2),
+ BPF_ALU32_REG(BPF_ADD, R5, R3),
+ BPF_ALU32_REG(BPF_ADD, R5, R4),
+ BPF_ALU32_REG(BPF_ADD, R5, R5),
+ BPF_ALU32_REG(BPF_ADD, R5, R6),
+ BPF_ALU32_REG(BPF_ADD, R5, R7),
+ BPF_ALU32_REG(BPF_ADD, R5, R8),
+ BPF_ALU32_REG(BPF_ADD, R5, R9), /* R5 == 36518 */
+ BPF_JMP_IMM(BPF_JEQ, R5, 36518, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_REG(BPF_ADD, R6, R0),
+ BPF_ALU32_REG(BPF_ADD, R6, R1),
+ BPF_ALU32_REG(BPF_ADD, R6, R2),
+ BPF_ALU32_REG(BPF_ADD, R6, R3),
+ BPF_ALU32_REG(BPF_ADD, R6, R4),
+ BPF_ALU32_REG(BPF_ADD, R6, R5),
+ BPF_ALU32_REG(BPF_ADD, R6, R6),
+ BPF_ALU32_REG(BPF_ADD, R6, R7),
+ BPF_ALU32_REG(BPF_ADD, R6, R8),
+ BPF_ALU32_REG(BPF_ADD, R6, R9), /* R6 == 109540 */
+ BPF_JMP_IMM(BPF_JEQ, R6, 109540, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_REG(BPF_ADD, R7, R0),
+ BPF_ALU32_REG(BPF_ADD, R7, R1),
+ BPF_ALU32_REG(BPF_ADD, R7, R2),
+ BPF_ALU32_REG(BPF_ADD, R7, R3),
+ BPF_ALU32_REG(BPF_ADD, R7, R4),
+ BPF_ALU32_REG(BPF_ADD, R7, R5),
+ BPF_ALU32_REG(BPF_ADD, R7, R6),
+ BPF_ALU32_REG(BPF_ADD, R7, R7),
+ BPF_ALU32_REG(BPF_ADD, R7, R8),
+ BPF_ALU32_REG(BPF_ADD, R7, R9), /* R7 == 328605 */
+ BPF_JMP_IMM(BPF_JEQ, R7, 328605, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_REG(BPF_ADD, R8, R0),
+ BPF_ALU32_REG(BPF_ADD, R8, R1),
+ BPF_ALU32_REG(BPF_ADD, R8, R2),
+ BPF_ALU32_REG(BPF_ADD, R8, R3),
+ BPF_ALU32_REG(BPF_ADD, R8, R4),
+ BPF_ALU32_REG(BPF_ADD, R8, R5),
+ BPF_ALU32_REG(BPF_ADD, R8, R6),
+ BPF_ALU32_REG(BPF_ADD, R8, R7),
+ BPF_ALU32_REG(BPF_ADD, R8, R8),
+ BPF_ALU32_REG(BPF_ADD, R8, R9), /* R8 == 985799 */
+ BPF_JMP_IMM(BPF_JEQ, R8, 985799, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_REG(BPF_ADD, R9, R0),
+ BPF_ALU32_REG(BPF_ADD, R9, R1),
+ BPF_ALU32_REG(BPF_ADD, R9, R2),
+ BPF_ALU32_REG(BPF_ADD, R9, R3),
+ BPF_ALU32_REG(BPF_ADD, R9, R4),
+ BPF_ALU32_REG(BPF_ADD, R9, R5),
+ BPF_ALU32_REG(BPF_ADD, R9, R6),
+ BPF_ALU32_REG(BPF_ADD, R9, R7),
+ BPF_ALU32_REG(BPF_ADD, R9, R8),
+ BPF_ALU32_REG(BPF_ADD, R9, R9), /* R9 == 2957380 */
+ BPF_ALU32_REG(BPF_MOV, R0, R9),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 2957380 } }
+ },
+ { /* Mainly checking JIT here. */
+ "INT: SUB",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_ALU64_IMM(BPF_MOV, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R2, 2),
+ BPF_ALU64_IMM(BPF_MOV, R3, 3),
+ BPF_ALU64_IMM(BPF_MOV, R4, 4),
+ BPF_ALU64_IMM(BPF_MOV, R5, 5),
+ BPF_ALU64_IMM(BPF_MOV, R6, 6),
+ BPF_ALU64_IMM(BPF_MOV, R7, 7),
+ BPF_ALU64_IMM(BPF_MOV, R8, 8),
+ BPF_ALU64_IMM(BPF_MOV, R9, 9),
+ BPF_ALU64_REG(BPF_SUB, R0, R0),
+ BPF_ALU64_REG(BPF_SUB, R0, R1),
+ BPF_ALU64_REG(BPF_SUB, R0, R2),
+ BPF_ALU64_REG(BPF_SUB, R0, R3),
+ BPF_ALU64_REG(BPF_SUB, R0, R4),
+ BPF_ALU64_REG(BPF_SUB, R0, R5),
+ BPF_ALU64_REG(BPF_SUB, R0, R6),
+ BPF_ALU64_REG(BPF_SUB, R0, R7),
+ BPF_ALU64_REG(BPF_SUB, R0, R8),
+ BPF_ALU64_REG(BPF_SUB, R0, R9),
+ BPF_ALU64_IMM(BPF_SUB, R0, 10),
+ BPF_JMP_IMM(BPF_JEQ, R0, -55, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_SUB, R1, R0),
+ BPF_ALU64_REG(BPF_SUB, R1, R2),
+ BPF_ALU64_REG(BPF_SUB, R1, R3),
+ BPF_ALU64_REG(BPF_SUB, R1, R4),
+ BPF_ALU64_REG(BPF_SUB, R1, R5),
+ BPF_ALU64_REG(BPF_SUB, R1, R6),
+ BPF_ALU64_REG(BPF_SUB, R1, R7),
+ BPF_ALU64_REG(BPF_SUB, R1, R8),
+ BPF_ALU64_REG(BPF_SUB, R1, R9),
+ BPF_ALU64_IMM(BPF_SUB, R1, 10),
+ BPF_ALU64_REG(BPF_SUB, R2, R0),
+ BPF_ALU64_REG(BPF_SUB, R2, R1),
+ BPF_ALU64_REG(BPF_SUB, R2, R3),
+ BPF_ALU64_REG(BPF_SUB, R2, R4),
+ BPF_ALU64_REG(BPF_SUB, R2, R5),
+ BPF_ALU64_REG(BPF_SUB, R2, R6),
+ BPF_ALU64_REG(BPF_SUB, R2, R7),
+ BPF_ALU64_REG(BPF_SUB, R2, R8),
+ BPF_ALU64_REG(BPF_SUB, R2, R9),
+ BPF_ALU64_IMM(BPF_SUB, R2, 10),
+ BPF_ALU64_REG(BPF_SUB, R3, R0),
+ BPF_ALU64_REG(BPF_SUB, R3, R1),
+ BPF_ALU64_REG(BPF_SUB, R3, R2),
+ BPF_ALU64_REG(BPF_SUB, R3, R4),
+ BPF_ALU64_REG(BPF_SUB, R3, R5),
+ BPF_ALU64_REG(BPF_SUB, R3, R6),
+ BPF_ALU64_REG(BPF_SUB, R3, R7),
+ BPF_ALU64_REG(BPF_SUB, R3, R8),
+ BPF_ALU64_REG(BPF_SUB, R3, R9),
+ BPF_ALU64_IMM(BPF_SUB, R3, 10),
+ BPF_ALU64_REG(BPF_SUB, R4, R0),
+ BPF_ALU64_REG(BPF_SUB, R4, R1),
+ BPF_ALU64_REG(BPF_SUB, R4, R2),
+ BPF_ALU64_REG(BPF_SUB, R4, R3),
+ BPF_ALU64_REG(BPF_SUB, R4, R5),
+ BPF_ALU64_REG(BPF_SUB, R4, R6),
+ BPF_ALU64_REG(BPF_SUB, R4, R7),
+ BPF_ALU64_REG(BPF_SUB, R4, R8),
+ BPF_ALU64_REG(BPF_SUB, R4, R9),
+ BPF_ALU64_IMM(BPF_SUB, R4, 10),
+ BPF_ALU64_REG(BPF_SUB, R5, R0),
+ BPF_ALU64_REG(BPF_SUB, R5, R1),
+ BPF_ALU64_REG(BPF_SUB, R5, R2),
+ BPF_ALU64_REG(BPF_SUB, R5, R3),
+ BPF_ALU64_REG(BPF_SUB, R5, R4),
+ BPF_ALU64_REG(BPF_SUB, R5, R6),
+ BPF_ALU64_REG(BPF_SUB, R5, R7),
+ BPF_ALU64_REG(BPF_SUB, R5, R8),
+ BPF_ALU64_REG(BPF_SUB, R5, R9),
+ BPF_ALU64_IMM(BPF_SUB, R5, 10),
+ BPF_ALU64_REG(BPF_SUB, R6, R0),
+ BPF_ALU64_REG(BPF_SUB, R6, R1),
+ BPF_ALU64_REG(BPF_SUB, R6, R2),
+ BPF_ALU64_REG(BPF_SUB, R6, R3),
+ BPF_ALU64_REG(BPF_SUB, R6, R4),
+ BPF_ALU64_REG(BPF_SUB, R6, R5),
+ BPF_ALU64_REG(BPF_SUB, R6, R7),
+ BPF_ALU64_REG(BPF_SUB, R6, R8),
+ BPF_ALU64_REG(BPF_SUB, R6, R9),
+ BPF_ALU64_IMM(BPF_SUB, R6, 10),
+ BPF_ALU64_REG(BPF_SUB, R7, R0),
+ BPF_ALU64_REG(BPF_SUB, R7, R1),
+ BPF_ALU64_REG(BPF_SUB, R7, R2),
+ BPF_ALU64_REG(BPF_SUB, R7, R3),
+ BPF_ALU64_REG(BPF_SUB, R7, R4),
+ BPF_ALU64_REG(BPF_SUB, R7, R5),
+ BPF_ALU64_REG(BPF_SUB, R7, R6),
+ BPF_ALU64_REG(BPF_SUB, R7, R8),
+ BPF_ALU64_REG(BPF_SUB, R7, R9),
+ BPF_ALU64_IMM(BPF_SUB, R7, 10),
+ BPF_ALU64_REG(BPF_SUB, R8, R0),
+ BPF_ALU64_REG(BPF_SUB, R8, R1),
+ BPF_ALU64_REG(BPF_SUB, R8, R2),
+ BPF_ALU64_REG(BPF_SUB, R8, R3),
+ BPF_ALU64_REG(BPF_SUB, R8, R4),
+ BPF_ALU64_REG(BPF_SUB, R8, R5),
+ BPF_ALU64_REG(BPF_SUB, R8, R6),
+ BPF_ALU64_REG(BPF_SUB, R8, R7),
+ BPF_ALU64_REG(BPF_SUB, R8, R9),
+ BPF_ALU64_IMM(BPF_SUB, R8, 10),
+ BPF_ALU64_REG(BPF_SUB, R9, R0),
+ BPF_ALU64_REG(BPF_SUB, R9, R1),
+ BPF_ALU64_REG(BPF_SUB, R9, R2),
+ BPF_ALU64_REG(BPF_SUB, R9, R3),
+ BPF_ALU64_REG(BPF_SUB, R9, R4),
+ BPF_ALU64_REG(BPF_SUB, R9, R5),
+ BPF_ALU64_REG(BPF_SUB, R9, R6),
+ BPF_ALU64_REG(BPF_SUB, R9, R7),
+ BPF_ALU64_REG(BPF_SUB, R9, R8),
+ BPF_ALU64_IMM(BPF_SUB, R9, 10),
+ BPF_ALU64_IMM(BPF_SUB, R0, 10),
+ BPF_ALU64_IMM(BPF_NEG, R0, 0),
+ BPF_ALU64_REG(BPF_SUB, R0, R1),
+ BPF_ALU64_REG(BPF_SUB, R0, R2),
+ BPF_ALU64_REG(BPF_SUB, R0, R3),
+ BPF_ALU64_REG(BPF_SUB, R0, R4),
+ BPF_ALU64_REG(BPF_SUB, R0, R5),
+ BPF_ALU64_REG(BPF_SUB, R0, R6),
+ BPF_ALU64_REG(BPF_SUB, R0, R7),
+ BPF_ALU64_REG(BPF_SUB, R0, R8),
+ BPF_ALU64_REG(BPF_SUB, R0, R9),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 11 } }
+ },
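+	/*
+	 * Comment added for clarity: SUB r,r and XOR r,r are both common
+	 * register-zeroing idioms; the pairs below check that either form
+	 * clears the full 64-bit register, even when it previously held -1.
+	 */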
+ { /* Mainly checking JIT here. */
+ "INT: XOR",
+ .u.insns_int = {
+ BPF_ALU64_REG(BPF_SUB, R0, R0),
+ BPF_ALU64_REG(BPF_XOR, R1, R1),
+ BPF_JMP_REG(BPF_JEQ, R0, R1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_MOV, R0, 10),
+ BPF_ALU64_IMM(BPF_MOV, R1, -1),
+ BPF_ALU64_REG(BPF_SUB, R1, R1),
+ BPF_ALU64_REG(BPF_XOR, R2, R2),
+ BPF_JMP_REG(BPF_JEQ, R1, R2, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_SUB, R2, R2),
+ BPF_ALU64_REG(BPF_XOR, R3, R3),
+ BPF_ALU64_IMM(BPF_MOV, R0, 10),
+ BPF_ALU64_IMM(BPF_MOV, R1, -1),
+ BPF_JMP_REG(BPF_JEQ, R2, R3, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_SUB, R3, R3),
+ BPF_ALU64_REG(BPF_XOR, R4, R4),
+ BPF_ALU64_IMM(BPF_MOV, R2, 1),
+ BPF_ALU64_IMM(BPF_MOV, R5, -1),
+ BPF_JMP_REG(BPF_JEQ, R3, R4, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_SUB, R4, R4),
+ BPF_ALU64_REG(BPF_XOR, R5, R5),
+ BPF_ALU64_IMM(BPF_MOV, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R7, -1),
+ BPF_JMP_REG(BPF_JEQ, R5, R4, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_MOV, R5, 1),
+ BPF_ALU64_REG(BPF_SUB, R5, R5),
+ BPF_ALU64_REG(BPF_XOR, R6, R6),
+ BPF_ALU64_IMM(BPF_MOV, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R8, -1),
+ BPF_JMP_REG(BPF_JEQ, R5, R6, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_SUB, R6, R6),
+ BPF_ALU64_REG(BPF_XOR, R7, R7),
+ BPF_JMP_REG(BPF_JEQ, R7, R6, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_SUB, R7, R7),
+ BPF_ALU64_REG(BPF_XOR, R8, R8),
+ BPF_JMP_REG(BPF_JEQ, R7, R8, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_SUB, R8, R8),
+ BPF_ALU64_REG(BPF_XOR, R9, R9),
+ BPF_JMP_REG(BPF_JEQ, R9, R8, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_SUB, R9, R9),
+ BPF_ALU64_REG(BPF_XOR, R0, R0),
+ BPF_JMP_REG(BPF_JEQ, R9, R0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_SUB, R1, R1),
+ BPF_ALU64_REG(BPF_XOR, R0, R0),
+ BPF_JMP_REG(BPF_JEQ, R9, R0, 2),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } }
+ },
+ { /* Mainly checking JIT here. */
+ "INT: MUL",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 11),
+ BPF_ALU64_IMM(BPF_MOV, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R2, 2),
+ BPF_ALU64_IMM(BPF_MOV, R3, 3),
+ BPF_ALU64_IMM(BPF_MOV, R4, 4),
+ BPF_ALU64_IMM(BPF_MOV, R5, 5),
+ BPF_ALU64_IMM(BPF_MOV, R6, 6),
+ BPF_ALU64_IMM(BPF_MOV, R7, 7),
+ BPF_ALU64_IMM(BPF_MOV, R8, 8),
+ BPF_ALU64_IMM(BPF_MOV, R9, 9),
+ BPF_ALU64_REG(BPF_MUL, R0, R0),
+ BPF_ALU64_REG(BPF_MUL, R0, R1),
+ BPF_ALU64_REG(BPF_MUL, R0, R2),
+ BPF_ALU64_REG(BPF_MUL, R0, R3),
+ BPF_ALU64_REG(BPF_MUL, R0, R4),
+ BPF_ALU64_REG(BPF_MUL, R0, R5),
+ BPF_ALU64_REG(BPF_MUL, R0, R6),
+ BPF_ALU64_REG(BPF_MUL, R0, R7),
+ BPF_ALU64_REG(BPF_MUL, R0, R8),
+ BPF_ALU64_REG(BPF_MUL, R0, R9),
+ BPF_ALU64_IMM(BPF_MUL, R0, 10),
+ BPF_JMP_IMM(BPF_JEQ, R0, 439084800, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_MUL, R1, R0),
+ BPF_ALU64_REG(BPF_MUL, R1, R2),
+ BPF_ALU64_REG(BPF_MUL, R1, R3),
+ BPF_ALU64_REG(BPF_MUL, R1, R4),
+ BPF_ALU64_REG(BPF_MUL, R1, R5),
+ BPF_ALU64_REG(BPF_MUL, R1, R6),
+ BPF_ALU64_REG(BPF_MUL, R1, R7),
+ BPF_ALU64_REG(BPF_MUL, R1, R8),
+ BPF_ALU64_REG(BPF_MUL, R1, R9),
+ BPF_ALU64_IMM(BPF_MUL, R1, 10),
+ BPF_ALU64_REG(BPF_MOV, R2, R1),
+ BPF_ALU64_IMM(BPF_RSH, R2, 32),
+ BPF_JMP_IMM(BPF_JEQ, R2, 0x5a924, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_LSH, R1, 32),
+ BPF_ALU64_IMM(BPF_ARSH, R1, 32),
+ BPF_JMP_IMM(BPF_JEQ, R1, 0xebb90000, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_MUL, R2, R0),
+ BPF_ALU64_REG(BPF_MUL, R2, R1),
+ BPF_ALU64_REG(BPF_MUL, R2, R3),
+ BPF_ALU64_REG(BPF_MUL, R2, R4),
+ BPF_ALU64_REG(BPF_MUL, R2, R5),
+ BPF_ALU64_REG(BPF_MUL, R2, R6),
+ BPF_ALU64_REG(BPF_MUL, R2, R7),
+ BPF_ALU64_REG(BPF_MUL, R2, R8),
+ BPF_ALU64_REG(BPF_MUL, R2, R9),
+ BPF_ALU64_IMM(BPF_MUL, R2, 10),
+ BPF_ALU64_IMM(BPF_RSH, R2, 32),
+ BPF_ALU64_REG(BPF_MOV, R0, R2),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x35d97ef2 } }
+ },
+ { /* Mainly checking JIT here. */
+ "MOV REG64",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0xffffffffffffffffLL),
+ BPF_MOV64_REG(R1, R0),
+ BPF_MOV64_REG(R2, R1),
+ BPF_MOV64_REG(R3, R2),
+ BPF_MOV64_REG(R4, R3),
+ BPF_MOV64_REG(R5, R4),
+ BPF_MOV64_REG(R6, R5),
+ BPF_MOV64_REG(R7, R6),
+ BPF_MOV64_REG(R8, R7),
+ BPF_MOV64_REG(R9, R8),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_ALU64_IMM(BPF_MOV, R1, 0),
+ BPF_ALU64_IMM(BPF_MOV, R2, 0),
+ BPF_ALU64_IMM(BPF_MOV, R3, 0),
+ BPF_ALU64_IMM(BPF_MOV, R4, 0),
+ BPF_ALU64_IMM(BPF_MOV, R5, 0),
+ BPF_ALU64_IMM(BPF_MOV, R6, 0),
+ BPF_ALU64_IMM(BPF_MOV, R7, 0),
+ BPF_ALU64_IMM(BPF_MOV, R8, 0),
+ BPF_ALU64_IMM(BPF_MOV, R9, 0),
+ BPF_ALU64_REG(BPF_ADD, R0, R0),
+ BPF_ALU64_REG(BPF_ADD, R0, R1),
+ BPF_ALU64_REG(BPF_ADD, R0, R2),
+ BPF_ALU64_REG(BPF_ADD, R0, R3),
+ BPF_ALU64_REG(BPF_ADD, R0, R4),
+ BPF_ALU64_REG(BPF_ADD, R0, R5),
+ BPF_ALU64_REG(BPF_ADD, R0, R6),
+ BPF_ALU64_REG(BPF_ADD, R0, R7),
+ BPF_ALU64_REG(BPF_ADD, R0, R8),
+ BPF_ALU64_REG(BPF_ADD, R0, R9),
+ BPF_ALU64_IMM(BPF_ADD, R0, 0xfefe),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xfefe } }
+ },
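+	/*
+	 * Comment added for clarity: same pattern as MOV REG64, but the
+	 * 32-bit MOVs must also clear the upper halves left over from the
+	 * all-ones 64-bit loads, so the sum is still 0 before the final add.
+	 */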
+ { /* Mainly checking JIT here. */
+ "MOV REG32",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0xffffffffffffffffLL),
+ BPF_MOV64_REG(R1, R0),
+ BPF_MOV64_REG(R2, R1),
+ BPF_MOV64_REG(R3, R2),
+ BPF_MOV64_REG(R4, R3),
+ BPF_MOV64_REG(R5, R4),
+ BPF_MOV64_REG(R6, R5),
+ BPF_MOV64_REG(R7, R6),
+ BPF_MOV64_REG(R8, R7),
+ BPF_MOV64_REG(R9, R8),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_ALU32_IMM(BPF_MOV, R1, 0),
+ BPF_ALU32_IMM(BPF_MOV, R2, 0),
+ BPF_ALU32_IMM(BPF_MOV, R3, 0),
+ BPF_ALU32_IMM(BPF_MOV, R4, 0),
+ BPF_ALU32_IMM(BPF_MOV, R5, 0),
+ BPF_ALU32_IMM(BPF_MOV, R6, 0),
+ BPF_ALU32_IMM(BPF_MOV, R7, 0),
+ BPF_ALU32_IMM(BPF_MOV, R8, 0),
+ BPF_ALU32_IMM(BPF_MOV, R9, 0),
+ BPF_ALU64_REG(BPF_ADD, R0, R0),
+ BPF_ALU64_REG(BPF_ADD, R0, R1),
+ BPF_ALU64_REG(BPF_ADD, R0, R2),
+ BPF_ALU64_REG(BPF_ADD, R0, R3),
+ BPF_ALU64_REG(BPF_ADD, R0, R4),
+ BPF_ALU64_REG(BPF_ADD, R0, R5),
+ BPF_ALU64_REG(BPF_ADD, R0, R6),
+ BPF_ALU64_REG(BPF_ADD, R0, R7),
+ BPF_ALU64_REG(BPF_ADD, R0, R8),
+ BPF_ALU64_REG(BPF_ADD, R0, R9),
+ BPF_ALU64_IMM(BPF_ADD, R0, 0xfefe),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xfefe } }
+ },
+ { /* Mainly checking JIT here. */
+ "LD IMM64",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0xffffffffffffffffLL),
+ BPF_MOV64_REG(R1, R0),
+ BPF_MOV64_REG(R2, R1),
+ BPF_MOV64_REG(R3, R2),
+ BPF_MOV64_REG(R4, R3),
+ BPF_MOV64_REG(R5, R4),
+ BPF_MOV64_REG(R6, R5),
+ BPF_MOV64_REG(R7, R6),
+ BPF_MOV64_REG(R8, R7),
+ BPF_MOV64_REG(R9, R8),
+ BPF_LD_IMM64(R0, 0x0LL),
+ BPF_LD_IMM64(R1, 0x0LL),
+ BPF_LD_IMM64(R2, 0x0LL),
+ BPF_LD_IMM64(R3, 0x0LL),
+ BPF_LD_IMM64(R4, 0x0LL),
+ BPF_LD_IMM64(R5, 0x0LL),
+ BPF_LD_IMM64(R6, 0x0LL),
+ BPF_LD_IMM64(R7, 0x0LL),
+ BPF_LD_IMM64(R8, 0x0LL),
+ BPF_LD_IMM64(R9, 0x0LL),
+ BPF_ALU64_REG(BPF_ADD, R0, R0),
+ BPF_ALU64_REG(BPF_ADD, R0, R1),
+ BPF_ALU64_REG(BPF_ADD, R0, R2),
+ BPF_ALU64_REG(BPF_ADD, R0, R3),
+ BPF_ALU64_REG(BPF_ADD, R0, R4),
+ BPF_ALU64_REG(BPF_ADD, R0, R5),
+ BPF_ALU64_REG(BPF_ADD, R0, R6),
+ BPF_ALU64_REG(BPF_ADD, R0, R7),
+ BPF_ALU64_REG(BPF_ADD, R0, R8),
+ BPF_ALU64_REG(BPF_ADD, R0, R9),
+ BPF_ALU64_IMM(BPF_ADD, R0, 0xfefe),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xfefe } }
+ },
+ {
+ "INT: ALU MIX",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 11),
+ BPF_ALU64_IMM(BPF_ADD, R0, -1),
+ BPF_ALU64_IMM(BPF_MOV, R2, 2),
+ BPF_ALU64_IMM(BPF_XOR, R2, 3),
+ BPF_ALU64_REG(BPF_DIV, R0, R2),
+ BPF_JMP_IMM(BPF_JEQ, R0, 10, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_MOD, R0, 3),
+ BPF_JMP_IMM(BPF_JEQ, R0, 1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_MOV, R0, -1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -1 } }
+ },
+ {
+ "INT: shifts by register",
+ .u.insns_int = {
+ BPF_MOV64_IMM(R0, -1234),
+ BPF_MOV64_IMM(R1, 1),
+ BPF_ALU32_REG(BPF_RSH, R0, R1),
+ BPF_JMP_IMM(BPF_JEQ, R0, 0x7ffffd97, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(R2, 1),
+ BPF_ALU64_REG(BPF_LSH, R0, R2),
+ BPF_MOV32_IMM(R4, -1234),
+ BPF_JMP_REG(BPF_JEQ, R0, R4, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_AND, R4, 63),
+		BPF_ALU64_REG(BPF_LSH, R0, R4), /* R0 <<= 46 */
+ BPF_MOV64_IMM(R3, 47),
+ BPF_ALU64_REG(BPF_ARSH, R0, R3),
+ BPF_JMP_IMM(BPF_JEQ, R0, -617, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(R2, 1),
+ BPF_ALU64_REG(BPF_LSH, R4, R2), /* R4 = 46 << 1 */
+ BPF_JMP_IMM(BPF_JEQ, R4, 92, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(R4, 4),
+ BPF_ALU64_REG(BPF_LSH, R4, R4), /* R4 = 4 << 4 */
+ BPF_JMP_IMM(BPF_JEQ, R4, 64, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(R4, 5),
+ BPF_ALU32_REG(BPF_LSH, R4, R4), /* R4 = 5 << 5 */
+ BPF_JMP_IMM(BPF_JEQ, R4, 160, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(R0, -1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -1 } }
+ },
+#ifdef CONFIG_32BIT
+ {
+ "INT: 32-bit context pointer word order and zero-extension",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_JMP32_IMM(BPF_JEQ, R1, 0, 3),
+ BPF_ALU64_IMM(BPF_RSH, R1, 32),
+ BPF_JMP32_IMM(BPF_JNE, R1, 0, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } }
+ },
+#endif
+ {
+ "check: missing ret",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_IMM, 1),
+ },
+ CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
+ { },
+ { },
+ .fill_helper = NULL,
+ .expected_errcode = -EINVAL,
+ },
+ {
+ "check: div_k_0",
+ .u.insns = {
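+			/* a constant divisor of zero is rejected at load time */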
+ BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 0),
+ BPF_STMT(BPF_RET | BPF_K, 0)
+ },
+ CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
+ { },
+ { },
+ .fill_helper = NULL,
+ .expected_errcode = -EINVAL,
+ },
+ {
+ "check: unknown insn",
+ .u.insns = {
+ /* seccomp insn, rejected in socket filter */
+ BPF_STMT(BPF_LDX | BPF_W | BPF_ABS, 0),
+ BPF_STMT(BPF_RET | BPF_K, 0)
+ },
+ CLASSIC | FLAG_EXPECTED_FAIL,
+ { },
+ { },
+ .fill_helper = NULL,
+ .expected_errcode = -EINVAL,
+ },
+ {
+ "check: out of range spill/fill",
+ .u.insns = {
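+			/* only scratch slots M[0..15] exist; index 16 must fail */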
+ BPF_STMT(BPF_STX, 16),
+ BPF_STMT(BPF_RET | BPF_K, 0)
+ },
+ CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
+ { },
+ { },
+ .fill_helper = NULL,
+ .expected_errcode = -EINVAL,
+ },
+ {
+ "JUMPS + HOLES",
+ .u.insns = {
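+			/* long runs of loads between the conditional jumps
+			 * stress the JIT's branch offset calculations
+			 */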
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_JUMP(BPF_JMP | BPF_JGE, 0, 13, 15),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90c2894d, 3, 4),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90c2894d, 1, 2),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_JUMP(BPF_JMP | BPF_JGE, 0, 14, 15),
+ BPF_JUMP(BPF_JMP | BPF_JGE, 0, 13, 14),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_JUMP(BPF_JMP | BPF_JEQ, 0x2ac28349, 2, 3),
+ BPF_JUMP(BPF_JMP | BPF_JEQ, 0x2ac28349, 1, 2),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_JUMP(BPF_JMP | BPF_JGE, 0, 14, 15),
+ BPF_JUMP(BPF_JMP | BPF_JGE, 0, 13, 14),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90d2ff41, 2, 3),
+ BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90d2ff41, 1, 2),
+ BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0),
+ },
+ CLASSIC,
+ { 0x00, 0x1b, 0x21, 0x3c, 0x9d, 0xf8,
+ 0x90, 0xe2, 0xba, 0x0a, 0x56, 0xb4,
+ 0x08, 0x00,
+ 0x45, 0x00, 0x00, 0x28, 0x00, 0x00,
+ 0x20, 0x00, 0x40, 0x11, 0x00, 0x00, /* IP header */
+ 0xc0, 0xa8, 0x33, 0x01,
+ 0xc0, 0xa8, 0x33, 0x02,
+ 0xbb, 0xb6,
+ 0xa9, 0xfa,
+ 0x00, 0x14, 0x00, 0x00,
+ 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
+ 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
+ 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
+ 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
+ 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
+ 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
+ 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
+ 0xcc, 0xcc, 0xcc, 0xcc },
+ { { 88, 0x001b } }
+ },
+ {
+ "check: RET X",
+ .u.insns = {
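+			/* classic BPF may only return A or a constant K */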
+ BPF_STMT(BPF_RET | BPF_X, 0),
+ },
+ CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
+ { },
+ { },
+ .fill_helper = NULL,
+ .expected_errcode = -EINVAL,
+ },
+ {
+ "check: LDX + RET X",
+ .u.insns = {
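+			/* RET X stays invalid even when X is initialized */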
+ BPF_STMT(BPF_LDX | BPF_IMM, 42),
+ BPF_STMT(BPF_RET | BPF_X, 0),
+ },
+ CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
+ { },
+ { },
+ .fill_helper = NULL,
+ .expected_errcode = -EINVAL,
+ },
+ { /* Mainly checking JIT here. */
+ "M[]: alt STX + LDX",
+ .u.insns = {
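+			/* X starts at 100; each of the 16 scratch slots does a
+			 * store/load round trip that bumps the value by one, so
+			 * the filter returns 100 + 16 = 116.
+			 */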
+ BPF_STMT(BPF_LDX | BPF_IMM, 100),
+ BPF_STMT(BPF_STX, 0),
+ BPF_STMT(BPF_LDX | BPF_MEM, 0),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_STX, 1),
+ BPF_STMT(BPF_LDX | BPF_MEM, 1),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_STX, 2),
+ BPF_STMT(BPF_LDX | BPF_MEM, 2),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_STX, 3),
+ BPF_STMT(BPF_LDX | BPF_MEM, 3),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_STX, 4),
+ BPF_STMT(BPF_LDX | BPF_MEM, 4),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_STX, 5),
+ BPF_STMT(BPF_LDX | BPF_MEM, 5),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_STX, 6),
+ BPF_STMT(BPF_LDX | BPF_MEM, 6),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_STX, 7),
+ BPF_STMT(BPF_LDX | BPF_MEM, 7),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_STX, 8),
+ BPF_STMT(BPF_LDX | BPF_MEM, 8),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_STX, 9),
+ BPF_STMT(BPF_LDX | BPF_MEM, 9),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_STX, 10),
+ BPF_STMT(BPF_LDX | BPF_MEM, 10),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_STX, 11),
+ BPF_STMT(BPF_LDX | BPF_MEM, 11),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_STX, 12),
+ BPF_STMT(BPF_LDX | BPF_MEM, 12),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_STX, 13),
+ BPF_STMT(BPF_LDX | BPF_MEM, 13),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_STX, 14),
+ BPF_STMT(BPF_LDX | BPF_MEM, 14),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_STX, 15),
+ BPF_STMT(BPF_LDX | BPF_MEM, 15),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0),
+ },
+ CLASSIC | FLAG_NO_DATA,
+ { },
+ { { 0, 116 } },
+ },
+ { /* Mainly checking JIT here. */
+ "M[]: full STX + full LDX",
+ .u.insns = {
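+			/* fill all 16 scratch slots with distinct words, then
+			 * accumulate them; A returns their 32-bit sum.
+			 */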
+ BPF_STMT(BPF_LDX | BPF_IMM, 0xbadfeedb),
+ BPF_STMT(BPF_STX, 0),
+ BPF_STMT(BPF_LDX | BPF_IMM, 0xecabedae),
+ BPF_STMT(BPF_STX, 1),
+ BPF_STMT(BPF_LDX | BPF_IMM, 0xafccfeaf),
+ BPF_STMT(BPF_STX, 2),
+ BPF_STMT(BPF_LDX | BPF_IMM, 0xbffdcedc),
+ BPF_STMT(BPF_STX, 3),
+ BPF_STMT(BPF_LDX | BPF_IMM, 0xfbbbdccb),
+ BPF_STMT(BPF_STX, 4),
+ BPF_STMT(BPF_LDX | BPF_IMM, 0xfbabcbda),
+ BPF_STMT(BPF_STX, 5),
+ BPF_STMT(BPF_LDX | BPF_IMM, 0xaedecbdb),
+ BPF_STMT(BPF_STX, 6),
+ BPF_STMT(BPF_LDX | BPF_IMM, 0xadebbade),
+ BPF_STMT(BPF_STX, 7),
+ BPF_STMT(BPF_LDX | BPF_IMM, 0xfcfcfaec),
+ BPF_STMT(BPF_STX, 8),
+ BPF_STMT(BPF_LDX | BPF_IMM, 0xbcdddbdc),
+ BPF_STMT(BPF_STX, 9),
+ BPF_STMT(BPF_LDX | BPF_IMM, 0xfeefdfac),
+ BPF_STMT(BPF_STX, 10),
+ BPF_STMT(BPF_LDX | BPF_IMM, 0xcddcdeea),
+ BPF_STMT(BPF_STX, 11),
+ BPF_STMT(BPF_LDX | BPF_IMM, 0xaccfaebb),
+ BPF_STMT(BPF_STX, 12),
+ BPF_STMT(BPF_LDX | BPF_IMM, 0xbdcccdcf),
+ BPF_STMT(BPF_STX, 13),
+ BPF_STMT(BPF_LDX | BPF_IMM, 0xaaedecde),
+ BPF_STMT(BPF_STX, 14),
+ BPF_STMT(BPF_LDX | BPF_IMM, 0xfaeacdad),
+ BPF_STMT(BPF_STX, 15),
+ BPF_STMT(BPF_LDX | BPF_MEM, 0),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_LDX | BPF_MEM, 1),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_LDX | BPF_MEM, 2),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_LDX | BPF_MEM, 3),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_LDX | BPF_MEM, 4),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_LDX | BPF_MEM, 5),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_LDX | BPF_MEM, 6),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_LDX | BPF_MEM, 7),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_LDX | BPF_MEM, 8),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_LDX | BPF_MEM, 9),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_LDX | BPF_MEM, 10),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_LDX | BPF_MEM, 11),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_LDX | BPF_MEM, 12),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_LDX | BPF_MEM, 13),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_LDX | BPF_MEM, 14),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_LDX | BPF_MEM, 15),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0),
+ },
+ CLASSIC | FLAG_NO_DATA,
+ { },
+ { { 0, 0x2a5a5e5 } },
+ },
+ {
+ "check: SKF_AD_MAX",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF + SKF_AD_MAX),
+ BPF_STMT(BPF_RET | BPF_A, 0),
+ },
+ CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
+ { },
+ { },
+ .fill_helper = NULL,
+ .expected_errcode = -EINVAL,
+ },
+	{ /* Passes the checker but fails at runtime. */
+ "LD [SKF_AD_OFF-1]",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+ SKF_AD_OFF - 1),
+ BPF_STMT(BPF_RET | BPF_K, 1),
+ },
+ CLASSIC,
+ { },
+ { { 1, 0 } },
+ },
+ {
+ "load 64-bit immediate",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x567800001234LL),
+ BPF_MOV64_REG(R2, R1),
+ BPF_MOV64_REG(R3, R2),
+ BPF_ALU64_IMM(BPF_RSH, R2, 32),
+ BPF_ALU64_IMM(BPF_LSH, R3, 32),
+ BPF_ALU64_IMM(BPF_RSH, R3, 32),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_JMP_IMM(BPF_JEQ, R2, 0x5678, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JEQ, R3, 0x1234, 1),
+ BPF_EXIT_INSN(),
+ BPF_LD_IMM64(R0, 0x1ffffffffLL),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32), /* R0 = 1 */
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } }
+ },
+ /* BPF_ALU | BPF_MOV | BPF_X */
+ {
+ "ALU_MOV_X: dst = 2",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R1, 2),
+ BPF_ALU32_REG(BPF_MOV, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 2 } },
+ },
+ {
+ "ALU_MOV_X: dst = 4294967295",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R1, 4294967295U),
+ BPF_ALU32_REG(BPF_MOV, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 4294967295U } },
+ },
+ {
+ "ALU64_MOV_X: dst = 2",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R1, 2),
+ BPF_ALU64_REG(BPF_MOV, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 2 } },
+ },
+ {
+ "ALU64_MOV_X: dst = 4294967295",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R1, 4294967295U),
+ BPF_ALU64_REG(BPF_MOV, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 4294967295U } },
+ },
+ /* BPF_ALU | BPF_MOV | BPF_K */
+ {
+ "ALU_MOV_K: dst = 2",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 2),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 2 } },
+ },
+ {
+ "ALU_MOV_K: dst = 4294967295",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 4294967295U),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 4294967295U } },
+ },
+ {
+ "ALU_MOV_K: 0x0000ffffffff0000 = 0x00000000ffffffff",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
+ BPF_LD_IMM64(R3, 0x00000000ffffffffLL),
+ BPF_ALU32_IMM(BPF_MOV, R2, 0xffffffff),
+ BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ {
+ "ALU_MOV_K: small negative",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -123),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -123 } }
+ },
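+	/*
+	 * Comment added for clarity: 32-bit ALU ops clear the upper half of
+	 * the destination, so a 64-bit right shift by 32 of the result must
+	 * yield zero.
+	 */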
+ {
+ "ALU_MOV_K: small negative zero extension",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -123),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } }
+ },
+ {
+ "ALU_MOV_K: large negative",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -123456789),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -123456789 } }
+ },
+ {
+ "ALU_MOV_K: large negative zero extension",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -123456789),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } }
+ },
+ {
+ "ALU64_MOV_K: dst = 2",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 2),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 2 } },
+ },
+ {
+ "ALU64_MOV_K: dst = 2147483647",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 2147483647),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 2147483647 } },
+ },
+ {
+		"ALU64_MOV_K: dst = 0x0",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
+ BPF_LD_IMM64(R3, 0x0),
+ BPF_ALU64_IMM(BPF_MOV, R2, 0x0),
+ BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ {
+ "ALU64_MOV_K: dst = -1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
+ BPF_LD_IMM64(R3, 0xffffffffffffffffLL),
+ BPF_ALU64_IMM(BPF_MOV, R2, 0xffffffff),
+ BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ {
+ "ALU64_MOV_K: small negative",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, -123),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -123 } }
+ },
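+	/*
+	 * Comment added for clarity: 64-bit ALU immediates are sign-extended
+	 * from 32 bits, so the upper word of a negative MOV result reads
+	 * back as 0xffffffff.
+	 */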
+ {
+ "ALU64_MOV_K: small negative sign extension",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, -123),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xffffffff } }
+ },
+ {
+ "ALU64_MOV_K: large negative",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, -123456789),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -123456789 } }
+ },
+ {
+ "ALU64_MOV_K: large negative sign extension",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, -123456789),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xffffffff } }
+ },
+ /* BPF_ALU | BPF_ADD | BPF_X */
+ {
+ "ALU_ADD_X: 1 + 2 = 3",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 1),
+ BPF_ALU32_IMM(BPF_MOV, R1, 2),
+ BPF_ALU32_REG(BPF_ADD, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 3 } },
+ },
+ {
+ "ALU_ADD_X: 1 + 4294967294 = 4294967295",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 1),
+ BPF_ALU32_IMM(BPF_MOV, R1, 4294967294U),
+ BPF_ALU32_REG(BPF_ADD, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 4294967295U } },
+ },
+ {
+ "ALU_ADD_X: 2 + 4294967294 = 0",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 2),
+ BPF_LD_IMM64(R1, 4294967294U),
+ BPF_ALU32_REG(BPF_ADD, R0, R1),
+ BPF_JMP_IMM(BPF_JEQ, R0, 0, 2),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "ALU64_ADD_X: 1 + 2 = 3",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 1),
+ BPF_ALU32_IMM(BPF_MOV, R1, 2),
+ BPF_ALU64_REG(BPF_ADD, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 3 } },
+ },
+ {
+ "ALU64_ADD_X: 1 + 4294967294 = 4294967295",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 1),
+ BPF_ALU32_IMM(BPF_MOV, R1, 4294967294U),
+ BPF_ALU64_REG(BPF_ADD, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 4294967295U } },
+ },
+ {
+ "ALU64_ADD_X: 2 + 4294967294 = 4294967296",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 2),
+ BPF_LD_IMM64(R1, 4294967294U),
+ BPF_LD_IMM64(R2, 4294967296ULL),
+ BPF_ALU64_REG(BPF_ADD, R0, R1),
+ BPF_JMP_REG(BPF_JEQ, R0, R2, 2),
+ BPF_MOV32_IMM(R0, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ /* BPF_ALU | BPF_ADD | BPF_K */
+ {
+ "ALU_ADD_K: 1 + 2 = 3",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 1),
+ BPF_ALU32_IMM(BPF_ADD, R0, 2),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 3 } },
+ },
+ {
+ "ALU_ADD_K: 3 + 0 = 3",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 3),
+ BPF_ALU32_IMM(BPF_ADD, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 3 } },
+ },
+ {
+ "ALU_ADD_K: 1 + 4294967294 = 4294967295",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 1),
+ BPF_ALU32_IMM(BPF_ADD, R0, 4294967294U),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 4294967295U } },
+ },
+ {
+ "ALU_ADD_K: 4294967294 + 2 = 0",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 4294967294U),
+ BPF_ALU32_IMM(BPF_ADD, R0, 2),
+ BPF_JMP_IMM(BPF_JEQ, R0, 0, 2),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "ALU_ADD_K: 0 + (-1) = 0x00000000ffffffff",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x0),
+ BPF_LD_IMM64(R3, 0x00000000ffffffff),
+ BPF_ALU32_IMM(BPF_ADD, R2, 0xffffffff),
+ BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ {
+ "ALU_ADD_K: 0 + 0xffff = 0xffff",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x0),
+ BPF_LD_IMM64(R3, 0xffff),
+ BPF_ALU32_IMM(BPF_ADD, R2, 0xffff),
+ BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ {
+ "ALU_ADD_K: 0 + 0x7fffffff = 0x7fffffff",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x0),
+ BPF_LD_IMM64(R3, 0x7fffffff),
+ BPF_ALU32_IMM(BPF_ADD, R2, 0x7fffffff),
+ BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ {
+ "ALU_ADD_K: 0 + 0x80000000 = 0x80000000",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x0),
+ BPF_LD_IMM64(R3, 0x80000000),
+ BPF_ALU32_IMM(BPF_ADD, R2, 0x80000000),
+ BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ {
+ "ALU_ADD_K: 0 + 0x80008000 = 0x80008000",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x0),
+ BPF_LD_IMM64(R3, 0x80008000),
+ BPF_ALU32_IMM(BPF_ADD, R2, 0x80008000),
+ BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ {
+ "ALU64_ADD_K: 1 + 2 = 3",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 1),
+ BPF_ALU64_IMM(BPF_ADD, R0, 2),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 3 } },
+ },
+ {
+ "ALU64_ADD_K: 3 + 0 = 3",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 3),
+ BPF_ALU64_IMM(BPF_ADD, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 3 } },
+ },
+ {
+ "ALU64_ADD_K: 1 + 2147483646 = 2147483647",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 1),
+ BPF_ALU64_IMM(BPF_ADD, R0, 2147483646),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 2147483647 } },
+ },
+ {
+ "ALU64_ADD_K: 4294967294 + 2 = 4294967296",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 4294967294U),
+ BPF_LD_IMM64(R1, 4294967296ULL),
+ BPF_ALU64_IMM(BPF_ADD, R0, 2),
+ BPF_JMP_REG(BPF_JEQ, R0, R1, 2),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "ALU64_ADD_K: 2147483646 + -2147483647 = -1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 2147483646),
+ BPF_ALU64_IMM(BPF_ADD, R0, -2147483647),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -1 } },
+ },
+ {
+ "ALU64_ADD_K: 1 + 0 = 1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x1),
+ BPF_LD_IMM64(R3, 0x1),
+ BPF_ALU64_IMM(BPF_ADD, R2, 0x0),
+ BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ {
+ "ALU64_ADD_K: 0 + (-1) = 0xffffffffffffffff",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x0),
+ BPF_LD_IMM64(R3, 0xffffffffffffffffLL),
+ BPF_ALU64_IMM(BPF_ADD, R2, 0xffffffff),
+ BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ {
+ "ALU64_ADD_K: 0 + 0xffff = 0xffff",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x0),
+ BPF_LD_IMM64(R3, 0xffff),
+ BPF_ALU64_IMM(BPF_ADD, R2, 0xffff),
+ BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ {
+ "ALU64_ADD_K: 0 + 0x7fffffff = 0x7fffffff",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x0),
+ BPF_LD_IMM64(R3, 0x7fffffff),
+ BPF_ALU64_IMM(BPF_ADD, R2, 0x7fffffff),
+ BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ {
+ "ALU64_ADD_K: 0 + 0x80000000 = 0xffffffff80000000",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x0),
+ BPF_LD_IMM64(R3, 0xffffffff80000000LL),
+ BPF_ALU64_IMM(BPF_ADD, R2, 0x80000000),
+ BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ {
+		"ALU64_ADD_K: 0 + 0x80008000 = 0xffffffff80008000",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x0),
+ BPF_LD_IMM64(R3, 0xffffffff80008000LL),
+ BPF_ALU64_IMM(BPF_ADD, R2, 0x80008000),
+ BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ /* BPF_ALU | BPF_SUB | BPF_X */
+ {
+ "ALU_SUB_X: 3 - 1 = 2",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 3),
+ BPF_ALU32_IMM(BPF_MOV, R1, 1),
+ BPF_ALU32_REG(BPF_SUB, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 2 } },
+ },
+ {
+ "ALU_SUB_X: 4294967295 - 4294967294 = 1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 4294967295U),
+ BPF_ALU32_IMM(BPF_MOV, R1, 4294967294U),
+ BPF_ALU32_REG(BPF_SUB, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "ALU64_SUB_X: 3 - 1 = 2",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 3),
+ BPF_ALU32_IMM(BPF_MOV, R1, 1),
+ BPF_ALU64_REG(BPF_SUB, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 2 } },
+ },
+ {
+ "ALU64_SUB_X: 4294967295 - 4294967294 = 1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 4294967295U),
+ BPF_ALU32_IMM(BPF_MOV, R1, 4294967294U),
+ BPF_ALU64_REG(BPF_SUB, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ /* BPF_ALU | BPF_SUB | BPF_K */
+ {
+ "ALU_SUB_K: 3 - 1 = 2",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 3),
+ BPF_ALU32_IMM(BPF_SUB, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 2 } },
+ },
+ {
+ "ALU_SUB_K: 3 - 0 = 3",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 3),
+ BPF_ALU32_IMM(BPF_SUB, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 3 } },
+ },
+ {
+ "ALU_SUB_K: 4294967295 - 4294967294 = 1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 4294967295U),
+ BPF_ALU32_IMM(BPF_SUB, R0, 4294967294U),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "ALU64_SUB_K: 3 - 1 = 2",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 3),
+ BPF_ALU64_IMM(BPF_SUB, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 2 } },
+ },
+ {
+ "ALU64_SUB_K: 3 - 0 = 3",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 3),
+ BPF_ALU64_IMM(BPF_SUB, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 3 } },
+ },
+ {
+ "ALU64_SUB_K: 4294967294 - 4294967295 = -1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 4294967294U),
+ BPF_ALU64_IMM(BPF_SUB, R0, 4294967295U),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -1 } },
+ },
+ {
+		"ALU64_SUB_K: 2147483646 - 2147483647 = -1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 2147483646),
+ BPF_ALU64_IMM(BPF_SUB, R0, 2147483647),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -1 } },
+ },
+ /* BPF_ALU | BPF_MUL | BPF_X */
+ {
+ "ALU_MUL_X: 2 * 3 = 6",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 2),
+ BPF_ALU32_IMM(BPF_MOV, R1, 3),
+ BPF_ALU32_REG(BPF_MUL, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 6 } },
+ },
+ {
+ "ALU_MUL_X: 2 * 0x7FFFFFF8 = 0xFFFFFFF0",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 2),
+ BPF_ALU32_IMM(BPF_MOV, R1, 0x7FFFFFF8),
+ BPF_ALU32_REG(BPF_MUL, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xFFFFFFF0 } },
+ },
+ {
+ "ALU_MUL_X: -1 * -1 = 1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, -1),
+ BPF_ALU32_IMM(BPF_MOV, R1, -1),
+ BPF_ALU32_REG(BPF_MUL, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "ALU64_MUL_X: 2 * 3 = 6",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 2),
+ BPF_ALU32_IMM(BPF_MOV, R1, 3),
+ BPF_ALU64_REG(BPF_MUL, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 6 } },
+ },
+ {
+ "ALU64_MUL_X: 1 * 2147483647 = 2147483647",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 1),
+ BPF_ALU32_IMM(BPF_MOV, R1, 2147483647),
+ BPF_ALU64_REG(BPF_MUL, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 2147483647 } },
+ },
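+	/*
+	 * Comment added for clarity: a full 64x64 multiply typically needs a
+	 * multi-instruction sequence on 32-bit JITs, so check both halves of
+	 * the product.
+	 */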
+ {
+ "ALU64_MUL_X: 64x64 multiply, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0fedcba987654321LL),
+ BPF_LD_IMM64(R1, 0x123456789abcdef0LL),
+ BPF_ALU64_REG(BPF_MUL, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xe5618cf0 } }
+ },
+ {
+ "ALU64_MUL_X: 64x64 multiply, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0fedcba987654321LL),
+ BPF_LD_IMM64(R1, 0x123456789abcdef0LL),
+ BPF_ALU64_REG(BPF_MUL, R0, R1),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x2236d88f } }
+ },
+ /* BPF_ALU | BPF_MUL | BPF_K */
+ {
+ "ALU_MUL_K: 2 * 3 = 6",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 2),
+ BPF_ALU32_IMM(BPF_MUL, R0, 3),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 6 } },
+ },
+ {
+ "ALU_MUL_K: 3 * 1 = 3",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 3),
+ BPF_ALU32_IMM(BPF_MUL, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 3 } },
+ },
+ {
+ "ALU_MUL_K: 2 * 0x7FFFFFF8 = 0xFFFFFFF0",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 2),
+ BPF_ALU32_IMM(BPF_MUL, R0, 0x7FFFFFF8),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xFFFFFFF0 } },
+ },
+ {
+ "ALU_MUL_K: 1 * (-1) = 0x00000000ffffffff",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x1),
+ BPF_LD_IMM64(R3, 0x00000000ffffffff),
+ BPF_ALU32_IMM(BPF_MUL, R2, 0xffffffff),
+ BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ {
+ "ALU64_MUL_K: 2 * 3 = 6",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 2),
+ BPF_ALU64_IMM(BPF_MUL, R0, 3),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 6 } },
+ },
+ {
+ "ALU64_MUL_K: 3 * 1 = 3",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 3),
+ BPF_ALU64_IMM(BPF_MUL, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 3 } },
+ },
+ {
+ "ALU64_MUL_K: 1 * 2147483647 = 2147483647",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 1),
+ BPF_ALU64_IMM(BPF_MUL, R0, 2147483647),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 2147483647 } },
+ },
+ {
+ "ALU64_MUL_K: 1 * -2147483647 = -2147483647",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 1),
+ BPF_ALU64_IMM(BPF_MUL, R0, -2147483647),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -2147483647 } },
+ },
+ {
+ "ALU64_MUL_K: 1 * (-1) = 0xffffffffffffffff",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x1),
+ BPF_LD_IMM64(R3, 0xffffffffffffffffLL),
+ BPF_ALU64_IMM(BPF_MUL, R2, 0xffffffff),
+ BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ {
+ "ALU64_MUL_K: 64x32 multiply, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_MUL, R0, 0x12345678),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xe242d208 } }
+ },
+ {
+ "ALU64_MUL_K: 64x32 multiply, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_MUL, R0, 0x12345678),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xc28f5c28 } }
+ },
+ /* BPF_ALU | BPF_DIV | BPF_X */
+ {
+ "ALU_DIV_X: 6 / 2 = 3",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 6),
+ BPF_ALU32_IMM(BPF_MOV, R1, 2),
+ BPF_ALU32_REG(BPF_DIV, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 3 } },
+ },
+ {
+ "ALU_DIV_X: 4294967295 / 4294967295 = 1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 4294967295U),
+ BPF_ALU32_IMM(BPF_MOV, R1, 4294967295U),
+ BPF_ALU32_REG(BPF_DIV, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "ALU64_DIV_X: 6 / 2 = 3",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 6),
+ BPF_ALU32_IMM(BPF_MOV, R1, 2),
+ BPF_ALU64_REG(BPF_DIV, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 3 } },
+ },
+ {
+ "ALU64_DIV_X: 2147483647 / 2147483647 = 1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 2147483647),
+ BPF_ALU32_IMM(BPF_MOV, R1, 2147483647),
+ BPF_ALU64_REG(BPF_DIV, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "ALU64_DIV_X: 0xffffffffffffffff / (-1) = 0x0000000000000001",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0xffffffffffffffffLL),
+ BPF_LD_IMM64(R4, 0xffffffffffffffffLL),
+ BPF_LD_IMM64(R3, 0x0000000000000001LL),
+ BPF_ALU64_REG(BPF_DIV, R2, R4),
+ BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ /* BPF_ALU | BPF_DIV | BPF_K */
+ {
+ "ALU_DIV_K: 6 / 2 = 3",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 6),
+ BPF_ALU32_IMM(BPF_DIV, R0, 2),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 3 } },
+ },
+ {
+ "ALU_DIV_K: 3 / 1 = 3",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 3),
+ BPF_ALU32_IMM(BPF_DIV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 3 } },
+ },
+ {
+ "ALU_DIV_K: 4294967295 / 4294967295 = 1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 4294967295U),
+ BPF_ALU32_IMM(BPF_DIV, R0, 4294967295U),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "ALU_DIV_K: 0xffffffffffffffff / (-1) = 0x1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0xffffffffffffffffLL),
+ BPF_LD_IMM64(R3, 0x1UL),
+ BPF_ALU32_IMM(BPF_DIV, R2, 0xffffffff),
+ BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ {
+ "ALU64_DIV_K: 6 / 2 = 3",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 6),
+ BPF_ALU64_IMM(BPF_DIV, R0, 2),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 3 } },
+ },
+ {
+ "ALU64_DIV_K: 3 / 1 = 3",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 3),
+ BPF_ALU64_IMM(BPF_DIV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 3 } },
+ },
+ {
+ "ALU64_DIV_K: 2147483647 / 2147483647 = 1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 2147483647),
+ BPF_ALU64_IMM(BPF_DIV, R0, 2147483647),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
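+	/*
+	 * Comment added for clarity: the 0xffffffff immediate sign-extends
+	 * to all ones, and eBPF division is unsigned, so the all-ones
+	 * pattern divided by it yields 1.
+	 */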
+ {
+ "ALU64_DIV_K: 0xffffffffffffffff / (-1) = 0x0000000000000001",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0xffffffffffffffffLL),
+ BPF_LD_IMM64(R3, 0x0000000000000001LL),
+ BPF_ALU64_IMM(BPF_DIV, R2, 0xffffffff),
+ BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ /* BPF_ALU | BPF_MOD | BPF_X */
+ {
+ "ALU_MOD_X: 3 % 2 = 1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 3),
+ BPF_ALU32_IMM(BPF_MOV, R1, 2),
+ BPF_ALU32_REG(BPF_MOD, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "ALU_MOD_X: 4294967295 % 4294967293 = 2",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 4294967295U),
+ BPF_ALU32_IMM(BPF_MOV, R1, 4294967293U),
+ BPF_ALU32_REG(BPF_MOD, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 2 } },
+ },
+ {
+ "ALU64_MOD_X: 3 % 2 = 1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 3),
+ BPF_ALU32_IMM(BPF_MOV, R1, 2),
+ BPF_ALU64_REG(BPF_MOD, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "ALU64_MOD_X: 2147483647 % 2147483645 = 2",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 2147483647),
+ BPF_ALU32_IMM(BPF_MOV, R1, 2147483645),
+ BPF_ALU64_REG(BPF_MOD, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 2 } },
+ },
+ /* BPF_ALU | BPF_MOD | BPF_K */
+ {
+ "ALU_MOD_K: 3 % 2 = 1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 3),
+ BPF_ALU32_IMM(BPF_MOD, R0, 2),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "ALU_MOD_K: 3 % 1 = 0",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 3),
+ BPF_ALU32_IMM(BPF_MOD, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ },
+ {
+ "ALU_MOD_K: 4294967295 % 4294967293 = 2",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 4294967295U),
+ BPF_ALU32_IMM(BPF_MOD, R0, 4294967293U),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 2 } },
+ },
+ {
+ "ALU64_MOD_K: 3 % 2 = 1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 3),
+ BPF_ALU64_IMM(BPF_MOD, R0, 2),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "ALU64_MOD_K: 3 % 1 = 0",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 3),
+ BPF_ALU64_IMM(BPF_MOD, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ },
+ {
+ "ALU64_MOD_K: 2147483647 % 2147483645 = 2",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 2147483647),
+ BPF_ALU64_IMM(BPF_MOD, R0, 2147483645),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 2 } },
+ },
+ /* BPF_ALU | BPF_AND | BPF_X */
+ {
+ "ALU_AND_X: 3 & 2 = 2",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 3),
+ BPF_ALU32_IMM(BPF_MOV, R1, 2),
+ BPF_ALU32_REG(BPF_AND, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 2 } },
+ },
+ {
+ "ALU_AND_X: 0xffffffff & 0xffffffff = 0xffffffff",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0xffffffff),
+ BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff),
+ BPF_ALU32_REG(BPF_AND, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xffffffff } },
+ },
+ {
+ "ALU64_AND_X: 3 & 2 = 2",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 3),
+ BPF_ALU32_IMM(BPF_MOV, R1, 2),
+ BPF_ALU64_REG(BPF_AND, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 2 } },
+ },
+ {
+ "ALU64_AND_X: 0xffffffff & 0xffffffff = 0xffffffff",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0xffffffff),
+ BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff),
+ BPF_ALU64_REG(BPF_AND, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xffffffff } },
+ },
+ /* BPF_ALU | BPF_AND | BPF_K */
+ {
+ "ALU_AND_K: 3 & 2 = 2",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 3),
+ BPF_ALU32_IMM(BPF_AND, R0, 2),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 2 } },
+ },
+ {
+ "ALU_AND_K: 0xffffffff & 0xffffffff = 0xffffffff",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0xffffffff),
+ BPF_ALU32_IMM(BPF_AND, R0, 0xffffffff),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xffffffff } },
+ },
+ {
+ "ALU_AND_K: Small immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0x01020304),
+ BPF_ALU32_IMM(BPF_AND, R0, 15),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 4 } }
+ },
+ {
+ "ALU_AND_K: Large immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0xf1f2f3f4),
+ BPF_ALU32_IMM(BPF_AND, R0, 0xafbfcfdf),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xa1b2c3d4 } }
+ },
+ {
+ "ALU_AND_K: Zero extension",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_LD_IMM64(R1, 0x0000000080a0c0e0LL),
+ BPF_ALU32_IMM(BPF_AND, R0, 0xf0f0f0f0),
+ BPF_JMP_REG(BPF_JEQ, R0, R1, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } }
+ },
+ {
+ "ALU64_AND_K: 3 & 2 = 2",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 3),
+ BPF_ALU64_IMM(BPF_AND, R0, 2),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 2 } },
+ },
+ {
+ "ALU64_AND_K: 0xffffffff & 0xffffffff = 0xffffffff",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0xffffffff),
+ BPF_ALU64_IMM(BPF_AND, R0, 0xffffffff),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xffffffff } },
+ },
+ {
+ "ALU64_AND_K: 0x0000ffffffff0000 & 0x0 = 0x0000000000000000",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
+ BPF_LD_IMM64(R3, 0x0000000000000000LL),
+ BPF_ALU64_IMM(BPF_AND, R2, 0x0),
+ BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ {
+ "ALU64_AND_K: 0x0000ffffffff0000 & -1 = 0x0000ffffffff0000",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
+ BPF_LD_IMM64(R3, 0x0000ffffffff0000LL),
+ BPF_ALU64_IMM(BPF_AND, R2, 0xffffffff),
+ BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ {
+ "ALU64_AND_K: 0xffffffffffffffff & -1 = 0xffffffffffffffff",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0xffffffffffffffffLL),
+ BPF_LD_IMM64(R3, 0xffffffffffffffffLL),
+ BPF_ALU64_IMM(BPF_AND, R2, 0xffffffff),
+ BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ {
+ "ALU64_AND_K: Sign extension 1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_LD_IMM64(R1, 0x00000000090b0d0fLL),
+ BPF_ALU64_IMM(BPF_AND, R0, 0x0f0f0f0f),
+ BPF_JMP_REG(BPF_JEQ, R0, R1, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } }
+ },
+ {
+ "ALU64_AND_K: Sign extension 2",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_LD_IMM64(R1, 0x0123456780a0c0e0LL),
+ BPF_ALU64_IMM(BPF_AND, R0, 0xf0f0f0f0),
+ BPF_JMP_REG(BPF_JEQ, R0, R1, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } }
+ },
+ /* BPF_ALU | BPF_OR | BPF_X */
+ {
+ "ALU_OR_X: 1 | 2 = 3",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 1),
+ BPF_ALU32_IMM(BPF_MOV, R1, 2),
+ BPF_ALU32_REG(BPF_OR, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 3 } },
+ },
+ {
+ "ALU_OR_X: 0x0 | 0xffffffff = 0xffffffff",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0),
+ BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff),
+ BPF_ALU32_REG(BPF_OR, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xffffffff } },
+ },
+ {
+ "ALU64_OR_X: 1 | 2 = 3",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 1),
+ BPF_ALU32_IMM(BPF_MOV, R1, 2),
+ BPF_ALU64_REG(BPF_OR, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 3 } },
+ },
+ {
+ "ALU64_OR_X: 0 | 0xffffffff = 0xffffffff",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0),
+ BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff),
+ BPF_ALU64_REG(BPF_OR, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xffffffff } },
+ },
+ /* BPF_ALU | BPF_OR | BPF_K */
+ {
+ "ALU_OR_K: 1 | 2 = 3",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 1),
+ BPF_ALU32_IMM(BPF_OR, R0, 2),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 3 } },
+ },
+ {
+ "ALU_OR_K: 0 & 0xffffffff = 0xffffffff",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0),
+ BPF_ALU32_IMM(BPF_OR, R0, 0xffffffff),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xffffffff } },
+ },
+ {
+ "ALU_OR_K: Small immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0x01020304),
+ BPF_ALU32_IMM(BPF_OR, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x01020305 } }
+ },
+ {
+ "ALU_OR_K: Large immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0x01020304),
+ BPF_ALU32_IMM(BPF_OR, R0, 0xa0b0c0d0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xa1b2c3d4 } }
+ },
+ {
+ "ALU_OR_K: Zero extension",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_LD_IMM64(R1, 0x00000000f9fbfdffLL),
+ BPF_ALU32_IMM(BPF_OR, R0, 0xf0f0f0f0),
+ BPF_JMP_REG(BPF_JEQ, R0, R1, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } }
+ },
+ {
+ "ALU64_OR_K: 1 | 2 = 3",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 1),
+ BPF_ALU64_IMM(BPF_OR, R0, 2),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 3 } },
+ },
+ {
+ "ALU64_OR_K: 0 & 0xffffffff = 0xffffffff",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0),
+ BPF_ALU64_IMM(BPF_OR, R0, 0xffffffff),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xffffffff } },
+ },
+ {
+ "ALU64_OR_K: 0x0000ffffffff0000 | 0x0 = 0x0000ffffffff0000",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
+ BPF_LD_IMM64(R3, 0x0000ffffffff0000LL),
+ BPF_ALU64_IMM(BPF_OR, R2, 0x0),
+ BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ {
+ "ALU64_OR_K: 0x0000ffffffff0000 | -1 = 0xffffffffffffffff",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
+ BPF_LD_IMM64(R3, 0xffffffffffffffffLL),
+ BPF_ALU64_IMM(BPF_OR, R2, 0xffffffff),
+ BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ {
+ "ALU64_OR_K: 0x000000000000000 | -1 = 0xffffffffffffffff",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x0000000000000000LL),
+ BPF_LD_IMM64(R3, 0xffffffffffffffffLL),
+ BPF_ALU64_IMM(BPF_OR, R2, 0xffffffff),
+ BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ {
+ "ALU64_OR_K: Sign extension 1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_LD_IMM64(R1, 0x012345678fafcfefLL),
+ BPF_ALU64_IMM(BPF_OR, R0, 0x0f0f0f0f),
+ BPF_JMP_REG(BPF_JEQ, R0, R1, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } }
+ },
+ {
+ "ALU64_OR_K: Sign extension 2",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_LD_IMM64(R1, 0xfffffffff9fbfdffLL),
+ BPF_ALU64_IMM(BPF_OR, R0, 0xf0f0f0f0),
+ BPF_JMP_REG(BPF_JEQ, R0, R1, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } }
+ },
+ /* BPF_ALU | BPF_XOR | BPF_X */
+ {
+ "ALU_XOR_X: 5 ^ 6 = 3",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 5),
+ BPF_ALU32_IMM(BPF_MOV, R1, 6),
+ BPF_ALU32_REG(BPF_XOR, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 3 } },
+ },
+ {
+ "ALU_XOR_X: 0x1 ^ 0xffffffff = 0xfffffffe",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 1),
+ BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff),
+ BPF_ALU32_REG(BPF_XOR, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xfffffffe } },
+ },
+ {
+ "ALU64_XOR_X: 5 ^ 6 = 3",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 5),
+ BPF_ALU32_IMM(BPF_MOV, R1, 6),
+ BPF_ALU64_REG(BPF_XOR, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 3 } },
+ },
+ {
+ "ALU64_XOR_X: 1 ^ 0xffffffff = 0xfffffffe",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 1),
+ BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff),
+ BPF_ALU64_REG(BPF_XOR, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xfffffffe } },
+ },
+ /* BPF_ALU | BPF_XOR | BPF_K */
+ {
+ "ALU_XOR_K: 5 ^ 6 = 3",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 5),
+ BPF_ALU32_IMM(BPF_XOR, R0, 6),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 3 } },
+ },
+ {
+ "ALU_XOR_K: 1 ^ 0xffffffff = 0xfffffffe",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 1),
+ BPF_ALU32_IMM(BPF_XOR, R0, 0xffffffff),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xfffffffe } },
+ },
+ {
+ "ALU_XOR_K: Small immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0x01020304),
+ BPF_ALU32_IMM(BPF_XOR, R0, 15),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x0102030b } }
+ },
+ {
+ "ALU_XOR_K: Large immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0xf1f2f3f4),
+ BPF_ALU32_IMM(BPF_XOR, R0, 0xafbfcfdf),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x5e4d3c2b } }
+ },
+ {
+ "ALU_XOR_K: Zero extension",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_LD_IMM64(R1, 0x00000000795b3d1fLL),
+ BPF_ALU32_IMM(BPF_XOR, R0, 0xf0f0f0f0),
+ BPF_JMP_REG(BPF_JEQ, R0, R1, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } }
+ },
+ {
+ "ALU64_XOR_K: 5 ^ 6 = 3",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 5),
+ BPF_ALU64_IMM(BPF_XOR, R0, 6),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 3 } },
+ },
+ {
+ "ALU64_XOR_K: 1 ^ 0xffffffff = 0xfffffffe",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 1),
+ BPF_ALU64_IMM(BPF_XOR, R0, 0xffffffff),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xfffffffe } },
+ },
+ {
+ "ALU64_XOR_K: 0x0000ffffffff0000 ^ 0x0 = 0x0000ffffffff0000",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
+ BPF_LD_IMM64(R3, 0x0000ffffffff0000LL),
+ BPF_ALU64_IMM(BPF_XOR, R2, 0x0),
+ BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ {
+ "ALU64_XOR_K: 0x0000ffffffff0000 ^ -1 = 0xffff00000000ffff",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
+ BPF_LD_IMM64(R3, 0xffff00000000ffffLL),
+ BPF_ALU64_IMM(BPF_XOR, R2, 0xffffffff),
+ BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ {
+ "ALU64_XOR_K: 0x000000000000000 ^ -1 = 0xffffffffffffffff",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x0000000000000000LL),
+ BPF_LD_IMM64(R3, 0xffffffffffffffffLL),
+ BPF_ALU64_IMM(BPF_XOR, R2, 0xffffffff),
+ BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ {
+ "ALU64_XOR_K: Sign extension 1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_LD_IMM64(R1, 0x0123456786a4c2e0LL),
+ BPF_ALU64_IMM(BPF_XOR, R0, 0x0f0f0f0f),
+ BPF_JMP_REG(BPF_JEQ, R0, R1, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } }
+ },
+ {
+ "ALU64_XOR_K: Sign extension 2",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_LD_IMM64(R1, 0xfedcba98795b3d1fLL),
+ BPF_ALU64_IMM(BPF_XOR, R0, 0xf0f0f0f0),
+ BPF_JMP_REG(BPF_JEQ, R0, R1, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } }
+ },
+ /* BPF_ALU | BPF_LSH | BPF_X */
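+ /*
+ * Only the lower 32 bits of R0 are visible as the return value, so
+ * the 64-bit shift results are checked in two parts: the "low word"
+ * tests return the result directly, while the "high word" tests
+ * first move bits 32-63 down with BPF_ALU64_IMM(BPF_RSH, R0, 32).
+ */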
+ {
+ "ALU_LSH_X: 1 << 1 = 2",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 1),
+ BPF_ALU32_IMM(BPF_MOV, R1, 1),
+ BPF_ALU32_REG(BPF_LSH, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 2 } },
+ },
+ {
+ "ALU_LSH_X: 1 << 31 = 0x80000000",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 1),
+ BPF_ALU32_IMM(BPF_MOV, R1, 31),
+ BPF_ALU32_REG(BPF_LSH, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x80000000 } },
+ },
+ {
+ "ALU_LSH_X: 0x12345678 << 12 = 0x45678000",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0x12345678),
+ BPF_ALU32_IMM(BPF_MOV, R1, 12),
+ BPF_ALU32_REG(BPF_LSH, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x45678000 } }
+ },
+ {
+ "ALU64_LSH_X: 1 << 1 = 2",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 1),
+ BPF_ALU32_IMM(BPF_MOV, R1, 1),
+ BPF_ALU64_REG(BPF_LSH, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 2 } },
+ },
+ {
+ "ALU64_LSH_X: 1 << 31 = 0x80000000",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 1),
+ BPF_ALU32_IMM(BPF_MOV, R1, 31),
+ BPF_ALU64_REG(BPF_LSH, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x80000000 } },
+ },
+ {
+ "ALU64_LSH_X: Shift < 32, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 12),
+ BPF_ALU64_REG(BPF_LSH, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xbcdef000 } }
+ },
+ {
+ "ALU64_LSH_X: Shift < 32, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 12),
+ BPF_ALU64_REG(BPF_LSH, R0, R1),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x3456789a } }
+ },
+ {
+ "ALU64_LSH_X: Shift > 32, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 36),
+ BPF_ALU64_REG(BPF_LSH, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } }
+ },
+ {
+ "ALU64_LSH_X: Shift > 32, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 36),
+ BPF_ALU64_REG(BPF_LSH, R0, R1),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x9abcdef0 } }
+ },
+ {
+ "ALU64_LSH_X: Shift == 32, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 32),
+ BPF_ALU64_REG(BPF_LSH, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } }
+ },
+ {
+ "ALU64_LSH_X: Shift == 32, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 32),
+ BPF_ALU64_REG(BPF_LSH, R0, R1),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x89abcdef } }
+ },
+ {
+ "ALU64_LSH_X: Zero shift, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 0),
+ BPF_ALU64_REG(BPF_LSH, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x89abcdef } }
+ },
+ {
+ "ALU64_LSH_X: Zero shift, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 0),
+ BPF_ALU64_REG(BPF_LSH, R0, R1),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x01234567 } }
+ },
+ /* BPF_ALU | BPF_LSH | BPF_K */
+ {
+ "ALU_LSH_K: 1 << 1 = 2",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 1),
+ BPF_ALU32_IMM(BPF_LSH, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 2 } },
+ },
+ {
+ "ALU_LSH_K: 1 << 31 = 0x80000000",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 1),
+ BPF_ALU32_IMM(BPF_LSH, R0, 31),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x80000000 } },
+ },
+ {
+ "ALU_LSH_K: 0x12345678 << 12 = 0x45678000",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0x12345678),
+ BPF_ALU32_IMM(BPF_LSH, R0, 12),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x45678000 } }
+ },
+ {
+ "ALU_LSH_K: 0x12345678 << 0 = 0x12345678",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0x12345678),
+ BPF_ALU32_IMM(BPF_LSH, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x12345678 } }
+ },
+ {
+ "ALU64_LSH_K: 1 << 1 = 2",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 1),
+ BPF_ALU64_IMM(BPF_LSH, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 2 } },
+ },
+ {
+ "ALU64_LSH_K: 1 << 31 = 0x80000000",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 1),
+ BPF_ALU64_IMM(BPF_LSH, R0, 31),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x80000000 } },
+ },
+ {
+ "ALU64_LSH_K: Shift < 32, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_LSH, R0, 12),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xbcdef000 } }
+ },
+ {
+ "ALU64_LSH_K: Shift < 32, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_LSH, R0, 12),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x3456789a } }
+ },
+ {
+ "ALU64_LSH_K: Shift > 32, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_LSH, R0, 36),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } }
+ },
+ {
+ "ALU64_LSH_K: Shift > 32, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_LSH, R0, 36),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x9abcdef0 } }
+ },
+ {
+ "ALU64_LSH_K: Shift == 32, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_LSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } }
+ },
+ {
+ "ALU64_LSH_K: Shift == 32, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_LSH, R0, 32),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x89abcdef } }
+ },
+ {
+ "ALU64_LSH_K: Zero shift",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_LSH, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x89abcdef } }
+ },
+ /* BPF_ALU | BPF_RSH | BPF_X */
+ {
+ "ALU_RSH_X: 2 >> 1 = 1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 2),
+ BPF_ALU32_IMM(BPF_MOV, R1, 1),
+ BPF_ALU32_REG(BPF_RSH, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "ALU_RSH_X: 0x80000000 >> 31 = 1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x80000000),
+ BPF_ALU32_IMM(BPF_MOV, R1, 31),
+ BPF_ALU32_REG(BPF_RSH, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "ALU_RSH_X: 0x12345678 >> 20 = 0x123",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0x12345678),
+ BPF_ALU32_IMM(BPF_MOV, R1, 20),
+ BPF_ALU32_REG(BPF_RSH, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x123 } }
+ },
+ {
+ "ALU64_RSH_X: 2 >> 1 = 1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 2),
+ BPF_ALU32_IMM(BPF_MOV, R1, 1),
+ BPF_ALU64_REG(BPF_RSH, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "ALU64_RSH_X: 0x80000000 >> 31 = 1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x80000000),
+ BPF_ALU32_IMM(BPF_MOV, R1, 31),
+ BPF_ALU64_REG(BPF_RSH, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "ALU64_RSH_X: Shift < 32, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 12),
+ BPF_ALU64_REG(BPF_RSH, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x56789abc } }
+ },
+ {
+ "ALU64_RSH_X: Shift < 32, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 12),
+ BPF_ALU64_REG(BPF_RSH, R0, R1),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x00081234 } }
+ },
+ {
+ "ALU64_RSH_X: Shift > 32, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 36),
+ BPF_ALU64_REG(BPF_RSH, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x08123456 } }
+ },
+ {
+ "ALU64_RSH_X: Shift > 32, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 36),
+ BPF_ALU64_REG(BPF_RSH, R0, R1),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } }
+ },
+ {
+ "ALU64_RSH_X: Shift == 32, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 32),
+ BPF_ALU64_REG(BPF_RSH, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x81234567 } }
+ },
+ {
+ "ALU64_RSH_X: Shift == 32, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 32),
+ BPF_ALU64_REG(BPF_RSH, R0, R1),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } }
+ },
+ {
+ "ALU64_RSH_X: Zero shift, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 0),
+ BPF_ALU64_REG(BPF_RSH, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x89abcdef } }
+ },
+ {
+ "ALU64_RSH_X: Zero shift, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 0),
+ BPF_ALU64_REG(BPF_RSH, R0, R1),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x81234567 } }
+ },
+ /* BPF_ALU | BPF_RSH | BPF_K */
+ {
+ "ALU_RSH_K: 2 >> 1 = 1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 2),
+ BPF_ALU32_IMM(BPF_RSH, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "ALU_RSH_K: 0x80000000 >> 31 = 1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x80000000),
+ BPF_ALU32_IMM(BPF_RSH, R0, 31),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "ALU_RSH_K: 0x12345678 >> 20 = 0x123",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0x12345678),
+ BPF_ALU32_IMM(BPF_RSH, R0, 20),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x123 } }
+ },
+ {
+ "ALU_RSH_K: 0x12345678 >> 0 = 0x12345678",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0x12345678),
+ BPF_ALU32_IMM(BPF_RSH, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x12345678 } }
+ },
+ {
+ "ALU64_RSH_K: 2 >> 1 = 1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 2),
+ BPF_ALU64_IMM(BPF_RSH, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "ALU64_RSH_K: 0x80000000 >> 31 = 1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x80000000),
+ BPF_ALU64_IMM(BPF_RSH, R0, 31),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "ALU64_RSH_K: Shift < 32, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_RSH, R0, 12),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x56789abc } }
+ },
+ {
+ "ALU64_RSH_K: Shift < 32, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_RSH, R0, 12),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x00081234 } }
+ },
+ {
+ "ALU64_RSH_K: Shift > 32, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_RSH, R0, 36),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x08123456 } }
+ },
+ {
+ "ALU64_RSH_K: Shift > 32, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_RSH, R0, 36),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } }
+ },
+ {
+ "ALU64_RSH_K: Shift == 32, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x81234567 } }
+ },
+ {
+ "ALU64_RSH_K: Shift == 32, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } }
+ },
+ {
+ "ALU64_RSH_K: Zero shift",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_RSH, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x89abcdef } }
+ },
+ /* BPF_ALU | BPF_ARSH | BPF_X */
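+ /*
+ * Unlike the logical BPF_RSH above, BPF_ARSH replicates the sign
+ * bit. The 64-bit source operands below therefore have bit 63 set,
+ * and the high-word checks expect sign-filled values such as
+ * 0xfff81234 and -1.
+ */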
+ {
+ "ALU32_ARSH_X: -1234 >> 7 = -10",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -1234),
+ BPF_ALU32_IMM(BPF_MOV, R1, 7),
+ BPF_ALU32_REG(BPF_ARSH, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -10 } }
+ },
+ {
+ "ALU64_ARSH_X: 0xff00ff0000000000 >> 40 = 0xffffffffffff00ff",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0xff00ff0000000000LL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 40),
+ BPF_ALU64_REG(BPF_ARSH, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xffff00ff } },
+ },
+ {
+ "ALU64_ARSH_X: Shift < 32, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 12),
+ BPF_ALU64_REG(BPF_ARSH, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x56789abc } }
+ },
+ {
+ "ALU64_ARSH_X: Shift < 32, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 12),
+ BPF_ALU64_REG(BPF_ARSH, R0, R1),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xfff81234 } }
+ },
+ {
+ "ALU64_ARSH_X: Shift > 32, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 36),
+ BPF_ALU64_REG(BPF_ARSH, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xf8123456 } }
+ },
+ {
+ "ALU64_ARSH_X: Shift > 32, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 36),
+ BPF_ALU64_REG(BPF_ARSH, R0, R1),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -1 } }
+ },
+ {
+ "ALU64_ARSH_X: Shift == 32, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 32),
+ BPF_ALU64_REG(BPF_ARSH, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x81234567 } }
+ },
+ {
+ "ALU64_ARSH_X: Shift == 32, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 32),
+ BPF_ALU64_REG(BPF_ARSH, R0, R1),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -1 } }
+ },
+ {
+ "ALU64_ARSH_X: Zero shift, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 0),
+ BPF_ALU64_REG(BPF_ARSH, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x89abcdef } }
+ },
+ {
+ "ALU64_ARSH_X: Zero shift, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 0),
+ BPF_ALU64_REG(BPF_ARSH, R0, R1),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x81234567 } }
+ },
+ /* BPF_ALU | BPF_ARSH | BPF_K */
+ {
+ "ALU32_ARSH_K: -1234 >> 7 = -10",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -1234),
+ BPF_ALU32_IMM(BPF_ARSH, R0, 7),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -10 } }
+ },
+ {
+ "ALU32_ARSH_K: -1234 >> 0 = -1234",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -1234),
+ BPF_ALU32_IMM(BPF_ARSH, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -1234 } }
+ },
+ {
+ "ALU64_ARSH_K: 0xff00ff0000000000 >> 40 = 0xffffffffffff00ff",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0xff00ff0000000000LL),
+ BPF_ALU64_IMM(BPF_ARSH, R0, 40),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xffff00ff } },
+ },
+ {
+ "ALU64_ARSH_K: Shift < 32, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_ARSH, R0, 12),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x56789abc } }
+ },
+ {
+ "ALU64_ARSH_K: Shift < 32, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_ARSH, R0, 12),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xfff81234 } }
+ },
+ {
+ "ALU64_ARSH_K: Shift > 32, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_ARSH, R0, 36),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xf8123456 } }
+ },
+ {
+ "ALU64_ARSH_K: Shift > 32, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0xf123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_ARSH, R0, 36),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -1 } }
+ },
+ {
+ "ALU64_ARSH_K: Shift == 32, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_ARSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x81234567 } }
+ },
+ {
+ "ALU64_ARSH_K: Shift == 32, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_ARSH, R0, 32),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -1 } }
+ },
+ {
+ "ALU64_ARSH_K: Zero shift",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_ARSH, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x89abcdef } }
+ },
+ /* BPF_ALU | BPF_NEG */
+ {
+ "ALU_NEG: -(3) = -3",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 3),
+ BPF_ALU32_IMM(BPF_NEG, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -3 } },
+ },
+ {
+ "ALU_NEG: -(-3) = 3",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -3),
+ BPF_ALU32_IMM(BPF_NEG, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 3 } },
+ },
+ {
+ "ALU64_NEG: -(3) = -3",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 3),
+ BPF_ALU64_IMM(BPF_NEG, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -3 } },
+ },
+ {
+ "ALU64_NEG: -(-3) = 3",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, -3),
+ BPF_ALU64_IMM(BPF_NEG, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 3 } },
+ },
+ /* BPF_ALU | BPF_END | BPF_FROM_BE */
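+ /*
+ * The expected values are wrapped in cpu_to_be*() so the same table
+ * works on either host endianness: on big-endian hosts BPF_FROM_BE
+ * is a no-op apart from the zero extension of 16- and 32-bit
+ * results, while on little-endian hosts it also byte-swaps the
+ * truncated value.
+ */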
+ {
+ "ALU_END_FROM_BE 16: 0x0123456789abcdef -> 0xcdef",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ENDIAN(BPF_FROM_BE, R0, 16),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, cpu_to_be16(0xcdef) } },
+ },
+ {
+ "ALU_END_FROM_BE 32: 0x0123456789abcdef -> 0x89abcdef",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ENDIAN(BPF_FROM_BE, R0, 32),
+ BPF_ALU64_REG(BPF_MOV, R1, R0),
+ BPF_ALU64_IMM(BPF_RSH, R1, 32),
+ BPF_ALU32_REG(BPF_ADD, R0, R1), /* R1 = 0 */
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, cpu_to_be32(0x89abcdef) } },
+ },
+ {
+ "ALU_END_FROM_BE 64: 0x0123456789abcdef -> 0x89abcdef",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ENDIAN(BPF_FROM_BE, R0, 64),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, (u32) cpu_to_be64(0x0123456789abcdefLL) } },
+ },
+ {
+ "ALU_END_FROM_BE 64: 0x0123456789abcdef >> 32 -> 0x01234567",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ENDIAN(BPF_FROM_BE, R0, 64),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, (u32) (cpu_to_be64(0x0123456789abcdefLL) >> 32) } },
+ },
+ /* BPF_ALU | BPF_END | BPF_FROM_BE, reversed */
+ {
+ "ALU_END_FROM_BE 16: 0xfedcba9876543210 -> 0x3210",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
+ BPF_ENDIAN(BPF_FROM_BE, R0, 16),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, cpu_to_be16(0x3210) } },
+ },
+ {
+ "ALU_END_FROM_BE 32: 0xfedcba9876543210 -> 0x76543210",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
+ BPF_ENDIAN(BPF_FROM_BE, R0, 32),
+ BPF_ALU64_REG(BPF_MOV, R1, R0),
+ BPF_ALU64_IMM(BPF_RSH, R1, 32),
+ BPF_ALU32_REG(BPF_ADD, R0, R1), /* R1 = 0 */
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, cpu_to_be32(0x76543210) } },
+ },
+ {
+ "ALU_END_FROM_BE 64: 0xfedcba9876543210 -> 0x76543210",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
+ BPF_ENDIAN(BPF_FROM_BE, R0, 64),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, (u32) cpu_to_be64(0xfedcba9876543210ULL) } },
+ },
+ {
+ "ALU_END_FROM_BE 64: 0xfedcba9876543210 >> 32 -> 0xfedcba98",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
+ BPF_ENDIAN(BPF_FROM_BE, R0, 64),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, (u32) (cpu_to_be64(0xfedcba9876543210ULL) >> 32) } },
+ },
+ /* BPF_ALU | BPF_END | BPF_FROM_LE */
+ {
+ "ALU_END_FROM_LE 16: 0x0123456789abcdef -> 0xefcd",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ENDIAN(BPF_FROM_LE, R0, 16),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, cpu_to_le16(0xcdef) } },
+ },
+ {
+ "ALU_END_FROM_LE 32: 0x0123456789abcdef -> 0xefcdab89",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ENDIAN(BPF_FROM_LE, R0, 32),
+ BPF_ALU64_REG(BPF_MOV, R1, R0),
+ BPF_ALU64_IMM(BPF_RSH, R1, 32),
+ BPF_ALU32_REG(BPF_ADD, R0, R1), /* R1 = 0 */
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, cpu_to_le32(0x89abcdef) } },
+ },
+ {
+ "ALU_END_FROM_LE 64: 0x0123456789abcdef -> 0x67452301",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ENDIAN(BPF_FROM_LE, R0, 64),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, (u32) cpu_to_le64(0x0123456789abcdefLL) } },
+ },
+ {
+ "ALU_END_FROM_LE 64: 0x0123456789abcdef >> 32 -> 0xefcdab89",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ENDIAN(BPF_FROM_LE, R0, 64),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, (u32) (cpu_to_le64(0x0123456789abcdefLL) >> 32) } },
+ },
+ /* BPF_ALU | BPF_END | BPF_FROM_LE, reversed */
+ {
+ "ALU_END_FROM_LE 16: 0xfedcba9876543210 -> 0x1032",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
+ BPF_ENDIAN(BPF_FROM_LE, R0, 16),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, cpu_to_le16(0x3210) } },
+ },
+ {
+ "ALU_END_FROM_LE 32: 0xfedcba9876543210 -> 0x10325476",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
+ BPF_ENDIAN(BPF_FROM_LE, R0, 32),
+ BPF_ALU64_REG(BPF_MOV, R1, R0),
+ BPF_ALU64_IMM(BPF_RSH, R1, 32),
+ BPF_ALU32_REG(BPF_ADD, R0, R1), /* R1 = 0 */
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, cpu_to_le32(0x76543210) } },
+ },
+ {
+ "ALU_END_FROM_LE 64: 0xfedcba9876543210 -> 0x10325476",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
+ BPF_ENDIAN(BPF_FROM_LE, R0, 64),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, (u32) cpu_to_le64(0xfedcba9876543210ULL) } },
+ },
+ {
+ "ALU_END_FROM_LE 64: 0xfedcba9876543210 >> 32 -> 0x98badcfe",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
+ BPF_ENDIAN(BPF_FROM_LE, R0, 64),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, (u32) (cpu_to_le64(0xfedcba9876543210ULL) >> 32) } },
+ },
+ /* BPF_LDX_MEM B/H/W/DW */
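+ /*
+ * The "base" and "MSB set" tests store a double word on the stack
+ * and load a single byte, half word or word back. The #ifdef picks
+ * the load offset so the same bytes are read on both endiannesses:
+ * the least significant part of the double word sits at the lowest
+ * stack address on little-endian but at the highest on big-endian.
+ */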
+ {
+ "BPF_LDX_MEM | BPF_B, base",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x0102030405060708ULL),
+ BPF_LD_IMM64(R2, 0x0000000000000008ULL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -8),
+#ifdef __BIG_ENDIAN
+ BPF_LDX_MEM(BPF_B, R0, R10, -1),
+#else
+ BPF_LDX_MEM(BPF_B, R0, R10, -8),
+#endif
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 8,
+ },
+ {
+ "BPF_LDX_MEM | BPF_B, MSB set",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R2, 0x0000000000000088ULL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -8),
+#ifdef __BIG_ENDIAN
+ BPF_LDX_MEM(BPF_B, R0, R10, -1),
+#else
+ BPF_LDX_MEM(BPF_B, R0, R10, -8),
+#endif
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 8,
+ },
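+ /*
+ * The offset tests below are marked FLAG_LARGE_MEM: R1 points to a
+ * test data buffer whose size is given by the first value in the
+ * expected-result tuple (e.g. 512), so the store/load pair can use
+ * offsets that would not fit on the BPF stack.
+ */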
+ {
+ "BPF_LDX_MEM | BPF_B, negative offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000000000088ULL),
+ BPF_ALU64_IMM(BPF_ADD, R1, 512),
+ BPF_STX_MEM(BPF_B, R1, R2, -256),
+ BPF_LDX_MEM(BPF_B, R0, R1, -256),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 512, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_B, small positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000000000088ULL),
+ BPF_STX_MEM(BPF_B, R1, R2, 256),
+ BPF_LDX_MEM(BPF_B, R0, R1, 256),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 512, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_B, large positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000000000088ULL),
+ BPF_STX_MEM(BPF_B, R1, R2, 4096),
+ BPF_LDX_MEM(BPF_B, R0, R1, 4096),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 4096 + 16, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_H, base",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x0102030405060708ULL),
+ BPF_LD_IMM64(R2, 0x0000000000000708ULL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -8),
+#ifdef __BIG_ENDIAN
+ BPF_LDX_MEM(BPF_H, R0, R10, -2),
+#else
+ BPF_LDX_MEM(BPF_H, R0, R10, -8),
+#endif
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 8,
+ },
+ {
+ "BPF_LDX_MEM | BPF_H, MSB set",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R2, 0x0000000000008788ULL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -8),
+#ifdef __BIG_ENDIAN
+ BPF_LDX_MEM(BPF_H, R0, R10, -2),
+#else
+ BPF_LDX_MEM(BPF_H, R0, R10, -8),
+#endif
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 8,
+ },
+ {
+ "BPF_LDX_MEM | BPF_H, negative offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000000008788ULL),
+ BPF_ALU64_IMM(BPF_ADD, R1, 512),
+ BPF_STX_MEM(BPF_H, R1, R2, -256),
+ BPF_LDX_MEM(BPF_H, R0, R1, -256),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 512, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_H, small positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000000008788ULL),
+ BPF_STX_MEM(BPF_H, R1, R2, 256),
+ BPF_LDX_MEM(BPF_H, R0, R1, 256),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 512, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_H, large positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000000008788ULL),
+ BPF_STX_MEM(BPF_H, R1, R2, 8192),
+ BPF_LDX_MEM(BPF_H, R0, R1, 8192),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 8192 + 16, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_H, unaligned positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000000008788ULL),
+ BPF_STX_MEM(BPF_H, R1, R2, 13),
+ BPF_LDX_MEM(BPF_H, R0, R1, 13),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 32, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_W, base",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x0102030405060708ULL),
+ BPF_LD_IMM64(R2, 0x0000000005060708ULL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -8),
+#ifdef __BIG_ENDIAN
+ BPF_LDX_MEM(BPF_W, R0, R10, -4),
+#else
+ BPF_LDX_MEM(BPF_W, R0, R10, -8),
+#endif
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 8,
+ },
+ {
+ "BPF_LDX_MEM | BPF_W, MSB set",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R2, 0x0000000085868788ULL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -8),
+#ifdef __BIG_ENDIAN
+ BPF_LDX_MEM(BPF_W, R0, R10, -4),
+#else
+ BPF_LDX_MEM(BPF_W, R0, R10, -8),
+#endif
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 8,
+ },
+ {
+ "BPF_LDX_MEM | BPF_W, negative offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000085868788ULL),
+ BPF_ALU64_IMM(BPF_ADD, R1, 512),
+ BPF_STX_MEM(BPF_W, R1, R2, -256),
+ BPF_LDX_MEM(BPF_W, R0, R1, -256),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 512, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_W, small positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000085868788ULL),
+ BPF_STX_MEM(BPF_W, R1, R2, 256),
+ BPF_LDX_MEM(BPF_W, R0, R1, 256),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 512, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_W, large positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000085868788ULL),
+ BPF_STX_MEM(BPF_W, R1, R2, 16384),
+ BPF_LDX_MEM(BPF_W, R0, R1, 16384),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 16384 + 16, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_W, unaligned positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x0000000085868788ULL),
+ BPF_STX_MEM(BPF_W, R1, R2, 13),
+ BPF_LDX_MEM(BPF_W, R0, R1, 13),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 32, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_DW, base",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x0102030405060708ULL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -8),
+ BPF_LDX_MEM(BPF_DW, R0, R10, -8),
+ BPF_JMP_REG(BPF_JNE, R0, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 8,
+ },
+ {
+ "BPF_LDX_MEM | BPF_DW, MSB set",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x8182838485868788ULL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -8),
+ BPF_LDX_MEM(BPF_DW, R0, R10, -8),
+ BPF_JMP_REG(BPF_JNE, R0, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 8,
+ },
+ {
+ "BPF_LDX_MEM | BPF_DW, negative offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_ALU64_IMM(BPF_ADD, R1, 512),
+ BPF_STX_MEM(BPF_DW, R1, R2, -256),
+ BPF_LDX_MEM(BPF_DW, R0, R1, -256),
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 512, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_DW, small positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_STX_MEM(BPF_DW, R1, R2, 256),
+ BPF_LDX_MEM(BPF_DW, R0, R1, 256),
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 512, 0 } },
+ .stack_depth = 8,
+ },
+ {
+ "BPF_LDX_MEM | BPF_DW, large positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_STX_MEM(BPF_DW, R1, R2, 32760),
+ BPF_LDX_MEM(BPF_DW, R0, R1, 32760),
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 32768, 0 } },
+ .stack_depth = 0,
+ },
+ {
+ "BPF_LDX_MEM | BPF_DW, unaligned positive offset",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_STX_MEM(BPF_DW, R1, R2, 13),
+ BPF_LDX_MEM(BPF_DW, R0, R1, 13),
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_LARGE_MEM,
+ { },
+ { { 32, 0 } },
+ .stack_depth = 0,
+ },
+ /* BPF_STX_MEM B/H/W/DW */
+ {
+ "BPF_STX_MEM | BPF_B",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x8090a0b0c0d0e0f0ULL),
+ BPF_LD_IMM64(R2, 0x0102030405060708ULL),
+ BPF_LD_IMM64(R3, 0x8090a0b0c0d0e008ULL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -8),
+#ifdef __BIG_ENDIAN
+ BPF_STX_MEM(BPF_B, R10, R2, -1),
+#else
+ BPF_STX_MEM(BPF_B, R10, R2, -8),
+#endif
+ BPF_LDX_MEM(BPF_DW, R0, R10, -8),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 8,
+ },
+ {
+ "BPF_STX_MEM | BPF_B, MSB set",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x8090a0b0c0d0e0f0ULL),
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x8090a0b0c0d0e088ULL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -8),
+#ifdef __BIG_ENDIAN
+ BPF_STX_MEM(BPF_B, R10, R2, -1),
+#else
+ BPF_STX_MEM(BPF_B, R10, R2, -8),
+#endif
+ BPF_LDX_MEM(BPF_DW, R0, R10, -8),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 8,
+ },
+ {
+ "BPF_STX_MEM | BPF_H",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x8090a0b0c0d0e0f0ULL),
+ BPF_LD_IMM64(R2, 0x0102030405060708ULL),
+ BPF_LD_IMM64(R3, 0x8090a0b0c0d00708ULL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -8),
+#ifdef __BIG_ENDIAN
+ BPF_STX_MEM(BPF_H, R10, R2, -2),
+#else
+ BPF_STX_MEM(BPF_H, R10, R2, -8),
+#endif
+ BPF_LDX_MEM(BPF_DW, R0, R10, -8),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 8,
+ },
+ {
+ "BPF_STX_MEM | BPF_H, MSB set",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x8090a0b0c0d0e0f0ULL),
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x8090a0b0c0d08788ULL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -8),
+#ifdef __BIG_ENDIAN
+ BPF_STX_MEM(BPF_H, R10, R2, -2),
+#else
+ BPF_STX_MEM(BPF_H, R10, R2, -8),
+#endif
+ BPF_LDX_MEM(BPF_DW, R0, R10, -8),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 8,
+ },
+ {
+ "BPF_STX_MEM | BPF_W",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x8090a0b0c0d0e0f0ULL),
+ BPF_LD_IMM64(R2, 0x0102030405060708ULL),
+ BPF_LD_IMM64(R3, 0x8090a0b005060708ULL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -8),
+#ifdef __BIG_ENDIAN
+ BPF_STX_MEM(BPF_W, R10, R2, -4),
+#else
+ BPF_STX_MEM(BPF_W, R10, R2, -8),
+#endif
+ BPF_LDX_MEM(BPF_DW, R0, R10, -8),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 8,
+ },
+ {
+ "BPF_STX_MEM | BPF_W, MSB set",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x8090a0b0c0d0e0f0ULL),
+ BPF_LD_IMM64(R2, 0x8182838485868788ULL),
+ BPF_LD_IMM64(R3, 0x8090a0b085868788ULL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -8),
+#ifdef __BIG_ENDIAN
+ BPF_STX_MEM(BPF_W, R10, R2, -4),
+#else
+ BPF_STX_MEM(BPF_W, R10, R2, -8),
+#endif
+ BPF_LDX_MEM(BPF_DW, R0, R10, -8),
+ BPF_JMP_REG(BPF_JNE, R0, R3, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 8,
+ },
+ /* BPF_ST(X) | BPF_MEM | BPF_B/H/W/DW */
+ {
+ "ST_MEM_B: Store/Load byte: max negative",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_ST_MEM(BPF_B, R10, -40, 0xff),
+ BPF_LDX_MEM(BPF_B, R0, R10, -40),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xff } },
+ .stack_depth = 40,
+ },
+ {
+ "ST_MEM_B: Store/Load byte: max positive",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_ST_MEM(BPF_B, R10, -40, 0x7f),
+ BPF_LDX_MEM(BPF_B, R0, R10, -40),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x7f } },
+ .stack_depth = 40,
+ },
+ {
+ "STX_MEM_B: Store/Load byte: max negative",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0),
+ BPF_LD_IMM64(R1, 0xffLL),
+ BPF_STX_MEM(BPF_B, R10, R1, -40),
+ BPF_LDX_MEM(BPF_B, R0, R10, -40),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xff } },
+ .stack_depth = 40,
+ },
+ {
+ "ST_MEM_H: Store/Load half word: max negative",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_ST_MEM(BPF_H, R10, -40, 0xffff),
+ BPF_LDX_MEM(BPF_H, R0, R10, -40),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xffff } },
+ .stack_depth = 40,
+ },
+ {
+ "ST_MEM_H: Store/Load half word: max positive",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_ST_MEM(BPF_H, R10, -40, 0x7fff),
+ BPF_LDX_MEM(BPF_H, R0, R10, -40),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x7fff } },
+ .stack_depth = 40,
+ },
+ {
+ "STX_MEM_H: Store/Load half word: max negative",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0),
+ BPF_LD_IMM64(R1, 0xffffLL),
+ BPF_STX_MEM(BPF_H, R10, R1, -40),
+ BPF_LDX_MEM(BPF_H, R0, R10, -40),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xffff } },
+ .stack_depth = 40,
+ },
+ {
+ "ST_MEM_W: Store/Load word: max negative",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_ST_MEM(BPF_W, R10, -40, 0xffffffff),
+ BPF_LDX_MEM(BPF_W, R0, R10, -40),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xffffffff } },
+ .stack_depth = 40,
+ },
+ {
+ "ST_MEM_W: Store/Load word: max positive",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_ST_MEM(BPF_W, R10, -40, 0x7fffffff),
+ BPF_LDX_MEM(BPF_W, R0, R10, -40),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x7fffffff } },
+ .stack_depth = 40,
+ },
+ {
+ "STX_MEM_W: Store/Load word: max negative",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0),
+ BPF_LD_IMM64(R1, 0xffffffffLL),
+ BPF_STX_MEM(BPF_W, R10, R1, -40),
+ BPF_LDX_MEM(BPF_W, R0, R10, -40),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xffffffff } },
+ .stack_depth = 40,
+ },
+ {
+ "ST_MEM_DW: Store/Load double word: max negative",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_ST_MEM(BPF_DW, R10, -40, 0xffffffff),
+ BPF_LDX_MEM(BPF_DW, R0, R10, -40),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xffffffff } },
+ .stack_depth = 40,
+ },
+ {
+ "ST_MEM_DW: Store/Load double word: max negative 2",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0xffff00000000ffffLL),
+ BPF_LD_IMM64(R3, 0xffffffffffffffffLL),
+ BPF_ST_MEM(BPF_DW, R10, -40, 0xffffffff),
+ BPF_LDX_MEM(BPF_DW, R2, R10, -40),
+ BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ .stack_depth = 40,
+ },
+ {
+ "ST_MEM_DW: Store/Load double word: max positive",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_ST_MEM(BPF_DW, R10, -40, 0x7fffffff),
+ BPF_LDX_MEM(BPF_DW, R0, R10, -40),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x7fffffff } },
+ .stack_depth = 40,
+ },
+ {
+ "STX_MEM_DW: Store/Load double word: max negative",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0),
+ BPF_LD_IMM64(R1, 0xffffffffffffffffLL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -40),
+ BPF_LDX_MEM(BPF_DW, R0, R10, -40),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xffffffff } },
+ .stack_depth = 40,
+ },
+ {
+ "STX_MEM_DW: Store double word: first word in memory",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0),
+ BPF_LD_IMM64(R1, 0x0123456789abcdefLL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -40),
+ BPF_LDX_MEM(BPF_W, R0, R10, -40),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+#ifdef __BIG_ENDIAN
+ { { 0, 0x01234567 } },
+#else
+ { { 0, 0x89abcdef } },
+#endif
+ .stack_depth = 40,
+ },
+ {
+ "STX_MEM_DW: Store double word: second word in memory",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0),
+ BPF_LD_IMM64(R1, 0x0123456789abcdefLL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -40),
+ BPF_LDX_MEM(BPF_W, R0, R10, -36),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+#ifdef __BIG_ENDIAN
+ { { 0, 0x89abcdef } },
+#else
+ { { 0, 0x01234567 } },
+#endif
+ .stack_depth = 40,
+ },
+ /* BPF_STX | BPF_ATOMIC | BPF_W/DW */
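+ /*
+ * These two programs are generated at run time by the fill helpers
+ * named in .fill_helper rather than taken from a static instruction
+ * table; each builds a long chain of atomic adds on a stack slot.
+ */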
+ {
+ "STX_XADD_W: X + 1 + 1 + 1 + ...",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 4134 } },
+ .fill_helper = bpf_fill_stxw,
+ },
+ {
+ "STX_XADD_DW: X + 1 + 1 + 1 + ...",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 4134 } },
+ .fill_helper = bpf_fill_stxdw,
+ },
+ /*
+ * Exhaustive tests of atomic operation variants.
+ * Individual tests are expanded from template macros for all
+ * combinations of ALU operation, word size and fetching.
+ */
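+ /*
+ * For 32-bit (BPF_W) operations, BPF_ATOMIC_POISON() fills the
+ * upper half of the 64-bit source register with a known garbage
+ * pattern, to catch implementations that let it leak into the
+ * word-sized result.
+ */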
+#define BPF_ATOMIC_POISON(width) ((width) == BPF_W ? (0xbaadf00dULL << 32) : 0)
+
+#define BPF_ATOMIC_OP_TEST1(width, op, logic, old, update, result) \
+{ \
+ "BPF_ATOMIC | " #width ", " #op ": Test: " \
+ #old " " #logic " " #update " = " #result, \
+ .u.insns_int = { \
+ BPF_LD_IMM64(R5, (update) | BPF_ATOMIC_POISON(width)), \
+ BPF_ST_MEM(width, R10, -40, old), \
+ BPF_ATOMIC_OP(width, op, R10, R5, -40), \
+ BPF_LDX_MEM(width, R0, R10, -40), \
+ BPF_ALU64_REG(BPF_MOV, R1, R0), \
+ BPF_ALU64_IMM(BPF_RSH, R1, 32), \
+ BPF_ALU64_REG(BPF_OR, R0, R1), \
+ BPF_EXIT_INSN(), \
+ }, \
+ INTERNAL, \
+ { }, \
+ { { 0, result } }, \
+ .stack_depth = 40, \
+}
+#define BPF_ATOMIC_OP_TEST2(width, op, logic, old, update, result) \
+{ \
+ "BPF_ATOMIC | " #width ", " #op ": Test side effects, r10: " \
+ #old " " #logic " " #update " = " #result, \
+ .u.insns_int = { \
+ BPF_ALU64_REG(BPF_MOV, R1, R10), \
+ BPF_LD_IMM64(R0, (update) | BPF_ATOMIC_POISON(width)), \
+ BPF_ST_MEM(width, R10, -40, old), \
+ BPF_ATOMIC_OP(width, op, R10, R0, -40), \
+ BPF_ALU64_REG(BPF_MOV, R0, R10), \
+ BPF_ALU64_REG(BPF_SUB, R0, R1), \
+ BPF_ALU64_REG(BPF_MOV, R1, R0), \
+ BPF_ALU64_IMM(BPF_RSH, R1, 32), \
+ BPF_ALU64_REG(BPF_OR, R0, R1), \
+ BPF_EXIT_INSN(), \
+ }, \
+ INTERNAL, \
+ { }, \
+ { { 0, 0 } }, \
+ .stack_depth = 40, \
+}
+#define BPF_ATOMIC_OP_TEST3(width, op, logic, old, update, result) \
+{ \
+ "BPF_ATOMIC | " #width ", " #op ": Test side effects, r0: " \
+ #old " " #logic " " #update " = " #result, \
+ .u.insns_int = { \
+ BPF_ALU64_REG(BPF_MOV, R0, R10), \
+ BPF_LD_IMM64(R1, (update) | BPF_ATOMIC_POISON(width)), \
+ BPF_ST_MEM(width, R10, -40, old), \
+ BPF_ATOMIC_OP(width, op, R10, R1, -40), \
+ BPF_ALU64_REG(BPF_SUB, R0, R10), \
+ BPF_ALU64_REG(BPF_MOV, R1, R0), \
+ BPF_ALU64_IMM(BPF_RSH, R1, 32), \
+ BPF_ALU64_REG(BPF_OR, R0, R1), \
+ BPF_EXIT_INSN(), \
+ }, \
+ INTERNAL, \
+ { }, \
+ { { 0, 0 } }, \
+ .stack_depth = 40, \
+}
+#define BPF_ATOMIC_OP_TEST4(width, op, logic, old, update, result) \
+{ \
+ "BPF_ATOMIC | " #width ", " #op ": Test fetch: " \
+ #old " " #logic " " #update " = " #result, \
+ .u.insns_int = { \
+ BPF_LD_IMM64(R3, (update) | BPF_ATOMIC_POISON(width)), \
+ BPF_ST_MEM(width, R10, -40, old), \
+ BPF_ATOMIC_OP(width, op, R10, R3, -40), \
+ BPF_ALU32_REG(BPF_MOV, R0, R3), \
+ BPF_EXIT_INSN(), \
+ }, \
+ INTERNAL, \
+ { }, \
+ { { 0, (op) & BPF_FETCH ? old : update } }, \
+ .stack_depth = 40, \
+}
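+ /*
+ * As an example of the expansion,
+ * BPF_ATOMIC_OP_TEST1(BPF_W, BPF_ADD, +, 0x12, 0xab, 0xbd) yields a
+ * test named "BPF_ATOMIC | BPF_W, BPF_ADD: Test: 0x12 + 0xab = 0xbd"
+ * that stores 0x12 on the stack, atomically adds 0xab (with the
+ * upper half of the source register poisoned), folds the upper half
+ * of the loaded result into the lower half and expects 0xbd. TEST2
+ * and TEST3 check that R10 and R0 survive the operation, and TEST4
+ * checks the value left in the source register: the old memory
+ * value when BPF_FETCH is set, the unmodified update otherwise.
+ */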
+ /* BPF_ATOMIC | BPF_W: BPF_ADD */
+ BPF_ATOMIC_OP_TEST1(BPF_W, BPF_ADD, +, 0x12, 0xab, 0xbd),
+ BPF_ATOMIC_OP_TEST2(BPF_W, BPF_ADD, +, 0x12, 0xab, 0xbd),
+ BPF_ATOMIC_OP_TEST3(BPF_W, BPF_ADD, +, 0x12, 0xab, 0xbd),
+ BPF_ATOMIC_OP_TEST4(BPF_W, BPF_ADD, +, 0x12, 0xab, 0xbd),
+ /* BPF_ATOMIC | BPF_W: BPF_ADD | BPF_FETCH */
+ BPF_ATOMIC_OP_TEST1(BPF_W, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd),
+ BPF_ATOMIC_OP_TEST2(BPF_W, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd),
+ BPF_ATOMIC_OP_TEST3(BPF_W, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd),
+ BPF_ATOMIC_OP_TEST4(BPF_W, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd),
+ /* BPF_ATOMIC | BPF_DW: BPF_ADD */
+ BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_ADD, +, 0x12, 0xab, 0xbd),
+ BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_ADD, +, 0x12, 0xab, 0xbd),
+ BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_ADD, +, 0x12, 0xab, 0xbd),
+ BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_ADD, +, 0x12, 0xab, 0xbd),
+ /* BPF_ATOMIC | BPF_DW: BPF_ADD | BPF_FETCH */
+ BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd),
+ BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd),
+ BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd),
+ BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd),
+ /* BPF_ATOMIC | BPF_W: BPF_AND */
+ BPF_ATOMIC_OP_TEST1(BPF_W, BPF_AND, &, 0x12, 0xab, 0x02),
+ BPF_ATOMIC_OP_TEST2(BPF_W, BPF_AND, &, 0x12, 0xab, 0x02),
+ BPF_ATOMIC_OP_TEST3(BPF_W, BPF_AND, &, 0x12, 0xab, 0x02),
+ BPF_ATOMIC_OP_TEST4(BPF_W, BPF_AND, &, 0x12, 0xab, 0x02),
+ /* BPF_ATOMIC | BPF_W: BPF_AND | BPF_FETCH */
+ BPF_ATOMIC_OP_TEST1(BPF_W, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02),
+ BPF_ATOMIC_OP_TEST2(BPF_W, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02),
+ BPF_ATOMIC_OP_TEST3(BPF_W, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02),
+ BPF_ATOMIC_OP_TEST4(BPF_W, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02),
+ /* BPF_ATOMIC | BPF_DW: BPF_AND */
+ BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_AND, &, 0x12, 0xab, 0x02),
+ BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_AND, &, 0x12, 0xab, 0x02),
+ BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_AND, &, 0x12, 0xab, 0x02),
+ BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_AND, &, 0x12, 0xab, 0x02),
+ /* BPF_ATOMIC | BPF_DW: BPF_AND | BPF_FETCH */
+ BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02),
+ BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02),
+ BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02),
+ BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02),
+ /* BPF_ATOMIC | BPF_W: BPF_OR */
+ BPF_ATOMIC_OP_TEST1(BPF_W, BPF_OR, |, 0x12, 0xab, 0xbb),
+ BPF_ATOMIC_OP_TEST2(BPF_W, BPF_OR, |, 0x12, 0xab, 0xbb),
+ BPF_ATOMIC_OP_TEST3(BPF_W, BPF_OR, |, 0x12, 0xab, 0xbb),
+ BPF_ATOMIC_OP_TEST4(BPF_W, BPF_OR, |, 0x12, 0xab, 0xbb),
+ /* BPF_ATOMIC | BPF_W: BPF_OR | BPF_FETCH */
+ BPF_ATOMIC_OP_TEST1(BPF_W, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb),
+ BPF_ATOMIC_OP_TEST2(BPF_W, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb),
+ BPF_ATOMIC_OP_TEST3(BPF_W, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb),
+ BPF_ATOMIC_OP_TEST4(BPF_W, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb),
+ /* BPF_ATOMIC | BPF_DW: BPF_OR */
+ BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_OR, |, 0x12, 0xab, 0xbb),
+ BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_OR, |, 0x12, 0xab, 0xbb),
+ BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_OR, |, 0x12, 0xab, 0xbb),
+ BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_OR, |, 0x12, 0xab, 0xbb),
+ /* BPF_ATOMIC | BPF_DW: BPF_OR | BPF_FETCH */
+ BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb),
+ BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb),
+ BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb),
+ BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb),
+ /* BPF_ATOMIC | BPF_W: BPF_XOR */
+ BPF_ATOMIC_OP_TEST1(BPF_W, BPF_XOR, ^, 0x12, 0xab, 0xb9),
+ BPF_ATOMIC_OP_TEST2(BPF_W, BPF_XOR, ^, 0x12, 0xab, 0xb9),
+ BPF_ATOMIC_OP_TEST3(BPF_W, BPF_XOR, ^, 0x12, 0xab, 0xb9),
+ BPF_ATOMIC_OP_TEST4(BPF_W, BPF_XOR, ^, 0x12, 0xab, 0xb9),
+ /* BPF_ATOMIC | BPF_W: BPF_XOR | BPF_FETCH */
+ BPF_ATOMIC_OP_TEST1(BPF_W, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9),
+ BPF_ATOMIC_OP_TEST2(BPF_W, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9),
+ BPF_ATOMIC_OP_TEST3(BPF_W, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9),
+ BPF_ATOMIC_OP_TEST4(BPF_W, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9),
+ /* BPF_ATOMIC | BPF_DW: BPF_XOR */
+ BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_XOR, ^, 0x12, 0xab, 0xb9),
+ BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_XOR, ^, 0x12, 0xab, 0xb9),
+ BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_XOR, ^, 0x12, 0xab, 0xb9),
+ BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_XOR, ^, 0x12, 0xab, 0xb9),
+ /* BPF_ATOMIC | BPF_DW: BPF_XOR | BPF_FETCH */
+ BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9),
+ BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9),
+ BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9),
+ BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9),
+ /* BPF_ATOMIC | BPF_W: BPF_XCHG */
+ BPF_ATOMIC_OP_TEST1(BPF_W, BPF_XCHG, xchg, 0x12, 0xab, 0xab),
+ BPF_ATOMIC_OP_TEST2(BPF_W, BPF_XCHG, xchg, 0x12, 0xab, 0xab),
+ BPF_ATOMIC_OP_TEST3(BPF_W, BPF_XCHG, xchg, 0x12, 0xab, 0xab),
+ BPF_ATOMIC_OP_TEST4(BPF_W, BPF_XCHG, xchg, 0x12, 0xab, 0xab),
+ /* BPF_ATOMIC | BPF_DW: BPF_XCHG */
+ BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_XCHG, xchg, 0x12, 0xab, 0xab),
+ BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_XCHG, xchg, 0x12, 0xab, 0xab),
+ BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_XCHG, xchg, 0x12, 0xab, 0xab),
+ BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_XCHG, xchg, 0x12, 0xab, 0xab),
+#undef BPF_ATOMIC_POISON
+#undef BPF_ATOMIC_OP_TEST1
+#undef BPF_ATOMIC_OP_TEST2
+#undef BPF_ATOMIC_OP_TEST3
+#undef BPF_ATOMIC_OP_TEST4
+ /* BPF_ATOMIC | BPF_W, BPF_CMPXCHG */
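+ /*
+ * BPF_CMPXCHG implicitly uses R0 as the compare value: if R0
+ * matches the memory word, the source register is stored and R0
+ * keeps its (matching) old value; on a mismatch the memory is left
+ * untouched and R0 receives the current memory value. The tests
+ * below probe both outcomes as well as their side effects.
+ */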
+ {
+ "BPF_ATOMIC | BPF_W, BPF_CMPXCHG: Test successful return",
+ .u.insns_int = {
+ BPF_ST_MEM(BPF_W, R10, -40, 0x01234567),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0x01234567),
+ BPF_ALU32_IMM(BPF_MOV, R3, 0x89abcdef),
+ BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R3, -40),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x01234567 } },
+ .stack_depth = 40,
+ },
+ {
+ "BPF_ATOMIC | BPF_W, BPF_CMPXCHG: Test successful store",
+ .u.insns_int = {
+ BPF_ST_MEM(BPF_W, R10, -40, 0x01234567),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0x01234567),
+ BPF_ALU32_IMM(BPF_MOV, R3, 0x89abcdef),
+ BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R3, -40),
+ BPF_LDX_MEM(BPF_W, R0, R10, -40),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x89abcdef } },
+ .stack_depth = 40,
+ },
+ {
+ "BPF_ATOMIC | BPF_W, BPF_CMPXCHG: Test failure return",
+ .u.insns_int = {
+ BPF_ST_MEM(BPF_W, R10, -40, 0x01234567),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0x76543210),
+ BPF_ALU32_IMM(BPF_MOV, R3, 0x89abcdef),
+ BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R3, -40),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x01234567 } },
+ .stack_depth = 40,
+ },
+ {
+ "BPF_ATOMIC | BPF_W, BPF_CMPXCHG: Test failure store",
+ .u.insns_int = {
+ BPF_ST_MEM(BPF_W, R10, -40, 0x01234567),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0x76543210),
+ BPF_ALU32_IMM(BPF_MOV, R3, 0x89abcdef),
+ BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R3, -40),
+ BPF_LDX_MEM(BPF_W, R0, R10, -40),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x01234567 } },
+ .stack_depth = 40,
+ },
+ {
+ "BPF_ATOMIC | BPF_W, BPF_CMPXCHG: Test side effects",
+ .u.insns_int = {
+ BPF_ST_MEM(BPF_W, R10, -40, 0x01234567),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0x01234567),
+ BPF_ALU32_IMM(BPF_MOV, R3, 0x89abcdef),
+ BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R3, -40),
+ BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R3, -40),
+ BPF_ALU32_REG(BPF_MOV, R0, R3),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x89abcdef } },
+ .stack_depth = 40,
+ },
+ /* BPF_ATOMIC | BPF_DW, BPF_CMPXCHG */
+ {
+ "BPF_ATOMIC | BPF_DW, BPF_CMPXCHG: Test successful return",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x0123456789abcdefULL),
+ BPF_LD_IMM64(R2, 0xfedcba9876543210ULL),
+ BPF_ALU64_REG(BPF_MOV, R0, R1),
+ BPF_STX_MEM(BPF_DW, R10, R1, -40),
+ BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -40),
+ BPF_JMP_REG(BPF_JNE, R0, R1, 1),
+ BPF_ALU64_REG(BPF_SUB, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 40,
+ },
+ {
+ "BPF_ATOMIC | BPF_DW, BPF_CMPXCHG: Test successful store",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x0123456789abcdefULL),
+ BPF_LD_IMM64(R2, 0xfedcba9876543210ULL),
+ BPF_ALU64_REG(BPF_MOV, R0, R1),
+ BPF_STX_MEM(BPF_DW, R10, R0, -40),
+ BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -40),
+ BPF_LDX_MEM(BPF_DW, R0, R10, -40),
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_REG(BPF_SUB, R0, R2),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 40,
+ },
+ {
+ "BPF_ATOMIC | BPF_DW, BPF_CMPXCHG: Test failure return",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x0123456789abcdefULL),
+ BPF_LD_IMM64(R2, 0xfedcba9876543210ULL),
+ BPF_ALU64_REG(BPF_MOV, R0, R1),
+ BPF_ALU64_IMM(BPF_ADD, R0, 1),
+ BPF_STX_MEM(BPF_DW, R10, R1, -40),
+ BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -40),
+ BPF_JMP_REG(BPF_JNE, R0, R1, 1),
+ BPF_ALU64_REG(BPF_SUB, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 40,
+ },
+ {
+ "BPF_ATOMIC | BPF_DW, BPF_CMPXCHG: Test failure store",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x0123456789abcdefULL),
+ BPF_LD_IMM64(R2, 0xfedcba9876543210ULL),
+ BPF_ALU64_REG(BPF_MOV, R0, R1),
+ BPF_ALU64_IMM(BPF_ADD, R0, 1),
+ BPF_STX_MEM(BPF_DW, R10, R1, -40),
+ BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -40),
+ BPF_LDX_MEM(BPF_DW, R0, R10, -40),
+ BPF_JMP_REG(BPF_JNE, R0, R1, 1),
+ BPF_ALU64_REG(BPF_SUB, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 40,
+ },
+ {
+ "BPF_ATOMIC | BPF_DW, BPF_CMPXCHG: Test side effects",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x0123456789abcdefULL),
+ BPF_LD_IMM64(R2, 0xfedcba9876543210ULL),
+ BPF_ALU64_REG(BPF_MOV, R0, R1),
+ BPF_STX_MEM(BPF_DW, R10, R1, -40),
+ BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -40),
+ BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_REG(BPF_SUB, R0, R2),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 40,
+ },
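+	/*
+	 * The BPF_JMP32 conditionals below compare only the low 32 bits of
+	 * their operands; the "Large immediate" cases additionally verify
+	 * that the full 32-bit immediate is used rather than a truncated
+	 * one.
+	 */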
+ /* BPF_JMP32 | BPF_JEQ | BPF_K */
+ {
+ "JMP32_JEQ_K: Small immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 123),
+ BPF_JMP32_IMM(BPF_JEQ, R0, 321, 1),
+ BPF_JMP32_IMM(BPF_JEQ, R0, 123, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 123 } }
+ },
+ {
+ "JMP32_JEQ_K: Large immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 12345678),
+ BPF_JMP32_IMM(BPF_JEQ, R0, 12345678 & 0xffff, 1),
+ BPF_JMP32_IMM(BPF_JEQ, R0, 12345678, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 12345678 } }
+ },
+ {
+ "JMP32_JEQ_K: negative immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -123),
+ BPF_JMP32_IMM(BPF_JEQ, R0, 123, 1),
+ BPF_JMP32_IMM(BPF_JEQ, R0, -123, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -123 } }
+ },
+ /* BPF_JMP32 | BPF_JEQ | BPF_X */
+ {
+ "JMP32_JEQ_X",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 1234),
+ BPF_ALU32_IMM(BPF_MOV, R1, 4321),
+ BPF_JMP32_REG(BPF_JEQ, R0, R1, 2),
+ BPF_ALU32_IMM(BPF_MOV, R1, 1234),
+ BPF_JMP32_REG(BPF_JEQ, R0, R1, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1234 } }
+ },
+ /* BPF_JMP32 | BPF_JNE | BPF_K */
+ {
+ "JMP32_JNE_K: Small immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 123),
+ BPF_JMP32_IMM(BPF_JNE, R0, 123, 1),
+ BPF_JMP32_IMM(BPF_JNE, R0, 321, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 123 } }
+ },
+ {
+ "JMP32_JNE_K: Large immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 12345678),
+ BPF_JMP32_IMM(BPF_JNE, R0, 12345678, 1),
+ BPF_JMP32_IMM(BPF_JNE, R0, 12345678 & 0xffff, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 12345678 } }
+ },
+ {
+ "JMP32_JNE_K: negative immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -123),
+ BPF_JMP32_IMM(BPF_JNE, R0, -123, 1),
+ BPF_JMP32_IMM(BPF_JNE, R0, 123, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -123 } }
+ },
+ /* BPF_JMP32 | BPF_JNE | BPF_X */
+ {
+ "JMP32_JNE_X",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 1234),
+ BPF_ALU32_IMM(BPF_MOV, R1, 1234),
+ BPF_JMP32_REG(BPF_JNE, R0, R1, 2),
+ BPF_ALU32_IMM(BPF_MOV, R1, 4321),
+ BPF_JMP32_REG(BPF_JNE, R0, R1, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1234 } }
+ },
+ /* BPF_JMP32 | BPF_JSET | BPF_K */
+ {
+ "JMP32_JSET_K: Small immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_JMP32_IMM(BPF_JSET, R0, 2, 1),
+ BPF_JMP32_IMM(BPF_JSET, R0, 3, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } }
+ },
+ {
+ "JMP32_JSET_K: Large immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0x40000000),
+ BPF_JMP32_IMM(BPF_JSET, R0, 0x3fffffff, 1),
+ BPF_JMP32_IMM(BPF_JSET, R0, 0x60000000, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x40000000 } }
+ },
+ {
+ "JMP32_JSET_K: negative immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -123),
+ BPF_JMP32_IMM(BPF_JSET, R0, -1, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -123 } }
+ },
+ /* BPF_JMP32 | BPF_JSET | BPF_X */
+ {
+ "JMP32_JSET_X",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 8),
+ BPF_ALU32_IMM(BPF_MOV, R1, 7),
+ BPF_JMP32_REG(BPF_JSET, R0, R1, 2),
+ BPF_ALU32_IMM(BPF_MOV, R1, 8 | 2),
+ BPF_JMP32_REG(BPF_JNE, R0, R1, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 8 } }
+ },
+ /* BPF_JMP32 | BPF_JGT | BPF_K */
+ {
+ "JMP32_JGT_K: Small immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 123),
+ BPF_JMP32_IMM(BPF_JGT, R0, 123, 1),
+ BPF_JMP32_IMM(BPF_JGT, R0, 122, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 123 } }
+ },
+ {
+ "JMP32_JGT_K: Large immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe),
+ BPF_JMP32_IMM(BPF_JGT, R0, 0xffffffff, 1),
+ BPF_JMP32_IMM(BPF_JGT, R0, 0xfffffffd, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xfffffffe } }
+ },
+ /* BPF_JMP32 | BPF_JGT | BPF_X */
+ {
+ "JMP32_JGT_X",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe),
+ BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff),
+ BPF_JMP32_REG(BPF_JGT, R0, R1, 2),
+ BPF_ALU32_IMM(BPF_MOV, R1, 0xfffffffd),
+ BPF_JMP32_REG(BPF_JGT, R0, R1, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xfffffffe } }
+ },
+ /* BPF_JMP32 | BPF_JGE | BPF_K */
+ {
+ "JMP32_JGE_K: Small immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 123),
+ BPF_JMP32_IMM(BPF_JGE, R0, 124, 1),
+ BPF_JMP32_IMM(BPF_JGE, R0, 123, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 123 } }
+ },
+ {
+ "JMP32_JGE_K: Large immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe),
+ BPF_JMP32_IMM(BPF_JGE, R0, 0xffffffff, 1),
+ BPF_JMP32_IMM(BPF_JGE, R0, 0xfffffffe, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xfffffffe } }
+ },
+ /* BPF_JMP32 | BPF_JGE | BPF_X */
+ {
+ "JMP32_JGE_X",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe),
+ BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff),
+ BPF_JMP32_REG(BPF_JGE, R0, R1, 2),
+ BPF_ALU32_IMM(BPF_MOV, R1, 0xfffffffe),
+ BPF_JMP32_REG(BPF_JGE, R0, R1, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xfffffffe } }
+ },
+ /* BPF_JMP32 | BPF_JLT | BPF_K */
+ {
+ "JMP32_JLT_K: Small immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 123),
+ BPF_JMP32_IMM(BPF_JLT, R0, 123, 1),
+ BPF_JMP32_IMM(BPF_JLT, R0, 124, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 123 } }
+ },
+ {
+ "JMP32_JLT_K: Large immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe),
+ BPF_JMP32_IMM(BPF_JLT, R0, 0xfffffffd, 1),
+ BPF_JMP32_IMM(BPF_JLT, R0, 0xffffffff, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xfffffffe } }
+ },
+ /* BPF_JMP32 | BPF_JLT | BPF_X */
+ {
+ "JMP32_JLT_X",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe),
+ BPF_ALU32_IMM(BPF_MOV, R1, 0xfffffffd),
+ BPF_JMP32_REG(BPF_JLT, R0, R1, 2),
+ BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff),
+ BPF_JMP32_REG(BPF_JLT, R0, R1, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xfffffffe } }
+ },
+ /* BPF_JMP32 | BPF_JLE | BPF_K */
+ {
+ "JMP32_JLE_K: Small immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 123),
+ BPF_JMP32_IMM(BPF_JLE, R0, 122, 1),
+ BPF_JMP32_IMM(BPF_JLE, R0, 123, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 123 } }
+ },
+ {
+ "JMP32_JLE_K: Large immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe),
+ BPF_JMP32_IMM(BPF_JLE, R0, 0xfffffffd, 1),
+ BPF_JMP32_IMM(BPF_JLE, R0, 0xfffffffe, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xfffffffe } }
+ },
+ /* BPF_JMP32 | BPF_JLE | BPF_X */
+ {
+ "JMP32_JLE_X",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe),
+ BPF_ALU32_IMM(BPF_MOV, R1, 0xfffffffd),
+ BPF_JMP32_REG(BPF_JLE, R0, R1, 2),
+ BPF_ALU32_IMM(BPF_MOV, R1, 0xfffffffe),
+ BPF_JMP32_REG(BPF_JLE, R0, R1, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xfffffffe } }
+ },
+ /* BPF_JMP32 | BPF_JSGT | BPF_K */
+ {
+ "JMP32_JSGT_K: Small immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -123),
+ BPF_JMP32_IMM(BPF_JSGT, R0, -123, 1),
+ BPF_JMP32_IMM(BPF_JSGT, R0, -124, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -123 } }
+ },
+ {
+ "JMP32_JSGT_K: Large immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -12345678),
+ BPF_JMP32_IMM(BPF_JSGT, R0, -12345678, 1),
+ BPF_JMP32_IMM(BPF_JSGT, R0, -12345679, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -12345678 } }
+ },
+ /* BPF_JMP32 | BPF_JSGT | BPF_X */
+ {
+ "JMP32_JSGT_X",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -12345678),
+ BPF_ALU32_IMM(BPF_MOV, R1, -12345678),
+ BPF_JMP32_REG(BPF_JSGT, R0, R1, 2),
+ BPF_ALU32_IMM(BPF_MOV, R1, -12345679),
+ BPF_JMP32_REG(BPF_JSGT, R0, R1, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -12345678 } }
+ },
+ /* BPF_JMP32 | BPF_JSGE | BPF_K */
+ {
+ "JMP32_JSGE_K: Small immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -123),
+ BPF_JMP32_IMM(BPF_JSGE, R0, -122, 1),
+ BPF_JMP32_IMM(BPF_JSGE, R0, -123, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -123 } }
+ },
+ {
+ "JMP32_JSGE_K: Large immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -12345678),
+ BPF_JMP32_IMM(BPF_JSGE, R0, -12345677, 1),
+ BPF_JMP32_IMM(BPF_JSGE, R0, -12345678, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -12345678 } }
+ },
+ /* BPF_JMP32 | BPF_JSGE | BPF_X */
+ {
+ "JMP32_JSGE_X",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -12345678),
+ BPF_ALU32_IMM(BPF_MOV, R1, -12345677),
+ BPF_JMP32_REG(BPF_JSGE, R0, R1, 2),
+ BPF_ALU32_IMM(BPF_MOV, R1, -12345678),
+ BPF_JMP32_REG(BPF_JSGE, R0, R1, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -12345678 } }
+ },
+ /* BPF_JMP32 | BPF_JSLT | BPF_K */
+ {
+ "JMP32_JSLT_K: Small immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -123),
+ BPF_JMP32_IMM(BPF_JSLT, R0, -123, 1),
+ BPF_JMP32_IMM(BPF_JSLT, R0, -122, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -123 } }
+ },
+ {
+ "JMP32_JSLT_K: Large immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -12345678),
+ BPF_JMP32_IMM(BPF_JSLT, R0, -12345678, 1),
+ BPF_JMP32_IMM(BPF_JSLT, R0, -12345677, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -12345678 } }
+ },
+ /* BPF_JMP32 | BPF_JSLT | BPF_X */
+ {
+ "JMP32_JSLT_X",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -12345678),
+ BPF_ALU32_IMM(BPF_MOV, R1, -12345678),
+ BPF_JMP32_REG(BPF_JSLT, R0, R1, 2),
+ BPF_ALU32_IMM(BPF_MOV, R1, -12345677),
+ BPF_JMP32_REG(BPF_JSLT, R0, R1, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -12345678 } }
+ },
+ /* BPF_JMP32 | BPF_JSLE | BPF_K */
+ {
+ "JMP32_JSLE_K: Small immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -123),
+ BPF_JMP32_IMM(BPF_JSLE, R0, -124, 1),
+ BPF_JMP32_IMM(BPF_JSLE, R0, -123, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -123 } }
+ },
+ {
+ "JMP32_JSLE_K: Large immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -12345678),
+ BPF_JMP32_IMM(BPF_JSLE, R0, -12345679, 1),
+ BPF_JMP32_IMM(BPF_JSLE, R0, -12345678, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -12345678 } }
+ },
+	/* BPF_JMP32 | BPF_JSLE | BPF_X */
+ {
+ "JMP32_JSLE_X",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -12345678),
+ BPF_ALU32_IMM(BPF_MOV, R1, -12345679),
+ BPF_JMP32_REG(BPF_JSLE, R0, R1, 2),
+ BPF_ALU32_IMM(BPF_MOV, R1, -12345678),
+ BPF_JMP32_REG(BPF_JSLE, R0, R1, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -12345678 } }
+ },
+ /* BPF_JMP | BPF_EXIT */
+ {
+ "JMP_EXIT",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0x4711),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0x4712),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x4711 } },
+ },
+ /* BPF_JMP | BPF_JA */
+ {
+ "JMP_JA: Unconditional jump: if (true) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
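+	/*
+	 * BPF_JSLT/JSGT/JSLE/JSGE are signed 64-bit comparisons, so the
+	 * all-ones patterns loaded below must be treated as -1 rather than
+	 * as a large unsigned value.
+	 */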
+ /* BPF_JMP | BPF_JSLT | BPF_K */
+ {
+ "JMP_JSLT_K: Signed jump: if (-2 < -1) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 0xfffffffffffffffeLL),
+ BPF_JMP_IMM(BPF_JSLT, R1, -1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JSLT_K: Signed jump: if (-1 < -1) return 0",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_LD_IMM64(R1, 0xffffffffffffffffLL),
+ BPF_JMP_IMM(BPF_JSLT, R1, -1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ /* BPF_JMP | BPF_JSGT | BPF_K */
+ {
+ "JMP_JSGT_K: Signed jump: if (-1 > -2) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 0xffffffffffffffffLL),
+ BPF_JMP_IMM(BPF_JSGT, R1, -2, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JSGT_K: Signed jump: if (-1 > -1) return 0",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_LD_IMM64(R1, 0xffffffffffffffffLL),
+ BPF_JMP_IMM(BPF_JSGT, R1, -1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ /* BPF_JMP | BPF_JSLE | BPF_K */
+ {
+ "JMP_JSLE_K: Signed jump: if (-2 <= -1) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 0xfffffffffffffffeLL),
+ BPF_JMP_IMM(BPF_JSLE, R1, -1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JSLE_K: Signed jump: if (-1 <= -1) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 0xffffffffffffffffLL),
+ BPF_JMP_IMM(BPF_JSLE, R1, -1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JSLE_K: Signed jump: value walk 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 3),
+ BPF_JMP_IMM(BPF_JSLE, R1, 0, 6),
+ BPF_ALU64_IMM(BPF_SUB, R1, 1),
+ BPF_JMP_IMM(BPF_JSLE, R1, 0, 4),
+ BPF_ALU64_IMM(BPF_SUB, R1, 1),
+ BPF_JMP_IMM(BPF_JSLE, R1, 0, 2),
+ BPF_ALU64_IMM(BPF_SUB, R1, 1),
+ BPF_JMP_IMM(BPF_JSLE, R1, 0, 1),
+ BPF_EXIT_INSN(), /* bad exit */
+ BPF_ALU32_IMM(BPF_MOV, R0, 1), /* good exit */
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JSLE_K: Signed jump: value walk 2",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 3),
+ BPF_JMP_IMM(BPF_JSLE, R1, 0, 4),
+ BPF_ALU64_IMM(BPF_SUB, R1, 2),
+ BPF_JMP_IMM(BPF_JSLE, R1, 0, 2),
+ BPF_ALU64_IMM(BPF_SUB, R1, 2),
+ BPF_JMP_IMM(BPF_JSLE, R1, 0, 1),
+ BPF_EXIT_INSN(), /* bad exit */
+ BPF_ALU32_IMM(BPF_MOV, R0, 1), /* good exit */
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ /* BPF_JMP | BPF_JSGE | BPF_K */
+ {
+ "JMP_JSGE_K: Signed jump: if (-1 >= -2) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 0xffffffffffffffffLL),
+ BPF_JMP_IMM(BPF_JSGE, R1, -2, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JSGE_K: Signed jump: if (-1 >= -1) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 0xffffffffffffffffLL),
+ BPF_JMP_IMM(BPF_JSGE, R1, -1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JSGE_K: Signed jump: value walk 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, -3),
+ BPF_JMP_IMM(BPF_JSGE, R1, 0, 6),
+ BPF_ALU64_IMM(BPF_ADD, R1, 1),
+ BPF_JMP_IMM(BPF_JSGE, R1, 0, 4),
+ BPF_ALU64_IMM(BPF_ADD, R1, 1),
+ BPF_JMP_IMM(BPF_JSGE, R1, 0, 2),
+ BPF_ALU64_IMM(BPF_ADD, R1, 1),
+ BPF_JMP_IMM(BPF_JSGE, R1, 0, 1),
+ BPF_EXIT_INSN(), /* bad exit */
+ BPF_ALU32_IMM(BPF_MOV, R0, 1), /* good exit */
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JSGE_K: Signed jump: value walk 2",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, -3),
+ BPF_JMP_IMM(BPF_JSGE, R1, 0, 4),
+ BPF_ALU64_IMM(BPF_ADD, R1, 2),
+ BPF_JMP_IMM(BPF_JSGE, R1, 0, 2),
+ BPF_ALU64_IMM(BPF_ADD, R1, 2),
+ BPF_JMP_IMM(BPF_JSGE, R1, 0, 1),
+ BPF_EXIT_INSN(), /* bad exit */
+ BPF_ALU32_IMM(BPF_MOV, R0, 1), /* good exit */
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ /* BPF_JMP | BPF_JGT | BPF_K */
+ {
+ "JMP_JGT_K: if (3 > 2) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 3),
+ BPF_JMP_IMM(BPF_JGT, R1, 2, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JGT_K: Unsigned jump: if (-1 > 1) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, -1),
+ BPF_JMP_IMM(BPF_JGT, R1, 1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ /* BPF_JMP | BPF_JLT | BPF_K */
+ {
+ "JMP_JLT_K: if (2 < 3) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 2),
+ BPF_JMP_IMM(BPF_JLT, R1, 3, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+		"JMP_JLT_K: Unsigned jump: if (1 < -1) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 1),
+ BPF_JMP_IMM(BPF_JLT, R1, -1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ /* BPF_JMP | BPF_JGE | BPF_K */
+ {
+ "JMP_JGE_K: if (3 >= 2) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 3),
+ BPF_JMP_IMM(BPF_JGE, R1, 2, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ /* BPF_JMP | BPF_JLE | BPF_K */
+ {
+ "JMP_JLE_K: if (2 <= 3) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 2),
+ BPF_JMP_IMM(BPF_JLE, R1, 3, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ /* BPF_JMP | BPF_JGT | BPF_K jump backwards */
+ {
+ "JMP_JGT_K: if (3 > 2) return 1 (jump backwards)",
+ .u.insns_int = {
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2), /* goto start */
+ BPF_ALU32_IMM(BPF_MOV, R0, 1), /* out: */
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0), /* start: */
+ BPF_LD_IMM64(R1, 3), /* note: this takes 2 insns */
+ BPF_JMP_IMM(BPF_JGT, R1, 2, -6), /* goto out */
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JGE_K: if (3 >= 3) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 3),
+ BPF_JMP_IMM(BPF_JGE, R1, 3, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ /* BPF_JMP | BPF_JLT | BPF_K jump backwards */
+ {
+		"JMP_JLT_K: if (2 < 3) return 1 (jump backwards)",
+ .u.insns_int = {
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2), /* goto start */
+ BPF_ALU32_IMM(BPF_MOV, R0, 1), /* out: */
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0), /* start: */
+ BPF_LD_IMM64(R1, 2), /* note: this takes 2 insns */
+ BPF_JMP_IMM(BPF_JLT, R1, 3, -6), /* goto out */
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JLE_K: if (3 <= 3) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 3),
+ BPF_JMP_IMM(BPF_JLE, R1, 3, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ /* BPF_JMP | BPF_JNE | BPF_K */
+ {
+ "JMP_JNE_K: if (3 != 2) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 3),
+ BPF_JMP_IMM(BPF_JNE, R1, 2, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ /* BPF_JMP | BPF_JEQ | BPF_K */
+ {
+ "JMP_JEQ_K: if (3 == 3) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 3),
+ BPF_JMP_IMM(BPF_JEQ, R1, 3, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ /* BPF_JMP | BPF_JSET | BPF_K */
+ {
+ "JMP_JSET_K: if (0x3 & 0x2) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 3),
+ BPF_JMP_IMM(BPF_JSET, R1, 2, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JSET_K: if (0x3 & 0xffffffff) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 3),
+ BPF_JMP_IMM(BPF_JSET, R1, 0xffffffff, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ /* BPF_JMP | BPF_JSGT | BPF_X */
+ {
+ "JMP_JSGT_X: Signed jump: if (-1 > -2) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, -1),
+ BPF_LD_IMM64(R2, -2),
+ BPF_JMP_REG(BPF_JSGT, R1, R2, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JSGT_X: Signed jump: if (-1 > -1) return 0",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_LD_IMM64(R1, -1),
+ BPF_LD_IMM64(R2, -1),
+ BPF_JMP_REG(BPF_JSGT, R1, R2, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ /* BPF_JMP | BPF_JSLT | BPF_X */
+ {
+ "JMP_JSLT_X: Signed jump: if (-2 < -1) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, -1),
+ BPF_LD_IMM64(R2, -2),
+ BPF_JMP_REG(BPF_JSLT, R2, R1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JSLT_X: Signed jump: if (-1 < -1) return 0",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_LD_IMM64(R1, -1),
+ BPF_LD_IMM64(R2, -1),
+ BPF_JMP_REG(BPF_JSLT, R1, R2, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ /* BPF_JMP | BPF_JSGE | BPF_X */
+ {
+ "JMP_JSGE_X: Signed jump: if (-1 >= -2) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, -1),
+ BPF_LD_IMM64(R2, -2),
+ BPF_JMP_REG(BPF_JSGE, R1, R2, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JSGE_X: Signed jump: if (-1 >= -1) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, -1),
+ BPF_LD_IMM64(R2, -1),
+ BPF_JMP_REG(BPF_JSGE, R1, R2, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ /* BPF_JMP | BPF_JSLE | BPF_X */
+ {
+ "JMP_JSLE_X: Signed jump: if (-2 <= -1) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, -1),
+ BPF_LD_IMM64(R2, -2),
+ BPF_JMP_REG(BPF_JSLE, R2, R1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JSLE_X: Signed jump: if (-1 <= -1) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, -1),
+ BPF_LD_IMM64(R2, -1),
+ BPF_JMP_REG(BPF_JSLE, R1, R2, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ /* BPF_JMP | BPF_JGT | BPF_X */
+ {
+ "JMP_JGT_X: if (3 > 2) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 3),
+ BPF_LD_IMM64(R2, 2),
+ BPF_JMP_REG(BPF_JGT, R1, R2, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JGT_X: Unsigned jump: if (-1 > 1) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, -1),
+ BPF_LD_IMM64(R2, 1),
+ BPF_JMP_REG(BPF_JGT, R1, R2, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ /* BPF_JMP | BPF_JLT | BPF_X */
+ {
+ "JMP_JLT_X: if (2 < 3) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 3),
+ BPF_LD_IMM64(R2, 2),
+ BPF_JMP_REG(BPF_JLT, R2, R1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JLT_X: Unsigned jump: if (1 < -1) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, -1),
+ BPF_LD_IMM64(R2, 1),
+ BPF_JMP_REG(BPF_JLT, R2, R1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ /* BPF_JMP | BPF_JGE | BPF_X */
+ {
+ "JMP_JGE_X: if (3 >= 2) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 3),
+ BPF_LD_IMM64(R2, 2),
+ BPF_JMP_REG(BPF_JGE, R1, R2, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JGE_X: if (3 >= 3) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 3),
+ BPF_LD_IMM64(R2, 3),
+ BPF_JMP_REG(BPF_JGE, R1, R2, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ /* BPF_JMP | BPF_JLE | BPF_X */
+ {
+ "JMP_JLE_X: if (2 <= 3) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 3),
+ BPF_LD_IMM64(R2, 2),
+ BPF_JMP_REG(BPF_JLE, R2, R1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JLE_X: if (3 <= 3) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 3),
+ BPF_LD_IMM64(R2, 3),
+ BPF_JMP_REG(BPF_JLE, R1, R2, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ /* Mainly testing JIT + imm64 here. */
+ "JMP_JGE_X: ldimm64 test 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 3),
+ BPF_LD_IMM64(R2, 2),
+ BPF_JMP_REG(BPF_JGE, R1, R2, 2),
+ BPF_LD_IMM64(R0, 0xffffffffffffffffULL),
+ BPF_LD_IMM64(R0, 0xeeeeeeeeeeeeeeeeULL),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xeeeeeeeeU } },
+ },
+ {
+ "JMP_JGE_X: ldimm64 test 2",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 3),
+ BPF_LD_IMM64(R2, 2),
+ BPF_JMP_REG(BPF_JGE, R1, R2, 0),
+ BPF_LD_IMM64(R0, 0xffffffffffffffffULL),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xffffffffU } },
+ },
+ {
+ "JMP_JGE_X: ldimm64 test 3",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_LD_IMM64(R1, 3),
+ BPF_LD_IMM64(R2, 2),
+ BPF_JMP_REG(BPF_JGE, R1, R2, 4),
+ BPF_LD_IMM64(R0, 0xffffffffffffffffULL),
+ BPF_LD_IMM64(R0, 0xeeeeeeeeeeeeeeeeULL),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JLE_X: ldimm64 test 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 3),
+ BPF_LD_IMM64(R2, 2),
+ BPF_JMP_REG(BPF_JLE, R2, R1, 2),
+ BPF_LD_IMM64(R0, 0xffffffffffffffffULL),
+ BPF_LD_IMM64(R0, 0xeeeeeeeeeeeeeeeeULL),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xeeeeeeeeU } },
+ },
+ {
+ "JMP_JLE_X: ldimm64 test 2",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 3),
+ BPF_LD_IMM64(R2, 2),
+ BPF_JMP_REG(BPF_JLE, R2, R1, 0),
+ BPF_LD_IMM64(R0, 0xffffffffffffffffULL),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xffffffffU } },
+ },
+ {
+ "JMP_JLE_X: ldimm64 test 3",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_LD_IMM64(R1, 3),
+ BPF_LD_IMM64(R2, 2),
+ BPF_JMP_REG(BPF_JLE, R2, R1, 4),
+ BPF_LD_IMM64(R0, 0xffffffffffffffffULL),
+ BPF_LD_IMM64(R0, 0xeeeeeeeeeeeeeeeeULL),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ /* BPF_JMP | BPF_JNE | BPF_X */
+ {
+ "JMP_JNE_X: if (3 != 2) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 3),
+ BPF_LD_IMM64(R2, 2),
+ BPF_JMP_REG(BPF_JNE, R1, R2, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ /* BPF_JMP | BPF_JEQ | BPF_X */
+ {
+ "JMP_JEQ_X: if (3 == 3) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 3),
+ BPF_LD_IMM64(R2, 3),
+ BPF_JMP_REG(BPF_JEQ, R1, R2, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ /* BPF_JMP | BPF_JSET | BPF_X */
+ {
+ "JMP_JSET_X: if (0x3 & 0x2) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 3),
+ BPF_LD_IMM64(R2, 2),
+ BPF_JMP_REG(BPF_JSET, R1, R2, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JSET_X: if (0x3 & 0xffffffff) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 3),
+ BPF_LD_IMM64(R2, 0xffffffff),
+ BPF_JMP_REG(BPF_JSET, R1, R2, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JA: Jump, gap, jump, ...",
+ { },
+ CLASSIC | FLAG_NO_DATA,
+ { },
+ { { 0, 0xababcbac } },
+ .fill_helper = bpf_fill_ja,
+ },
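+	/*
+	 * The BPF_MAXINSNS tests use .fill_helper callbacks to generate
+	 * programs of up to BPF_MAXINSNS (4096) instructions at runtime,
+	 * mainly to stress JIT image size limits and long branch offsets.
+	 */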
+ { /* Mainly checking JIT here. */
+ "BPF_MAXINSNS: Maximum possible literals",
+ { },
+ CLASSIC | FLAG_NO_DATA,
+ { },
+ { { 0, 0xffffffff } },
+ .fill_helper = bpf_fill_maxinsns1,
+ },
+ { /* Mainly checking JIT here. */
+ "BPF_MAXINSNS: Single literal",
+ { },
+ CLASSIC | FLAG_NO_DATA,
+ { },
+ { { 0, 0xfefefefe } },
+ .fill_helper = bpf_fill_maxinsns2,
+ },
+ { /* Mainly checking JIT here. */
+ "BPF_MAXINSNS: Run/add until end",
+ { },
+ CLASSIC | FLAG_NO_DATA,
+ { },
+ { { 0, 0x947bf368 } },
+ .fill_helper = bpf_fill_maxinsns3,
+ },
+ {
+ "BPF_MAXINSNS: Too many instructions",
+ { },
+ CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
+ { },
+ { },
+ .fill_helper = bpf_fill_maxinsns4,
+ .expected_errcode = -EINVAL,
+ },
+ { /* Mainly checking JIT here. */
+ "BPF_MAXINSNS: Very long jump",
+ { },
+ CLASSIC | FLAG_NO_DATA,
+ { },
+ { { 0, 0xabababab } },
+ .fill_helper = bpf_fill_maxinsns5,
+ },
+ { /* Mainly checking JIT here. */
+ "BPF_MAXINSNS: Ctx heavy transformations",
+ { },
+ CLASSIC,
+ { },
+ {
+ { 1, SKB_VLAN_PRESENT },
+ { 10, SKB_VLAN_PRESENT }
+ },
+ .fill_helper = bpf_fill_maxinsns6,
+ },
+ { /* Mainly checking JIT here. */
+ "BPF_MAXINSNS: Call heavy transformations",
+ { },
+ CLASSIC | FLAG_NO_DATA,
+ { },
+ { { 1, 0 }, { 10, 0 } },
+ .fill_helper = bpf_fill_maxinsns7,
+ },
+ { /* Mainly checking JIT here. */
+ "BPF_MAXINSNS: Jump heavy test",
+ { },
+ CLASSIC | FLAG_NO_DATA,
+ { },
+ { { 0, 0xffffffff } },
+ .fill_helper = bpf_fill_maxinsns8,
+ },
+ { /* Mainly checking JIT here. */
+ "BPF_MAXINSNS: Very long jump backwards",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 0xcbababab } },
+ .fill_helper = bpf_fill_maxinsns9,
+ },
+ { /* Mainly checking JIT here. */
+ "BPF_MAXINSNS: Edge hopping nuthouse",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 0xabababac } },
+ .fill_helper = bpf_fill_maxinsns10,
+ },
+ {
+ "BPF_MAXINSNS: Jump, gap, jump, ...",
+ { },
+ CLASSIC | FLAG_NO_DATA,
+ { },
+ { { 0, 0xababcbac } },
+ .fill_helper = bpf_fill_maxinsns11,
+ },
+ {
+ "BPF_MAXINSNS: jump over MSH",
+ { },
+ CLASSIC | FLAG_EXPECTED_FAIL,
+ { 0xfa, 0xfb, 0xfc, 0xfd, },
+ { { 4, 0xabababab } },
+ .fill_helper = bpf_fill_maxinsns12,
+ .expected_errcode = -EINVAL,
+ },
+ {
+ "BPF_MAXINSNS: exec all MSH",
+ { },
+ CLASSIC,
+ { 0xfa, 0xfb, 0xfc, 0xfd, },
+ { { 4, 0xababab83 } },
+ .fill_helper = bpf_fill_maxinsns13,
+ },
+ {
+ "BPF_MAXINSNS: ld_abs+get_processor_id",
+ { },
+ CLASSIC,
+ { },
+ { { 1, 0xbee } },
+ .fill_helper = bpf_fill_ld_abs_get_processor_id,
+ },
+ /*
+ * LD_IND / LD_ABS on fragmented SKBs
+ */
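+	/*
+	 * With FLAG_SKB_FRAG the test runner places .frag_data in a paged
+	 * fragment behind the 64-byte linear head, so the loads at offset
+	 * 0x40 and beyond must take the slow path that reads across the
+	 * linear/fragment boundary.
+	 */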
+ {
+ "LD_IND byte frag",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x40),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_B, 0x0),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC | FLAG_SKB_FRAG,
+ { },
+ { {0x40, 0x42} },
+ .frag_data = {
+ 0x42, 0x00, 0x00, 0x00,
+ 0x43, 0x44, 0x00, 0x00,
+ 0x21, 0x07, 0x19, 0x83,
+ },
+ },
+ {
+ "LD_IND halfword frag",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x40),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_H, 0x4),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC | FLAG_SKB_FRAG,
+ { },
+ { {0x40, 0x4344} },
+ .frag_data = {
+ 0x42, 0x00, 0x00, 0x00,
+ 0x43, 0x44, 0x00, 0x00,
+ 0x21, 0x07, 0x19, 0x83,
+ },
+ },
+ {
+ "LD_IND word frag",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x40),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_W, 0x8),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC | FLAG_SKB_FRAG,
+ { },
+ { {0x40, 0x21071983} },
+ .frag_data = {
+ 0x42, 0x00, 0x00, 0x00,
+ 0x43, 0x44, 0x00, 0x00,
+ 0x21, 0x07, 0x19, 0x83,
+ },
+ },
+ {
+ "LD_IND halfword mixed head/frag",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x40),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_H, -0x1),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC | FLAG_SKB_FRAG,
+ { [0x3e] = 0x25, [0x3f] = 0x05, },
+ { {0x40, 0x0519} },
+ .frag_data = { 0x19, 0x82 },
+ },
+ {
+ "LD_IND word mixed head/frag",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x40),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x2),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC | FLAG_SKB_FRAG,
+ { [0x3e] = 0x25, [0x3f] = 0x05, },
+ { {0x40, 0x25051982} },
+ .frag_data = { 0x19, 0x82 },
+ },
+ {
+ "LD_ABS byte frag",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_B, 0x40),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC | FLAG_SKB_FRAG,
+ { },
+ { {0x40, 0x42} },
+ .frag_data = {
+ 0x42, 0x00, 0x00, 0x00,
+ 0x43, 0x44, 0x00, 0x00,
+ 0x21, 0x07, 0x19, 0x83,
+ },
+ },
+ {
+ "LD_ABS halfword frag",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x44),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC | FLAG_SKB_FRAG,
+ { },
+ { {0x40, 0x4344} },
+ .frag_data = {
+ 0x42, 0x00, 0x00, 0x00,
+ 0x43, 0x44, 0x00, 0x00,
+ 0x21, 0x07, 0x19, 0x83,
+ },
+ },
+ {
+ "LD_ABS word frag",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x48),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC | FLAG_SKB_FRAG,
+ { },
+ { {0x40, 0x21071983} },
+ .frag_data = {
+ 0x42, 0x00, 0x00, 0x00,
+ 0x43, 0x44, 0x00, 0x00,
+ 0x21, 0x07, 0x19, 0x83,
+ },
+ },
+ {
+ "LD_ABS halfword mixed head/frag",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x3f),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC | FLAG_SKB_FRAG,
+ { [0x3e] = 0x25, [0x3f] = 0x05, },
+ { {0x40, 0x0519} },
+ .frag_data = { 0x19, 0x82 },
+ },
+ {
+ "LD_ABS word mixed head/frag",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x3e),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC | FLAG_SKB_FRAG,
+ { [0x3e] = 0x25, [0x3f] = 0x05, },
+ { {0x40, 0x25051982} },
+ .frag_data = { 0x19, 0x82 },
+ },
+ /*
+ * LD_IND / LD_ABS on non fragmented SKBs
+ */
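+	/*
+	 * Several tests below use SKF_LL_OFF, which rebases the load offset
+	 * onto the link-layer (MAC) header; for these test SKBs that header
+	 * coincides with the start of the packet data, as the in-bounds
+	 * cases demonstrate.
+	 */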
+ {
+ /*
+ * this tests that the JIT/interpreter correctly resets X
+ * before using it in an LD_IND instruction.
+ */
+ "LD_IND byte default X",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_IND | BPF_B, 0x1),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x1] = 0x42 },
+ { {0x40, 0x42 } },
+ },
+ {
+ "LD_IND byte positive offset",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_B, 0x1),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x40, 0x82 } },
+ },
+ {
+ "LD_IND byte negative offset",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_B, -0x1),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x40, 0x05 } },
+ },
+ {
+ "LD_IND byte positive offset, all ff",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_B, 0x1),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0xff, [0x3d] = 0xff, [0x3e] = 0xff, [0x3f] = 0xff },
+ { {0x40, 0xff } },
+ },
+ {
+ "LD_IND byte positive offset, out of bounds",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_B, 0x1),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x3f, 0 }, },
+ },
+ {
+ "LD_IND byte negative offset, out of bounds",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_B, -0x3f),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x3f, 0 } },
+ },
+ {
+ "LD_IND byte negative offset, multiple calls",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x3b),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_B, SKF_LL_OFF + 1),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_B, SKF_LL_OFF + 2),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_B, SKF_LL_OFF + 3),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_B, SKF_LL_OFF + 4),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x40, 0x82 }, },
+ },
+ {
+ "LD_IND halfword positive offset",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_H, 0x2),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ {
+ [0x1c] = 0xaa, [0x1d] = 0x55,
+ [0x1e] = 0xbb, [0x1f] = 0x66,
+ [0x20] = 0xcc, [0x21] = 0x77,
+ [0x22] = 0xdd, [0x23] = 0x88,
+ },
+ { {0x40, 0xdd88 } },
+ },
+ {
+ "LD_IND halfword negative offset",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_H, -0x2),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ {
+ [0x1c] = 0xaa, [0x1d] = 0x55,
+ [0x1e] = 0xbb, [0x1f] = 0x66,
+ [0x20] = 0xcc, [0x21] = 0x77,
+ [0x22] = 0xdd, [0x23] = 0x88,
+ },
+ { {0x40, 0xbb66 } },
+ },
+ {
+ "LD_IND halfword unaligned",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_H, -0x1),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ {
+ [0x1c] = 0xaa, [0x1d] = 0x55,
+ [0x1e] = 0xbb, [0x1f] = 0x66,
+ [0x20] = 0xcc, [0x21] = 0x77,
+ [0x22] = 0xdd, [0x23] = 0x88,
+ },
+ { {0x40, 0x66cc } },
+ },
+ {
+ "LD_IND halfword positive offset, all ff",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x3d),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_H, 0x1),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0xff, [0x3d] = 0xff, [0x3e] = 0xff, [0x3f] = 0xff },
+ { {0x40, 0xffff } },
+ },
+ {
+ "LD_IND halfword positive offset, out of bounds",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_H, 0x1),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x3f, 0 }, },
+ },
+ {
+ "LD_IND halfword negative offset, out of bounds",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_H, -0x3f),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x3f, 0 } },
+ },
+ {
+ "LD_IND word positive offset",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_W, 0x4),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ {
+ [0x1c] = 0xaa, [0x1d] = 0x55,
+ [0x1e] = 0xbb, [0x1f] = 0x66,
+ [0x20] = 0xcc, [0x21] = 0x77,
+ [0x22] = 0xdd, [0x23] = 0x88,
+ [0x24] = 0xee, [0x25] = 0x99,
+ [0x26] = 0xff, [0x27] = 0xaa,
+ },
+ { {0x40, 0xee99ffaa } },
+ },
+ {
+ "LD_IND word negative offset",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x4),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ {
+ [0x1c] = 0xaa, [0x1d] = 0x55,
+ [0x1e] = 0xbb, [0x1f] = 0x66,
+ [0x20] = 0xcc, [0x21] = 0x77,
+ [0x22] = 0xdd, [0x23] = 0x88,
+ [0x24] = 0xee, [0x25] = 0x99,
+ [0x26] = 0xff, [0x27] = 0xaa,
+ },
+ { {0x40, 0xaa55bb66 } },
+ },
+ {
+ "LD_IND word unaligned (addr & 3 == 2)",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x2),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ {
+ [0x1c] = 0xaa, [0x1d] = 0x55,
+ [0x1e] = 0xbb, [0x1f] = 0x66,
+ [0x20] = 0xcc, [0x21] = 0x77,
+ [0x22] = 0xdd, [0x23] = 0x88,
+ [0x24] = 0xee, [0x25] = 0x99,
+ [0x26] = 0xff, [0x27] = 0xaa,
+ },
+ { {0x40, 0xbb66cc77 } },
+ },
+ {
+ "LD_IND word unaligned (addr & 3 == 1)",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x3),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ {
+ [0x1c] = 0xaa, [0x1d] = 0x55,
+ [0x1e] = 0xbb, [0x1f] = 0x66,
+ [0x20] = 0xcc, [0x21] = 0x77,
+ [0x22] = 0xdd, [0x23] = 0x88,
+ [0x24] = 0xee, [0x25] = 0x99,
+ [0x26] = 0xff, [0x27] = 0xaa,
+ },
+ { {0x40, 0x55bb66cc } },
+ },
+ {
+ "LD_IND word unaligned (addr & 3 == 3)",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x1),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ {
+ [0x1c] = 0xaa, [0x1d] = 0x55,
+ [0x1e] = 0xbb, [0x1f] = 0x66,
+ [0x20] = 0xcc, [0x21] = 0x77,
+ [0x22] = 0xdd, [0x23] = 0x88,
+ [0x24] = 0xee, [0x25] = 0x99,
+ [0x26] = 0xff, [0x27] = 0xaa,
+ },
+ { {0x40, 0x66cc77dd } },
+ },
+ {
+ "LD_IND word positive offset, all ff",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x3b),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_W, 0x1),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0xff, [0x3d] = 0xff, [0x3e] = 0xff, [0x3f] = 0xff },
+ { {0x40, 0xffffffff } },
+ },
+ {
+ "LD_IND word positive offset, out of bounds",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_W, 0x1),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x3f, 0 }, },
+ },
+ {
+ "LD_IND word negative offset, out of bounds",
+ .u.insns = {
+ BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
+ BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x3f),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x3f, 0 } },
+ },
+ {
+ "LD_ABS byte",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_B, 0x20),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ {
+ [0x1c] = 0xaa, [0x1d] = 0x55,
+ [0x1e] = 0xbb, [0x1f] = 0x66,
+ [0x20] = 0xcc, [0x21] = 0x77,
+ [0x22] = 0xdd, [0x23] = 0x88,
+ [0x24] = 0xee, [0x25] = 0x99,
+ [0x26] = 0xff, [0x27] = 0xaa,
+ },
+ { {0x40, 0xcc } },
+ },
+ {
+ "LD_ABS byte positive offset, all ff",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_B, 0x3f),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0xff, [0x3d] = 0xff, [0x3e] = 0xff, [0x3f] = 0xff },
+ { {0x40, 0xff } },
+ },
+ {
+ "LD_ABS byte positive offset, out of bounds",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_B, 0x3f),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x3f, 0 }, },
+ },
+ {
+ "LD_ABS byte negative offset, out of bounds load",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_B, -1),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC | FLAG_EXPECTED_FAIL,
+ .expected_errcode = -EINVAL,
+ },
+ {
+ "LD_ABS byte negative offset, in bounds",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3f),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x40, 0x82 }, },
+ },
+ {
+ "LD_ABS byte negative offset, out of bounds",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3f),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x3f, 0 }, },
+ },
+ {
+ "LD_ABS byte negative offset, multiple calls",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3c),
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3d),
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3e),
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3f),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x40, 0x82 }, },
+ },
+ {
+ "LD_ABS halfword",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x22),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ {
+ [0x1c] = 0xaa, [0x1d] = 0x55,
+ [0x1e] = 0xbb, [0x1f] = 0x66,
+ [0x20] = 0xcc, [0x21] = 0x77,
+ [0x22] = 0xdd, [0x23] = 0x88,
+ [0x24] = 0xee, [0x25] = 0x99,
+ [0x26] = 0xff, [0x27] = 0xaa,
+ },
+ { {0x40, 0xdd88 } },
+ },
+ {
+ "LD_ABS halfword unaligned",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x25),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ {
+ [0x1c] = 0xaa, [0x1d] = 0x55,
+ [0x1e] = 0xbb, [0x1f] = 0x66,
+ [0x20] = 0xcc, [0x21] = 0x77,
+ [0x22] = 0xdd, [0x23] = 0x88,
+ [0x24] = 0xee, [0x25] = 0x99,
+ [0x26] = 0xff, [0x27] = 0xaa,
+ },
+ { {0x40, 0x99ff } },
+ },
+ {
+ "LD_ABS halfword positive offset, all ff",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x3e),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0xff, [0x3d] = 0xff, [0x3e] = 0xff, [0x3f] = 0xff },
+ { {0x40, 0xffff } },
+ },
+ {
+ "LD_ABS halfword positive offset, out of bounds",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x3f),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x3f, 0 }, },
+ },
+ {
+ "LD_ABS halfword negative offset, out of bounds load",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_H, -1),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC | FLAG_EXPECTED_FAIL,
+ .expected_errcode = -EINVAL,
+ },
+ {
+ "LD_ABS halfword negative offset, in bounds",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_H, SKF_LL_OFF + 0x3e),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x40, 0x1982 }, },
+ },
+ {
+ "LD_ABS halfword negative offset, out of bounds",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_H, SKF_LL_OFF + 0x3e),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x3f, 0 }, },
+ },
+ {
+ "LD_ABS word",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x1c),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ {
+ [0x1c] = 0xaa, [0x1d] = 0x55,
+ [0x1e] = 0xbb, [0x1f] = 0x66,
+ [0x20] = 0xcc, [0x21] = 0x77,
+ [0x22] = 0xdd, [0x23] = 0x88,
+ [0x24] = 0xee, [0x25] = 0x99,
+ [0x26] = 0xff, [0x27] = 0xaa,
+ },
+ { {0x40, 0xaa55bb66 } },
+ },
+ {
+ "LD_ABS word unaligned (addr & 3 == 2)",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x22),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ {
+ [0x1c] = 0xaa, [0x1d] = 0x55,
+ [0x1e] = 0xbb, [0x1f] = 0x66,
+ [0x20] = 0xcc, [0x21] = 0x77,
+ [0x22] = 0xdd, [0x23] = 0x88,
+ [0x24] = 0xee, [0x25] = 0x99,
+ [0x26] = 0xff, [0x27] = 0xaa,
+ },
+ { {0x40, 0xdd88ee99 } },
+ },
+ {
+ "LD_ABS word unaligned (addr & 3 == 1)",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x21),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ {
+ [0x1c] = 0xaa, [0x1d] = 0x55,
+ [0x1e] = 0xbb, [0x1f] = 0x66,
+ [0x20] = 0xcc, [0x21] = 0x77,
+ [0x22] = 0xdd, [0x23] = 0x88,
+ [0x24] = 0xee, [0x25] = 0x99,
+ [0x26] = 0xff, [0x27] = 0xaa,
+ },
+ { {0x40, 0x77dd88ee } },
+ },
+ {
+ "LD_ABS word unaligned (addr & 3 == 3)",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x23),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ {
+ [0x1c] = 0xaa, [0x1d] = 0x55,
+ [0x1e] = 0xbb, [0x1f] = 0x66,
+ [0x20] = 0xcc, [0x21] = 0x77,
+ [0x22] = 0xdd, [0x23] = 0x88,
+ [0x24] = 0xee, [0x25] = 0x99,
+ [0x26] = 0xff, [0x27] = 0xaa,
+ },
+ { {0x40, 0x88ee99ff } },
+ },
+ {
+ "LD_ABS word positive offset, all ff",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x3c),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0xff, [0x3d] = 0xff, [0x3e] = 0xff, [0x3f] = 0xff },
+ { {0x40, 0xffffffff } },
+ },
+ {
+ "LD_ABS word positive offset, out of bounds",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x3f),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x3f, 0 }, },
+ },
+ {
+ "LD_ABS word negative offset, out of bounds load",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_W, -1),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC | FLAG_EXPECTED_FAIL,
+ .expected_errcode = -EINVAL,
+ },
+ {
+ "LD_ABS word negative offset, in bounds",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_W, SKF_LL_OFF + 0x3c),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x40, 0x25051982 }, },
+ },
+ {
+ "LD_ABS word negative offset, out of bounds",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_ABS | BPF_W, SKF_LL_OFF + 0x3c),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x3f, 0 }, },
+ },
+ {
+ "LDX_MSH standalone, preserved A",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa),
+ BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3c),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x40, 0xffeebbaa }, },
+ },
+ {
+ "LDX_MSH standalone, preserved A 2",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_IMM, 0x175e9d63),
+ BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3c),
+ BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3d),
+ BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3e),
+ BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3f),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x40, 0x175e9d63 }, },
+ },
+ {
+ "LDX_MSH standalone, test result 1",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa),
+ BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3c),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x40, 0x14 }, },
+ },
+ {
+ "LDX_MSH standalone, test result 2",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa),
+ BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3e),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x40, 0x24 }, },
+ },
+ {
+ "LDX_MSH standalone, negative offset",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa),
+ BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, -1),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x40, 0 }, },
+ },
+ {
+ "LDX_MSH standalone, negative offset 2",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa),
+ BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, SKF_LL_OFF + 0x3e),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x40, 0x24 }, },
+ },
+ {
+ "LDX_MSH standalone, out of bounds",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa),
+ BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x40),
+ BPF_STMT(BPF_MISC | BPF_TXA, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC,
+ { [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
+ { {0x40, 0 }, },
+ },
+ /*
+ * verify that the interpreter or JIT correctly sets A and X
+ * to 0.
+ */
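+	/*
+	 * Classic BPF guarantees that A and X start out as zero, so reading
+	 * them before any assignment must yield 0 rather than stale values
+	 * left over from a previous run.
+	 */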
+ {
+ "ADD default X",
+ .u.insns = {
+ /*
+ * A = 0x42
+ * A = A + X
+ * ret A
+ */
+ BPF_STMT(BPF_LD | BPF_IMM, 0x42),
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC | FLAG_NO_DATA,
+ {},
+ { {0x1, 0x42 } },
+ },
+ {
+ "ADD default A",
+ .u.insns = {
+ /*
+ * A = A + 0x42
+ * ret A
+ */
+ BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 0x42),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC | FLAG_NO_DATA,
+ {},
+ { {0x1, 0x42 } },
+ },
+ {
+ "SUB default X",
+ .u.insns = {
+ /*
+ * A = 0x66
+ * A = A - X
+ * ret A
+ */
+ BPF_STMT(BPF_LD | BPF_IMM, 0x66),
+ BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC | FLAG_NO_DATA,
+ {},
+ { {0x1, 0x66 } },
+ },
+ {
+ "SUB default A",
+ .u.insns = {
+ /*
+ * A = A - -0x66
+ * ret A
+ */
+ BPF_STMT(BPF_ALU | BPF_SUB | BPF_K, -0x66),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC | FLAG_NO_DATA,
+ {},
+ { {0x1, 0x66 } },
+ },
+ {
+ "MUL default X",
+ .u.insns = {
+ /*
+ * A = 0x42
+ * A = A * X
+ * ret A
+ */
+ BPF_STMT(BPF_LD | BPF_IMM, 0x42),
+ BPF_STMT(BPF_ALU | BPF_MUL | BPF_X, 0),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC | FLAG_NO_DATA,
+ {},
+ { {0x1, 0x0 } },
+ },
+ {
+ "MUL default A",
+ .u.insns = {
+ /*
+ * A = A * 0x66
+ * ret A
+ */
+ BPF_STMT(BPF_ALU | BPF_MUL | BPF_K, 0x66),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC | FLAG_NO_DATA,
+ {},
+ { {0x1, 0x0 } },
+ },
+ {
+ "DIV default X",
+ .u.insns = {
+ /*
+ * A = 0x42
+			 * A = A / X ; this halts the filter execution if X is 0
+ * ret 0x42
+ */
+ BPF_STMT(BPF_LD | BPF_IMM, 0x42),
+ BPF_STMT(BPF_ALU | BPF_DIV | BPF_X, 0),
+ BPF_STMT(BPF_RET | BPF_K, 0x42),
+ },
+ CLASSIC | FLAG_NO_DATA,
+ {},
+ { {0x1, 0x0 } },
+ },
+ {
+ "DIV default A",
+ .u.insns = {
+ /*
+ * A = A / 1
+ * ret A
+ */
+ BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 0x1),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC | FLAG_NO_DATA,
+ {},
+ { {0x1, 0x0 } },
+ },
+ {
+ "MOD default X",
+ .u.insns = {
+ /*
+ * A = 0x42
+			 * A = A mod X ; this halts the filter execution if X is 0
+ * ret 0x42
+ */
+ BPF_STMT(BPF_LD | BPF_IMM, 0x42),
+ BPF_STMT(BPF_ALU | BPF_MOD | BPF_X, 0),
+ BPF_STMT(BPF_RET | BPF_K, 0x42),
+ },
+ CLASSIC | FLAG_NO_DATA,
+ {},
+ { {0x1, 0x0 } },
+ },
+ {
+ "MOD default A",
+ .u.insns = {
+ /*
+ * A = A mod 1
+ * ret A
+ */
+ BPF_STMT(BPF_ALU | BPF_MOD | BPF_K, 0x1),
+ BPF_STMT(BPF_RET | BPF_A, 0x0),
+ },
+ CLASSIC | FLAG_NO_DATA,
+ {},
+ { {0x1, 0x0 } },
+ },
+ {
+ "JMP EQ default A",
+ .u.insns = {
+ /*
+ * cmp A, 0x0, 0, 1
+ * ret 0x42
+ * ret 0x66
+ */
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x0, 0, 1),
+ BPF_STMT(BPF_RET | BPF_K, 0x42),
+ BPF_STMT(BPF_RET | BPF_K, 0x66),
+ },
+ CLASSIC | FLAG_NO_DATA,
+ {},
+ { {0x1, 0x42 } },
+ },
+ {
+ "JMP EQ default X",
+ .u.insns = {
+ /*
+ * A = 0x0
+ * cmp A, X, 0, 1
+ * ret 0x42
+ * ret 0x66
+ */
+ BPF_STMT(BPF_LD | BPF_IMM, 0x0),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_X, 0x0, 0, 1),
+ BPF_STMT(BPF_RET | BPF_K, 0x42),
+ BPF_STMT(BPF_RET | BPF_K, 0x66),
+ },
+ CLASSIC | FLAG_NO_DATA,
+ {},
+ { {0x1, 0x42 } },
+ },
+ /* Checking interpreter vs JIT wrt sign-extended immediates. */
+ {
+ "JNE signed compare, test 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R1, 0xfefbbc12),
+ BPF_ALU32_IMM(BPF_MOV, R3, 0xffff0000),
+ BPF_MOV64_REG(R2, R1),
+ BPF_ALU64_REG(BPF_AND, R2, R3),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_IMM(BPF_JNE, R2, -17104896, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 2),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
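+ /*
+ * Worked example for test 1: the 32-bit moves zero-extend, so
+ * R2 = 0xfefbbc12 & 0xffff0000 = 0x00000000fefb0000. The JNE immediate
+ * -17104896 (0xfefb0000) is sign-extended to 0xfffffffffefb0000 before
+ * the 64-bit compare, so the operands differ, the branch is taken and
+ * R0 remains 1. The tests that follow cover the remaining combinations
+ * of positive and negative constants, register operands and LD_IMM64.
+ */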
+ {
+ "JNE signed compare, test 2",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R1, 0xfefbbc12),
+ BPF_ALU32_IMM(BPF_MOV, R3, 0xffff0000),
+ BPF_MOV64_REG(R2, R1),
+ BPF_ALU64_REG(BPF_AND, R2, R3),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_IMM(BPF_JNE, R2, 0xfefb0000, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 2),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JNE signed compare, test 3",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R1, 0xfefbbc12),
+ BPF_ALU32_IMM(BPF_MOV, R3, 0xffff0000),
+ BPF_ALU32_IMM(BPF_MOV, R4, 0xfefb0000),
+ BPF_MOV64_REG(R2, R1),
+ BPF_ALU64_REG(BPF_AND, R2, R3),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_REG(BPF_JNE, R2, R4, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 2),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 2 } },
+ },
+ {
+ "JNE signed compare, test 4",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, -17104896),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_IMM(BPF_JNE, R1, -17104896, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 2),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 2 } },
+ },
+ {
+ "JNE signed compare, test 5",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0xfefb0000),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_IMM(BPF_JNE, R1, 0xfefb0000, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 2),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JNE signed compare, test 6",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x7efb0000),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_IMM(BPF_JNE, R1, 0x7efb0000, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 2),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 2 } },
+ },
+ {
+ "JNE signed compare, test 7",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_IMM, 0xffff0000),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_LD | BPF_IMM, 0xfefbbc12),
+ BPF_STMT(BPF_ALU | BPF_AND | BPF_X, 0),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0xfefb0000, 1, 0),
+ BPF_STMT(BPF_RET | BPF_K, 1),
+ BPF_STMT(BPF_RET | BPF_K, 2),
+ },
+ CLASSIC | FLAG_NO_DATA,
+ {},
+ { { 0, 2 } },
+ },
+ /* BPF_LDX_MEM with operand aliasing */
+ {
+ "LDX_MEM_B: operand register aliasing",
+ .u.insns_int = {
+ BPF_ST_MEM(BPF_B, R10, -8, 123),
+ BPF_MOV64_REG(R0, R10),
+ BPF_LDX_MEM(BPF_B, R0, R0, -8),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 123 } },
+ .stack_depth = 8,
+ },
+ {
+ "LDX_MEM_H: operand register aliasing",
+ .u.insns_int = {
+ BPF_ST_MEM(BPF_H, R10, -8, 12345),
+ BPF_MOV64_REG(R0, R10),
+ BPF_LDX_MEM(BPF_H, R0, R0, -8),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 12345 } },
+ .stack_depth = 8,
+ },
+ {
+ "LDX_MEM_W: operand register aliasing",
+ .u.insns_int = {
+ BPF_ST_MEM(BPF_W, R10, -8, 123456789),
+ BPF_MOV64_REG(R0, R10),
+ BPF_LDX_MEM(BPF_W, R0, R0, -8),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 123456789 } },
+ .stack_depth = 8,
+ },
+ {
+ "LDX_MEM_DW: operand register aliasing",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x123456789abcdefULL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -8),
+ BPF_MOV64_REG(R0, R10),
+ BPF_LDX_MEM(BPF_DW, R0, R0, -8),
+ BPF_ALU64_REG(BPF_SUB, R0, R1),
+ BPF_MOV64_REG(R1, R0),
+ BPF_ALU64_IMM(BPF_RSH, R1, 32),
+ BPF_ALU64_REG(BPF_OR, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 8,
+ },
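+ /*
+ * The tail of the DW test above is a recurring idiom in this file for
+ * folding a 64-bit comparison into a 32-bit test result:
+ *
+ *	R0 = loaded - expected;
+ *	R0 |= R0 >> 32;
+ *
+ * R0 is zero only if all 64 bits matched, so a corrupted upper half
+ * cannot hide behind the 32-bit return value.
+ */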
+ /*
+ * Register (non-)clobbering tests for the case where a JIT implements
+ * complex ALU or ATOMIC operations via function calls. If so, the
+ * function call must be transparent to the eBPF registers. The JIT
+ * must therefore save and restore relevant registers across the call.
+ * The following tests check that the eBPF registers retain their
+ * values after such an operation. Mainly intended for complex ALU
+ * and atomic operations, but we run it for all of them. You never know...
+ *
+ * Note that each operation should be tested twice with different
+ * destinations, to check preservation for all registers.
+ */
+#define BPF_TEST_CLOBBER_ALU(alu, op, dst, src) \
+ { \
+ #alu "_" #op " to " #dst ": no clobbering", \
+ .u.insns_int = { \
+ BPF_ALU64_IMM(BPF_MOV, R0, R0), \
+ BPF_ALU64_IMM(BPF_MOV, R1, R1), \
+ BPF_ALU64_IMM(BPF_MOV, R2, R2), \
+ BPF_ALU64_IMM(BPF_MOV, R3, R3), \
+ BPF_ALU64_IMM(BPF_MOV, R4, R4), \
+ BPF_ALU64_IMM(BPF_MOV, R5, R5), \
+ BPF_ALU64_IMM(BPF_MOV, R6, R6), \
+ BPF_ALU64_IMM(BPF_MOV, R7, R7), \
+ BPF_ALU64_IMM(BPF_MOV, R8, R8), \
+ BPF_ALU64_IMM(BPF_MOV, R9, R9), \
+ BPF_##alu(BPF_##op, dst, src), \
+ BPF_ALU32_IMM(BPF_MOV, dst, dst), \
+ BPF_JMP_IMM(BPF_JNE, R0, R0, 10), \
+ BPF_JMP_IMM(BPF_JNE, R1, R1, 9), \
+ BPF_JMP_IMM(BPF_JNE, R2, R2, 8), \
+ BPF_JMP_IMM(BPF_JNE, R3, R3, 7), \
+ BPF_JMP_IMM(BPF_JNE, R4, R4, 6), \
+ BPF_JMP_IMM(BPF_JNE, R5, R5, 5), \
+ BPF_JMP_IMM(BPF_JNE, R6, R6, 4), \
+ BPF_JMP_IMM(BPF_JNE, R7, R7, 3), \
+ BPF_JMP_IMM(BPF_JNE, R8, R8, 2), \
+ BPF_JMP_IMM(BPF_JNE, R9, R9, 1), \
+ BPF_ALU64_IMM(BPF_MOV, R0, 1), \
+ BPF_EXIT_INSN(), \
+ }, \
+ INTERNAL, \
+ { }, \
+ { { 0, 1 } } \
+ }
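+ /*
+ * The initialization idiom in this macro uses each register macro both
+ * as a destination and as an immediate: assuming R0..R9 expand to
+ * BPF_REG_0..BPF_REG_9 as defined earlier in this file, the statement
+ * BPF_ALU64_IMM(BPF_MOV, R1, R1) loads the constant 1 into R1, and so
+ * on. Each register thus holds its own number, which is exactly what
+ * the JNE ladder after the tested operation checks, so a register
+ * clobbered by a hidden helper call is reported as a failure.
+ */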
+ /* ALU64 operations, register clobbering */
+ BPF_TEST_CLOBBER_ALU(ALU64_IMM, AND, R8, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU64_IMM, AND, R9, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU64_IMM, OR, R8, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU64_IMM, OR, R9, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU64_IMM, XOR, R8, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU64_IMM, XOR, R9, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU64_IMM, LSH, R8, 12),
+ BPF_TEST_CLOBBER_ALU(ALU64_IMM, LSH, R9, 12),
+ BPF_TEST_CLOBBER_ALU(ALU64_IMM, RSH, R8, 12),
+ BPF_TEST_CLOBBER_ALU(ALU64_IMM, RSH, R9, 12),
+ BPF_TEST_CLOBBER_ALU(ALU64_IMM, ARSH, R8, 12),
+ BPF_TEST_CLOBBER_ALU(ALU64_IMM, ARSH, R9, 12),
+ BPF_TEST_CLOBBER_ALU(ALU64_IMM, ADD, R8, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU64_IMM, ADD, R9, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU64_IMM, SUB, R8, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU64_IMM, SUB, R9, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU64_IMM, MUL, R8, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU64_IMM, MUL, R9, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU64_IMM, DIV, R8, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU64_IMM, DIV, R9, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU64_IMM, MOD, R8, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU64_IMM, MOD, R9, 123456789),
+ /* ALU32 immediate operations, register clobbering */
+ BPF_TEST_CLOBBER_ALU(ALU32_IMM, AND, R8, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU32_IMM, AND, R9, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU32_IMM, OR, R8, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU32_IMM, OR, R9, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU32_IMM, XOR, R8, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU32_IMM, XOR, R9, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU32_IMM, LSH, R8, 12),
+ BPF_TEST_CLOBBER_ALU(ALU32_IMM, LSH, R9, 12),
+ BPF_TEST_CLOBBER_ALU(ALU32_IMM, RSH, R8, 12),
+ BPF_TEST_CLOBBER_ALU(ALU32_IMM, RSH, R9, 12),
+ BPF_TEST_CLOBBER_ALU(ALU32_IMM, ARSH, R8, 12),
+ BPF_TEST_CLOBBER_ALU(ALU32_IMM, ARSH, R9, 12),
+ BPF_TEST_CLOBBER_ALU(ALU32_IMM, ADD, R8, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU32_IMM, ADD, R9, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU32_IMM, SUB, R8, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU32_IMM, SUB, R9, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU32_IMM, MUL, R8, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU32_IMM, MUL, R9, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU32_IMM, DIV, R8, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU32_IMM, DIV, R9, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU32_IMM, MOD, R8, 123456789),
+ BPF_TEST_CLOBBER_ALU(ALU32_IMM, MOD, R9, 123456789),
+ /* ALU64 register operations, register clobbering */
+ BPF_TEST_CLOBBER_ALU(ALU64_REG, AND, R8, R1),
+ BPF_TEST_CLOBBER_ALU(ALU64_REG, AND, R9, R1),
+ BPF_TEST_CLOBBER_ALU(ALU64_REG, OR, R8, R1),
+ BPF_TEST_CLOBBER_ALU(ALU64_REG, OR, R9, R1),
+ BPF_TEST_CLOBBER_ALU(ALU64_REG, XOR, R8, R1),
+ BPF_TEST_CLOBBER_ALU(ALU64_REG, XOR, R9, R1),
+ BPF_TEST_CLOBBER_ALU(ALU64_REG, LSH, R8, R1),
+ BPF_TEST_CLOBBER_ALU(ALU64_REG, LSH, R9, R1),
+ BPF_TEST_CLOBBER_ALU(ALU64_REG, RSH, R8, R1),
+ BPF_TEST_CLOBBER_ALU(ALU64_REG, RSH, R9, R1),
+ BPF_TEST_CLOBBER_ALU(ALU64_REG, ARSH, R8, R1),
+ BPF_TEST_CLOBBER_ALU(ALU64_REG, ARSH, R9, R1),
+ BPF_TEST_CLOBBER_ALU(ALU64_REG, ADD, R8, R1),
+ BPF_TEST_CLOBBER_ALU(ALU64_REG, ADD, R9, R1),
+ BPF_TEST_CLOBBER_ALU(ALU64_REG, SUB, R8, R1),
+ BPF_TEST_CLOBBER_ALU(ALU64_REG, SUB, R9, R1),
+ BPF_TEST_CLOBBER_ALU(ALU64_REG, MUL, R8, R1),
+ BPF_TEST_CLOBBER_ALU(ALU64_REG, MUL, R9, R1),
+ BPF_TEST_CLOBBER_ALU(ALU64_REG, DIV, R8, R1),
+ BPF_TEST_CLOBBER_ALU(ALU64_REG, DIV, R9, R1),
+ BPF_TEST_CLOBBER_ALU(ALU64_REG, MOD, R8, R1),
+ BPF_TEST_CLOBBER_ALU(ALU64_REG, MOD, R9, R1),
+ /* ALU32 register operations, register clobbering */
+ BPF_TEST_CLOBBER_ALU(ALU32_REG, AND, R8, R1),
+ BPF_TEST_CLOBBER_ALU(ALU32_REG, AND, R9, R1),
+ BPF_TEST_CLOBBER_ALU(ALU32_REG, OR, R8, R1),
+ BPF_TEST_CLOBBER_ALU(ALU32_REG, OR, R9, R1),
+ BPF_TEST_CLOBBER_ALU(ALU32_REG, XOR, R8, R1),
+ BPF_TEST_CLOBBER_ALU(ALU32_REG, XOR, R9, R1),
+ BPF_TEST_CLOBBER_ALU(ALU32_REG, LSH, R8, R1),
+ BPF_TEST_CLOBBER_ALU(ALU32_REG, LSH, R9, R1),
+ BPF_TEST_CLOBBER_ALU(ALU32_REG, RSH, R8, R1),
+ BPF_TEST_CLOBBER_ALU(ALU32_REG, RSH, R9, R1),
+ BPF_TEST_CLOBBER_ALU(ALU32_REG, ARSH, R8, R1),
+ BPF_TEST_CLOBBER_ALU(ALU32_REG, ARSH, R9, R1),
+ BPF_TEST_CLOBBER_ALU(ALU32_REG, ADD, R8, R1),
+ BPF_TEST_CLOBBER_ALU(ALU32_REG, ADD, R9, R1),
+ BPF_TEST_CLOBBER_ALU(ALU32_REG, SUB, R8, R1),
+ BPF_TEST_CLOBBER_ALU(ALU32_REG, SUB, R9, R1),
+ BPF_TEST_CLOBBER_ALU(ALU32_REG, MUL, R8, R1),
+ BPF_TEST_CLOBBER_ALU(ALU32_REG, MUL, R9, R1),
+ BPF_TEST_CLOBBER_ALU(ALU32_REG, DIV, R8, R1),
+ BPF_TEST_CLOBBER_ALU(ALU32_REG, DIV, R9, R1),
+ BPF_TEST_CLOBBER_ALU(ALU32_REG, MOD, R8, R1),
+ BPF_TEST_CLOBBER_ALU(ALU32_REG, MOD, R9, R1),
+#undef BPF_TEST_CLOBBER_ALU
+#define BPF_TEST_CLOBBER_ATOMIC(width, op) \
+ { \
+ "Atomic_" #width " " #op ": no clobbering", \
+ .u.insns_int = { \
+ BPF_ALU64_IMM(BPF_MOV, R0, 0), \
+ BPF_ALU64_IMM(BPF_MOV, R1, 1), \
+ BPF_ALU64_IMM(BPF_MOV, R2, 2), \
+ BPF_ALU64_IMM(BPF_MOV, R3, 3), \
+ BPF_ALU64_IMM(BPF_MOV, R4, 4), \
+ BPF_ALU64_IMM(BPF_MOV, R5, 5), \
+ BPF_ALU64_IMM(BPF_MOV, R6, 6), \
+ BPF_ALU64_IMM(BPF_MOV, R7, 7), \
+ BPF_ALU64_IMM(BPF_MOV, R8, 8), \
+ BPF_ALU64_IMM(BPF_MOV, R9, 9), \
+ BPF_ST_MEM(width, R10, -8, \
+ (op) == BPF_CMPXCHG ? 0 : \
+ (op) & BPF_FETCH ? 1 : 0), \
+ BPF_ATOMIC_OP(width, op, R10, R1, -8), \
+ BPF_JMP_IMM(BPF_JNE, R0, 0, 10), \
+ BPF_JMP_IMM(BPF_JNE, R1, 1, 9), \
+ BPF_JMP_IMM(BPF_JNE, R2, 2, 8), \
+ BPF_JMP_IMM(BPF_JNE, R3, 3, 7), \
+ BPF_JMP_IMM(BPF_JNE, R4, 4, 6), \
+ BPF_JMP_IMM(BPF_JNE, R5, 5, 5), \
+ BPF_JMP_IMM(BPF_JNE, R6, 6, 4), \
+ BPF_JMP_IMM(BPF_JNE, R7, 7, 3), \
+ BPF_JMP_IMM(BPF_JNE, R8, 8, 2), \
+ BPF_JMP_IMM(BPF_JNE, R9, 9, 1), \
+ BPF_ALU64_IMM(BPF_MOV, R0, 1), \
+ BPF_EXIT_INSN(), \
+ }, \
+ INTERNAL, \
+ { }, \
+ { { 0, 1 } }, \
+ .stack_depth = 8, \
+ }
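+ /*
+ * The seed written to the stack above is chosen so that the JNE ladder
+ * still holds after the atomic returns a value: CMPXCHG writes the old
+ * memory value into R0, which must remain 0, so its seed is 0; XCHG and
+ * the FETCH variants write it into the source register R1, which must
+ * remain 1, so their seed is 1. The CMPXCHG case is tested first
+ * because its encoding also carries the BPF_FETCH bit.
+ */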
+ /* 64-bit atomic operations, register clobbering */
+ BPF_TEST_CLOBBER_ATOMIC(BPF_DW, BPF_ADD),
+ BPF_TEST_CLOBBER_ATOMIC(BPF_DW, BPF_AND),
+ BPF_TEST_CLOBBER_ATOMIC(BPF_DW, BPF_OR),
+ BPF_TEST_CLOBBER_ATOMIC(BPF_DW, BPF_XOR),
+ BPF_TEST_CLOBBER_ATOMIC(BPF_DW, BPF_ADD | BPF_FETCH),
+ BPF_TEST_CLOBBER_ATOMIC(BPF_DW, BPF_AND | BPF_FETCH),
+ BPF_TEST_CLOBBER_ATOMIC(BPF_DW, BPF_OR | BPF_FETCH),
+ BPF_TEST_CLOBBER_ATOMIC(BPF_DW, BPF_XOR | BPF_FETCH),
+ BPF_TEST_CLOBBER_ATOMIC(BPF_DW, BPF_XCHG),
+ BPF_TEST_CLOBBER_ATOMIC(BPF_DW, BPF_CMPXCHG),
+ /* 32-bit atomic operations, register clobbering */
+ BPF_TEST_CLOBBER_ATOMIC(BPF_W, BPF_ADD),
+ BPF_TEST_CLOBBER_ATOMIC(BPF_W, BPF_AND),
+ BPF_TEST_CLOBBER_ATOMIC(BPF_W, BPF_OR),
+ BPF_TEST_CLOBBER_ATOMIC(BPF_W, BPF_XOR),
+ BPF_TEST_CLOBBER_ATOMIC(BPF_W, BPF_ADD | BPF_FETCH),
+ BPF_TEST_CLOBBER_ATOMIC(BPF_W, BPF_AND | BPF_FETCH),
+ BPF_TEST_CLOBBER_ATOMIC(BPF_W, BPF_OR | BPF_FETCH),
+ BPF_TEST_CLOBBER_ATOMIC(BPF_W, BPF_XOR | BPF_FETCH),
+ BPF_TEST_CLOBBER_ATOMIC(BPF_W, BPF_XCHG),
+ BPF_TEST_CLOBBER_ATOMIC(BPF_W, BPF_CMPXCHG),
+#undef BPF_TEST_CLOBBER_ATOMIC
+ /* Checking that ALU32 src is not zero extended in place */
+#define BPF_ALU32_SRC_ZEXT(op) \
+ { \
+ "ALU32_" #op "_X: src preserved in zext", \
+ .u.insns_int = { \
+ BPF_LD_IMM64(R1, 0x0123456789acbdefULL),\
+ BPF_LD_IMM64(R2, 0xfedcba9876543210ULL),\
+ BPF_ALU64_REG(BPF_MOV, R0, R1), \
+ BPF_ALU32_REG(BPF_##op, R2, R1), \
+ BPF_ALU64_REG(BPF_SUB, R0, R1), \
+ BPF_ALU64_REG(BPF_MOV, R1, R0), \
+ BPF_ALU64_IMM(BPF_RSH, R1, 32), \
+ BPF_ALU64_REG(BPF_OR, R0, R1), \
+ BPF_EXIT_INSN(), \
+ }, \
+ INTERNAL, \
+ { }, \
+ { { 0, 0 } }, \
+ }
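+ /*
+ * In this macro R1 is only the *source* of the ALU32 operation; the
+ * destination R2 may legally be truncated, but R1 must keep its upper
+ * half. The subtract-and-fold sequence therefore returns nonzero if a
+ * JIT zero-extends the source register in place while narrowing the
+ * operation to 32 bits.
+ */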
+ BPF_ALU32_SRC_ZEXT(MOV),
+ BPF_ALU32_SRC_ZEXT(AND),
+ BPF_ALU32_SRC_ZEXT(OR),
+ BPF_ALU32_SRC_ZEXT(XOR),
+ BPF_ALU32_SRC_ZEXT(ADD),
+ BPF_ALU32_SRC_ZEXT(SUB),
+ BPF_ALU32_SRC_ZEXT(MUL),
+ BPF_ALU32_SRC_ZEXT(DIV),
+ BPF_ALU32_SRC_ZEXT(MOD),
+#undef BPF_ALU32_SRC_ZEXT
+ /* Checking that ATOMIC32 src is not zero extended in place */
+#define BPF_ATOMIC32_SRC_ZEXT(op) \
+ { \
+ "ATOMIC_W_" #op ": src preserved in zext", \
+ .u.insns_int = { \
+ BPF_LD_IMM64(R0, 0x0123456789acbdefULL), \
+ BPF_ALU64_REG(BPF_MOV, R1, R0), \
+ BPF_ST_MEM(BPF_W, R10, -4, 0), \
+ BPF_ATOMIC_OP(BPF_W, BPF_##op, R10, R1, -4), \
+ BPF_ALU64_REG(BPF_SUB, R0, R1), \
+ BPF_ALU64_REG(BPF_MOV, R1, R0), \
+ BPF_ALU64_IMM(BPF_RSH, R1, 32), \
+ BPF_ALU64_REG(BPF_OR, R0, R1), \
+ BPF_EXIT_INSN(), \
+ }, \
+ INTERNAL, \
+ { }, \
+ { { 0, 0 } }, \
+ .stack_depth = 8, \
+ }
+ BPF_ATOMIC32_SRC_ZEXT(ADD),
+ BPF_ATOMIC32_SRC_ZEXT(AND),
+ BPF_ATOMIC32_SRC_ZEXT(OR),
+ BPF_ATOMIC32_SRC_ZEXT(XOR),
+#undef BPF_ATOMIC32_SRC_ZEXT
+ /* Checking that CMPXCHG32 src is not zero extended in place */
+ {
+ "ATOMIC_W_CMPXCHG: src preserved in zext",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x0123456789acbdefULL),
+ BPF_ALU64_REG(BPF_MOV, R2, R1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_ST_MEM(BPF_W, R10, -4, 0),
+ BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R1, -4),
+ BPF_ALU64_REG(BPF_SUB, R1, R2),
+ BPF_ALU64_REG(BPF_MOV, R2, R1),
+ BPF_ALU64_IMM(BPF_RSH, R2, 32),
+ BPF_ALU64_REG(BPF_OR, R1, R2),
+ BPF_ALU64_REG(BPF_MOV, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 8,
+ },
+ /* Checking that JMP32 immediate dst is not zero extended in place */
+#define BPF_JMP32_IMM_ZEXT(op) \
+ { \
+ "JMP32_" #op "_K: operand preserved in zext", \
+ .u.insns_int = { \
+ BPF_LD_IMM64(R0, 0x0123456789acbdefULL),\
+ BPF_ALU64_REG(BPF_MOV, R1, R0), \
+ BPF_JMP32_IMM(BPF_##op, R0, 1234, 1), \
+ BPF_JMP_A(0), /* Nop */ \
+ BPF_ALU64_REG(BPF_SUB, R0, R1), \
+ BPF_ALU64_REG(BPF_MOV, R1, R0), \
+ BPF_ALU64_IMM(BPF_RSH, R1, 32), \
+ BPF_ALU64_REG(BPF_OR, R0, R1), \
+ BPF_EXIT_INSN(), \
+ }, \
+ INTERNAL, \
+ { }, \
+ { { 0, 0 } }, \
+ }
+ BPF_JMP32_IMM_ZEXT(JEQ),
+ BPF_JMP32_IMM_ZEXT(JNE),
+ BPF_JMP32_IMM_ZEXT(JSET),
+ BPF_JMP32_IMM_ZEXT(JGT),
+ BPF_JMP32_IMM_ZEXT(JGE),
+ BPF_JMP32_IMM_ZEXT(JLT),
+ BPF_JMP32_IMM_ZEXT(JLE),
+ BPF_JMP32_IMM_ZEXT(JSGT),
+ BPF_JMP32_IMM_ZEXT(JSGE),
+ BPF_JMP32_IMM_ZEXT(JSLT),
+ BPF_JMP32_IMM_ZEXT(JSLE),
+#undef BPF_JMP32_IMM_ZEXT
+ /* Checking that JMP32 dst & src are not zero extended in place */
+#define BPF_JMP32_REG_ZEXT(op) \
+ { \
+ "JMP32_" #op "_X: operands preserved in zext", \
+ .u.insns_int = { \
+ BPF_LD_IMM64(R0, 0x0123456789acbdefULL),\
+ BPF_LD_IMM64(R1, 0xfedcba9876543210ULL),\
+ BPF_ALU64_REG(BPF_MOV, R2, R0), \
+ BPF_ALU64_REG(BPF_MOV, R3, R1), \
+ BPF_JMP32_REG(BPF_##op, R0, R1, 1), \
+ BPF_JMP_A(0), /* Nop */ \
+ BPF_ALU64_REG(BPF_SUB, R0, R2), \
+ BPF_ALU64_REG(BPF_SUB, R1, R3), \
+ BPF_ALU64_REG(BPF_OR, R0, R1), \
+ BPF_ALU64_REG(BPF_MOV, R1, R0), \
+ BPF_ALU64_IMM(BPF_RSH, R1, 32), \
+ BPF_ALU64_REG(BPF_OR, R0, R1), \
+ BPF_EXIT_INSN(), \
+ }, \
+ INTERNAL, \
+ { }, \
+ { { 0, 0 } }, \
+ }
+ BPF_JMP32_REG_ZEXT(JEQ),
+ BPF_JMP32_REG_ZEXT(JNE),
+ BPF_JMP32_REG_ZEXT(JSET),
+ BPF_JMP32_REG_ZEXT(JGT),
+ BPF_JMP32_REG_ZEXT(JGE),
+ BPF_JMP32_REG_ZEXT(JLT),
+ BPF_JMP32_REG_ZEXT(JLE),
+ BPF_JMP32_REG_ZEXT(JSGT),
+ BPF_JMP32_REG_ZEXT(JSGE),
+ BPF_JMP32_REG_ZEXT(JSLT),
+ BPF_JMP32_REG_ZEXT(JSLE),
+#undef BPF_JMP32_REG_ZEXT
+ /* ALU64 K register combinations */
+ {
+ "ALU64_MOV_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_mov_imm_regs,
+ },
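+ /*
+ * From here on, many tests leave the instruction array empty and rely
+ * on .fill_helper, which (as used throughout this file) generates the
+ * instruction sequence programmatically when the test is set up. This
+ * allows exhaustive sweeps over register combinations, shift amounts
+ * and operand magnitudes that would be impractical to spell out by
+ * hand.
+ */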
+ {
+ "ALU64_AND_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_and_imm_regs,
+ },
+ {
+ "ALU64_OR_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_or_imm_regs,
+ },
+ {
+ "ALU64_XOR_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_xor_imm_regs,
+ },
+ {
+ "ALU64_LSH_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_lsh_imm_regs,
+ },
+ {
+ "ALU64_RSH_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_rsh_imm_regs,
+ },
+ {
+ "ALU64_ARSH_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_arsh_imm_regs,
+ },
+ {
+ "ALU64_ADD_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_add_imm_regs,
+ },
+ {
+ "ALU64_SUB_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_sub_imm_regs,
+ },
+ {
+ "ALU64_MUL_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_mul_imm_regs,
+ },
+ {
+ "ALU64_DIV_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_div_imm_regs,
+ },
+ {
+ "ALU64_MOD_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_mod_imm_regs,
+ },
+ /* ALU32 K registers */
+ {
+ "ALU32_MOV_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_mov_imm_regs,
+ },
+ {
+ "ALU32_AND_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_and_imm_regs,
+ },
+ {
+ "ALU32_OR_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_or_imm_regs,
+ },
+ {
+ "ALU32_XOR_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_xor_imm_regs,
+ },
+ {
+ "ALU32_LSH_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_lsh_imm_regs,
+ },
+ {
+ "ALU32_RSH_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_rsh_imm_regs,
+ },
+ {
+ "ALU32_ARSH_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_arsh_imm_regs,
+ },
+ {
+ "ALU32_ADD_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_add_imm_regs,
+ },
+ {
+ "ALU32_SUB_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_sub_imm_regs,
+ },
+ {
+ "ALU32_MUL_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_mul_imm_regs,
+ },
+ {
+ "ALU32_DIV_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_div_imm_regs,
+ },
+ {
+ "ALU32_MOD_K: registers",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_mod_imm_regs,
+ },
+ /* ALU64 X register combinations */
+ {
+ "ALU64_MOV_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_mov_reg_pairs,
+ },
+ {
+ "ALU64_AND_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_and_reg_pairs,
+ },
+ {
+ "ALU64_OR_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_or_reg_pairs,
+ },
+ {
+ "ALU64_XOR_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_xor_reg_pairs,
+ },
+ {
+ "ALU64_LSH_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_lsh_reg_pairs,
+ },
+ {
+ "ALU64_RSH_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_rsh_reg_pairs,
+ },
+ {
+ "ALU64_ARSH_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_arsh_reg_pairs,
+ },
+ {
+ "ALU64_ADD_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_add_reg_pairs,
+ },
+ {
+ "ALU64_SUB_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_sub_reg_pairs,
+ },
+ {
+ "ALU64_MUL_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_mul_reg_pairs,
+ },
+ {
+ "ALU64_DIV_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_div_reg_pairs,
+ },
+ {
+ "ALU64_MOD_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_mod_reg_pairs,
+ },
+ /* ALU32 X register combinations */
+ {
+ "ALU32_MOV_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_mov_reg_pairs,
+ },
+ {
+ "ALU32_AND_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_and_reg_pairs,
+ },
+ {
+ "ALU32_OR_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_or_reg_pairs,
+ },
+ {
+ "ALU32_XOR_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_xor_reg_pairs,
+ },
+ {
+ "ALU32_LSH_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_lsh_reg_pairs,
+ },
+ {
+ "ALU32_RSH_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_rsh_reg_pairs,
+ },
+ {
+ "ALU32_ARSH_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_arsh_reg_pairs,
+ },
+ {
+ "ALU32_ADD_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_add_reg_pairs,
+ },
+ {
+ "ALU32_SUB_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_sub_reg_pairs,
+ },
+ {
+ "ALU32_MUL_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_mul_reg_pairs,
+ },
+ {
+ "ALU32_DIV_X: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_div_reg_pairs,
+ },
+ {
+ "ALU32_MOD_X register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_mod_reg_pairs,
+ },
+ /* Exhaustive test of ALU64 shift operations */
+ {
+ "ALU64_LSH_K: all shift values",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_lsh_imm,
+ },
+ {
+ "ALU64_RSH_K: all shift values",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_rsh_imm,
+ },
+ {
+ "ALU64_ARSH_K: all shift values",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_arsh_imm,
+ },
+ {
+ "ALU64_LSH_X: all shift values",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_lsh_reg,
+ },
+ {
+ "ALU64_RSH_X: all shift values",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_rsh_reg,
+ },
+ {
+ "ALU64_ARSH_X: all shift values",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_arsh_reg,
+ },
+ /* Exhaustive test of ALU32 shift operations */
+ {
+ "ALU32_LSH_K: all shift values",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_lsh_imm,
+ },
+ {
+ "ALU32_RSH_K: all shift values",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_rsh_imm,
+ },
+ {
+ "ALU32_ARSH_K: all shift values",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_arsh_imm,
+ },
+ {
+ "ALU32_LSH_X: all shift values",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_lsh_reg,
+ },
+ {
+ "ALU32_RSH_X: all shift values",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_rsh_reg,
+ },
+ {
+ "ALU32_ARSH_X: all shift values",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_arsh_reg,
+ },
+ /*
+ * Exhaustive test of ALU64 shift operations when
+ * source and destination register are the same.
+ */
+ {
+ "ALU64_LSH_X: all shift values with the same register",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_lsh_same_reg,
+ },
+ {
+ "ALU64_RSH_X: all shift values with the same register",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_rsh_same_reg,
+ },
+ {
+ "ALU64_ARSH_X: all shift values with the same register",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_arsh_same_reg,
+ },
+ /*
+ * Exhaustive test of ALU32 shift operations when
+ * source and destination register are the same.
+ */
+ {
+ "ALU32_LSH_X: all shift values with the same register",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_lsh_same_reg,
+ },
+ {
+ "ALU32_RSH_X: all shift values with the same register",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_rsh_same_reg,
+ },
+ {
+ "ALU32_ARSH_X: all shift values with the same register",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_arsh_same_reg,
+ },
+ /* ALU64 immediate magnitudes */
+ {
+ "ALU64_MOV_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_mov_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU64_AND_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_and_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU64_OR_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_or_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU64_XOR_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_xor_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU64_ADD_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_add_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU64_SUB_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_sub_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU64_MUL_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_mul_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU64_DIV_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_div_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU64_MOD_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_mod_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ /* ALU32 immediate magnitudes */
+ {
+ "ALU32_MOV_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_mov_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU32_AND_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_and_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU32_OR_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_or_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU32_XOR_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_xor_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU32_ADD_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_add_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU32_SUB_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_sub_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU32_MUL_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_mul_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU32_DIV_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_div_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU32_MOD_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_mod_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ /* ALU64 register magnitudes */
+ {
+ "ALU64_MOV_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_mov_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU64_AND_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_and_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU64_OR_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_or_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU64_XOR_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_xor_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU64_ADD_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_add_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU64_SUB_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_sub_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU64_MUL_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_mul_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU64_DIV_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_div_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU64_MOD_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu64_mod_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ /* ALU32 register magnitudes */
+ {
+ "ALU32_MOV_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_mov_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU32_AND_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_and_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU32_OR_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_or_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU32_XOR_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_xor_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU32_ADD_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_add_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU32_SUB_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_sub_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU32_MUL_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_mul_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU32_DIV_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_div_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ALU32_MOD_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_alu32_mod_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ /* LD_IMM64 immediate magnitudes and byte patterns */
+ {
+ "LD_IMM64: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_ld_imm64_magn,
+ },
+ {
+ "LD_IMM64: checker byte patterns",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_ld_imm64_checker,
+ },
+ {
+ "LD_IMM64: random positive and zero byte patterns",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_ld_imm64_pos_zero,
+ },
+ {
+ "LD_IMM64: random negative and zero byte patterns",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_ld_imm64_neg_zero,
+ },
+ {
+ "LD_IMM64: random positive and negative byte patterns",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_ld_imm64_pos_neg,
+ },
+ /* 64-bit ATOMIC register combinations */
+ {
+ "ATOMIC_DW_ADD: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic64_add_reg_pairs,
+ .stack_depth = 8,
+ },
+ {
+ "ATOMIC_DW_AND: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic64_and_reg_pairs,
+ .stack_depth = 8,
+ },
+ {
+ "ATOMIC_DW_OR: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic64_or_reg_pairs,
+ .stack_depth = 8,
+ },
+ {
+ "ATOMIC_DW_XOR: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic64_xor_reg_pairs,
+ .stack_depth = 8,
+ },
+ {
+ "ATOMIC_DW_ADD_FETCH: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic64_add_fetch_reg_pairs,
+ .stack_depth = 8,
+ },
+ {
+ "ATOMIC_DW_AND_FETCH: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic64_and_fetch_reg_pairs,
+ .stack_depth = 8,
+ },
+ {
+ "ATOMIC_DW_OR_FETCH: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic64_or_fetch_reg_pairs,
+ .stack_depth = 8,
+ },
+ {
+ "ATOMIC_DW_XOR_FETCH: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic64_xor_fetch_reg_pairs,
+ .stack_depth = 8,
+ },
+ {
+ "ATOMIC_DW_XCHG: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic64_xchg_reg_pairs,
+ .stack_depth = 8,
+ },
+ {
+ "ATOMIC_DW_CMPXCHG: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic64_cmpxchg_reg_pairs,
+ .stack_depth = 8,
+ },
+ /* 32-bit ATOMIC register combinations */
+ {
+ "ATOMIC_W_ADD: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic32_add_reg_pairs,
+ .stack_depth = 8,
+ },
+ {
+ "ATOMIC_W_AND: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic32_and_reg_pairs,
+ .stack_depth = 8,
+ },
+ {
+ "ATOMIC_W_OR: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic32_or_reg_pairs,
+ .stack_depth = 8,
+ },
+ {
+ "ATOMIC_W_XOR: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic32_xor_reg_pairs,
+ .stack_depth = 8,
+ },
+ {
+ "ATOMIC_W_ADD_FETCH: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic32_add_fetch_reg_pairs,
+ .stack_depth = 8,
+ },
+ {
+ "ATOMIC_W_AND_FETCH: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic32_and_fetch_reg_pairs,
+ .stack_depth = 8,
+ },
+ {
+ "ATOMIC_W_OR_FETCH: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic32_or_fetch_reg_pairs,
+ .stack_depth = 8,
+ },
+ {
+ "ATOMIC_W_XOR_FETCH: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic32_xor_fetch_reg_pairs,
+ .stack_depth = 8,
+ },
+ {
+ "ATOMIC_W_XCHG: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic32_xchg_reg_pairs,
+ .stack_depth = 8,
+ },
+ {
+ "ATOMIC_W_CMPXCHG: register combinations",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic32_cmpxchg_reg_pairs,
+ .stack_depth = 8,
+ },
+ /* 64-bit ATOMIC magnitudes */
+ {
+ "ATOMIC_DW_ADD: all operand magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic64_add,
+ .stack_depth = 8,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ATOMIC_DW_AND: all operand magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic64_and,
+ .stack_depth = 8,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ATOMIC_DW_OR: all operand magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic64_or,
+ .stack_depth = 8,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ATOMIC_DW_XOR: all operand magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic64_xor,
+ .stack_depth = 8,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ATOMIC_DW_ADD_FETCH: all operand magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic64_add_fetch,
+ .stack_depth = 8,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ATOMIC_DW_AND_FETCH: all operand magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic64_and_fetch,
+ .stack_depth = 8,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ATOMIC_DW_OR_FETCH: all operand magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic64_or_fetch,
+ .stack_depth = 8,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ATOMIC_DW_XOR_FETCH: all operand magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic64_xor_fetch,
+ .stack_depth = 8,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ATOMIC_DW_XCHG: all operand magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic64_xchg,
+ .stack_depth = 8,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ATOMIC_DW_CMPXCHG: all operand magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_cmpxchg64,
+ .stack_depth = 8,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ /* 32-bit ATOMIC magnitudes */
+ {
+ "ATOMIC_W_ADD: all operand magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic32_add,
+ .stack_depth = 8,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ATOMIC_W_AND: all operand magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic32_and,
+ .stack_depth = 8,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ATOMIC_W_OR: all operand magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic32_or,
+ .stack_depth = 8,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ATOMIC_W_XOR: all operand magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic32_xor,
+ .stack_depth = 8,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ATOMIC_W_ADD_FETCH: all operand magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic32_add_fetch,
+ .stack_depth = 8,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ATOMIC_W_AND_FETCH: all operand magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic32_and_fetch,
+ .stack_depth = 8,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ATOMIC_W_OR_FETCH: all operand magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic32_or_fetch,
+ .stack_depth = 8,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ATOMIC_W_XOR_FETCH: all operand magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic32_xor_fetch,
+ .stack_depth = 8,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ATOMIC_W_XCHG: all operand magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_atomic32_xchg,
+ .stack_depth = 8,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "ATOMIC_W_CMPXCHG: all operand magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_cmpxchg32,
+ .stack_depth = 8,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ /* JMP immediate magnitudes */
+ {
+ "JMP_JSET_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jset_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JEQ_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jeq_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JNE_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jne_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JGT_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jgt_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JGE_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jge_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JLT_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jlt_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JLE_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jle_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JSGT_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jsgt_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JSGE_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jsge_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JSLT_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jslt_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JSLE_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jsle_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ /* JMP register magnitudes */
+ {
+ "JMP_JSET_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jset_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JEQ_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jeq_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JNE_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jne_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JGT_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jgt_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JGE_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jge_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JLT_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jlt_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JLE_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jle_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JSGT_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jsgt_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JSGE_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jsge_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JSLT_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jslt_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP_JSLE_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp_jsle_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ /* JMP32 immediate magnitudes */
+ {
+ "JMP32_JSET_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jset_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JEQ_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jeq_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JNE_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jne_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JGT_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jgt_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JGE_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jge_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JLT_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jlt_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JLE_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jle_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JSGT_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jsgt_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JSGE_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jsge_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JSLT_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jslt_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JSLE_K: all immediate value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jsle_imm,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ /* JMP32 register magnitudes */
+ {
+ "JMP32_JSET_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jset_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JEQ_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jeq_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JNE_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jne_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JGT_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jgt_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JGE_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jge_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JLT_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jlt_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JLE_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jle_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JSGT_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jsgt_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JSGE_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jsge_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JSLT_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jslt_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ {
+ "JMP32_JSLE_X: all register value magnitudes",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_jmp32_jsle_reg,
+ .nr_testruns = NR_PATTERN_RUNS,
+ },
+ /* Conditional jumps with constant decision */
+ {
+ "JMP_JSET_K: imm = 0 -> never taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_IMM(BPF_JSET, R1, 0, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 0 } },
+ },
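+ /*
+ * R1 & 0 is always 0, so the JSET above can never be taken and a JIT
+ * may legitimately drop the branch altogether; the test verifies that
+ * the fall-through path still executes. The following tests probe the
+ * analogous always/never-taken boundaries of the unsigned and signed
+ * compares at 0, U32_MAX, S32_MIN and S32_MAX, as well as jumps whose
+ * outcome is fixed because both operands are the same register.
+ */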
+ {
+ "JMP_JLT_K: imm = 0 -> never taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_IMM(BPF_JLT, R1, 0, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 0 } },
+ },
+ {
+ "JMP_JGE_K: imm = 0 -> always taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_IMM(BPF_JGE, R1, 0, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JGT_K: imm = 0xffffffff -> never taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_IMM(BPF_JGT, R1, U32_MAX, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 0 } },
+ },
+ {
+ "JMP_JLE_K: imm = 0xffffffff -> always taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_IMM(BPF_JLE, R1, U32_MAX, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP32_JSGT_K: imm = 0x7fffffff -> never taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP32_IMM(BPF_JSGT, R1, S32_MAX, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 0 } },
+ },
+ {
+ "JMP32_JSGE_K: imm = -0x80000000 -> always taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP32_IMM(BPF_JSGE, R1, S32_MIN, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP32_JSLT_K: imm = -0x80000000 -> never taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP32_IMM(BPF_JSLT, R1, S32_MIN, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 0 } },
+ },
+ {
+ "JMP32_JSLE_K: imm = 0x7fffffff -> always taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP32_IMM(BPF_JSLE, R1, S32_MAX, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JEQ_X: dst = src -> always taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_REG(BPF_JEQ, R1, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JGE_X: dst = src -> always taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_REG(BPF_JGE, R1, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JLE_X: dst = src -> always taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_REG(BPF_JLE, R1, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JSGE_X: dst = src -> always taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_REG(BPF_JSGE, R1, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JSLE_X: dst = src -> always taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_REG(BPF_JSLE, R1, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JMP_JNE_X: dst = src -> never taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_REG(BPF_JNE, R1, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 0 } },
+ },
+ {
+ "JMP_JGT_X: dst = src -> never taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_REG(BPF_JGT, R1, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 0 } },
+ },
+ {
+ "JMP_JLT_X: dst = src -> never taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_REG(BPF_JLT, R1, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 0 } },
+ },
+ {
+ "JMP_JSGT_X: dst = src -> never taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_REG(BPF_JSGT, R1, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 0 } },
+ },
+ {
+ "JMP_JSLT_X: dst = src -> never taken",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_REG(BPF_JSLT, R1, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 0 } },
+ },
+ /* Short relative jumps */
+ {
+ "Short relative jump: offset=0",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_JMP_IMM(BPF_JEQ, R0, 0, 0),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, -1),
+ },
+ INTERNAL | FLAG_NO_DATA | FLAG_VERIFIER_ZEXT,
+ { },
+ { { 0, 0 } },
+ },
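+ /*
+ * The jump offset counts instructions to skip after the branch, so
+ * offset=0 is a plain fall-through to the exit and R0 stays 0. The
+ * trailing MOV R0, -1 is dead code that only shows up in the result if
+ * a JIT miscomputes a short branch target, which is what this group of
+ * tests is probing.
+ */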
+ {
+ "Short relative jump: offset=1",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_JMP_IMM(BPF_JEQ, R0, 0, 1),
+ BPF_ALU32_IMM(BPF_ADD, R0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, -1),
+ },
+ INTERNAL | FLAG_NO_DATA | FLAG_VERIFIER_ZEXT,
+ { },
+ { { 0, 0 } },
+ },
+ {
+ "Short relative jump: offset=2",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_JMP_IMM(BPF_JEQ, R0, 0, 2),
+ BPF_ALU32_IMM(BPF_ADD, R0, 1),
+ BPF_ALU32_IMM(BPF_ADD, R0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, -1),
+ },
+ INTERNAL | FLAG_NO_DATA | FLAG_VERIFIER_ZEXT,
+ { },
+ { { 0, 0 } },
+ },
+ {
+ "Short relative jump: offset=3",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_JMP_IMM(BPF_JEQ, R0, 0, 3),
+ BPF_ALU32_IMM(BPF_ADD, R0, 1),
+ BPF_ALU32_IMM(BPF_ADD, R0, 1),
+ BPF_ALU32_IMM(BPF_ADD, R0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, -1),
+ },
+ INTERNAL | FLAG_NO_DATA | FLAG_VERIFIER_ZEXT,
+ { },
+ { { 0, 0 } },
+ },
+ {
+ "Short relative jump: offset=4",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_JMP_IMM(BPF_JEQ, R0, 0, 4),
+ BPF_ALU32_IMM(BPF_ADD, R0, 1),
+ BPF_ALU32_IMM(BPF_ADD, R0, 1),
+ BPF_ALU32_IMM(BPF_ADD, R0, 1),
+ BPF_ALU32_IMM(BPF_ADD, R0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, -1),
+ },
+ INTERNAL | FLAG_NO_DATA | FLAG_VERIFIER_ZEXT,
+ { },
+ { { 0, 0 } },
+ },
+ /* Conditional branch conversions */
+ {
+ "Long conditional jump: taken at runtime",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_max_jmp_taken,
+ },
+ {
+ "Long conditional jump: not taken at runtime",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 2 } },
+ .fill_helper = bpf_fill_max_jmp_not_taken,
+ },
+ {
+ "Long conditional jump: always taken, known at JIT time",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_max_jmp_always_taken,
+ },
+ {
+ "Long conditional jump: never taken, known at JIT time",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 2 } },
+ .fill_helper = bpf_fill_max_jmp_never_taken,
+ },
+ /* Staggered jump sequences, immediate */
+ {
+ "Staggered jumps: JMP_JA",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_ja,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JEQ_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jeq_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JNE_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jne_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JSET_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jset_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JGT_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jgt_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JGE_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jge_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JLT_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jlt_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JLE_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jle_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JSGT_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jsgt_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JSGE_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jsge_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JSLT_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jslt_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JSLE_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jsle_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ /* Staggered jump sequences, register */
+ {
+ "Staggered jumps: JMP_JEQ_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jeq_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JNE_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jne_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JSET_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jset_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JGT_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jgt_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JGE_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jge_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JLT_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jlt_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JLE_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jle_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JSGT_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jsgt_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JSGE_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jsge_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JSLT_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jslt_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP_JSLE_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jsle_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ /* Staggered jump sequences, JMP32 immediate */
+ {
+ "Staggered jumps: JMP32_JEQ_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jeq32_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JNE_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jne32_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JSET_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jset32_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JGT_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jgt32_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JGE_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jge32_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JLT_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jlt32_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JLE_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jle32_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JSGT_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jsgt32_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JSGE_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jsge32_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JSLT_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jslt32_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JSLE_K",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jsle32_imm,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ /* Staggered jump sequences, JMP32 register */
+ {
+ "Staggered jumps: JMP32_JEQ_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jeq32_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JNE_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jne32_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JSET_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jset32_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JGT_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jgt32_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JGE_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jge32_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JLT_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jlt32_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JLE_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jle32_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JSGT_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jsgt32_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JSGE_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jsge32_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JSLT_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jslt32_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+ {
+ "Staggered jumps: JMP32_JSLE_X",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
+ .fill_helper = bpf_fill_staggered_jsle32_reg,
+ .nr_testruns = NR_STAGGERED_JMP_RUNS,
+ },
+};
+
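+/*
+ * Dummy net_device backing the skbs built below; populate_skb() fills
+ * in just enough of it (netns, ifindex, type) for the tests to work.
+ */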
+static struct net_device dev;
+
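+/*
+ * Build an skb of the given size from buf and stamp it with the fixed
+ * SKB_* metadata values that the skb ancillary-load tests expect to
+ * read back.
+ */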
+static struct sk_buff *populate_skb(char *buf, int size)
+{
+ struct sk_buff *skb;
+
+ if (size >= MAX_DATA)
+ return NULL;
+
+ skb = alloc_skb(MAX_DATA, GFP_KERNEL);
+ if (!skb)
+ return NULL;
+
+ __skb_put_data(skb, buf, size);
+
+ /* Initialize a fake skb with test pattern. */
+ skb_reset_mac_header(skb);
+ skb->protocol = htons(ETH_P_IP);
+ skb->pkt_type = SKB_TYPE;
+ skb->mark = SKB_MARK;
+ skb->hash = SKB_HASH;
+ skb->queue_mapping = SKB_QUEUE_MAP;
+ skb->vlan_tci = SKB_VLAN_TCI;
+ skb->vlan_proto = htons(ETH_P_IP);
+ dev_net_set(&dev, &init_net);
+ skb->dev = &dev;
+ skb->dev->ifindex = SKB_DEV_IFINDEX;
+ skb->dev->type = SKB_DEV_TYPE;
+ skb_set_network_header(skb, min(size, ETH_HLEN));
+
+ return skb;
+}
+
+static void *generate_test_data(struct bpf_test *test, int sub)
+{
+ struct sk_buff *skb;
+ struct page *page;
+
+ if (test->aux & FLAG_NO_DATA)
+ return NULL;
+
+ if (test->aux & FLAG_LARGE_MEM)
+ return kmalloc(test->test[sub].data_size, GFP_KERNEL);
+
+ /* Test case expects an skb, so populate one. Various
+ * subtests generate skbs of different sizes based on
+ * the same data.
+ */
+ skb = populate_skb(test->data, test->test[sub].data_size);
+ if (!skb)
+ return NULL;
+
+ if (test->aux & FLAG_SKB_FRAG) {
+ /*
+ * when the test requires a fragmented skb, add a
+ * single fragment to the skb, filled with
+ * test->frag_data.
+ */
+ page = alloc_page(GFP_KERNEL);
+ if (!page)
+ goto err_kfree_skb;
+
+ memcpy(page_address(page), test->frag_data, MAX_DATA);
+ skb_add_rx_frag(skb, 0, page, 0, MAX_DATA, MAX_DATA);
+ }
+
+ return skb;
+err_kfree_skb:
+ kfree_skb(skb);
+ return NULL;
+}
+
+static void release_test_data(const struct bpf_test *test, void *data)
+{
+ if (test->aux & FLAG_NO_DATA)
+ return;
+
+ if (test->aux & FLAG_LARGE_MEM)
+ kfree(data);
+ else
+ kfree_skb(data);
+}
+
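+/*
+ * For table-driven test cases the program length is implicit: scan the
+ * fixed-size insns[] array backwards for the last non-zero instruction.
+ * Generated (fill_helper) programs carry an explicit length instead.
+ */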
+static int filter_length(int which)
+{
+ struct sock_filter *fp;
+ int len;
+
+ if (tests[which].fill_helper)
+ return tests[which].u.ptr.len;
+
+ fp = tests[which].u.insns;
+ for (len = MAX_INSNS - 1; len > 0; --len)
+ if (fp[len].code != 0 || fp[len].k != 0)
+ break;
+
+ return len + 1;
+}
+
+static void *filter_pointer(int which)
+{
+ if (tests[which].fill_helper)
+ return tests[which].u.ptr.insns;
+ else
+ return tests[which].u.insns;
+}
+
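+/*
+ * Instantiate the program under test. Classic BPF filters go through
+ * bpf_prog_create() and thus the classic checker; eBPF (INTERNAL)
+ * programs are copied in directly and handed to the JIT or interpreter
+ * via bpf_prog_select_runtime().
+ */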
+static struct bpf_prog *generate_filter(int which, int *err)
+{
+ __u8 test_type = tests[which].aux & TEST_TYPE_MASK;
+ unsigned int flen = filter_length(which);
+ void *fptr = filter_pointer(which);
+ struct sock_fprog_kern fprog;
+ struct bpf_prog *fp;
+
+ switch (test_type) {
+ case CLASSIC:
+ fprog.filter = fptr;
+ fprog.len = flen;
+
+ *err = bpf_prog_create(&fp, &fprog);
+ if (tests[which].aux & FLAG_EXPECTED_FAIL) {
+ if (*err == tests[which].expected_errcode) {
+ pr_cont("PASS\n");
+ /* Verifier rejected filter as expected. */
+ *err = 0;
+ return NULL;
+ } else {
+ pr_cont("UNEXPECTED_PASS\n");
+				/* The verifier did not reject the filter
+				 * as expected; that is bad enough, so
+				 * just return.
+				 */
+ *err = -EINVAL;
+ return NULL;
+ }
+ }
+ if (*err) {
+ pr_cont("FAIL to prog_create err=%d len=%d\n",
+ *err, fprog.len);
+ return NULL;
+ }
+ break;
+
+ case INTERNAL:
+ fp = bpf_prog_alloc(bpf_prog_size(flen), 0);
+ if (fp == NULL) {
+ pr_cont("UNEXPECTED_FAIL no memory left\n");
+ *err = -ENOMEM;
+ return NULL;
+ }
+
+ fp->len = flen;
+ /* Type doesn't really matter here as long as it's not unspec. */
+ fp->type = BPF_PROG_TYPE_SOCKET_FILTER;
+ memcpy(fp->insnsi, fptr, fp->len * sizeof(struct bpf_insn));
+ fp->aux->stack_depth = tests[which].stack_depth;
+ fp->aux->verifier_zext = !!(tests[which].aux &
+ FLAG_VERIFIER_ZEXT);
+
+ /* We cannot error here as we don't need type compatibility
+ * checks.
+ */
+ fp = bpf_prog_select_runtime(fp, err);
+ if (*err) {
+ pr_cont("FAIL to select_runtime err=%d\n", *err);
+ return NULL;
+ }
+ break;
+ }
+
+ *err = 0;
+ return fp;
+}
+
+static void release_filter(struct bpf_prog *fp, int which)
+{
+ __u8 test_type = tests[which].aux & TEST_TYPE_MASK;
+
+ switch (test_type) {
+ case CLASSIC:
+ bpf_prog_destroy(fp);
+ break;
+ case INTERNAL:
+ bpf_prog_free(fp);
+ break;
+ }
+}
+
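+/*
+ * Execute the program 'runs' times with migration disabled and report
+ * the mean per-run duration in nanoseconds through 'duration'.
+ */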
+static int __run_one(const struct bpf_prog *fp, const void *data,
+ int runs, u64 *duration)
+{
+ u64 start, finish;
+ int ret = 0, i;
+
+ migrate_disable();
+ start = ktime_get_ns();
+
+ for (i = 0; i < runs; i++)
+ ret = bpf_prog_run(fp, data);
+
+ finish = ktime_get_ns();
+ migrate_enable();
+
+ *duration = finish - start;
+ do_div(*duration, runs);
+
+ return ret;
+}
+
+static int run_one(const struct bpf_prog *fp, struct bpf_test *test)
+{
+ int err_cnt = 0, i, runs = MAX_TESTRUNS;
+
+ if (test->nr_testruns)
+ runs = min(test->nr_testruns, MAX_TESTRUNS);
+
+ for (i = 0; i < MAX_SUBTESTS; i++) {
+ void *data;
+ u64 duration;
+ u32 ret;
+
+ /*
+ * NOTE: Several sub-tests may be present, in which case
+ * a zero {data_size, result} tuple indicates the end of
+ * the sub-test array. The first test is always run,
+ * even if both data_size and result happen to be zero.
+ */
+ if (i > 0 &&
+ test->test[i].data_size == 0 &&
+ test->test[i].result == 0)
+ break;
+
+ data = generate_test_data(test, i);
+ if (!data && !(test->aux & FLAG_NO_DATA)) {
+ pr_cont("data generation failed ");
+ err_cnt++;
+ break;
+ }
+ ret = __run_one(fp, data, runs, &duration);
+ release_test_data(test, data);
+
+ if (ret == test->test[i].result) {
+ pr_cont("%lld ", duration);
+ } else {
+ s32 res = test->test[i].result;
+
+ pr_cont("ret %d != %d (%#x != %#x)",
+ ret, res, ret, res);
+ err_cnt++;
+ }
+ }
+
+ return err_cnt;
+}
+
+static char test_name[64];
+module_param_string(test_name, test_name, sizeof(test_name), 0);
+
+static int test_id = -1;
+module_param(test_id, int, 0);
+
+static int test_range[2] = { 0, INT_MAX };
+module_param_array(test_range, int, NULL, 0);
+
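+/*
+ * Only tests whose index falls within [test_range[0], test_range[1]]
+ * are run; test_id and test_name narrow the range down to a single
+ * test (see prepare_test_range() below).
+ */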
+static bool exclude_test(int test_id)
+{
+ return test_id < test_range[0] || test_id > test_range[1];
+}
+
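+/*
+ * Build a GSO skb whose payload is spread across linear head data, a
+ * page fragment and a frag_list member, to exercise the nonlinear
+ * paths of skb_segment().
+ */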
+static __init struct sk_buff *build_test_skb(void)
+{
+ u32 headroom = NET_SKB_PAD + NET_IP_ALIGN + ETH_HLEN;
+ struct sk_buff *skb[2];
+ struct page *page[2];
+ int i, data_size = 8;
+
+ for (i = 0; i < 2; i++) {
+ page[i] = alloc_page(GFP_KERNEL);
+ if (!page[i]) {
+ if (i == 0)
+ goto err_page0;
+ else
+ goto err_page1;
+ }
+
+ /* this will set skb[i]->head_frag */
+ skb[i] = dev_alloc_skb(headroom + data_size);
+ if (!skb[i]) {
+ if (i == 0)
+ goto err_skb0;
+ else
+ goto err_skb1;
+ }
+
+ skb_reserve(skb[i], headroom);
+ skb_put(skb[i], data_size);
+ skb[i]->protocol = htons(ETH_P_IP);
+ skb_reset_network_header(skb[i]);
+ skb_set_mac_header(skb[i], -ETH_HLEN);
+
+ skb_add_rx_frag(skb[i], 0, page[i], 0, 64, 64);
+		/* skb_headlen(skb[i]): 8, skb[i]->head_frag = 1 */
+ }
+
+ /* setup shinfo */
+ skb_shinfo(skb[0])->gso_size = 1448;
+ skb_shinfo(skb[0])->gso_type = SKB_GSO_TCPV4;
+ skb_shinfo(skb[0])->gso_type |= SKB_GSO_DODGY;
+ skb_shinfo(skb[0])->gso_segs = 0;
+ skb_shinfo(skb[0])->frag_list = skb[1];
+ skb_shinfo(skb[0])->hwtstamps.hwtstamp = 1000;
+
+ /* adjust skb[0]'s len */
+ skb[0]->len += skb[1]->len;
+ skb[0]->data_len += skb[1]->data_len;
+ skb[0]->truesize += skb[1]->truesize;
+
+ return skb[0];
+
+err_skb1:
+ __free_page(page[1]);
+err_page1:
+ kfree_skb(skb[0]);
+err_skb0:
+ __free_page(page[0]);
+err_page0:
+ return NULL;
+}
+
+static __init struct sk_buff *build_test_skb_linear_no_head_frag(void)
+{
+ unsigned int alloc_size = 2000;
+ unsigned int headroom = 102, doffset = 72, data_size = 1308;
+ struct sk_buff *skb[2];
+ int i;
+
+ /* skbs linked in a frag_list, both with linear data, with head_frag=0
+ * (data allocated by kmalloc), both have tcp data of 1308 bytes
+ * (total payload is 2616 bytes).
+ * Data offset is 72 bytes (40 ipv6 hdr, 32 tcp hdr). Some headroom.
+ */
+ for (i = 0; i < 2; i++) {
+ skb[i] = alloc_skb(alloc_size, GFP_KERNEL);
+ if (!skb[i]) {
+ if (i == 0)
+ goto err_skb0;
+ else
+ goto err_skb1;
+ }
+
+ skb[i]->protocol = htons(ETH_P_IPV6);
+ skb_reserve(skb[i], headroom);
+ skb_put(skb[i], doffset + data_size);
+ skb_reset_network_header(skb[i]);
+ if (i == 0)
+ skb_reset_mac_header(skb[i]);
+ else
+ skb_set_mac_header(skb[i], -ETH_HLEN);
+ __skb_pull(skb[i], doffset);
+ }
+
+ /* setup shinfo.
+ * mimic bpf_skb_proto_4_to_6, which resets gso_segs and assigns a
+ * reduced gso_size.
+ */
+ skb_shinfo(skb[0])->gso_size = 1288;
+ skb_shinfo(skb[0])->gso_type = SKB_GSO_TCPV6 | SKB_GSO_DODGY;
+ skb_shinfo(skb[0])->gso_segs = 0;
+ skb_shinfo(skb[0])->frag_list = skb[1];
+
+ /* adjust skb[0]'s len */
+ skb[0]->len += skb[1]->len;
+ skb[0]->data_len += skb[1]->len;
+ skb[0]->truesize += skb[1]->truesize;
+
+ return skb[0];
+
+err_skb1:
+ kfree_skb(skb[0]);
+err_skb0:
+ return NULL;
+}
+
+struct skb_segment_test {
+ const char *descr;
+ struct sk_buff *(*build_skb)(void);
+ netdev_features_t features;
+};
+
+static struct skb_segment_test skb_segment_tests[] __initconst = {
+ {
+ .descr = "gso_with_rx_frags",
+ .build_skb = build_test_skb,
+ .features = NETIF_F_SG | NETIF_F_GSO_PARTIAL | NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM
+ },
+ {
+ .descr = "gso_linear_no_head_frag",
+ .build_skb = build_test_skb_linear_no_head_frag,
+ .features = NETIF_F_SG | NETIF_F_FRAGLIST |
+ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_GSO |
+ NETIF_F_LLTX | NETIF_F_GRO |
+ NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
+ NETIF_F_HW_VLAN_STAG_TX
+ }
+};
+
+static __init int test_skb_segment_single(const struct skb_segment_test *test)
+{
+ struct sk_buff *skb, *segs;
+ int ret = -1;
+
+ skb = test->build_skb();
+ if (!skb) {
+ pr_info("%s: failed to build_test_skb", __func__);
+ goto done;
+ }
+
+ segs = skb_segment(skb, test->features);
+ if (!IS_ERR(segs)) {
+ kfree_skb_list(segs);
+ ret = 0;
+ }
+ kfree_skb(skb);
+done:
+ return ret;
+}
+
+static __init int test_skb_segment(void)
+{
+ int i, err_cnt = 0, pass_cnt = 0;
+
+ for (i = 0; i < ARRAY_SIZE(skb_segment_tests); i++) {
+ const struct skb_segment_test *test = &skb_segment_tests[i];
+
+ cond_resched();
+ if (exclude_test(i))
+ continue;
+
+ pr_info("#%d %s ", i, test->descr);
+
+ if (test_skb_segment_single(test)) {
+ pr_cont("FAIL\n");
+ err_cnt++;
+ } else {
+ pr_cont("PASS\n");
+ pass_cnt++;
+ }
+ }
+
+ pr_info("%s: Summary: %d PASSED, %d FAILED\n", __func__,
+ pass_cnt, err_cnt);
+ return err_cnt ? -EINVAL : 0;
+}
+
+static __init int test_bpf(void)
+{
+ int i, err_cnt = 0, pass_cnt = 0;
+ int jit_cnt = 0, run_cnt = 0;
+
+ for (i = 0; i < ARRAY_SIZE(tests); i++) {
+ struct bpf_prog *fp;
+ int err;
+
+ cond_resched();
+ if (exclude_test(i))
+ continue;
+
+ pr_info("#%d %s ", i, tests[i].descr);
+
+ if (tests[i].fill_helper &&
+ tests[i].fill_helper(&tests[i]) < 0) {
+ pr_cont("FAIL to prog_fill\n");
+ continue;
+ }
+
+ fp = generate_filter(i, &err);
+
+ if (tests[i].fill_helper) {
+ kfree(tests[i].u.ptr.insns);
+ tests[i].u.ptr.insns = NULL;
+ }
+
+ if (fp == NULL) {
+ if (err == 0) {
+ pass_cnt++;
+ continue;
+ }
+ err_cnt++;
+ continue;
+ }
+
+ pr_cont("jited:%u ", fp->jited);
+
+ run_cnt++;
+ if (fp->jited)
+ jit_cnt++;
+
+ err = run_one(fp, &tests[i]);
+ release_filter(fp, i);
+
+ if (err) {
+ pr_cont("FAIL (%d times)\n", err);
+ err_cnt++;
+ } else {
+ pr_cont("PASS\n");
+ pass_cnt++;
+ }
+ }
+
+ pr_info("Summary: %d PASSED, %d FAILED, [%d/%d JIT'ed]\n",
+ pass_cnt, err_cnt, jit_cnt, run_cnt);
+
+ return err_cnt ? -EINVAL : 0;
+}
+
+struct tail_call_test {
+ const char *descr;
+ struct bpf_insn insns[MAX_INSNS];
+ int flags;
+ int result;
+ int stack_depth;
+};
+
+/* Flags that can be passed to tail call test cases */
+#define FLAG_NEED_STATE BIT(0)
+#define FLAG_RESULT_IN_STATE BIT(1)
+
+/*
+ * Magic marker used in test snippets for tail calls below.
+ * BPF_LD/MOV to R2 and R3 with this immediate value is replaced
+ * with the proper values by the test runner.
+ */
+#define TAIL_CALL_MARKER 0x7a11ca11
+
+/* Special offset to indicate a NULL call target */
+#define TAIL_CALL_NULL 0x7fff
+
+/* Special offset to indicate an out-of-range index */
+#define TAIL_CALL_INVALID 0x7ffe
+
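+/*
+ * TAIL_CALL(offset) loads the program array pointer into R2 and the
+ * target index into R3, then emits the tail call instruction; both
+ * TAIL_CALL_MARKER immediates are patched with real values by
+ * prepare_tail_call_tests().
+ */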
+#define TAIL_CALL(offset) \
+ BPF_LD_IMM64(R2, TAIL_CALL_MARKER), \
+ BPF_RAW_INSN(BPF_ALU | BPF_MOV | BPF_K, R3, 0, \
+ offset, TAIL_CALL_MARKER), \
+ BPF_JMP_IMM(BPF_TAIL_CALL, 0, 0, 0)
+
+/*
+ * A test function to be called from a BPF program, clobbering a lot of
+ * CPU registers in the process. A JITed BPF program calling this function
+ * must save and restore any caller-saved registers it uses for internal
+ * state, for example the current tail call count.
+ */
+BPF_CALL_1(bpf_test_func, u64, arg)
+{
+ char buf[64];
+ long a = 0;
+ long b = 1;
+ long c = 2;
+ long d = 3;
+ long e = 4;
+ long f = 5;
+ long g = 6;
+ long h = 7;
+
+ return snprintf(buf, sizeof(buf),
+ "%ld %lu %lx %ld %lu %lx %ld %lu %x",
+ a, b, c, d, e, f, g, h, (int)arg);
+}
+#define BPF_FUNC_test_func __BPF_FUNC_MAX_ID
+
+/*
+ * Tail call tests. Each test case may call any other test in the table,
+ * including itself, specified as a relative index offset from the calling
+ * test. The index TAIL_CALL_NULL can be used to specify a NULL target
+ * function to test the JIT error path. Similarly, the index TAIL_CALL_INVALID
+ * results in a target index that is out of range.
+ */
+static struct tail_call_test tail_call_tests[] = {
+ {
+ "Tail call leaf",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, R0, R1),
+ BPF_ALU64_IMM(BPF_ADD, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = 1,
+ },
+ {
+ "Tail call 2",
+ .insns = {
+ BPF_ALU64_IMM(BPF_ADD, R1, 2),
+ TAIL_CALL(-1),
+ BPF_ALU64_IMM(BPF_MOV, R0, -1),
+ BPF_EXIT_INSN(),
+ },
+ .result = 3,
+ },
+ {
+ "Tail call 3",
+ .insns = {
+ BPF_ALU64_IMM(BPF_ADD, R1, 3),
+ TAIL_CALL(-1),
+ BPF_ALU64_IMM(BPF_MOV, R0, -1),
+ BPF_EXIT_INSN(),
+ },
+ .result = 6,
+ },
+ {
+ "Tail call 4",
+ .insns = {
+ BPF_ALU64_IMM(BPF_ADD, R1, 4),
+ TAIL_CALL(-1),
+ BPF_ALU64_IMM(BPF_MOV, R0, -1),
+ BPF_EXIT_INSN(),
+ },
+ .result = 10,
+ },
+ {
+ "Tail call load/store leaf",
+ .insns = {
+ BPF_ALU64_IMM(BPF_MOV, R1, 1),
+ BPF_ALU64_IMM(BPF_MOV, R2, 2),
+ BPF_ALU64_REG(BPF_MOV, R3, BPF_REG_FP),
+ BPF_STX_MEM(BPF_DW, R3, R1, -8),
+ BPF_STX_MEM(BPF_DW, R3, R2, -16),
+ BPF_LDX_MEM(BPF_DW, R0, BPF_REG_FP, -8),
+ BPF_JMP_REG(BPF_JNE, R0, R1, 3),
+ BPF_LDX_MEM(BPF_DW, R0, BPF_REG_FP, -16),
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = 0,
+ .stack_depth = 32,
+ },
+ {
+ "Tail call load/store",
+ .insns = {
+ BPF_ALU64_IMM(BPF_MOV, R0, 3),
+ BPF_STX_MEM(BPF_DW, BPF_REG_FP, R0, -8),
+ TAIL_CALL(-1),
+ BPF_ALU64_IMM(BPF_MOV, R0, -1),
+ BPF_EXIT_INSN(),
+ },
+ .result = 0,
+ .stack_depth = 16,
+ },
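+	/*
+	 * Each test run enters the program once and then tail-calls back
+	 * into itself MAX_TAIL_CALL_CNT times before the limit stops it,
+	 * so the state counter ends up at
+	 * (MAX_TAIL_CALL_CNT + 1) * MAX_TESTRUNS.
+	 */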
+ {
+ "Tail call error path, max count reached",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, R2, R1, 0),
+ BPF_ALU64_IMM(BPF_ADD, R2, 1),
+ BPF_STX_MEM(BPF_W, R1, R2, 0),
+ TAIL_CALL(0),
+ BPF_EXIT_INSN(),
+ },
+ .flags = FLAG_NEED_STATE | FLAG_RESULT_IN_STATE,
+ .result = (MAX_TAIL_CALL_CNT + 1) * MAX_TESTRUNS,
+ },
+ {
+ "Tail call count preserved across function calls",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, R2, R1, 0),
+ BPF_ALU64_IMM(BPF_ADD, R2, 1),
+ BPF_STX_MEM(BPF_W, R1, R2, 0),
+ BPF_STX_MEM(BPF_DW, R10, R1, -8),
+ BPF_CALL_REL(BPF_FUNC_get_numa_node_id),
+ BPF_CALL_REL(BPF_FUNC_ktime_get_ns),
+ BPF_CALL_REL(BPF_FUNC_ktime_get_boot_ns),
+ BPF_CALL_REL(BPF_FUNC_ktime_get_coarse_ns),
+ BPF_CALL_REL(BPF_FUNC_jiffies64),
+ BPF_CALL_REL(BPF_FUNC_test_func),
+ BPF_LDX_MEM(BPF_DW, R1, R10, -8),
+ BPF_ALU32_REG(BPF_MOV, R0, R1),
+ TAIL_CALL(0),
+ BPF_EXIT_INSN(),
+ },
+ .stack_depth = 8,
+ .flags = FLAG_NEED_STATE | FLAG_RESULT_IN_STATE,
+ .result = (MAX_TAIL_CALL_CNT + 1) * MAX_TESTRUNS,
+ },
+ {
+ "Tail call error path, NULL target",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, R2, R1, 0),
+ BPF_ALU64_IMM(BPF_ADD, R2, 1),
+ BPF_STX_MEM(BPF_W, R1, R2, 0),
+ TAIL_CALL(TAIL_CALL_NULL),
+ BPF_EXIT_INSN(),
+ },
+ .flags = FLAG_NEED_STATE | FLAG_RESULT_IN_STATE,
+ .result = MAX_TESTRUNS,
+ },
+ {
+ "Tail call error path, index out of range",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, R2, R1, 0),
+ BPF_ALU64_IMM(BPF_ADD, R2, 1),
+ BPF_STX_MEM(BPF_W, R1, R2, 0),
+ TAIL_CALL(TAIL_CALL_INVALID),
+ BPF_EXIT_INSN(),
+ },
+ .flags = FLAG_NEED_STATE | FLAG_RESULT_IN_STATE,
+ .result = MAX_TESTRUNS,
+ },
+};
+
+static void __init destroy_tail_call_tests(struct bpf_array *progs)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tail_call_tests); i++)
+ if (progs->ptrs[i])
+ bpf_prog_free(progs->ptrs[i]);
+ kfree(progs);
+}
+
+static __init int prepare_tail_call_tests(struct bpf_array **pprogs)
+{
+ int ntests = ARRAY_SIZE(tail_call_tests);
+ struct bpf_array *progs;
+ int which, err;
+
+ /* Allocate the table of programs to be used for tail calls */
+ progs = kzalloc(struct_size(progs, ptrs, ntests + 1), GFP_KERNEL);
+ if (!progs)
+ goto out_nomem;
+
+ /* Create all eBPF programs and populate the table */
+ for (which = 0; which < ntests; which++) {
+ struct tail_call_test *test = &tail_call_tests[which];
+ struct bpf_prog *fp;
+ int len, i;
+
+ /* Compute the number of program instructions */
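+		/* A BPF_LD_IMM64 pseudo instruction occupies two slots. */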
+ for (len = 0; len < MAX_INSNS; len++) {
+ struct bpf_insn *insn = &test->insns[len];
+
+ if (len < MAX_INSNS - 1 &&
+ insn->code == (BPF_LD | BPF_DW | BPF_IMM))
+ len++;
+ if (insn->code == 0)
+ break;
+ }
+
+ /* Allocate and initialize the program */
+ fp = bpf_prog_alloc(bpf_prog_size(len), 0);
+ if (!fp)
+ goto out_nomem;
+
+ fp->len = len;
+ fp->type = BPF_PROG_TYPE_SOCKET_FILTER;
+ fp->aux->stack_depth = test->stack_depth;
+ memcpy(fp->insnsi, test->insns, len * sizeof(struct bpf_insn));
+
+ /* Relocate runtime tail call offsets and addresses */
+ for (i = 0; i < len; i++) {
+ struct bpf_insn *insn = &fp->insnsi[i];
+ long addr = 0;
+
+ switch (insn->code) {
+ case BPF_LD | BPF_DW | BPF_IMM:
+ if (insn->imm != TAIL_CALL_MARKER)
+ break;
+ insn[0].imm = (u32)(long)progs;
+ insn[1].imm = ((u64)(long)progs) >> 32;
+ break;
+
+ case BPF_ALU | BPF_MOV | BPF_K:
+ if (insn->imm != TAIL_CALL_MARKER)
+ break;
+ if (insn->off == TAIL_CALL_NULL)
+ insn->imm = ntests;
+ else if (insn->off == TAIL_CALL_INVALID)
+ insn->imm = ntests + 1;
+ else
+ insn->imm = which + insn->off;
+ insn->off = 0;
+ break;
+
+ case BPF_JMP | BPF_CALL:
+ if (insn->src_reg != BPF_PSEUDO_CALL)
+ break;
+ switch (insn->imm) {
+ case BPF_FUNC_get_numa_node_id:
+ addr = (long)&numa_node_id;
+ break;
+ case BPF_FUNC_ktime_get_ns:
+ addr = (long)&ktime_get_ns;
+ break;
+ case BPF_FUNC_ktime_get_boot_ns:
+ addr = (long)&ktime_get_boot_fast_ns;
+ break;
+ case BPF_FUNC_ktime_get_coarse_ns:
+ addr = (long)&ktime_get_coarse_ns;
+ break;
+ case BPF_FUNC_jiffies64:
+ addr = (long)&get_jiffies_64;
+ break;
+ case BPF_FUNC_test_func:
+ addr = (long)&bpf_test_func;
+ break;
+ default:
+ err = -EFAULT;
+ goto out_err;
+ }
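+				/*
+				 * BPF_EMIT_CALL() encodes the target as a
+				 * 32-bit offset from __bpf_call_base; if the
+				 * helper address does not round-trip, it
+				 * cannot be encoded, so turn the call into
+				 * a NOP instead.
+				 */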
+ *insn = BPF_EMIT_CALL(addr);
+ if ((long)__bpf_call_base + insn->imm != addr)
+ *insn = BPF_JMP_A(0); /* Skip: NOP */
+ break;
+ }
+ }
+
+ fp = bpf_prog_select_runtime(fp, &err);
+ if (err)
+ goto out_err;
+
+ progs->ptrs[which] = fp;
+ }
+
+ /* The last entry contains a NULL program pointer */
+ progs->map.max_entries = ntests + 1;
+ *pprogs = progs;
+ return 0;
+
+out_nomem:
+ err = -ENOMEM;
+
+out_err:
+ if (progs)
+ destroy_tail_call_tests(progs);
+ return err;
+}
+
+static __init int test_tail_calls(struct bpf_array *progs)
+{
+ int i, err_cnt = 0, pass_cnt = 0;
+ int jit_cnt = 0, run_cnt = 0;
+
+ for (i = 0; i < ARRAY_SIZE(tail_call_tests); i++) {
+ struct tail_call_test *test = &tail_call_tests[i];
+ struct bpf_prog *fp = progs->ptrs[i];
+ int *data = NULL;
+ int state = 0;
+ u64 duration;
+ int ret;
+
+ cond_resched();
+ if (exclude_test(i))
+ continue;
+
+ pr_info("#%d %s ", i, test->descr);
+ if (!fp) {
+ err_cnt++;
+ continue;
+ }
+ pr_cont("jited:%u ", fp->jited);
+
+ run_cnt++;
+ if (fp->jited)
+ jit_cnt++;
+
+ if (test->flags & FLAG_NEED_STATE)
+ data = &state;
+ ret = __run_one(fp, data, MAX_TESTRUNS, &duration);
+ if (test->flags & FLAG_RESULT_IN_STATE)
+ ret = state;
+ if (ret == test->result) {
+ pr_cont("%lld PASS", duration);
+ pass_cnt++;
+ } else {
+ pr_cont("ret %d != %d FAIL", ret, test->result);
+ err_cnt++;
+ }
+ }
+
+ pr_info("%s: Summary: %d PASSED, %d FAILED, [%d/%d JIT'ed]\n",
+ __func__, pass_cnt, err_cnt, jit_cnt, run_cnt);
+
+ return err_cnt ? -EINVAL : 0;
+}
+
+static char test_suite[32];
+module_param_string(test_suite, test_suite, sizeof(test_suite), 0);
+
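+/*
+ * Example invocation (illustrative; any test name from the tables
+ * above works):
+ *   modprobe test_bpf test_suite=test_tail_calls test_name="Tail call 2"
+ */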
+static __init int find_test_index(const char *test_name)
+{
+ int i;
+
+ if (!strcmp(test_suite, "test_bpf")) {
+ for (i = 0; i < ARRAY_SIZE(tests); i++) {
+ if (!strcmp(tests[i].descr, test_name))
+ return i;
+ }
+ }
+
+ if (!strcmp(test_suite, "test_tail_calls")) {
+ for (i = 0; i < ARRAY_SIZE(tail_call_tests); i++) {
+ if (!strcmp(tail_call_tests[i].descr, test_name))
+ return i;
+ }
+ }
+
+ if (!strcmp(test_suite, "test_skb_segment")) {
+ for (i = 0; i < ARRAY_SIZE(skb_segment_tests); i++) {
+ if (!strcmp(skb_segment_tests[i].descr, test_name))
+ return i;
+ }
+ }
+
+ return -1;
+}
+
+static __init int prepare_test_range(void)
+{
+ int valid_range;
+
+ if (!strcmp(test_suite, "test_bpf"))
+ valid_range = ARRAY_SIZE(tests);
+ else if (!strcmp(test_suite, "test_tail_calls"))
+ valid_range = ARRAY_SIZE(tail_call_tests);
+ else if (!strcmp(test_suite, "test_skb_segment"))
+ valid_range = ARRAY_SIZE(skb_segment_tests);
+ else
+ return 0;
+
+ if (test_id >= 0) {
+ /*
+ * if a test_id was specified, use test_range to
+ * cover only that test.
+ */
+ if (test_id >= valid_range) {
+ pr_err("test_bpf: invalid test_id specified for '%s' suite.\n",
+ test_suite);
+ return -EINVAL;
+ }
+
+ test_range[0] = test_id;
+ test_range[1] = test_id;
+ } else if (*test_name) {
+ /*
+ * if a test_name was specified, find it and setup
+ * test_range to cover only that test.
+ */
+ int idx = find_test_index(test_name);
+
+ if (idx < 0) {
+ pr_err("test_bpf: no test named '%s' found for '%s' suite.\n",
+ test_name, test_suite);
+ return -EINVAL;
+ }
+ test_range[0] = idx;
+ test_range[1] = idx;
+ } else if (test_range[0] != 0 || test_range[1] != INT_MAX) {
+ /*
+ * check that the supplied test_range is valid.
+ */
+ if (test_range[0] < 0 || test_range[1] >= valid_range) {
+ pr_err("test_bpf: test_range is out of bound for '%s' suite.\n",
+ test_suite);
+ return -EINVAL;
+ }
+
+ if (test_range[1] < test_range[0]) {
+ pr_err("test_bpf: test_range is ending before it starts.\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int __init test_bpf_init(void)
+{
+ struct bpf_array *progs = NULL;
+ int ret;
+
+ if (strlen(test_suite) &&
+ strcmp(test_suite, "test_bpf") &&
+ strcmp(test_suite, "test_tail_calls") &&
+ strcmp(test_suite, "test_skb_segment")) {
+ pr_err("test_bpf: invalid test_suite '%s' specified.\n", test_suite);
+ return -EINVAL;
+ }
+
+ /*
+ * if test_suite is not specified, but test_id, test_name or test_range
+ * is specified, set 'test_bpf' as the default test suite.
+ */
+ if (!strlen(test_suite) &&
+ (test_id != -1 || strlen(test_name) ||
+ (test_range[0] != 0 || test_range[1] != INT_MAX))) {
+ pr_info("test_bpf: set 'test_bpf' as the default test_suite.\n");
+ strscpy(test_suite, "test_bpf", sizeof(test_suite));
+ }
+
+ ret = prepare_test_range();
+ if (ret < 0)
+ return ret;
+
+ if (!strlen(test_suite) || !strcmp(test_suite, "test_bpf")) {
+ ret = test_bpf();
+ if (ret)
+ return ret;
+ }
+
+ if (!strlen(test_suite) || !strcmp(test_suite, "test_tail_calls")) {
+ ret = prepare_tail_call_tests(&progs);
+ if (ret)
+ return ret;
+ ret = test_tail_calls(progs);
+ destroy_tail_call_tests(progs);
+ if (ret)
+ return ret;
+ }
+
+ if (!strlen(test_suite) || !strcmp(test_suite, "test_skb_segment"))
+ return test_skb_segment();
+
+ return 0;
+}
+
+static void __exit test_bpf_exit(void)
+{
+}
+
+module_init(test_bpf_init);
+module_exit(test_bpf_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/lib/test_debug_virtual.c b/lib/test_debug_virtual.c
new file mode 100644
index 000000000..b7cc0aaee
--- /dev/null
+++ b/lib/test_debug_virtual.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/sizes.h>
+#include <linux/io.h>
+
+#include <asm/page.h>
+#ifdef CONFIG_MIPS
+#include <asm/bootinfo.h>
+#endif
+
+struct foo {
+ unsigned int bar;
+};
+
+static struct foo *foo;
+
+static int __init test_debug_virtual_init(void)
+{
+ phys_addr_t pa;
+ void *va;
+
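+	/*
+	 * Deliberately invalid: vmalloc addresses must not be passed to
+	 * virt_to_phys(). With CONFIG_DEBUG_VIRTUAL enabled this is
+	 * expected to trigger a warning.
+	 */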
+ va = (void *)VMALLOC_START;
+ pa = virt_to_phys(va);
+
+ pr_info("PA: %pa for VA: 0x%lx\n", &pa, (unsigned long)va);
+
+ foo = kzalloc(sizeof(*foo), GFP_KERNEL);
+ if (!foo)
+ return -ENOMEM;
+
+ pa = virt_to_phys(foo);
+ va = foo;
+ pr_info("PA: %pa for VA: 0x%lx\n", &pa, (unsigned long)va);
+
+ return 0;
+}
+module_init(test_debug_virtual_init);
+
+static void __exit test_debug_virtual_exit(void)
+{
+ kfree(foo);
+}
+module_exit(test_debug_virtual_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Test module for CONFIG_DEBUG_VIRTUAL");
diff --git a/lib/test_dynamic_debug.c b/lib/test_dynamic_debug.c
new file mode 100644
index 000000000..8dd250ad0
--- /dev/null
+++ b/lib/test_dynamic_debug.c
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Kernel module for testing dynamic_debug
+ *
+ * Authors:
+ * Jim Cromie <jim.cromie@gmail.com>
+ */
+
+#define pr_fmt(fmt) "test_dd: " fmt
+
+#include <linux/module.h>
+
+/* run tests by reading or writing sysfs node: do_prints */
+
+static void do_prints(void); /* device under test */
+static int param_set_do_prints(const char *instr, const struct kernel_param *kp)
+{
+ do_prints();
+ return 0;
+}
+static int param_get_do_prints(char *buffer, const struct kernel_param *kp)
+{
+ do_prints();
+ return scnprintf(buffer, PAGE_SIZE, "did do_prints\n");
+}
+static const struct kernel_param_ops param_ops_do_prints = {
+ .set = param_set_do_prints,
+ .get = param_get_do_prints,
+};
+module_param_cb(do_prints, &param_ops_do_prints, NULL, 0600);
+
+/*
+ * Using the CLASSMAP api:
+ * - each classmap must have a corresponding enum
+ * - enum symbols must match/correlate with the class-name strings in the map
+ * - the base must equal the enum's first value
+ * - multiple maps must set their base to share the 0-30 class_id space !!
+ * (build-bug-on tips welcome)
+ * Additionally, here:
+ * - tie together sysname, mapname, bitsname, flagsname
+ */
+#define DD_SYS_WRAP(_model, _flags) \
+ static unsigned long bits_##_model; \
+ static struct ddebug_class_param _flags##_model = { \
+ .bits = &bits_##_model, \
+ .flags = #_flags, \
+ .map = &map_##_model, \
+ }; \
+ module_param_cb(_flags##_##_model, &param_ops_dyndbg_classes, &_flags##_model, 0600)
+
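+/*
+ * For example, DD_SYS_WRAP(disjoint_bits, p) creates a module parameter
+ * named p_disjoint_bits that drives the 'p' (printk) flag for the class
+ * names declared in map_disjoint_bits.
+ */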
+/* numeric input, independent bits */
+enum cat_disjoint_bits {
+ D2_CORE = 0,
+ D2_DRIVER,
+ D2_KMS,
+ D2_PRIME,
+ D2_ATOMIC,
+ D2_VBL,
+ D2_STATE,
+ D2_LEASE,
+ D2_DP,
+ D2_DRMRES };
+DECLARE_DYNDBG_CLASSMAP(map_disjoint_bits, DD_CLASS_TYPE_DISJOINT_BITS, 0,
+ "D2_CORE",
+ "D2_DRIVER",
+ "D2_KMS",
+ "D2_PRIME",
+ "D2_ATOMIC",
+ "D2_VBL",
+ "D2_STATE",
+ "D2_LEASE",
+ "D2_DP",
+ "D2_DRMRES");
+DD_SYS_WRAP(disjoint_bits, p);
+DD_SYS_WRAP(disjoint_bits, T);
+
+/* symbolic input, independent bits */
+enum cat_disjoint_names { LOW = 11, MID, HI };
+DECLARE_DYNDBG_CLASSMAP(map_disjoint_names, DD_CLASS_TYPE_DISJOINT_NAMES, 10,
+ "LOW", "MID", "HI");
+DD_SYS_WRAP(disjoint_names, p);
+DD_SYS_WRAP(disjoint_names, T);
+
+/* numeric verbosity, V2 > V1 related */
+enum cat_level_num { V0 = 14, V1, V2, V3, V4, V5, V6, V7 };
+DECLARE_DYNDBG_CLASSMAP(map_level_num, DD_CLASS_TYPE_LEVEL_NUM, 14,
+ "V0", "V1", "V2", "V3", "V4", "V5", "V6", "V7");
+DD_SYS_WRAP(level_num, p);
+DD_SYS_WRAP(level_num, T);
+
+/* symbolic verbosity */
+enum cat_level_names { L0 = 22, L1, L2, L3, L4, L5, L6, L7 };
+DECLARE_DYNDBG_CLASSMAP(map_level_names, DD_CLASS_TYPE_LEVEL_NAMES, 22,
+ "L0", "L1", "L2", "L3", "L4", "L5", "L6", "L7");
+DD_SYS_WRAP(level_names, p);
+DD_SYS_WRAP(level_names, T);
+
+/* stand-in for all pr_debug etc */
+#define prdbg(SYM) __pr_debug_cls(SYM, #SYM " msg\n")
+
+static void do_cats(void)
+{
+ pr_debug("doing categories\n");
+
+ prdbg(LOW);
+ prdbg(MID);
+ prdbg(HI);
+
+ prdbg(D2_CORE);
+ prdbg(D2_DRIVER);
+ prdbg(D2_KMS);
+ prdbg(D2_PRIME);
+ prdbg(D2_ATOMIC);
+ prdbg(D2_VBL);
+ prdbg(D2_STATE);
+ prdbg(D2_LEASE);
+ prdbg(D2_DP);
+ prdbg(D2_DRMRES);
+}
+
+static void do_levels(void)
+{
+ pr_debug("doing levels\n");
+
+ prdbg(V1);
+ prdbg(V2);
+ prdbg(V3);
+ prdbg(V4);
+ prdbg(V5);
+ prdbg(V6);
+ prdbg(V7);
+
+ prdbg(L1);
+ prdbg(L2);
+ prdbg(L3);
+ prdbg(L4);
+ prdbg(L5);
+ prdbg(L6);
+ prdbg(L7);
+}
+
+static void do_prints(void)
+{
+ do_cats();
+ do_levels();
+}
+
+static int __init test_dynamic_debug_init(void)
+{
+ pr_debug("init start\n");
+ do_prints();
+ pr_debug("init done\n");
+ return 0;
+}
+
+static void __exit test_dynamic_debug_exit(void)
+{
+ pr_debug("exited\n");
+}
+
+module_init(test_dynamic_debug_init);
+module_exit(test_dynamic_debug_exit);
+
+MODULE_AUTHOR("Jim Cromie <jim.cromie@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/lib/test_firmware.c b/lib/test_firmware.c
new file mode 100644
index 000000000..add4699fc
--- /dev/null
+++ b/lib/test_firmware.c
@@ -0,0 +1,1569 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * This module provides an interface to trigger and test firmware loading.
+ *
+ * It is designed to be used for basic evaluation of the firmware loading
+ * subsystem (for example when validating firmware verification). It lacks
+ * any extra dependencies, and will not normally be loaded by the system
+ * unless explicitly requested by name.
+ */
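+/*
+ * Typical usage from userspace (the exact sysfs path may vary):
+ *   echo -n "test-firmware.bin" > /sys/devices/virtual/misc/test_firmware/trigger_request
+ */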
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/completion.h>
+#include <linux/firmware.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/kstrtox.h>
+#include <linux/kthread.h>
+#include <linux/vmalloc.h>
+#include <linux/efi_embedded_fw.h>
+
+MODULE_IMPORT_NS(TEST_FIRMWARE);
+
+#define TEST_FIRMWARE_NAME "test-firmware.bin"
+#define TEST_FIRMWARE_NUM_REQS 4
+#define TEST_FIRMWARE_BUF_SIZE SZ_1K
+#define TEST_UPLOAD_MAX_SIZE SZ_2K
+#define TEST_UPLOAD_BLK_SIZE 37 /* Avoid powers of two in testing */
+
+static DEFINE_MUTEX(test_fw_mutex);
+static const struct firmware *test_firmware;
+static LIST_HEAD(test_upload_list);
+
+struct test_batched_req {
+ u8 idx;
+ int rc;
+ bool sent;
+ const struct firmware *fw;
+ const char *name;
+ const char *fw_buf;
+ struct completion completion;
+ struct task_struct *task;
+ struct device *dev;
+};
+
+/**
+ * struct test_config - test configuration shared by the different triggers
+ *
+ * @name: the name of the firmware file to look for
+ * @into_buf: if true, request_firmware_into_buf() is used for the
+ *	into_buf trigger instead.
+ * @buf_size: size of buf to allocate when into_buf is true
+ * @file_offset: file offset to request when calling request_firmware_into_buf
+ * @partial: partial read opt when calling request_firmware_into_buf
+ * @sync_direct: if true, request_firmware_direct() is used for the
+ *	sync trigger instead of request_firmware().
+ * @send_uevent: whether or not to send a uevent for async requests
+ * @num_requests: number of requests to try per test case. This is trigger
+ * specific.
+ * @reqs: stores all requests information
+ * @read_fw_idx: index of the request thread whose firmware results are
+ *	read back through the read_fw trigger.
+ * @upload_name: firmware name to be used with upload_read sysfs node
+ * @test_result: a test may use this to collect the result of the
+ *	request_firmware*() calls it makes. Any setup error always takes
+ *	priority; if setup succeeded, the first error encountered while
+ *	running the API is kept instead. Note that for async calls this
+ *	will typically be a successful result (0) unless bogus parameters
+ *	were used or the system is out of memory. In the async case the
+ *	callback has to do a bit more homework to figure out what
+ *	happened; unfortunately, the only information passed on error
+ *	today is the fact that no firmware was found, so we can only
+ *	assume -ENOENT on async calls if the firmware is NULL.
+ *
+ * Errors you can expect:
+ *
+ * API specific:
+ *
+ * 0: success for sync, for async it means request was sent
+ * -EINVAL: invalid parameters or request
+ * -ENOENT: files not found
+ *
+ * System environment:
+ *
+ * -ENOMEM: memory pressure on system
+ * -ENODEV: out of number of devices to test
+ * -EINVAL: an unexpected error has occurred
+ * @req_firmware: if @sync_direct is true this is set to
+ * request_firmware_direct(), otherwise request_firmware()
+ */
+struct test_config {
+ char *name;
+ bool into_buf;
+ size_t buf_size;
+ size_t file_offset;
+ bool partial;
+ bool sync_direct;
+ bool send_uevent;
+ u8 num_requests;
+ u8 read_fw_idx;
+ char *upload_name;
+
+ /*
+	 * These below don't belong here, but we'll move them once we create
+ * a struct fw_test_device and stuff the misc_dev under there later.
+ */
+ struct test_batched_req *reqs;
+ int test_result;
+ int (*req_firmware)(const struct firmware **fw, const char *name,
+ struct device *device);
+};
+
+struct upload_inject_err {
+ const char *prog;
+ enum fw_upload_err err_code;
+};
+
+struct test_firmware_upload {
+ char *name;
+ struct list_head node;
+ char *buf;
+ size_t size;
+ bool cancel_request;
+ struct upload_inject_err inject;
+ struct fw_upload *fwl;
+};
+
+static struct test_config *test_fw_config;
+
+static struct test_firmware_upload *upload_lookup_name(const char *name)
+{
+ struct test_firmware_upload *tst;
+
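+	/*
+	 * Match on the prefix of the supplied name so that trailing
+	 * characters, such as a newline from sysfs input, are ignored.
+	 */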
+ list_for_each_entry(tst, &test_upload_list, node)
+ if (strncmp(name, tst->name, strlen(tst->name)) == 0)
+ return tst;
+
+ return NULL;
+}
+
+static ssize_t test_fw_misc_read(struct file *f, char __user *buf,
+ size_t size, loff_t *offset)
+{
+ ssize_t rc = 0;
+
+ mutex_lock(&test_fw_mutex);
+ if (test_firmware)
+ rc = simple_read_from_buffer(buf, size, offset,
+ test_firmware->data,
+ test_firmware->size);
+ mutex_unlock(&test_fw_mutex);
+ return rc;
+}
+
+static const struct file_operations test_fw_fops = {
+ .owner = THIS_MODULE,
+ .read = test_fw_misc_read,
+};
+
+static void __test_release_all_firmware(void)
+{
+ struct test_batched_req *req;
+ u8 i;
+
+ if (!test_fw_config->reqs)
+ return;
+
+ for (i = 0; i < test_fw_config->num_requests; i++) {
+ req = &test_fw_config->reqs[i];
+ if (req->fw) {
+ if (req->fw_buf) {
+ kfree_const(req->fw_buf);
+ req->fw_buf = NULL;
+ }
+ release_firmware(req->fw);
+ req->fw = NULL;
+ }
+ }
+
+ vfree(test_fw_config->reqs);
+ test_fw_config->reqs = NULL;
+}
+
+static void test_release_all_firmware(void)
+{
+ mutex_lock(&test_fw_mutex);
+ __test_release_all_firmware();
+ mutex_unlock(&test_fw_mutex);
+}
+
+
+static void __test_firmware_config_free(void)
+{
+ __test_release_all_firmware();
+ kfree_const(test_fw_config->name);
+ test_fw_config->name = NULL;
+}
+
+/*
+ * XXX: move to kstrncpy() once merged.
+ *
+ * Users should use kfree_const() when freeing these.
+ */
+static int __kstrncpy(char **dst, const char *name, size_t count, gfp_t gfp)
+{
+ *dst = kstrndup(name, count, gfp);
+ if (!*dst)
+ return -ENOMEM;
+ return count;
+}
+
+static int __test_firmware_config_init(void)
+{
+ int ret;
+
+ ret = __kstrncpy(&test_fw_config->name, TEST_FIRMWARE_NAME,
+ strlen(TEST_FIRMWARE_NAME), GFP_KERNEL);
+ if (ret < 0)
+ goto out;
+
+ test_fw_config->num_requests = TEST_FIRMWARE_NUM_REQS;
+ test_fw_config->send_uevent = true;
+ test_fw_config->into_buf = false;
+ test_fw_config->buf_size = TEST_FIRMWARE_BUF_SIZE;
+ test_fw_config->file_offset = 0;
+ test_fw_config->partial = false;
+ test_fw_config->sync_direct = false;
+ test_fw_config->req_firmware = request_firmware;
+ test_fw_config->test_result = 0;
+ test_fw_config->reqs = NULL;
+ test_fw_config->upload_name = NULL;
+
+ return 0;
+
+out:
+ __test_firmware_config_free();
+ return ret;
+}
+
+static ssize_t reset_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+
+ mutex_lock(&test_fw_mutex);
+
+ __test_firmware_config_free();
+
+ ret = __test_firmware_config_init();
+ if (ret < 0) {
+ ret = -ENOMEM;
+ pr_err("could not alloc settings for config trigger: %d\n",
+ ret);
+ goto out;
+ }
+
+ pr_info("reset\n");
+ ret = count;
+
+out:
+ mutex_unlock(&test_fw_mutex);
+
+ return ret;
+}
+static DEVICE_ATTR_WO(reset);
+
+static ssize_t config_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int len = 0;
+
+ mutex_lock(&test_fw_mutex);
+
+ len += scnprintf(buf, PAGE_SIZE - len,
+ "Custom trigger configuration for: %s\n",
+ dev_name(dev));
+
+ if (test_fw_config->name)
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "name:\t%s\n",
+ test_fw_config->name);
+ else
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "name:\tEMPTY\n");
+
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "num_requests:\t%u\n", test_fw_config->num_requests);
+
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "send_uevent:\t\t%s\n",
+ test_fw_config->send_uevent ?
+ "FW_ACTION_UEVENT" :
+ "FW_ACTION_NOUEVENT");
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "into_buf:\t\t%s\n",
+ test_fw_config->into_buf ? "true" : "false");
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "buf_size:\t%zu\n", test_fw_config->buf_size);
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "file_offset:\t%zu\n", test_fw_config->file_offset);
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "partial:\t\t%s\n",
+ test_fw_config->partial ? "true" : "false");
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "sync_direct:\t\t%s\n",
+ test_fw_config->sync_direct ? "true" : "false");
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "read_fw_idx:\t%u\n", test_fw_config->read_fw_idx);
+ if (test_fw_config->upload_name)
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "upload_name:\t%s\n",
+ test_fw_config->upload_name);
+ else
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "upload_name:\tEMPTY\n");
+
+ mutex_unlock(&test_fw_mutex);
+
+ return len;
+}
+static DEVICE_ATTR_RO(config);
+
+static ssize_t config_name_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+
+ mutex_lock(&test_fw_mutex);
+ kfree_const(test_fw_config->name);
+ ret = __kstrncpy(&test_fw_config->name, buf, count, GFP_KERNEL);
+ mutex_unlock(&test_fw_mutex);
+
+ return ret;
+}
+
+/*
+ * As per sysfs_kf_seq_show() the buf is max PAGE_SIZE.
+ */
+static ssize_t config_test_show_str(char *dst,
+ char *src)
+{
+ int len;
+
+ mutex_lock(&test_fw_mutex);
+ len = snprintf(dst, PAGE_SIZE, "%s\n", src);
+ mutex_unlock(&test_fw_mutex);
+
+ return len;
+}
+
+static inline int __test_dev_config_update_bool(const char *buf, size_t size,
+ bool *cfg)
+{
+ int ret;
+
+ if (kstrtobool(buf, cfg) < 0)
+ ret = -EINVAL;
+ else
+ ret = size;
+
+ return ret;
+}
+
+static int test_dev_config_update_bool(const char *buf, size_t size,
+ bool *cfg)
+{
+ int ret;
+
+ mutex_lock(&test_fw_mutex);
+ ret = __test_dev_config_update_bool(buf, size, cfg);
+ mutex_unlock(&test_fw_mutex);
+
+ return ret;
+}
+
+static ssize_t test_dev_config_show_bool(char *buf, bool val)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+
+static int __test_dev_config_update_size_t(
+ const char *buf,
+ size_t size,
+ size_t *cfg)
+{
+ int ret;
+ long new;
+
+ ret = kstrtol(buf, 10, &new);
+ if (ret)
+ return ret;
+
+ *(size_t *)cfg = new;
+
+ /* Always return full write size even if we didn't consume all */
+ return size;
+}
+
+static ssize_t test_dev_config_show_size_t(char *buf, size_t val)
+{
+ return snprintf(buf, PAGE_SIZE, "%zu\n", val);
+}
+
+static ssize_t test_dev_config_show_int(char *buf, int val)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+
+static int __test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg)
+{
+ u8 val;
+ int ret;
+
+ ret = kstrtou8(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ *(u8 *)cfg = val;
+
+ /* Always return full write size even if we didn't consume all */
+ return size;
+}
+
+static int test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg)
+{
+ int ret;
+
+ mutex_lock(&test_fw_mutex);
+ ret = __test_dev_config_update_u8(buf, size, cfg);
+ mutex_unlock(&test_fw_mutex);
+
+ return ret;
+}
+
+static ssize_t test_dev_config_show_u8(char *buf, u8 val)
+{
+ return snprintf(buf, PAGE_SIZE, "%u\n", val);
+}
+
+static ssize_t config_name_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return config_test_show_str(buf, test_fw_config->name);
+}
+static DEVICE_ATTR_RW(config_name);
+
+static ssize_t config_upload_name_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct test_firmware_upload *tst;
+ int ret = count;
+
+ mutex_lock(&test_fw_mutex);
+ tst = upload_lookup_name(buf);
+ if (tst)
+ test_fw_config->upload_name = tst->name;
+ else
+ ret = -EINVAL;
+ mutex_unlock(&test_fw_mutex);
+
+ return ret;
+}
+
+static ssize_t config_upload_name_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return config_test_show_str(buf, test_fw_config->upload_name);
+}
+static DEVICE_ATTR_RW(config_upload_name);
+
+static ssize_t config_num_requests_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int rc;
+
+ mutex_lock(&test_fw_mutex);
+ if (test_fw_config->reqs) {
+ pr_err("Must call release_all_firmware prior to changing config\n");
+ rc = -EINVAL;
+ mutex_unlock(&test_fw_mutex);
+ goto out;
+ }
+
+ rc = __test_dev_config_update_u8(buf, count,
+ &test_fw_config->num_requests);
+ mutex_unlock(&test_fw_mutex);
+
+out:
+ return rc;
+}
+
+static ssize_t config_num_requests_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return test_dev_config_show_u8(buf, test_fw_config->num_requests);
+}
+static DEVICE_ATTR_RW(config_num_requests);
+
+static ssize_t config_into_buf_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return test_dev_config_update_bool(buf,
+ count,
+ &test_fw_config->into_buf);
+}
+
+static ssize_t config_into_buf_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return test_dev_config_show_bool(buf, test_fw_config->into_buf);
+}
+static DEVICE_ATTR_RW(config_into_buf);
+
+static ssize_t config_buf_size_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int rc;
+
+ mutex_lock(&test_fw_mutex);
+ if (test_fw_config->reqs) {
+ pr_err("Must call release_all_firmware prior to changing config\n");
+ rc = -EINVAL;
+ mutex_unlock(&test_fw_mutex);
+ goto out;
+ }
+
+ rc = __test_dev_config_update_size_t(buf, count,
+ &test_fw_config->buf_size);
+ mutex_unlock(&test_fw_mutex);
+
+out:
+ return rc;
+}
+
+static ssize_t config_buf_size_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return test_dev_config_show_size_t(buf, test_fw_config->buf_size);
+}
+static DEVICE_ATTR_RW(config_buf_size);
+
+static ssize_t config_file_offset_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int rc;
+
+ mutex_lock(&test_fw_mutex);
+ if (test_fw_config->reqs) {
+ pr_err("Must call release_all_firmware prior to changing config\n");
+ rc = -EINVAL;
+ mutex_unlock(&test_fw_mutex);
+ goto out;
+ }
+
+ rc = __test_dev_config_update_size_t(buf, count,
+ &test_fw_config->file_offset);
+ mutex_unlock(&test_fw_mutex);
+
+out:
+ return rc;
+}
+
+static ssize_t config_file_offset_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return test_dev_config_show_size_t(buf, test_fw_config->file_offset);
+}
+static DEVICE_ATTR_RW(config_file_offset);
+
+static ssize_t config_partial_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return test_dev_config_update_bool(buf,
+ count,
+ &test_fw_config->partial);
+}
+
+static ssize_t config_partial_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return test_dev_config_show_bool(buf, test_fw_config->partial);
+}
+static DEVICE_ATTR_RW(config_partial);
+
+static ssize_t config_sync_direct_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int rc = test_dev_config_update_bool(buf, count,
+ &test_fw_config->sync_direct);
+
+ if (rc == count)
+ test_fw_config->req_firmware = test_fw_config->sync_direct ?
+ request_firmware_direct :
+ request_firmware;
+ return rc;
+}
+
+static ssize_t config_sync_direct_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return test_dev_config_show_bool(buf, test_fw_config->sync_direct);
+}
+static DEVICE_ATTR_RW(config_sync_direct);
+
+static ssize_t config_send_uevent_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return test_dev_config_update_bool(buf, count,
+ &test_fw_config->send_uevent);
+}
+
+static ssize_t config_send_uevent_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return test_dev_config_show_bool(buf, test_fw_config->send_uevent);
+}
+static DEVICE_ATTR_RW(config_send_uevent);
+
+static ssize_t config_read_fw_idx_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return test_dev_config_update_u8(buf, count,
+ &test_fw_config->read_fw_idx);
+}
+
+static ssize_t config_read_fw_idx_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return test_dev_config_show_u8(buf, test_fw_config->read_fw_idx);
+}
+static DEVICE_ATTR_RW(config_read_fw_idx);
+
+
+static ssize_t trigger_request_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int rc;
+ char *name;
+
+ name = kstrndup(buf, count, GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+
+ pr_info("loading '%s'\n", name);
+
+ mutex_lock(&test_fw_mutex);
+ release_firmware(test_firmware);
+ if (test_fw_config->reqs)
+ __test_release_all_firmware();
+ test_firmware = NULL;
+ rc = request_firmware(&test_firmware, name, dev);
+ if (rc) {
+ pr_info("load of '%s' failed: %d\n", name, rc);
+ goto out;
+ }
+ pr_info("loaded: %zu\n", test_firmware->size);
+ rc = count;
+
+out:
+ mutex_unlock(&test_fw_mutex);
+
+ kfree(name);
+
+ return rc;
+}
+static DEVICE_ATTR_WO(trigger_request);
+
+#ifdef CONFIG_EFI_EMBEDDED_FIRMWARE
+extern struct list_head efi_embedded_fw_list;
+extern bool efi_embedded_fw_checked;
+
+static ssize_t trigger_request_platform_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ static const u8 test_data[] = {
+ 0x55, 0xaa, 0x55, 0xaa, 0x01, 0x02, 0x03, 0x04,
+ 0x55, 0xaa, 0x55, 0xaa, 0x05, 0x06, 0x07, 0x08,
+ 0x55, 0xaa, 0x55, 0xaa, 0x10, 0x20, 0x30, 0x40,
+ 0x55, 0xaa, 0x55, 0xaa, 0x50, 0x60, 0x70, 0x80
+ };
+ struct efi_embedded_fw efi_embedded_fw;
+ const struct firmware *firmware = NULL;
+ bool saved_efi_embedded_fw_checked;
+ char *name;
+ int rc;
+
+ name = kstrndup(buf, count, GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+
+ pr_info("inserting test platform fw '%s'\n", name);
+ efi_embedded_fw.name = name;
+ efi_embedded_fw.data = (void *)test_data;
+ efi_embedded_fw.length = sizeof(test_data);
+ list_add(&efi_embedded_fw.list, &efi_embedded_fw_list);
+ saved_efi_embedded_fw_checked = efi_embedded_fw_checked;
+ efi_embedded_fw_checked = true;
+
+ pr_info("loading '%s'\n", name);
+ rc = firmware_request_platform(&firmware, name, dev);
+ if (rc) {
+ pr_info("load of '%s' failed: %d\n", name, rc);
+ goto out;
+ }
+ if (firmware->size != sizeof(test_data) ||
+ memcmp(firmware->data, test_data, sizeof(test_data)) != 0) {
+ pr_info("firmware contents mismatch for '%s'\n", name);
+ rc = -EINVAL;
+ goto out;
+ }
+ pr_info("loaded: %zu\n", firmware->size);
+ rc = count;
+
+out:
+ efi_embedded_fw_checked = saved_efi_embedded_fw_checked;
+ release_firmware(firmware);
+ list_del(&efi_embedded_fw.list);
+ kfree(name);
+
+ return rc;
+}
+static DEVICE_ATTR_WO(trigger_request_platform);
+#endif
+
+static DECLARE_COMPLETION(async_fw_done);
+
+static void trigger_async_request_cb(const struct firmware *fw, void *context)
+{
+ test_firmware = fw;
+ complete(&async_fw_done);
+}
+
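+/*
+ * The async triggers below block on async_fw_done, which the callback above
+ * completes once the firmware core has finished the request.
+ */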
+static ssize_t trigger_async_request_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int rc;
+ char *name;
+
+ name = kstrndup(buf, count, GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+
+ pr_info("loading '%s'\n", name);
+
+ mutex_lock(&test_fw_mutex);
+ release_firmware(test_firmware);
+ test_firmware = NULL;
+ if (test_fw_config->reqs)
+ __test_release_all_firmware();
+ rc = request_firmware_nowait(THIS_MODULE, 1, name, dev, GFP_KERNEL,
+ NULL, trigger_async_request_cb);
+ if (rc) {
+ pr_info("async load of '%s' failed: %d\n", name, rc);
+ kfree(name);
+ goto out;
+ }
+ /* Free 'name' ASAP, to test for race conditions */
+ kfree(name);
+
+ wait_for_completion(&async_fw_done);
+
+ if (test_firmware) {
+ pr_info("loaded: %zu\n", test_firmware->size);
+ rc = count;
+ } else {
+ pr_err("failed to async load firmware\n");
+ rc = -ENOMEM;
+ }
+
+out:
+ mutex_unlock(&test_fw_mutex);
+
+ return rc;
+}
+static DEVICE_ATTR_WO(trigger_async_request);
+
+static ssize_t trigger_custom_fallback_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int rc;
+ char *name;
+
+ name = kstrndup(buf, count, GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+
+ pr_info("loading '%s' using custom fallback mechanism\n", name);
+
+ mutex_lock(&test_fw_mutex);
+ release_firmware(test_firmware);
+ if (test_fw_config->reqs)
+ __test_release_all_firmware();
+ test_firmware = NULL;
+ rc = request_firmware_nowait(THIS_MODULE, FW_ACTION_NOUEVENT, name,
+ dev, GFP_KERNEL, NULL,
+ trigger_async_request_cb);
+ if (rc) {
+ pr_info("async load of '%s' failed: %d\n", name, rc);
+ kfree(name);
+ goto out;
+ }
+ /* Free 'name' ASAP, to test for race conditions */
+ kfree(name);
+
+ wait_for_completion(&async_fw_done);
+
+ if (test_firmware) {
+ pr_info("loaded: %zu\n", test_firmware->size);
+ rc = count;
+ } else {
+ pr_err("failed to async load firmware\n");
+ rc = -ENODEV;
+ }
+
+out:
+ mutex_unlock(&test_fw_mutex);
+
+ return rc;
+}
+static DEVICE_ATTR_WO(trigger_custom_fallback);
+
+static int test_fw_run_batch_request(void *data)
+{
+ struct test_batched_req *req = data;
+
+ if (!req) {
+ test_fw_config->test_result = -EINVAL;
+ return -EINVAL;
+ }
+
+ if (test_fw_config->into_buf) {
+ void *test_buf;
+
+ test_buf = kzalloc(TEST_FIRMWARE_BUF_SIZE, GFP_KERNEL);
+ if (!test_buf)
+ return -ENOMEM;
+
+ if (test_fw_config->partial)
+ req->rc = request_partial_firmware_into_buf
+ (&req->fw,
+ req->name,
+ req->dev,
+ test_buf,
+ test_fw_config->buf_size,
+ test_fw_config->file_offset);
+ else
+ req->rc = request_firmware_into_buf
+ (&req->fw,
+ req->name,
+ req->dev,
+ test_buf,
+ test_fw_config->buf_size);
+ if (!req->fw)
+ kfree(test_buf);
+ else
+ req->fw_buf = test_buf;
+ } else {
+ req->rc = test_fw_config->req_firmware(&req->fw,
+ req->name,
+ req->dev);
+ }
+
+ if (req->rc) {
+ pr_info("#%u: batched sync load failed: %d\n",
+ req->idx, req->rc);
+ if (!test_fw_config->test_result)
+ test_fw_config->test_result = req->rc;
+ } else if (req->fw) {
+ req->sent = true;
+ pr_info("#%u: batched sync loaded %zu\n",
+ req->idx, req->fw->size);
+ }
+ complete(&req->completion);
+
+ req->task = NULL;
+
+ return 0;
+}
+
+/*
+ * We use a kthread as otherwise the kernel serializes all our sync requests
+ * and we would not be able to mimic batched requests on a sync call. Batched
+ * requests on a sync call can for instance happen in a device driver when
+ * multiple cards are used and firmware loading happens outside of probe.
+ */
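+/*
+ * Illustrative batched flow over this sysfs interface (values and the
+ * firmware name are examples):
+ *
+ *   echo -n 4 > config_num_requests
+ *   echo -n test-firmware.bin > config_name
+ *   echo 1 > trigger_batched_requests
+ *   cat test_result
+ *   echo 1 > release_all_firmware
+ */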
+static ssize_t trigger_batched_requests_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct test_batched_req *req;
+ int rc;
+ u8 i;
+
+ mutex_lock(&test_fw_mutex);
+
+ if (test_fw_config->reqs) {
+ rc = -EBUSY;
+ goto out_bail;
+ }
+
+ test_fw_config->reqs =
+ vzalloc(array3_size(sizeof(struct test_batched_req),
+ test_fw_config->num_requests, 2));
+ if (!test_fw_config->reqs) {
+ rc = -ENOMEM;
+ goto out_unlock;
+ }
+
+ pr_info("batched sync firmware loading '%s' %u times\n",
+ test_fw_config->name, test_fw_config->num_requests);
+
+ for (i = 0; i < test_fw_config->num_requests; i++) {
+ req = &test_fw_config->reqs[i];
+ req->fw = NULL;
+ req->idx = i;
+ req->name = test_fw_config->name;
+ req->fw_buf = NULL;
+ req->dev = dev;
+ init_completion(&req->completion);
+ req->task = kthread_run(test_fw_run_batch_request, req,
+ "%s-%u", KBUILD_MODNAME, req->idx);
+ if (!req->task || IS_ERR(req->task)) {
+ pr_err("Setting up thread %u failed\n", req->idx);
+ req->task = NULL;
+ rc = -ENOMEM;
+ goto out_bail;
+ }
+ }
+
+ rc = count;
+
+	/*
+	 * We require an explicit release so that release_firmware() can be
+	 * delayed, which improves our chances of forcing a batched request.
+	 * If we instead called release_firmware() right away, a successful
+	 * firmware request might miss the opportunity to become a batched
+	 * request.
+	 */
+
+out_bail:
+ for (i = 0; i < test_fw_config->num_requests; i++) {
+ req = &test_fw_config->reqs[i];
+ if (req->task || req->sent)
+ wait_for_completion(&req->completion);
+ }
+
+ /* Override any worker error if we had a general setup error */
+ if (rc < 0)
+ test_fw_config->test_result = rc;
+
+out_unlock:
+ mutex_unlock(&test_fw_mutex);
+
+ return rc;
+}
+static DEVICE_ATTR_WO(trigger_batched_requests);
+
+/*
+ * The callers wait for each callback to return while holding the lock, so
+ * there is no need to take it here.
+ */
+static void trigger_batched_cb(const struct firmware *fw, void *context)
+{
+ struct test_batched_req *req = context;
+
+ if (!req) {
+ test_fw_config->test_result = -EINVAL;
+ return;
+ }
+
+ /* forces *some* batched requests to queue up */
+ if (!req->idx)
+ ssleep(2);
+
+ req->fw = fw;
+
+	/*
+	 * Unfortunately the firmware API gives us nothing other than a null FW
+	 * if the firmware was not found on async requests. The best we can do
+	 * is assume -ENOENT. A better API would pass the actual return value
+	 * to the callback.
+	 */
+ if (!fw && !test_fw_config->test_result)
+ test_fw_config->test_result = -ENOENT;
+
+ complete(&req->completion);
+}
+
+static
+ssize_t trigger_batched_requests_async_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct test_batched_req *req;
+ bool send_uevent;
+ int rc;
+ u8 i;
+
+ mutex_lock(&test_fw_mutex);
+
+ if (test_fw_config->reqs) {
+ rc = -EBUSY;
+ goto out_bail;
+ }
+
+ test_fw_config->reqs =
+ vzalloc(array3_size(sizeof(struct test_batched_req),
+ test_fw_config->num_requests, 2));
+ if (!test_fw_config->reqs) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+	pr_info("batched async firmware loading '%s' %u times\n",
+		test_fw_config->name, test_fw_config->num_requests);
+
+ send_uevent = test_fw_config->send_uevent ? FW_ACTION_UEVENT :
+ FW_ACTION_NOUEVENT;
+
+ for (i = 0; i < test_fw_config->num_requests; i++) {
+ req = &test_fw_config->reqs[i];
+ req->name = test_fw_config->name;
+ req->fw_buf = NULL;
+ req->fw = NULL;
+ req->idx = i;
+ init_completion(&req->completion);
+ rc = request_firmware_nowait(THIS_MODULE, send_uevent,
+ req->name,
+ dev, GFP_KERNEL, req,
+ trigger_batched_cb);
+ if (rc) {
+ pr_info("#%u: batched async load failed setup: %d\n",
+ i, rc);
+ req->rc = rc;
+ goto out_bail;
+ } else
+ req->sent = true;
+ }
+
+ rc = count;
+
+out_bail:
+
+	/*
+	 * We require an explicit release so that release_firmware() can be
+	 * delayed, which improves our chances of forcing a batched request.
+	 * If we instead called release_firmware() right away, a successful
+	 * firmware request might miss the opportunity to become a batched
+	 * request.
+	 */
+
+ for (i = 0; i < test_fw_config->num_requests; i++) {
+ req = &test_fw_config->reqs[i];
+ if (req->sent)
+ wait_for_completion(&req->completion);
+ }
+
+ /* Override any worker error if we had a general setup error */
+ if (rc < 0)
+ test_fw_config->test_result = rc;
+
+out:
+ mutex_unlock(&test_fw_mutex);
+
+ return rc;
+}
+static DEVICE_ATTR_WO(trigger_batched_requests_async);
+
+static void upload_release(struct test_firmware_upload *tst)
+{
+ firmware_upload_unregister(tst->fwl);
+ kfree(tst->buf);
+ kfree(tst->name);
+ kfree(tst);
+}
+
+static void upload_release_all(void)
+{
+ struct test_firmware_upload *tst, *tmp;
+
+ list_for_each_entry_safe(tst, tmp, &test_upload_list, node) {
+ list_del(&tst->node);
+ upload_release(tst);
+ }
+ test_fw_config->upload_name = NULL;
+}
+
+/*
+ * This table is replicated from .../firmware_loader/sysfs_upload.c
+ * and needs to be kept in sync.
+ */
+static const char * const fw_upload_err_str[] = {
+ [FW_UPLOAD_ERR_NONE] = "none",
+ [FW_UPLOAD_ERR_HW_ERROR] = "hw-error",
+ [FW_UPLOAD_ERR_TIMEOUT] = "timeout",
+ [FW_UPLOAD_ERR_CANCELED] = "user-abort",
+ [FW_UPLOAD_ERR_BUSY] = "device-busy",
+ [FW_UPLOAD_ERR_INVALID_SIZE] = "invalid-file-size",
+ [FW_UPLOAD_ERR_RW_ERROR] = "read-write-error",
+ [FW_UPLOAD_ERR_WEAROUT] = "flash-wearout",
+};
+
+static void upload_err_inject_error(struct test_firmware_upload *tst,
+ const u8 *p, const char *prog)
+{
+ enum fw_upload_err err;
+
+ for (err = FW_UPLOAD_ERR_NONE + 1; err < FW_UPLOAD_ERR_MAX; err++) {
+ if (strncmp(p, fw_upload_err_str[err],
+ strlen(fw_upload_err_str[err])) == 0) {
+ tst->inject.prog = prog;
+ tst->inject.err_code = err;
+ return;
+ }
+ }
+}
+
+static void upload_err_inject_prog(struct test_firmware_upload *tst,
+ const u8 *p)
+{
+ static const char * const progs[] = {
+ "preparing:", "transferring:", "programming:"
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(progs); i++) {
+ if (strncmp(p, progs[i], strlen(progs[i])) == 0) {
+ upload_err_inject_error(tst, p + strlen(progs[i]),
+ progs[i]);
+ return;
+ }
+ }
+}
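+
+/*
+ * Together with the "inject:" check in test_fw_upload_prepare() below, the
+ * two helpers above parse an error-injection directive at the start of an
+ * uploaded image, of the form (phase and error names from the tables above):
+ *
+ *   inject:<phase>:<error>    e.g. "inject:programming:hw-error"
+ */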
+
+#define FIVE_MINUTES_MS (5 * 60 * 1000)
+static enum fw_upload_err
+fw_upload_wait_on_cancel(struct test_firmware_upload *tst)
+{
+ int ms_delay;
+
+ for (ms_delay = 0; ms_delay < FIVE_MINUTES_MS; ms_delay += 100) {
+ msleep(100);
+ if (tst->cancel_request)
+ return FW_UPLOAD_ERR_CANCELED;
+ }
+ return FW_UPLOAD_ERR_NONE;
+}
+
+static enum fw_upload_err test_fw_upload_prepare(struct fw_upload *fwl,
+ const u8 *data, u32 size)
+{
+ struct test_firmware_upload *tst = fwl->dd_handle;
+ enum fw_upload_err ret = FW_UPLOAD_ERR_NONE;
+ const char *progress = "preparing:";
+
+ tst->cancel_request = false;
+
+ if (!size || size > TEST_UPLOAD_MAX_SIZE) {
+ ret = FW_UPLOAD_ERR_INVALID_SIZE;
+ goto err_out;
+ }
+
+ if (strncmp(data, "inject:", strlen("inject:")) == 0)
+ upload_err_inject_prog(tst, data + strlen("inject:"));
+
+ memset(tst->buf, 0, TEST_UPLOAD_MAX_SIZE);
+ tst->size = size;
+
+ if (tst->inject.err_code == FW_UPLOAD_ERR_NONE ||
+ strncmp(tst->inject.prog, progress, strlen(progress)) != 0)
+ return FW_UPLOAD_ERR_NONE;
+
+ if (tst->inject.err_code == FW_UPLOAD_ERR_CANCELED)
+ ret = fw_upload_wait_on_cancel(tst);
+ else
+ ret = tst->inject.err_code;
+
+err_out:
+	/*
+	 * The cleanup op only executes if the prepare op succeeds.
+	 * If the prepare op fails, it must do its own clean-up.
+	 */
+ tst->inject.err_code = FW_UPLOAD_ERR_NONE;
+ tst->inject.prog = NULL;
+
+ return ret;
+}
+
+static enum fw_upload_err test_fw_upload_write(struct fw_upload *fwl,
+ const u8 *data, u32 offset,
+ u32 size, u32 *written)
+{
+ struct test_firmware_upload *tst = fwl->dd_handle;
+ const char *progress = "transferring:";
+ u32 blk_size;
+
+ if (tst->cancel_request)
+ return FW_UPLOAD_ERR_CANCELED;
+
+ blk_size = min_t(u32, TEST_UPLOAD_BLK_SIZE, size);
+ memcpy(tst->buf + offset, data + offset, blk_size);
+
+ *written = blk_size;
+
+ if (tst->inject.err_code == FW_UPLOAD_ERR_NONE ||
+ strncmp(tst->inject.prog, progress, strlen(progress)) != 0)
+ return FW_UPLOAD_ERR_NONE;
+
+ if (tst->inject.err_code == FW_UPLOAD_ERR_CANCELED)
+ return fw_upload_wait_on_cancel(tst);
+
+ return tst->inject.err_code;
+}
+
+static enum fw_upload_err test_fw_upload_complete(struct fw_upload *fwl)
+{
+ struct test_firmware_upload *tst = fwl->dd_handle;
+ const char *progress = "programming:";
+
+ if (tst->cancel_request)
+ return FW_UPLOAD_ERR_CANCELED;
+
+ if (tst->inject.err_code == FW_UPLOAD_ERR_NONE ||
+ strncmp(tst->inject.prog, progress, strlen(progress)) != 0)
+ return FW_UPLOAD_ERR_NONE;
+
+ if (tst->inject.err_code == FW_UPLOAD_ERR_CANCELED)
+ return fw_upload_wait_on_cancel(tst);
+
+ return tst->inject.err_code;
+}
+
+static void test_fw_upload_cancel(struct fw_upload *fwl)
+{
+ struct test_firmware_upload *tst = fwl->dd_handle;
+
+ tst->cancel_request = true;
+}
+
+static void test_fw_cleanup(struct fw_upload *fwl)
+{
+ struct test_firmware_upload *tst = fwl->dd_handle;
+
+ tst->inject.err_code = FW_UPLOAD_ERR_NONE;
+ tst->inject.prog = NULL;
+}
+
+static const struct fw_upload_ops upload_test_ops = {
+ .prepare = test_fw_upload_prepare,
+ .write = test_fw_upload_write,
+ .poll_complete = test_fw_upload_complete,
+ .cancel = test_fw_upload_cancel,
+ .cleanup = test_fw_cleanup
+};
+
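+/*
+ * Illustrative sysfs use (the name "fw-A" is an example); registering
+ * creates a firmware-upload class device that an image can then be fed to:
+ *
+ *   echo -n fw-A > upload_register
+ *   # upload an image through /sys/class/firmware/fw-A/
+ *   echo -n fw-A > upload_unregister
+ */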
+static ssize_t upload_register_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct test_firmware_upload *tst;
+ struct fw_upload *fwl;
+ char *name;
+ int ret;
+
+ name = kstrndup(buf, count, GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+
+ mutex_lock(&test_fw_mutex);
+ tst = upload_lookup_name(name);
+ if (tst) {
+ ret = -EEXIST;
+ goto free_name;
+ }
+
+ tst = kzalloc(sizeof(*tst), GFP_KERNEL);
+ if (!tst) {
+ ret = -ENOMEM;
+ goto free_name;
+ }
+
+ tst->name = name;
+ tst->buf = kzalloc(TEST_UPLOAD_MAX_SIZE, GFP_KERNEL);
+ if (!tst->buf) {
+ ret = -ENOMEM;
+ goto free_tst;
+ }
+
+ fwl = firmware_upload_register(THIS_MODULE, dev, tst->name,
+ &upload_test_ops, tst);
+ if (IS_ERR(fwl)) {
+ ret = PTR_ERR(fwl);
+ goto free_buf;
+ }
+
+ tst->fwl = fwl;
+ list_add_tail(&tst->node, &test_upload_list);
+ mutex_unlock(&test_fw_mutex);
+ return count;
+
+free_buf:
+ kfree(tst->buf);
+
+free_tst:
+ kfree(tst);
+
+free_name:
+ mutex_unlock(&test_fw_mutex);
+ kfree(name);
+
+ return ret;
+}
+static DEVICE_ATTR_WO(upload_register);
+
+static ssize_t upload_unregister_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct test_firmware_upload *tst;
+ int ret = count;
+
+ mutex_lock(&test_fw_mutex);
+ tst = upload_lookup_name(buf);
+ if (!tst) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (test_fw_config->upload_name == tst->name)
+ test_fw_config->upload_name = NULL;
+
+ list_del(&tst->node);
+ upload_release(tst);
+
+out:
+ mutex_unlock(&test_fw_mutex);
+ return ret;
+}
+static DEVICE_ATTR_WO(upload_unregister);
+
+static ssize_t test_result_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return test_dev_config_show_int(buf, test_fw_config->test_result);
+}
+static DEVICE_ATTR_RO(test_result);
+
+static ssize_t release_all_firmware_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ test_release_all_firmware();
+ return count;
+}
+static DEVICE_ATTR_WO(release_all_firmware);
+
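+/*
+ * Returns the contents of the batched request selected by
+ * config_read_fw_idx. Illustrative use:
+ *
+ *   echo -n 0 > config_read_fw_idx
+ *   cat read_firmware
+ */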
+static ssize_t read_firmware_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct test_batched_req *req;
+ u8 idx;
+ ssize_t rc = 0;
+
+ mutex_lock(&test_fw_mutex);
+
+ idx = test_fw_config->read_fw_idx;
+ if (idx >= test_fw_config->num_requests) {
+ rc = -ERANGE;
+ goto out;
+ }
+
+ if (!test_fw_config->reqs) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ req = &test_fw_config->reqs[idx];
+ if (!req->fw) {
+ pr_err("#%u: failed to async load firmware\n", idx);
+ rc = -ENOENT;
+ goto out;
+ }
+
+ pr_info("#%u: loaded %zu\n", idx, req->fw->size);
+
+ if (req->fw->size > PAGE_SIZE) {
+ pr_err("Testing interface must use PAGE_SIZE firmware for now\n");
+ rc = -EINVAL;
+ goto out;
+ }
+ memcpy(buf, req->fw->data, req->fw->size);
+
+ rc = req->fw->size;
+out:
+ mutex_unlock(&test_fw_mutex);
+
+ return rc;
+}
+static DEVICE_ATTR_RO(read_firmware);
+
+static ssize_t upload_read_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct test_firmware_upload *tst = NULL;
+ struct test_firmware_upload *tst_iter;
+ int ret = -EINVAL;
+
+ if (!test_fw_config->upload_name) {
+ pr_err("Set config_upload_name before using upload_read\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&test_fw_mutex);
+ list_for_each_entry(tst_iter, &test_upload_list, node)
+ if (tst_iter->name == test_fw_config->upload_name) {
+ tst = tst_iter;
+ break;
+ }
+
+ if (!tst) {
+ pr_err("Firmware name not found: %s\n",
+ test_fw_config->upload_name);
+ goto out;
+ }
+
+ if (tst->size > PAGE_SIZE) {
+ pr_err("Testing interface must use PAGE_SIZE firmware for now\n");
+ goto out;
+ }
+
+ memcpy(buf, tst->buf, tst->size);
+ ret = tst->size;
+out:
+ mutex_unlock(&test_fw_mutex);
+ return ret;
+}
+static DEVICE_ATTR_RO(upload_read);
+
+#define TEST_FW_DEV_ATTR(name) &dev_attr_##name.attr
+
+static struct attribute *test_dev_attrs[] = {
+ TEST_FW_DEV_ATTR(reset),
+
+ TEST_FW_DEV_ATTR(config),
+ TEST_FW_DEV_ATTR(config_name),
+ TEST_FW_DEV_ATTR(config_num_requests),
+ TEST_FW_DEV_ATTR(config_into_buf),
+ TEST_FW_DEV_ATTR(config_buf_size),
+ TEST_FW_DEV_ATTR(config_file_offset),
+ TEST_FW_DEV_ATTR(config_partial),
+ TEST_FW_DEV_ATTR(config_sync_direct),
+ TEST_FW_DEV_ATTR(config_send_uevent),
+ TEST_FW_DEV_ATTR(config_read_fw_idx),
+ TEST_FW_DEV_ATTR(config_upload_name),
+
+ /* These don't use the config at all - they could be ported! */
+ TEST_FW_DEV_ATTR(trigger_request),
+ TEST_FW_DEV_ATTR(trigger_async_request),
+ TEST_FW_DEV_ATTR(trigger_custom_fallback),
+#ifdef CONFIG_EFI_EMBEDDED_FIRMWARE
+ TEST_FW_DEV_ATTR(trigger_request_platform),
+#endif
+
+ /* These use the config and can use the test_result */
+ TEST_FW_DEV_ATTR(trigger_batched_requests),
+ TEST_FW_DEV_ATTR(trigger_batched_requests_async),
+
+ TEST_FW_DEV_ATTR(release_all_firmware),
+ TEST_FW_DEV_ATTR(test_result),
+ TEST_FW_DEV_ATTR(read_firmware),
+ TEST_FW_DEV_ATTR(upload_read),
+ TEST_FW_DEV_ATTR(upload_register),
+ TEST_FW_DEV_ATTR(upload_unregister),
+ NULL,
+};
+
+ATTRIBUTE_GROUPS(test_dev);
+
+static struct miscdevice test_fw_misc_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "test_firmware",
+ .fops = &test_fw_fops,
+ .groups = test_dev_groups,
+};
+
+static int __init test_firmware_init(void)
+{
+ int rc;
+
+ test_fw_config = kzalloc(sizeof(struct test_config), GFP_KERNEL);
+ if (!test_fw_config)
+ return -ENOMEM;
+
+ rc = __test_firmware_config_init();
+ if (rc) {
+ kfree(test_fw_config);
+ pr_err("could not init firmware test config: %d\n", rc);
+ return rc;
+ }
+
+ rc = misc_register(&test_fw_misc_device);
+ if (rc) {
+ __test_firmware_config_free();
+ kfree(test_fw_config);
+ pr_err("could not register misc device: %d\n", rc);
+ return rc;
+ }
+
+ pr_warn("interface ready\n");
+
+ return 0;
+}
+
+module_init(test_firmware_init);
+
+static void __exit test_firmware_exit(void)
+{
+ mutex_lock(&test_fw_mutex);
+ release_firmware(test_firmware);
+ misc_deregister(&test_fw_misc_device);
+ upload_release_all();
+ __test_firmware_config_free();
+ kfree(test_fw_config);
+ mutex_unlock(&test_fw_mutex);
+
+ pr_warn("removed interface\n");
+}
+
+module_exit(test_firmware_exit);
+
+MODULE_AUTHOR("Kees Cook <keescook@chromium.org>");
+MODULE_LICENSE("GPL");
diff --git a/lib/test_fortify/read_overflow-memchr.c b/lib/test_fortify/read_overflow-memchr.c
new file mode 100644
index 000000000..2743084b3
--- /dev/null
+++ b/lib/test_fortify/read_overflow-memchr.c
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#define TEST \
+ memchr(small, 0x7A, sizeof(small) + 1)
+
+#include "test_fortify.h"
diff --git a/lib/test_fortify/read_overflow-memchr_inv.c b/lib/test_fortify/read_overflow-memchr_inv.c
new file mode 100644
index 000000000..b26e1f1bc
--- /dev/null
+++ b/lib/test_fortify/read_overflow-memchr_inv.c
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#define TEST \
+ memchr_inv(small, 0x7A, sizeof(small) + 1)
+
+#include "test_fortify.h"
diff --git a/lib/test_fortify/read_overflow-memcmp.c b/lib/test_fortify/read_overflow-memcmp.c
new file mode 100644
index 000000000..d5d301ff6
--- /dev/null
+++ b/lib/test_fortify/read_overflow-memcmp.c
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#define TEST \
+ memcmp(small, large, sizeof(small) + 1)
+
+#include "test_fortify.h"
diff --git a/lib/test_fortify/read_overflow-memscan.c b/lib/test_fortify/read_overflow-memscan.c
new file mode 100644
index 000000000..c1a97f2df
--- /dev/null
+++ b/lib/test_fortify/read_overflow-memscan.c
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#define TEST \
+ memscan(small, 0x7A, sizeof(small) + 1)
+
+#include "test_fortify.h"
diff --git a/lib/test_fortify/read_overflow2-memcmp.c b/lib/test_fortify/read_overflow2-memcmp.c
new file mode 100644
index 000000000..c6091e640
--- /dev/null
+++ b/lib/test_fortify/read_overflow2-memcmp.c
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#define TEST \
+ memcmp(large, small, sizeof(small) + 1)
+
+#include "test_fortify.h"
diff --git a/lib/test_fortify/read_overflow2-memcpy.c b/lib/test_fortify/read_overflow2-memcpy.c
new file mode 100644
index 000000000..07b62e56c
--- /dev/null
+++ b/lib/test_fortify/read_overflow2-memcpy.c
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#define TEST \
+ memcpy(large, instance.buf, sizeof(large))
+
+#include "test_fortify.h"
diff --git a/lib/test_fortify/read_overflow2-memmove.c b/lib/test_fortify/read_overflow2-memmove.c
new file mode 100644
index 000000000..34edfab04
--- /dev/null
+++ b/lib/test_fortify/read_overflow2-memmove.c
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#define TEST \
+ memmove(large, instance.buf, sizeof(large))
+
+#include "test_fortify.h"
diff --git a/lib/test_fortify/read_overflow2_field-memcpy.c b/lib/test_fortify/read_overflow2_field-memcpy.c
new file mode 100644
index 000000000..de9569266
--- /dev/null
+++ b/lib/test_fortify/read_overflow2_field-memcpy.c
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#define TEST \
+ memcpy(large, instance.buf, sizeof(instance.buf) + 1)
+
+#include "test_fortify.h"
diff --git a/lib/test_fortify/read_overflow2_field-memmove.c b/lib/test_fortify/read_overflow2_field-memmove.c
new file mode 100644
index 000000000..6cc2724c8
--- /dev/null
+++ b/lib/test_fortify/read_overflow2_field-memmove.c
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#define TEST \
+ memmove(large, instance.buf, sizeof(instance.buf) + 1)
+
+#include "test_fortify.h"
diff --git a/lib/test_fortify/test_fortify.h b/lib/test_fortify/test_fortify.h
new file mode 100644
index 000000000..d22664fff
--- /dev/null
+++ b/lib/test_fortify/test_fortify.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#include <linux/kernel.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+void do_fortify_tests(void);
+
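+/*
+ * Each file in lib/test_fortify/ defines TEST to a single fortified call
+ * that should trip FORTIFY_SOURCE at compile time, then includes this
+ * header, which runs TEST from do_fortify_tests() below. For example, the
+ * pattern of the sibling write_overflow-strcpy.c test is:
+ *
+ *   #define TEST \
+ *           strcpy(small, large_src)
+ *   #include "test_fortify.h"
+ */
+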
+#define __BUF_SMALL 16
+#define __BUF_LARGE 32
+struct fortify_object {
+ int a;
+ char buf[__BUF_SMALL];
+ int c;
+};
+
+#define LITERAL_SMALL "AAAAAAAAAAAAAAA"
+#define LITERAL_LARGE "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
+const char small_src[__BUF_SMALL] = LITERAL_SMALL;
+const char large_src[__BUF_LARGE] = LITERAL_LARGE;
+
+char small[__BUF_SMALL];
+char large[__BUF_LARGE];
+struct fortify_object instance;
+size_t size;
+
+void do_fortify_tests(void)
+{
+ /* Normal initializations. */
+ memset(&instance, 0x32, sizeof(instance));
+ memset(small, 0xA5, sizeof(small));
+ memset(large, 0x5A, sizeof(large));
+
+ TEST;
+}
diff --git a/lib/test_fortify/write_overflow-memcpy.c b/lib/test_fortify/write_overflow-memcpy.c
new file mode 100644
index 000000000..3b3984e42
--- /dev/null
+++ b/lib/test_fortify/write_overflow-memcpy.c
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#define TEST \
+ memcpy(instance.buf, large_src, sizeof(large_src))
+
+#include "test_fortify.h"
diff --git a/lib/test_fortify/write_overflow-memmove.c b/lib/test_fortify/write_overflow-memmove.c
new file mode 100644
index 000000000..640437c3b
--- /dev/null
+++ b/lib/test_fortify/write_overflow-memmove.c
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#define TEST \
+ memmove(instance.buf, large_src, sizeof(large_src))
+
+#include "test_fortify.h"
diff --git a/lib/test_fortify/write_overflow-memset.c b/lib/test_fortify/write_overflow-memset.c
new file mode 100644
index 000000000..36e34908c
--- /dev/null
+++ b/lib/test_fortify/write_overflow-memset.c
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#define TEST \
+ memset(instance.buf, 0x5A, sizeof(large_src))
+
+#include "test_fortify.h"
diff --git a/lib/test_fortify/write_overflow-strcpy-lit.c b/lib/test_fortify/write_overflow-strcpy-lit.c
new file mode 100644
index 000000000..51effb3e5
--- /dev/null
+++ b/lib/test_fortify/write_overflow-strcpy-lit.c
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#define TEST \
+ strcpy(small, LITERAL_LARGE)
+
+#include "test_fortify.h"
diff --git a/lib/test_fortify/write_overflow-strcpy.c b/lib/test_fortify/write_overflow-strcpy.c
new file mode 100644
index 000000000..84f1c56a6
--- /dev/null
+++ b/lib/test_fortify/write_overflow-strcpy.c
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#define TEST \
+ strcpy(small, large_src)
+
+#include "test_fortify.h"
diff --git a/lib/test_fortify/write_overflow-strlcpy-src.c b/lib/test_fortify/write_overflow-strlcpy-src.c
new file mode 100644
index 000000000..91bf83ebd
--- /dev/null
+++ b/lib/test_fortify/write_overflow-strlcpy-src.c
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#define TEST \
+ strlcpy(small, large_src, sizeof(small) + 1)
+
+#include "test_fortify.h"
diff --git a/lib/test_fortify/write_overflow-strlcpy.c b/lib/test_fortify/write_overflow-strlcpy.c
new file mode 100644
index 000000000..1883db7c0
--- /dev/null
+++ b/lib/test_fortify/write_overflow-strlcpy.c
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#define TEST \
+ strlcpy(instance.buf, large_src, sizeof(instance.buf) + 1)
+
+#include "test_fortify.h"
diff --git a/lib/test_fortify/write_overflow-strncpy-src.c b/lib/test_fortify/write_overflow-strncpy-src.c
new file mode 100644
index 000000000..8dcfb8c78
--- /dev/null
+++ b/lib/test_fortify/write_overflow-strncpy-src.c
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#define TEST \
+ strncpy(small, large_src, sizeof(small) + 1)
+
+#include "test_fortify.h"
diff --git a/lib/test_fortify/write_overflow-strncpy.c b/lib/test_fortify/write_overflow-strncpy.c
new file mode 100644
index 000000000..b85f079c8
--- /dev/null
+++ b/lib/test_fortify/write_overflow-strncpy.c
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#define TEST \
+ strncpy(instance.buf, large_src, sizeof(instance.buf) + 1)
+
+#include "test_fortify.h"
diff --git a/lib/test_fortify/write_overflow-strscpy.c b/lib/test_fortify/write_overflow-strscpy.c
new file mode 100644
index 000000000..38feddf37
--- /dev/null
+++ b/lib/test_fortify/write_overflow-strscpy.c
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#define TEST \
+ strscpy(instance.buf, large_src, sizeof(instance.buf) + 1)
+
+#include "test_fortify.h"
diff --git a/lib/test_fortify/write_overflow_field-memcpy.c b/lib/test_fortify/write_overflow_field-memcpy.c
new file mode 100644
index 000000000..28cc81058
--- /dev/null
+++ b/lib/test_fortify/write_overflow_field-memcpy.c
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#define TEST \
+ memcpy(instance.buf, large, sizeof(instance.buf) + 1)
+
+#include "test_fortify.h"
diff --git a/lib/test_fortify/write_overflow_field-memmove.c b/lib/test_fortify/write_overflow_field-memmove.c
new file mode 100644
index 000000000..377fcf9bb
--- /dev/null
+++ b/lib/test_fortify/write_overflow_field-memmove.c
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#define TEST \
+ memmove(instance.buf, large, sizeof(instance.buf) + 1)
+
+#include "test_fortify.h"
diff --git a/lib/test_fortify/write_overflow_field-memset.c b/lib/test_fortify/write_overflow_field-memset.c
new file mode 100644
index 000000000..2331da269
--- /dev/null
+++ b/lib/test_fortify/write_overflow_field-memset.c
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#define TEST \
+ memset(instance.buf, 0x42, sizeof(instance.buf) + 1)
+
+#include "test_fortify.h"
diff --git a/lib/test_fprobe.c b/lib/test_fprobe.c
new file mode 100644
index 000000000..24de0e5ff
--- /dev/null
+++ b/lib/test_fprobe.c
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * test_fprobe.c - simple sanity test for fprobe
+ */
+
+#include <linux/kernel.h>
+#include <linux/fprobe.h>
+#include <linux/random.h>
+#include <kunit/test.h>
+
+#define div_factor 3
+
+static struct kunit *current_test;
+
+static u32 rand1, entry_val, exit_val;
+
+/* Use indirect calls to avoid inlining the target functions */
+static u32 (*target)(u32 value);
+static u32 (*target2)(u32 value);
+static u32 (*target_nest)(u32 value, u32 (*nest)(u32));
+static unsigned long target_ip;
+static unsigned long target2_ip;
+static unsigned long target_nest_ip;
+static int entry_return_value;
+
+static noinline u32 fprobe_selftest_target(u32 value)
+{
+ return (value / div_factor);
+}
+
+static noinline u32 fprobe_selftest_target2(u32 value)
+{
+ return (value / div_factor) + 1;
+}
+
+static noinline u32 fprobe_selftest_nest_target(u32 value, u32 (*nest)(u32))
+{
+ return nest(value + 2);
+}
+
+static notrace int fp_entry_handler(struct fprobe *fp, unsigned long ip,
+ unsigned long ret_ip,
+ struct pt_regs *regs, void *data)
+{
+ KUNIT_EXPECT_FALSE(current_test, preemptible());
+	/* This can be called for both fprobe_selftest_target and fprobe_selftest_target2 */
+ if (ip != target_ip)
+ KUNIT_EXPECT_EQ(current_test, ip, target2_ip);
+ entry_val = (rand1 / div_factor);
+ if (fp->entry_data_size) {
+ KUNIT_EXPECT_NOT_NULL(current_test, data);
+ if (data)
+ *(u32 *)data = entry_val;
+ } else
+ KUNIT_EXPECT_NULL(current_test, data);
+
+ return entry_return_value;
+}
+
+static notrace void fp_exit_handler(struct fprobe *fp, unsigned long ip,
+ unsigned long ret_ip,
+ struct pt_regs *regs, void *data)
+{
+ unsigned long ret = regs_return_value(regs);
+
+ KUNIT_EXPECT_FALSE(current_test, preemptible());
+ if (ip != target_ip) {
+ KUNIT_EXPECT_EQ(current_test, ip, target2_ip);
+ KUNIT_EXPECT_EQ(current_test, ret, (rand1 / div_factor) + 1);
+ } else
+ KUNIT_EXPECT_EQ(current_test, ret, (rand1 / div_factor));
+ KUNIT_EXPECT_EQ(current_test, entry_val, (rand1 / div_factor));
+ exit_val = entry_val + div_factor;
+ if (fp->entry_data_size) {
+ KUNIT_EXPECT_NOT_NULL(current_test, data);
+ if (data)
+ KUNIT_EXPECT_EQ(current_test, *(u32 *)data, entry_val);
+ } else
+ KUNIT_EXPECT_NULL(current_test, data);
+}
+
+static notrace int nest_entry_handler(struct fprobe *fp, unsigned long ip,
+ unsigned long ret_ip,
+ struct pt_regs *regs, void *data)
+{
+ KUNIT_EXPECT_FALSE(current_test, preemptible());
+ return 0;
+}
+
+static notrace void nest_exit_handler(struct fprobe *fp, unsigned long ip,
+ unsigned long ret_ip,
+ struct pt_regs *regs, void *data)
+{
+ KUNIT_EXPECT_FALSE(current_test, preemptible());
+ KUNIT_EXPECT_EQ(current_test, ip, target_nest_ip);
+}
+
+/* Test entry only (no rethook) */
+static void test_fprobe_entry(struct kunit *test)
+{
+ struct fprobe fp_entry = {
+ .entry_handler = fp_entry_handler,
+ };
+
+ current_test = test;
+
+	/* Before registering, unregister should fail. */
+ KUNIT_EXPECT_NE(test, 0, unregister_fprobe(&fp_entry));
+ KUNIT_EXPECT_EQ(test, 0, register_fprobe(&fp_entry, "fprobe_selftest_target*", NULL));
+
+ entry_val = 0;
+ exit_val = 0;
+ target(rand1);
+ KUNIT_EXPECT_NE(test, 0, entry_val);
+ KUNIT_EXPECT_EQ(test, 0, exit_val);
+
+ entry_val = 0;
+ exit_val = 0;
+ target2(rand1);
+ KUNIT_EXPECT_NE(test, 0, entry_val);
+ KUNIT_EXPECT_EQ(test, 0, exit_val);
+
+ KUNIT_EXPECT_EQ(test, 0, unregister_fprobe(&fp_entry));
+}
+
+static void test_fprobe(struct kunit *test)
+{
+ struct fprobe fp = {
+ .entry_handler = fp_entry_handler,
+ .exit_handler = fp_exit_handler,
+ };
+
+ current_test = test;
+ KUNIT_EXPECT_EQ(test, 0, register_fprobe(&fp, "fprobe_selftest_target*", NULL));
+
+ entry_val = 0;
+ exit_val = 0;
+ target(rand1);
+ KUNIT_EXPECT_NE(test, 0, entry_val);
+ KUNIT_EXPECT_EQ(test, entry_val + div_factor, exit_val);
+
+ entry_val = 0;
+ exit_val = 0;
+ target2(rand1);
+ KUNIT_EXPECT_NE(test, 0, entry_val);
+ KUNIT_EXPECT_EQ(test, entry_val + div_factor, exit_val);
+
+ KUNIT_EXPECT_EQ(test, 0, unregister_fprobe(&fp));
+}
+
+static void test_fprobe_syms(struct kunit *test)
+{
+ static const char *syms[] = {"fprobe_selftest_target", "fprobe_selftest_target2"};
+ struct fprobe fp = {
+ .entry_handler = fp_entry_handler,
+ .exit_handler = fp_exit_handler,
+ };
+
+ current_test = test;
+ KUNIT_EXPECT_EQ(test, 0, register_fprobe_syms(&fp, syms, 2));
+
+ entry_val = 0;
+ exit_val = 0;
+ target(rand1);
+ KUNIT_EXPECT_NE(test, 0, entry_val);
+ KUNIT_EXPECT_EQ(test, entry_val + div_factor, exit_val);
+
+ entry_val = 0;
+ exit_val = 0;
+ target2(rand1);
+ KUNIT_EXPECT_NE(test, 0, entry_val);
+ KUNIT_EXPECT_EQ(test, entry_val + div_factor, exit_val);
+
+ KUNIT_EXPECT_EQ(test, 0, unregister_fprobe(&fp));
+}
+
+/* Test private entry_data */
+static void test_fprobe_data(struct kunit *test)
+{
+ struct fprobe fp = {
+ .entry_handler = fp_entry_handler,
+ .exit_handler = fp_exit_handler,
+ .entry_data_size = sizeof(u32),
+ };
+
+ current_test = test;
+ KUNIT_EXPECT_EQ(test, 0, register_fprobe(&fp, "fprobe_selftest_target", NULL));
+
+ target(rand1);
+
+ KUNIT_EXPECT_EQ(test, 0, unregister_fprobe(&fp));
+}
+
+/* Test nr_maxactive */
+static void test_fprobe_nest(struct kunit *test)
+{
+ static const char *syms[] = {"fprobe_selftest_target", "fprobe_selftest_nest_target"};
+ struct fprobe fp = {
+ .entry_handler = nest_entry_handler,
+ .exit_handler = nest_exit_handler,
+ .nr_maxactive = 1,
+ };
+
+ current_test = test;
+ KUNIT_EXPECT_EQ(test, 0, register_fprobe_syms(&fp, syms, 2));
+
+ target_nest(rand1, target);
+ KUNIT_EXPECT_EQ(test, 1, fp.nmissed);
+
+ KUNIT_EXPECT_EQ(test, 0, unregister_fprobe(&fp));
+}
+
+static void test_fprobe_skip(struct kunit *test)
+{
+ struct fprobe fp = {
+ .entry_handler = fp_entry_handler,
+ .exit_handler = fp_exit_handler,
+ };
+
+ current_test = test;
+ KUNIT_EXPECT_EQ(test, 0, register_fprobe(&fp, "fprobe_selftest_target", NULL));
+
+ entry_return_value = 1;
+ entry_val = 0;
+ exit_val = 0;
+ target(rand1);
+ KUNIT_EXPECT_NE(test, 0, entry_val);
+ KUNIT_EXPECT_EQ(test, 0, exit_val);
+ KUNIT_EXPECT_EQ(test, 0, fp.nmissed);
+ entry_return_value = 0;
+
+ KUNIT_EXPECT_EQ(test, 0, unregister_fprobe(&fp));
+}
+
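+/*
+ * Resolve the ftrace-patchable address within a function: look up the
+ * symbol's size via kallsyms, then search that range for an ftrace site.
+ */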
+static unsigned long get_ftrace_location(void *func)
+{
+ unsigned long size, addr = (unsigned long)func;
+
+ if (!kallsyms_lookup_size_offset(addr, &size, NULL) || !size)
+ return 0;
+
+ return ftrace_location_range(addr, addr + size - 1);
+}
+
+static int fprobe_test_init(struct kunit *test)
+{
+ rand1 = get_random_u32_above(div_factor);
+ target = fprobe_selftest_target;
+ target2 = fprobe_selftest_target2;
+ target_nest = fprobe_selftest_nest_target;
+ target_ip = get_ftrace_location(target);
+ target2_ip = get_ftrace_location(target2);
+ target_nest_ip = get_ftrace_location(target_nest);
+
+ return 0;
+}
+
+static struct kunit_case fprobe_testcases[] = {
+ KUNIT_CASE(test_fprobe_entry),
+ KUNIT_CASE(test_fprobe),
+ KUNIT_CASE(test_fprobe_syms),
+ KUNIT_CASE(test_fprobe_data),
+ KUNIT_CASE(test_fprobe_nest),
+ KUNIT_CASE(test_fprobe_skip),
+ {}
+};
+
+static struct kunit_suite fprobe_test_suite = {
+ .name = "fprobe_test",
+ .init = fprobe_test_init,
+ .test_cases = fprobe_testcases,
+};
+
+kunit_test_suites(&fprobe_test_suite);
diff --git a/lib/test_fpu.c b/lib/test_fpu.c
new file mode 100644
index 000000000..e82db19fe
--- /dev/null
+++ b/lib/test_fpu.c
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Test cases for using floating point operations inside a kernel module.
+ *
+ * This tests kernel_fpu_begin() and kernel_fpu_end() functions, especially
+ * when userland has modified the floating point control registers. The kernel
+ * state might depend on the state set by the userland thread that was active
+ * before a syscall.
+ *
+ * To facilitate the test, this module registers file
+ * /sys/kernel/debug/selftest_helpers/test_fpu, which when read causes a
+ * sequence of floating point operations. If the operations fail, either the
+ * read returns error status or the kernel crashes.
+ * If the operations succeed, the read returns "1\n".
+ */
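+/*
+ * Illustrative use once the module is loaded (path from the debugfs
+ * registration below):
+ *
+ *   cat /sys/kernel/debug/selftest_helpers/test_fpu
+ */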
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/debugfs.h>
+#include <asm/fpu/api.h>
+
+static int test_fpu(void)
+{
+	/*
+	 * This sequence of operations tests that the rounding mode is
+	 * round-to-nearest and that denormal numbers are supported.
+	 * Volatile variables are used to keep the compiler from optimizing
+	 * the calculations away.
+	 */
+ volatile double a, b, c, d, e, f, g;
+
+ a = 4.0;
+ b = 1e-15;
+ c = 1e-310;
+
+ /* Sets precision flag */
+ d = a + b;
+
+ /* Result depends on rounding mode */
+ e = a + b / 2;
+
+ /* Denormal and very large values */
+ f = b / c;
+
+ /* Depends on denormal support */
+ g = a + c * f;
+
+ if (d > a && e > a && g > a)
+ return 0;
+ else
+ return -EINVAL;
+}
+
+static int test_fpu_get(void *data, u64 *val)
+{
+ int status = -EINVAL;
+
+ kernel_fpu_begin();
+ status = test_fpu();
+ kernel_fpu_end();
+
+ *val = 1;
+ return status;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(test_fpu_fops, test_fpu_get, NULL, "%lld\n");
+static struct dentry *selftest_dir;
+
+static int __init test_fpu_init(void)
+{
+ selftest_dir = debugfs_create_dir("selftest_helpers", NULL);
+ if (!selftest_dir)
+ return -ENOMEM;
+
+ debugfs_create_file_unsafe("test_fpu", 0444, selftest_dir, NULL,
+ &test_fpu_fops);
+
+ return 0;
+}
+
+static void __exit test_fpu_exit(void)
+{
+ debugfs_remove(selftest_dir);
+}
+
+module_init(test_fpu_init);
+module_exit(test_fpu_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/lib/test_free_pages.c b/lib/test_free_pages.c
new file mode 100644
index 000000000..9ebf6f554
--- /dev/null
+++ b/lib/test_free_pages.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * test_free_pages.c: Check that free_pages() doesn't leak memory
+ * Copyright (c) 2020 Oracle
+ * Author: Matthew Wilcox <willy@infradead.org>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/gfp.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+
+static void test_free_pages(gfp_t gfp)
+{
+ unsigned int i;
+
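+	/* Allocate and free an order-3 (eight-page) block a million times. */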
+ for (i = 0; i < 1000 * 1000; i++) {
+ unsigned long addr = __get_free_pages(gfp, 3);
+ struct page *page = virt_to_page((void *)addr);
+
+ /* Simulate page cache getting a speculative reference */
+ get_page(page);
+ free_pages(addr, 3);
+ put_page(page);
+ }
+}
+
+static int m_in(void)
+{
+ pr_info("Testing with GFP_KERNEL\n");
+ test_free_pages(GFP_KERNEL);
+ pr_info("Testing with GFP_KERNEL | __GFP_COMP\n");
+ test_free_pages(GFP_KERNEL | __GFP_COMP);
+ pr_info("Test completed\n");
+
+ return 0;
+}
+
+static void m_ex(void)
+{
+}
+
+module_init(m_in);
+module_exit(m_ex);
+MODULE_AUTHOR("Matthew Wilcox <willy@infradead.org>");
+MODULE_LICENSE("GPL");
diff --git a/lib/test_hash.c b/lib/test_hash.c
new file mode 100644
index 000000000..bb25fda34
--- /dev/null
+++ b/lib/test_hash.c
@@ -0,0 +1,238 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Test cases for <linux/hash.h> and <linux/stringhash.h>
+ * This just verifies that various ways of computing a hash
+ * produce the same thing and, where a k-bit hash value is
+ * requested, that the result fits in k bits.
+ *
+ * We fill a buffer with a 255-byte null-terminated string,
+ * and use both full_name_hash() and hashlen_string() to hash the
+ * substrings from i to j, where 0 <= i < j < 256.
+ *
+ * The returned values are used to check that __hash_32() and
+ * __hash_32_generic() compute the same thing. Likewise hash_32()
+ * and hash_64().
+ */
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/hash.h>
+#include <linux/stringhash.h>
+#include <kunit/test.h>
+
+/* 32-bit XORSHIFT generator. Seed must not be zero. */
+static u32 __attribute_const__
+xorshift(u32 seed)
+{
+ seed ^= seed << 13;
+ seed ^= seed >> 17;
+ seed ^= seed << 5;
+ return seed;
+}
+
+/* Given a non-zero x, returns a non-zero byte. */
+static u8 __attribute_const__
+mod255(u32 x)
+{
+ x = (x & 0xffff) + (x >> 16); /* 1 <= x <= 0x1fffe */
+ x = (x & 0xff) + (x >> 8); /* 1 <= x <= 0x2fd */
+ x = (x & 0xff) + (x >> 8); /* 1 <= x <= 0x100 */
+ x = (x & 0xff) + (x >> 8); /* 1 <= x <= 0xff */
+ return x;
+}
+
+/* Fill the buffer with non-zero bytes. */
+static void fill_buf(char *buf, size_t len, u32 seed)
+{
+ size_t i;
+
+ for (i = 0; i < len; i++) {
+ seed = xorshift(seed);
+ buf[i] = mod255(seed);
+ }
+}
+
+/* Holds most testing variables for the int test. */
+struct test_hash_params {
+ /* Pointer to integer to be hashed. */
+ unsigned long long *h64;
+ /* Low 32-bits of integer to be hashed. */
+ u32 h0;
+ /* Arch-specific hash result. */
+ u32 h1;
+ /* Generic hash result. */
+ u32 h2;
+ /* ORed hashes of given size (in bits). */
+ u32 (*hash_or)[33];
+};
+
+#ifdef HAVE_ARCH__HASH_32
+static void
+test_int__hash_32(struct kunit *test, struct test_hash_params *params)
+{
+ params->hash_or[1][0] |= params->h2 = __hash_32_generic(params->h0);
+#if HAVE_ARCH__HASH_32 == 1
+ KUNIT_EXPECT_EQ_MSG(test, params->h1, params->h2,
+ "__hash_32(%#x) = %#x != __hash_32_generic() = %#x",
+ params->h0, params->h1, params->h2);
+#endif
+}
+#endif
+
+#ifdef HAVE_ARCH_HASH_64
+static void
+test_int_hash_64(struct kunit *test, struct test_hash_params *params, u32 const *m, int *k)
+{
+ params->h2 = hash_64_generic(*params->h64, *k);
+#if HAVE_ARCH_HASH_64 == 1
+ KUNIT_EXPECT_EQ_MSG(test, params->h1, params->h2,
+ "hash_64(%#llx, %d) = %#x != hash_64_generic() = %#x",
+ *params->h64, *k, params->h1, params->h2);
+#else
+ KUNIT_EXPECT_LE_MSG(test, params->h1, params->h2,
+ "hash_64_generic(%#llx, %d) = %#x > %#x",
+ *params->h64, *k, params->h1, *m);
+#endif
+}
+#endif
+
+/*
+ * Test the various integer hash functions. h64 (or its low-order bits)
+ * is the integer to hash. hash_or accumulates the OR of the hash values,
+ * which are later checked to see that they cover all the requested bits.
+ *
+ * Because these functions (as opposed to the string hashes) are all
+ * inline, the code being tested is actually in the module, and you can
+ * recompile and re-test the module without rebooting.
+ */
+static void
+test_int_hash(struct kunit *test, unsigned long long h64, u32 hash_or[2][33])
+{
+ int k;
+ struct test_hash_params params = { &h64, (u32)h64, 0, 0, hash_or };
+
+ /* Test __hash32 */
+ hash_or[0][0] |= params.h1 = __hash_32(params.h0);
+#ifdef HAVE_ARCH__HASH_32
+ test_int__hash_32(test, &params);
+#endif
+
+ /* Test k = 1..32 bits */
+ for (k = 1; k <= 32; k++) {
+ u32 const m = ((u32)2 << (k-1)) - 1; /* Low k bits set */
+
+ /* Test hash_32 */
+ hash_or[0][k] |= params.h1 = hash_32(params.h0, k);
+ KUNIT_EXPECT_LE_MSG(test, params.h1, m,
+ "hash_32(%#x, %d) = %#x > %#x",
+ params.h0, k, params.h1, m);
+
+ /* Test hash_64 */
+ hash_or[1][k] |= params.h1 = hash_64(h64, k);
+ KUNIT_EXPECT_LE_MSG(test, params.h1, m,
+ "hash_64(%#llx, %d) = %#x > %#x",
+ h64, k, params.h1, m);
+#ifdef HAVE_ARCH_HASH_64
+ test_int_hash_64(test, &params, &m, &k);
+#endif
+ }
+}
+
+#define SIZE 256 /* Run time is cubic in SIZE */
+
+static void test_string_or(struct kunit *test)
+{
+ char buf[SIZE+1];
+ u32 string_or = 0;
+ int i, j;
+
+ fill_buf(buf, SIZE, 1);
+
+ /* Test every possible non-empty substring in the buffer. */
+ for (j = SIZE; j > 0; --j) {
+ buf[j] = '\0';
+
+ for (i = 0; i <= j; i++) {
+ u32 h0 = full_name_hash(buf+i, buf+i, j-i);
+
+ string_or |= h0;
+ } /* i */
+ } /* j */
+
+ /* The OR of all the hash values should cover all the bits */
+ KUNIT_EXPECT_EQ_MSG(test, string_or, -1u,
+ "OR of all string hash results = %#x != %#x",
+ string_or, -1u);
+}
+
+static void test_hash_or(struct kunit *test)
+{
+ char buf[SIZE+1];
+ u32 hash_or[2][33] = { { 0, } };
+ unsigned long long h64 = 0;
+ int i, j;
+
+ fill_buf(buf, SIZE, 1);
+
+ /* Test every possible non-empty substring in the buffer. */
+ for (j = SIZE; j > 0; --j) {
+ buf[j] = '\0';
+
+ for (i = 0; i <= j; i++) {
+ u64 hashlen = hashlen_string(buf+i, buf+i);
+ u32 h0 = full_name_hash(buf+i, buf+i, j-i);
+
+ /* Check that hashlen_string gets the length right */
+ KUNIT_EXPECT_EQ_MSG(test, hashlen_len(hashlen), j-i,
+ "hashlen_string(%d..%d) returned length %u, expected %d",
+ i, j, hashlen_len(hashlen), j-i);
+ /* Check that the hashes match */
+ KUNIT_EXPECT_EQ_MSG(test, hashlen_hash(hashlen), h0,
+ "hashlen_string(%d..%d) = %08x != full_name_hash() = %08x",
+ i, j, hashlen_hash(hashlen), h0);
+
+ h64 = h64 << 32 | h0; /* For use with hash_64 */
+ test_int_hash(test, h64, hash_or);
+ } /* i */
+ } /* j */
+
+ KUNIT_EXPECT_EQ_MSG(test, hash_or[0][0], -1u,
+ "OR of all __hash_32 results = %#x != %#x",
+ hash_or[0][0], -1u);
+#ifdef HAVE_ARCH__HASH_32
+#if HAVE_ARCH__HASH_32 != 1 /* Test is pointless if results match */
+ KUNIT_EXPECT_EQ_MSG(test, hash_or[1][0], -1u,
+ "OR of all __hash_32_generic results = %#x != %#x",
+ hash_or[1][0], -1u);
+#endif
+#endif
+
+ /* Likewise for all the i-bit hash values */
+ for (i = 1; i <= 32; i++) {
+ u32 const m = ((u32)2 << (i-1)) - 1; /* Low i bits set */
+
+ KUNIT_EXPECT_EQ_MSG(test, hash_or[0][i], m,
+ "OR of all hash_32(%d) results = %#x (%#x expected)",
+ i, hash_or[0][i], m);
+ KUNIT_EXPECT_EQ_MSG(test, hash_or[1][i], m,
+ "OR of all hash_64(%d) results = %#x (%#x expected)",
+ i, hash_or[1][i], m);
+ }
+}
+
+static struct kunit_case hash_test_cases[] __refdata = {
+ KUNIT_CASE(test_string_or),
+ KUNIT_CASE(test_hash_or),
+ {}
+};
+
+static struct kunit_suite hash_test_suite = {
+ .name = "hash",
+ .test_cases = hash_test_cases,
+};
+
+kunit_test_suite(hash_test_suite);
+
+MODULE_LICENSE("GPL");
diff --git a/lib/test_hexdump.c b/lib/test_hexdump.c
new file mode 100644
index 000000000..b916801f2
--- /dev/null
+++ b/lib/test_hexdump.c
@@ -0,0 +1,256 @@
+/*
+ * Test cases for lib/hexdump.c module.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/random.h>
+#include <linux/string.h>
+
+static const unsigned char data_b[] = {
+ '\xbe', '\x32', '\xdb', '\x7b', '\x0a', '\x18', '\x93', '\xb2', /* 00 - 07 */
+ '\x70', '\xba', '\xc4', '\x24', '\x7d', '\x83', '\x34', '\x9b', /* 08 - 0f */
+ '\xa6', '\x9c', '\x31', '\xad', '\x9c', '\x0f', '\xac', '\xe9', /* 10 - 17 */
+ '\x4c', '\xd1', '\x19', '\x99', '\x43', '\xb1', '\xaf', '\x0c', /* 18 - 1f */
+};
+
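+/* The ASCII column hex_dump_to_buffer() is expected to render for data_b. */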
+static const unsigned char data_a[] = ".2.{....p..$}.4...1.....L...C...";
+
+static const char * const test_data_1[] __initconst = {
+ "be", "32", "db", "7b", "0a", "18", "93", "b2",
+ "70", "ba", "c4", "24", "7d", "83", "34", "9b",
+ "a6", "9c", "31", "ad", "9c", "0f", "ac", "e9",
+ "4c", "d1", "19", "99", "43", "b1", "af", "0c",
+};
+
+static const char * const test_data_2_le[] __initconst = {
+ "32be", "7bdb", "180a", "b293",
+ "ba70", "24c4", "837d", "9b34",
+ "9ca6", "ad31", "0f9c", "e9ac",
+ "d14c", "9919", "b143", "0caf",
+};
+
+static const char * const test_data_2_be[] __initconst = {
+ "be32", "db7b", "0a18", "93b2",
+ "70ba", "c424", "7d83", "349b",
+ "a69c", "31ad", "9c0f", "ace9",
+ "4cd1", "1999", "43b1", "af0c",
+};
+
+static const char * const test_data_4_le[] __initconst = {
+ "7bdb32be", "b293180a", "24c4ba70", "9b34837d",
+ "ad319ca6", "e9ac0f9c", "9919d14c", "0cafb143",
+};
+
+static const char * const test_data_4_be[] __initconst = {
+ "be32db7b", "0a1893b2", "70bac424", "7d83349b",
+ "a69c31ad", "9c0face9", "4cd11999", "43b1af0c",
+};
+
+static const char * const test_data_8_le[] __initconst = {
+ "b293180a7bdb32be", "9b34837d24c4ba70",
+ "e9ac0f9cad319ca6", "0cafb1439919d14c",
+};
+
+static const char * const test_data_8_be[] __initconst = {
+ "be32db7b0a1893b2", "70bac4247d83349b",
+ "a69c31ad9c0face9", "4cd1199943b1af0c",
+};
+
+#define FILL_CHAR '#'
+
+static unsigned total_tests __initdata;
+static unsigned failed_tests __initdata;
+
+static void __init test_hexdump_prepare_test(size_t len, int rowsize,
+ int groupsize, char *test,
+ size_t testlen, bool ascii)
+{
+ char *p;
+ const char * const *result;
+ size_t l = len;
+ int gs = groupsize, rs = rowsize;
+ unsigned int i;
+ const bool is_be = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
+
+ if (rs != 16 && rs != 32)
+ rs = 16;
+
+ if (l > rs)
+ l = rs;
+
+ if (!is_power_of_2(gs) || gs > 8 || (len % gs != 0))
+ gs = 1;
+
+ if (gs == 8)
+ result = is_be ? test_data_8_be : test_data_8_le;
+ else if (gs == 4)
+ result = is_be ? test_data_4_be : test_data_4_le;
+ else if (gs == 2)
+ result = is_be ? test_data_2_be : test_data_2_le;
+ else
+ result = test_data_1;
+
+ /* hex dump */
+ p = test;
+ for (i = 0; i < l / gs; i++) {
+ const char *q = *result++;
+ size_t amount = strlen(q);
+
+ memcpy(p, q, amount);
+ p += amount;
+
+ *p++ = ' ';
+ }
+ if (i)
+ p--;
+
+ /* ASCII part */
+ if (ascii) {
+ do {
+ *p++ = ' ';
+ } while (p < test + rs * 2 + rs / gs + 1);
+
+ strncpy(p, data_a, l);
+ p += l;
+ }
+
+ *p = '\0';
+}
+
+#define TEST_HEXDUMP_BUF_SIZE (32 * 3 + 2 + 32 + 1)
+
+static void __init test_hexdump(size_t len, int rowsize, int groupsize,
+ bool ascii)
+{
+ char test[TEST_HEXDUMP_BUF_SIZE];
+ char real[TEST_HEXDUMP_BUF_SIZE];
+
+ total_tests++;
+
+ memset(real, FILL_CHAR, sizeof(real));
+ hex_dump_to_buffer(data_b, len, rowsize, groupsize, real, sizeof(real),
+ ascii);
+
+ memset(test, FILL_CHAR, sizeof(test));
+ test_hexdump_prepare_test(len, rowsize, groupsize, test, sizeof(test),
+ ascii);
+
+ if (memcmp(test, real, TEST_HEXDUMP_BUF_SIZE)) {
+ pr_err("Len: %zu row: %d group: %d\n", len, rowsize, groupsize);
+ pr_err("Result: '%s'\n", real);
+ pr_err("Expect: '%s'\n", test);
+ failed_tests++;
+ }
+}
+
+static void __init test_hexdump_set(int rowsize, bool ascii)
+{
+ size_t d = min_t(size_t, sizeof(data_b), rowsize);
+ size_t len = get_random_u32_inclusive(1, d);
+
+ test_hexdump(len, rowsize, 4, ascii);
+ test_hexdump(len, rowsize, 2, ascii);
+ test_hexdump(len, rowsize, 8, ascii);
+ test_hexdump(len, rowsize, 1, ascii);
+}
+
+static void __init test_hexdump_overflow(size_t buflen, size_t len,
+ int rowsize, int groupsize,
+ bool ascii)
+{
+ char test[TEST_HEXDUMP_BUF_SIZE];
+ char buf[TEST_HEXDUMP_BUF_SIZE];
+ int rs = rowsize, gs = groupsize;
+ int ae, he, e, f, r;
+ bool a;
+
+ total_tests++;
+
+ memset(buf, FILL_CHAR, sizeof(buf));
+
+ r = hex_dump_to_buffer(data_b, len, rs, gs, buf, buflen, ascii);
+
+	/*
+	 * The caller must provide a data length that is a multiple of
+	 * groupsize. The calculations below are made with that assumption
+	 * in mind.
+	 */
+ ae = rs * 2 /* hex */ + rs / gs /* spaces */ + 1 /* space */ + len /* ascii */;
+ he = (gs * 2 /* hex */ + 1 /* space */) * len / gs - 1 /* no trailing space */;
+
+ if (ascii)
+ e = ae;
+ else
+ e = he;
+
+ f = min_t(int, e + 1, buflen);
+ if (buflen) {
+ test_hexdump_prepare_test(len, rs, gs, test, sizeof(test), ascii);
+ test[f - 1] = '\0';
+ }
+ memset(test + f, FILL_CHAR, sizeof(test) - f);
+
+ a = r == e && !memcmp(test, buf, TEST_HEXDUMP_BUF_SIZE);
+
+ buf[sizeof(buf) - 1] = '\0';
+
+ if (!a) {
+ pr_err("Len: %zu buflen: %zu strlen: %zu\n",
+ len, buflen, strnlen(buf, sizeof(buf)));
+ pr_err("Result: %d '%s'\n", r, buf);
+ pr_err("Expect: %d '%s'\n", e, test);
+ failed_tests++;
+ }
+}
+
+static void __init test_hexdump_overflow_set(size_t buflen, bool ascii)
+{
+ unsigned int i = 0;
+ int rs = get_random_u32_inclusive(1, 2) * 16;
+
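+	/* Exercise group sizes 1, 2, 4 and 8 against the random row size. */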
+ do {
+ int gs = 1 << i;
+ size_t len = get_random_u32_below(rs) + gs;
+
+ test_hexdump_overflow(buflen, rounddown(len, gs), rs, gs, ascii);
+ } while (i++ < 3);
+}
+
+static int __init test_hexdump_init(void)
+{
+ unsigned int i;
+ int rowsize;
+
+ rowsize = get_random_u32_inclusive(1, 2) * 16;
+ for (i = 0; i < 16; i++)
+ test_hexdump_set(rowsize, false);
+
+ rowsize = get_random_u32_inclusive(1, 2) * 16;
+ for (i = 0; i < 16; i++)
+ test_hexdump_set(rowsize, true);
+
+ for (i = 0; i <= TEST_HEXDUMP_BUF_SIZE; i++)
+ test_hexdump_overflow_set(i, false);
+
+ for (i = 0; i <= TEST_HEXDUMP_BUF_SIZE; i++)
+ test_hexdump_overflow_set(i, true);
+
+ if (failed_tests == 0)
+ pr_info("all %u tests passed\n", total_tests);
+ else
+ pr_err("failed %u out of %u tests\n", failed_tests, total_tests);
+
+ return failed_tests ? -EINVAL : 0;
+}
+module_init(test_hexdump_init);
+
+static void __exit test_hexdump_exit(void)
+{
+ /* do nothing */
+}
+module_exit(test_hexdump_exit);
+
+MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/lib/test_hmm.c b/lib/test_hmm.c
new file mode 100644
index 000000000..717dcb830
--- /dev/null
+++ b/lib/test_hmm.c
@@ -0,0 +1,1553 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This is a module to test the HMM (Heterogeneous Memory Management)
+ * mirror and zone device private memory migration APIs of the kernel.
+ * Userspace programs can register with the driver to mirror their own address
+ * space and can use the device to read/write any valid virtual address.
+ */
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/memremap.h>
+#include <linux/mutex.h>
+#include <linux/rwsem.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/highmem.h>
+#include <linux/delay.h>
+#include <linux/pagemap.h>
+#include <linux/hmm.h>
+#include <linux/vmalloc.h>
+#include <linux/swap.h>
+#include <linux/swapops.h>
+#include <linux/sched/mm.h>
+#include <linux/platform_device.h>
+#include <linux/rmap.h>
+#include <linux/mmu_notifier.h>
+#include <linux/migrate.h>
+
+#include "test_hmm_uapi.h"
+
+#define DMIRROR_NDEVICES 4
+#define DMIRROR_RANGE_FAULT_TIMEOUT 1000
+#define DEVMEM_CHUNK_SIZE (256 * 1024 * 1024U)
+#define DEVMEM_CHUNKS_RESERVE 16
+
+/*
+ * For device_private pages, dpage is just a dummy struct page
+ * representing a piece of device memory. dmirror_devmem_alloc_page
+ * allocates a real system memory page as backing storage to fake a
+ * real device. zone_device_data points to that backing page. But
+ * for device_coherent memory, the struct page represents real
+ * physical CPU-accessible memory that we can use directly.
+ */
+#define BACKING_PAGE(page) (is_device_private_page((page)) ? \
+ (page)->zone_device_data : (page))
+
+static unsigned long spm_addr_dev0;
+module_param(spm_addr_dev0, long, 0644);
+MODULE_PARM_DESC(spm_addr_dev0,
+	"Specify start address for SPM (special purpose memory) used for device 0. Setting this selects the coherent device type. Make sure spm_addr_dev1 is set too. Minimum SPM size should be DEVMEM_CHUNK_SIZE.");
+
+static unsigned long spm_addr_dev1;
+module_param(spm_addr_dev1, long, 0644);
+MODULE_PARM_DESC(spm_addr_dev1,
+	"Specify start address for SPM (special purpose memory) used for device 1. Setting this selects the coherent device type. Make sure spm_addr_dev0 is set too. Minimum SPM size should be DEVMEM_CHUNK_SIZE.");
+
+static const struct dev_pagemap_ops dmirror_devmem_ops;
+static const struct mmu_interval_notifier_ops dmirror_min_ops;
+static dev_t dmirror_dev;
+
+struct dmirror_device;
+
+struct dmirror_bounce {
+ void *ptr;
+ unsigned long size;
+ unsigned long addr;
+ unsigned long cpages;
+};
+
+#define DPT_XA_TAG_ATOMIC 1UL
+#define DPT_XA_TAG_WRITE 3UL
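+
+/*
+ * The tags above ride in the low bits of each XArray entry. A sketch of
+ * how an entry is built and inspected (see dmirror_do_fault() and
+ * dmirror_do_write() below):
+ *
+ *	entry = xa_tag_pointer(page, DPT_XA_TAG_WRITE);
+ *	if (xa_pointer_tag(entry) == DPT_XA_TAG_WRITE)
+ *		page = xa_untag_pointer(entry);
+ */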
+
+/*
+ * Data structure to track address ranges and register for mmu interval
+ * notifier updates.
+ */
+struct dmirror_interval {
+ struct mmu_interval_notifier notifier;
+ struct dmirror *dmirror;
+};
+
+/*
+ * Data attached to the open device file.
+ * Note that it might be shared after a fork().
+ */
+struct dmirror {
+ struct dmirror_device *mdevice;
+ struct xarray pt;
+ struct mmu_interval_notifier notifier;
+ struct mutex mutex;
+};
+
+/*
+ * ZONE_DEVICE pages for migration and simulating device memory.
+ */
+struct dmirror_chunk {
+ struct dev_pagemap pagemap;
+ struct dmirror_device *mdevice;
+ bool remove;
+};
+
+/*
+ * Per device data.
+ */
+struct dmirror_device {
+ struct cdev cdevice;
+ unsigned int zone_device_type;
+ struct device device;
+
+ unsigned int devmem_capacity;
+ unsigned int devmem_count;
+ struct dmirror_chunk **devmem_chunks;
+ struct mutex devmem_lock; /* protects the above */
+
+ unsigned long calloc;
+ unsigned long cfree;
+ struct page *free_pages;
+ spinlock_t lock; /* protects the above */
+};
+
+static struct dmirror_device dmirror_devices[DMIRROR_NDEVICES];
+
+static int dmirror_bounce_init(struct dmirror_bounce *bounce,
+ unsigned long addr,
+ unsigned long size)
+{
+ bounce->addr = addr;
+ bounce->size = size;
+ bounce->cpages = 0;
+ bounce->ptr = vmalloc(size);
+ if (!bounce->ptr)
+ return -ENOMEM;
+ return 0;
+}
+
+static bool dmirror_is_private_zone(struct dmirror_device *mdevice)
+{
+	return mdevice->zone_device_type == HMM_DMIRROR_MEMORY_DEVICE_PRIVATE;
+}
+
+static enum migrate_vma_direction
+dmirror_select_device(struct dmirror *dmirror)
+{
+ return (dmirror->mdevice->zone_device_type ==
+ HMM_DMIRROR_MEMORY_DEVICE_PRIVATE) ?
+ MIGRATE_VMA_SELECT_DEVICE_PRIVATE :
+ MIGRATE_VMA_SELECT_DEVICE_COHERENT;
+}
+
+static void dmirror_bounce_fini(struct dmirror_bounce *bounce)
+{
+ vfree(bounce->ptr);
+}
+
+static int dmirror_fops_open(struct inode *inode, struct file *filp)
+{
+ struct cdev *cdev = inode->i_cdev;
+ struct dmirror *dmirror;
+ int ret;
+
+ /* Mirror this process address space */
+ dmirror = kzalloc(sizeof(*dmirror), GFP_KERNEL);
+ if (dmirror == NULL)
+ return -ENOMEM;
+
+ dmirror->mdevice = container_of(cdev, struct dmirror_device, cdevice);
+ mutex_init(&dmirror->mutex);
+ xa_init(&dmirror->pt);
+
+ ret = mmu_interval_notifier_insert(&dmirror->notifier, current->mm,
+ 0, ULONG_MAX & PAGE_MASK, &dmirror_min_ops);
+ if (ret) {
+ kfree(dmirror);
+ return ret;
+ }
+
+ filp->private_data = dmirror;
+ return 0;
+}
+
+static int dmirror_fops_release(struct inode *inode, struct file *filp)
+{
+ struct dmirror *dmirror = filp->private_data;
+
+ mmu_interval_notifier_remove(&dmirror->notifier);
+ xa_destroy(&dmirror->pt);
+ kfree(dmirror);
+ return 0;
+}
+
+static struct dmirror_chunk *dmirror_page_to_chunk(struct page *page)
+{
+ return container_of(page->pgmap, struct dmirror_chunk, pagemap);
+}
+
+static struct dmirror_device *dmirror_page_to_device(struct page *page)
+{
+ return dmirror_page_to_chunk(page)->mdevice;
+}
+
+static int dmirror_do_fault(struct dmirror *dmirror, struct hmm_range *range)
+{
+ unsigned long *pfns = range->hmm_pfns;
+ unsigned long pfn;
+
+ for (pfn = (range->start >> PAGE_SHIFT);
+ pfn < (range->end >> PAGE_SHIFT);
+ pfn++, pfns++) {
+ struct page *page;
+ void *entry;
+
+ /*
+ * Since we asked for hmm_range_fault() to populate pages,
+ * it shouldn't return an error entry on success.
+ */
+ WARN_ON(*pfns & HMM_PFN_ERROR);
+ WARN_ON(!(*pfns & HMM_PFN_VALID));
+
+ page = hmm_pfn_to_page(*pfns);
+ WARN_ON(!page);
+
+ entry = page;
+ if (*pfns & HMM_PFN_WRITE)
+ entry = xa_tag_pointer(entry, DPT_XA_TAG_WRITE);
+ else if (WARN_ON(range->default_flags & HMM_PFN_WRITE))
+ return -EFAULT;
+ entry = xa_store(&dmirror->pt, pfn, entry, GFP_ATOMIC);
+ if (xa_is_err(entry))
+ return xa_err(entry);
+ }
+
+ return 0;
+}
+
+static void dmirror_do_update(struct dmirror *dmirror, unsigned long start,
+ unsigned long end)
+{
+ unsigned long pfn;
+ void *entry;
+
+ /*
+ * The XArray doesn't hold references to pages since it relies on
+ * the mmu notifier to clear page pointers when they become stale.
+ * Therefore, it is OK to just clear the entry.
+ */
+ xa_for_each_range(&dmirror->pt, pfn, entry, start >> PAGE_SHIFT,
+ end >> PAGE_SHIFT)
+ xa_erase(&dmirror->pt, pfn);
+}
+
+static bool dmirror_interval_invalidate(struct mmu_interval_notifier *mni,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq)
+{
+ struct dmirror *dmirror = container_of(mni, struct dmirror, notifier);
+
+ /*
+ * Ignore invalidation callbacks for device private pages since
+ * the invalidation is handled as part of the migration process.
+ */
+ if (range->event == MMU_NOTIFY_MIGRATE &&
+ range->owner == dmirror->mdevice)
+ return true;
+
+ if (mmu_notifier_range_blockable(range))
+ mutex_lock(&dmirror->mutex);
+ else if (!mutex_trylock(&dmirror->mutex))
+ return false;
+
+ mmu_interval_set_seq(mni, cur_seq);
+ dmirror_do_update(dmirror, range->start, range->end);
+
+ mutex_unlock(&dmirror->mutex);
+ return true;
+}
+
+static const struct mmu_interval_notifier_ops dmirror_min_ops = {
+ .invalidate = dmirror_interval_invalidate,
+};
+
+static int dmirror_range_fault(struct dmirror *dmirror,
+ struct hmm_range *range)
+{
+ struct mm_struct *mm = dmirror->notifier.mm;
+ unsigned long timeout =
+ jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
+ int ret;
+
+ while (true) {
+ if (time_after(jiffies, timeout)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ range->notifier_seq = mmu_interval_read_begin(range->notifier);
+ mmap_read_lock(mm);
+ ret = hmm_range_fault(range);
+ mmap_read_unlock(mm);
+ if (ret) {
+ if (ret == -EBUSY)
+ continue;
+ goto out;
+ }
+
+ mutex_lock(&dmirror->mutex);
+ if (mmu_interval_read_retry(range->notifier,
+ range->notifier_seq)) {
+ mutex_unlock(&dmirror->mutex);
+ continue;
+ }
+ break;
+ }
+
+ ret = dmirror_do_fault(dmirror, range);
+
+ mutex_unlock(&dmirror->mutex);
+out:
+ return ret;
+}
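+
+/*
+ * dmirror_range_fault() above follows the canonical hmm_range_fault()
+ * retry pattern; in sketch form:
+ *
+ *	again:
+ *	seq = mmu_interval_read_begin(notifier);
+ *	hmm_range_fault(range);			// under mmap_read_lock()
+ *	mutex_lock(&driver_lock);
+ *	if (mmu_interval_read_retry(notifier, seq))
+ *		goto again;			// raced with an invalidation
+ *	... update the device page table ...
+ *	mutex_unlock(&driver_lock);
+ */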
+
+static int dmirror_fault(struct dmirror *dmirror, unsigned long start,
+ unsigned long end, bool write)
+{
+ struct mm_struct *mm = dmirror->notifier.mm;
+ unsigned long addr;
+ unsigned long pfns[64];
+ struct hmm_range range = {
+ .notifier = &dmirror->notifier,
+ .hmm_pfns = pfns,
+ .pfn_flags_mask = 0,
+ .default_flags =
+ HMM_PFN_REQ_FAULT | (write ? HMM_PFN_REQ_WRITE : 0),
+ .dev_private_owner = dmirror->mdevice,
+ };
+ int ret = 0;
+
+ /* Since the mm is for the mirrored process, get a reference first. */
+ if (!mmget_not_zero(mm))
+ return 0;
+
+ for (addr = start; addr < end; addr = range.end) {
+ range.start = addr;
+ range.end = min(addr + (ARRAY_SIZE(pfns) << PAGE_SHIFT), end);
+
+ ret = dmirror_range_fault(dmirror, &range);
+ if (ret)
+ break;
+ }
+
+ mmput(mm);
+ return ret;
+}
+
+static int dmirror_do_read(struct dmirror *dmirror, unsigned long start,
+ unsigned long end, struct dmirror_bounce *bounce)
+{
+ unsigned long pfn;
+ void *ptr;
+
+ ptr = bounce->ptr + ((start - bounce->addr) & PAGE_MASK);
+
+ for (pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++) {
+ void *entry;
+ struct page *page;
+
+ entry = xa_load(&dmirror->pt, pfn);
+ page = xa_untag_pointer(entry);
+ if (!page)
+ return -ENOENT;
+
+ memcpy_from_page(ptr, page, 0, PAGE_SIZE);
+
+ ptr += PAGE_SIZE;
+ bounce->cpages++;
+ }
+
+ return 0;
+}
+
+static int dmirror_read(struct dmirror *dmirror, struct hmm_dmirror_cmd *cmd)
+{
+ struct dmirror_bounce bounce;
+ unsigned long start, end;
+ unsigned long size = cmd->npages << PAGE_SHIFT;
+ int ret;
+
+ start = cmd->addr;
+ end = start + size;
+ if (end < start)
+ return -EINVAL;
+
+ ret = dmirror_bounce_init(&bounce, start, size);
+ if (ret)
+ return ret;
+
+ while (1) {
+ mutex_lock(&dmirror->mutex);
+ ret = dmirror_do_read(dmirror, start, end, &bounce);
+ mutex_unlock(&dmirror->mutex);
+ if (ret != -ENOENT)
+ break;
+
+ start = cmd->addr + (bounce.cpages << PAGE_SHIFT);
+ ret = dmirror_fault(dmirror, start, end, false);
+ if (ret)
+ break;
+ cmd->faults++;
+ }
+
+ if (ret == 0) {
+ if (copy_to_user(u64_to_user_ptr(cmd->ptr), bounce.ptr,
+ bounce.size))
+ ret = -EFAULT;
+ }
+ cmd->cpages = bounce.cpages;
+ dmirror_bounce_fini(&bounce);
+ return ret;
+}
+
+static int dmirror_do_write(struct dmirror *dmirror, unsigned long start,
+ unsigned long end, struct dmirror_bounce *bounce)
+{
+ unsigned long pfn;
+ void *ptr;
+
+ ptr = bounce->ptr + ((start - bounce->addr) & PAGE_MASK);
+
+ for (pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++) {
+ void *entry;
+ struct page *page;
+
+ entry = xa_load(&dmirror->pt, pfn);
+ page = xa_untag_pointer(entry);
+ if (!page || xa_pointer_tag(entry) != DPT_XA_TAG_WRITE)
+ return -ENOENT;
+
+ memcpy_to_page(page, 0, ptr, PAGE_SIZE);
+
+ ptr += PAGE_SIZE;
+ bounce->cpages++;
+ }
+
+ return 0;
+}
+
+static int dmirror_write(struct dmirror *dmirror, struct hmm_dmirror_cmd *cmd)
+{
+ struct dmirror_bounce bounce;
+ unsigned long start, end;
+ unsigned long size = cmd->npages << PAGE_SHIFT;
+ int ret;
+
+ start = cmd->addr;
+ end = start + size;
+ if (end < start)
+ return -EINVAL;
+
+ ret = dmirror_bounce_init(&bounce, start, size);
+ if (ret)
+ return ret;
+ if (copy_from_user(bounce.ptr, u64_to_user_ptr(cmd->ptr),
+ bounce.size)) {
+ ret = -EFAULT;
+ goto fini;
+ }
+
+ while (1) {
+ mutex_lock(&dmirror->mutex);
+ ret = dmirror_do_write(dmirror, start, end, &bounce);
+ mutex_unlock(&dmirror->mutex);
+ if (ret != -ENOENT)
+ break;
+
+ start = cmd->addr + (bounce.cpages << PAGE_SHIFT);
+ ret = dmirror_fault(dmirror, start, end, true);
+ if (ret)
+ break;
+ cmd->faults++;
+ }
+
+fini:
+ cmd->cpages = bounce.cpages;
+ dmirror_bounce_fini(&bounce);
+ return ret;
+}
+
+static int dmirror_allocate_chunk(struct dmirror_device *mdevice,
+ struct page **ppage)
+{
+ struct dmirror_chunk *devmem;
+ struct resource *res = NULL;
+ unsigned long pfn;
+ unsigned long pfn_first;
+ unsigned long pfn_last;
+ void *ptr;
+ int ret = -ENOMEM;
+
+ devmem = kzalloc(sizeof(*devmem), GFP_KERNEL);
+ if (!devmem)
+ return ret;
+
+ switch (mdevice->zone_device_type) {
+ case HMM_DMIRROR_MEMORY_DEVICE_PRIVATE:
+ res = request_free_mem_region(&iomem_resource, DEVMEM_CHUNK_SIZE,
+ "hmm_dmirror");
+ if (IS_ERR_OR_NULL(res))
+ goto err_devmem;
+ devmem->pagemap.range.start = res->start;
+ devmem->pagemap.range.end = res->end;
+ devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
+ break;
+ case HMM_DMIRROR_MEMORY_DEVICE_COHERENT:
+ devmem->pagemap.range.start = (MINOR(mdevice->cdevice.dev) - 2) ?
+ spm_addr_dev0 :
+ spm_addr_dev1;
+ devmem->pagemap.range.end = devmem->pagemap.range.start +
+ DEVMEM_CHUNK_SIZE - 1;
+ devmem->pagemap.type = MEMORY_DEVICE_COHERENT;
+ break;
+ default:
+ ret = -EINVAL;
+ goto err_devmem;
+ }
+
+ devmem->pagemap.nr_range = 1;
+ devmem->pagemap.ops = &dmirror_devmem_ops;
+ devmem->pagemap.owner = mdevice;
+
+ mutex_lock(&mdevice->devmem_lock);
+
+ if (mdevice->devmem_count == mdevice->devmem_capacity) {
+ struct dmirror_chunk **new_chunks;
+ unsigned int new_capacity;
+
+ new_capacity = mdevice->devmem_capacity +
+ DEVMEM_CHUNKS_RESERVE;
+ new_chunks = krealloc(mdevice->devmem_chunks,
+ sizeof(new_chunks[0]) * new_capacity,
+ GFP_KERNEL);
+ if (!new_chunks)
+ goto err_release;
+ mdevice->devmem_capacity = new_capacity;
+ mdevice->devmem_chunks = new_chunks;
+ }
+ ptr = memremap_pages(&devmem->pagemap, numa_node_id());
+ if (IS_ERR_OR_NULL(ptr)) {
+ if (ptr)
+ ret = PTR_ERR(ptr);
+ else
+ ret = -EFAULT;
+ goto err_release;
+ }
+
+ devmem->mdevice = mdevice;
+ pfn_first = devmem->pagemap.range.start >> PAGE_SHIFT;
+ pfn_last = pfn_first + (range_len(&devmem->pagemap.range) >> PAGE_SHIFT);
+ mdevice->devmem_chunks[mdevice->devmem_count++] = devmem;
+
+ mutex_unlock(&mdevice->devmem_lock);
+
+ pr_info("added new %u MB chunk (total %u chunks, %u MB) PFNs [0x%lx 0x%lx)\n",
+ DEVMEM_CHUNK_SIZE / (1024 * 1024),
+ mdevice->devmem_count,
+ mdevice->devmem_count * (DEVMEM_CHUNK_SIZE / (1024 * 1024)),
+ pfn_first, pfn_last);
+
+ spin_lock(&mdevice->lock);
+ for (pfn = pfn_first; pfn < pfn_last; pfn++) {
+ struct page *page = pfn_to_page(pfn);
+
+ page->zone_device_data = mdevice->free_pages;
+ mdevice->free_pages = page;
+ }
+ if (ppage) {
+ *ppage = mdevice->free_pages;
+ mdevice->free_pages = (*ppage)->zone_device_data;
+ mdevice->calloc++;
+ }
+ spin_unlock(&mdevice->lock);
+
+ return 0;
+
+err_release:
+ mutex_unlock(&mdevice->devmem_lock);
+ if (res && devmem->pagemap.type == MEMORY_DEVICE_PRIVATE)
+ release_mem_region(devmem->pagemap.range.start,
+ range_len(&devmem->pagemap.range));
+err_devmem:
+ kfree(devmem);
+
+ return ret;
+}
+
+static struct page *dmirror_devmem_alloc_page(struct dmirror_device *mdevice)
+{
+ struct page *dpage = NULL;
+ struct page *rpage = NULL;
+
+ /*
+ * For ZONE_DEVICE private type, this is a fake device so we allocate
+ * real system memory to store our device memory.
+ * For ZONE_DEVICE coherent type we use the actual dpage to store the
+ * data and ignore rpage.
+ */
+ if (dmirror_is_private_zone(mdevice)) {
+ rpage = alloc_page(GFP_HIGHUSER);
+ if (!rpage)
+ return NULL;
+ }
+ spin_lock(&mdevice->lock);
+
+ if (mdevice->free_pages) {
+ dpage = mdevice->free_pages;
+ mdevice->free_pages = dpage->zone_device_data;
+ mdevice->calloc++;
+ spin_unlock(&mdevice->lock);
+ } else {
+ spin_unlock(&mdevice->lock);
+ if (dmirror_allocate_chunk(mdevice, &dpage))
+ goto error;
+ }
+
+ zone_device_page_init(dpage);
+ dpage->zone_device_data = rpage;
+ return dpage;
+
+error:
+ if (rpage)
+ __free_page(rpage);
+ return NULL;
+}
+
+static void dmirror_migrate_alloc_and_copy(struct migrate_vma *args,
+ struct dmirror *dmirror)
+{
+ struct dmirror_device *mdevice = dmirror->mdevice;
+ const unsigned long *src = args->src;
+ unsigned long *dst = args->dst;
+ unsigned long addr;
+
+ for (addr = args->start; addr < args->end; addr += PAGE_SIZE,
+ src++, dst++) {
+ struct page *spage;
+ struct page *dpage;
+ struct page *rpage;
+
+ if (!(*src & MIGRATE_PFN_MIGRATE))
+ continue;
+
+		/*
+		 * Note that spage might be NULL, which is OK: it means the
+		 * source is an unallocated pte_none() entry or the
+		 * read-only zero page.
+		 */
+ spage = migrate_pfn_to_page(*src);
+ if (WARN(spage && is_zone_device_page(spage),
+ "page already in device spage pfn: 0x%lx\n",
+ page_to_pfn(spage)))
+ continue;
+
+ dpage = dmirror_devmem_alloc_page(mdevice);
+ if (!dpage)
+ continue;
+
+ rpage = BACKING_PAGE(dpage);
+ if (spage)
+ copy_highpage(rpage, spage);
+ else
+ clear_highpage(rpage);
+
+ /*
+ * Normally, a device would use the page->zone_device_data to
+ * point to the mirror but here we use it to hold the page for
+ * the simulated device memory and that page holds the pointer
+ * to the mirror.
+ */
+ rpage->zone_device_data = dmirror;
+
+ pr_debug("migrating from sys to dev pfn src: 0x%lx pfn dst: 0x%lx\n",
+ page_to_pfn(spage), page_to_pfn(dpage));
+ *dst = migrate_pfn(page_to_pfn(dpage));
+ if ((*src & MIGRATE_PFN_WRITE) ||
+ (!spage && args->vma->vm_flags & VM_WRITE))
+ *dst |= MIGRATE_PFN_WRITE;
+ }
+}
+
+static int dmirror_check_atomic(struct dmirror *dmirror, unsigned long start,
+ unsigned long end)
+{
+ unsigned long pfn;
+
+ for (pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++) {
+ void *entry;
+
+ entry = xa_load(&dmirror->pt, pfn);
+ if (xa_pointer_tag(entry) == DPT_XA_TAG_ATOMIC)
+ return -EPERM;
+ }
+
+ return 0;
+}
+
+static int dmirror_atomic_map(unsigned long start, unsigned long end,
+ struct page **pages, struct dmirror *dmirror)
+{
+ unsigned long pfn, mapped = 0;
+ int i;
+
+ /* Map the migrated pages into the device's page tables. */
+ mutex_lock(&dmirror->mutex);
+
+ for (i = 0, pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++, i++) {
+ void *entry;
+
+ if (!pages[i])
+ continue;
+
+ entry = pages[i];
+ entry = xa_tag_pointer(entry, DPT_XA_TAG_ATOMIC);
+ entry = xa_store(&dmirror->pt, pfn, entry, GFP_ATOMIC);
+ if (xa_is_err(entry)) {
+ mutex_unlock(&dmirror->mutex);
+ return xa_err(entry);
+ }
+
+ mapped++;
+ }
+
+ mutex_unlock(&dmirror->mutex);
+ return mapped;
+}
+
+static int dmirror_migrate_finalize_and_map(struct migrate_vma *args,
+ struct dmirror *dmirror)
+{
+ unsigned long start = args->start;
+ unsigned long end = args->end;
+ const unsigned long *src = args->src;
+ const unsigned long *dst = args->dst;
+ unsigned long pfn;
+
+ /* Map the migrated pages into the device's page tables. */
+ mutex_lock(&dmirror->mutex);
+
+ for (pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++,
+ src++, dst++) {
+ struct page *dpage;
+ void *entry;
+
+ if (!(*src & MIGRATE_PFN_MIGRATE))
+ continue;
+
+ dpage = migrate_pfn_to_page(*dst);
+ if (!dpage)
+ continue;
+
+ entry = BACKING_PAGE(dpage);
+ if (*dst & MIGRATE_PFN_WRITE)
+ entry = xa_tag_pointer(entry, DPT_XA_TAG_WRITE);
+ entry = xa_store(&dmirror->pt, pfn, entry, GFP_ATOMIC);
+ if (xa_is_err(entry)) {
+ mutex_unlock(&dmirror->mutex);
+ return xa_err(entry);
+ }
+ }
+
+ mutex_unlock(&dmirror->mutex);
+ return 0;
+}
+
+static int dmirror_exclusive(struct dmirror *dmirror,
+ struct hmm_dmirror_cmd *cmd)
+{
+ unsigned long start, end, addr;
+ unsigned long size = cmd->npages << PAGE_SHIFT;
+ struct mm_struct *mm = dmirror->notifier.mm;
+ struct page *pages[64];
+ struct dmirror_bounce bounce;
+ unsigned long next;
+ int ret;
+
+ start = cmd->addr;
+ end = start + size;
+ if (end < start)
+ return -EINVAL;
+
+ /* Since the mm is for the mirrored process, get a reference first. */
+ if (!mmget_not_zero(mm))
+ return -EINVAL;
+
+ mmap_read_lock(mm);
+ for (addr = start; addr < end; addr = next) {
+ unsigned long mapped = 0;
+ int i;
+
+ if (end < addr + (ARRAY_SIZE(pages) << PAGE_SHIFT))
+ next = end;
+ else
+ next = addr + (ARRAY_SIZE(pages) << PAGE_SHIFT);
+
+ ret = make_device_exclusive_range(mm, addr, next, pages, NULL);
+ /*
+ * Do dmirror_atomic_map() iff all pages are marked for
+ * exclusive access to avoid accessing uninitialized
+ * fields of pages.
+ */
+ if (ret == (next - addr) >> PAGE_SHIFT)
+ mapped = dmirror_atomic_map(addr, next, pages, dmirror);
+ for (i = 0; i < ret; i++) {
+ if (pages[i]) {
+ unlock_page(pages[i]);
+ put_page(pages[i]);
+ }
+ }
+
+ if (addr + (mapped << PAGE_SHIFT) < next) {
+ mmap_read_unlock(mm);
+ mmput(mm);
+ return -EBUSY;
+ }
+ }
+ mmap_read_unlock(mm);
+ mmput(mm);
+
+ /* Return the migrated data for verification. */
+ ret = dmirror_bounce_init(&bounce, start, size);
+ if (ret)
+ return ret;
+ mutex_lock(&dmirror->mutex);
+ ret = dmirror_do_read(dmirror, start, end, &bounce);
+ mutex_unlock(&dmirror->mutex);
+ if (ret == 0) {
+ if (copy_to_user(u64_to_user_ptr(cmd->ptr), bounce.ptr,
+ bounce.size))
+ ret = -EFAULT;
+ }
+
+ cmd->cpages = bounce.cpages;
+ dmirror_bounce_fini(&bounce);
+ return ret;
+}
+
+static vm_fault_t dmirror_devmem_fault_alloc_and_copy(struct migrate_vma *args,
+ struct dmirror *dmirror)
+{
+ const unsigned long *src = args->src;
+ unsigned long *dst = args->dst;
+ unsigned long start = args->start;
+ unsigned long end = args->end;
+ unsigned long addr;
+
+ for (addr = start; addr < end; addr += PAGE_SIZE,
+ src++, dst++) {
+ struct page *dpage, *spage;
+
+ spage = migrate_pfn_to_page(*src);
+ if (!spage || !(*src & MIGRATE_PFN_MIGRATE))
+ continue;
+
+ if (WARN_ON(!is_device_private_page(spage) &&
+ !is_device_coherent_page(spage)))
+ continue;
+ spage = BACKING_PAGE(spage);
+ dpage = alloc_page_vma(GFP_HIGHUSER_MOVABLE, args->vma, addr);
+ if (!dpage)
+ continue;
+ pr_debug("migrating from dev to sys pfn src: 0x%lx pfn dst: 0x%lx\n",
+ page_to_pfn(spage), page_to_pfn(dpage));
+
+ lock_page(dpage);
+ xa_erase(&dmirror->pt, addr >> PAGE_SHIFT);
+ copy_highpage(dpage, spage);
+ *dst = migrate_pfn(page_to_pfn(dpage));
+ if (*src & MIGRATE_PFN_WRITE)
+ *dst |= MIGRATE_PFN_WRITE;
+ }
+ return 0;
+}
+
+static unsigned long
+dmirror_successful_migrated_pages(struct migrate_vma *migrate)
+{
+ unsigned long cpages = 0;
+ unsigned long i;
+
+ for (i = 0; i < migrate->npages; i++) {
+ if (migrate->src[i] & MIGRATE_PFN_VALID &&
+ migrate->src[i] & MIGRATE_PFN_MIGRATE)
+ cpages++;
+ }
+ return cpages;
+}
+
+static int dmirror_migrate_to_system(struct dmirror *dmirror,
+ struct hmm_dmirror_cmd *cmd)
+{
+ unsigned long start, end, addr;
+ unsigned long size = cmd->npages << PAGE_SHIFT;
+ struct mm_struct *mm = dmirror->notifier.mm;
+ struct vm_area_struct *vma;
+ unsigned long src_pfns[64] = { 0 };
+ unsigned long dst_pfns[64] = { 0 };
+ struct migrate_vma args = { 0 };
+ unsigned long next;
+ int ret;
+
+ start = cmd->addr;
+ end = start + size;
+ if (end < start)
+ return -EINVAL;
+
+ /* Since the mm is for the mirrored process, get a reference first. */
+ if (!mmget_not_zero(mm))
+ return -EINVAL;
+
+ cmd->cpages = 0;
+ mmap_read_lock(mm);
+ for (addr = start; addr < end; addr = next) {
+ vma = vma_lookup(mm, addr);
+ if (!vma || !(vma->vm_flags & VM_READ)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ next = min(end, addr + (ARRAY_SIZE(src_pfns) << PAGE_SHIFT));
+ if (next > vma->vm_end)
+ next = vma->vm_end;
+
+ args.vma = vma;
+ args.src = src_pfns;
+ args.dst = dst_pfns;
+ args.start = addr;
+ args.end = next;
+ args.pgmap_owner = dmirror->mdevice;
+ args.flags = dmirror_select_device(dmirror);
+
+ ret = migrate_vma_setup(&args);
+ if (ret)
+ goto out;
+
+ pr_debug("Migrating from device mem to sys mem\n");
+ dmirror_devmem_fault_alloc_and_copy(&args, dmirror);
+
+ migrate_vma_pages(&args);
+ cmd->cpages += dmirror_successful_migrated_pages(&args);
+ migrate_vma_finalize(&args);
+ }
+out:
+ mmap_read_unlock(mm);
+ mmput(mm);
+
+ return ret;
+}
+
+static int dmirror_migrate_to_device(struct dmirror *dmirror,
+ struct hmm_dmirror_cmd *cmd)
+{
+ unsigned long start, end, addr;
+ unsigned long size = cmd->npages << PAGE_SHIFT;
+ struct mm_struct *mm = dmirror->notifier.mm;
+ struct vm_area_struct *vma;
+ unsigned long src_pfns[64] = { 0 };
+ unsigned long dst_pfns[64] = { 0 };
+ struct dmirror_bounce bounce;
+ struct migrate_vma args = { 0 };
+ unsigned long next;
+ int ret;
+
+ start = cmd->addr;
+ end = start + size;
+ if (end < start)
+ return -EINVAL;
+
+ /* Since the mm is for the mirrored process, get a reference first. */
+ if (!mmget_not_zero(mm))
+ return -EINVAL;
+
+ mmap_read_lock(mm);
+ for (addr = start; addr < end; addr = next) {
+ vma = vma_lookup(mm, addr);
+ if (!vma || !(vma->vm_flags & VM_READ)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ next = min(end, addr + (ARRAY_SIZE(src_pfns) << PAGE_SHIFT));
+ if (next > vma->vm_end)
+ next = vma->vm_end;
+
+ args.vma = vma;
+ args.src = src_pfns;
+ args.dst = dst_pfns;
+ args.start = addr;
+ args.end = next;
+ args.pgmap_owner = dmirror->mdevice;
+ args.flags = MIGRATE_VMA_SELECT_SYSTEM;
+ ret = migrate_vma_setup(&args);
+ if (ret)
+ goto out;
+
+ pr_debug("Migrating from sys mem to device mem\n");
+ dmirror_migrate_alloc_and_copy(&args, dmirror);
+ migrate_vma_pages(&args);
+ dmirror_migrate_finalize_and_map(&args, dmirror);
+ migrate_vma_finalize(&args);
+ }
+ mmap_read_unlock(mm);
+ mmput(mm);
+
+	/*
+	 * Return the migrated data for verification.
+	 * Only for pages in the device zone.
+	 */
+ ret = dmirror_bounce_init(&bounce, start, size);
+ if (ret)
+ return ret;
+ mutex_lock(&dmirror->mutex);
+ ret = dmirror_do_read(dmirror, start, end, &bounce);
+ mutex_unlock(&dmirror->mutex);
+ if (ret == 0) {
+ if (copy_to_user(u64_to_user_ptr(cmd->ptr), bounce.ptr,
+ bounce.size))
+ ret = -EFAULT;
+ }
+ cmd->cpages = bounce.cpages;
+ dmirror_bounce_fini(&bounce);
+ return ret;
+
+out:
+ mmap_read_unlock(mm);
+ mmput(mm);
+ return ret;
+}
+
+static void dmirror_mkentry(struct dmirror *dmirror, struct hmm_range *range,
+ unsigned char *perm, unsigned long entry)
+{
+ struct page *page;
+
+ if (entry & HMM_PFN_ERROR) {
+ *perm = HMM_DMIRROR_PROT_ERROR;
+ return;
+ }
+ if (!(entry & HMM_PFN_VALID)) {
+ *perm = HMM_DMIRROR_PROT_NONE;
+ return;
+ }
+
+ page = hmm_pfn_to_page(entry);
+ if (is_device_private_page(page)) {
+ /* Is the page migrated to this device or some other? */
+ if (dmirror->mdevice == dmirror_page_to_device(page))
+ *perm = HMM_DMIRROR_PROT_DEV_PRIVATE_LOCAL;
+ else
+ *perm = HMM_DMIRROR_PROT_DEV_PRIVATE_REMOTE;
+ } else if (is_device_coherent_page(page)) {
+ /* Is the page migrated to this device or some other? */
+ if (dmirror->mdevice == dmirror_page_to_device(page))
+ *perm = HMM_DMIRROR_PROT_DEV_COHERENT_LOCAL;
+ else
+ *perm = HMM_DMIRROR_PROT_DEV_COHERENT_REMOTE;
+ } else if (is_zero_pfn(page_to_pfn(page)))
+ *perm = HMM_DMIRROR_PROT_ZERO;
+ else
+ *perm = HMM_DMIRROR_PROT_NONE;
+ if (entry & HMM_PFN_WRITE)
+ *perm |= HMM_DMIRROR_PROT_WRITE;
+ else
+ *perm |= HMM_DMIRROR_PROT_READ;
+ if (hmm_pfn_to_map_order(entry) + PAGE_SHIFT == PMD_SHIFT)
+ *perm |= HMM_DMIRROR_PROT_PMD;
+ else if (hmm_pfn_to_map_order(entry) + PAGE_SHIFT == PUD_SHIFT)
+ *perm |= HMM_DMIRROR_PROT_PUD;
+}
+
+static bool dmirror_snapshot_invalidate(struct mmu_interval_notifier *mni,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq)
+{
+ struct dmirror_interval *dmi =
+ container_of(mni, struct dmirror_interval, notifier);
+ struct dmirror *dmirror = dmi->dmirror;
+
+ if (mmu_notifier_range_blockable(range))
+ mutex_lock(&dmirror->mutex);
+ else if (!mutex_trylock(&dmirror->mutex))
+ return false;
+
+ /*
+ * Snapshots only need to set the sequence number since any
+ * invalidation in the interval invalidates the whole snapshot.
+ */
+ mmu_interval_set_seq(mni, cur_seq);
+
+ mutex_unlock(&dmirror->mutex);
+ return true;
+}
+
+static const struct mmu_interval_notifier_ops dmirror_mrn_ops = {
+ .invalidate = dmirror_snapshot_invalidate,
+};
+
+static int dmirror_range_snapshot(struct dmirror *dmirror,
+ struct hmm_range *range,
+ unsigned char *perm)
+{
+ struct mm_struct *mm = dmirror->notifier.mm;
+ struct dmirror_interval notifier;
+ unsigned long timeout =
+ jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
+ unsigned long i;
+ unsigned long n;
+ int ret = 0;
+
+ notifier.dmirror = dmirror;
+ range->notifier = &notifier.notifier;
+
+ ret = mmu_interval_notifier_insert(range->notifier, mm,
+ range->start, range->end - range->start,
+ &dmirror_mrn_ops);
+ if (ret)
+ return ret;
+
+ while (true) {
+ if (time_after(jiffies, timeout)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ range->notifier_seq = mmu_interval_read_begin(range->notifier);
+
+ mmap_read_lock(mm);
+ ret = hmm_range_fault(range);
+ mmap_read_unlock(mm);
+ if (ret) {
+ if (ret == -EBUSY)
+ continue;
+ goto out;
+ }
+
+ mutex_lock(&dmirror->mutex);
+ if (mmu_interval_read_retry(range->notifier,
+ range->notifier_seq)) {
+ mutex_unlock(&dmirror->mutex);
+ continue;
+ }
+ break;
+ }
+
+ n = (range->end - range->start) >> PAGE_SHIFT;
+ for (i = 0; i < n; i++)
+ dmirror_mkentry(dmirror, range, perm + i, range->hmm_pfns[i]);
+
+ mutex_unlock(&dmirror->mutex);
+out:
+ mmu_interval_notifier_remove(range->notifier);
+ return ret;
+}
+
+static int dmirror_snapshot(struct dmirror *dmirror,
+ struct hmm_dmirror_cmd *cmd)
+{
+ struct mm_struct *mm = dmirror->notifier.mm;
+ unsigned long start, end;
+ unsigned long size = cmd->npages << PAGE_SHIFT;
+ unsigned long addr;
+ unsigned long next;
+ unsigned long pfns[64];
+ unsigned char perm[64];
+ char __user *uptr;
+ struct hmm_range range = {
+ .hmm_pfns = pfns,
+ .dev_private_owner = dmirror->mdevice,
+ };
+ int ret = 0;
+
+ start = cmd->addr;
+ end = start + size;
+ if (end < start)
+ return -EINVAL;
+
+ /* Since the mm is for the mirrored process, get a reference first. */
+ if (!mmget_not_zero(mm))
+ return -EINVAL;
+
+ /*
+ * Register a temporary notifier to detect invalidations even if it
+ * overlaps with other mmu_interval_notifiers.
+ */
+ uptr = u64_to_user_ptr(cmd->ptr);
+ for (addr = start; addr < end; addr = next) {
+ unsigned long n;
+
+ next = min(addr + (ARRAY_SIZE(pfns) << PAGE_SHIFT), end);
+ range.start = addr;
+ range.end = next;
+
+ ret = dmirror_range_snapshot(dmirror, &range, perm);
+ if (ret)
+ break;
+
+ n = (range.end - range.start) >> PAGE_SHIFT;
+ if (copy_to_user(uptr, perm, n)) {
+ ret = -EFAULT;
+ break;
+ }
+
+ cmd->cpages += n;
+ uptr += n;
+ }
+ mmput(mm);
+
+ return ret;
+}
+
+static void dmirror_device_evict_chunk(struct dmirror_chunk *chunk)
+{
+ unsigned long start_pfn = chunk->pagemap.range.start >> PAGE_SHIFT;
+ unsigned long end_pfn = chunk->pagemap.range.end >> PAGE_SHIFT;
+ unsigned long npages = end_pfn - start_pfn + 1;
+ unsigned long i;
+ unsigned long *src_pfns;
+ unsigned long *dst_pfns;
+
+ src_pfns = kcalloc(npages, sizeof(*src_pfns), GFP_KERNEL);
+ dst_pfns = kcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL);
+
+ migrate_device_range(src_pfns, start_pfn, npages);
+ for (i = 0; i < npages; i++) {
+ struct page *dpage, *spage;
+
+ spage = migrate_pfn_to_page(src_pfns[i]);
+ if (!spage || !(src_pfns[i] & MIGRATE_PFN_MIGRATE))
+ continue;
+
+ if (WARN_ON(!is_device_private_page(spage) &&
+ !is_device_coherent_page(spage)))
+ continue;
+ spage = BACKING_PAGE(spage);
+ dpage = alloc_page(GFP_HIGHUSER_MOVABLE | __GFP_NOFAIL);
+ lock_page(dpage);
+ copy_highpage(dpage, spage);
+ dst_pfns[i] = migrate_pfn(page_to_pfn(dpage));
+ if (src_pfns[i] & MIGRATE_PFN_WRITE)
+ dst_pfns[i] |= MIGRATE_PFN_WRITE;
+ }
+ migrate_device_pages(src_pfns, dst_pfns, npages);
+ migrate_device_finalize(src_pfns, dst_pfns, npages);
+ kfree(src_pfns);
+ kfree(dst_pfns);
+}
+
+/* Removes free pages from the free list so they can't be re-allocated */
+static void dmirror_remove_free_pages(struct dmirror_chunk *devmem)
+{
+ struct dmirror_device *mdevice = devmem->mdevice;
+ struct page *page;
+
+ for (page = mdevice->free_pages; page; page = page->zone_device_data)
+ if (dmirror_page_to_chunk(page) == devmem)
+ mdevice->free_pages = page->zone_device_data;
+}
+
+static void dmirror_device_remove_chunks(struct dmirror_device *mdevice)
+{
+ unsigned int i;
+
+ mutex_lock(&mdevice->devmem_lock);
+ if (mdevice->devmem_chunks) {
+ for (i = 0; i < mdevice->devmem_count; i++) {
+ struct dmirror_chunk *devmem =
+ mdevice->devmem_chunks[i];
+
+ spin_lock(&mdevice->lock);
+ devmem->remove = true;
+ dmirror_remove_free_pages(devmem);
+ spin_unlock(&mdevice->lock);
+
+ dmirror_device_evict_chunk(devmem);
+ memunmap_pages(&devmem->pagemap);
+ if (devmem->pagemap.type == MEMORY_DEVICE_PRIVATE)
+ release_mem_region(devmem->pagemap.range.start,
+ range_len(&devmem->pagemap.range));
+ kfree(devmem);
+ }
+ mdevice->devmem_count = 0;
+ mdevice->devmem_capacity = 0;
+ mdevice->free_pages = NULL;
+ kfree(mdevice->devmem_chunks);
+ mdevice->devmem_chunks = NULL;
+ }
+ mutex_unlock(&mdevice->devmem_lock);
+}
+
+static long dmirror_fops_unlocked_ioctl(struct file *filp,
+ unsigned int command,
+ unsigned long arg)
+{
+ void __user *uarg = (void __user *)arg;
+ struct hmm_dmirror_cmd cmd;
+ struct dmirror *dmirror;
+ int ret;
+
+ dmirror = filp->private_data;
+ if (!dmirror)
+ return -EINVAL;
+
+ if (copy_from_user(&cmd, uarg, sizeof(cmd)))
+ return -EFAULT;
+
+ if (cmd.addr & ~PAGE_MASK)
+ return -EINVAL;
+ if (cmd.addr >= (cmd.addr + (cmd.npages << PAGE_SHIFT)))
+ return -EINVAL;
+
+ cmd.cpages = 0;
+ cmd.faults = 0;
+
+ switch (command) {
+ case HMM_DMIRROR_READ:
+ ret = dmirror_read(dmirror, &cmd);
+ break;
+
+ case HMM_DMIRROR_WRITE:
+ ret = dmirror_write(dmirror, &cmd);
+ break;
+
+ case HMM_DMIRROR_MIGRATE_TO_DEV:
+ ret = dmirror_migrate_to_device(dmirror, &cmd);
+ break;
+
+ case HMM_DMIRROR_MIGRATE_TO_SYS:
+ ret = dmirror_migrate_to_system(dmirror, &cmd);
+ break;
+
+ case HMM_DMIRROR_EXCLUSIVE:
+ ret = dmirror_exclusive(dmirror, &cmd);
+ break;
+
+ case HMM_DMIRROR_CHECK_EXCLUSIVE:
+ ret = dmirror_check_atomic(dmirror, cmd.addr,
+ cmd.addr + (cmd.npages << PAGE_SHIFT));
+ break;
+
+ case HMM_DMIRROR_SNAPSHOT:
+ ret = dmirror_snapshot(dmirror, &cmd);
+ break;
+
+ case HMM_DMIRROR_RELEASE:
+ dmirror_device_remove_chunks(dmirror->mdevice);
+ ret = 0;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ if (ret)
+ return ret;
+
+ if (copy_to_user(uarg, &cmd, sizeof(cmd)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int dmirror_fops_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ unsigned long addr;
+
+ for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
+ struct page *page;
+ int ret;
+
+ page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!page)
+ return -ENOMEM;
+
+ ret = vm_insert_page(vma, addr, page);
+ if (ret) {
+ __free_page(page);
+ return ret;
+ }
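+		/*
+		 * vm_insert_page() took its own reference on the page, so
+		 * drop ours; the mapping now holds the remaining one.
+		 */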
+ put_page(page);
+ }
+
+ return 0;
+}
+
+static const struct file_operations dmirror_fops = {
+ .open = dmirror_fops_open,
+ .release = dmirror_fops_release,
+ .mmap = dmirror_fops_mmap,
+ .unlocked_ioctl = dmirror_fops_unlocked_ioctl,
+ .llseek = default_llseek,
+ .owner = THIS_MODULE,
+};
+
+static void dmirror_devmem_free(struct page *page)
+{
+ struct page *rpage = BACKING_PAGE(page);
+ struct dmirror_device *mdevice;
+
+ if (rpage != page)
+ __free_page(rpage);
+
+ mdevice = dmirror_page_to_device(page);
+ spin_lock(&mdevice->lock);
+
+ /* Return page to our allocator if not freeing the chunk */
+ if (!dmirror_page_to_chunk(page)->remove) {
+ mdevice->cfree++;
+ page->zone_device_data = mdevice->free_pages;
+ mdevice->free_pages = page;
+ }
+ spin_unlock(&mdevice->lock);
+}
+
+static vm_fault_t dmirror_devmem_fault(struct vm_fault *vmf)
+{
+ struct migrate_vma args = { 0 };
+ unsigned long src_pfns = 0;
+ unsigned long dst_pfns = 0;
+ struct page *rpage;
+ struct dmirror *dmirror;
+ vm_fault_t ret;
+
+ /*
+ * Normally, a device would use the page->zone_device_data to point to
+ * the mirror but here we use it to hold the page for the simulated
+ * device memory and that page holds the pointer to the mirror.
+ */
+ rpage = vmf->page->zone_device_data;
+ dmirror = rpage->zone_device_data;
+
+ /* FIXME demonstrate how we can adjust migrate range */
+ args.vma = vmf->vma;
+ args.start = vmf->address;
+ args.end = args.start + PAGE_SIZE;
+ args.src = &src_pfns;
+ args.dst = &dst_pfns;
+ args.pgmap_owner = dmirror->mdevice;
+ args.flags = dmirror_select_device(dmirror);
+ args.fault_page = vmf->page;
+
+ if (migrate_vma_setup(&args))
+ return VM_FAULT_SIGBUS;
+
+ ret = dmirror_devmem_fault_alloc_and_copy(&args, dmirror);
+ if (ret)
+ return ret;
+ migrate_vma_pages(&args);
+ /*
+ * No device finalize step is needed since
+ * dmirror_devmem_fault_alloc_and_copy() will have already
+ * invalidated the device page table.
+ */
+ migrate_vma_finalize(&args);
+ return 0;
+}
+
+static const struct dev_pagemap_ops dmirror_devmem_ops = {
+ .page_free = dmirror_devmem_free,
+ .migrate_to_ram = dmirror_devmem_fault,
+};
+
+static int dmirror_device_init(struct dmirror_device *mdevice, int id)
+{
+ dev_t dev;
+ int ret;
+
+ dev = MKDEV(MAJOR(dmirror_dev), id);
+ mutex_init(&mdevice->devmem_lock);
+ spin_lock_init(&mdevice->lock);
+
+ cdev_init(&mdevice->cdevice, &dmirror_fops);
+ mdevice->cdevice.owner = THIS_MODULE;
+ device_initialize(&mdevice->device);
+ mdevice->device.devt = dev;
+
+ ret = dev_set_name(&mdevice->device, "hmm_dmirror%u", id);
+ if (ret)
+ return ret;
+
+ ret = cdev_device_add(&mdevice->cdevice, &mdevice->device);
+ if (ret)
+ return ret;
+
+ /* Build a list of free ZONE_DEVICE struct pages */
+ return dmirror_allocate_chunk(mdevice, NULL);
+}
+
+static void dmirror_device_remove(struct dmirror_device *mdevice)
+{
+ dmirror_device_remove_chunks(mdevice);
+ cdev_device_del(&mdevice->cdevice, &mdevice->device);
+}
+
+static int __init hmm_dmirror_init(void)
+{
+ int ret;
+ int id = 0;
+ int ndevices = 0;
+
+ ret = alloc_chrdev_region(&dmirror_dev, 0, DMIRROR_NDEVICES,
+ "HMM_DMIRROR");
+ if (ret)
+ goto err_unreg;
+
+ memset(dmirror_devices, 0, DMIRROR_NDEVICES * sizeof(dmirror_devices[0]));
+ dmirror_devices[ndevices++].zone_device_type =
+ HMM_DMIRROR_MEMORY_DEVICE_PRIVATE;
+ dmirror_devices[ndevices++].zone_device_type =
+ HMM_DMIRROR_MEMORY_DEVICE_PRIVATE;
+ if (spm_addr_dev0 && spm_addr_dev1) {
+ dmirror_devices[ndevices++].zone_device_type =
+ HMM_DMIRROR_MEMORY_DEVICE_COHERENT;
+ dmirror_devices[ndevices++].zone_device_type =
+ HMM_DMIRROR_MEMORY_DEVICE_COHERENT;
+ }
+ for (id = 0; id < ndevices; id++) {
+ ret = dmirror_device_init(dmirror_devices + id, id);
+ if (ret)
+ goto err_chrdev;
+ }
+
+ pr_info("HMM test module loaded. This is only for testing HMM.\n");
+ return 0;
+
+err_chrdev:
+ while (--id >= 0)
+ dmirror_device_remove(dmirror_devices + id);
+ unregister_chrdev_region(dmirror_dev, DMIRROR_NDEVICES);
+err_unreg:
+ return ret;
+}
+
+static void __exit hmm_dmirror_exit(void)
+{
+ int id;
+
+ for (id = 0; id < DMIRROR_NDEVICES; id++)
+ if (dmirror_devices[id].zone_device_type)
+ dmirror_device_remove(dmirror_devices + id);
+ unregister_chrdev_region(dmirror_dev, DMIRROR_NDEVICES);
+}
+
+module_init(hmm_dmirror_init);
+module_exit(hmm_dmirror_exit);
+MODULE_LICENSE("GPL");
diff --git a/lib/test_hmm_uapi.h b/lib/test_hmm_uapi.h
new file mode 100644
index 000000000..8c818a2cf
--- /dev/null
+++ b/lib/test_hmm_uapi.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * This is a module to test the HMM (Heterogeneous Memory Management) API
+ * of the kernel. It allows a userspace program to expose its entire address
+ * space through the HMM test module device file.
+ */
+#ifndef _LIB_TEST_HMM_UAPI_H
+#define _LIB_TEST_HMM_UAPI_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+/*
+ * Structure to pass to the HMM test driver to mimic a device accessing
+ * system memory and ZONE_DEVICE private memory through device page tables.
+ *
+ * @addr: (in) user address the device will read/write
+ * @ptr: (in) user address where device data is copied to/from
+ * @npages: (in) number of pages to read/write
+ * @cpages: (out) number of pages copied
+ * @faults: (out) number of device page faults seen
+ */
+struct hmm_dmirror_cmd {
+ __u64 addr;
+ __u64 ptr;
+ __u64 npages;
+ __u64 cpages;
+ __u64 faults;
+};
+
+/* Expose the address space of the calling process through hmm device file */
+#define HMM_DMIRROR_READ _IOWR('H', 0x00, struct hmm_dmirror_cmd)
+#define HMM_DMIRROR_WRITE _IOWR('H', 0x01, struct hmm_dmirror_cmd)
+#define HMM_DMIRROR_MIGRATE_TO_DEV _IOWR('H', 0x02, struct hmm_dmirror_cmd)
+#define HMM_DMIRROR_MIGRATE_TO_SYS _IOWR('H', 0x03, struct hmm_dmirror_cmd)
+#define HMM_DMIRROR_SNAPSHOT _IOWR('H', 0x04, struct hmm_dmirror_cmd)
+#define HMM_DMIRROR_EXCLUSIVE _IOWR('H', 0x05, struct hmm_dmirror_cmd)
+#define HMM_DMIRROR_CHECK_EXCLUSIVE _IOWR('H', 0x06, struct hmm_dmirror_cmd)
+#define HMM_DMIRROR_RELEASE _IOWR('H', 0x07, struct hmm_dmirror_cmd)
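+
+/*
+ * Hypothetical userspace sketch (not part of this header); the device
+ * node name assumes the first test device:
+ *
+ *	struct hmm_dmirror_cmd cmd = { 0 };
+ *	int fd = open("/dev/hmm_dmirror0", O_RDWR);
+ *
+ *	cmd.addr = (__u64)(uintptr_t)buf;	// page-aligned user buffer
+ *	cmd.ptr = (__u64)(uintptr_t)data;	// where the data lands
+ *	cmd.npages = 1;
+ *	ioctl(fd, HMM_DMIRROR_READ, &cmd);	// cmd.cpages/cmd.faults out
+ */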
+
+/*
+ * Values returned in hmm_dmirror_cmd.ptr for HMM_DMIRROR_SNAPSHOT.
+ * HMM_DMIRROR_PROT_ERROR: no valid mirror PTE for this page
+ * HMM_DMIRROR_PROT_NONE: unpopulated PTE or PTE with no access
+ * HMM_DMIRROR_PROT_READ: read-only PTE
+ * HMM_DMIRROR_PROT_WRITE: read/write PTE
+ * HMM_DMIRROR_PROT_PMD: PMD sized page is fully mapped by same permissions
+ * HMM_DMIRROR_PROT_PUD: PUD sized page is fully mapped by same permissions
+ * HMM_DMIRROR_PROT_ZERO: special read-only zero page
+ * HMM_DMIRROR_PROT_DEV_PRIVATE_LOCAL: Migrated device private page on the
+ * device the ioctl() is made
+ * HMM_DMIRROR_PROT_DEV_PRIVATE_REMOTE: Migrated device private page on some
+ * other device
+ * HMM_DMIRROR_PROT_DEV_COHERENT_LOCAL: Migrated device coherent page on the
+ *					device the ioctl() is made
+ * HMM_DMIRROR_PROT_DEV_COHERENT_REMOTE: Migrated device coherent page on
+ *					some other device
+ */
+enum {
+ HMM_DMIRROR_PROT_ERROR = 0xFF,
+ HMM_DMIRROR_PROT_NONE = 0x00,
+ HMM_DMIRROR_PROT_READ = 0x01,
+ HMM_DMIRROR_PROT_WRITE = 0x02,
+ HMM_DMIRROR_PROT_PMD = 0x04,
+ HMM_DMIRROR_PROT_PUD = 0x08,
+ HMM_DMIRROR_PROT_ZERO = 0x10,
+ HMM_DMIRROR_PROT_DEV_PRIVATE_LOCAL = 0x20,
+ HMM_DMIRROR_PROT_DEV_PRIVATE_REMOTE = 0x30,
+ HMM_DMIRROR_PROT_DEV_COHERENT_LOCAL = 0x40,
+ HMM_DMIRROR_PROT_DEV_COHERENT_REMOTE = 0x50,
+};
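+
+/*
+ * Example decoding (illustrative): a snapshot byte of 0x22 is
+ * HMM_DMIRROR_PROT_DEV_PRIVATE_LOCAL | HMM_DMIRROR_PROT_WRITE, i.e. a
+ * writable page currently migrated to the device the ioctl() was
+ * issued on.
+ */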
+
+enum {
+ /* 0 is reserved to catch uninitialized type fields */
+ HMM_DMIRROR_MEMORY_DEVICE_PRIVATE = 1,
+ HMM_DMIRROR_MEMORY_DEVICE_COHERENT,
+};
+
+#endif /* _LIB_TEST_HMM_UAPI_H */
diff --git a/lib/test_ida.c b/lib/test_ida.c
new file mode 100644
index 000000000..55105baa1
--- /dev/null
+++ b/lib/test_ida.c
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * test_ida.c: Test the IDA API
+ * Copyright (c) 2016-2018 Microsoft Corporation
+ * Copyright (c) 2018 Oracle Corporation
+ * Author: Matthew Wilcox <willy@infradead.org>
+ */
+
+#include <linux/idr.h>
+#include <linux/module.h>
+
+static unsigned int tests_run;
+static unsigned int tests_passed;
+
+#ifdef __KERNEL__
+void ida_dump(struct ida *ida) { }
+#endif
+#define IDA_BUG_ON(ida, x) do { \
+ tests_run++; \
+ if (x) { \
+ ida_dump(ida); \
+ dump_stack(); \
+ } else { \
+ tests_passed++; \
+ } \
+} while (0)
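+
+/*
+ * The API under test, in sketch form (see the checks below for the exact
+ * semantics being verified):
+ *
+ *	id = ida_alloc(&ida, GFP_KERNEL);		// lowest free ID
+ *	id = ida_alloc_min(&ida, min, GFP_KERNEL);	// lowest free ID >= min
+ *	ida_free(&ida, id);
+ *	ida_destroy(&ida);				// release everything
+ */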
+
+/*
+ * Straightforward checks that allocating and freeing IDs work.
+ */
+static void ida_check_alloc(struct ida *ida)
+{
+ int i, id;
+
+ for (i = 0; i < 10000; i++)
+ IDA_BUG_ON(ida, ida_alloc(ida, GFP_KERNEL) != i);
+
+ ida_free(ida, 20);
+ ida_free(ida, 21);
+ for (i = 0; i < 3; i++) {
+ id = ida_alloc(ida, GFP_KERNEL);
+ IDA_BUG_ON(ida, id < 0);
+ if (i == 2)
+ IDA_BUG_ON(ida, id != 10000);
+ }
+
+ for (i = 0; i < 5000; i++)
+ ida_free(ida, i);
+
+ IDA_BUG_ON(ida, ida_alloc_min(ida, 5000, GFP_KERNEL) != 10001);
+ ida_destroy(ida);
+
+ IDA_BUG_ON(ida, !ida_is_empty(ida));
+}
+
+/* Destroy an IDA with a single entry at @base */
+static void ida_check_destroy_1(struct ida *ida, unsigned int base)
+{
+ IDA_BUG_ON(ida, ida_alloc_min(ida, base, GFP_KERNEL) != base);
+ IDA_BUG_ON(ida, ida_is_empty(ida));
+ ida_destroy(ida);
+ IDA_BUG_ON(ida, !ida_is_empty(ida));
+}
+
+/* Check that ida_destroy and ida_is_empty work */
+static void ida_check_destroy(struct ida *ida)
+{
+ /* Destroy an already-empty IDA */
+ IDA_BUG_ON(ida, !ida_is_empty(ida));
+ ida_destroy(ida);
+ IDA_BUG_ON(ida, !ida_is_empty(ida));
+
+ ida_check_destroy_1(ida, 0);
+ ida_check_destroy_1(ida, 1);
+ ida_check_destroy_1(ida, 1023);
+ ida_check_destroy_1(ida, 1024);
+ ida_check_destroy_1(ida, 12345678);
+}
+
+/*
+ * Check what happens when we fill a leaf and then delete it. This may
+ * discover mishandling of IDR_FREE.
+ */
+static void ida_check_leaf(struct ida *ida, unsigned int base)
+{
+ unsigned long i;
+
+ for (i = 0; i < IDA_BITMAP_BITS; i++) {
+ IDA_BUG_ON(ida, ida_alloc_min(ida, base, GFP_KERNEL) !=
+ base + i);
+ }
+
+ ida_destroy(ida);
+ IDA_BUG_ON(ida, !ida_is_empty(ida));
+
+ IDA_BUG_ON(ida, ida_alloc(ida, GFP_KERNEL) != 0);
+ IDA_BUG_ON(ida, ida_is_empty(ida));
+ ida_free(ida, 0);
+ IDA_BUG_ON(ida, !ida_is_empty(ida));
+}
+
+/*
+ * Check allocations up to and slightly above the maximum allowed (2^31-1) ID.
+ * Allocating up to 2^31-1 should succeed, and then allocating the next one
+ * should fail.
+ */
+static void ida_check_max(struct ida *ida)
+{
+ unsigned long i, j;
+
+ for (j = 1; j < 65537; j *= 2) {
+ unsigned long base = (1UL << 31) - j;
+ for (i = 0; i < j; i++) {
+ IDA_BUG_ON(ida, ida_alloc_min(ida, base, GFP_KERNEL) !=
+ base + i);
+ }
+ IDA_BUG_ON(ida, ida_alloc_min(ida, base, GFP_KERNEL) !=
+ -ENOSPC);
+ ida_destroy(ida);
+ IDA_BUG_ON(ida, !ida_is_empty(ida));
+ }
+}
+
+/*
+ * Check handling of conversions between exceptional entries and full bitmaps.
+ */
+static void ida_check_conv(struct ida *ida)
+{
+ unsigned long i;
+
+ for (i = 0; i < IDA_BITMAP_BITS * 2; i += IDA_BITMAP_BITS) {
+ IDA_BUG_ON(ida, ida_alloc_min(ida, i + 1, GFP_KERNEL) != i + 1);
+ IDA_BUG_ON(ida, ida_alloc_min(ida, i + BITS_PER_LONG,
+ GFP_KERNEL) != i + BITS_PER_LONG);
+ ida_free(ida, i + 1);
+ ida_free(ida, i + BITS_PER_LONG);
+ IDA_BUG_ON(ida, !ida_is_empty(ida));
+ }
+
+ for (i = 0; i < IDA_BITMAP_BITS * 2; i++)
+ IDA_BUG_ON(ida, ida_alloc(ida, GFP_KERNEL) != i);
+ for (i = IDA_BITMAP_BITS * 2; i > 0; i--)
+ ida_free(ida, i - 1);
+ IDA_BUG_ON(ida, !ida_is_empty(ida));
+
+ for (i = 0; i < IDA_BITMAP_BITS + BITS_PER_LONG - 4; i++)
+ IDA_BUG_ON(ida, ida_alloc(ida, GFP_KERNEL) != i);
+ for (i = IDA_BITMAP_BITS + BITS_PER_LONG - 4; i > 0; i--)
+ ida_free(ida, i - 1);
+ IDA_BUG_ON(ida, !ida_is_empty(ida));
+}
+
+/*
+ * Check various situations where we attempt to free an ID we don't own.
+ */
+static void ida_check_bad_free(struct ida *ida)
+{
+ unsigned long i;
+
+ printk("vvv Ignore \"not allocated\" warnings\n");
+ /* IDA is empty; all of these will fail */
+ ida_free(ida, 0);
+ for (i = 0; i < 31; i++)
+ ida_free(ida, 1 << i);
+
+ /* IDA contains a single value entry */
+ IDA_BUG_ON(ida, ida_alloc_min(ida, 3, GFP_KERNEL) != 3);
+ ida_free(ida, 0);
+ for (i = 0; i < 31; i++)
+ ida_free(ida, 1 << i);
+
+ /* IDA contains a single bitmap */
+ IDA_BUG_ON(ida, ida_alloc_min(ida, 1023, GFP_KERNEL) != 1023);
+ ida_free(ida, 0);
+ for (i = 0; i < 31; i++)
+ ida_free(ida, 1 << i);
+
+ /* IDA contains a tree */
+ IDA_BUG_ON(ida, ida_alloc_min(ida, (1 << 20) - 1, GFP_KERNEL) != (1 << 20) - 1);
+ ida_free(ida, 0);
+ for (i = 0; i < 31; i++)
+ ida_free(ida, 1 << i);
+ printk("^^^ \"not allocated\" warnings over\n");
+
+ ida_free(ida, 3);
+ ida_free(ida, 1023);
+ ida_free(ida, (1 << 20) - 1);
+
+ IDA_BUG_ON(ida, !ida_is_empty(ida));
+}
+
+static DEFINE_IDA(ida);
+
+static int ida_checks(void)
+{
+ IDA_BUG_ON(&ida, !ida_is_empty(&ida));
+ ida_check_alloc(&ida);
+ ida_check_destroy(&ida);
+ ida_check_leaf(&ida, 0);
+ ida_check_leaf(&ida, 1024);
+ ida_check_leaf(&ida, 1024 * 64);
+ ida_check_max(&ida);
+ ida_check_conv(&ida);
+ ida_check_bad_free(&ida);
+
+ printk("IDA: %u of %u tests passed\n", tests_passed, tests_run);
+ return (tests_run != tests_passed) ? 0 : -EINVAL;
+}
+
+static void ida_exit(void)
+{
+}
+
+module_init(ida_checks);
+module_exit(ida_exit);
+MODULE_AUTHOR("Matthew Wilcox <willy@infradead.org>");
+MODULE_LICENSE("GPL");
diff --git a/lib/test_kmod.c b/lib/test_kmod.c
new file mode 100644
index 000000000..43d9dfd57
--- /dev/null
+++ b/lib/test_kmod.c
@@ -0,0 +1,1222 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR copyleft-next-0.3.1
+/*
+ * kmod stress test driver
+ *
+ * Copyright (C) 2017 Luis R. Rodriguez <mcgrof@kernel.org>
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+/*
+ * This driver provides an interface to trigger and test the kernel's
+ * module loader through a series of configurations and a few triggers.
+ * To test this driver use the following script as root:
+ *
+ * tools/testing/selftests/kmod/kmod.sh --help
+ */
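+
+/*
+ * A hand-driven sketch of the sysfs interface the script wraps (the node
+ * name assumes the first test device registers as test_kmod0; the config
+ * and trigger_config attributes are defined further below):
+ *
+ *	cat /sys/devices/virtual/misc/test_kmod0/config
+ *	echo 1 > /sys/devices/virtual/misc/test_kmod0/trigger_config
+ */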
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/kmod.h>
+#include <linux/printk.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+
+#define TEST_START_NUM_THREADS 50
+#define TEST_START_DRIVER "test_module"
+#define TEST_START_TEST_FS "xfs"
+#define TEST_START_TEST_CASE TEST_KMOD_DRIVER
+
+
+static bool force_init_test = false;
+module_param(force_init_test, bool_enable_only, 0644);
+MODULE_PARM_DESC(force_init_test,
+ "Force kicking a test immediately after driver loads");
+
+/*
+ * For device allocation / registration
+ */
+static DEFINE_MUTEX(reg_dev_mutex);
+static LIST_HEAD(reg_test_devs);
+
+/*
+ * num_test_devs actually represents the *next* ID, i.e. the ID of the
+ * next device we will allow to be created.
+ */
+static int num_test_devs;
+
+/**
+ * enum kmod_test_case - linker table test case
+ * @TEST_KMOD_DRIVER: stress tests request_module()
+ * @TEST_KMOD_FS_TYPE: stress tests get_fs_type()
+ *
+ * If you add a test case, please be sure to review if you need to set
+ * @need_mod_put for your test case.
+ */
+enum kmod_test_case {
+ __TEST_KMOD_INVALID = 0,
+
+ TEST_KMOD_DRIVER,
+ TEST_KMOD_FS_TYPE,
+
+ __TEST_KMOD_MAX,
+};
+
+struct test_config {
+ char *test_driver;
+ char *test_fs;
+ unsigned int num_threads;
+ enum kmod_test_case test_case;
+ int test_result;
+};
+
+struct kmod_test_device;
+
+/**
+ * struct kmod_test_device_info - thread info
+ *
+ * @ret_sync: return value if request_module() is used, sync request for
+ * @TEST_KMOD_DRIVER
+ * @fs_sync: return value of get_fs_type() for @TEST_KMOD_FS_TYPE
+ * @task_sync: kthread running the request; NULL once the thread is done
+ * @thread_idx: thread ID
+ * @test_dev: test device the test is being performed under
+ * @need_mod_put: Some tests (get_fs_type() is one) require putting the module
+ * (module_put(fs_sync->owner)) when done, otherwise you will not be able
+ * to unload the respective modules and re-test. We use this to keep
+ * accounting of when we need this and to help out in case we need to
+ * error out and deal with module_put() on error.
+ */
+struct kmod_test_device_info {
+ int ret_sync;
+ struct file_system_type *fs_sync;
+ struct task_struct *task_sync;
+ unsigned int thread_idx;
+ struct kmod_test_device *test_dev;
+ bool need_mod_put;
+};
+
+/**
+ * struct kmod_test_device - test device to help test kmod
+ *
+ * @dev_idx: unique ID for test device
+ * @config: configuration for the test
+ * @misc_dev: we use a misc device under the hood
+ * @dev: pointer to misc_dev's own struct device
+ * @config_mutex: protects configuration of test
+ * @trigger_mutex: the test trigger can only be fired once at a time
+ * @thread_mutex: protects the @done count and the @info for each thread
+ * @done: number of threads which have completed or failed
+ * @test_is_oom: when we run out of memory, use this to halt moving forward
+ * @kthreads_done: completion used to signal when all work is done
+ * @list: needed to be part of the reg_test_devs
+ * @info: array of info for each thread
+ */
+struct kmod_test_device {
+ int dev_idx;
+ struct test_config config;
+ struct miscdevice misc_dev;
+ struct device *dev;
+ struct mutex config_mutex;
+ struct mutex trigger_mutex;
+ struct mutex thread_mutex;
+
+ unsigned int done;
+
+ bool test_is_oom;
+ struct completion kthreads_done;
+ struct list_head list;
+
+ struct kmod_test_device_info *info;
+};
+
+static const char *test_case_str(enum kmod_test_case test_case)
+{
+ switch (test_case) {
+ case TEST_KMOD_DRIVER:
+ return "TEST_KMOD_DRIVER";
+ case TEST_KMOD_FS_TYPE:
+ return "TEST_KMOD_FS_TYPE";
+ default:
+ return "invalid";
+ }
+}
+
+static struct miscdevice *dev_to_misc_dev(struct device *dev)
+{
+ return dev_get_drvdata(dev);
+}
+
+static struct kmod_test_device *misc_dev_to_test_dev(struct miscdevice *misc_dev)
+{
+ return container_of(misc_dev, struct kmod_test_device, misc_dev);
+}
+
+static struct kmod_test_device *dev_to_test_dev(struct device *dev)
+{
+ struct miscdevice *misc_dev;
+
+ misc_dev = dev_to_misc_dev(dev);
+
+ return misc_dev_to_test_dev(misc_dev);
+}
+
+/* Must run with thread_mutex held */
+static void kmod_test_done_check(struct kmod_test_device *test_dev,
+ unsigned int idx)
+{
+ struct test_config *config = &test_dev->config;
+
+ test_dev->done++;
+ dev_dbg(test_dev->dev, "Done thread count: %u\n", test_dev->done);
+
+ if (test_dev->done == config->num_threads) {
+ dev_info(test_dev->dev, "Done: %u threads have all run now\n",
+ test_dev->done);
+ dev_info(test_dev->dev, "Last thread to run: %u\n", idx);
+ complete(&test_dev->kthreads_done);
+ }
+}
+
+static void test_kmod_put_module(struct kmod_test_device_info *info)
+{
+ struct kmod_test_device *test_dev = info->test_dev;
+ struct test_config *config = &test_dev->config;
+
+ if (!info->need_mod_put)
+ return;
+
+ switch (config->test_case) {
+ case TEST_KMOD_DRIVER:
+ break;
+ case TEST_KMOD_FS_TYPE:
+ if (info->fs_sync && info->fs_sync->owner)
+ module_put(info->fs_sync->owner);
+ break;
+ default:
+ BUG();
+ }
+
+ info->need_mod_put = true;
+}
+
+static int run_request(void *data)
+{
+ struct kmod_test_device_info *info = data;
+ struct kmod_test_device *test_dev = info->test_dev;
+ struct test_config *config = &test_dev->config;
+
+ switch (config->test_case) {
+ case TEST_KMOD_DRIVER:
+ info->ret_sync = request_module("%s", config->test_driver);
+ break;
+ case TEST_KMOD_FS_TYPE:
+ info->fs_sync = get_fs_type(config->test_fs);
+ info->need_mod_put = true;
+ break;
+ default:
+ /* __trigger_config_run() already checked for test sanity */
+ BUG();
+ return -EINVAL;
+ }
+
+ dev_dbg(test_dev->dev, "Ran thread %u\n", info->thread_idx);
+
+ test_kmod_put_module(info);
+
+ mutex_lock(&test_dev->thread_mutex);
+ info->task_sync = NULL;
+ kmod_test_done_check(test_dev, info->thread_idx);
+ mutex_unlock(&test_dev->thread_mutex);
+
+ return 0;
+}
+
+static int tally_work_test(struct kmod_test_device_info *info)
+{
+ struct kmod_test_device *test_dev = info->test_dev;
+ struct test_config *config = &test_dev->config;
+ int err_ret = 0;
+
+ switch (config->test_case) {
+ case TEST_KMOD_DRIVER:
+		/*
+		 * Only capture errors; if one is found, that's
+		 * enough for now.
+		 */
+ if (info->ret_sync != 0)
+ err_ret = info->ret_sync;
+ dev_info(test_dev->dev,
+ "Sync thread %d return status: %d\n",
+ info->thread_idx, info->ret_sync);
+ break;
+ case TEST_KMOD_FS_TYPE:
+ /* For now we make this simple */
+ if (!info->fs_sync)
+ err_ret = -EINVAL;
+ dev_info(test_dev->dev, "Sync thread %u fs: %s\n",
+ info->thread_idx, info->fs_sync ? config->test_fs :
+ "NULL");
+ break;
+ default:
+ BUG();
+ }
+
+ return err_ret;
+}
+
+/*
+ * XXX: add result option to display if all errors did not match.
+ * For now we just keep any error code if one was found.
+ *
+ * If this ran it means *all* tasks were created fine and we
+ * are now just collecting results.
+ *
+ * Only propagate errors, do not override with a subsequent success case.
+ */
+static void tally_up_work(struct kmod_test_device *test_dev)
+{
+ struct test_config *config = &test_dev->config;
+ struct kmod_test_device_info *info;
+ unsigned int idx;
+ int err_ret = 0;
+ int ret = 0;
+
+ mutex_lock(&test_dev->thread_mutex);
+
+ dev_info(test_dev->dev, "Results:\n");
+
+	for (idx = 0; idx < config->num_threads; idx++) {
+ info = &test_dev->info[idx];
+ ret = tally_work_test(info);
+ if (ret)
+ err_ret = ret;
+ }
+
+ /*
+ * Note: request_module() returns 256 for a module not found even
+ * though modprobe itself returns 1.
+ */
+ config->test_result = err_ret;
+
+ mutex_unlock(&test_dev->thread_mutex);
+}
+
+static int try_one_request(struct kmod_test_device *test_dev, unsigned int idx)
+{
+ struct kmod_test_device_info *info = &test_dev->info[idx];
+ int fail_ret = -ENOMEM;
+
+ mutex_lock(&test_dev->thread_mutex);
+
+ info->thread_idx = idx;
+ info->test_dev = test_dev;
+ info->task_sync = kthread_run(run_request, info, "%s-%u",
+ KBUILD_MODNAME, idx);
+
+ if (!info->task_sync || IS_ERR(info->task_sync)) {
+ test_dev->test_is_oom = true;
+ dev_err(test_dev->dev, "Setting up thread %u failed\n", idx);
+ info->task_sync = NULL;
+ goto err_out;
+	} else {
+		dev_dbg(test_dev->dev, "Kicked off thread %u\n", idx);
+	}
+
+ mutex_unlock(&test_dev->thread_mutex);
+
+ return 0;
+
+err_out:
+ info->ret_sync = fail_ret;
+ mutex_unlock(&test_dev->thread_mutex);
+
+ return fail_ret;
+}
+
+static void test_dev_kmod_stop_tests(struct kmod_test_device *test_dev)
+{
+ struct test_config *config = &test_dev->config;
+ struct kmod_test_device_info *info;
+ unsigned int i;
+
+ dev_info(test_dev->dev, "Ending request_module() tests\n");
+
+ mutex_lock(&test_dev->thread_mutex);
+
+	for (i = 0; i < config->num_threads; i++) {
+ info = &test_dev->info[i];
+ if (info->task_sync && !IS_ERR(info->task_sync)) {
+ dev_info(test_dev->dev,
+ "Stopping still-running thread %i\n", i);
+ kthread_stop(info->task_sync);
+ }
+
+ /*
+ * info->task_sync is well protected, it can only be
+		 * NULL or a pointer to a struct. If it's NULL the thread
+		 * either never ran, or it ran and completed the work. Completed
+ * tasks *always* put the module for us. This is a sanity
+ * check -- just in case.
+ */
+ if (info->task_sync && info->need_mod_put)
+ test_kmod_put_module(info);
+ }
+
+ mutex_unlock(&test_dev->thread_mutex);
+}
+
+/*
+ * Only wait *iff* we did not run into any errors during all of our thread
+ * setup. If we ran into any issues we stop the threads and just bail out
+ * with an error to the trigger. This also means we don't need any tally
+ * work for any threads which fail.
+ */
+static int try_requests(struct kmod_test_device *test_dev)
+{
+ struct test_config *config = &test_dev->config;
+ unsigned int idx;
+ int ret;
+ bool any_error = false;
+
+	for (idx = 0; idx < config->num_threads; idx++) {
+ if (test_dev->test_is_oom) {
+ any_error = true;
+ break;
+ }
+
+ ret = try_one_request(test_dev, idx);
+ if (ret) {
+ any_error = true;
+ break;
+ }
+ }
+
+ if (!any_error) {
+ test_dev->test_is_oom = false;
+ dev_info(test_dev->dev,
+ "No errors were found while initializing threads\n");
+ wait_for_completion(&test_dev->kthreads_done);
+ tally_up_work(test_dev);
+ } else {
+ test_dev->test_is_oom = true;
+ dev_info(test_dev->dev,
+ "At least one thread failed to start, stop all work\n");
+ test_dev_kmod_stop_tests(test_dev);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int run_test_driver(struct kmod_test_device *test_dev)
+{
+ struct test_config *config = &test_dev->config;
+
+ dev_info(test_dev->dev, "Test case: %s (%u)\n",
+ test_case_str(config->test_case),
+ config->test_case);
+ dev_info(test_dev->dev, "Test driver to load: %s\n",
+ config->test_driver);
+ dev_info(test_dev->dev, "Number of threads to run: %u\n",
+ config->num_threads);
+ dev_info(test_dev->dev, "Thread IDs will range from 0 - %u\n",
+ config->num_threads - 1);
+
+ return try_requests(test_dev);
+}
+
+static int run_test_fs_type(struct kmod_test_device *test_dev)
+{
+ struct test_config *config = &test_dev->config;
+
+ dev_info(test_dev->dev, "Test case: %s (%u)\n",
+ test_case_str(config->test_case),
+ config->test_case);
+ dev_info(test_dev->dev, "Test filesystem to load: %s\n",
+ config->test_fs);
+ dev_info(test_dev->dev, "Number of threads to run: %u\n",
+ config->num_threads);
+ dev_info(test_dev->dev, "Thread IDs will range from 0 - %u\n",
+ config->num_threads - 1);
+
+ return try_requests(test_dev);
+}
+
+static ssize_t config_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct kmod_test_device *test_dev = dev_to_test_dev(dev);
+ struct test_config *config = &test_dev->config;
+ int len = 0;
+
+ mutex_lock(&test_dev->config_mutex);
+
+	len += scnprintf(buf, PAGE_SIZE,
+			"Custom trigger configuration for: %s\n",
+			dev_name(dev));
+
+	len += scnprintf(buf+len, PAGE_SIZE - len,
+			"Number of threads:\t%u\n",
+			config->num_threads);
+
+	len += scnprintf(buf+len, PAGE_SIZE - len,
+			"Test_case:\t%s (%u)\n",
+			test_case_str(config->test_case),
+			config->test_case);
+
+	if (config->test_driver)
+		len += scnprintf(buf+len, PAGE_SIZE - len,
+				"driver:\t%s\n",
+				config->test_driver);
+	else
+		len += scnprintf(buf+len, PAGE_SIZE - len,
+				"driver:\tEMPTY\n");
+
+	if (config->test_fs)
+		len += scnprintf(buf+len, PAGE_SIZE - len,
+				"fs:\t%s\n",
+				config->test_fs);
+	else
+		len += scnprintf(buf+len, PAGE_SIZE - len,
+				"fs:\tEMPTY\n");
+
+ mutex_unlock(&test_dev->config_mutex);
+
+ return len;
+}
+static DEVICE_ATTR_RO(config);
+
+/*
+ * This ensures we don't allow kicking threads through if our configuration
+ * is faulty.
+ */
+static int __trigger_config_run(struct kmod_test_device *test_dev)
+{
+ struct test_config *config = &test_dev->config;
+
+ test_dev->done = 0;
+
+ switch (config->test_case) {
+ case TEST_KMOD_DRIVER:
+ return run_test_driver(test_dev);
+ case TEST_KMOD_FS_TYPE:
+ return run_test_fs_type(test_dev);
+ default:
+ dev_warn(test_dev->dev,
+ "Invalid test case requested: %u\n",
+ config->test_case);
+ return -EINVAL;
+ }
+}
+
+static int trigger_config_run(struct kmod_test_device *test_dev)
+{
+ struct test_config *config = &test_dev->config;
+ int ret;
+
+ mutex_lock(&test_dev->trigger_mutex);
+ mutex_lock(&test_dev->config_mutex);
+
+ ret = __trigger_config_run(test_dev);
+ if (ret < 0)
+ goto out;
+ dev_info(test_dev->dev, "General test result: %d\n",
+ config->test_result);
+
+	/*
+	 * We must return 0 after a trigger event unless something went
+	 * wrong with the setup of the test. If the test setup went fine
+	 * then userspace must just check the result of config->test_result.
+	 * One issue with relying on the return value of a call in the kernel
+	 * is that if the kernel returns a positive value this trigger will
+	 * not propagate that value to userspace; it would be lost.
+	 *
+	 * By not relying on capturing the return value of the tests we run
+	 * through the trigger, it also allows us to run tests with set -e
+	 * and only fail when something went wrong with the driver upon
+	 * trigger requests.
+	 */
+ ret = 0;
+
+out:
+ mutex_unlock(&test_dev->config_mutex);
+ mutex_unlock(&test_dev->trigger_mutex);
+
+ return ret;
+}
+
+static ssize_t
+trigger_config_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct kmod_test_device *test_dev = dev_to_test_dev(dev);
+ int ret;
+
+ if (test_dev->test_is_oom)
+ return -ENOMEM;
+
+	/*
+	 * For all intents and purposes we don't care what userspace
+	 * sent this trigger, we care only that we were triggered.
+	 * We treat the return value only for capturing issues with
+	 * the test setup. At this point all the test variables should
+	 * have been allocated so typically this should never fail.
+	 */
+ ret = trigger_config_run(test_dev);
+ if (unlikely(ret < 0))
+ goto out;
+
+ /*
+ * Note: any return > 0 will be treated as success
+ * and the error value will not be available to userspace.
+ * Do not rely on trying to send to userspace a test value
+ * return value as positive return errors will be lost.
+ */
+ if (WARN_ON(ret > 0))
+ return -EINVAL;
+
+ ret = count;
+out:
+ return ret;
+}
+static DEVICE_ATTR_WO(trigger_config);
+
+/*
+ * XXX: move to kstrncpy() once merged.
+ *
+ * Users should use kfree_const() when freeing these.
+ */
+static int __kstrncpy(char **dst, const char *name, size_t count, gfp_t gfp)
+{
+ *dst = kstrndup(name, count, gfp);
+ if (!*dst)
+ return -ENOSPC;
+ return count;
+}
+
+static int config_copy_test_driver_name(struct test_config *config,
+ const char *name,
+ size_t count)
+{
+ return __kstrncpy(&config->test_driver, name, count, GFP_KERNEL);
+}
+
+static int config_copy_test_fs(struct test_config *config, const char *name,
+ size_t count)
+{
+ return __kstrncpy(&config->test_fs, name, count, GFP_KERNEL);
+}
+
+static void __kmod_config_free(struct test_config *config)
+{
+ if (!config)
+ return;
+
+ kfree_const(config->test_driver);
+ config->test_driver = NULL;
+
+ kfree_const(config->test_fs);
+ config->test_fs = NULL;
+}
+
+static void kmod_config_free(struct kmod_test_device *test_dev)
+{
+ struct test_config *config;
+
+ if (!test_dev)
+ return;
+
+ config = &test_dev->config;
+
+ mutex_lock(&test_dev->config_mutex);
+ __kmod_config_free(config);
+ mutex_unlock(&test_dev->config_mutex);
+}
+
+static ssize_t config_test_driver_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct kmod_test_device *test_dev = dev_to_test_dev(dev);
+ struct test_config *config = &test_dev->config;
+ int copied;
+
+ mutex_lock(&test_dev->config_mutex);
+
+ kfree_const(config->test_driver);
+ config->test_driver = NULL;
+
+ copied = config_copy_test_driver_name(config, buf, count);
+ mutex_unlock(&test_dev->config_mutex);
+
+ return copied;
+}
+
+/*
+ * As per sysfs_kf_seq_show() the buf is max PAGE_SIZE.
+ */
+static ssize_t config_test_show_str(struct mutex *config_mutex,
+ char *dst,
+ char *src)
+{
+ int len;
+
+ mutex_lock(config_mutex);
+	len = scnprintf(dst, PAGE_SIZE, "%s\n", src);
+ mutex_unlock(config_mutex);
+
+ return len;
+}
+
+static ssize_t config_test_driver_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct kmod_test_device *test_dev = dev_to_test_dev(dev);
+ struct test_config *config = &test_dev->config;
+
+ return config_test_show_str(&test_dev->config_mutex, buf,
+ config->test_driver);
+}
+static DEVICE_ATTR_RW(config_test_driver);
+
+static ssize_t config_test_fs_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct kmod_test_device *test_dev = dev_to_test_dev(dev);
+ struct test_config *config = &test_dev->config;
+ int copied;
+
+ mutex_lock(&test_dev->config_mutex);
+
+ kfree_const(config->test_fs);
+ config->test_fs = NULL;
+
+ copied = config_copy_test_fs(config, buf, count);
+ mutex_unlock(&test_dev->config_mutex);
+
+ return copied;
+}
+
+static ssize_t config_test_fs_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct kmod_test_device *test_dev = dev_to_test_dev(dev);
+ struct test_config *config = &test_dev->config;
+
+ return config_test_show_str(&test_dev->config_mutex, buf,
+ config->test_fs);
+}
+static DEVICE_ATTR_RW(config_test_fs);
+
+static int trigger_config_run_type(struct kmod_test_device *test_dev,
+ enum kmod_test_case test_case,
+ const char *test_str)
+{
+ int copied = 0;
+ struct test_config *config = &test_dev->config;
+
+ mutex_lock(&test_dev->config_mutex);
+
+ switch (test_case) {
+ case TEST_KMOD_DRIVER:
+ kfree_const(config->test_driver);
+ config->test_driver = NULL;
+ copied = config_copy_test_driver_name(config, test_str,
+ strlen(test_str));
+ break;
+ case TEST_KMOD_FS_TYPE:
+ kfree_const(config->test_fs);
+ config->test_fs = NULL;
+ copied = config_copy_test_fs(config, test_str,
+ strlen(test_str));
+ break;
+ default:
+ mutex_unlock(&test_dev->config_mutex);
+ return -EINVAL;
+ }
+
+ config->test_case = test_case;
+
+ mutex_unlock(&test_dev->config_mutex);
+
+ if (copied <= 0 || copied != strlen(test_str)) {
+ test_dev->test_is_oom = true;
+ return -ENOMEM;
+ }
+
+ test_dev->test_is_oom = false;
+
+ return trigger_config_run(test_dev);
+}
+
+static void free_test_dev_info(struct kmod_test_device *test_dev)
+{
+ vfree(test_dev->info);
+ test_dev->info = NULL;
+}
+
+static int kmod_config_sync_info(struct kmod_test_device *test_dev)
+{
+ struct test_config *config = &test_dev->config;
+
+ free_test_dev_info(test_dev);
+ test_dev->info =
+ vzalloc(array_size(sizeof(struct kmod_test_device_info),
+ config->num_threads));
+ if (!test_dev->info)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/*
+ * Old kernels may not have get_kmod_umh_limit(); keep this fallback if
+ * you want to port this code to test older kernels.
+ */
+#ifdef get_kmod_umh_limit
+static unsigned int kmod_init_test_thread_limit(void)
+{
+ return get_kmod_umh_limit();
+}
+#else
+static unsigned int kmod_init_test_thread_limit(void)
+{
+ return TEST_START_NUM_THREADS;
+}
+#endif
+
+static int __kmod_config_init(struct kmod_test_device *test_dev)
+{
+ struct test_config *config = &test_dev->config;
+ int ret = -ENOMEM, copied;
+
+ __kmod_config_free(config);
+
+ copied = config_copy_test_driver_name(config, TEST_START_DRIVER,
+ strlen(TEST_START_DRIVER));
+ if (copied != strlen(TEST_START_DRIVER))
+ goto err_out;
+
+ copied = config_copy_test_fs(config, TEST_START_TEST_FS,
+ strlen(TEST_START_TEST_FS));
+ if (copied != strlen(TEST_START_TEST_FS))
+ goto err_out;
+
+ config->num_threads = kmod_init_test_thread_limit();
+ config->test_result = 0;
+ config->test_case = TEST_START_TEST_CASE;
+
+ ret = kmod_config_sync_info(test_dev);
+ if (ret)
+ goto err_out;
+
+ test_dev->test_is_oom = false;
+
+ return 0;
+
+err_out:
+ test_dev->test_is_oom = true;
+ WARN_ON(test_dev->test_is_oom);
+
+ __kmod_config_free(config);
+
+ return ret;
+}
+
+static ssize_t reset_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct kmod_test_device *test_dev = dev_to_test_dev(dev);
+ int ret;
+
+ mutex_lock(&test_dev->trigger_mutex);
+ mutex_lock(&test_dev->config_mutex);
+
+ ret = __kmod_config_init(test_dev);
+	if (ret < 0) {
+		dev_err(dev, "could not alloc settings for config trigger: %d\n",
+			ret);
+		ret = -ENOMEM;
+ goto out;
+ }
+
+ dev_info(dev, "reset\n");
+ ret = count;
+
+out:
+ mutex_unlock(&test_dev->config_mutex);
+ mutex_unlock(&test_dev->trigger_mutex);
+
+ return ret;
+}
+static DEVICE_ATTR_WO(reset);
+
+static int test_dev_config_update_uint_sync(struct kmod_test_device *test_dev,
+ const char *buf, size_t size,
+ unsigned int *config,
+ int (*test_sync)(struct kmod_test_device *test_dev))
+{
+ int ret;
+ unsigned int val;
+ unsigned int old_val;
+
+ ret = kstrtouint(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ mutex_lock(&test_dev->config_mutex);
+
+ old_val = *config;
+ *(unsigned int *)config = val;
+
+ ret = test_sync(test_dev);
+ if (ret) {
+ *(unsigned int *)config = old_val;
+
+ ret = test_sync(test_dev);
+ WARN_ON(ret);
+
+ mutex_unlock(&test_dev->config_mutex);
+ return -EINVAL;
+ }
+
+ mutex_unlock(&test_dev->config_mutex);
+ /* Always return full write size even if we didn't consume all */
+ return size;
+}
+
+static int test_dev_config_update_uint_range(struct kmod_test_device *test_dev,
+ const char *buf, size_t size,
+ unsigned int *config,
+ unsigned int min,
+ unsigned int max)
+{
+ unsigned int val;
+ int ret;
+
+ ret = kstrtouint(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ if (val < min || val > max)
+ return -EINVAL;
+
+ mutex_lock(&test_dev->config_mutex);
+ *config = val;
+ mutex_unlock(&test_dev->config_mutex);
+
+ /* Always return full write size even if we didn't consume all */
+ return size;
+}
+
+static int test_dev_config_update_int(struct kmod_test_device *test_dev,
+ const char *buf, size_t size,
+ int *config)
+{
+ int val;
+ int ret;
+
+ ret = kstrtoint(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ mutex_lock(&test_dev->config_mutex);
+ *config = val;
+ mutex_unlock(&test_dev->config_mutex);
+ /* Always return full write size even if we didn't consume all */
+ return size;
+}
+
+static ssize_t test_dev_config_show_int(struct kmod_test_device *test_dev,
+ char *buf,
+ int config)
+{
+ int val;
+
+ mutex_lock(&test_dev->config_mutex);
+ val = config;
+ mutex_unlock(&test_dev->config_mutex);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+
+static ssize_t test_dev_config_show_uint(struct kmod_test_device *test_dev,
+ char *buf,
+ unsigned int config)
+{
+ unsigned int val;
+
+ mutex_lock(&test_dev->config_mutex);
+ val = config;
+ mutex_unlock(&test_dev->config_mutex);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", val);
+}
+
+static ssize_t test_result_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct kmod_test_device *test_dev = dev_to_test_dev(dev);
+ struct test_config *config = &test_dev->config;
+
+ return test_dev_config_update_int(test_dev, buf, count,
+ &config->test_result);
+}
+
+static ssize_t config_num_threads_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct kmod_test_device *test_dev = dev_to_test_dev(dev);
+ struct test_config *config = &test_dev->config;
+
+ return test_dev_config_update_uint_sync(test_dev, buf, count,
+ &config->num_threads,
+ kmod_config_sync_info);
+}
+
+static ssize_t config_num_threads_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct kmod_test_device *test_dev = dev_to_test_dev(dev);
+ struct test_config *config = &test_dev->config;
+
+	return test_dev_config_show_uint(test_dev, buf, config->num_threads);
+}
+static DEVICE_ATTR_RW(config_num_threads);
+
+static ssize_t config_test_case_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct kmod_test_device *test_dev = dev_to_test_dev(dev);
+ struct test_config *config = &test_dev->config;
+
+ return test_dev_config_update_uint_range(test_dev, buf, count,
+ &config->test_case,
+ __TEST_KMOD_INVALID + 1,
+ __TEST_KMOD_MAX - 1);
+}
+
+static ssize_t config_test_case_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct kmod_test_device *test_dev = dev_to_test_dev(dev);
+ struct test_config *config = &test_dev->config;
+
+ return test_dev_config_show_uint(test_dev, buf, config->test_case);
+}
+static DEVICE_ATTR_RW(config_test_case);
+
+static ssize_t test_result_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct kmod_test_device *test_dev = dev_to_test_dev(dev);
+ struct test_config *config = &test_dev->config;
+
+ return test_dev_config_show_int(test_dev, buf, config->test_result);
+}
+static DEVICE_ATTR_RW(test_result);
+
+#define TEST_KMOD_DEV_ATTR(name) &dev_attr_##name.attr
+
+static struct attribute *test_dev_attrs[] = {
+ TEST_KMOD_DEV_ATTR(trigger_config),
+ TEST_KMOD_DEV_ATTR(config),
+ TEST_KMOD_DEV_ATTR(reset),
+
+ TEST_KMOD_DEV_ATTR(config_test_driver),
+ TEST_KMOD_DEV_ATTR(config_test_fs),
+ TEST_KMOD_DEV_ATTR(config_num_threads),
+ TEST_KMOD_DEV_ATTR(config_test_case),
+ TEST_KMOD_DEV_ATTR(test_result),
+
+ NULL,
+};
+
+ATTRIBUTE_GROUPS(test_dev);
+
+static int kmod_config_init(struct kmod_test_device *test_dev)
+{
+ int ret;
+
+ mutex_lock(&test_dev->config_mutex);
+ ret = __kmod_config_init(test_dev);
+ mutex_unlock(&test_dev->config_mutex);
+
+ return ret;
+}
+
+static struct kmod_test_device *alloc_test_dev_kmod(int idx)
+{
+ int ret;
+ struct kmod_test_device *test_dev;
+ struct miscdevice *misc_dev;
+
+ test_dev = vzalloc(sizeof(struct kmod_test_device));
+ if (!test_dev)
+ goto err_out;
+
+ mutex_init(&test_dev->config_mutex);
+ mutex_init(&test_dev->trigger_mutex);
+ mutex_init(&test_dev->thread_mutex);
+
+ init_completion(&test_dev->kthreads_done);
+
+ ret = kmod_config_init(test_dev);
+ if (ret < 0) {
+		pr_err("kmod_config_init() failed: %d\n", ret);
+ goto err_out_free;
+ }
+
+ test_dev->dev_idx = idx;
+ misc_dev = &test_dev->misc_dev;
+
+ misc_dev->minor = MISC_DYNAMIC_MINOR;
+ misc_dev->name = kasprintf(GFP_KERNEL, "test_kmod%d", idx);
+ if (!misc_dev->name) {
+ pr_err("Cannot alloc misc_dev->name\n");
+ goto err_out_free_config;
+ }
+ misc_dev->groups = test_dev_groups;
+
+ return test_dev;
+
+err_out_free_config:
+ free_test_dev_info(test_dev);
+ kmod_config_free(test_dev);
+err_out_free:
+ vfree(test_dev);
+ test_dev = NULL;
+err_out:
+ return NULL;
+}
+
+static void free_test_dev_kmod(struct kmod_test_device *test_dev)
+{
+ if (test_dev) {
+ kfree_const(test_dev->misc_dev.name);
+ test_dev->misc_dev.name = NULL;
+ free_test_dev_info(test_dev);
+ kmod_config_free(test_dev);
+ vfree(test_dev);
+ test_dev = NULL;
+ }
+}
+
+static struct kmod_test_device *register_test_dev_kmod(void)
+{
+ struct kmod_test_device *test_dev = NULL;
+ int ret;
+
+ mutex_lock(&reg_dev_mutex);
+
+ /* int should suffice for number of devices, test for wrap */
+ if (num_test_devs + 1 == INT_MAX) {
+ pr_err("reached limit of number of test devices\n");
+ goto out;
+ }
+
+ test_dev = alloc_test_dev_kmod(num_test_devs);
+ if (!test_dev)
+ goto out;
+
+ ret = misc_register(&test_dev->misc_dev);
+ if (ret) {
+ pr_err("could not register misc device: %d\n", ret);
+ free_test_dev_kmod(test_dev);
+ test_dev = NULL;
+ goto out;
+ }
+
+ test_dev->dev = test_dev->misc_dev.this_device;
+ list_add_tail(&test_dev->list, &reg_test_devs);
+ dev_info(test_dev->dev, "interface ready\n");
+
+ num_test_devs++;
+
+out:
+ mutex_unlock(&reg_dev_mutex);
+
+ return test_dev;
+}
+
+static int __init test_kmod_init(void)
+{
+ struct kmod_test_device *test_dev;
+ int ret;
+
+ test_dev = register_test_dev_kmod();
+ if (!test_dev) {
+ pr_err("Cannot add first test kmod device\n");
+ return -ENODEV;
+ }
+
+ /*
+ * With some work we might be able to gracefully enable
+ * testing with this driver built-in, for now this seems
+ * rather risky. For those willing to try have at it,
+ * and enable the below. Good luck! If that works, try
+ * lowering the init level for more fun.
+ */
+ if (force_init_test) {
+ ret = trigger_config_run_type(test_dev,
+ TEST_KMOD_DRIVER, "tun");
+ if (WARN_ON(ret))
+ return ret;
+ ret = trigger_config_run_type(test_dev,
+ TEST_KMOD_FS_TYPE, "btrfs");
+ if (WARN_ON(ret))
+ return ret;
+ }
+
+ return 0;
+}
+late_initcall(test_kmod_init);
+
+static void unregister_test_dev_kmod(struct kmod_test_device *test_dev)
+{
+ mutex_lock(&test_dev->trigger_mutex);
+ mutex_lock(&test_dev->config_mutex);
+
+ test_dev_kmod_stop_tests(test_dev);
+
+ dev_info(test_dev->dev, "removing interface\n");
+ misc_deregister(&test_dev->misc_dev);
+
+ mutex_unlock(&test_dev->config_mutex);
+ mutex_unlock(&test_dev->trigger_mutex);
+
+ free_test_dev_kmod(test_dev);
+}
+
+static void __exit test_kmod_exit(void)
+{
+ struct kmod_test_device *test_dev, *tmp;
+
+ mutex_lock(&reg_dev_mutex);
+ list_for_each_entry_safe(test_dev, tmp, &reg_test_devs, list) {
+ list_del(&test_dev->list);
+ unregister_test_dev_kmod(test_dev);
+ }
+ mutex_unlock(&reg_dev_mutex);
+}
+module_exit(test_kmod_exit);
+
+MODULE_AUTHOR("Luis R. Rodriguez <mcgrof@kernel.org>");
+MODULE_LICENSE("GPL");
diff --git a/lib/test_kprobes.c b/lib/test_kprobes.c
new file mode 100644
index 000000000..0648f7154
--- /dev/null
+++ b/lib/test_kprobes.c
@@ -0,0 +1,403 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * test_kprobes.c - simple sanity test for *probes
+ *
+ * Copyright IBM Corp. 2008
+ */
+
+#include <linux/kernel.h>
+#include <linux/kprobes.h>
+#include <linux/random.h>
+#include <kunit/test.h>
+
+#define div_factor 3
+
+static u32 rand1, preh_val, posth_val;
+static u32 (*target)(u32 value);
+static u32 (*recursed_target)(u32 value);
+static u32 (*target2)(u32 value);
+static struct kunit *current_test;
+
+static unsigned long (*internal_target)(void);
+static unsigned long (*stacktrace_target)(void);
+static unsigned long (*stacktrace_driver)(void);
+static unsigned long target_return_address[2];
+
+static noinline u32 kprobe_target(u32 value)
+{
+ return (value / div_factor);
+}
+
+static noinline u32 kprobe_recursed_target(u32 value)
+{
+ return (value / div_factor);
+}
+
+static int kp_pre_handler(struct kprobe *p, struct pt_regs *regs)
+{
+ KUNIT_EXPECT_FALSE(current_test, preemptible());
+
+ preh_val = recursed_target(rand1);
+ return 0;
+}
+
+static void kp_post_handler(struct kprobe *p, struct pt_regs *regs,
+ unsigned long flags)
+{
+ u32 expval = recursed_target(rand1);
+
+ KUNIT_EXPECT_FALSE(current_test, preemptible());
+ KUNIT_EXPECT_EQ(current_test, preh_val, expval);
+
+ posth_val = preh_val + div_factor;
+}
+
+static struct kprobe kp = {
+ .symbol_name = "kprobe_target",
+ .pre_handler = kp_pre_handler,
+ .post_handler = kp_post_handler
+};
+
+static void test_kprobe(struct kunit *test)
+{
+ current_test = test;
+ KUNIT_EXPECT_EQ(test, 0, register_kprobe(&kp));
+ target(rand1);
+ unregister_kprobe(&kp);
+ KUNIT_EXPECT_NE(test, 0, preh_val);
+ KUNIT_EXPECT_NE(test, 0, posth_val);
+}
+
+static noinline u32 kprobe_target2(u32 value)
+{
+ return (value / div_factor) + 1;
+}
+
+static noinline unsigned long kprobe_stacktrace_internal_target(void)
+{
+ if (!target_return_address[0])
+ target_return_address[0] = (unsigned long)__builtin_return_address(0);
+ return target_return_address[0];
+}
+
+static noinline unsigned long kprobe_stacktrace_target(void)
+{
+ if (!target_return_address[1])
+ target_return_address[1] = (unsigned long)__builtin_return_address(0);
+
+ if (internal_target)
+ internal_target();
+
+ return target_return_address[1];
+}
+
+static noinline unsigned long kprobe_stacktrace_driver(void)
+{
+ if (stacktrace_target)
+ stacktrace_target();
+
+	/* This is to prevent the function from being inlined */
+ return (unsigned long)__builtin_return_address(0);
+}
+
+static int kp_pre_handler2(struct kprobe *p, struct pt_regs *regs)
+{
+ preh_val = (rand1 / div_factor) + 1;
+ return 0;
+}
+
+static void kp_post_handler2(struct kprobe *p, struct pt_regs *regs,
+ unsigned long flags)
+{
+ KUNIT_EXPECT_EQ(current_test, preh_val, (rand1 / div_factor) + 1);
+ posth_val = preh_val + div_factor;
+}
+
+static struct kprobe kp2 = {
+ .symbol_name = "kprobe_target2",
+ .pre_handler = kp_pre_handler2,
+ .post_handler = kp_post_handler2
+};
+
+static void test_kprobes(struct kunit *test)
+{
+ struct kprobe *kps[2] = {&kp, &kp2};
+
+ current_test = test;
+
+	/* addr and flags should be cleared for reusing the kprobe. */
+ kp.addr = NULL;
+ kp.flags = 0;
+
+ KUNIT_EXPECT_EQ(test, 0, register_kprobes(kps, 2));
+ preh_val = 0;
+ posth_val = 0;
+ target(rand1);
+
+ KUNIT_EXPECT_NE(test, 0, preh_val);
+ KUNIT_EXPECT_NE(test, 0, posth_val);
+
+ preh_val = 0;
+ posth_val = 0;
+ target2(rand1);
+
+ KUNIT_EXPECT_NE(test, 0, preh_val);
+ KUNIT_EXPECT_NE(test, 0, posth_val);
+ unregister_kprobes(kps, 2);
+}
+
+static struct kprobe kp_missed = {
+ .symbol_name = "kprobe_recursed_target",
+ .pre_handler = kp_pre_handler,
+ .post_handler = kp_post_handler,
+};
+
+static void test_kprobe_missed(struct kunit *test)
+{
+ current_test = test;
+ preh_val = 0;
+ posth_val = 0;
+
+ KUNIT_EXPECT_EQ(test, 0, register_kprobe(&kp_missed));
+
+ recursed_target(rand1);
+
+ KUNIT_EXPECT_EQ(test, 2, kp_missed.nmissed);
+ KUNIT_EXPECT_NE(test, 0, preh_val);
+ KUNIT_EXPECT_NE(test, 0, posth_val);
+
+ unregister_kprobe(&kp_missed);
+}
+
+#ifdef CONFIG_KRETPROBES
+static u32 krph_val;
+
+static int entry_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
+{
+ KUNIT_EXPECT_FALSE(current_test, preemptible());
+ krph_val = (rand1 / div_factor);
+ return 0;
+}
+
+static int return_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
+{
+ unsigned long ret = regs_return_value(regs);
+
+ KUNIT_EXPECT_FALSE(current_test, preemptible());
+ KUNIT_EXPECT_EQ(current_test, ret, rand1 / div_factor);
+ KUNIT_EXPECT_NE(current_test, krph_val, 0);
+ krph_val = rand1;
+ return 0;
+}
+
+static struct kretprobe rp = {
+ .handler = return_handler,
+ .entry_handler = entry_handler,
+ .kp.symbol_name = "kprobe_target"
+};
+
+static void test_kretprobe(struct kunit *test)
+{
+ current_test = test;
+ KUNIT_EXPECT_EQ(test, 0, register_kretprobe(&rp));
+ target(rand1);
+ unregister_kretprobe(&rp);
+ KUNIT_EXPECT_EQ(test, krph_val, rand1);
+}
+
+static int return_handler2(struct kretprobe_instance *ri, struct pt_regs *regs)
+{
+ unsigned long ret = regs_return_value(regs);
+
+ KUNIT_EXPECT_EQ(current_test, ret, (rand1 / div_factor) + 1);
+ KUNIT_EXPECT_NE(current_test, krph_val, 0);
+ krph_val = rand1;
+ return 0;
+}
+
+static struct kretprobe rp2 = {
+ .handler = return_handler2,
+ .entry_handler = entry_handler,
+ .kp.symbol_name = "kprobe_target2"
+};
+
+static void test_kretprobes(struct kunit *test)
+{
+ struct kretprobe *rps[2] = {&rp, &rp2};
+
+ current_test = test;
+	/* addr and flags should be cleared for reusing the kprobe. */
+ rp.kp.addr = NULL;
+ rp.kp.flags = 0;
+ KUNIT_EXPECT_EQ(test, 0, register_kretprobes(rps, 2));
+
+ krph_val = 0;
+ target(rand1);
+ KUNIT_EXPECT_EQ(test, krph_val, rand1);
+
+ krph_val = 0;
+ target2(rand1);
+ KUNIT_EXPECT_EQ(test, krph_val, rand1);
+ unregister_kretprobes(rps, 2);
+}
+
+#ifdef CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE
+#define STACK_BUF_SIZE 16
+static unsigned long stack_buf[STACK_BUF_SIZE];
+
+static int stacktrace_return_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
+{
+ unsigned long retval = regs_return_value(regs);
+ int i, ret;
+
+ KUNIT_EXPECT_FALSE(current_test, preemptible());
+ KUNIT_EXPECT_EQ(current_test, retval, target_return_address[1]);
+
+	/*
+	 * Test the stacktrace inside the kretprobe handler. This will involve
+	 * the kretprobe trampoline, but the trace must still include the
+	 * correct return address of the target function.
+	 */
+ ret = stack_trace_save(stack_buf, STACK_BUF_SIZE, 0);
+ KUNIT_EXPECT_NE(current_test, ret, 0);
+
+ for (i = 0; i < ret; i++) {
+ if (stack_buf[i] == target_return_address[1])
+ break;
+ }
+ KUNIT_EXPECT_NE(current_test, i, ret);
+
+#if !IS_MODULE(CONFIG_KPROBES_SANITY_TEST)
+ /*
+ * Test stacktrace from pt_regs at the return address. Thus the stack
+ * trace must start from the target return address.
+ */
+ ret = stack_trace_save_regs(regs, stack_buf, STACK_BUF_SIZE, 0);
+ KUNIT_EXPECT_NE(current_test, ret, 0);
+ KUNIT_EXPECT_EQ(current_test, stack_buf[0], target_return_address[1]);
+#endif
+
+ return 0;
+}
+
+static struct kretprobe rp3 = {
+ .handler = stacktrace_return_handler,
+ .kp.symbol_name = "kprobe_stacktrace_target"
+};
+
+static void test_stacktrace_on_kretprobe(struct kunit *test)
+{
+ unsigned long myretaddr = (unsigned long)__builtin_return_address(0);
+
+ current_test = test;
+ rp3.kp.addr = NULL;
+ rp3.kp.flags = 0;
+
+	/*
+	 * Run stacktrace_driver() to record the correct return address in
+	 * stacktrace_target(), and ensure the stacktrace_driver() call is
+	 * not inlined by checking that its return address differs from the
+	 * return address of this function.
+	 */
+ KUNIT_ASSERT_NE(test, myretaddr, stacktrace_driver());
+
+ KUNIT_ASSERT_EQ(test, 0, register_kretprobe(&rp3));
+ KUNIT_ASSERT_NE(test, myretaddr, stacktrace_driver());
+ unregister_kretprobe(&rp3);
+}
+
+static int stacktrace_internal_return_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
+{
+ unsigned long retval = regs_return_value(regs);
+ int i, ret;
+
+ KUNIT_EXPECT_FALSE(current_test, preemptible());
+ KUNIT_EXPECT_EQ(current_test, retval, target_return_address[0]);
+
+	/*
+	 * Test the stacktrace inside the kretprobe handler for the nested
+	 * case. The unwinder will find the kretprobe_trampoline address in
+	 * place of the return address, and the kretprobe must resolve that
+	 * back to the real return address.
+	 */
+ ret = stack_trace_save(stack_buf, STACK_BUF_SIZE, 0);
+ KUNIT_EXPECT_NE(current_test, ret, 0);
+
+ for (i = 0; i < ret - 1; i++) {
+ if (stack_buf[i] == target_return_address[0]) {
+ KUNIT_EXPECT_EQ(current_test, stack_buf[i + 1], target_return_address[1]);
+ break;
+ }
+ }
+ KUNIT_EXPECT_NE(current_test, i, ret);
+
+#if !IS_MODULE(CONFIG_KPROBES_SANITY_TEST)
+ /* Ditto for the regs version. */
+ ret = stack_trace_save_regs(regs, stack_buf, STACK_BUF_SIZE, 0);
+ KUNIT_EXPECT_NE(current_test, ret, 0);
+ KUNIT_EXPECT_EQ(current_test, stack_buf[0], target_return_address[0]);
+ KUNIT_EXPECT_EQ(current_test, stack_buf[1], target_return_address[1]);
+#endif
+
+ return 0;
+}
+
+static struct kretprobe rp4 = {
+ .handler = stacktrace_internal_return_handler,
+ .kp.symbol_name = "kprobe_stacktrace_internal_target"
+};
+
+static void test_stacktrace_on_nested_kretprobe(struct kunit *test)
+{
+ unsigned long myretaddr = (unsigned long)__builtin_return_address(0);
+ struct kretprobe *rps[2] = {&rp3, &rp4};
+
+ current_test = test;
+ rp3.kp.addr = NULL;
+ rp3.kp.flags = 0;
+
+ //KUNIT_ASSERT_NE(test, myretaddr, stacktrace_driver());
+
+ KUNIT_ASSERT_EQ(test, 0, register_kretprobes(rps, 2));
+ KUNIT_ASSERT_NE(test, myretaddr, stacktrace_driver());
+ unregister_kretprobes(rps, 2);
+}
+#endif /* CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE */
+
+#endif /* CONFIG_KRETPROBES */
+
+static int kprobes_test_init(struct kunit *test)
+{
+ target = kprobe_target;
+ target2 = kprobe_target2;
+ recursed_target = kprobe_recursed_target;
+ stacktrace_target = kprobe_stacktrace_target;
+ internal_target = kprobe_stacktrace_internal_target;
+ stacktrace_driver = kprobe_stacktrace_driver;
+ rand1 = get_random_u32_above(div_factor);
+ return 0;
+}
+
+static struct kunit_case kprobes_testcases[] = {
+ KUNIT_CASE(test_kprobe),
+ KUNIT_CASE(test_kprobes),
+ KUNIT_CASE(test_kprobe_missed),
+#ifdef CONFIG_KRETPROBES
+ KUNIT_CASE(test_kretprobe),
+ KUNIT_CASE(test_kretprobes),
+#ifdef CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE
+ KUNIT_CASE(test_stacktrace_on_kretprobe),
+ KUNIT_CASE(test_stacktrace_on_nested_kretprobe),
+#endif
+#endif
+ {}
+};
+
+static struct kunit_suite kprobes_test_suite = {
+ .name = "kprobes_test",
+ .init = kprobes_test_init,
+ .test_cases = kprobes_testcases,
+};
+
+kunit_test_suites(&kprobes_test_suite);
+
+MODULE_LICENSE("GPL");
diff --git a/lib/test_linear_ranges.c b/lib/test_linear_ranges.c
new file mode 100644
index 000000000..c18f9c0f1
--- /dev/null
+++ b/lib/test_linear_ranges.c
@@ -0,0 +1,219 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit test for the linear_ranges helper.
+ *
+ * Copyright (C) 2020, ROHM Semiconductors.
+ * Author: Matti Vaittinen <matti.vaittien@fi.rohmeurope.com>
+ */
+#include <kunit/test.h>
+
+#include <linux/linear_range.h>
+
+/* First things first. I deeply dislike unit-tests. I have seen all the hell
+ * breaking loose when people who think the unit tests are "the silver bullet"
+ * to kill bugs get to decide how a company should implement testing strategy...
+ *
+ * Believe me, it may get _really_ ridiculous. It is tempting to think that
+ * walking through all the possible execution branches will nail down 100% of
+ * bugs. This may lead to ideas about demands to get a certain % of "test
+ * coverage" - measured as line coverage. And that is one of the worst things
+ * you can do.
+ *
+ * Ask people to provide line coverage and they do. I've seen clever tools
+ * which generate test cases to test the existing functions - and by default
+ * these tools expect code to be correct and just generate checks which are
+ * passing when run against the current code-base. Run this generator and
+ * you'll get tests that do not test that the code is correct but just verify
+ * that nothing changes. The problem is that testing working code is pointless.
+ * And if it is not working, your test must not assume it is working. You
+ * won't catch any bugs by such tests. What you can do is generate a huge
+ * amount of tests. Especially if you are asked to provide 100% line-coverage
+ * x_x. So what do these tests - which are not finding any bugs now - do?
+ *
+ * They add inertia to every future development. I think it was Terry Pratchett
+ * who wrote about someone having the same impact as thick syrup has on a
+ * chronometer. An excessive amount of unit-tests has this effect on
+ * development. If you do actually find _any_ bug from code in such an
+ * environment and try fixing it... ...chances are you also need to fix the
+ * test cases. On a sunny day you fix one test. But I've done refactoring
+ * which resulted in 500+ broken tests (which had really zero value other
+ * than proving to managers that we do "quality")...
+ *
+ * After this being said - there are situations where UTs can be handy. If you
+ * have algorithms which take some input and should produce output - then you
+ * can implement a few carefully selected, simple UT-cases which test this.
+ * I've previously used this for example for netlink and device-tree data
+ * parsing functions. Feed some data examples to functions and verify the
+ * output is as expected. I am not covering all the cases but I will see that
+ * the main logic works.
+ *
+ * Here we also do some minor testing. I don't want to go through all branches
+ * or test more or less obvious things - but I want to see that the main logic
+ * is working. And I definitely don't want to add 500+ test cases that break
+ * when some simple fix is done x_x. So - let's only add a few well-selected
+ * tests which ensure as much of the logic is good as possible.
+ */
+
+/*
+ * Test Range 1:
+ * selectors: 2 3 4 5 6
+ * values (5): 10 20 30 40 50
+ *
+ * Test Range 2:
+ * selectors: 7 8 9 10
+ * values (4): 100 150 200 250
+ */
+
+#define RANGE1_MIN 10
+#define RANGE1_MIN_SEL 2
+#define RANGE1_STEP 10
+
+/* 2, 3, 4, 5, 6 */
+static const unsigned int range1_sels[] = { RANGE1_MIN_SEL, RANGE1_MIN_SEL + 1,
+ RANGE1_MIN_SEL + 2,
+ RANGE1_MIN_SEL + 3,
+ RANGE1_MIN_SEL + 4 };
+/* 10, 20, 30, 40, 50 */
+static const unsigned int range1_vals[] = { RANGE1_MIN, RANGE1_MIN +
+ RANGE1_STEP,
+ RANGE1_MIN + RANGE1_STEP * 2,
+ RANGE1_MIN + RANGE1_STEP * 3,
+ RANGE1_MIN + RANGE1_STEP * 4 };
+
+#define RANGE2_MIN 100
+#define RANGE2_MIN_SEL 7
+#define RANGE2_STEP 50
+
+/* 7, 8, 9, 10 */
+static const unsigned int range2_sels[] = { RANGE2_MIN_SEL, RANGE2_MIN_SEL + 1,
+ RANGE2_MIN_SEL + 2,
+ RANGE2_MIN_SEL + 3 };
+/* 100, 150, 200, 250 */
+static const unsigned int range2_vals[] = { RANGE2_MIN, RANGE2_MIN +
+ RANGE2_STEP,
+ RANGE2_MIN + RANGE2_STEP * 2,
+ RANGE2_MIN + RANGE2_STEP * 3 };
+
+#define RANGE1_NUM_VALS (ARRAY_SIZE(range1_vals))
+#define RANGE2_NUM_VALS (ARRAY_SIZE(range2_vals))
+#define RANGE_NUM_VALS (RANGE1_NUM_VALS + RANGE2_NUM_VALS)
+
+#define RANGE1_MAX_SEL (RANGE1_MIN_SEL + RANGE1_NUM_VALS - 1)
+#define RANGE1_MAX_VAL (range1_vals[RANGE1_NUM_VALS - 1])
+
+#define RANGE2_MAX_SEL (RANGE2_MIN_SEL + RANGE2_NUM_VALS - 1)
+#define RANGE2_MAX_VAL (range2_vals[RANGE2_NUM_VALS - 1])
+
+#define SMALLEST_SEL RANGE1_MIN_SEL
+#define SMALLEST_VAL RANGE1_MIN
+
+static struct linear_range testr[] = {
+ LINEAR_RANGE(RANGE1_MIN, RANGE1_MIN_SEL, RANGE1_MAX_SEL, RANGE1_STEP),
+ LINEAR_RANGE(RANGE2_MIN, RANGE2_MIN_SEL, RANGE2_MAX_SEL, RANGE2_STEP),
+};
+
+static void range_test_get_value(struct kunit *test)
+{
+ int ret, i;
+ unsigned int sel, val;
+
+ for (i = 0; i < RANGE1_NUM_VALS; i++) {
+ sel = range1_sels[i];
+ ret = linear_range_get_value_array(&testr[0], 2, sel, &val);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, val, range1_vals[i]);
+ }
+ for (i = 0; i < RANGE2_NUM_VALS; i++) {
+ sel = range2_sels[i];
+ ret = linear_range_get_value_array(&testr[0], 2, sel, &val);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, val, range2_vals[i]);
+ }
+ ret = linear_range_get_value_array(&testr[0], 2, sel + 1, &val);
+ KUNIT_EXPECT_NE(test, 0, ret);
+}
+
+static void range_test_get_selector_high(struct kunit *test)
+{
+ int ret, i;
+ unsigned int sel;
+ bool found;
+
+ for (i = 0; i < RANGE1_NUM_VALS; i++) {
+ ret = linear_range_get_selector_high(&testr[0], range1_vals[i],
+ &sel, &found);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, sel, range1_sels[i]);
+ KUNIT_EXPECT_TRUE(test, found);
+ }
+
+ ret = linear_range_get_selector_high(&testr[0], RANGE1_MAX_VAL + 1,
+ &sel, &found);
+ KUNIT_EXPECT_LE(test, ret, 0);
+
+ ret = linear_range_get_selector_high(&testr[0], RANGE1_MIN - 1,
+ &sel, &found);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_FALSE(test, found);
+ KUNIT_EXPECT_EQ(test, sel, range1_sels[0]);
+}
+
+static void range_test_get_value_amount(struct kunit *test)
+{
+ int ret;
+
+ ret = linear_range_values_in_range_array(&testr[0], 2);
+ KUNIT_EXPECT_EQ(test, (int)RANGE_NUM_VALS, ret);
+}
+
+static void range_test_get_selector_low(struct kunit *test)
+{
+ int i, ret;
+ unsigned int sel;
+ bool found;
+
+ for (i = 0; i < RANGE1_NUM_VALS; i++) {
+ ret = linear_range_get_selector_low_array(&testr[0], 2,
+ range1_vals[i], &sel,
+ &found);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, sel, range1_sels[i]);
+ KUNIT_EXPECT_TRUE(test, found);
+ }
+ for (i = 0; i < RANGE2_NUM_VALS; i++) {
+ ret = linear_range_get_selector_low_array(&testr[0], 2,
+ range2_vals[i], &sel,
+ &found);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, sel, range2_sels[i]);
+ KUNIT_EXPECT_TRUE(test, found);
+ }
+
+ /*
+ * Seek value greater than range max => get_selector_*_low should
+ * return Ok - but set found to false as value is not in range
+ */
+ ret = linear_range_get_selector_low_array(&testr[0], 2,
+ range2_vals[RANGE2_NUM_VALS - 1] + 1,
+ &sel, &found);
+
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, sel, range2_sels[RANGE2_NUM_VALS - 1]);
+ KUNIT_EXPECT_FALSE(test, found);
+}
+
+static struct kunit_case range_test_cases[] = {
+ KUNIT_CASE(range_test_get_value_amount),
+ KUNIT_CASE(range_test_get_selector_high),
+ KUNIT_CASE(range_test_get_selector_low),
+ KUNIT_CASE(range_test_get_value),
+ {},
+};
+
+static struct kunit_suite range_test_module = {
+ .name = "linear-ranges-test",
+ .test_cases = range_test_cases,
+};
+
+kunit_test_suites(&range_test_module);
+
+MODULE_LICENSE("GPL");
diff --git a/lib/test_list_sort.c b/lib/test_list_sort.c
new file mode 100644
index 000000000..cc5f335f2
--- /dev/null
+++ b/lib/test_list_sort.c
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <kunit/test.h>
+
+#include <linux/kernel.h>
+#include <linux/list_sort.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+
+/*
+ * The pattern of set bits in the list length determines which cases
+ * are hit in list_sort().
+ */
+#define TEST_LIST_LEN (512+128+2) /* not including head */
+
+#define TEST_POISON1 0xDEADBEEF
+#define TEST_POISON2 0xA324354C
+
+struct debug_el {
+ unsigned int poison1;
+ struct list_head list;
+ unsigned int poison2;
+ int value;
+ unsigned int serial;
+};
+
+static void check(struct kunit *test, struct debug_el *ela, struct debug_el *elb)
+{
+ struct debug_el **elts = test->priv;
+
+ KUNIT_EXPECT_LT_MSG(test, ela->serial, (unsigned int)TEST_LIST_LEN, "incorrect serial");
+ KUNIT_EXPECT_LT_MSG(test, elb->serial, (unsigned int)TEST_LIST_LEN, "incorrect serial");
+
+ KUNIT_EXPECT_PTR_EQ_MSG(test, elts[ela->serial], ela, "phantom element");
+ KUNIT_EXPECT_PTR_EQ_MSG(test, elts[elb->serial], elb, "phantom element");
+
+ KUNIT_EXPECT_EQ_MSG(test, ela->poison1, TEST_POISON1, "bad poison");
+ KUNIT_EXPECT_EQ_MSG(test, ela->poison2, TEST_POISON2, "bad poison");
+
+ KUNIT_EXPECT_EQ_MSG(test, elb->poison1, TEST_POISON1, "bad poison");
+ KUNIT_EXPECT_EQ_MSG(test, elb->poison2, TEST_POISON2, "bad poison");
+}
+
+/* `priv` is the test pointer so check() can fail the test if the list is invalid. */
+static int cmp(void *priv, const struct list_head *a, const struct list_head *b)
+{
+ struct debug_el *ela, *elb;
+
+ ela = container_of(a, struct debug_el, list);
+ elb = container_of(b, struct debug_el, list);
+
+ check(priv, ela, elb);
+ return ela->value - elb->value;
+}
+
+static void list_sort_test(struct kunit *test)
+{
+ int i, count = 1;
+ struct debug_el *el, **elts;
+ struct list_head *cur;
+ LIST_HEAD(head);
+
+ elts = kunit_kcalloc(test, TEST_LIST_LEN, sizeof(*elts), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elts);
+ test->priv = elts;
+
+ for (i = 0; i < TEST_LIST_LEN; i++) {
+ el = kunit_kmalloc(test, sizeof(*el), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, el);
+
+ /* force some equivalencies */
+ el->value = get_random_u32_below(TEST_LIST_LEN / 3);
+ el->serial = i;
+ el->poison1 = TEST_POISON1;
+ el->poison2 = TEST_POISON2;
+ elts[i] = el;
+ list_add_tail(&el->list, &head);
+ }
+
+ list_sort(test, &head, cmp);
+
+ for (cur = head.next; cur->next != &head; cur = cur->next) {
+ struct debug_el *el1;
+ int cmp_result;
+
+ KUNIT_ASSERT_PTR_EQ_MSG(test, cur->next->prev, cur,
+ "list is corrupted");
+
+ cmp_result = cmp(test, cur, cur->next);
+ KUNIT_ASSERT_LE_MSG(test, cmp_result, 0, "list is not sorted");
+
+ el = container_of(cur, struct debug_el, list);
+ el1 = container_of(cur->next, struct debug_el, list);
+ if (cmp_result == 0) {
+ KUNIT_ASSERT_LE_MSG(test, el->serial, el1->serial,
+ "order of equivalent elements not preserved");
+ }
+
+ check(test, el, el1);
+ count++;
+ }
+ KUNIT_EXPECT_PTR_EQ_MSG(test, head.prev, cur, "list is corrupted");
+
+ KUNIT_EXPECT_EQ_MSG(test, count, TEST_LIST_LEN,
+ "list length changed after sorting!");
+}
+
+static struct kunit_case list_sort_cases[] = {
+ KUNIT_CASE(list_sort_test),
+ {}
+};
+
+static struct kunit_suite list_sort_suite = {
+ .name = "list_sort",
+ .test_cases = list_sort_cases,
+};
+
+kunit_test_suites(&list_sort_suite);
+
+MODULE_LICENSE("GPL");
diff --git a/lib/test_lockup.c b/lib/test_lockup.c
new file mode 100644
index 000000000..c3fd87d6c
--- /dev/null
+++ b/lib/test_lockup.c
@@ -0,0 +1,620 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test module to generate lockups
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/sched/signal.h>
+#include <linux/sched/clock.h>
+#include <linux/cpu.h>
+#include <linux/nmi.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include <linux/file.h>
+
+static unsigned int time_secs;
+module_param(time_secs, uint, 0600);
+MODULE_PARM_DESC(time_secs, "lockup time in seconds, default 0");
+
+static unsigned int time_nsecs;
+module_param(time_nsecs, uint, 0600);
+MODULE_PARM_DESC(time_nsecs, "nanoseconds part of lockup time, default 0");
+
+static unsigned int cooldown_secs;
+module_param(cooldown_secs, uint, 0600);
+MODULE_PARM_DESC(cooldown_secs, "cooldown time between iterations in seconds, default 0");
+
+static unsigned int cooldown_nsecs;
+module_param(cooldown_nsecs, uint, 0600);
+MODULE_PARM_DESC(cooldown_nsecs, "nanoseconds part of cooldown, default 0");
+
+static unsigned int iterations = 1;
+module_param(iterations, uint, 0600);
+MODULE_PARM_DESC(iterations, "lockup iterations, default 1");
+
+static bool all_cpus;
+module_param(all_cpus, bool, 0400);
+MODULE_PARM_DESC(all_cpus, "trigger lockup at all cpus at once");
+
+static int wait_state;
+static char *state = "R";
+module_param(state, charp, 0400);
+MODULE_PARM_DESC(state, "wait in 'R' running (default), 'D' uninterruptible, 'K' killable, 'S' interruptible state");
+
+static bool use_hrtimer;
+module_param(use_hrtimer, bool, 0400);
+MODULE_PARM_DESC(use_hrtimer, "use high-resolution timer for sleeping");
+
+static bool iowait;
+module_param(iowait, bool, 0400);
+MODULE_PARM_DESC(iowait, "account sleep time as iowait");
+
+static bool lock_read;
+module_param(lock_read, bool, 0400);
+MODULE_PARM_DESC(lock_read, "lock read-write locks for read");
+
+static bool lock_single;
+module_param(lock_single, bool, 0400);
+MODULE_PARM_DESC(lock_single, "acquire locks only at one cpu");
+
+static bool reacquire_locks;
+module_param(reacquire_locks, bool, 0400);
+MODULE_PARM_DESC(reacquire_locks, "release and reacquire locks/irq/preempt between iterations");
+
+static bool touch_softlockup;
+module_param(touch_softlockup, bool, 0600);
+MODULE_PARM_DESC(touch_softlockup, "touch soft-lockup watchdog between iterations");
+
+static bool touch_hardlockup;
+module_param(touch_hardlockup, bool, 0600);
+MODULE_PARM_DESC(touch_hardlockup, "touch hard-lockup watchdog between iterations");
+
+static bool call_cond_resched;
+module_param(call_cond_resched, bool, 0600);
+MODULE_PARM_DESC(call_cond_resched, "call cond_resched() between iterations");
+
+static bool measure_lock_wait;
+module_param(measure_lock_wait, bool, 0400);
+MODULE_PARM_DESC(measure_lock_wait, "measure lock wait time");
+
+static unsigned long lock_wait_threshold = ULONG_MAX;
+module_param(lock_wait_threshold, ulong, 0400);
+MODULE_PARM_DESC(lock_wait_threshold, "print lock wait time longer than this in nanoseconds, default off");
+
+static bool test_disable_irq;
+module_param_named(disable_irq, test_disable_irq, bool, 0400);
+MODULE_PARM_DESC(disable_irq, "disable interrupts: generate hard-lockups");
+
+static bool disable_softirq;
+module_param(disable_softirq, bool, 0400);
+MODULE_PARM_DESC(disable_softirq, "disable bottom-half irq handlers");
+
+static bool disable_preempt;
+module_param(disable_preempt, bool, 0400);
+MODULE_PARM_DESC(disable_preempt, "disable preemption: generate soft-lockups");
+
+static bool lock_rcu;
+module_param(lock_rcu, bool, 0400);
+MODULE_PARM_DESC(lock_rcu, "grab rcu_read_lock: generate rcu stalls");
+
+static bool lock_mmap_sem;
+module_param(lock_mmap_sem, bool, 0400);
+MODULE_PARM_DESC(lock_mmap_sem, "lock mm->mmap_lock: block procfs interfaces");
+
+static unsigned long lock_rwsem_ptr;
+module_param_unsafe(lock_rwsem_ptr, ulong, 0400);
+MODULE_PARM_DESC(lock_rwsem_ptr, "lock rw_semaphore at address");
+
+static unsigned long lock_mutex_ptr;
+module_param_unsafe(lock_mutex_ptr, ulong, 0400);
+MODULE_PARM_DESC(lock_mutex_ptr, "lock mutex at address");
+
+static unsigned long lock_spinlock_ptr;
+module_param_unsafe(lock_spinlock_ptr, ulong, 0400);
+MODULE_PARM_DESC(lock_spinlock_ptr, "lock spinlock at address");
+
+static unsigned long lock_rwlock_ptr;
+module_param_unsafe(lock_rwlock_ptr, ulong, 0400);
+MODULE_PARM_DESC(lock_rwlock_ptr, "lock rwlock at address");
+
+static unsigned int alloc_pages_nr;
+module_param_unsafe(alloc_pages_nr, uint, 0600);
+MODULE_PARM_DESC(alloc_pages_nr, "allocate and free pages under locks");
+
+static unsigned int alloc_pages_order;
+module_param(alloc_pages_order, uint, 0400);
+MODULE_PARM_DESC(alloc_pages_order, "page order to allocate");
+
+static gfp_t alloc_pages_gfp = GFP_KERNEL;
+module_param_unsafe(alloc_pages_gfp, uint, 0400);
+MODULE_PARM_DESC(alloc_pages_gfp, "allocate pages with this gfp_mask, default GFP_KERNEL");
+
+static bool alloc_pages_atomic;
+module_param(alloc_pages_atomic, bool, 0400);
+MODULE_PARM_DESC(alloc_pages_atomic, "allocate pages with GFP_ATOMIC");
+
+static bool reallocate_pages;
+module_param(reallocate_pages, bool, 0400);
+MODULE_PARM_DESC(reallocate_pages, "free and allocate pages between iterations");
+
+static struct file *test_file;
+static struct inode *test_inode;
+static char test_file_path[256];
+module_param_string(file_path, test_file_path, sizeof(test_file_path), 0400);
+MODULE_PARM_DESC(file_path, "file path to test");
+
+static bool test_lock_inode;
+module_param_named(lock_inode, test_lock_inode, bool, 0400);
+MODULE_PARM_DESC(lock_inode, "lock file -> inode -> i_rwsem");
+
+static bool test_lock_mapping;
+module_param_named(lock_mapping, test_lock_mapping, bool, 0400);
+MODULE_PARM_DESC(lock_mapping, "lock file -> mapping -> i_mmap_rwsem");
+
+static bool test_lock_sb_umount;
+module_param_named(lock_sb_umount, test_lock_sb_umount, bool, 0400);
+MODULE_PARM_DESC(lock_sb_umount, "lock file -> sb -> s_umount");
+
+static atomic_t alloc_pages_failed = ATOMIC_INIT(0);
+
+static atomic64_t max_lock_wait = ATOMIC64_INIT(0);
+
+static struct task_struct *main_task;
+static int master_cpu;
+
+static void test_lock(bool master, bool verbose)
+{
+ u64 wait_start;
+
+ if (measure_lock_wait)
+ wait_start = local_clock();
+
+ if (lock_mutex_ptr && master) {
+ if (verbose)
+ pr_notice("lock mutex %ps\n", (void *)lock_mutex_ptr);
+ mutex_lock((struct mutex *)lock_mutex_ptr);
+ }
+
+ if (lock_rwsem_ptr && master) {
+ if (verbose)
+ pr_notice("lock rw_semaphore %ps\n",
+ (void *)lock_rwsem_ptr);
+ if (lock_read)
+ down_read((struct rw_semaphore *)lock_rwsem_ptr);
+ else
+ down_write((struct rw_semaphore *)lock_rwsem_ptr);
+ }
+
+ if (lock_mmap_sem && master) {
+ if (verbose)
+ pr_notice("lock mmap_lock pid=%d\n", main_task->pid);
+ if (lock_read)
+ mmap_read_lock(main_task->mm);
+ else
+ mmap_write_lock(main_task->mm);
+ }
+
+ if (test_disable_irq)
+ local_irq_disable();
+
+ if (disable_softirq)
+ local_bh_disable();
+
+ if (disable_preempt)
+ preempt_disable();
+
+ if (lock_rcu)
+ rcu_read_lock();
+
+ if (lock_spinlock_ptr && master) {
+ if (verbose)
+ pr_notice("lock spinlock %ps\n",
+ (void *)lock_spinlock_ptr);
+ spin_lock((spinlock_t *)lock_spinlock_ptr);
+ }
+
+ if (lock_rwlock_ptr && master) {
+ if (verbose)
+ pr_notice("lock rwlock %ps\n",
+ (void *)lock_rwlock_ptr);
+ if (lock_read)
+ read_lock((rwlock_t *)lock_rwlock_ptr);
+ else
+ write_lock((rwlock_t *)lock_rwlock_ptr);
+ }
+
+ if (measure_lock_wait) {
+ s64 cur_wait = local_clock() - wait_start;
+ s64 max_wait = atomic64_read(&max_lock_wait);
+
+ do {
+ if (cur_wait < max_wait)
+ break;
+ max_wait = atomic64_cmpxchg(&max_lock_wait,
+ max_wait, cur_wait);
+ } while (max_wait != cur_wait);
+
+ if (cur_wait > lock_wait_threshold)
+ pr_notice_ratelimited("lock wait %lld ns\n", cur_wait);
+ }
+}
+
+static void test_unlock(bool master, bool verbose)
+{
+ if (lock_rwlock_ptr && master) {
+ if (lock_read)
+ read_unlock((rwlock_t *)lock_rwlock_ptr);
+ else
+ write_unlock((rwlock_t *)lock_rwlock_ptr);
+ if (verbose)
+ pr_notice("unlock rwlock %ps\n",
+ (void *)lock_rwlock_ptr);
+ }
+
+ if (lock_spinlock_ptr && master) {
+ spin_unlock((spinlock_t *)lock_spinlock_ptr);
+ if (verbose)
+ pr_notice("unlock spinlock %ps\n",
+ (void *)lock_spinlock_ptr);
+ }
+
+ if (lock_rcu)
+ rcu_read_unlock();
+
+ if (disable_preempt)
+ preempt_enable();
+
+ if (disable_softirq)
+ local_bh_enable();
+
+ if (test_disable_irq)
+ local_irq_enable();
+
+ if (lock_mmap_sem && master) {
+ if (lock_read)
+ mmap_read_unlock(main_task->mm);
+ else
+ mmap_write_unlock(main_task->mm);
+ if (verbose)
+ pr_notice("unlock mmap_lock pid=%d\n", main_task->pid);
+ }
+
+ if (lock_rwsem_ptr && master) {
+ if (lock_read)
+ up_read((struct rw_semaphore *)lock_rwsem_ptr);
+ else
+ up_write((struct rw_semaphore *)lock_rwsem_ptr);
+ if (verbose)
+ pr_notice("unlock rw_semaphore %ps\n",
+ (void *)lock_rwsem_ptr);
+ }
+
+ if (lock_mutex_ptr && master) {
+ mutex_unlock((struct mutex *)lock_mutex_ptr);
+ if (verbose)
+ pr_notice("unlock mutex %ps\n",
+ (void *)lock_mutex_ptr);
+ }
+}
+
+static void test_alloc_pages(struct list_head *pages)
+{
+ struct page *page;
+ unsigned int i;
+
+ for (i = 0; i < alloc_pages_nr; i++) {
+ page = alloc_pages(alloc_pages_gfp, alloc_pages_order);
+ if (!page) {
+ atomic_inc(&alloc_pages_failed);
+ break;
+ }
+ list_add(&page->lru, pages);
+ }
+}
+
+static void test_free_pages(struct list_head *pages)
+{
+ struct page *page, *next;
+
+ list_for_each_entry_safe(page, next, pages, lru)
+ __free_pages(page, alloc_pages_order);
+ INIT_LIST_HEAD(pages);
+}
+
+static void test_wait(unsigned int secs, unsigned int nsecs)
+{
+ if (wait_state == TASK_RUNNING) {
+ if (secs)
+ mdelay(secs * MSEC_PER_SEC);
+ if (nsecs)
+ ndelay(nsecs);
+ return;
+ }
+
+ __set_current_state(wait_state);
+ if (use_hrtimer) {
+ ktime_t time;
+
+ time = ns_to_ktime((u64)secs * NSEC_PER_SEC + nsecs);
+ schedule_hrtimeout(&time, HRTIMER_MODE_REL);
+ } else {
+ schedule_timeout(secs * HZ + nsecs_to_jiffies(nsecs));
+ }
+}
+
+static void test_lockup(bool master)
+{
+ u64 lockup_start = local_clock();
+ unsigned int iter = 0;
+ LIST_HEAD(pages);
+
+ pr_notice("Start on CPU%d\n", raw_smp_processor_id());
+
+ test_lock(master, true);
+
+ test_alloc_pages(&pages);
+
+ while (iter++ < iterations && !signal_pending(main_task)) {
+
+ if (iowait)
+ current->in_iowait = 1;
+
+ test_wait(time_secs, time_nsecs);
+
+ if (iowait)
+ current->in_iowait = 0;
+
+ if (reallocate_pages)
+ test_free_pages(&pages);
+
+ if (reacquire_locks)
+ test_unlock(master, false);
+
+ if (touch_softlockup)
+ touch_softlockup_watchdog();
+
+ if (touch_hardlockup)
+ touch_nmi_watchdog();
+
+ if (call_cond_resched)
+ cond_resched();
+
+ test_wait(cooldown_secs, cooldown_nsecs);
+
+ if (reacquire_locks)
+ test_lock(master, false);
+
+ if (reallocate_pages)
+ test_alloc_pages(&pages);
+ }
+
+ pr_notice("Finish on CPU%d in %lld ns\n", raw_smp_processor_id(),
+ local_clock() - lockup_start);
+
+ test_free_pages(&pages);
+
+ test_unlock(master, true);
+}
+
+static DEFINE_PER_CPU(struct work_struct, test_works);
+
+static void test_work_fn(struct work_struct *work)
+{
+ test_lockup(!lock_single ||
+ work == per_cpu_ptr(&test_works, master_cpu));
+}
+
+static bool test_kernel_ptr(unsigned long addr, int size)
+{
+ void *ptr = (void *)addr;
+ char buf;
+
+ if (!addr)
+ return false;
+
+	/* should be at least a readable kernel address */
+ if (!IS_ENABLED(CONFIG_ALTERNATE_USER_ADDRESS_SPACE) &&
+ (access_ok((void __user *)ptr, 1) ||
+ access_ok((void __user *)ptr + size - 1, 1))) {
+ pr_err("user space ptr invalid in kernel: %#lx\n", addr);
+ return true;
+ }
+
+ if (get_kernel_nofault(buf, ptr) ||
+ get_kernel_nofault(buf, ptr + size - 1)) {
+ pr_err("invalid kernel ptr: %#lx\n", addr);
+ return true;
+ }
+
+ return false;
+}
+
+static bool __maybe_unused test_magic(unsigned long addr, int offset,
+ unsigned int expected)
+{
+ void *ptr = (void *)addr + offset;
+ unsigned int magic = 0;
+
+ if (!addr)
+ return false;
+
+ if (get_kernel_nofault(magic, ptr) || magic != expected) {
+ pr_err("invalid magic at %#lx + %#x = %#x, expected %#x\n",
+ addr, offset, magic, expected);
+ return true;
+ }
+
+ return false;
+}
+
+static int __init test_lockup_init(void)
+{
+ u64 test_start = local_clock();
+
+ main_task = current;
+
+ switch (state[0]) {
+ case 'S':
+ wait_state = TASK_INTERRUPTIBLE;
+ break;
+ case 'D':
+ wait_state = TASK_UNINTERRUPTIBLE;
+ break;
+ case 'K':
+ wait_state = TASK_KILLABLE;
+ break;
+ case 'R':
+ wait_state = TASK_RUNNING;
+ break;
+ default:
+ pr_err("unknown state=%s\n", state);
+ return -EINVAL;
+ }
+
+ if (alloc_pages_atomic)
+ alloc_pages_gfp = GFP_ATOMIC;
+
+ if (test_kernel_ptr(lock_spinlock_ptr, sizeof(spinlock_t)) ||
+ test_kernel_ptr(lock_rwlock_ptr, sizeof(rwlock_t)) ||
+ test_kernel_ptr(lock_mutex_ptr, sizeof(struct mutex)) ||
+ test_kernel_ptr(lock_rwsem_ptr, sizeof(struct rw_semaphore)))
+ return -EINVAL;
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+#ifdef CONFIG_PREEMPT_RT
+ if (test_magic(lock_spinlock_ptr,
+ offsetof(spinlock_t, lock.wait_lock.magic),
+ SPINLOCK_MAGIC) ||
+ test_magic(lock_rwlock_ptr,
+ offsetof(rwlock_t, rwbase.rtmutex.wait_lock.magic),
+ SPINLOCK_MAGIC) ||
+ test_magic(lock_mutex_ptr,
+ offsetof(struct mutex, rtmutex.wait_lock.magic),
+ SPINLOCK_MAGIC) ||
+ test_magic(lock_rwsem_ptr,
+ offsetof(struct rw_semaphore, rwbase.rtmutex.wait_lock.magic),
+ SPINLOCK_MAGIC))
+ return -EINVAL;
+#else
+ if (test_magic(lock_spinlock_ptr,
+ offsetof(spinlock_t, rlock.magic),
+ SPINLOCK_MAGIC) ||
+ test_magic(lock_rwlock_ptr,
+ offsetof(rwlock_t, magic),
+ RWLOCK_MAGIC) ||
+ test_magic(lock_mutex_ptr,
+ offsetof(struct mutex, wait_lock.magic),
+ SPINLOCK_MAGIC) ||
+ test_magic(lock_rwsem_ptr,
+ offsetof(struct rw_semaphore, wait_lock.magic),
+ SPINLOCK_MAGIC))
+ return -EINVAL;
+#endif
+#endif
+
+ if ((wait_state != TASK_RUNNING ||
+ (call_cond_resched && !reacquire_locks) ||
+ (alloc_pages_nr && gfpflags_allow_blocking(alloc_pages_gfp))) &&
+ (test_disable_irq || disable_softirq || disable_preempt ||
+ lock_rcu || lock_spinlock_ptr || lock_rwlock_ptr)) {
+ pr_err("refuse to sleep in atomic context\n");
+ return -EINVAL;
+ }
+
+ if (lock_mmap_sem && !main_task->mm) {
+ pr_err("no mm to lock mmap_lock\n");
+ return -EINVAL;
+ }
+
+ if (test_file_path[0]) {
+ test_file = filp_open(test_file_path, O_RDONLY, 0);
+ if (IS_ERR(test_file)) {
+ pr_err("failed to open %s: %ld\n", test_file_path, PTR_ERR(test_file));
+ return PTR_ERR(test_file);
+ }
+ test_inode = file_inode(test_file);
+ } else if (test_lock_inode ||
+ test_lock_mapping ||
+ test_lock_sb_umount) {
+ pr_err("no file to lock\n");
+ return -EINVAL;
+ }
+
+ if (test_lock_inode && test_inode)
+ lock_rwsem_ptr = (unsigned long)&test_inode->i_rwsem;
+
+ if (test_lock_mapping && test_file && test_file->f_mapping)
+ lock_rwsem_ptr = (unsigned long)&test_file->f_mapping->i_mmap_rwsem;
+
+ if (test_lock_sb_umount && test_inode)
+ lock_rwsem_ptr = (unsigned long)&test_inode->i_sb->s_umount;
+
+ pr_notice("START pid=%d time=%u +%u ns cooldown=%u +%u ns iterations=%u state=%s %s%s%s%s%s%s%s%s%s%s%s\n",
+ main_task->pid, time_secs, time_nsecs,
+ cooldown_secs, cooldown_nsecs, iterations, state,
+ all_cpus ? "all_cpus " : "",
+ iowait ? "iowait " : "",
+ test_disable_irq ? "disable_irq " : "",
+ disable_softirq ? "disable_softirq " : "",
+ disable_preempt ? "disable_preempt " : "",
+ lock_rcu ? "lock_rcu " : "",
+ lock_read ? "lock_read " : "",
+ touch_softlockup ? "touch_softlockup " : "",
+ touch_hardlockup ? "touch_hardlockup " : "",
+ call_cond_resched ? "call_cond_resched " : "",
+ reacquire_locks ? "reacquire_locks " : "");
+
+ if (alloc_pages_nr)
+ pr_notice("ALLOCATE PAGES nr=%u order=%u gfp=%pGg %s\n",
+ alloc_pages_nr, alloc_pages_order, &alloc_pages_gfp,
+ reallocate_pages ? "reallocate_pages " : "");
+
+ if (all_cpus) {
+ unsigned int cpu;
+
+ cpus_read_lock();
+
+ preempt_disable();
+ master_cpu = smp_processor_id();
+ for_each_online_cpu(cpu) {
+ INIT_WORK(per_cpu_ptr(&test_works, cpu), test_work_fn);
+ queue_work_on(cpu, system_highpri_wq,
+ per_cpu_ptr(&test_works, cpu));
+ }
+ preempt_enable();
+
+ for_each_online_cpu(cpu)
+ flush_work(per_cpu_ptr(&test_works, cpu));
+
+ cpus_read_unlock();
+ } else {
+ test_lockup(true);
+ }
+
+ if (measure_lock_wait)
+ pr_notice("Maximum lock wait: %lld ns\n",
+ atomic64_read(&max_lock_wait));
+
+ if (alloc_pages_nr)
+ pr_notice("Page allocation failed %u times\n",
+ atomic_read(&alloc_pages_failed));
+
+ pr_notice("FINISH in %llu ns\n", local_clock() - test_start);
+
+ if (test_file)
+ fput(test_file);
+
+ if (signal_pending(main_task))
+ return -EINTR;
+
+ return -EAGAIN;
+}
+module_init(test_lockup_init);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Konstantin Khlebnikov <khlebnikov@yandex-team.ru>");
+MODULE_DESCRIPTION("Test module to generate lockups");
diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c
new file mode 100644
index 000000000..464eeb90d
--- /dev/null
+++ b/lib/test_maple_tree.c
@@ -0,0 +1,3878 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * test_maple_tree.c: Test the maple tree API
+ * Copyright (c) 2018-2022 Oracle Corporation
+ * Author: Liam R. Howlett <Liam.Howlett@Oracle.com>
+ *
+ * Tests that require only the interface of the tree.
+ */
+
+#include <linux/maple_tree.h>
+#include <linux/module.h>
+#include <linux/rwsem.h>
+
+#define MTREE_ALLOC_MAX 0x2000000000000UL
+#define CONFIG_MAPLE_SEARCH
+#define MAPLE_32BIT (MAPLE_NODE_SLOTS > 31)
+
+#ifndef CONFIG_DEBUG_MAPLE_TREE
+#define mt_dump(mt, fmt) do {} while (0)
+#define mt_validate(mt) do {} while (0)
+#define mt_cache_shrink() do {} while (0)
+#define mas_dump(mas) do {} while (0)
+#define mas_wr_dump(mas) do {} while (0)
+atomic_t maple_tree_tests_run;
+atomic_t maple_tree_tests_passed;
+#undef MT_BUG_ON
+
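+/*
+ * Without CONFIG_DEBUG_MAPLE_TREE, MT_BUG_ON() only keeps score: failures
+ * are logged together with the running pass/run totals instead of halting
+ * the kernel.
+ */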
+#define MT_BUG_ON(__tree, __x) do { \
+ atomic_inc(&maple_tree_tests_run); \
+ if (__x) { \
+ pr_info("BUG at %s:%d (%u)\n", \
+ __func__, __LINE__, __x); \
+ pr_info("Pass: %u Run:%u\n", \
+ atomic_read(&maple_tree_tests_passed), \
+ atomic_read(&maple_tree_tests_run)); \
+ } else { \
+ atomic_inc(&maple_tree_tests_passed); \
+ } \
+} while (0)
+#endif
+
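+/*
+ * Defining one of the BENCH_* macros below builds a micro-benchmark of the
+ * named operation in place of the normal functional test pass.
+ */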
+/* #define BENCH_SLOT_STORE */
+/* #define BENCH_NODE_STORE */
+/* #define BENCH_AWALK */
+/* #define BENCH_WALK */
+/* #define BENCH_MT_FOR_EACH */
+/* #define BENCH_FORK */
+/* #define BENCH_MAS_FOR_EACH */
+/* #define BENCH_MAS_PREV */
+
+#ifdef __KERNEL__
+#define mt_set_non_kernel(x) do {} while (0)
+#define mt_zero_nr_tallocated(x) do {} while (0)
+#else
+#define cond_resched() do {} while (0)
+#endif
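+
+/*
+ * Thin wrappers around the mtree_* API.  The index helpers store
+ * xa_mk_value(index) so that a later load can be checked against the
+ * index the entry was stored at.
+ */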
+static int __init mtree_insert_index(struct maple_tree *mt,
+ unsigned long index, gfp_t gfp)
+{
+ return mtree_insert(mt, index, xa_mk_value(index & LONG_MAX), gfp);
+}
+
+static void __init mtree_erase_index(struct maple_tree *mt, unsigned long index)
+{
+ MT_BUG_ON(mt, mtree_erase(mt, index) != xa_mk_value(index & LONG_MAX));
+ MT_BUG_ON(mt, mtree_load(mt, index) != NULL);
+}
+
+static int __init mtree_test_insert(struct maple_tree *mt, unsigned long index,
+ void *ptr)
+{
+ return mtree_insert(mt, index, ptr, GFP_KERNEL);
+}
+
+static int __init mtree_test_store_range(struct maple_tree *mt,
+ unsigned long start, unsigned long end, void *ptr)
+{
+ return mtree_store_range(mt, start, end, ptr, GFP_KERNEL);
+}
+
+static int __init mtree_test_store(struct maple_tree *mt, unsigned long start,
+ void *ptr)
+{
+ return mtree_test_store_range(mt, start, start, ptr);
+}
+
+static int __init mtree_test_insert_range(struct maple_tree *mt,
+ unsigned long start, unsigned long end, void *ptr)
+{
+ return mtree_insert_range(mt, start, end, ptr, GFP_KERNEL);
+}
+
+static void __init *mtree_test_load(struct maple_tree *mt, unsigned long index)
+{
+ return mtree_load(mt, index);
+}
+
+static void __init *mtree_test_erase(struct maple_tree *mt, unsigned long index)
+{
+ return mtree_erase(mt, index);
+}
+
+#if defined(CONFIG_64BIT)
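+/*
+ * Request a gap of @size within [start, end] and verify both the return
+ * code (@eret) and, on success, the base address written back (@expected).
+ */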
+static noinline void __init check_mtree_alloc_range(struct maple_tree *mt,
+ unsigned long start, unsigned long end, unsigned long size,
+ unsigned long expected, int eret, void *ptr)
+{
+
+ unsigned long result = expected + 1;
+ int ret;
+
+ ret = mtree_alloc_range(mt, &result, ptr, size, start, end,
+ GFP_KERNEL);
+ MT_BUG_ON(mt, ret != eret);
+ if (ret)
+ return;
+
+ MT_BUG_ON(mt, result != expected);
+}
+
+static noinline void __init check_mtree_alloc_rrange(struct maple_tree *mt,
+ unsigned long start, unsigned long end, unsigned long size,
+ unsigned long expected, int eret, void *ptr)
+{
+
+ unsigned long result = expected + 1;
+ int ret;
+
+ ret = mtree_alloc_rrange(mt, &result, ptr, size, start, end,
+ GFP_KERNEL);
+ MT_BUG_ON(mt, ret != eret);
+ if (ret)
+ return;
+
+ MT_BUG_ON(mt, result != expected);
+}
+#endif
+
+static noinline void __init check_load(struct maple_tree *mt,
+ unsigned long index, void *ptr)
+{
+ void *ret = mtree_test_load(mt, index);
+
+ if (ret != ptr)
+ pr_err("Load %lu returned %p expect %p\n", index, ret, ptr);
+ MT_BUG_ON(mt, ret != ptr);
+}
+
+static noinline void __init check_store_range(struct maple_tree *mt,
+ unsigned long start, unsigned long end, void *ptr, int expected)
+{
+ int ret = -EINVAL;
+ unsigned long i;
+
+ ret = mtree_test_store_range(mt, start, end, ptr);
+ MT_BUG_ON(mt, ret != expected);
+
+ if (ret)
+ return;
+
+ for (i = start; i <= end; i++)
+ check_load(mt, i, ptr);
+}
+
+static noinline void __init check_insert_range(struct maple_tree *mt,
+ unsigned long start, unsigned long end, void *ptr, int expected)
+{
+ int ret = -EINVAL;
+ unsigned long i;
+
+ ret = mtree_test_insert_range(mt, start, end, ptr);
+ MT_BUG_ON(mt, ret != expected);
+
+ if (ret)
+ return;
+
+ for (i = start; i <= end; i++)
+ check_load(mt, i, ptr);
+}
+
+static noinline void __init check_insert(struct maple_tree *mt,
+ unsigned long index, void *ptr)
+{
+ int ret = -EINVAL;
+
+ ret = mtree_test_insert(mt, index, ptr);
+ MT_BUG_ON(mt, ret != 0);
+}
+
+static noinline void __init check_dup_insert(struct maple_tree *mt,
+ unsigned long index, void *ptr)
+{
+ int ret = -EINVAL;
+
+ ret = mtree_test_insert(mt, index, ptr);
+ MT_BUG_ON(mt, ret != -EEXIST);
+}
+
+static noinline void __init check_index_load(struct maple_tree *mt,
+ unsigned long index)
+{
+ return check_load(mt, index, xa_mk_value(index & LONG_MAX));
+}
+
+static inline __init int not_empty(struct maple_node *node)
+{
+ int i;
+
+ if (node->parent)
+ return 1;
+
+ for (i = 0; i < ARRAY_SIZE(node->slot); i++)
+ if (node->slot[i])
+ return 1;
+
+ return 0;
+}
+
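+/*
+ * Fill the tree from max down to 1, verifying every existing entry after
+ * each insert; toggling RCU mode along the way must not change the height.
+ */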
+static noinline void __init check_rev_seq(struct maple_tree *mt,
+ unsigned long max, bool verbose)
+{
+ unsigned long i = max, j;
+
+ MT_BUG_ON(mt, !mtree_empty(mt));
+
+ mt_zero_nr_tallocated();
+ while (i) {
+ MT_BUG_ON(mt, mtree_insert_index(mt, i, GFP_KERNEL));
+ for (j = i; j <= max; j++)
+ check_index_load(mt, j);
+
+ check_load(mt, i - 1, NULL);
+ mt_set_in_rcu(mt);
+ MT_BUG_ON(mt, !mt_height(mt));
+ mt_clear_in_rcu(mt);
+ MT_BUG_ON(mt, !mt_height(mt));
+ i--;
+ }
+ check_load(mt, max + 1, NULL);
+
+#ifndef __KERNEL__
+ if (verbose) {
+ rcu_barrier();
+ mt_dump(mt, mt_dump_dec);
+ pr_info(" %s test of 0-%lu %luK in %d active (%d total)\n",
+ __func__, max, mt_get_alloc_size()/1024, mt_nr_allocated(),
+ mt_nr_tallocated());
+ }
+#endif
+}
+
+static noinline void __init check_seq(struct maple_tree *mt, unsigned long max,
+ bool verbose)
+{
+ unsigned long i, j;
+
+ MT_BUG_ON(mt, !mtree_empty(mt));
+
+ mt_zero_nr_tallocated();
+ for (i = 0; i <= max; i++) {
+ MT_BUG_ON(mt, mtree_insert_index(mt, i, GFP_KERNEL));
+ for (j = 0; j <= i; j++)
+ check_index_load(mt, j);
+
+ if (i)
+ MT_BUG_ON(mt, !mt_height(mt));
+ check_load(mt, i + 1, NULL);
+ }
+
+#ifndef __KERNEL__
+ if (verbose) {
+ rcu_barrier();
+ mt_dump(mt, mt_dump_dec);
+ pr_info(" seq test of 0-%lu %luK in %d active (%d total)\n",
+ max, mt_get_alloc_size()/1024, mt_nr_allocated(),
+ mt_nr_tallocated());
+ }
+#endif
+}
+
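+/*
+ * Insert halving indices from ~4G down to 4096; after each insert the
+ * stored indices must load back while their immediate neighbours stay
+ * empty.
+ */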
+static noinline void __init check_lb_not_empty(struct maple_tree *mt)
+{
+ unsigned long i, j;
+ unsigned long huge = 4000UL * 1000 * 1000;
+
+ i = huge;
+ while (i > 4096) {
+ check_insert(mt, i, (void *) i);
+ for (j = huge; j >= i; j /= 2) {
+ check_load(mt, j-1, NULL);
+ check_load(mt, j, (void *) j);
+ check_load(mt, j+1, NULL);
+ }
+ i /= 2;
+ }
+ mtree_destroy(mt);
+}
+
+static noinline void __init check_lower_bound_split(struct maple_tree *mt)
+{
+ MT_BUG_ON(mt, !mtree_empty(mt));
+ check_lb_not_empty(mt);
+}
+
+static noinline void __init check_upper_bound_split(struct maple_tree *mt)
+{
+ unsigned long i, j;
+ unsigned long huge;
+
+ MT_BUG_ON(mt, !mtree_empty(mt));
+
+ if (MAPLE_32BIT)
+ huge = 2147483647UL;
+ else
+ huge = 4000UL * 1000 * 1000;
+
+ i = 4096;
+ while (i < huge) {
+ check_insert(mt, i, (void *) i);
+ for (j = i; j >= huge; j *= 2) {
+ check_load(mt, j-1, NULL);
+ check_load(mt, j, (void *) j);
+ check_load(mt, j+1, NULL);
+ }
+ i *= 2;
+ }
+ mtree_destroy(mt);
+}
+
+static noinline void __init check_mid_split(struct maple_tree *mt)
+{
+ unsigned long huge = 8000UL * 1000 * 1000;
+
+ check_insert(mt, huge, (void *) huge);
+ check_insert(mt, 0, xa_mk_value(0));
+ check_lb_not_empty(mt);
+}
+
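+/*
+ * Exercise mas_find_rev(): walk backwards from the mas position down to
+ * a minimum index, across both present entries and gaps.
+ */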
+static noinline void __init check_rev_find(struct maple_tree *mt)
+{
+ int i, nr_entries = 200;
+ void *val;
+ MA_STATE(mas, mt, 0, 0);
+
+ for (i = 0; i <= nr_entries; i++)
+ mtree_store_range(mt, i*10, i*10 + 5,
+ xa_mk_value(i), GFP_KERNEL);
+
+ rcu_read_lock();
+ mas_set(&mas, 1000);
+ val = mas_find_rev(&mas, 1000);
+ MT_BUG_ON(mt, val != xa_mk_value(100));
+ val = mas_find_rev(&mas, 1000);
+ MT_BUG_ON(mt, val != NULL);
+
+ mas_set(&mas, 999);
+ val = mas_find_rev(&mas, 997);
+ MT_BUG_ON(mt, val != NULL);
+
+ mas_set(&mas, 1000);
+ val = mas_find_rev(&mas, 900);
+ MT_BUG_ON(mt, val != xa_mk_value(100));
+ val = mas_find_rev(&mas, 900);
+ MT_BUG_ON(mt, val != xa_mk_value(99));
+
+ mas_set(&mas, 20);
+ val = mas_find_rev(&mas, 0);
+ MT_BUG_ON(mt, val != xa_mk_value(2));
+ val = mas_find_rev(&mas, 0);
+ MT_BUG_ON(mt, val != xa_mk_value(1));
+ val = mas_find_rev(&mas, 0);
+ MT_BUG_ON(mt, val != xa_mk_value(0));
+ val = mas_find_rev(&mas, 0);
+ MT_BUG_ON(mt, val != NULL);
+ rcu_read_unlock();
+}
+
+static noinline void __init check_find(struct maple_tree *mt)
+{
+ unsigned long val = 0;
+ unsigned long count;
+ unsigned long max;
+ unsigned long top;
+ unsigned long last = 0, index = 0;
+ void *entry, *entry2;
+
+ MA_STATE(mas, mt, 0, 0);
+
+ /* Insert 0. */
+ MT_BUG_ON(mt, mtree_insert_index(mt, val++, GFP_KERNEL));
+
+#if defined(CONFIG_64BIT)
+ top = 4398046511104UL;
+#else
+ top = ULONG_MAX;
+#endif
+
+	if (MAPLE_32BIT)
+		count = 15;
+	else
+		count = 20;
+
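+	/*
+	 * Populate indices 1, 4, 16, ... (powers of four); index 64 gets
+	 * XA_ZERO_ENTRY instead of a value entry.
+	 */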
+ for (int i = 0; i <= count; i++) {
+ if (val != 64)
+ MT_BUG_ON(mt, mtree_insert_index(mt, val, GFP_KERNEL));
+ else
+ MT_BUG_ON(mt, mtree_insert(mt, val,
+ XA_ZERO_ENTRY, GFP_KERNEL));
+
+ val <<= 2;
+ }
+
+ val = 0;
+ mas_set(&mas, val);
+ mas_lock(&mas);
+ while ((entry = mas_find(&mas, 268435456)) != NULL) {
+ if (val != 64)
+ MT_BUG_ON(mt, xa_mk_value(val) != entry);
+ else
+ MT_BUG_ON(mt, entry != XA_ZERO_ENTRY);
+
+ val <<= 2;
+ /* For zero check. */
+ if (!val)
+ val = 1;
+ }
+ mas_unlock(&mas);
+
+ val = 0;
+ mas_set(&mas, val);
+ mas_lock(&mas);
+ mas_for_each(&mas, entry, ULONG_MAX) {
+ if (val != 64)
+ MT_BUG_ON(mt, xa_mk_value(val) != entry);
+ else
+ MT_BUG_ON(mt, entry != XA_ZERO_ENTRY);
+ val <<= 2;
+ /* For zero check. */
+ if (!val)
+ val = 1;
+ }
+ mas_unlock(&mas);
+
+ /* Test mas_pause */
+ val = 0;
+ mas_set(&mas, val);
+ mas_lock(&mas);
+ mas_for_each(&mas, entry, ULONG_MAX) {
+ if (val != 64)
+ MT_BUG_ON(mt, xa_mk_value(val) != entry);
+ else
+ MT_BUG_ON(mt, entry != XA_ZERO_ENTRY);
+ val <<= 2;
+ /* For zero check. */
+ if (!val)
+ val = 1;
+
+ mas_pause(&mas);
+ mas_unlock(&mas);
+ mas_lock(&mas);
+ }
+ mas_unlock(&mas);
+
+ val = 0;
+ max = 300; /* A value big enough to include XA_ZERO_ENTRY at 64. */
+ mt_for_each(mt, entry, index, max) {
+ MT_BUG_ON(mt, xa_mk_value(val) != entry);
+ val <<= 2;
+ if (val == 64) /* Skip zero entry. */
+ val <<= 2;
+ /* For zero check. */
+ if (!val)
+ val = 1;
+ }
+
+ val = 0;
+ max = 0;
+ index = 0;
+ MT_BUG_ON(mt, mtree_insert_index(mt, ULONG_MAX, GFP_KERNEL));
+ mt_for_each(mt, entry, index, ULONG_MAX) {
+ if (val == top)
+ MT_BUG_ON(mt, entry != xa_mk_value(LONG_MAX));
+ else
+ MT_BUG_ON(mt, xa_mk_value(val) != entry);
+
+ /* Workaround for 32bit */
+ if ((val << 2) < val)
+ val = ULONG_MAX;
+ else
+ val <<= 2;
+
+ if (val == 64) /* Skip zero entry. */
+ val <<= 2;
+ /* For zero check. */
+ if (!val)
+ val = 1;
+ max++;
+ MT_BUG_ON(mt, max > 25);
+ }
+ mtree_erase_index(mt, ULONG_MAX);
+
+ mas_reset(&mas);
+ index = 17;
+ entry = mt_find(mt, &index, 512);
+ MT_BUG_ON(mt, xa_mk_value(256) != entry);
+
+ mas_reset(&mas);
+ index = 17;
+ entry = mt_find(mt, &index, 20);
+ MT_BUG_ON(mt, entry != NULL);
+
+	/* Range check. */
+ /* Insert ULONG_MAX */
+ MT_BUG_ON(mt, mtree_insert_index(mt, ULONG_MAX, GFP_KERNEL));
+
+ val = 0;
+ mas_set(&mas, 0);
+ mas_lock(&mas);
+ mas_for_each(&mas, entry, ULONG_MAX) {
+ if (val == 64)
+ MT_BUG_ON(mt, entry != XA_ZERO_ENTRY);
+ else if (val == top)
+ MT_BUG_ON(mt, entry != xa_mk_value(LONG_MAX));
+ else
+ MT_BUG_ON(mt, xa_mk_value(val) != entry);
+
+ /* Workaround for 32bit */
+ if ((val << 2) < val)
+ val = ULONG_MAX;
+ else
+ val <<= 2;
+
+ /* For zero check. */
+ if (!val)
+ val = 1;
+ mas_pause(&mas);
+ mas_unlock(&mas);
+ mas_lock(&mas);
+ }
+ mas_unlock(&mas);
+
+ mas_set(&mas, 1048576);
+ mas_lock(&mas);
+ entry = mas_find(&mas, 1048576);
+ mas_unlock(&mas);
+ MT_BUG_ON(mas.tree, entry == NULL);
+
+ /*
+ * Find last value.
+ * 1. get the expected value, leveraging the existence of an end entry
+ * 2. delete end entry
+	 * 3. find the last value by searching for ULONG_MAX and then using
+	 *    prev
+ */
+ /* First, get the expected result. */
+ mas_lock(&mas);
+ mas_reset(&mas);
+ mas.index = ULONG_MAX; /* start at max.. */
+ entry = mas_find(&mas, ULONG_MAX);
+ entry = mas_prev(&mas, 0);
+ index = mas.index;
+ last = mas.last;
+
+ /* Erase the last entry. */
+ mas_reset(&mas);
+ mas.index = ULONG_MAX;
+ mas.last = ULONG_MAX;
+ mas_erase(&mas);
+
+ /* Get the previous value from MAS_START */
+ mas_reset(&mas);
+ entry2 = mas_prev(&mas, 0);
+
+ /* Check results. */
+ MT_BUG_ON(mt, entry != entry2);
+ MT_BUG_ON(mt, index != mas.index);
+ MT_BUG_ON(mt, last != mas.last);
+
+ mas.node = MAS_NONE;
+ mas.index = ULONG_MAX;
+ mas.last = ULONG_MAX;
+ entry2 = mas_prev(&mas, 0);
+ MT_BUG_ON(mt, entry != entry2);
+
+ mas_set(&mas, 0);
+ MT_BUG_ON(mt, mas_prev(&mas, 0) != NULL);
+
+ mas_unlock(&mas);
+ mtree_destroy(mt);
+}
+
+static noinline void __init check_find_2(struct maple_tree *mt)
+{
+ unsigned long i, j;
+ void *entry;
+
+ MA_STATE(mas, mt, 0, 0);
+ rcu_read_lock();
+ mas_for_each(&mas, entry, ULONG_MAX)
+ MT_BUG_ON(mt, true);
+ rcu_read_unlock();
+
+ for (i = 0; i < 256; i++) {
+ mtree_insert_index(mt, i, GFP_KERNEL);
+ j = 0;
+ mas_set(&mas, 0);
+ rcu_read_lock();
+ mas_for_each(&mas, entry, ULONG_MAX) {
+ MT_BUG_ON(mt, entry != xa_mk_value(j));
+ j++;
+ }
+ rcu_read_unlock();
+ MT_BUG_ON(mt, j != i + 1);
+ }
+
+ for (i = 0; i < 256; i++) {
+ mtree_erase_index(mt, i);
+ j = i + 1;
+ mas_set(&mas, 0);
+ rcu_read_lock();
+ mas_for_each(&mas, entry, ULONG_MAX) {
+ if (xa_is_zero(entry))
+ continue;
+
+ MT_BUG_ON(mt, entry != xa_mk_value(j));
+ j++;
+ }
+ rcu_read_unlock();
+ MT_BUG_ON(mt, j != 256);
+ }
+
+	/* MT_BUG_ON(mt, !mtree_empty(mt)); */
+}
+
+#if defined(CONFIG_64BIT)
+static noinline void __init check_alloc_rev_range(struct maple_tree *mt)
+{
+ /*
+ * Generated by:
+ * cat /proc/self/maps | awk '{print $1}'|
+ * awk -F "-" '{printf "0x%s, 0x%s, ", $1, $2}'
+ */
+
+ static const unsigned long range[] = {
+		/* Inclusive, Exclusive. */
+ 0x565234af2000, 0x565234af4000,
+ 0x565234af4000, 0x565234af9000,
+ 0x565234af9000, 0x565234afb000,
+ 0x565234afc000, 0x565234afd000,
+ 0x565234afd000, 0x565234afe000,
+ 0x565235def000, 0x565235e10000,
+ 0x7f36d4bfd000, 0x7f36d4ee2000,
+ 0x7f36d4ee2000, 0x7f36d4f04000,
+ 0x7f36d4f04000, 0x7f36d504c000,
+ 0x7f36d504c000, 0x7f36d5098000,
+ 0x7f36d5098000, 0x7f36d5099000,
+ 0x7f36d5099000, 0x7f36d509d000,
+ 0x7f36d509d000, 0x7f36d509f000,
+ 0x7f36d509f000, 0x7f36d50a5000,
+ 0x7f36d50b9000, 0x7f36d50db000,
+ 0x7f36d50db000, 0x7f36d50dc000,
+ 0x7f36d50dc000, 0x7f36d50fa000,
+ 0x7f36d50fa000, 0x7f36d5102000,
+ 0x7f36d5102000, 0x7f36d5103000,
+ 0x7f36d5103000, 0x7f36d5104000,
+ 0x7f36d5104000, 0x7f36d5105000,
+ 0x7fff5876b000, 0x7fff5878d000,
+ 0x7fff5878e000, 0x7fff58791000,
+ 0x7fff58791000, 0x7fff58793000,
+ };
+
+ static const unsigned long holes[] = {
+ /*
+ * Note: start of hole is INCLUSIVE
+ * end of hole is EXCLUSIVE
+ * (opposite of the above table.)
+ * Start of hole, end of hole, size of hole (+1)
+ */
+ 0x565234afb000, 0x565234afc000, 0x1000,
+ 0x565234afe000, 0x565235def000, 0x12F1000,
+ 0x565235e10000, 0x7f36d4bfd000, 0x28E49EDED000,
+ };
+
+	/*
+	 * req_range consists of 5 values.
+	 * 1. min index
+	 * 2. max index
+	 * 3. size
+	 * 4. number that should be returned.
+	 * 5. return value
+	 */
+ static const unsigned long req_range[] = {
+ 0x565234af9000, /* Min */
+ 0x7fff58791000, /* Max */
+ 0x1000, /* Size */
+ 0x7fff5878d << 12, /* First rev hole of size 0x1000 */
+ 0, /* Return value success. */
+
+ 0x0, /* Min */
+ 0x565234AF0 << 12, /* Max */
+ 0x3000, /* Size */
+ 0x565234AEE << 12, /* max - 3. */
+ 0, /* Return value success. */
+
+ 0x0, /* Min */
+ -1, /* Max */
+ 0x1000, /* Size */
+ 562949953421311 << 12,/* First rev hole of size 0x1000 */
+ 0, /* Return value success. */
+
+ 0x0, /* Min */
+ 0x7F36D5109 << 12, /* Max */
+ 0x4000, /* Size */
+ 0x7F36D5106 << 12, /* First rev hole of size 0x4000 */
+ 0, /* Return value success. */
+
+ /* Ascend test. */
+ 0x0,
+ 34148798628 << 12,
+ 19 << 12,
+ 34148797418 << 12,
+ 0x0,
+
+ /* Too big test. */
+ 0x0,
+ 18446744073709551615UL,
+ 562915594369134UL << 12,
+ 0x0,
+ -EBUSY,
+
+ /* Single space test. */
+ 34148798725 << 12,
+ 34148798725 << 12,
+ 1 << 12,
+ 34148798725 << 12,
+ 0,
+ };
+
+ int i, range_count = ARRAY_SIZE(range);
+ int req_range_count = ARRAY_SIZE(req_range);
+ unsigned long min = 0;
+
+ MA_STATE(mas, mt, 0, 0);
+
+ mtree_store_range(mt, MTREE_ALLOC_MAX, ULONG_MAX, XA_ZERO_ENTRY,
+ GFP_KERNEL);
+#define DEBUG_REV_RANGE 0
+ for (i = 0; i < range_count; i += 2) {
+ /* Inclusive, Inclusive (with the -1) */
+
+#if DEBUG_REV_RANGE
+ pr_debug("\t%s: Insert %lu-%lu\n", __func__, range[i] >> 12,
+ (range[i + 1] >> 12) - 1);
+#endif
+ check_insert_range(mt, range[i] >> 12, (range[i + 1] >> 12) - 1,
+ xa_mk_value(range[i] >> 12), 0);
+ mt_validate(mt);
+ }
+
+ mas_lock(&mas);
+ for (i = 0; i < ARRAY_SIZE(holes); i += 3) {
+#if DEBUG_REV_RANGE
+ pr_debug("Search from %lu-%lu for gap %lu should be at %lu\n",
+ min, holes[i+1]>>12, holes[i+2]>>12,
+ holes[i] >> 12);
+#endif
+ MT_BUG_ON(mt, mas_empty_area_rev(&mas, min,
+ holes[i+1] >> 12,
+ holes[i+2] >> 12));
+#if DEBUG_REV_RANGE
+ pr_debug("Found %lu %lu\n", mas.index, mas.last);
+ pr_debug("gap %lu %lu\n", (holes[i] >> 12),
+ (holes[i+1] >> 12));
+#endif
+ MT_BUG_ON(mt, mas.last + 1 != (holes[i+1] >> 12));
+ MT_BUG_ON(mt, mas.index != (holes[i+1] >> 12) - (holes[i+2] >> 12));
+ min = holes[i+1] >> 12;
+ mas_reset(&mas);
+ }
+
+ mas_unlock(&mas);
+ for (i = 0; i < req_range_count; i += 5) {
+#if DEBUG_REV_RANGE
+ pr_debug("\tReverse request %d between %lu-%lu size %lu, should get %lu\n",
+ i, req_range[i] >> 12,
+ (req_range[i + 1] >> 12),
+ req_range[i+2] >> 12,
+ req_range[i+3] >> 12);
+#endif
+ check_mtree_alloc_rrange(mt,
+ req_range[i] >> 12, /* start */
+ req_range[i+1] >> 12, /* end */
+ req_range[i+2] >> 12, /* size */
+ req_range[i+3] >> 12, /* expected address */
+ req_range[i+4], /* expected return */
+ xa_mk_value(req_range[i] >> 12)); /* pointer */
+ mt_validate(mt);
+ }
+
+ mt_set_non_kernel(1);
+ mtree_erase(mt, 34148798727); /* create a deleted range. */
+ mtree_erase(mt, 34148798725);
+ check_mtree_alloc_rrange(mt, 0, 34359052173, 210253414,
+ 34148798725, 0, mt);
+
+ mtree_destroy(mt);
+}
+
+static noinline void __init check_alloc_range(struct maple_tree *mt)
+{
+ /*
+ * Generated by:
+ * cat /proc/self/maps|awk '{print $1}'|
+ * awk -F "-" '{printf "0x%s, 0x%s, ", $1, $2}'
+ */
+
+ static const unsigned long range[] = {
+		/* Inclusive, Exclusive. */
+ 0x565234af2000, 0x565234af4000,
+ 0x565234af4000, 0x565234af9000,
+ 0x565234af9000, 0x565234afb000,
+ 0x565234afc000, 0x565234afd000,
+ 0x565234afd000, 0x565234afe000,
+ 0x565235def000, 0x565235e10000,
+ 0x7f36d4bfd000, 0x7f36d4ee2000,
+ 0x7f36d4ee2000, 0x7f36d4f04000,
+ 0x7f36d4f04000, 0x7f36d504c000,
+ 0x7f36d504c000, 0x7f36d5098000,
+ 0x7f36d5098000, 0x7f36d5099000,
+ 0x7f36d5099000, 0x7f36d509d000,
+ 0x7f36d509d000, 0x7f36d509f000,
+ 0x7f36d509f000, 0x7f36d50a5000,
+ 0x7f36d50b9000, 0x7f36d50db000,
+ 0x7f36d50db000, 0x7f36d50dc000,
+ 0x7f36d50dc000, 0x7f36d50fa000,
+ 0x7f36d50fa000, 0x7f36d5102000,
+ 0x7f36d5102000, 0x7f36d5103000,
+ 0x7f36d5103000, 0x7f36d5104000,
+ 0x7f36d5104000, 0x7f36d5105000,
+ 0x7fff5876b000, 0x7fff5878d000,
+ 0x7fff5878e000, 0x7fff58791000,
+ 0x7fff58791000, 0x7fff58793000,
+ };
+ static const unsigned long holes[] = {
+ /* Start of hole, end of hole, size of hole (+1) */
+ 0x565234afb000, 0x565234afc000, 0x1000,
+ 0x565234afe000, 0x565235def000, 0x12F1000,
+ 0x565235e10000, 0x7f36d4bfd000, 0x28E49EDED000,
+ };
+
+	/*
+	 * req_range consists of 5 values.
+	 * 1. min index
+	 * 2. max index
+	 * 3. size
+	 * 4. number that should be returned.
+	 * 5. return value
+	 */
+ static const unsigned long req_range[] = {
+ 0x565234af9000, /* Min */
+ 0x7fff58791000, /* Max */
+ 0x1000, /* Size */
+		0x565234afb000, /* First hole in our data of size 0x1000. */
+ 0, /* Return value success. */
+
+ 0x0, /* Min */
+ 0x7fff58791000, /* Max */
+ 0x1F00, /* Size */
+		0x0, /* First hole large enough for the 0x1F00 request. */
+ 0, /* Return value success. */
+
+ /* Test ascend. */
+ 34148797436 << 12, /* Min */
+ 0x7fff587AF000, /* Max */
+ 0x3000, /* Size */
+ 34148798629 << 12, /* Expected location */
+ 0, /* Return value success. */
+
+ /* Test failing. */
+ 34148798623 << 12, /* Min */
+ 34148798683 << 12, /* Max */
+ 0x15000, /* Size */
+ 0, /* Expected location */
+ -EBUSY, /* Return value failed. */
+
+ /* Test filling entire gap. */
+ 34148798623 << 12, /* Min */
+ 0x7fff587AF000, /* Max */
+ 0x10000, /* Size */
+ 34148798632 << 12, /* Expected location */
+ 0, /* Return value success. */
+
+ /* Test walking off the end of root. */
+ 0, /* Min */
+ -1, /* Max */
+ -1, /* Size */
+ 0, /* Expected location */
+ -EBUSY, /* Return value failure. */
+
+ /* Test looking for too large a hole across entire range. */
+ 0, /* Min */
+ -1, /* Max */
+ 4503599618982063UL << 12, /* Size */
+ 34359052178 << 12, /* Expected location */
+ -EBUSY, /* Return failure. */
+
+ /* Test a single entry */
+ 34148798648 << 12, /* Min */
+ 34148798648 << 12, /* Max */
+ 4096, /* Size of 1 */
+ 34148798648 << 12, /* Location is the same as min/max */
+ 0, /* Success */
+ };
+ int i, range_count = ARRAY_SIZE(range);
+ int req_range_count = ARRAY_SIZE(req_range);
+ unsigned long min = 0x565234af2000;
+ MA_STATE(mas, mt, 0, 0);
+
+ mtree_store_range(mt, MTREE_ALLOC_MAX, ULONG_MAX, XA_ZERO_ENTRY,
+ GFP_KERNEL);
+ for (i = 0; i < range_count; i += 2) {
+#define DEBUG_ALLOC_RANGE 0
+#if DEBUG_ALLOC_RANGE
+ pr_debug("\tInsert %lu-%lu\n", range[i] >> 12,
+ (range[i + 1] >> 12) - 1);
+ mt_dump(mt, mt_dump_hex);
+#endif
+ check_insert_range(mt, range[i] >> 12, (range[i + 1] >> 12) - 1,
+ xa_mk_value(range[i] >> 12), 0);
+ mt_validate(mt);
+ }
+
+ mas_lock(&mas);
+ for (i = 0; i < ARRAY_SIZE(holes); i += 3) {
+
+#if DEBUG_ALLOC_RANGE
+ pr_debug("\tGet empty %lu-%lu size %lu (%lx-%lx)\n", min >> 12,
+ holes[i+1] >> 12, holes[i+2] >> 12,
+ min, holes[i+1]);
+#endif
+ MT_BUG_ON(mt, mas_empty_area(&mas, min >> 12,
+ holes[i+1] >> 12,
+ holes[i+2] >> 12));
+ MT_BUG_ON(mt, mas.index != holes[i] >> 12);
+ min = holes[i+1];
+ mas_reset(&mas);
+ }
+ mas_unlock(&mas);
+ for (i = 0; i < req_range_count; i += 5) {
+#if DEBUG_ALLOC_RANGE
+ pr_debug("\tTest %d: %lu-%lu size %lu expected %lu (%lu-%lu)\n",
+ i/5, req_range[i] >> 12, req_range[i + 1] >> 12,
+ req_range[i + 2] >> 12, req_range[i + 3] >> 12,
+ req_range[i], req_range[i+1]);
+#endif
+ check_mtree_alloc_range(mt,
+ req_range[i] >> 12, /* start */
+ req_range[i+1] >> 12, /* end */
+ req_range[i+2] >> 12, /* size */
+ req_range[i+3] >> 12, /* expected address */
+ req_range[i+4], /* expected return */
+ xa_mk_value(req_range[i] >> 12)); /* pointer */
+ mt_validate(mt);
+#if DEBUG_ALLOC_RANGE
+ mt_dump(mt, mt_dump_hex);
+#endif
+ }
+
+ mtree_destroy(mt);
+}
+#endif
+
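+/*
+ * Exercise range stores that overlap, split and merge existing entries,
+ * including overwrites that span multiple nodes and levels.
+ */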
+static noinline void __init check_ranges(struct maple_tree *mt)
+{
+ int i, val, val2;
+ static const unsigned long r[] = {
+ 10, 15,
+ 20, 25,
+ 17, 22, /* Overlaps previous range. */
+ 9, 1000, /* Huge. */
+ 100, 200,
+ 45, 168,
+ 118, 128,
+ };
+
+ MT_BUG_ON(mt, !mtree_empty(mt));
+ check_insert_range(mt, r[0], r[1], xa_mk_value(r[0]), 0);
+ check_insert_range(mt, r[2], r[3], xa_mk_value(r[2]), 0);
+ check_insert_range(mt, r[4], r[5], xa_mk_value(r[4]), -EEXIST);
+ MT_BUG_ON(mt, !mt_height(mt));
+ /* Store */
+ check_store_range(mt, r[4], r[5], xa_mk_value(r[4]), 0);
+ check_store_range(mt, r[6], r[7], xa_mk_value(r[6]), 0);
+ check_store_range(mt, r[8], r[9], xa_mk_value(r[8]), 0);
+ MT_BUG_ON(mt, !mt_height(mt));
+ mtree_destroy(mt);
+ MT_BUG_ON(mt, mt_height(mt));
+
+ check_seq(mt, 50, false);
+ mt_set_non_kernel(4);
+ check_store_range(mt, 5, 47, xa_mk_value(47), 0);
+ MT_BUG_ON(mt, !mt_height(mt));
+ mtree_destroy(mt);
+
+ /* Create tree of 1-100 */
+ check_seq(mt, 100, false);
+ /* Store 45-168 */
+ mt_set_non_kernel(10);
+ check_store_range(mt, r[10], r[11], xa_mk_value(r[10]), 0);
+ MT_BUG_ON(mt, !mt_height(mt));
+ mtree_destroy(mt);
+
+ /* Create tree of 1-200 */
+ check_seq(mt, 200, false);
+ /* Store 45-168 */
+ check_store_range(mt, r[10], r[11], xa_mk_value(r[10]), 0);
+ MT_BUG_ON(mt, !mt_height(mt));
+ mtree_destroy(mt);
+
+ check_seq(mt, 30, false);
+ check_store_range(mt, 6, 18, xa_mk_value(6), 0);
+ MT_BUG_ON(mt, !mt_height(mt));
+ mtree_destroy(mt);
+
+ /* Overwrite across multiple levels. */
+ /* Create tree of 1-400 */
+ check_seq(mt, 400, false);
+ mt_set_non_kernel(50);
+ /* Store 118-128 */
+ check_store_range(mt, r[12], r[13], xa_mk_value(r[12]), 0);
+ mt_set_non_kernel(50);
+ mtree_test_erase(mt, 140);
+ mtree_test_erase(mt, 141);
+ mtree_test_erase(mt, 142);
+ mtree_test_erase(mt, 143);
+ mtree_test_erase(mt, 130);
+ mtree_test_erase(mt, 131);
+ mtree_test_erase(mt, 132);
+ mtree_test_erase(mt, 133);
+ mtree_test_erase(mt, 134);
+ mtree_test_erase(mt, 135);
+ check_load(mt, r[12], xa_mk_value(r[12]));
+ check_load(mt, r[13], xa_mk_value(r[12]));
+ check_load(mt, r[13] - 1, xa_mk_value(r[12]));
+ check_load(mt, r[13] + 1, xa_mk_value(r[13] + 1));
+ check_load(mt, 135, NULL);
+ check_load(mt, 140, NULL);
+ mt_set_non_kernel(0);
+ MT_BUG_ON(mt, !mt_height(mt));
+ mtree_destroy(mt);
+
+ /* Overwrite multiple levels at the end of the tree (slot 7) */
+ mt_set_non_kernel(50);
+ check_seq(mt, 400, false);
+ check_store_range(mt, 353, 361, xa_mk_value(353), 0);
+ check_store_range(mt, 347, 352, xa_mk_value(347), 0);
+
+ check_load(mt, 346, xa_mk_value(346));
+ for (i = 347; i <= 352; i++)
+ check_load(mt, i, xa_mk_value(347));
+ for (i = 353; i <= 361; i++)
+ check_load(mt, i, xa_mk_value(353));
+ check_load(mt, 362, xa_mk_value(362));
+ mt_set_non_kernel(0);
+ MT_BUG_ON(mt, !mt_height(mt));
+ mtree_destroy(mt);
+
+ mt_set_non_kernel(50);
+ check_seq(mt, 400, false);
+ check_store_range(mt, 352, 364, NULL, 0);
+ check_store_range(mt, 351, 363, xa_mk_value(352), 0);
+ check_load(mt, 350, xa_mk_value(350));
+ check_load(mt, 351, xa_mk_value(352));
+ for (i = 352; i <= 363; i++)
+ check_load(mt, i, xa_mk_value(352));
+ check_load(mt, 364, NULL);
+ check_load(mt, 365, xa_mk_value(365));
+ mt_set_non_kernel(0);
+ MT_BUG_ON(mt, !mt_height(mt));
+ mtree_destroy(mt);
+
+ mt_set_non_kernel(5);
+ check_seq(mt, 400, false);
+ check_store_range(mt, 352, 364, NULL, 0);
+ check_store_range(mt, 351, 364, xa_mk_value(352), 0);
+ check_load(mt, 350, xa_mk_value(350));
+ check_load(mt, 351, xa_mk_value(352));
+ for (i = 352; i <= 364; i++)
+ check_load(mt, i, xa_mk_value(352));
+ check_load(mt, 365, xa_mk_value(365));
+ mt_set_non_kernel(0);
+ MT_BUG_ON(mt, !mt_height(mt));
+ mtree_destroy(mt);
+
+ mt_set_non_kernel(50);
+ check_seq(mt, 400, false);
+ check_store_range(mt, 362, 367, xa_mk_value(362), 0);
+ check_store_range(mt, 353, 361, xa_mk_value(353), 0);
+ mt_set_non_kernel(0);
+ mt_validate(mt);
+ MT_BUG_ON(mt, !mt_height(mt));
+ mtree_destroy(mt);
+ /*
+ * Interesting cases:
+ * 1. Overwrite the end of a node and end in the first entry of the next
+ * node.
+ * 2. Split a single range
+ * 3. Overwrite the start of a range
+ * 4. Overwrite the end of a range
+ * 5. Overwrite the entire range
+ * 6. Overwrite a range that causes multiple parent nodes to be
+ * combined
+ * 7. Overwrite a range that causes multiple parent nodes and part of
+ * root to be combined
+ * 8. Overwrite the whole tree
+ * 9. Try to overwrite the zero entry of an alloc tree.
+	 * 10. Write a range larger than a node's current pivot
+ */
+
+ mt_set_non_kernel(50);
+ for (i = 0; i <= 500; i++) {
+ val = i*5;
+ val2 = (i+1)*5;
+ check_store_range(mt, val, val2, xa_mk_value(val), 0);
+ }
+ check_store_range(mt, 2400, 2400, xa_mk_value(2400), 0);
+ check_store_range(mt, 2411, 2411, xa_mk_value(2411), 0);
+ check_store_range(mt, 2412, 2412, xa_mk_value(2412), 0);
+ check_store_range(mt, 2396, 2400, xa_mk_value(4052020), 0);
+ check_store_range(mt, 2402, 2402, xa_mk_value(2402), 0);
+ mtree_destroy(mt);
+ mt_set_non_kernel(0);
+
+ mt_set_non_kernel(50);
+ for (i = 0; i <= 500; i++) {
+ val = i*5;
+ val2 = (i+1)*5;
+ check_store_range(mt, val, val2, xa_mk_value(val), 0);
+ }
+ check_store_range(mt, 2422, 2422, xa_mk_value(2422), 0);
+ check_store_range(mt, 2424, 2424, xa_mk_value(2424), 0);
+ check_store_range(mt, 2425, 2425, xa_mk_value(2), 0);
+ check_store_range(mt, 2460, 2470, NULL, 0);
+ check_store_range(mt, 2435, 2460, xa_mk_value(2435), 0);
+ check_store_range(mt, 2461, 2470, xa_mk_value(2461), 0);
+ mt_set_non_kernel(0);
+ MT_BUG_ON(mt, !mt_height(mt));
+ mtree_destroy(mt);
+
+ /* Check in-place modifications */
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ /* Append to the start of last range */
+ mt_set_non_kernel(50);
+ for (i = 0; i <= 500; i++) {
+ val = i * 5 + 1;
+ val2 = val + 4;
+ check_store_range(mt, val, val2, xa_mk_value(val), 0);
+ }
+
+ /* Append to the last range without touching any boundaries */
+ for (i = 0; i < 10; i++) {
+ val = val2 + 5;
+ val2 = val + 4;
+ check_store_range(mt, val, val2, xa_mk_value(val), 0);
+ }
+
+ /* Append to the end of last range */
+ val = val2;
+ for (i = 0; i < 10; i++) {
+ val += 5;
+ MT_BUG_ON(mt, mtree_test_store_range(mt, val, ULONG_MAX,
+ xa_mk_value(val)) != 0);
+ }
+
+ /* Overwriting the range and over a part of the next range */
+ for (i = 10; i < 30; i += 2) {
+ val = i * 5 + 1;
+ val2 = val + 5;
+ check_store_range(mt, val, val2, xa_mk_value(val), 0);
+ }
+
+ /* Overwriting a part of the range and over the next range */
+ for (i = 50; i < 70; i += 2) {
+ val2 = i * 5;
+ val = val2 - 5;
+ check_store_range(mt, val, val2, xa_mk_value(val), 0);
+ }
+
+ /*
+ * Expand the range, only partially overwriting the previous and
+ * next ranges
+ */
+ for (i = 100; i < 130; i += 3) {
+ val = i * 5 - 5;
+ val2 = i * 5 + 1;
+ check_store_range(mt, val, val2, xa_mk_value(val), 0);
+ }
+
+ /*
+ * Expand the range, only partially overwriting the previous and
+ * next ranges, in RCU mode
+ */
+ mt_set_in_rcu(mt);
+ for (i = 150; i < 180; i += 3) {
+ val = i * 5 - 5;
+ val2 = i * 5 + 1;
+ check_store_range(mt, val, val2, xa_mk_value(val), 0);
+ }
+
+ MT_BUG_ON(mt, !mt_height(mt));
+ mt_validate(mt);
+ mt_set_non_kernel(0);
+ mtree_destroy(mt);
+
+ /* Test rebalance gaps */
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ mt_set_non_kernel(50);
+ for (i = 0; i <= 50; i++) {
+ val = i*10;
+ val2 = (i+1)*10;
+ check_store_range(mt, val, val2, xa_mk_value(val), 0);
+ }
+ check_store_range(mt, 161, 161, xa_mk_value(161), 0);
+ check_store_range(mt, 162, 162, xa_mk_value(162), 0);
+ check_store_range(mt, 163, 163, xa_mk_value(163), 0);
+ check_store_range(mt, 240, 249, NULL, 0);
+ mtree_erase(mt, 200);
+ mtree_erase(mt, 210);
+ mtree_erase(mt, 220);
+ mtree_erase(mt, 230);
+ mt_set_non_kernel(0);
+ MT_BUG_ON(mt, !mt_height(mt));
+ mtree_destroy(mt);
+
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ for (i = 0; i <= 500; i++) {
+ val = i*10;
+ val2 = (i+1)*10;
+ check_store_range(mt, val, val2, xa_mk_value(val), 0);
+ }
+ check_store_range(mt, 4600, 4959, xa_mk_value(1), 0);
+ mt_validate(mt);
+ MT_BUG_ON(mt, !mt_height(mt));
+ mtree_destroy(mt);
+
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ for (i = 0; i <= 500; i++) {
+ val = i*10;
+ val2 = (i+1)*10;
+ check_store_range(mt, val, val2, xa_mk_value(val), 0);
+ }
+ check_store_range(mt, 4811, 4811, xa_mk_value(4811), 0);
+ check_store_range(mt, 4812, 4812, xa_mk_value(4812), 0);
+ check_store_range(mt, 4861, 4861, xa_mk_value(4861), 0);
+ check_store_range(mt, 4862, 4862, xa_mk_value(4862), 0);
+ check_store_range(mt, 4842, 4849, NULL, 0);
+ mt_validate(mt);
+ MT_BUG_ON(mt, !mt_height(mt));
+ mtree_destroy(mt);
+
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ for (i = 0; i <= 1300; i++) {
+ val = i*10;
+ val2 = (i+1)*10;
+ check_store_range(mt, val, val2, xa_mk_value(val), 0);
+ MT_BUG_ON(mt, mt_height(mt) >= 4);
+ }
+ /* Cause a 3 child split all the way up the tree. */
+ for (i = 5; i < 215; i += 10)
+ check_store_range(mt, 11450 + i, 11450 + i + 1, NULL, 0);
+ for (i = 5; i < 65; i += 10)
+ check_store_range(mt, 11770 + i, 11770 + i + 1, NULL, 0);
+
+ MT_BUG_ON(mt, mt_height(mt) >= 4);
+ for (i = 5; i < 45; i += 10)
+ check_store_range(mt, 11700 + i, 11700 + i + 1, NULL, 0);
+ if (!MAPLE_32BIT)
+ MT_BUG_ON(mt, mt_height(mt) < 4);
+ mtree_destroy(mt);
+
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ for (i = 0; i <= 1200; i++) {
+ val = i*10;
+ val2 = (i+1)*10;
+ check_store_range(mt, val, val2, xa_mk_value(val), 0);
+ MT_BUG_ON(mt, mt_height(mt) >= 4);
+ }
+ /* Fill parents and leaves before split. */
+ for (i = 5; i < 455; i += 10)
+ check_store_range(mt, 7800 + i, 7800 + i + 1, NULL, 0);
+
+ for (i = 1; i < 16; i++)
+ check_store_range(mt, 8185 + i, 8185 + i + 1,
+ xa_mk_value(8185+i), 0);
+ MT_BUG_ON(mt, mt_height(mt) >= 4);
+ /* triple split across multiple levels. */
+ check_store_range(mt, 8184, 8184, xa_mk_value(8184), 0);
+ if (!MAPLE_32BIT)
+ MT_BUG_ON(mt, mt_height(mt) != 4);
+}
+
+static noinline void __init check_next_entry(struct maple_tree *mt)
+{
+ void *entry = NULL;
+ unsigned long limit = 30, i = 0;
+ MA_STATE(mas, mt, i, i);
+
+ MT_BUG_ON(mt, !mtree_empty(mt));
+
+ check_seq(mt, limit, false);
+ rcu_read_lock();
+
+ /* Check the first one and get ma_state in the correct state. */
+ MT_BUG_ON(mt, mas_walk(&mas) != xa_mk_value(i++));
+ for ( ; i <= limit + 1; i++) {
+ entry = mas_next(&mas, limit);
+ if (i > limit)
+ MT_BUG_ON(mt, entry != NULL);
+ else
+ MT_BUG_ON(mt, xa_mk_value(i) != entry);
+ }
+ rcu_read_unlock();
+ mtree_destroy(mt);
+}
+
+static noinline void __init check_prev_entry(struct maple_tree *mt)
+{
+ unsigned long index = 16;
+ void *value;
+ int i;
+
+ MA_STATE(mas, mt, index, index);
+
+ MT_BUG_ON(mt, !mtree_empty(mt));
+ check_seq(mt, 30, false);
+
+ rcu_read_lock();
+ value = mas_find(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, value != xa_mk_value(index));
+ value = mas_prev(&mas, 0);
+ MT_BUG_ON(mt, value != xa_mk_value(index - 1));
+ rcu_read_unlock();
+ mtree_destroy(mt);
+
+ /* Check limits on prev */
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ mas_lock(&mas);
+ for (i = 0; i <= index; i++) {
+ mas_set_range(&mas, i*10, i*10+5);
+ mas_store_gfp(&mas, xa_mk_value(i), GFP_KERNEL);
+ }
+
+ mas_set(&mas, 20);
+ value = mas_walk(&mas);
+ MT_BUG_ON(mt, value != xa_mk_value(2));
+
+ value = mas_prev(&mas, 19);
+ MT_BUG_ON(mt, value != NULL);
+
+ mas_set(&mas, 80);
+ value = mas_walk(&mas);
+ MT_BUG_ON(mt, value != xa_mk_value(8));
+
+ value = mas_prev(&mas, 76);
+ MT_BUG_ON(mt, value != NULL);
+
+ mas_unlock(&mas);
+}
+
+static noinline void __init check_root_expand(struct maple_tree *mt)
+{
+ MA_STATE(mas, mt, 0, 0);
+ void *ptr;
+
+ mas_lock(&mas);
+ mas_set(&mas, 3);
+ ptr = mas_walk(&mas);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, ptr != NULL);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != ULONG_MAX);
+
+ ptr = &check_prev_entry;
+ mas_set(&mas, 1);
+ mas_store_gfp(&mas, ptr, GFP_KERNEL);
+
+ mas_set(&mas, 0);
+ ptr = mas_walk(&mas);
+ MT_BUG_ON(mt, ptr != NULL);
+
+ mas_set(&mas, 1);
+ ptr = mas_walk(&mas);
+ MT_BUG_ON(mt, ptr != &check_prev_entry);
+
+ mas_set(&mas, 2);
+ ptr = mas_walk(&mas);
+ MT_BUG_ON(mt, ptr != NULL);
+ mas_unlock(&mas);
+ mtree_destroy(mt);
+
+ mt_init_flags(mt, 0);
+ mas_lock(&mas);
+
+ mas_set(&mas, 0);
+ ptr = &check_prev_entry;
+ mas_store_gfp(&mas, ptr, GFP_KERNEL);
+
+ mas_set(&mas, 5);
+ ptr = mas_walk(&mas);
+ MT_BUG_ON(mt, ptr != NULL);
+ MT_BUG_ON(mt, mas.index != 1);
+ MT_BUG_ON(mt, mas.last != ULONG_MAX);
+
+ mas_set_range(&mas, 0, 100);
+ ptr = mas_walk(&mas);
+ MT_BUG_ON(mt, ptr != &check_prev_entry);
+ MT_BUG_ON(mt, mas.last != 0);
+ mas_unlock(&mas);
+ mtree_destroy(mt);
+
+ mt_init_flags(mt, 0);
+ mas_lock(&mas);
+
+ mas_set(&mas, 0);
+ ptr = (void *)((unsigned long) check_prev_entry | 1UL);
+ mas_store_gfp(&mas, ptr, GFP_KERNEL);
+ ptr = mas_next(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, ptr != NULL);
+ MT_BUG_ON(mt, (mas.index != 1) && (mas.last != ULONG_MAX));
+
+ mas_set(&mas, 1);
+ ptr = mas_prev(&mas, 0);
+ MT_BUG_ON(mt, (mas.index != 0) && (mas.last != 0));
+ MT_BUG_ON(mt, ptr != (void *)((unsigned long) check_prev_entry | 1UL));
+
+ mas_unlock(&mas);
+
+ mtree_destroy(mt);
+
+ mt_init_flags(mt, 0);
+ mas_lock(&mas);
+ mas_set(&mas, 0);
+ ptr = (void *)((unsigned long) check_prev_entry | 2UL);
+ mas_store_gfp(&mas, ptr, GFP_KERNEL);
+ ptr = mas_next(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, ptr != NULL);
+ MT_BUG_ON(mt, (mas.index != ULONG_MAX) && (mas.last != ULONG_MAX));
+
+ mas_set(&mas, 1);
+ ptr = mas_prev(&mas, 0);
+ MT_BUG_ON(mt, (mas.index != 0) && (mas.last != 0));
+ MT_BUG_ON(mt, ptr != (void *)((unsigned long) check_prev_entry | 2UL));
+
+ mas_unlock(&mas);
+}
+
+static noinline void __init check_gap_combining(struct maple_tree *mt)
+{
+ struct maple_enode *mn1, *mn2;
+ void *entry;
+ unsigned long singletons = 100;
+ static const unsigned long *seq100;
+ static const unsigned long seq100_64[] = {
+ /* 0-5 */
+ 74, 75, 76,
+ 50, 100, 2,
+
+ /* 6-12 */
+ 44, 45, 46, 43,
+ 20, 50, 3,
+
+ /* 13-20*/
+ 80, 81, 82,
+ 76, 2, 79, 85, 4,
+ };
+
+ static const unsigned long seq100_32[] = {
+ /* 0-5 */
+ 61, 62, 63,
+ 50, 100, 2,
+
+ /* 6-12 */
+ 31, 32, 33, 30,
+ 20, 50, 3,
+
+ /* 13-20*/
+ 80, 81, 82,
+ 76, 2, 79, 85, 4,
+ };
+
+ static const unsigned long seq2000[] = {
+ 1152, 1151,
+ 1100, 1200, 2,
+ };
+ static const unsigned long seq400[] = {
+ 286, 318,
+ 256, 260, 266, 270, 275, 280, 290, 398,
+ 286, 310,
+ };
+
+ unsigned long index;
+
+ MA_STATE(mas, mt, 0, 0);
+
+ if (MAPLE_32BIT)
+ seq100 = seq100_32;
+ else
+ seq100 = seq100_64;
+
+ index = seq100[0];
+ mas_set(&mas, index);
+ MT_BUG_ON(mt, !mtree_empty(mt));
+ check_seq(mt, singletons, false); /* create 100 singletons. */
+
+ mt_set_non_kernel(1);
+ mtree_test_erase(mt, seq100[2]);
+ check_load(mt, seq100[2], NULL);
+ mtree_test_erase(mt, seq100[1]);
+ check_load(mt, seq100[1], NULL);
+
+ rcu_read_lock();
+ entry = mas_find(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != xa_mk_value(index));
+ mn1 = mas.node;
+ mas_next(&mas, ULONG_MAX);
+ entry = mas_next(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != xa_mk_value(index + 4));
+ mn2 = mas.node;
+ MT_BUG_ON(mt, mn1 == mn2); /* test the test. */
+
+ /*
+ * At this point, there is a gap of 2 at index + 1 between seq100[3] and
+ * seq100[4]. Search for the gap.
+ */
+ mt_set_non_kernel(1);
+ mas_reset(&mas);
+ MT_BUG_ON(mt, mas_empty_area_rev(&mas, seq100[3], seq100[4],
+ seq100[5]));
+ MT_BUG_ON(mt, mas.index != index + 1);
+ rcu_read_unlock();
+
+ mtree_test_erase(mt, seq100[6]);
+ check_load(mt, seq100[6], NULL);
+ mtree_test_erase(mt, seq100[7]);
+ check_load(mt, seq100[7], NULL);
+ mtree_test_erase(mt, seq100[8]);
+ index = seq100[9];
+
+ rcu_read_lock();
+ mas.index = index;
+ mas.last = index;
+ mas_reset(&mas);
+ entry = mas_find(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != xa_mk_value(index));
+ mn1 = mas.node;
+ entry = mas_next(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != xa_mk_value(index + 4));
+ mas_next(&mas, ULONG_MAX); /* go to the next entry. */
+ mn2 = mas.node;
+ MT_BUG_ON(mt, mn1 == mn2); /* test the next entry is in the next node. */
+
+ /*
+ * At this point, there is a gap of 3 at seq100[6]. Find it by
+ * searching 20 - 50 for size 3.
+ */
+ mas_reset(&mas);
+ MT_BUG_ON(mt, mas_empty_area_rev(&mas, seq100[10], seq100[11],
+ seq100[12]));
+ MT_BUG_ON(mt, mas.index != seq100[6]);
+ rcu_read_unlock();
+
+ mt_set_non_kernel(1);
+ mtree_store(mt, seq100[13], NULL, GFP_KERNEL);
+ check_load(mt, seq100[13], NULL);
+ check_load(mt, seq100[14], xa_mk_value(seq100[14]));
+ mtree_store(mt, seq100[14], NULL, GFP_KERNEL);
+ check_load(mt, seq100[13], NULL);
+ check_load(mt, seq100[14], NULL);
+
+ mas_reset(&mas);
+ rcu_read_lock();
+ MT_BUG_ON(mt, mas_empty_area_rev(&mas, seq100[16], seq100[15],
+ seq100[17]));
+ MT_BUG_ON(mt, mas.index != seq100[13]);
+ mt_validate(mt);
+ rcu_read_unlock();
+
+	/*
+	 * *DEPRECATED: no retries anymore* Test a retry entry at the start
+	 * of a gap.
+	 */
+ mt_set_non_kernel(2);
+ mtree_test_store_range(mt, seq100[18], seq100[14], NULL);
+ mtree_test_erase(mt, seq100[15]);
+ mas_reset(&mas);
+ rcu_read_lock();
+ MT_BUG_ON(mt, mas_empty_area_rev(&mas, seq100[16], seq100[19],
+ seq100[20]));
+ rcu_read_unlock();
+ MT_BUG_ON(mt, mas.index != seq100[18]);
+ mt_validate(mt);
+ mtree_destroy(mt);
+
+ /* seq 2000 tests are for multi-level tree gaps */
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_seq(mt, 2000, false);
+ mt_set_non_kernel(1);
+ mtree_test_erase(mt, seq2000[0]);
+ mtree_test_erase(mt, seq2000[1]);
+
+ mt_set_non_kernel(2);
+ mas_reset(&mas);
+ rcu_read_lock();
+ MT_BUG_ON(mt, mas_empty_area_rev(&mas, seq2000[2], seq2000[3],
+ seq2000[4]));
+ MT_BUG_ON(mt, mas.index != seq2000[1]);
+ rcu_read_unlock();
+ mt_validate(mt);
+ mtree_destroy(mt);
+
+ /* seq 400 tests rebalancing over two levels. */
+ mt_set_non_kernel(99);
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_seq(mt, 400, false);
+ mtree_test_store_range(mt, seq400[0], seq400[1], NULL);
+ mt_set_non_kernel(0);
+ mtree_destroy(mt);
+
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_seq(mt, 400, false);
+ mt_set_non_kernel(50);
+ mtree_test_store_range(mt, seq400[2], seq400[9],
+ xa_mk_value(seq400[2]));
+ mtree_test_store_range(mt, seq400[3], seq400[9],
+ xa_mk_value(seq400[3]));
+ mtree_test_store_range(mt, seq400[4], seq400[9],
+ xa_mk_value(seq400[4]));
+ mtree_test_store_range(mt, seq400[5], seq400[9],
+ xa_mk_value(seq400[5]));
+ mtree_test_store_range(mt, seq400[0], seq400[9],
+ xa_mk_value(seq400[0]));
+ mtree_test_store_range(mt, seq400[6], seq400[9],
+ xa_mk_value(seq400[6]));
+ mtree_test_store_range(mt, seq400[7], seq400[9],
+ xa_mk_value(seq400[7]));
+ mtree_test_store_range(mt, seq400[8], seq400[9],
+ xa_mk_value(seq400[8]));
+ mtree_test_store_range(mt, seq400[10], seq400[11],
+ xa_mk_value(seq400[10]));
+ mt_validate(mt);
+ mt_set_non_kernel(0);
+ mtree_destroy(mt);
+}
+
+static noinline void __init check_node_overwrite(struct maple_tree *mt)
+{
+ int i, max = 4000;
+
+ for (i = 0; i < max; i++)
+ mtree_test_store_range(mt, i*100, i*100 + 50, xa_mk_value(i*100));
+
+ mtree_test_store_range(mt, 319951, 367950, NULL);
+	/* mt_dump(mt, mt_dump_dec); */
+ mt_validate(mt);
+}
+
+#if defined(BENCH_SLOT_STORE)
+static noinline void __init bench_slot_store(struct maple_tree *mt)
+{
+ int i, brk = 105, max = 1040, brk_start = 100, count = 20000000;
+
+ for (i = 0; i < max; i += 10)
+ mtree_store_range(mt, i, i + 5, xa_mk_value(i), GFP_KERNEL);
+
+ for (i = 0; i < count; i++) {
+ mtree_store_range(mt, brk, brk, NULL, GFP_KERNEL);
+ mtree_store_range(mt, brk_start, brk, xa_mk_value(brk),
+ GFP_KERNEL);
+ }
+}
+#endif
+
+#if defined(BENCH_NODE_STORE)
+static noinline void __init bench_node_store(struct maple_tree *mt)
+{
+ int i, overwrite = 76, max = 240, count = 20000000;
+
+ for (i = 0; i < max; i += 10)
+ mtree_store_range(mt, i, i + 5, xa_mk_value(i), GFP_KERNEL);
+
+ for (i = 0; i < count; i++) {
+ mtree_store_range(mt, overwrite, overwrite + 15,
+ xa_mk_value(overwrite), GFP_KERNEL);
+
+ overwrite += 5;
+ if (overwrite >= 135)
+ overwrite = 76;
+ }
+}
+#endif
+
+#if defined(BENCH_AWALK)
+static noinline void __init bench_awalk(struct maple_tree *mt)
+{
+ int i, max = 2500, count = 50000000;
+ MA_STATE(mas, mt, 1470, 1470);
+
+ for (i = 0; i < max; i += 10)
+ mtree_store_range(mt, i, i + 5, xa_mk_value(i), GFP_KERNEL);
+
+ mtree_store_range(mt, 1470, 1475, NULL, GFP_KERNEL);
+
+ for (i = 0; i < count; i++) {
+ mas_empty_area_rev(&mas, 0, 2000, 10);
+ mas_reset(&mas);
+ }
+}
+#endif
+
+#if defined(BENCH_WALK)
+static noinline void __init bench_walk(struct maple_tree *mt)
+{
+ int i, max = 2500, count = 550000000;
+ MA_STATE(mas, mt, 1470, 1470);
+
+ for (i = 0; i < max; i += 10)
+ mtree_store_range(mt, i, i + 5, xa_mk_value(i), GFP_KERNEL);
+
+ for (i = 0; i < count; i++) {
+ mas_walk(&mas);
+ mas_reset(&mas);
+ }
+
+}
+#endif
+
+#if defined(BENCH_MT_FOR_EACH)
+static noinline void __init bench_mt_for_each(struct maple_tree *mt)
+{
+ int i, count = 1000000;
+ unsigned long max = 2500, index = 0;
+ void *entry;
+
+ for (i = 0; i < max; i += 5)
+ mtree_store_range(mt, i, i + 4, xa_mk_value(i), GFP_KERNEL);
+
+ for (i = 0; i < count; i++) {
+ unsigned long j = 0;
+
+ mt_for_each(mt, entry, index, max) {
+ MT_BUG_ON(mt, entry != xa_mk_value(j));
+ j += 5;
+ }
+
+ index = 0;
+ }
+
+}
+#endif
+
+#if defined(BENCH_MAS_FOR_EACH)
+static noinline void __init bench_mas_for_each(struct maple_tree *mt)
+{
+ int i, count = 1000000;
+ unsigned long max = 2500;
+ void *entry;
+ MA_STATE(mas, mt, 0, 0);
+
+ for (i = 0; i < max; i += 5) {
+ int gap = 4;
+
+ if (i % 30 == 0)
+ gap = 3;
+ mtree_store_range(mt, i, i + gap, xa_mk_value(i), GFP_KERNEL);
+ }
+
+ rcu_read_lock();
+ for (i = 0; i < count; i++) {
+ unsigned long j = 0;
+
+ mas_for_each(&mas, entry, max) {
+ MT_BUG_ON(mt, entry != xa_mk_value(j));
+ j += 5;
+ }
+ mas_set(&mas, 0);
+ }
+ rcu_read_unlock();
+
+}
+#endif
+
+#if defined(BENCH_MAS_PREV)
+static noinline void __init bench_mas_prev(struct maple_tree *mt)
+{
+ int i, count = 1000000;
+ unsigned long max = 2500;
+ void *entry;
+ MA_STATE(mas, mt, 0, 0);
+
+ for (i = 0; i < max; i += 5) {
+ int gap = 4;
+
+ if (i % 30 == 0)
+ gap = 3;
+ mtree_store_range(mt, i, i + gap, xa_mk_value(i), GFP_KERNEL);
+ }
+
+ rcu_read_lock();
+ for (i = 0; i < count; i++) {
+ unsigned long j = 2495;
+
+ mas_set(&mas, ULONG_MAX);
+ while ((entry = mas_prev(&mas, 0)) != NULL) {
+ MT_BUG_ON(mt, entry != xa_mk_value(j));
+ j -= 5;
+ }
+ }
+ rcu_read_unlock();
+
+}
+#endif
+
+/* check_forking - simulate the kernel forking sequence with the tree. */
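+/*
+ * The destination tree takes an external lock, and mas_expected_entries()
+ * preallocates nodes so that every mas_store() in the copy loop can
+ * succeed without allocating, much like dup_mmap() duplicating a VMA tree.
+ */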
+static noinline void __init check_forking(struct maple_tree *mt)
+{
+
+ struct maple_tree newmt;
+ int i, nr_entries = 134;
+ void *val;
+ MA_STATE(mas, mt, 0, 0);
+ MA_STATE(newmas, mt, 0, 0);
+ struct rw_semaphore newmt_lock;
+
+ init_rwsem(&newmt_lock);
+
+ for (i = 0; i <= nr_entries; i++)
+ mtree_store_range(mt, i*10, i*10 + 5,
+ xa_mk_value(i), GFP_KERNEL);
+
+ mt_set_non_kernel(99999);
+ mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
+ mt_set_external_lock(&newmt, &newmt_lock);
+ newmas.tree = &newmt;
+ mas_reset(&newmas);
+ mas_reset(&mas);
+ down_write(&newmt_lock);
+ mas.index = 0;
+ mas.last = 0;
+ if (mas_expected_entries(&newmas, nr_entries)) {
+ pr_err("OOM!");
+ BUG_ON(1);
+ }
+ rcu_read_lock();
+ mas_for_each(&mas, val, ULONG_MAX) {
+ newmas.index = mas.index;
+ newmas.last = mas.last;
+ mas_store(&newmas, val);
+ }
+ rcu_read_unlock();
+ mas_destroy(&newmas);
+ mt_validate(&newmt);
+ mt_set_non_kernel(0);
+ __mt_destroy(&newmt);
+ up_write(&newmt_lock);
+}
+
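+/*
+ * Iterate with mas_for_each() while overwriting entries mid-walk; the
+ * iterator must resume at the correct next entry after each store.
+ */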
+static noinline void __init check_iteration(struct maple_tree *mt)
+{
+ int i, nr_entries = 125;
+ void *val;
+ MA_STATE(mas, mt, 0, 0);
+
+ for (i = 0; i <= nr_entries; i++)
+ mtree_store_range(mt, i * 10, i * 10 + 9,
+ xa_mk_value(i), GFP_KERNEL);
+
+ mt_set_non_kernel(99999);
+
+ i = 0;
+ mas_lock(&mas);
+ mas_for_each(&mas, val, 925) {
+ MT_BUG_ON(mt, mas.index != i * 10);
+ MT_BUG_ON(mt, mas.last != i * 10 + 9);
+ /* Overwrite end of entry 92 */
+ if (i == 92) {
+ mas.index = 925;
+ mas.last = 929;
+ mas_store(&mas, val);
+ }
+ i++;
+ }
+ /* Ensure mas_find() gets the next value */
+ val = mas_find(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, val != xa_mk_value(i));
+
+ mas_set(&mas, 0);
+ i = 0;
+ mas_for_each(&mas, val, 785) {
+ MT_BUG_ON(mt, mas.index != i * 10);
+ MT_BUG_ON(mt, mas.last != i * 10 + 9);
+ /* Overwrite start of entry 78 */
+ if (i == 78) {
+ mas.index = 780;
+ mas.last = 785;
+ mas_store(&mas, val);
+ } else {
+ i++;
+ }
+ }
+ val = mas_find(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, val != xa_mk_value(i));
+
+ mas_set(&mas, 0);
+ i = 0;
+ mas_for_each(&mas, val, 765) {
+ MT_BUG_ON(mt, mas.index != i * 10);
+ MT_BUG_ON(mt, mas.last != i * 10 + 9);
+ /* Overwrite end of entry 76 and advance to the end */
+ if (i == 76) {
+ mas.index = 760;
+ mas.last = 765;
+ mas_store(&mas, val);
+ }
+ i++;
+ }
+ /* Make sure the next find returns the one after 765, 766-769 */
+ val = mas_find(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, val != xa_mk_value(76));
+ mas_unlock(&mas);
+ mas_destroy(&mas);
+ mt_set_non_kernel(0);
+}
+
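+/*
+ * Copy every entry of a populated tree into a fresh one with
+ * mas_store_gfp() under the destination tree lock, then validate the copy.
+ */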
+static noinline void __init check_mas_store_gfp(struct maple_tree *mt)
+{
+
+ struct maple_tree newmt;
+ int i, nr_entries = 135;
+ void *val;
+ MA_STATE(mas, mt, 0, 0);
+ MA_STATE(newmas, mt, 0, 0);
+
+ for (i = 0; i <= nr_entries; i++)
+ mtree_store_range(mt, i*10, i*10 + 5,
+ xa_mk_value(i), GFP_KERNEL);
+
+ mt_set_non_kernel(99999);
+ mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE);
+ newmas.tree = &newmt;
+ rcu_read_lock();
+ mas_lock(&newmas);
+ mas_reset(&newmas);
+ mas_set(&mas, 0);
+ mas_for_each(&mas, val, ULONG_MAX) {
+ newmas.index = mas.index;
+ newmas.last = mas.last;
+ mas_store_gfp(&newmas, val, GFP_KERNEL);
+ }
+ mas_unlock(&newmas);
+ rcu_read_unlock();
+ mt_validate(&newmt);
+ mt_set_non_kernel(0);
+ mtree_destroy(&newmt);
+}
+
+#if defined(BENCH_FORK)
+static noinline void __init bench_forking(struct maple_tree *mt)
+{
+
+ struct maple_tree newmt;
+ int i, nr_entries = 134, nr_fork = 80000;
+ void *val;
+ MA_STATE(mas, mt, 0, 0);
+ MA_STATE(newmas, mt, 0, 0);
+ struct rw_semaphore newmt_lock;
+
+ init_rwsem(&newmt_lock);
+ mt_set_external_lock(&newmt, &newmt_lock);
+
+ for (i = 0; i <= nr_entries; i++)
+ mtree_store_range(mt, i*10, i*10 + 5,
+ xa_mk_value(i), GFP_KERNEL);
+
+ for (i = 0; i < nr_fork; i++) {
+ mt_set_non_kernel(99999);
+ mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE);
+ newmas.tree = &newmt;
+ mas_reset(&newmas);
+ mas_reset(&mas);
+ mas.index = 0;
+ mas.last = 0;
+ rcu_read_lock();
+ down_write(&newmt_lock);
+ if (mas_expected_entries(&newmas, nr_entries)) {
+ printk("OOM!");
+ BUG_ON(1);
+ }
+ mas_for_each(&mas, val, ULONG_MAX) {
+ newmas.index = mas.index;
+ newmas.last = mas.last;
+ mas_store(&newmas, val);
+ }
+ mas_destroy(&newmas);
+ rcu_read_unlock();
+ mt_validate(&newmt);
+ mt_set_non_kernel(0);
+ __mt_destroy(&newmt);
+ up_write(&newmt_lock);
+ }
+}
+#endif
+
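+/*
+ * Step forwards and backwards across node and level boundaries, including
+ * running off either end of the tree and stepping back in.
+ */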
+static noinline void __init next_prev_test(struct maple_tree *mt)
+{
+ int i, nr_entries;
+ void *val;
+ MA_STATE(mas, mt, 0, 0);
+ struct maple_enode *mn;
+ static const unsigned long *level2;
+ static const unsigned long level2_64[] = { 707, 1000, 710, 715, 720,
+ 725};
+ static const unsigned long level2_32[] = { 1747, 2000, 1750, 1755,
+ 1760, 1765};
+ unsigned long last_index;
+
+ if (MAPLE_32BIT) {
+ nr_entries = 500;
+ level2 = level2_32;
+ last_index = 0x138e;
+ } else {
+ nr_entries = 200;
+ level2 = level2_64;
+ last_index = 0x7d6;
+ }
+
+ for (i = 0; i <= nr_entries; i++)
+ mtree_store_range(mt, i*10, i*10 + 5,
+ xa_mk_value(i), GFP_KERNEL);
+
+ mas_lock(&mas);
+ for (i = 0; i <= nr_entries / 2; i++) {
+ mas_next(&mas, 1000);
+ if (mas_is_none(&mas))
+ break;
+
+ }
+ mas_reset(&mas);
+ mas_set(&mas, 0);
+ i = 0;
+ mas_for_each(&mas, val, 1000) {
+ i++;
+ }
+
+ mas_reset(&mas);
+ mas_set(&mas, 0);
+ i = 0;
+ mas_for_each(&mas, val, 1000) {
+ mas_pause(&mas);
+ i++;
+ }
+
+ /*
+ * 680 - 685 = 0x61a00001930c
+ * 686 - 689 = NULL;
+ * 690 - 695 = 0x61a00001930c
+ * Check simple next/prev
+ */
+ mas_set(&mas, 686);
+ val = mas_walk(&mas);
+ MT_BUG_ON(mt, val != NULL);
+
+ val = mas_next(&mas, 1000);
+ MT_BUG_ON(mt, val != xa_mk_value(690 / 10));
+ MT_BUG_ON(mt, mas.index != 690);
+ MT_BUG_ON(mt, mas.last != 695);
+
+ val = mas_prev(&mas, 0);
+ MT_BUG_ON(mt, val != xa_mk_value(680 / 10));
+ MT_BUG_ON(mt, mas.index != 680);
+ MT_BUG_ON(mt, mas.last != 685);
+
+ val = mas_next(&mas, 1000);
+ MT_BUG_ON(mt, val != xa_mk_value(690 / 10));
+ MT_BUG_ON(mt, mas.index != 690);
+ MT_BUG_ON(mt, mas.last != 695);
+
+ val = mas_next(&mas, 1000);
+ MT_BUG_ON(mt, val != xa_mk_value(700 / 10));
+ MT_BUG_ON(mt, mas.index != 700);
+ MT_BUG_ON(mt, mas.last != 705);
+
+ /* Check across node boundaries of the tree */
+ mas_set(&mas, 70);
+ val = mas_walk(&mas);
+ MT_BUG_ON(mt, val != xa_mk_value(70 / 10));
+ MT_BUG_ON(mt, mas.index != 70);
+ MT_BUG_ON(mt, mas.last != 75);
+
+ val = mas_next(&mas, 1000);
+ MT_BUG_ON(mt, val != xa_mk_value(80 / 10));
+ MT_BUG_ON(mt, mas.index != 80);
+ MT_BUG_ON(mt, mas.last != 85);
+
+ val = mas_prev(&mas, 70);
+ MT_BUG_ON(mt, val != xa_mk_value(70 / 10));
+ MT_BUG_ON(mt, mas.index != 70);
+ MT_BUG_ON(mt, mas.last != 75);
+
+ /* Check across two levels of the tree */
+ mas_reset(&mas);
+ mas_set(&mas, level2[0]);
+ val = mas_walk(&mas);
+ MT_BUG_ON(mt, val != NULL);
+ val = mas_next(&mas, level2[1]);
+ MT_BUG_ON(mt, val != xa_mk_value(level2[2] / 10));
+ MT_BUG_ON(mt, mas.index != level2[2]);
+ MT_BUG_ON(mt, mas.last != level2[3]);
+ mn = mas.node;
+
+ val = mas_next(&mas, level2[1]);
+ MT_BUG_ON(mt, val != xa_mk_value(level2[4] / 10));
+ MT_BUG_ON(mt, mas.index != level2[4]);
+ MT_BUG_ON(mt, mas.last != level2[5]);
+ MT_BUG_ON(mt, mn == mas.node);
+
+ val = mas_prev(&mas, 0);
+ MT_BUG_ON(mt, val != xa_mk_value(level2[2] / 10));
+ MT_BUG_ON(mt, mas.index != level2[2]);
+ MT_BUG_ON(mt, mas.last != level2[3]);
+
+ /* Check running off the end and back on */
+ mas_set(&mas, nr_entries * 10);
+ val = mas_walk(&mas);
+ MT_BUG_ON(mt, val != xa_mk_value(nr_entries));
+ MT_BUG_ON(mt, mas.index != (nr_entries * 10));
+ MT_BUG_ON(mt, mas.last != (nr_entries * 10 + 5));
+
+ val = mas_next(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, val != NULL);
+ MT_BUG_ON(mt, mas.index != last_index);
+ MT_BUG_ON(mt, mas.last != ULONG_MAX);
+
+ val = mas_prev(&mas, 0);
+ MT_BUG_ON(mt, val != xa_mk_value(nr_entries));
+ MT_BUG_ON(mt, mas.index != (nr_entries * 10));
+ MT_BUG_ON(mt, mas.last != (nr_entries * 10 + 5));
+
+ /* Check running off the start and back on */
+ mas_reset(&mas);
+ mas_set(&mas, 10);
+ val = mas_walk(&mas);
+ MT_BUG_ON(mt, val != xa_mk_value(1));
+ MT_BUG_ON(mt, mas.index != 10);
+ MT_BUG_ON(mt, mas.last != 15);
+
+ val = mas_prev(&mas, 0);
+ MT_BUG_ON(mt, val != xa_mk_value(0));
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 5);
+
+ val = mas_prev(&mas, 0);
+ MT_BUG_ON(mt, val != NULL);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 5);
+ MT_BUG_ON(mt, mas.node != MAS_UNDERFLOW);
+
+ mas.index = 0;
+ mas.last = 5;
+ mas_store(&mas, NULL);
+ mas_reset(&mas);
+ mas_set(&mas, 10);
+ mas_walk(&mas);
+
+ val = mas_prev(&mas, 0);
+ MT_BUG_ON(mt, val != NULL);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 9);
+ mas_unlock(&mas);
+
+ mtree_destroy(mt);
+
+ mt_init(mt);
+ mtree_store_range(mt, 0, 0, xa_mk_value(0), GFP_KERNEL);
+ mtree_store_range(mt, 5, 5, xa_mk_value(5), GFP_KERNEL);
+ rcu_read_lock();
+ mas_set(&mas, 5);
+ val = mas_prev(&mas, 4);
+ MT_BUG_ON(mt, val != NULL);
+ rcu_read_unlock();
+}
+
+/* Test spanning writes that require balancing right sibling or right cousin */
+static noinline void __init check_spanning_relatives(struct maple_tree *mt)
+{
+ unsigned long i, nr_entries = 1000;
+
+ for (i = 0; i <= nr_entries; i++)
+ mtree_store_range(mt, i*10, i*10 + 5,
+ xa_mk_value(i), GFP_KERNEL);
+
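+	/* A single store spanning multiple nodes forces the balancing above */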
+ mtree_store_range(mt, 9365, 9955, NULL, GFP_KERNEL);
+}
+
+static noinline void __init check_fuzzer(struct maple_tree *mt)
+{
+ /*
+ * 1. Causes a spanning rebalance of a single root node.
+ * Fixed by setting the correct limit in mast_cp_to_nodes() when the
+ * entire right side is consumed.
+ */
+ mtree_test_insert(mt, 88, (void *)0xb1);
+ mtree_test_insert(mt, 84, (void *)0xa9);
+ mtree_test_insert(mt, 2, (void *)0x5);
+ mtree_test_insert(mt, 4, (void *)0x9);
+ mtree_test_insert(mt, 14, (void *)0x1d);
+ mtree_test_insert(mt, 7, (void *)0xf);
+ mtree_test_insert(mt, 12, (void *)0x19);
+ mtree_test_insert(mt, 18, (void *)0x25);
+ mtree_test_store_range(mt, 8, 18, (void *)0x11);
+ mtree_destroy(mt);
+
+ /*
+	 * 2. Causes a spanning rebalance of two nodes in root.
+ * Fixed by setting mast->r->max correctly.
+ */
+ mt_init_flags(mt, 0);
+ mtree_test_store(mt, 87, (void *)0xaf);
+ mtree_test_store(mt, 0, (void *)0x1);
+ mtree_test_load(mt, 4);
+ mtree_test_insert(mt, 4, (void *)0x9);
+ mtree_test_store(mt, 8, (void *)0x11);
+ mtree_test_store(mt, 44, (void *)0x59);
+ mtree_test_store(mt, 68, (void *)0x89);
+ mtree_test_store(mt, 2, (void *)0x5);
+ mtree_test_insert(mt, 43, (void *)0x57);
+ mtree_test_insert(mt, 24, (void *)0x31);
+ mtree_test_insert(mt, 844, (void *)0x699);
+ mtree_test_store(mt, 84, (void *)0xa9);
+ mtree_test_store(mt, 4, (void *)0x9);
+ mtree_test_erase(mt, 4);
+ mtree_test_load(mt, 5);
+ mtree_test_erase(mt, 0);
+ mtree_destroy(mt);
+
+	/*
+	 * 3. Causes a node overflow on copy.
+	 * Fixed by using the correct check for node size in mas_wr_modify().
+	 * Also discovered an issue with metadata setting.
+	 */
+ mt_init_flags(mt, 0);
+ mtree_test_store_range(mt, 0, ULONG_MAX, (void *)0x1);
+ mtree_test_store(mt, 4, (void *)0x9);
+ mtree_test_erase(mt, 5);
+ mtree_test_erase(mt, 0);
+ mtree_test_erase(mt, 4);
+ mtree_test_store(mt, 5, (void *)0xb);
+ mtree_test_erase(mt, 5);
+ mtree_test_store(mt, 5, (void *)0xb);
+ mtree_test_erase(mt, 5);
+ mtree_test_erase(mt, 4);
+ mtree_test_store(mt, 4, (void *)0x9);
+ mtree_test_store(mt, 444, (void *)0x379);
+ mtree_test_store(mt, 0, (void *)0x1);
+ mtree_test_load(mt, 0);
+ mtree_test_store(mt, 5, (void *)0xb);
+ mtree_test_erase(mt, 0);
+ mtree_destroy(mt);
+
+	/*
+	 * 4. Spanning store failure due to writing an incorrect pivot value
+	 * at the last slot.
+	 * Fixed by setting mast->r->max correctly in mast_cp_to_nodes().
+	 */
+ mt_init_flags(mt, 0);
+ mtree_test_insert(mt, 261, (void *)0x20b);
+ mtree_test_store(mt, 516, (void *)0x409);
+ mtree_test_store(mt, 6, (void *)0xd);
+ mtree_test_insert(mt, 5, (void *)0xb);
+ mtree_test_insert(mt, 1256, (void *)0x9d1);
+ mtree_test_store(mt, 4, (void *)0x9);
+ mtree_test_erase(mt, 1);
+ mtree_test_store(mt, 56, (void *)0x71);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_store(mt, 24, (void *)0x31);
+ mtree_test_erase(mt, 1);
+ mtree_test_insert(mt, 2263, (void *)0x11af);
+ mtree_test_insert(mt, 446, (void *)0x37d);
+ mtree_test_store_range(mt, 6, 45, (void *)0xd);
+ mtree_test_store_range(mt, 3, 446, (void *)0x7);
+ mtree_destroy(mt);
+
+ /*
+ * 5. mas_wr_extend_null() may overflow slots.
+	 * Fixed by checking against wr_mas->node_end.
+ */
+ mt_init_flags(mt, 0);
+ mtree_test_store(mt, 48, (void *)0x61);
+ mtree_test_store(mt, 3, (void *)0x7);
+ mtree_test_load(mt, 0);
+ mtree_test_store(mt, 88, (void *)0xb1);
+ mtree_test_store(mt, 81, (void *)0xa3);
+ mtree_test_insert(mt, 0, (void *)0x1);
+ mtree_test_insert(mt, 8, (void *)0x11);
+ mtree_test_insert(mt, 4, (void *)0x9);
+ mtree_test_insert(mt, 2480, (void *)0x1361);
+ mtree_test_insert(mt, ULONG_MAX,
+ (void *)0xffffffffffffffff);
+ mtree_test_erase(mt, ULONG_MAX);
+ mtree_destroy(mt);
+
+	/*
+	 * 6. When reusing a node with an implied pivot and the node is
+	 * shrinking, old data would be left in the implied slot.
+	 * Fixed by checking the last pivot against mas->max and clearing the
+	 * slot accordingly. This only affected the left-most node, as that
+	 * is the only node allowed to end in NULL.
+	 */
+ mt_init_flags(mt, 0);
+ mtree_test_erase(mt, 3);
+ mtree_test_insert(mt, 22, (void *)0x2d);
+ mtree_test_insert(mt, 15, (void *)0x1f);
+ mtree_test_load(mt, 2);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_insert(mt, 5, (void *)0xb);
+ mtree_test_erase(mt, 1);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_insert(mt, 4, (void *)0x9);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_erase(mt, 1);
+ mtree_test_insert(mt, 2, (void *)0x5);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_erase(mt, 3);
+ mtree_test_insert(mt, 22, (void *)0x2d);
+ mtree_test_insert(mt, 15, (void *)0x1f);
+ mtree_test_insert(mt, 2, (void *)0x5);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_insert(mt, 8, (void *)0x11);
+ mtree_test_load(mt, 2);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_store(mt, 1, (void *)0x3);
+ mtree_test_insert(mt, 5, (void *)0xb);
+ mtree_test_erase(mt, 1);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_insert(mt, 4, (void *)0x9);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_erase(mt, 1);
+ mtree_test_insert(mt, 2, (void *)0x5);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_erase(mt, 3);
+ mtree_test_insert(mt, 22, (void *)0x2d);
+ mtree_test_insert(mt, 15, (void *)0x1f);
+ mtree_test_insert(mt, 2, (void *)0x5);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_insert(mt, 8, (void *)0x11);
+ mtree_test_insert(mt, 12, (void *)0x19);
+ mtree_test_erase(mt, 1);
+ mtree_test_store_range(mt, 4, 62, (void *)0x9);
+ mtree_test_erase(mt, 62);
+ mtree_test_store_range(mt, 1, 0, (void *)0x3);
+ mtree_test_insert(mt, 11, (void *)0x17);
+ mtree_test_insert(mt, 3, (void *)0x7);
+ mtree_test_insert(mt, 3, (void *)0x7);
+ mtree_test_store(mt, 62, (void *)0x7d);
+ mtree_test_erase(mt, 62);
+ mtree_test_store_range(mt, 1, 15, (void *)0x3);
+ mtree_test_erase(mt, 1);
+ mtree_test_insert(mt, 22, (void *)0x2d);
+ mtree_test_insert(mt, 12, (void *)0x19);
+ mtree_test_erase(mt, 1);
+ mtree_test_insert(mt, 3, (void *)0x7);
+ mtree_test_store(mt, 62, (void *)0x7d);
+ mtree_test_erase(mt, 62);
+ mtree_test_insert(mt, 122, (void *)0xf5);
+ mtree_test_store(mt, 3, (void *)0x7);
+ mtree_test_insert(mt, 0, (void *)0x1);
+ mtree_test_store_range(mt, 0, 1, (void *)0x1);
+ mtree_test_insert(mt, 85, (void *)0xab);
+ mtree_test_insert(mt, 72, (void *)0x91);
+ mtree_test_insert(mt, 81, (void *)0xa3);
+ mtree_test_insert(mt, 726, (void *)0x5ad);
+ mtree_test_insert(mt, 0, (void *)0x1);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_store(mt, 51, (void *)0x67);
+ mtree_test_insert(mt, 611, (void *)0x4c7);
+ mtree_test_insert(mt, 485, (void *)0x3cb);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_erase(mt, 1);
+ mtree_test_insert(mt, 0, (void *)0x1);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_insert_range(mt, 26, 1, (void *)0x35);
+ mtree_test_load(mt, 1);
+ mtree_test_store_range(mt, 1, 22, (void *)0x3);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_erase(mt, 1);
+ mtree_test_load(mt, 53);
+ mtree_test_load(mt, 1);
+ mtree_test_store_range(mt, 1, 1, (void *)0x3);
+ mtree_test_insert(mt, 222, (void *)0x1bd);
+ mtree_test_insert(mt, 485, (void *)0x3cb);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_erase(mt, 1);
+ mtree_test_load(mt, 0);
+ mtree_test_insert(mt, 21, (void *)0x2b);
+ mtree_test_insert(mt, 3, (void *)0x7);
+ mtree_test_store(mt, 621, (void *)0x4db);
+ mtree_test_insert(mt, 0, (void *)0x1);
+ mtree_test_erase(mt, 5);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_store(mt, 62, (void *)0x7d);
+ mtree_test_erase(mt, 62);
+ mtree_test_store_range(mt, 1, 0, (void *)0x3);
+ mtree_test_insert(mt, 22, (void *)0x2d);
+ mtree_test_insert(mt, 12, (void *)0x19);
+ mtree_test_erase(mt, 1);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_store_range(mt, 4, 62, (void *)0x9);
+ mtree_test_erase(mt, 62);
+ mtree_test_erase(mt, 1);
+ mtree_test_load(mt, 1);
+ mtree_test_store_range(mt, 1, 22, (void *)0x3);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_erase(mt, 1);
+ mtree_test_load(mt, 53);
+ mtree_test_load(mt, 1);
+ mtree_test_store_range(mt, 1, 1, (void *)0x3);
+ mtree_test_insert(mt, 222, (void *)0x1bd);
+ mtree_test_insert(mt, 485, (void *)0x3cb);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_erase(mt, 1);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_load(mt, 0);
+ mtree_test_load(mt, 0);
+ mtree_destroy(mt);
+
+	/*
+	 * 7. The previous fix was incomplete: fix mas_reuse_node() clearing
+	 * of old data by overwriting it first - that way metadata is of no
+	 * concern.
+	 */
+ mt_init_flags(mt, 0);
+ mtree_test_load(mt, 1);
+ mtree_test_insert(mt, 102, (void *)0xcd);
+ mtree_test_erase(mt, 2);
+ mtree_test_erase(mt, 0);
+ mtree_test_load(mt, 0);
+ mtree_test_insert(mt, 4, (void *)0x9);
+ mtree_test_insert(mt, 2, (void *)0x5);
+ mtree_test_insert(mt, 110, (void *)0xdd);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_insert_range(mt, 5, 0, (void *)0xb);
+ mtree_test_erase(mt, 2);
+ mtree_test_store(mt, 0, (void *)0x1);
+ mtree_test_store(mt, 112, (void *)0xe1);
+ mtree_test_insert(mt, 21, (void *)0x2b);
+ mtree_test_store(mt, 1, (void *)0x3);
+ mtree_test_insert_range(mt, 110, 2, (void *)0xdd);
+ mtree_test_store(mt, 2, (void *)0x5);
+ mtree_test_load(mt, 22);
+ mtree_test_erase(mt, 2);
+ mtree_test_store(mt, 210, (void *)0x1a5);
+ mtree_test_store_range(mt, 0, 2, (void *)0x1);
+ mtree_test_store(mt, 2, (void *)0x5);
+ mtree_test_erase(mt, 2);
+ mtree_test_erase(mt, 22);
+ mtree_test_erase(mt, 1);
+ mtree_test_erase(mt, 2);
+ mtree_test_store(mt, 0, (void *)0x1);
+ mtree_test_load(mt, 112);
+ mtree_test_insert(mt, 2, (void *)0x5);
+ mtree_test_erase(mt, 2);
+ mtree_test_store(mt, 1, (void *)0x3);
+ mtree_test_insert_range(mt, 1, 2, (void *)0x3);
+ mtree_test_erase(mt, 0);
+ mtree_test_erase(mt, 2);
+ mtree_test_store(mt, 2, (void *)0x5);
+ mtree_test_erase(mt, 0);
+ mtree_test_erase(mt, 2);
+ mtree_test_store(mt, 0, (void *)0x1);
+ mtree_test_store(mt, 0, (void *)0x1);
+ mtree_test_erase(mt, 2);
+ mtree_test_store(mt, 2, (void *)0x5);
+ mtree_test_erase(mt, 2);
+ mtree_test_insert(mt, 2, (void *)0x5);
+ mtree_test_insert_range(mt, 1, 2, (void *)0x3);
+ mtree_test_erase(mt, 0);
+ mtree_test_erase(mt, 2);
+ mtree_test_store(mt, 0, (void *)0x1);
+ mtree_test_load(mt, 112);
+ mtree_test_store_range(mt, 110, 12, (void *)0xdd);
+ mtree_test_store(mt, 2, (void *)0x5);
+ mtree_test_load(mt, 110);
+ mtree_test_insert_range(mt, 4, 71, (void *)0x9);
+ mtree_test_load(mt, 2);
+ mtree_test_store(mt, 2, (void *)0x5);
+ mtree_test_insert_range(mt, 11, 22, (void *)0x17);
+ mtree_test_erase(mt, 12);
+ mtree_test_store(mt, 2, (void *)0x5);
+ mtree_test_load(mt, 22);
+ mtree_destroy(mt);
+
+	/*
+	 * 8. When rebalancing or in spanning_rebalance(), the max of the new
+	 * node may incorrectly be set to the final pivot rather than the
+	 * correct max.
+	 * Fixed by setting the left max to the original right max when the
+	 * entire node is consumed.
+	 */
+ mt_init_flags(mt, 0);
+ mtree_test_store(mt, 6, (void *)0xd);
+ mtree_test_store(mt, 67, (void *)0x87);
+ mtree_test_insert(mt, 15, (void *)0x1f);
+ mtree_test_insert(mt, 6716, (void *)0x3479);
+ mtree_test_store(mt, 61, (void *)0x7b);
+ mtree_test_insert(mt, 13, (void *)0x1b);
+ mtree_test_store(mt, 8, (void *)0x11);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_load(mt, 0);
+ mtree_test_erase(mt, 67167);
+ mtree_test_insert_range(mt, 6, 7167, (void *)0xd);
+ mtree_test_insert(mt, 6, (void *)0xd);
+ mtree_test_erase(mt, 67);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_erase(mt, 667167);
+ mtree_test_insert(mt, 6, (void *)0xd);
+ mtree_test_store(mt, 67, (void *)0x87);
+ mtree_test_insert(mt, 5, (void *)0xb);
+ mtree_test_erase(mt, 1);
+ mtree_test_insert(mt, 6, (void *)0xd);
+ mtree_test_erase(mt, 67);
+ mtree_test_insert(mt, 15, (void *)0x1f);
+ mtree_test_insert(mt, 67167, (void *)0x20cbf);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_load(mt, 7);
+ mtree_test_insert(mt, 16, (void *)0x21);
+ mtree_test_insert(mt, 36, (void *)0x49);
+ mtree_test_store(mt, 67, (void *)0x87);
+ mtree_test_store(mt, 6, (void *)0xd);
+ mtree_test_insert(mt, 367, (void *)0x2df);
+ mtree_test_insert(mt, 115, (void *)0xe7);
+ mtree_test_store(mt, 0, (void *)0x1);
+ mtree_test_store_range(mt, 1, 3, (void *)0x3);
+ mtree_test_store(mt, 1, (void *)0x3);
+ mtree_test_erase(mt, 67167);
+ mtree_test_insert_range(mt, 6, 47, (void *)0xd);
+ mtree_test_store(mt, 1, (void *)0x3);
+ mtree_test_insert_range(mt, 1, 67, (void *)0x3);
+ mtree_test_load(mt, 67);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_erase(mt, 67167);
+ mtree_destroy(mt);
+
+	/*
+	 * 9. A spanning store to the end of data caused an invalid metadata
+	 * length, which eventually resulted in a crash.
+	 * Fixed by checking whether there is a value in the pivot before
+	 * incrementing the metadata end in mab_mas_cp(). To ensure this
+	 * doesn't happen again, the two locations where this happens were
+	 * abstracted into a function called mas_leaf_set_meta().
+	 */
+ mt_init_flags(mt, 0);
+ mtree_test_insert(mt, 21, (void *)0x2b);
+ mtree_test_insert(mt, 12, (void *)0x19);
+ mtree_test_insert(mt, 6, (void *)0xd);
+ mtree_test_insert(mt, 8, (void *)0x11);
+ mtree_test_insert(mt, 2, (void *)0x5);
+ mtree_test_insert(mt, 91, (void *)0xb7);
+ mtree_test_insert(mt, 18, (void *)0x25);
+ mtree_test_insert(mt, 81, (void *)0xa3);
+ mtree_test_store_range(mt, 0, 128, (void *)0x1);
+ mtree_test_store(mt, 1, (void *)0x3);
+ mtree_test_erase(mt, 8);
+ mtree_test_insert(mt, 11, (void *)0x17);
+ mtree_test_insert(mt, 8, (void *)0x11);
+ mtree_test_insert(mt, 21, (void *)0x2b);
+ mtree_test_insert(mt, 2, (void *)0x5);
+ mtree_test_insert(mt, ULONG_MAX - 10, (void *)0xffffffffffffffeb);
+ mtree_test_erase(mt, ULONG_MAX - 10);
+ mtree_test_store_range(mt, 0, 281, (void *)0x1);
+ mtree_test_erase(mt, 2);
+ mtree_test_insert(mt, 1211, (void *)0x977);
+ mtree_test_insert(mt, 111, (void *)0xdf);
+ mtree_test_insert(mt, 13, (void *)0x1b);
+ mtree_test_insert(mt, 211, (void *)0x1a7);
+ mtree_test_insert(mt, 11, (void *)0x17);
+ mtree_test_insert(mt, 5, (void *)0xb);
+ mtree_test_insert(mt, 1218, (void *)0x985);
+ mtree_test_insert(mt, 61, (void *)0x7b);
+ mtree_test_store(mt, 1, (void *)0x3);
+ mtree_test_insert(mt, 121, (void *)0xf3);
+ mtree_test_insert(mt, 8, (void *)0x11);
+ mtree_test_insert(mt, 21, (void *)0x2b);
+ mtree_test_insert(mt, 2, (void *)0x5);
+ mtree_test_insert(mt, ULONG_MAX - 10, (void *)0xffffffffffffffeb);
+ mtree_test_erase(mt, ULONG_MAX - 10);
+}
+
+/* Duplicate the tree with a specific gap */
+static noinline void __init check_dup_gaps(struct maple_tree *mt,
+ unsigned long nr_entries, bool zero_start,
+ unsigned long gap)
+{
+ unsigned long i = 0;
+ struct maple_tree newmt;
+ int ret;
+ void *tmp;
+ MA_STATE(mas, mt, 0, 0);
+ MA_STATE(newmas, &newmt, 0, 0);
+ struct rw_semaphore newmt_lock;
+
+ init_rwsem(&newmt_lock);
+ mt_set_external_lock(&newmt, &newmt_lock);
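+	/* newmt is protected by newmt_lock rather than the internal lock */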
+
+ if (!zero_start)
+ i = 1;
+
+ mt_zero_nr_tallocated();
+ for (; i <= nr_entries; i++)
+ mtree_store_range(mt, i*10, (i+1)*10 - gap,
+ xa_mk_value(i), GFP_KERNEL);
+
+ mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
+ mt_set_non_kernel(99999);
+ down_write(&newmt_lock);
+ ret = mas_expected_entries(&newmas, nr_entries);
+ mt_set_non_kernel(0);
+ MT_BUG_ON(mt, ret != 0);
+
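+	/*
+	 * Copy every entry under the RCU read lock; mas_expected_entries()
+	 * above preallocated nodes so mas_store() does not need to allocate.
+	 */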
+ rcu_read_lock();
+ mas_for_each(&mas, tmp, ULONG_MAX) {
+ newmas.index = mas.index;
+ newmas.last = mas.last;
+ mas_store(&newmas, tmp);
+ }
+ rcu_read_unlock();
+ mas_destroy(&newmas);
+
+ __mt_destroy(&newmt);
+ up_write(&newmt_lock);
+}
+
+/* Duplicate many sizes of trees. Mainly to test expected entry values */
+static noinline void __init check_dup(struct maple_tree *mt)
+{
+ int i;
+ int big_start = 100010;
+
+ /* Check with a value at zero */
+ for (i = 10; i < 1000; i++) {
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_dup_gaps(mt, i, true, 5);
+ mtree_destroy(mt);
+ rcu_barrier();
+ }
+
+ cond_resched();
+ mt_cache_shrink();
+ /* Check with a value at zero, no gap */
+ for (i = 1000; i < 2000; i++) {
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_dup_gaps(mt, i, true, 0);
+ mtree_destroy(mt);
+ rcu_barrier();
+ }
+
+ cond_resched();
+ mt_cache_shrink();
+ /* Check with a value at zero and unreasonably large */
+ for (i = big_start; i < big_start + 10; i++) {
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_dup_gaps(mt, i, true, 5);
+ mtree_destroy(mt);
+ rcu_barrier();
+ }
+
+ cond_resched();
+ mt_cache_shrink();
+	/* Small to medium size not starting at zero */
+ for (i = 200; i < 1000; i++) {
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_dup_gaps(mt, i, false, 5);
+ mtree_destroy(mt);
+ rcu_barrier();
+ }
+
+ cond_resched();
+ mt_cache_shrink();
+	/* Unreasonably large not starting at zero */
+ for (i = big_start; i < big_start + 10; i++) {
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_dup_gaps(mt, i, false, 5);
+ mtree_destroy(mt);
+ rcu_barrier();
+ cond_resched();
+ mt_cache_shrink();
+ }
+
+ /* Check non-allocation tree not starting at zero */
+ for (i = 1500; i < 3000; i++) {
+ mt_init_flags(mt, 0);
+ check_dup_gaps(mt, i, false, 5);
+ mtree_destroy(mt);
+ rcu_barrier();
+ cond_resched();
+ if (i % 2 == 0)
+ mt_cache_shrink();
+ }
+
+ mt_cache_shrink();
+ /* Check non-allocation tree starting at zero */
+ for (i = 200; i < 1000; i++) {
+ mt_init_flags(mt, 0);
+ check_dup_gaps(mt, i, true, 5);
+ mtree_destroy(mt);
+ rcu_barrier();
+ cond_resched();
+ }
+
+ mt_cache_shrink();
+ /* Unreasonably large */
+ for (i = big_start + 5; i < big_start + 10; i++) {
+ mt_init_flags(mt, 0);
+ check_dup_gaps(mt, i, true, 5);
+ mtree_destroy(mt);
+ rcu_barrier();
+ mt_cache_shrink();
+ cond_resched();
+ }
+}
+
+static noinline void __init check_bnode_min_spanning(struct maple_tree *mt)
+{
+ int i = 50;
+ MA_STATE(mas, mt, 0, 0);
+
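+	/*
+	 * Store 51 adjacent ranges, then overwrite the middle with a single
+	 * spanning NULL store so the rebalance has to combine minimally
+	 * filled nodes.
+	 */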
+ mt_set_non_kernel(9999);
+ mas_lock(&mas);
+ do {
+ mas_set_range(&mas, i*10, i*10+9);
+ mas_store(&mas, check_bnode_min_spanning);
+ } while (i--);
+
+ mas_set_range(&mas, 240, 509);
+ mas_store(&mas, NULL);
+ mas_unlock(&mas);
+ mas_destroy(&mas);
+ mt_set_non_kernel(0);
+}
+
+static noinline void __init check_empty_area_window(struct maple_tree *mt)
+{
+ unsigned long i, nr_entries = 20;
+ MA_STATE(mas, mt, 0, 0);
+
+ for (i = 1; i <= nr_entries; i++)
+ mtree_store_range(mt, i*10, i*10 + 9,
+ xa_mk_value(i), GFP_KERNEL);
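+	/* Entries now cover [10, 209], leaving a hole at indices 0 - 9 */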
+
+ /* Create another hole besides the one at 0 */
+ mtree_store_range(mt, 160, 169, NULL, GFP_KERNEL);
+
+ /* Check lower bounds that don't fit */
+ rcu_read_lock();
+ MT_BUG_ON(mt, mas_empty_area_rev(&mas, 5, 90, 10) != -EBUSY);
+
+ mas_reset(&mas);
+ MT_BUG_ON(mt, mas_empty_area_rev(&mas, 6, 90, 5) != -EBUSY);
+
+ /* Check lower bound that does fit */
+ mas_reset(&mas);
+ MT_BUG_ON(mt, mas_empty_area_rev(&mas, 5, 90, 5) != 0);
+ MT_BUG_ON(mt, mas.index != 5);
+ MT_BUG_ON(mt, mas.last != 9);
+ rcu_read_unlock();
+
+ /* Check one gap that doesn't fit and one that does */
+ rcu_read_lock();
+ mas_reset(&mas);
+ MT_BUG_ON(mt, mas_empty_area_rev(&mas, 5, 217, 9) != 0);
+ MT_BUG_ON(mt, mas.index != 161);
+ MT_BUG_ON(mt, mas.last != 169);
+
+ /* Check one gap that does fit above the min */
+ mas_reset(&mas);
+ MT_BUG_ON(mt, mas_empty_area_rev(&mas, 100, 218, 3) != 0);
+ MT_BUG_ON(mt, mas.index != 216);
+ MT_BUG_ON(mt, mas.last != 218);
+
+ /* Check size that doesn't fit any gap */
+ mas_reset(&mas);
+ MT_BUG_ON(mt, mas_empty_area_rev(&mas, 100, 218, 16) != -EBUSY);
+
+ /*
+ * Check size that doesn't fit the lower end of the window but
+ * does fit the gap
+ */
+ mas_reset(&mas);
+ MT_BUG_ON(mt, mas_empty_area_rev(&mas, 167, 200, 4) != -EBUSY);
+
+ /*
+ * Check size that doesn't fit the upper end of the window but
+ * does fit the gap
+ */
+ mas_reset(&mas);
+ MT_BUG_ON(mt, mas_empty_area_rev(&mas, 100, 162, 4) != -EBUSY);
+
+ /* Check mas_empty_area forward */
+ mas_reset(&mas);
+ MT_BUG_ON(mt, mas_empty_area(&mas, 0, 100, 9) != 0);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 8);
+
+ mas_reset(&mas);
+ MT_BUG_ON(mt, mas_empty_area(&mas, 0, 100, 4) != 0);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 3);
+
+ mas_reset(&mas);
+ MT_BUG_ON(mt, mas_empty_area(&mas, 0, 100, 11) != -EBUSY);
+
+ mas_reset(&mas);
+ MT_BUG_ON(mt, mas_empty_area(&mas, 5, 100, 6) != -EBUSY);
+
+ mas_reset(&mas);
+ MT_BUG_ON(mt, mas_empty_area(&mas, 0, 8, 10) != -EINVAL);
+
+ mas_reset(&mas);
+ mas_empty_area(&mas, 100, 165, 3);
+
+ mas_reset(&mas);
+ MT_BUG_ON(mt, mas_empty_area(&mas, 100, 163, 6) != -EBUSY);
+ rcu_read_unlock();
+}
+
+static noinline void __init check_empty_area_fill(struct maple_tree *mt)
+{
+ const unsigned long max = 0x25D78000;
+ unsigned long size;
+ int loop, shift;
+ MA_STATE(mas, mt, 0, 0);
+
+ mt_set_non_kernel(99999);
+ for (shift = 12; shift <= 16; shift++) {
+ loop = 5000;
+ size = 1 << shift;
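+		/* Repeatedly find the lowest gap of this size and fill it */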
+ while (loop--) {
+ mas_set(&mas, 0);
+ mas_lock(&mas);
+ MT_BUG_ON(mt, mas_empty_area(&mas, 0, max, size) != 0);
+ MT_BUG_ON(mt, mas.last != mas.index + size - 1);
+ mas_store_gfp(&mas, (void *)size, GFP_KERNEL);
+ mas_unlock(&mas);
+ mas_reset(&mas);
+ }
+ }
+
+ /* No space left. */
+ size = 0x1000;
+ rcu_read_lock();
+ MT_BUG_ON(mt, mas_empty_area(&mas, 0, max, size) != -EBUSY);
+ rcu_read_unlock();
+
+ /* Fill a depth 3 node to the maximum */
+ for (unsigned long i = 629440511; i <= 629440800; i += 6)
+ mtree_store_range(mt, i, i + 5, (void *)i, GFP_KERNEL);
+ /* Make space in the second-last depth 4 node */
+ mtree_erase(mt, 631668735);
+ /* Make space in the last depth 4 node */
+ mtree_erase(mt, 629506047);
+ mas_reset(&mas);
+	/* Search from just after the gap in the second-last depth 4 node */
+ rcu_read_lock();
+ MT_BUG_ON(mt, mas_empty_area(&mas, 629506048, 690000000, 0x5000) != 0);
+ rcu_read_unlock();
+ mt_set_non_kernel(0);
+}
+
+/*
+ * Check MAS_START, MAS_PAUSE, active (implied), and MAS_NONE transitions.
+ *
+ * The table below shows the single entry tree (0-0 pointer) and normal tree
+ * with nodes.
+ *
+ * Function ENTRY Start Result index & last
+ * ┬ ┬ ┬ ┬ ┬
+ * │ │ │ │ └─ the final range
+ * │ │ │ └─ The node value after execution
+ * │ │ └─ The node value before execution
+ * │ └─ If the entry exists or does not exist (DNE)
+ * └─ The function name
+ *
+ * Function ENTRY Start Result index & last
+ * mas_next()
+ * - after last
+ * Single entry tree at 0-0
+ * ------------------------
+ * DNE MAS_START MAS_NONE 1 - oo
+ * DNE MAS_PAUSE MAS_NONE 1 - oo
+ * DNE MAS_ROOT MAS_NONE 1 - oo
+ * when index = 0
+ * DNE MAS_NONE MAS_ROOT 0
+ * when index > 0
+ * DNE MAS_NONE MAS_NONE 1 - oo
+ *
+ * Normal tree
+ * -----------
+ * exists MAS_START active range
+ * DNE MAS_START active set to last range
+ * exists MAS_PAUSE active range
+ * DNE MAS_PAUSE active set to last range
+ * exists MAS_NONE active range
+ * exists active active range
+ * DNE active active set to last range
+ * ERANGE active MAS_OVERFLOW last range
+ *
+ * Function ENTRY Start Result index & last
+ * mas_prev()
+ * - before index
+ * Single entry tree at 0-0
+ * ------------------------
+ * if index > 0
+ * exists MAS_START MAS_ROOT 0
+ * exists MAS_PAUSE MAS_ROOT 0
+ * exists MAS_NONE MAS_ROOT 0
+ *
+ * if index == 0
+ * DNE MAS_START MAS_NONE 0
+ * DNE MAS_PAUSE MAS_NONE 0
+ * DNE MAS_NONE MAS_NONE 0
+ * DNE MAS_ROOT MAS_NONE 0
+ *
+ * Normal tree
+ * -----------
+ * exists MAS_START active range
+ * DNE MAS_START active set to min
+ * exists MAS_PAUSE active range
+ * DNE MAS_PAUSE active set to min
+ * exists MAS_NONE active range
+ * DNE MAS_NONE MAS_NONE set to min
+ * any MAS_ROOT MAS_NONE 0
+ * exists active active range
+ * DNE active active last range
+ * ERANGE active MAS_UNDERFLOW last range
+ *
+ * Function ENTRY Start Result index & last
+ * mas_find()
+ * - at index or next
+ * Single entry tree at 0-0
+ * ------------------------
+ * if index > 0
+ * DNE MAS_START MAS_NONE 0
+ * DNE MAS_PAUSE MAS_NONE 0
+ * DNE MAS_ROOT MAS_NONE 0
+ * DNE MAS_NONE MAS_NONE 1
+ * if index == 0
+ * exists MAS_START MAS_ROOT 0
+ * exists MAS_PAUSE MAS_ROOT 0
+ * exists MAS_NONE MAS_ROOT 0
+ *
+ * Normal tree
+ * -----------
+ * exists MAS_START active range
+ * DNE MAS_START active set to max
+ * exists MAS_PAUSE active range
+ * DNE MAS_PAUSE active set to max
+ * exists MAS_NONE active range (start at last)
+ * exists active active range
+ * DNE active active last range (max < last)
+ *
+ * Function ENTRY Start Result index & last
+ * mas_find_rev()
+ * - at index or before
+ * Single entry tree at 0-0
+ * ------------------------
+ * if index > 0
+ * exists MAS_START MAS_ROOT 0
+ * exists MAS_PAUSE MAS_ROOT 0
+ * exists MAS_NONE MAS_ROOT 0
+ * if index == 0
+ * DNE MAS_START MAS_NONE 0
+ * DNE MAS_PAUSE MAS_NONE 0
+ * DNE MAS_NONE MAS_NONE 0
+ * DNE MAS_ROOT MAS_NONE 0
+ *
+ * Normal tree
+ * -----------
+ * exists MAS_START active range
+ * DNE MAS_START active set to min
+ * exists MAS_PAUSE active range
+ * DNE MAS_PAUSE active set to min
+ * exists MAS_NONE active range (start at index)
+ * exists active active range
+ * DNE active active last range (min > index)
+ *
+ * Function ENTRY Start Result index & last
+ * mas_walk()
+ * - Look up index
+ * Single entry tree at 0-0
+ * ------------------------
+ * if index > 0
+ * DNE MAS_START MAS_ROOT 1 - oo
+ * DNE MAS_PAUSE MAS_ROOT 1 - oo
+ * DNE MAS_NONE MAS_ROOT 1 - oo
+ * DNE MAS_ROOT MAS_ROOT 1 - oo
+ * if index == 0
+ * exists MAS_START MAS_ROOT 0
+ * exists MAS_PAUSE MAS_ROOT 0
+ * exists MAS_NONE MAS_ROOT 0
+ * exists MAS_ROOT MAS_ROOT 0
+ *
+ * Normal tree
+ * -----------
+ * exists MAS_START active range
+ * DNE MAS_START active range of NULL
+ * exists MAS_PAUSE active range
+ * DNE MAS_PAUSE active range of NULL
+ * exists MAS_NONE active range
+ * DNE MAS_NONE active range of NULL
+ * exists active active range
+ * DNE active active range of NULL
+ */
+
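+/*
+ * A maple state is "active" when mas.node holds a real tree node rather
+ * than one of the sentinel values tested below.  A minimal sketch of the
+ * transitions this test exercises, assuming a populated tree:
+ *
+ *	MA_STATE(mas, mt, 0, 0);		starts as MAS_START
+ *	entry = mas_find(&mas, ULONG_MAX);	MAS_START -> active
+ *	mas_pause(&mas);			active -> MAS_PAUSE
+ *	entry = mas_find(&mas, ULONG_MAX);	MAS_PAUSE -> active
+ */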
+#define mas_active(x) (((x).node != MAS_ROOT) && \
+ ((x).node != MAS_START) && \
+ ((x).node != MAS_PAUSE) && \
+ ((x).node != MAS_NONE))
+static noinline void __init check_state_handling(struct maple_tree *mt)
+{
+ MA_STATE(mas, mt, 0, 0);
+ void *entry, *ptr = (void *) 0x1234500;
+ void *ptr2 = &ptr;
+ void *ptr3 = &ptr2;
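+	/* Three distinct pointers so each stored range can be told apart */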
+
+ /* Check MAS_ROOT First */
+ mtree_store_range(mt, 0, 0, ptr, GFP_KERNEL);
+
+ mas_lock(&mas);
+	/* prev: start -> underflow */
+ entry = mas_prev(&mas, 0);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.node != MAS_UNDERFLOW);
+
+	/* prev: start -> root */
+ mas_set(&mas, 10);
+ entry = mas_prev(&mas, 0);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 0);
+ MT_BUG_ON(mt, mas.node != MAS_ROOT);
+
+ /* prev: pause -> root */
+ mas_set(&mas, 10);
+ mas_pause(&mas);
+ entry = mas_prev(&mas, 0);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 0);
+ MT_BUG_ON(mt, mas.node != MAS_ROOT);
+
+ /* next: start -> none */
+ mas_set(&mas, 0);
+ entry = mas_next(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, mas.index != 1);
+ MT_BUG_ON(mt, mas.last != ULONG_MAX);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.node != MAS_NONE);
+
+	/* next: start -> none */
+ mas_set(&mas, 10);
+ entry = mas_next(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, mas.index != 1);
+ MT_BUG_ON(mt, mas.last != ULONG_MAX);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.node != MAS_NONE);
+
+ /* find: start -> root */
+ mas_set(&mas, 0);
+ entry = mas_find(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 0);
+ MT_BUG_ON(mt, mas.node != MAS_ROOT);
+
+ /* find: root -> none */
+ entry = mas_find(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 1);
+ MT_BUG_ON(mt, mas.last != ULONG_MAX);
+ MT_BUG_ON(mt, mas.node != MAS_NONE);
+
+ /* find: none -> none */
+ entry = mas_find(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 1);
+ MT_BUG_ON(mt, mas.last != ULONG_MAX);
+ MT_BUG_ON(mt, mas.node != MAS_NONE);
+
+ /* find: start -> none */
+ mas_set(&mas, 10);
+ entry = mas_find(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 1);
+ MT_BUG_ON(mt, mas.last != ULONG_MAX);
+ MT_BUG_ON(mt, mas.node != MAS_NONE);
+
+ /* find_rev: none -> root */
+ entry = mas_find_rev(&mas, 0);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 0);
+ MT_BUG_ON(mt, mas.node != MAS_ROOT);
+
+ /* find_rev: start -> root */
+ mas_set(&mas, 0);
+ entry = mas_find_rev(&mas, 0);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 0);
+ MT_BUG_ON(mt, mas.node != MAS_ROOT);
+
+ /* find_rev: root -> none */
+ entry = mas_find_rev(&mas, 0);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 0);
+ MT_BUG_ON(mt, mas.node != MAS_NONE);
+
+ /* find_rev: none -> none */
+ entry = mas_find_rev(&mas, 0);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 0);
+ MT_BUG_ON(mt, mas.node != MAS_NONE);
+
+ /* find_rev: start -> root */
+ mas_set(&mas, 10);
+ entry = mas_find_rev(&mas, 0);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 0);
+ MT_BUG_ON(mt, mas.node != MAS_ROOT);
+
+ /* walk: start -> none */
+ mas_set(&mas, 10);
+ entry = mas_walk(&mas);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 1);
+ MT_BUG_ON(mt, mas.last != ULONG_MAX);
+ MT_BUG_ON(mt, mas.node != MAS_NONE);
+
+	/* walk: pause -> none */
+ mas_set(&mas, 10);
+ mas_pause(&mas);
+ entry = mas_walk(&mas);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 1);
+ MT_BUG_ON(mt, mas.last != ULONG_MAX);
+ MT_BUG_ON(mt, mas.node != MAS_NONE);
+
+ /* walk: none -> none */
+ mas.index = mas.last = 10;
+ entry = mas_walk(&mas);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 1);
+ MT_BUG_ON(mt, mas.last != ULONG_MAX);
+ MT_BUG_ON(mt, mas.node != MAS_NONE);
+
+ /* walk: none -> none */
+ entry = mas_walk(&mas);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 1);
+ MT_BUG_ON(mt, mas.last != ULONG_MAX);
+ MT_BUG_ON(mt, mas.node != MAS_NONE);
+
+ /* walk: start -> root */
+ mas_set(&mas, 0);
+ entry = mas_walk(&mas);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 0);
+ MT_BUG_ON(mt, mas.node != MAS_ROOT);
+
+ /* walk: pause -> root */
+ mas_set(&mas, 0);
+ mas_pause(&mas);
+ entry = mas_walk(&mas);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 0);
+ MT_BUG_ON(mt, mas.node != MAS_ROOT);
+
+ /* walk: none -> root */
+ mas.node = MAS_NONE;
+ entry = mas_walk(&mas);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 0);
+ MT_BUG_ON(mt, mas.node != MAS_ROOT);
+
+ /* walk: root -> root */
+ entry = mas_walk(&mas);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 0);
+ MT_BUG_ON(mt, mas.node != MAS_ROOT);
+
+ /* walk: root -> none */
+ mas_set(&mas, 10);
+ entry = mas_walk(&mas);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 1);
+ MT_BUG_ON(mt, mas.last != ULONG_MAX);
+ MT_BUG_ON(mt, mas.node != MAS_NONE);
+
+ /* walk: none -> root */
+ mas.index = mas.last = 0;
+ entry = mas_walk(&mas);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 0);
+ MT_BUG_ON(mt, mas.node != MAS_ROOT);
+
+ mas_unlock(&mas);
+
+ /* Check when there is an actual node */
+ mtree_store_range(mt, 0, 0, NULL, GFP_KERNEL);
+ mtree_store_range(mt, 0x1000, 0x1500, ptr, GFP_KERNEL);
+ mtree_store_range(mt, 0x2000, 0x2500, ptr2, GFP_KERNEL);
+ mtree_store_range(mt, 0x3000, 0x3500, ptr3, GFP_KERNEL);
+
+ mas_lock(&mas);
+
+ /* next: start ->active */
+ mas_set(&mas, 0);
+ entry = mas_next(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0x1000);
+ MT_BUG_ON(mt, mas.last != 0x1500);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* next: pause ->active */
+ mas_set(&mas, 0);
+ mas_pause(&mas);
+ entry = mas_next(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0x1000);
+ MT_BUG_ON(mt, mas.last != 0x1500);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* next: none ->active */
+ mas.index = mas.last = 0;
+ mas.offset = 0;
+ mas.node = MAS_NONE;
+ entry = mas_next(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0x1000);
+ MT_BUG_ON(mt, mas.last != 0x1500);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* next:active ->active */
+ entry = mas_next(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != ptr2);
+ MT_BUG_ON(mt, mas.index != 0x2000);
+ MT_BUG_ON(mt, mas.last != 0x2500);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* next:active -> active beyond data */
+ entry = mas_next(&mas, 0x2999);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 0x2501);
+ MT_BUG_ON(mt, mas.last != 0x2fff);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* Continue after last range ends after max */
+ entry = mas_next(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != ptr3);
+ MT_BUG_ON(mt, mas.index != 0x3000);
+ MT_BUG_ON(mt, mas.last != 0x3500);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* next:active -> active continued */
+ entry = mas_next(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 0x3501);
+ MT_BUG_ON(mt, mas.last != ULONG_MAX);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* next:active -> overflow */
+ entry = mas_next(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 0x3501);
+ MT_BUG_ON(mt, mas.last != ULONG_MAX);
+ MT_BUG_ON(mt, mas.node != MAS_OVERFLOW);
+
+ /* next:overflow -> overflow */
+ entry = mas_next(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 0x3501);
+ MT_BUG_ON(mt, mas.last != ULONG_MAX);
+ MT_BUG_ON(mt, mas.node != MAS_OVERFLOW);
+
+ /* prev:overflow -> active */
+ entry = mas_prev(&mas, 0);
+ MT_BUG_ON(mt, entry != ptr3);
+ MT_BUG_ON(mt, mas.index != 0x3000);
+ MT_BUG_ON(mt, mas.last != 0x3500);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* next: none -> active, skip value at location */
+ mas_set(&mas, 0);
+ entry = mas_next(&mas, ULONG_MAX);
+ mas.node = MAS_NONE;
+ mas.offset = 0;
+ entry = mas_next(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != ptr2);
+ MT_BUG_ON(mt, mas.index != 0x2000);
+ MT_BUG_ON(mt, mas.last != 0x2500);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* prev:active ->active */
+ entry = mas_prev(&mas, 0);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0x1000);
+ MT_BUG_ON(mt, mas.last != 0x1500);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* prev:active -> active spanning end range */
+ entry = mas_prev(&mas, 0x0100);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 0x0FFF);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* prev:active -> underflow */
+ entry = mas_prev(&mas, 0);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 0x0FFF);
+ MT_BUG_ON(mt, mas.node != MAS_UNDERFLOW);
+
+ /* prev:underflow -> underflow */
+ entry = mas_prev(&mas, 0);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 0x0FFF);
+ MT_BUG_ON(mt, mas.node != MAS_UNDERFLOW);
+
+ /* next:underflow -> active */
+ entry = mas_next(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0x1000);
+ MT_BUG_ON(mt, mas.last != 0x1500);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* prev:first value -> underflow */
+ entry = mas_prev(&mas, 0x1000);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 0x1000);
+ MT_BUG_ON(mt, mas.last != 0x1500);
+ MT_BUG_ON(mt, mas.node != MAS_UNDERFLOW);
+
+ /* find:underflow -> first value */
+ entry = mas_find(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0x1000);
+ MT_BUG_ON(mt, mas.last != 0x1500);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* prev: pause ->active */
+ mas_set(&mas, 0x3600);
+ entry = mas_prev(&mas, 0);
+ MT_BUG_ON(mt, entry != ptr3);
+ mas_pause(&mas);
+ entry = mas_prev(&mas, 0);
+ MT_BUG_ON(mt, entry != ptr2);
+ MT_BUG_ON(mt, mas.index != 0x2000);
+ MT_BUG_ON(mt, mas.last != 0x2500);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* prev:active -> active spanning min */
+ entry = mas_prev(&mas, 0x1600);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 0x1501);
+ MT_BUG_ON(mt, mas.last != 0x1FFF);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* prev: active ->active, continue */
+ entry = mas_prev(&mas, 0);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0x1000);
+ MT_BUG_ON(mt, mas.last != 0x1500);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* find: start ->active */
+ mas_set(&mas, 0);
+ entry = mas_find(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0x1000);
+ MT_BUG_ON(mt, mas.last != 0x1500);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* find: pause ->active */
+ mas_set(&mas, 0);
+ mas_pause(&mas);
+ entry = mas_find(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0x1000);
+ MT_BUG_ON(mt, mas.last != 0x1500);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+	/* find: start ->active on value */
+ mas_set(&mas, 1200);
+ entry = mas_find(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0x1000);
+ MT_BUG_ON(mt, mas.last != 0x1500);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* find:active ->active */
+ entry = mas_find(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != ptr2);
+ MT_BUG_ON(mt, mas.index != 0x2000);
+ MT_BUG_ON(mt, mas.last != 0x2500);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+	/* find:active -> active (NULL) */
+ entry = mas_find(&mas, 0x2700);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 0x2501);
+ MT_BUG_ON(mt, mas.last != 0x2FFF);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* find: overflow ->active */
+ entry = mas_find(&mas, 0x5000);
+ MT_BUG_ON(mt, entry != ptr3);
+ MT_BUG_ON(mt, mas.index != 0x3000);
+ MT_BUG_ON(mt, mas.last != 0x3500);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+	/* find:active -> active (NULL) end */
+ entry = mas_find(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 0x3501);
+ MT_BUG_ON(mt, mas.last != ULONG_MAX);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* find_rev: active (END) ->active */
+ entry = mas_find_rev(&mas, 0);
+ MT_BUG_ON(mt, entry != ptr3);
+ MT_BUG_ON(mt, mas.index != 0x3000);
+ MT_BUG_ON(mt, mas.last != 0x3500);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* find_rev:active ->active */
+ entry = mas_find_rev(&mas, 0);
+ MT_BUG_ON(mt, entry != ptr2);
+ MT_BUG_ON(mt, mas.index != 0x2000);
+ MT_BUG_ON(mt, mas.last != 0x2500);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* find_rev: pause ->active */
+ mas_pause(&mas);
+ entry = mas_find_rev(&mas, 0);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0x1000);
+ MT_BUG_ON(mt, mas.last != 0x1500);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* find_rev:active -> active */
+ entry = mas_find_rev(&mas, 0);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 0x0FFF);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* find_rev: start ->active */
+ mas_set(&mas, 0x1200);
+ entry = mas_find_rev(&mas, 0);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0x1000);
+ MT_BUG_ON(mt, mas.last != 0x1500);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* mas_walk start ->active */
+ mas_set(&mas, 0x1200);
+ entry = mas_walk(&mas);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0x1000);
+ MT_BUG_ON(mt, mas.last != 0x1500);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* mas_walk start ->active */
+ mas_set(&mas, 0x1600);
+ entry = mas_walk(&mas);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 0x1501);
+ MT_BUG_ON(mt, mas.last != 0x1fff);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* mas_walk pause ->active */
+ mas_set(&mas, 0x1200);
+ mas_pause(&mas);
+ entry = mas_walk(&mas);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0x1000);
+ MT_BUG_ON(mt, mas.last != 0x1500);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* mas_walk pause -> active */
+ mas_set(&mas, 0x1600);
+ mas_pause(&mas);
+ entry = mas_walk(&mas);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 0x1501);
+ MT_BUG_ON(mt, mas.last != 0x1fff);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* mas_walk none -> active */
+ mas_set(&mas, 0x1200);
+ mas.node = MAS_NONE;
+ entry = mas_walk(&mas);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0x1000);
+ MT_BUG_ON(mt, mas.last != 0x1500);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* mas_walk none -> active */
+ mas_set(&mas, 0x1600);
+ mas.node = MAS_NONE;
+ entry = mas_walk(&mas);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 0x1501);
+ MT_BUG_ON(mt, mas.last != 0x1fff);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* mas_walk active -> active */
+ mas.index = 0x1200;
+ mas.last = 0x1200;
+ mas.offset = 0;
+ entry = mas_walk(&mas);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0x1000);
+ MT_BUG_ON(mt, mas.last != 0x1500);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* mas_walk active -> active */
+ mas.index = 0x1600;
+ mas.last = 0x1600;
+ entry = mas_walk(&mas);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 0x1501);
+ MT_BUG_ON(mt, mas.last != 0x1fff);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ mas_unlock(&mas);
+}
+
+static DEFINE_MTREE(tree);
+static int __init maple_tree_seed(void)
+{
+ unsigned long set[] = { 5015, 5014, 5017, 25, 1000,
+ 1001, 1002, 1003, 1005, 0,
+ 5003, 5002};
+ void *ptr = &set;
+
+ pr_info("\nTEST STARTING\n\n");
+
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_root_expand(&tree);
+ mtree_destroy(&tree);
+
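+	/*
+	 * Each BENCH_* build option defines BENCH and jumps to the skip
+	 * label, so only the selected benchmark runs instead of the
+	 * functional tests below.
+	 */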
+#if defined(BENCH_SLOT_STORE)
+#define BENCH
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ bench_slot_store(&tree);
+ mtree_destroy(&tree);
+ goto skip;
+#endif
+#if defined(BENCH_NODE_STORE)
+#define BENCH
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ bench_node_store(&tree);
+ mtree_destroy(&tree);
+ goto skip;
+#endif
+#if defined(BENCH_AWALK)
+#define BENCH
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ bench_awalk(&tree);
+ mtree_destroy(&tree);
+ goto skip;
+#endif
+#if defined(BENCH_WALK)
+#define BENCH
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ bench_walk(&tree);
+ mtree_destroy(&tree);
+ goto skip;
+#endif
+#if defined(BENCH_FORK)
+#define BENCH
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ bench_forking(&tree);
+ mtree_destroy(&tree);
+ goto skip;
+#endif
+#if defined(BENCH_MT_FOR_EACH)
+#define BENCH
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ bench_mt_for_each(&tree);
+ mtree_destroy(&tree);
+ goto skip;
+#endif
+#if defined(BENCH_MAS_FOR_EACH)
+#define BENCH
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ bench_mas_for_each(&tree);
+ mtree_destroy(&tree);
+ goto skip;
+#endif
+#if defined(BENCH_MAS_PREV)
+#define BENCH
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ bench_mas_prev(&tree);
+ mtree_destroy(&tree);
+ goto skip;
+#endif
+
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_iteration(&tree);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_forking(&tree);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_mas_store_gfp(&tree);
+ mtree_destroy(&tree);
+
+ /* Test ranges (store and insert) */
+ mt_init_flags(&tree, 0);
+ check_ranges(&tree);
+ mtree_destroy(&tree);
+
+#if defined(CONFIG_64BIT)
+ /* These tests have ranges outside of 4GB */
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_alloc_range(&tree);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_alloc_rev_range(&tree);
+ mtree_destroy(&tree);
+#endif
+
+ mt_init_flags(&tree, 0);
+
+ check_load(&tree, set[0], NULL); /* See if 5015 -> NULL */
+
+ check_insert(&tree, set[9], &tree); /* Insert 0 */
+ check_load(&tree, set[9], &tree); /* See if 0 -> &tree */
+ check_load(&tree, set[0], NULL); /* See if 5015 -> NULL */
+
+ check_insert(&tree, set[10], ptr); /* Insert 5003 */
+ check_load(&tree, set[9], &tree); /* See if 0 -> &tree */
+ check_load(&tree, set[11], NULL); /* See if 5002 -> NULL */
+ check_load(&tree, set[10], ptr); /* See if 5003 -> ptr */
+
+ /* Clear out the tree */
+ mtree_destroy(&tree);
+
+ /* Try to insert, insert a dup, and load back what was inserted. */
+ mt_init_flags(&tree, 0);
+ check_insert(&tree, set[0], &tree); /* Insert 5015 */
+ check_dup_insert(&tree, set[0], &tree); /* Insert 5015 again */
+ check_load(&tree, set[0], &tree); /* See if 5015 -> &tree */
+
+ /*
+ * Second set of tests try to load a value that doesn't exist, inserts
+ * a second value, then loads the value again
+ */
+ check_load(&tree, set[1], NULL); /* See if 5014 -> NULL */
+ check_insert(&tree, set[1], ptr); /* insert 5014 -> ptr */
+ check_load(&tree, set[1], ptr); /* See if 5014 -> ptr */
+ check_load(&tree, set[0], &tree); /* See if 5015 -> &tree */
+	/*
+	 * Tree currently contains:
+	 * 5014 -> ptr, 5015 -> &tree; everything else -> (nil)
+	 */
+ check_insert(&tree, set[6], ptr); /* insert 1002 -> ptr */
+ check_insert(&tree, set[7], &tree); /* insert 1003 -> &tree */
+
+ check_load(&tree, set[0], &tree); /* See if 5015 -> &tree */
+ check_load(&tree, set[1], ptr); /* See if 5014 -> ptr */
+ check_load(&tree, set[6], ptr); /* See if 1002 -> ptr */
+ check_load(&tree, set[7], &tree); /* 1003 = &tree ? */
+
+ /* Clear out tree */
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, 0);
+ /* Test inserting into a NULL hole. */
+ check_insert(&tree, set[5], ptr); /* insert 1001 -> ptr */
+ check_insert(&tree, set[7], &tree); /* insert 1003 -> &tree */
+ check_insert(&tree, set[6], ptr); /* insert 1002 -> ptr */
+ check_load(&tree, set[5], ptr); /* See if 1001 -> ptr */
+ check_load(&tree, set[6], ptr); /* See if 1002 -> ptr */
+ check_load(&tree, set[7], &tree); /* See if 1003 -> &tree */
+
+ /* Clear out the tree */
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, 0);
+ /*
+ * set[] = {5015, 5014, 5017, 25, 1000,
+ * 1001, 1002, 1003, 1005, 0,
+ * 5003, 5002};
+ */
+
+ check_insert(&tree, set[0], ptr); /* 5015 */
+ check_insert(&tree, set[1], &tree); /* 5014 */
+ check_insert(&tree, set[2], ptr); /* 5017 */
+ check_insert(&tree, set[3], &tree); /* 25 */
+ check_load(&tree, set[0], ptr);
+ check_load(&tree, set[1], &tree);
+ check_load(&tree, set[2], ptr);
+ check_load(&tree, set[3], &tree);
+ check_insert(&tree, set[4], ptr); /* 1000 < Should split. */
+ check_load(&tree, set[0], ptr);
+ check_load(&tree, set[1], &tree);
+ check_load(&tree, set[2], ptr);
+	check_load(&tree, set[3], &tree);	/* 25 */
+ check_load(&tree, set[4], ptr);
+ check_insert(&tree, set[5], &tree); /* 1001 */
+ check_load(&tree, set[0], ptr);
+ check_load(&tree, set[1], &tree);
+ check_load(&tree, set[2], ptr);
+ check_load(&tree, set[3], &tree);
+ check_load(&tree, set[4], ptr);
+ check_load(&tree, set[5], &tree);
+ check_insert(&tree, set[6], ptr);
+ check_load(&tree, set[0], ptr);
+ check_load(&tree, set[1], &tree);
+ check_load(&tree, set[2], ptr);
+ check_load(&tree, set[3], &tree);
+ check_load(&tree, set[4], ptr);
+ check_load(&tree, set[5], &tree);
+ check_load(&tree, set[6], ptr);
+ check_insert(&tree, set[7], &tree);
+ check_load(&tree, set[0], ptr);
+ check_insert(&tree, set[8], ptr);
+
+ check_insert(&tree, set[9], &tree);
+
+ check_load(&tree, set[0], ptr);
+ check_load(&tree, set[1], &tree);
+ check_load(&tree, set[2], ptr);
+ check_load(&tree, set[3], &tree);
+ check_load(&tree, set[4], ptr);
+ check_load(&tree, set[5], &tree);
+ check_load(&tree, set[6], ptr);
+ check_load(&tree, set[9], &tree);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, 0);
+ check_seq(&tree, 16, false);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, 0);
+ check_seq(&tree, 1000, true);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_rev_seq(&tree, 1000, true);
+ mtree_destroy(&tree);
+
+ check_lower_bound_split(&tree);
+ check_upper_bound_split(&tree);
+ check_mid_split(&tree);
+
+ mt_init_flags(&tree, 0);
+ check_next_entry(&tree);
+ check_find(&tree);
+ check_find_2(&tree);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_prev_entry(&tree);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_gap_combining(&tree);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_node_overwrite(&tree);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ next_prev_test(&tree);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_spanning_relatives(&tree);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_rev_find(&tree);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, 0);
+ check_fuzzer(&tree);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_dup(&tree);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_bnode_min_spanning(&tree);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_empty_area_window(&tree);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_empty_area_fill(&tree);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_state_handling(&tree);
+ mtree_destroy(&tree);
+
+#if defined(BENCH)
+skip:
+#endif
+ rcu_barrier();
+ pr_info("maple_tree: %u of %u tests passed\n",
+ atomic_read(&maple_tree_tests_passed),
+ atomic_read(&maple_tree_tests_run));
+ if (atomic_read(&maple_tree_tests_run) ==
+ atomic_read(&maple_tree_tests_passed))
+ return 0;
+
+ return -EINVAL;
+}
+
+static void __exit maple_tree_harvest(void)
+{
+}
+
+module_init(maple_tree_seed);
+module_exit(maple_tree_harvest);
+MODULE_AUTHOR("Liam R. Howlett <Liam.Howlett@Oracle.com>");
+MODULE_LICENSE("GPL");
diff --git a/lib/test_memcat_p.c b/lib/test_memcat_p.c
new file mode 100644
index 000000000..849c477d4
--- /dev/null
+++ b/lib/test_memcat_p.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test cases for memcat_p() in lib/memcat_p.c
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+struct test_struct {
+ int num;
+ unsigned int magic;
+};
+
+#define MAGIC 0xf00ff00f
+/* Size of each of the NULL-terminated input arrays */
+#define INPUT_MAX 128
+/* Expected number of non-NULL elements in the output array */
+#define EXPECT (INPUT_MAX * 2 - 2)
+
+static int __init test_memcat_p_init(void)
+{
+ struct test_struct **in0, **in1, **out, **p;
+ int err = -ENOMEM, i, r, total = 0;
+
+ in0 = kcalloc(INPUT_MAX, sizeof(*in0), GFP_KERNEL);
+ if (!in0)
+ return err;
+
+ in1 = kcalloc(INPUT_MAX, sizeof(*in1), GFP_KERNEL);
+ if (!in1)
+ goto err_free_in0;
+
+ for (i = 0, r = 1; i < INPUT_MAX - 1; i++) {
+ in0[i] = kmalloc(sizeof(**in0), GFP_KERNEL);
+ if (!in0[i])
+ goto err_free_elements;
+
+ in1[i] = kmalloc(sizeof(**in1), GFP_KERNEL);
+ if (!in1[i]) {
+ kfree(in0[i]);
+ goto err_free_elements;
+ }
+
+ /* lifted from test_sort.c */
+ r = (r * 725861) % 6599;
+ in0[i]->num = r;
+ in1[i]->num = -r;
+ in0[i]->magic = MAGIC;
+ in1[i]->magic = MAGIC;
+ }
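+
+	/*
+	 * Every element of in1 negates its in0 counterpart, so the totals
+	 * check below expects the concatenated output to sum to zero.
+	 */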
+
+ in0[i] = in1[i] = NULL;
+
+ out = memcat_p(in0, in1);
+ if (!out)
+ goto err_free_all_elements;
+
+ err = -EINVAL;
+ for (i = 0, p = out; *p && (i < INPUT_MAX * 2 - 1); p++, i++) {
+ total += (*p)->num;
+
+ if ((*p)->magic != MAGIC) {
+ pr_err("test failed: wrong magic at %d: %u\n", i,
+ (*p)->magic);
+ goto err_free_out;
+ }
+ }
+
+ if (total) {
+ pr_err("test failed: expected zero total, got %d\n", total);
+ goto err_free_out;
+ }
+
+ if (i != EXPECT) {
+ pr_err("test failed: expected output size %d, got %d\n",
+ EXPECT, i);
+ goto err_free_out;
+ }
+
+ for (i = 0; i < INPUT_MAX - 1; i++)
+ if (out[i] != in0[i] || out[i + INPUT_MAX - 1] != in1[i]) {
+ pr_err("test failed: wrong element order at %d\n", i);
+ goto err_free_out;
+ }
+
+ err = 0;
+ pr_info("test passed\n");
+
+err_free_out:
+ kfree(out);
+err_free_all_elements:
+ i = INPUT_MAX;
+err_free_elements:
+ for (i--; i >= 0; i--) {
+ kfree(in1[i]);
+ kfree(in0[i]);
+ }
+
+ kfree(in1);
+err_free_in0:
+ kfree(in0);
+
+ return err;
+}
+
+static void __exit test_memcat_p_exit(void)
+{
+}
+
+module_init(test_memcat_p_init);
+module_exit(test_memcat_p_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/lib/test_meminit.c b/lib/test_meminit.c
new file mode 100644
index 000000000..0ae35223d
--- /dev/null
+++ b/lib/test_meminit.c
@@ -0,0 +1,439 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test cases for SL[AOU]B/page initialization at alloc/free time.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+
+#define GARBAGE_INT (0x09A7BA9E)
+#define GARBAGE_BYTE (0x9E)
+
+#define REPORT_FAILURES_IN_FN() \
+ do { \
+ if (failures) \
+ pr_info("%s failed %d out of %d times\n", \
+ __func__, failures, num_tests); \
+ else \
+ pr_info("all %d tests in %s passed\n", \
+ num_tests, __func__); \
+ } while (0)
+
+/* Count the nonzero (i.e. likely uninitialized) bytes in the buffer. */
+static int __init count_nonzero_bytes(void *ptr, size_t size)
+{
+ int i, ret = 0;
+ unsigned char *p = (unsigned char *)ptr;
+
+ for (i = 0; i < size; i++)
+ if (p[i])
+ ret++;
+ return ret;
+}
+
+/* Fill a buffer with garbage, skipping the first |skip| bytes. */
+static void __init fill_with_garbage_skip(void *ptr, int size, size_t skip)
+{
+ unsigned int *p = (unsigned int *)((char *)ptr + skip);
+ int i = 0;
+
+ WARN_ON(skip > size);
+ size -= skip;
+
+ while (size >= sizeof(*p)) {
+ p[i] = GARBAGE_INT;
+ i++;
+ size -= sizeof(*p);
+ }
+ if (size)
+ memset(&p[i], GARBAGE_BYTE, size);
+}
+
+static void __init fill_with_garbage(void *ptr, size_t size)
+{
+ fill_with_garbage_skip(ptr, size, 0);
+}
+
+static int __init do_alloc_pages_order(int order, int *total_failures)
+{
+ struct page *page;
+ void *buf;
+ size_t size = PAGE_SIZE << order;
+
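+	/*
+	 * Dirty a fresh allocation, free it, then allocate again: with
+	 * init-on-alloc/init-on-free enabled, the second buffer should come
+	 * back zeroed.
+	 */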
+ page = alloc_pages(GFP_KERNEL, order);
+ if (!page)
+ goto err;
+ buf = page_address(page);
+ fill_with_garbage(buf, size);
+ __free_pages(page, order);
+
+ page = alloc_pages(GFP_KERNEL, order);
+ if (!page)
+ goto err;
+ buf = page_address(page);
+ if (count_nonzero_bytes(buf, size))
+ (*total_failures)++;
+ fill_with_garbage(buf, size);
+ __free_pages(page, order);
+ return 1;
+err:
+ (*total_failures)++;
+ return 1;
+}
+
+/* Test the page allocator by calling alloc_pages with different orders. */
+static int __init test_pages(int *total_failures)
+{
+ int failures = 0, num_tests = 0;
+ int i;
+
+ for (i = 0; i <= MAX_ORDER; i++)
+ num_tests += do_alloc_pages_order(i, &failures);
+
+ REPORT_FAILURES_IN_FN();
+ *total_failures += failures;
+ return num_tests;
+}
+
+/* Test kmalloc() with given parameters. */
+static int __init do_kmalloc_size(size_t size, int *total_failures)
+{
+ void *buf;
+
+ buf = kmalloc(size, GFP_KERNEL);
+ if (!buf)
+ goto err;
+ fill_with_garbage(buf, size);
+ kfree(buf);
+
+ buf = kmalloc(size, GFP_KERNEL);
+ if (!buf)
+ goto err;
+ if (count_nonzero_bytes(buf, size))
+ (*total_failures)++;
+ fill_with_garbage(buf, size);
+ kfree(buf);
+ return 1;
+err:
+ (*total_failures)++;
+ return 1;
+}
+
+/* Test vmalloc() with given parameters. */
+static int __init do_vmalloc_size(size_t size, int *total_failures)
+{
+ void *buf;
+
+ buf = vmalloc(size);
+ if (!buf)
+ goto err;
+ fill_with_garbage(buf, size);
+ vfree(buf);
+
+ buf = vmalloc(size);
+ if (!buf)
+ goto err;
+ if (count_nonzero_bytes(buf, size))
+ (*total_failures)++;
+ fill_with_garbage(buf, size);
+ vfree(buf);
+ return 1;
+err:
+ (*total_failures)++;
+ return 1;
+}
+
+/* Test kmalloc()/vmalloc() by allocating objects of different sizes. */
+static int __init test_kvmalloc(int *total_failures)
+{
+ int failures = 0, num_tests = 0;
+ int i, size;
+
+ for (i = 0; i < 20; i++) {
+ size = 1 << i;
+ num_tests += do_kmalloc_size(size, &failures);
+ num_tests += do_vmalloc_size(size, &failures);
+ }
+
+ REPORT_FAILURES_IN_FN();
+ *total_failures += failures;
+ return num_tests;
+}
+
+#define CTOR_BYTES (sizeof(unsigned int))
+#define CTOR_PATTERN (0x41414141)
+/* Initialize the first 4 bytes of the object. */
+static void test_ctor(void *obj)
+{
+ *(unsigned int *)obj = CTOR_PATTERN;
+}
+
+/*
+ * Check the invariants for the buffer allocated from a slab cache.
+ * If the cache has a test constructor, the first 4 bytes of the object must
+ * always remain equal to CTOR_PATTERN.
+ * If the cache isn't an RCU-typesafe one, or if the allocation is done with
+ * __GFP_ZERO, then the object contents must be zeroed after allocation.
+ * If the cache is an RCU-typesafe one, the object contents must never be
+ * zeroed after the first use. This is checked by memcmp() in
+ * do_kmem_cache_size().
+ */
+static bool __init check_buf(void *buf, int size, bool want_ctor,
+ bool want_rcu, bool want_zero)
+{
+ int bytes;
+ bool fail = false;
+
+ bytes = count_nonzero_bytes(buf, size);
+ WARN_ON(want_ctor && want_zero);
+ if (want_zero)
+ return bytes;
+ if (want_ctor) {
+ if (*(unsigned int *)buf != CTOR_PATTERN)
+ fail = true;
+ } else {
+ if (bytes)
+ fail = !want_rcu;
+ }
+ return fail;
+}
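+
+/*
+ * Illustration of the cases handled above:
+ * - want_zero:  any non-zero byte in |buf| counts as a failure;
+ * - want_ctor:  only the first CTOR_BYTES are checked against CTOR_PATTERN;
+ * - otherwise:  non-zero bytes fail the check unless the cache is
+ *               RCU-typesafe, in which case stale contents are legal.
+ */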
+
+#define BULK_SIZE 100
+static void *bulk_array[BULK_SIZE];
+
+/*
+ * Test kmem_cache with given parameters:
+ * want_ctor - use a constructor;
+ * want_rcu - use SLAB_TYPESAFE_BY_RCU;
+ * want_zero - use __GFP_ZERO.
+ */
+static int __init do_kmem_cache_size(size_t size, bool want_ctor,
+ bool want_rcu, bool want_zero,
+ int *total_failures)
+{
+ struct kmem_cache *c;
+ int iter;
+ bool fail = false;
+ gfp_t alloc_mask = GFP_KERNEL | (want_zero ? __GFP_ZERO : 0);
+ void *buf, *buf_copy;
+
+ c = kmem_cache_create("test_cache", size, 1,
+ want_rcu ? SLAB_TYPESAFE_BY_RCU : 0,
+ want_ctor ? test_ctor : NULL);
+ for (iter = 0; iter < 10; iter++) {
+ /* Do a test of bulk allocations */
+ if (!want_rcu && !want_ctor) {
+ int ret;
+
+ ret = kmem_cache_alloc_bulk(c, alloc_mask, BULK_SIZE, bulk_array);
+ if (!ret) {
+ fail = true;
+ } else {
+ int i;
+ for (i = 0; i < ret; i++)
+ fail |= check_buf(bulk_array[i], size, want_ctor, want_rcu, want_zero);
+ kmem_cache_free_bulk(c, ret, bulk_array);
+ }
+ }
+
+ buf = kmem_cache_alloc(c, alloc_mask);
+ /* Check that buf is zeroed, if it must be. */
+ fail |= check_buf(buf, size, want_ctor, want_rcu, want_zero);
+ fill_with_garbage_skip(buf, size, want_ctor ? CTOR_BYTES : 0);
+
+ if (!want_rcu) {
+ kmem_cache_free(c, buf);
+ continue;
+ }
+
+ /*
+ * If this is an RCU cache, use a critical section to ensure we
+ * can touch objects after they're freed.
+ */
+ rcu_read_lock();
+ /*
+ * Copy the buffer to check that it's not wiped on
+ * free().
+ */
+ buf_copy = kmalloc(size, GFP_ATOMIC);
+ if (buf_copy)
+ memcpy(buf_copy, buf, size);
+
+ kmem_cache_free(c, buf);
+ /*
+ * Check that |buf| is intact after kmem_cache_free().
+ * |want_zero| is false, because we wrote garbage to
+ * the buffer already.
+ */
+ fail |= check_buf(buf, size, want_ctor, want_rcu,
+ false);
+ if (buf_copy) {
+ fail |= (bool)memcmp(buf, buf_copy, size);
+ kfree(buf_copy);
+ }
+ rcu_read_unlock();
+ }
+ kmem_cache_destroy(c);
+
+ *total_failures += fail;
+ return 1;
+}
+
+/*
+ * Check that the data written to an RCU-allocated object survives
+ * reallocation.
+ */
+static int __init do_kmem_cache_rcu_persistent(int size, int *total_failures)
+{
+ struct kmem_cache *c;
+ void *buf, *buf_contents, *saved_ptr;
+ void **used_objects;
+ int i, iter, maxiter = 1024;
+ bool fail = false;
+
+ c = kmem_cache_create("test_cache", size, size, SLAB_TYPESAFE_BY_RCU,
+ NULL);
+ buf = kmem_cache_alloc(c, GFP_KERNEL);
+ if (!buf)
+ goto out;
+ saved_ptr = buf;
+ fill_with_garbage(buf, size);
+ buf_contents = kmalloc(size, GFP_KERNEL);
+ if (!buf_contents) {
+ kmem_cache_free(c, buf);
+ goto out;
+ }
+ used_objects = kmalloc_array(maxiter, sizeof(void *), GFP_KERNEL);
+ if (!used_objects) {
+ kmem_cache_free(c, buf);
+ kfree(buf_contents);
+ goto out;
+ }
+ memcpy(buf_contents, buf, size);
+ kmem_cache_free(c, buf);
+ /*
+ * Run for a fixed number of iterations. If we never hit saved_ptr,
+ * assume the test passes.
+ */
+ for (iter = 0; iter < maxiter; iter++) {
+ buf = kmem_cache_alloc(c, GFP_KERNEL);
+ used_objects[iter] = buf;
+ if (buf == saved_ptr) {
+ fail = memcmp(buf_contents, buf, size);
+ for (i = 0; i <= iter; i++)
+ kmem_cache_free(c, used_objects[i]);
+ goto free_out;
+ }
+ }
+
+ for (iter = 0; iter < maxiter; iter++)
+ kmem_cache_free(c, used_objects[iter]);
+
+free_out:
+ kfree(buf_contents);
+ kfree(used_objects);
+out:
+ kmem_cache_destroy(c);
+ *total_failures += fail;
+ return 1;
+}
+
+static int __init do_kmem_cache_size_bulk(int size, int *total_failures)
+{
+ struct kmem_cache *c;
+ int i, iter, maxiter = 1024;
+ int num, bytes;
+ bool fail = false;
+ void *objects[10];
+
+ c = kmem_cache_create("test_cache", size, size, 0, NULL);
+ for (iter = 0; (iter < maxiter) && !fail; iter++) {
+ num = kmem_cache_alloc_bulk(c, GFP_KERNEL, ARRAY_SIZE(objects),
+ objects);
+ for (i = 0; i < num; i++) {
+ bytes = count_nonzero_bytes(objects[i], size);
+ if (bytes)
+ fail = true;
+ fill_with_garbage(objects[i], size);
+ }
+
+ if (num)
+ kmem_cache_free_bulk(c, num, objects);
+ }
+ kmem_cache_destroy(c);
+ *total_failures += fail;
+ return 1;
+}
+
+/*
+ * Test kmem_cache allocation by creating caches of different sizes, with and
+ * without constructors, with and without SLAB_TYPESAFE_BY_RCU.
+ */
+static int __init test_kmemcache(int *total_failures)
+{
+ int failures = 0, num_tests = 0;
+ int i, flags, size;
+ bool ctor, rcu, zero;
+
+ for (i = 0; i < 10; i++) {
+ size = 8 << i;
+ for (flags = 0; flags < 8; flags++) {
+ ctor = flags & 1;
+ rcu = flags & 2;
+ zero = flags & 4;
+ if (ctor && zero)
+ continue;
+ num_tests += do_kmem_cache_size(size, ctor, rcu, zero,
+ &failures);
+ }
+ num_tests += do_kmem_cache_size_bulk(size, &failures);
+ }
+ REPORT_FAILURES_IN_FN();
+ *total_failures += failures;
+ return num_tests;
+}
+
+/* Test the behavior of SLAB_TYPESAFE_BY_RCU caches of different sizes. */
+static int __init test_rcu_persistent(int *total_failures)
+{
+ int failures = 0, num_tests = 0;
+ int i, size;
+
+ for (i = 0; i < 10; i++) {
+ size = 8 << i;
+ num_tests += do_kmem_cache_rcu_persistent(size, &failures);
+ }
+ REPORT_FAILURES_IN_FN();
+ *total_failures += failures;
+ return num_tests;
+}
+
+/*
+ * Run the tests. Each test function returns the number of executed tests and
+ * updates |failures| with the number of failed tests.
+ */
+static int __init test_meminit_init(void)
+{
+ int failures = 0, num_tests = 0;
+
+ num_tests += test_pages(&failures);
+ num_tests += test_kvmalloc(&failures);
+ num_tests += test_kmemcache(&failures);
+ num_tests += test_rcu_persistent(&failures);
+
+ if (failures == 0)
+ pr_info("all %d tests passed!\n", num_tests);
+ else
+ pr_info("failures: %d out of %d\n", failures, num_tests);
+
+ return failures ? -EINVAL : 0;
+}
+module_init(test_meminit_init);
+
+MODULE_LICENSE("GPL");
diff --git a/lib/test_min_heap.c b/lib/test_min_heap.c
new file mode 100644
index 000000000..7b01b4387
--- /dev/null
+++ b/lib/test_min_heap.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#define pr_fmt(fmt) "min_heap_test: " fmt
+
+/*
+ * Test cases for the min heap, exercised in both min- and max-heap order.
+ */
+
+#include <linux/log2.h>
+#include <linux/min_heap.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/random.h>
+
+static __init bool less_than(const void *lhs, const void *rhs)
+{
+ return *(int *)lhs < *(int *)rhs;
+}
+
+static __init bool greater_than(const void *lhs, const void *rhs)
+{
+ return *(int *)lhs > *(int *)rhs;
+}
+
+static __init void swap_ints(void *lhs, void *rhs)
+{
+ int temp = *(int *)lhs;
+
+ *(int *)lhs = *(int *)rhs;
+ *(int *)rhs = temp;
+}
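+
+/*
+ * less_than() orders the heap as a min-heap and greater_than() as a
+ * max-heap; every test below is run once with each ordering.
+ */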
+
+static __init int pop_verify_heap(bool min_heap,
+ struct min_heap *heap,
+ const struct min_heap_callbacks *funcs)
+{
+ int *values = heap->data;
+ int err = 0;
+ int last;
+
+ last = values[0];
+ min_heap_pop(heap, funcs);
+ while (heap->nr > 0) {
+ if (min_heap) {
+ if (last > values[0]) {
+ pr_err("error: expected %d <= %d\n", last,
+ values[0]);
+ err++;
+ }
+ } else {
+ if (last < values[0]) {
+ pr_err("error: expected %d >= %d\n", last,
+ values[0]);
+ err++;
+ }
+ }
+ last = values[0];
+ min_heap_pop(heap, funcs);
+ }
+ return err;
+}
+
+static __init int test_heapify_all(bool min_heap)
+{
+ int values[] = { 3, 1, 2, 4, 0x8000000, 0x7FFFFFF, 0,
+ -3, -1, -2, -4, 0x8000000, 0x7FFFFFF };
+ struct min_heap heap = {
+ .data = values,
+ .nr = ARRAY_SIZE(values),
+ .size = ARRAY_SIZE(values),
+ };
+ struct min_heap_callbacks funcs = {
+ .elem_size = sizeof(int),
+ .less = min_heap ? less_than : greater_than,
+ .swp = swap_ints,
+ };
+ int i, err;
+
+ /* Test with known set of values. */
+ min_heapify_all(&heap, &funcs);
+ err = pop_verify_heap(min_heap, &heap, &funcs);
+
+ /* Test with randomly generated values. */
+ heap.nr = ARRAY_SIZE(values);
+ for (i = 0; i < heap.nr; i++)
+ values[i] = get_random_u32();
+
+ min_heapify_all(&heap, &funcs);
+ err += pop_verify_heap(min_heap, &heap, &funcs);
+
+ return err;
+}
+
+static __init int test_heap_push(bool min_heap)
+{
+ const int data[] = { 3, 1, 2, 4, 0x80000000, 0x7FFFFFFF, 0,
+ -3, -1, -2, -4, 0x80000000, 0x7FFFFFFF };
+ int values[ARRAY_SIZE(data)];
+ struct min_heap heap = {
+ .data = values,
+ .nr = 0,
+ .size = ARRAY_SIZE(values),
+ };
+ struct min_heap_callbacks funcs = {
+ .elem_size = sizeof(int),
+ .less = min_heap ? less_than : greater_than,
+ .swp = swap_ints,
+ };
+ int i, temp, err;
+
+ /* Test with known set of values copied from data. */
+ for (i = 0; i < ARRAY_SIZE(data); i++)
+ min_heap_push(&heap, &data[i], &funcs);
+
+ err = pop_verify_heap(min_heap, &heap, &funcs);
+
+ /* Test with randomly generated values. */
+ while (heap.nr < heap.size) {
+ temp = get_random_u32();
+ min_heap_push(&heap, &temp, &funcs);
+ }
+ err += pop_verify_heap(min_heap, &heap, &funcs);
+
+ return err;
+}
+
+static __init int test_heap_pop_push(bool min_heap)
+{
+ const int data[] = { 3, 1, 2, 4, 0x80000000, 0x7FFFFFFF, 0,
+ -3, -1, -2, -4, 0x80000000, 0x7FFFFFFF };
+ int values[ARRAY_SIZE(data)];
+ struct min_heap heap = {
+ .data = values,
+ .nr = 0,
+ .size = ARRAY_SIZE(values),
+ };
+ struct min_heap_callbacks funcs = {
+ .elem_size = sizeof(int),
+ .less = min_heap ? less_than : greater_than,
+ .swp = swap_ints,
+ };
+ int i, temp, err;
+
+ /* Fill values with data to pop and replace. */
+ temp = min_heap ? 0x80000000 : 0x7FFFFFFF;
+ for (i = 0; i < ARRAY_SIZE(data); i++)
+ min_heap_push(&heap, &temp, &funcs);
+
+ /* Test with known set of values copied from data. */
+ for (i = 0; i < ARRAY_SIZE(data); i++)
+ min_heap_pop_push(&heap, &data[i], &funcs);
+
+ err = pop_verify_heap(min_heap, &heap, &funcs);
+
+ heap.nr = 0;
+ for (i = 0; i < ARRAY_SIZE(data); i++)
+ min_heap_push(&heap, &temp, &funcs);
+
+ /* Test with randomly generated values. */
+ for (i = 0; i < ARRAY_SIZE(data); i++) {
+ temp = get_random_u32();
+ min_heap_pop_push(&heap, &temp, &funcs);
+ }
+ err += pop_verify_heap(min_heap, &heap, &funcs);
+
+ return err;
+}
+
+static int __init test_min_heap_init(void)
+{
+ int err = 0;
+
+ err += test_heapify_all(true);
+ err += test_heapify_all(false);
+ err += test_heap_push(true);
+ err += test_heap_push(false);
+ err += test_heap_pop_push(true);
+ err += test_heap_pop_push(false);
+ if (err) {
+ pr_err("test failed with %d errors\n", err);
+ return -EINVAL;
+ }
+ pr_info("test passed\n");
+ return 0;
+}
+module_init(test_min_heap_init);
+
+static void __exit test_min_heap_exit(void)
+{
+ /* do nothing */
+}
+module_exit(test_min_heap_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/lib/test_module.c b/lib/test_module.c
new file mode 100644
index 000000000..debd19e35
--- /dev/null
+++ b/lib/test_module.c
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * This module emits "Hello, world" on printk when loaded.
+ *
+ * It is designed to be used for basic evaluation of the module loading
+ * subsystem (for example when validating module signing/verification). It
+ * lacks any extra dependencies, and will not normally be loaded by the
+ * system unless explicitly requested by name.
+ */
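+
+/*
+ * A minimal smoke test might look like this (hypothetical shell session,
+ * assuming the module was built as test_module.ko):
+ *
+ *   # insmod test_module.ko
+ *   # dmesg | tail -n 1    (expect "test_module: Hello, world")
+ *   # rmmod test_module
+ */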
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+
+static int __init test_module_init(void)
+{
+ pr_warn("Hello, world\n");
+
+ return 0;
+}
+
+module_init(test_module_init);
+
+static void __exit test_module_exit(void)
+{
+ pr_warn("Goodbye\n");
+}
+
+module_exit(test_module_exit);
+
+MODULE_AUTHOR("Kees Cook <keescook@chromium.org>");
+MODULE_LICENSE("GPL");
diff --git a/lib/test_objagg.c b/lib/test_objagg.c
new file mode 100644
index 000000000..c0c957c50
--- /dev/null
+++ b/lib/test_objagg.c
@@ -0,0 +1,1021 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <linux/objagg.h>
+
+struct tokey {
+ unsigned int id;
+};
+
+#define NUM_KEYS 32
+
+static int key_id_index(unsigned int key_id)
+{
+ if (key_id >= NUM_KEYS) {
+ WARN_ON(1);
+ return 0;
+ }
+ return key_id;
+}
+
+#define BUF_LEN 128
+
+struct world {
+ unsigned int root_count;
+ unsigned int delta_count;
+ char next_root_buf[BUF_LEN];
+ struct objagg_obj *objagg_objs[NUM_KEYS];
+ unsigned int key_refs[NUM_KEYS];
+};
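+
+/*
+ * |world| mirrors the state the objagg instance is expected to be in:
+ * live root/delta counts, the buffer used to seed the next root, and a
+ * per-key object pointer plus reference count.
+ */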
+
+struct root {
+ struct tokey key;
+ char buf[BUF_LEN];
+};
+
+struct delta {
+ unsigned int key_id_diff;
+};
+
+static struct objagg_obj *world_obj_get(struct world *world,
+ struct objagg *objagg,
+ unsigned int key_id)
+{
+ struct objagg_obj *objagg_obj;
+ struct tokey key;
+ int err;
+
+ key.id = key_id;
+ objagg_obj = objagg_obj_get(objagg, &key);
+ if (IS_ERR(objagg_obj)) {
+ pr_err("Key %u: Failed to get object.\n", key_id);
+ return objagg_obj;
+ }
+ if (!world->key_refs[key_id_index(key_id)]) {
+ world->objagg_objs[key_id_index(key_id)] = objagg_obj;
+ } else if (world->objagg_objs[key_id_index(key_id)] != objagg_obj) {
+ pr_err("Key %u: God another object for the same key.\n",
+ key_id);
+ err = -EINVAL;
+ goto err_key_id_check;
+ }
+ world->key_refs[key_id_index(key_id)]++;
+ return objagg_obj;
+
+err_key_id_check:
+ objagg_obj_put(objagg, objagg_obj);
+ return ERR_PTR(err);
+}
+
+static void world_obj_put(struct world *world, struct objagg *objagg,
+ unsigned int key_id)
+{
+ struct objagg_obj *objagg_obj;
+
+ if (!world->key_refs[key_id_index(key_id)])
+ return;
+ objagg_obj = world->objagg_objs[key_id_index(key_id)];
+ objagg_obj_put(objagg, objagg_obj);
+ world->key_refs[key_id_index(key_id)]--;
+}
+
+#define MAX_KEY_ID_DIFF 5
+
+static bool delta_check(void *priv, const void *parent_obj, const void *obj)
+{
+ const struct tokey *parent_key = parent_obj;
+ const struct tokey *key = obj;
+ int diff = key->id - parent_key->id;
+
+ return diff >= 0 && diff <= MAX_KEY_ID_DIFF;
+}
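+
+/*
+ * With MAX_KEY_ID_DIFF of 5, for example, key 8 can be expressed as a
+ * delta on top of root 7 (diff 1), but not on top of root 1 (diff 7).
+ */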
+
+static void *delta_create(void *priv, void *parent_obj, void *obj)
+{
+ struct tokey *parent_key = parent_obj;
+ struct world *world = priv;
+ struct tokey *key = obj;
+ int diff = key->id - parent_key->id;
+ struct delta *delta;
+
+ if (!delta_check(priv, parent_obj, obj))
+ return ERR_PTR(-EINVAL);
+
+ delta = kzalloc(sizeof(*delta), GFP_KERNEL);
+ if (!delta)
+ return ERR_PTR(-ENOMEM);
+ delta->key_id_diff = diff;
+ world->delta_count++;
+ return delta;
+}
+
+static void delta_destroy(void *priv, void *delta_priv)
+{
+ struct delta *delta = delta_priv;
+ struct world *world = priv;
+
+ world->delta_count--;
+ kfree(delta);
+}
+
+static void *root_create(void *priv, void *obj, unsigned int id)
+{
+ struct world *world = priv;
+ struct tokey *key = obj;
+ struct root *root;
+
+ root = kzalloc(sizeof(*root), GFP_KERNEL);
+ if (!root)
+ return ERR_PTR(-ENOMEM);
+ memcpy(&root->key, key, sizeof(root->key));
+ memcpy(root->buf, world->next_root_buf, sizeof(root->buf));
+ world->root_count++;
+ return root;
+}
+
+static void root_destroy(void *priv, void *root_priv)
+{
+ struct root *root = root_priv;
+ struct world *world = priv;
+
+ world->root_count--;
+ kfree(root);
+}
+
+static int test_nodelta_obj_get(struct world *world, struct objagg *objagg,
+ unsigned int key_id, bool should_create_root)
+{
+ unsigned int orig_root_count = world->root_count;
+ struct objagg_obj *objagg_obj;
+ const struct root *root;
+ int err;
+
+ if (should_create_root)
+ get_random_bytes(world->next_root_buf,
+ sizeof(world->next_root_buf));
+
+ objagg_obj = world_obj_get(world, objagg, key_id);
+ if (IS_ERR(objagg_obj)) {
+ pr_err("Key %u: Failed to get object.\n", key_id);
+ return PTR_ERR(objagg_obj);
+ }
+ if (should_create_root) {
+ if (world->root_count != orig_root_count + 1) {
+ pr_err("Key %u: Root was not created\n", key_id);
+ err = -EINVAL;
+ goto err_check_root_count;
+ }
+ } else {
+ if (world->root_count != orig_root_count) {
+ pr_err("Key %u: Root was incorrectly created\n",
+ key_id);
+ err = -EINVAL;
+ goto err_check_root_count;
+ }
+ }
+ root = objagg_obj_root_priv(objagg_obj);
+ if (root->key.id != key_id) {
+ pr_err("Key %u: Root has unexpected key id\n", key_id);
+ err = -EINVAL;
+ goto err_check_key_id;
+ }
+ if (should_create_root &&
+ memcmp(world->next_root_buf, root->buf, sizeof(root->buf))) {
+ pr_err("Key %u: Buffer does not match the expected content\n",
+ key_id);
+ err = -EINVAL;
+ goto err_check_buf;
+ }
+ return 0;
+
+err_check_buf:
+err_check_key_id:
+err_check_root_count:
+ objagg_obj_put(objagg, objagg_obj);
+ return err;
+}
+
+static int test_nodelta_obj_put(struct world *world, struct objagg *objagg,
+ unsigned int key_id, bool should_destroy_root)
+{
+ unsigned int orig_root_count = world->root_count;
+
+ world_obj_put(world, objagg, key_id);
+
+ if (should_destroy_root) {
+ if (world->root_count != orig_root_count - 1) {
+ pr_err("Key %u: Root was not destroyed\n", key_id);
+ return -EINVAL;
+ }
+ } else {
+ if (world->root_count != orig_root_count) {
+ pr_err("Key %u: Root was incorrectly destroyed\n",
+ key_id);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static int check_stats_zero(struct objagg *objagg)
+{
+ const struct objagg_stats *stats;
+ int err = 0;
+
+ stats = objagg_stats_get(objagg);
+ if (IS_ERR(stats))
+ return PTR_ERR(stats);
+
+ if (stats->stats_info_count != 0) {
+ pr_err("Stats: Object count is not zero while it should be\n");
+ err = -EINVAL;
+ }
+
+ objagg_stats_put(stats);
+ return err;
+}
+
+static int check_stats_nodelta(struct objagg *objagg)
+{
+ const struct objagg_stats *stats;
+ int i;
+ int err;
+
+ stats = objagg_stats_get(objagg);
+ if (IS_ERR(stats))
+ return PTR_ERR(stats);
+
+ if (stats->stats_info_count != NUM_KEYS) {
+ pr_err("Stats: Unexpected object count (%u expected, %u returned)\n",
+ NUM_KEYS, stats->stats_info_count);
+ err = -EINVAL;
+ goto stats_put;
+ }
+
+ for (i = 0; i < stats->stats_info_count; i++) {
+ if (stats->stats_info[i].stats.user_count != 2) {
+ pr_err("Stats: incorrect user count\n");
+ err = -EINVAL;
+ goto stats_put;
+ }
+ if (stats->stats_info[i].stats.delta_user_count != 2) {
+ pr_err("Stats: incorrect delta user count\n");
+ err = -EINVAL;
+ goto stats_put;
+ }
+ }
+ err = 0;
+
+stats_put:
+ objagg_stats_put(stats);
+ return err;
+}
+
+static bool delta_check_dummy(void *priv, const void *parent_obj,
+ const void *obj)
+{
+ return false;
+}
+
+static void *delta_create_dummy(void *priv, void *parent_obj, void *obj)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static void delta_destroy_dummy(void *priv, void *delta_priv)
+{
+}
+
+static const struct objagg_ops nodelta_ops = {
+ .obj_size = sizeof(struct tokey),
+ .delta_check = delta_check_dummy,
+ .delta_create = delta_create_dummy,
+ .delta_destroy = delta_destroy_dummy,
+ .root_create = root_create,
+ .root_destroy = root_destroy,
+};
+
+static int test_nodelta(void)
+{
+ struct world world = {};
+ struct objagg *objagg;
+ int i;
+ int err;
+
+ objagg = objagg_create(&nodelta_ops, NULL, &world);
+ if (IS_ERR(objagg))
+ return PTR_ERR(objagg);
+
+ err = check_stats_zero(objagg);
+ if (err)
+ goto err_stats_first_zero;
+
+ /* First round of gets, the root objects should be created */
+ for (i = 0; i < NUM_KEYS; i++) {
+ err = test_nodelta_obj_get(&world, objagg, i, true);
+ if (err)
+ goto err_obj_first_get;
+ }
+
+ /* Do the second round of gets; all roots are already created,
+ * so make sure that no new root is created.
+ */
+ for (i = 0; i < NUM_KEYS; i++) {
+ err = test_nodelta_obj_get(&world, objagg, i, false);
+ if (err)
+ goto err_obj_second_get;
+ }
+
+ err = check_stats_nodelta(objagg);
+ if (err)
+ goto err_stats_nodelta;
+
+ for (i = NUM_KEYS - 1; i >= 0; i--) {
+ err = test_nodelta_obj_put(&world, objagg, i, false);
+ if (err)
+ goto err_obj_first_put;
+ }
+ for (i = NUM_KEYS - 1; i >= 0; i--) {
+ err = test_nodelta_obj_put(&world, objagg, i, true);
+ if (err)
+ goto err_obj_second_put;
+ }
+
+ err = check_stats_zero(objagg);
+ if (err)
+ goto err_stats_second_zero;
+
+ objagg_destroy(objagg);
+ return 0;
+
+err_stats_nodelta:
+err_obj_first_put:
+err_obj_second_get:
+ for (i--; i >= 0; i--)
+ world_obj_put(&world, objagg, i);
+
+ i = NUM_KEYS;
+err_obj_first_get:
+err_obj_second_put:
+ for (i--; i >= 0; i--)
+ world_obj_put(&world, objagg, i);
+err_stats_first_zero:
+err_stats_second_zero:
+ objagg_destroy(objagg);
+ return err;
+}
+
+static const struct objagg_ops delta_ops = {
+ .obj_size = sizeof(struct tokey),
+ .delta_check = delta_check,
+ .delta_create = delta_create,
+ .delta_destroy = delta_destroy,
+ .root_create = root_create,
+ .root_destroy = root_destroy,
+};
+
+enum action {
+ ACTION_GET,
+ ACTION_PUT,
+};
+
+enum expect_delta {
+ EXPECT_DELTA_SAME,
+ EXPECT_DELTA_INC,
+ EXPECT_DELTA_DEC,
+};
+
+enum expect_root {
+ EXPECT_ROOT_SAME,
+ EXPECT_ROOT_INC,
+ EXPECT_ROOT_DEC,
+};
+
+struct expect_stats_info {
+ struct objagg_obj_stats stats;
+ bool is_root;
+ unsigned int key_id;
+};
+
+struct expect_stats {
+ unsigned int info_count;
+ struct expect_stats_info info[NUM_KEYS];
+};
+
+struct action_item {
+ unsigned int key_id;
+ enum action action;
+ enum expect_delta expect_delta;
+ enum expect_root expect_root;
+ struct expect_stats expect_stats;
+};
+
+#define EXPECT_STATS(count, ...) \
+{ \
+ .info_count = count, \
+ .info = { __VA_ARGS__ } \
+}
+
+#define ROOT(key_id, user_count, delta_user_count) \
+ {{user_count, delta_user_count}, true, key_id}
+
+#define DELTA(key_id, user_count) \
+ {{user_count, user_count}, false, key_id}
+
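+/*
+ * The trailing comment on each action item tracks the expected state:
+ * "r:" lists root users by key (one entry per user) and "d:" lists
+ * delta users as key^root, e.g. 3^1 is key 3 expressed as a delta on
+ * top of root 1.
+ */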
+static const struct action_item action_items[] = {
+ {
+ 1, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_INC,
+ EXPECT_STATS(1, ROOT(1, 1, 1)),
+ }, /* r: 1 d: */
+ {
+ 7, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_INC,
+ EXPECT_STATS(2, ROOT(1, 1, 1), ROOT(7, 1, 1)),
+ }, /* r: 1, 7 d: */
+ {
+ 3, ACTION_GET, EXPECT_DELTA_INC, EXPECT_ROOT_SAME,
+ EXPECT_STATS(3, ROOT(1, 1, 2), ROOT(7, 1, 1),
+ DELTA(3, 1)),
+ }, /* r: 1, 7 d: 3^1 */
+ {
+ 5, ACTION_GET, EXPECT_DELTA_INC, EXPECT_ROOT_SAME,
+ EXPECT_STATS(4, ROOT(1, 1, 3), ROOT(7, 1, 1),
+ DELTA(3, 1), DELTA(5, 1)),
+ }, /* r: 1, 7 d: 3^1, 5^1 */
+ {
+ 3, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
+ EXPECT_STATS(4, ROOT(1, 1, 4), ROOT(7, 1, 1),
+ DELTA(3, 2), DELTA(5, 1)),
+ }, /* r: 1, 7 d: 3^1, 3^1, 5^1 */
+ {
+ 1, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
+ EXPECT_STATS(4, ROOT(1, 2, 5), ROOT(7, 1, 1),
+ DELTA(3, 2), DELTA(5, 1)),
+ }, /* r: 1, 1, 7 d: 3^1, 3^1, 5^1 */
+ {
+ 30, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_INC,
+ EXPECT_STATS(5, ROOT(1, 2, 5), ROOT(7, 1, 1), ROOT(30, 1, 1),
+ DELTA(3, 2), DELTA(5, 1)),
+ }, /* r: 1, 1, 7, 30 d: 3^1, 3^1, 5^1 */
+ {
+ 8, ACTION_GET, EXPECT_DELTA_INC, EXPECT_ROOT_SAME,
+ EXPECT_STATS(6, ROOT(1, 2, 5), ROOT(7, 1, 2), ROOT(30, 1, 1),
+ DELTA(3, 2), DELTA(5, 1), DELTA(8, 1)),
+ }, /* r: 1, 1, 7, 30 d: 3^1, 3^1, 5^1, 8^7 */
+ {
+ 8, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
+ EXPECT_STATS(6, ROOT(1, 2, 5), ROOT(7, 1, 3), ROOT(30, 1, 1),
+ DELTA(3, 2), DELTA(8, 2), DELTA(5, 1)),
+ }, /* r: 1, 1, 7, 30 d: 3^1, 3^1, 5^1, 8^7, 8^7 */
+ {
+ 3, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
+ EXPECT_STATS(6, ROOT(1, 2, 4), ROOT(7, 1, 3), ROOT(30, 1, 1),
+ DELTA(8, 2), DELTA(3, 1), DELTA(5, 1)),
+ }, /* r: 1, 1, 7, 30 d: 3^1, 5^1, 8^7, 8^7 */
+ {
+ 3, ACTION_PUT, EXPECT_DELTA_DEC, EXPECT_ROOT_SAME,
+ EXPECT_STATS(5, ROOT(1, 2, 3), ROOT(7, 1, 3), ROOT(30, 1, 1),
+ DELTA(8, 2), DELTA(5, 1)),
+ }, /* r: 1, 1, 7, 30 d: 5^1, 8^7, 8^7 */
+ {
+ 1, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
+ EXPECT_STATS(5, ROOT(7, 1, 3), ROOT(1, 1, 2), ROOT(30, 1, 1),
+ DELTA(8, 2), DELTA(5, 1)),
+ }, /* r: 1, 7, 30 d: 5^1, 8^7, 8^7 */
+ {
+ 1, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
+ EXPECT_STATS(5, ROOT(7, 1, 3), ROOT(30, 1, 1), ROOT(1, 0, 1),
+ DELTA(8, 2), DELTA(5, 1)),
+ }, /* r: 7, 30 d: 5^1, 8^7, 8^7 */
+ {
+ 5, ACTION_PUT, EXPECT_DELTA_DEC, EXPECT_ROOT_DEC,
+ EXPECT_STATS(3, ROOT(7, 1, 3), ROOT(30, 1, 1),
+ DELTA(8, 2)),
+ }, /* r: 7, 30 d: 8^7, 8^7 */
+ {
+ 5, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_INC,
+ EXPECT_STATS(4, ROOT(7, 1, 3), ROOT(30, 1, 1), ROOT(5, 1, 1),
+ DELTA(8, 2)),
+ }, /* r: 7, 30, 5 d: 8^7, 8^7 */
+ {
+ 6, ACTION_GET, EXPECT_DELTA_INC, EXPECT_ROOT_SAME,
+ EXPECT_STATS(5, ROOT(7, 1, 3), ROOT(5, 1, 2), ROOT(30, 1, 1),
+ DELTA(8, 2), DELTA(6, 1)),
+ }, /* r: 7, 30, 5 d: 8^7, 8^7, 6^5 */
+ {
+ 8, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
+ EXPECT_STATS(5, ROOT(7, 1, 4), ROOT(5, 1, 2), ROOT(30, 1, 1),
+ DELTA(8, 3), DELTA(6, 1)),
+ }, /* r: 7, 30, 5 d: 8^7, 8^7, 8^7, 6^5 */
+ {
+ 8, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
+ EXPECT_STATS(5, ROOT(7, 1, 3), ROOT(5, 1, 2), ROOT(30, 1, 1),
+ DELTA(8, 2), DELTA(6, 1)),
+ }, /* r: 7, 30, 5 d: 8^7, 8^7, 6^5 */
+ {
+ 8, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
+ EXPECT_STATS(5, ROOT(7, 1, 2), ROOT(5, 1, 2), ROOT(30, 1, 1),
+ DELTA(8, 1), DELTA(6, 1)),
+ }, /* r: 7, 30, 5 d: 8^7, 6^5 */
+ {
+ 8, ACTION_PUT, EXPECT_DELTA_DEC, EXPECT_ROOT_SAME,
+ EXPECT_STATS(4, ROOT(5, 1, 2), ROOT(7, 1, 1), ROOT(30, 1, 1),
+ DELTA(6, 1)),
+ }, /* r: 7, 30, 5 d: 6^5 */
+ {
+ 8, ACTION_GET, EXPECT_DELTA_INC, EXPECT_ROOT_SAME,
+ EXPECT_STATS(5, ROOT(5, 1, 3), ROOT(7, 1, 1), ROOT(30, 1, 1),
+ DELTA(6, 1), DELTA(8, 1)),
+ }, /* r: 7, 30, 5 d: 6^5, 8^5 */
+ {
+ 7, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_DEC,
+ EXPECT_STATS(4, ROOT(5, 1, 3), ROOT(30, 1, 1),
+ DELTA(6, 1), DELTA(8, 1)),
+ }, /* r: 30, 5 d: 6^5, 8^5 */
+ {
+ 30, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_DEC,
+ EXPECT_STATS(3, ROOT(5, 1, 3),
+ DELTA(6, 1), DELTA(8, 1)),
+ }, /* r: 5 d: 6^5, 8^5 */
+ {
+ 5, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
+ EXPECT_STATS(3, ROOT(5, 0, 2),
+ DELTA(6, 1), DELTA(8, 1)),
+ }, /* r: d: 6^5, 8^5 */
+ {
+ 6, ACTION_PUT, EXPECT_DELTA_DEC, EXPECT_ROOT_SAME,
+ EXPECT_STATS(2, ROOT(5, 0, 1),
+ DELTA(8, 1)),
+ }, /* r: d: 6^5 */
+ {
+ 8, ACTION_PUT, EXPECT_DELTA_DEC, EXPECT_ROOT_DEC,
+ EXPECT_STATS(0, ),
+ }, /* r: d: */
+};
+
+static int check_expect(struct world *world,
+ const struct action_item *action_item,
+ unsigned int orig_delta_count,
+ unsigned int orig_root_count)
+{
+ unsigned int key_id = action_item->key_id;
+
+ switch (action_item->expect_delta) {
+ case EXPECT_DELTA_SAME:
+ if (orig_delta_count != world->delta_count) {
+ pr_err("Key %u: Delta count changed while expected to remain the same.\n",
+ key_id);
+ return -EINVAL;
+ }
+ break;
+ case EXPECT_DELTA_INC:
+ if (WARN_ON(action_item->action == ACTION_PUT))
+ return -EINVAL;
+ if (orig_delta_count + 1 != world->delta_count) {
+ pr_err("Key %u: Delta count was not incremented.\n",
+ key_id);
+ return -EINVAL;
+ }
+ break;
+ case EXPECT_DELTA_DEC:
+ if (WARN_ON(action_item->action == ACTION_GET))
+ return -EINVAL;
+ if (orig_delta_count - 1 != world->delta_count) {
+ pr_err("Key %u: Delta count was not decremented.\n",
+ key_id);
+ return -EINVAL;
+ }
+ break;
+ }
+
+ switch (action_item->expect_root) {
+ case EXPECT_ROOT_SAME:
+ if (orig_root_count != world->root_count) {
+ pr_err("Key %u: Root count changed while expected to remain the same.\n",
+ key_id);
+ return -EINVAL;
+ }
+ break;
+ case EXPECT_ROOT_INC:
+ if (WARN_ON(action_item->action == ACTION_PUT))
+ return -EINVAL;
+ if (orig_root_count + 1 != world->root_count) {
+ pr_err("Key %u: Root count was not incremented.\n",
+ key_id);
+ return -EINVAL;
+ }
+ break;
+ case EXPECT_ROOT_DEC:
+ if (WARN_ON(action_item->action == ACTION_GET))
+ return -EINVAL;
+ if (orig_root_count - 1 != world->root_count) {
+ pr_err("Key %u: Root count was not decremented.\n",
+ key_id);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static unsigned int obj_to_key_id(struct objagg_obj *objagg_obj)
+{
+ const struct tokey *root_key;
+ const struct delta *delta;
+ unsigned int key_id;
+
+ root_key = objagg_obj_root_priv(objagg_obj);
+ key_id = root_key->id;
+ delta = objagg_obj_delta_priv(objagg_obj);
+ if (delta)
+ key_id += delta->key_id_diff;
+ return key_id;
+}
+
+static int
+check_expect_stats_nums(const struct objagg_obj_stats_info *stats_info,
+ const struct expect_stats_info *expect_stats_info,
+ const char **errmsg)
+{
+ if (stats_info->is_root != expect_stats_info->is_root) {
+ if (errmsg)
+ *errmsg = "Incorrect root/delta indication";
+ return -EINVAL;
+ }
+ if (stats_info->stats.user_count !=
+ expect_stats_info->stats.user_count) {
+ if (errmsg)
+ *errmsg = "Incorrect user count";
+ return -EINVAL;
+ }
+ if (stats_info->stats.delta_user_count !=
+ expect_stats_info->stats.delta_user_count) {
+ if (errmsg)
+ *errmsg = "Incorrect delta user count";
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int
+check_expect_stats_key_id(const struct objagg_obj_stats_info *stats_info,
+ const struct expect_stats_info *expect_stats_info,
+ const char **errmsg)
+{
+ if (obj_to_key_id(stats_info->objagg_obj) !=
+ expect_stats_info->key_id) {
+ if (errmsg)
+ *errmsg = "incorrect key id";
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int check_expect_stats_neigh(const struct objagg_stats *stats,
+ const struct expect_stats *expect_stats,
+ int pos)
+{
+ int i;
+ int err;
+
+ for (i = pos - 1; i >= 0; i--) {
+ err = check_expect_stats_nums(&stats->stats_info[i],
+ &expect_stats->info[pos], NULL);
+ if (err)
+ break;
+ err = check_expect_stats_key_id(&stats->stats_info[i],
+ &expect_stats->info[pos], NULL);
+ if (!err)
+ return 0;
+ }
+ for (i = pos + 1; i < stats->stats_info_count; i++) {
+ err = check_expect_stats_nums(&stats->stats_info[i],
+ &expect_stats->info[pos], NULL);
+ if (err)
+ break;
+ err = check_expect_stats_key_id(&stats->stats_info[i],
+ &expect_stats->info[pos], NULL);
+ if (!err)
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int __check_expect_stats(const struct objagg_stats *stats,
+ const struct expect_stats *expect_stats,
+ const char **errmsg)
+{
+ int i;
+ int err;
+
+ if (stats->stats_info_count != expect_stats->info_count) {
+ *errmsg = "Unexpected object count";
+ return -EINVAL;
+ }
+
+ for (i = 0; i < stats->stats_info_count; i++) {
+ err = check_expect_stats_nums(&stats->stats_info[i],
+ &expect_stats->info[i], errmsg);
+ if (err)
+ return err;
+ err = check_expect_stats_key_id(&stats->stats_info[i],
+ &expect_stats->info[i], errmsg);
+ if (err) {
+ /* It is possible that one of the neighbor stats with the
+ * same numbers has the correct key id, so check it.
+ */
+ err = check_expect_stats_neigh(stats, expect_stats, i);
+ if (err)
+ return err;
+ }
+ }
+ return 0;
+}
+
+static int check_expect_stats(struct objagg *objagg,
+ const struct expect_stats *expect_stats,
+ const char **errmsg)
+{
+ const struct objagg_stats *stats;
+ int err;
+
+ stats = objagg_stats_get(objagg);
+ if (IS_ERR(stats)) {
+ *errmsg = "objagg_stats_get() failed.";
+ return PTR_ERR(stats);
+ }
+ err = __check_expect_stats(stats, expect_stats, errmsg);
+ objagg_stats_put(stats);
+ return err;
+}
+
+static int test_delta_action_item(struct world *world,
+ struct objagg *objagg,
+ const struct action_item *action_item,
+ bool inverse)
+{
+ unsigned int orig_delta_count = world->delta_count;
+ unsigned int orig_root_count = world->root_count;
+ unsigned int key_id = action_item->key_id;
+ enum action action = action_item->action;
+ struct objagg_obj *objagg_obj;
+ const char *errmsg;
+ int err;
+
+ if (inverse)
+ action = action == ACTION_GET ? ACTION_PUT : ACTION_GET;
+
+ switch (action) {
+ case ACTION_GET:
+ objagg_obj = world_obj_get(world, objagg, key_id);
+ if (IS_ERR(objagg_obj))
+ return PTR_ERR(objagg_obj);
+ break;
+ case ACTION_PUT:
+ world_obj_put(world, objagg, key_id);
+ break;
+ }
+
+ if (inverse)
+ return 0;
+ err = check_expect(world, action_item,
+ orig_delta_count, orig_root_count);
+ if (err)
+ goto errout;
+
+ err = check_expect_stats(objagg, &action_item->expect_stats, &errmsg);
+ if (err) {
+ pr_err("Key %u: Stats: %s\n", action_item->key_id, errmsg);
+ goto errout;
+ }
+
+ return 0;
+
+errout:
+ /* This can only happen when the action is not inverted.
+ * So in case of an error, clean up by doing the inverse
+ * action.
+ */
+ test_delta_action_item(world, objagg, action_item, true);
+ return err;
+}
+
+static int test_delta(void)
+{
+ struct world world = {};
+ struct objagg *objagg;
+ int i;
+ int err;
+
+ objagg = objagg_create(&delta_ops, NULL, &world);
+ if (IS_ERR(objagg))
+ return PTR_ERR(objagg);
+
+ for (i = 0; i < ARRAY_SIZE(action_items); i++) {
+ err = test_delta_action_item(&world, objagg,
+ &action_items[i], false);
+ if (err)
+ goto err_do_action_item;
+ }
+
+ objagg_destroy(objagg);
+ return 0;
+
+err_do_action_item:
+ for (i--; i >= 0; i--)
+ test_delta_action_item(&world, objagg, &action_items[i], true);
+
+ objagg_destroy(objagg);
+ return err;
+}
+
+struct hints_case {
+ const unsigned int *key_ids;
+ size_t key_ids_count;
+ struct expect_stats expect_stats;
+ struct expect_stats expect_stats_hints;
+};
+
+static const unsigned int hints_case_key_ids[] = {
+ 1, 7, 3, 5, 3, 1, 30, 8, 8, 5, 6, 8,
+};
+
+static const struct hints_case hints_case = {
+ .key_ids = hints_case_key_ids,
+ .key_ids_count = ARRAY_SIZE(hints_case_key_ids),
+ .expect_stats =
+ EXPECT_STATS(7, ROOT(1, 2, 7), ROOT(7, 1, 4), ROOT(30, 1, 1),
+ DELTA(8, 3), DELTA(3, 2),
+ DELTA(5, 2), DELTA(6, 1)),
+ .expect_stats_hints =
+ EXPECT_STATS(7, ROOT(3, 2, 9), ROOT(1, 2, 2), ROOT(30, 1, 1),
+ DELTA(8, 3), DELTA(5, 2),
+ DELTA(6, 1), DELTA(7, 1)),
+};
+
+static void __pr_debug_stats(const struct objagg_stats *stats)
+{
+ int i;
+
+ for (i = 0; i < stats->stats_info_count; i++)
+ pr_debug("Stat index %d key %u: u %d, d %d, %s\n", i,
+ obj_to_key_id(stats->stats_info[i].objagg_obj),
+ stats->stats_info[i].stats.user_count,
+ stats->stats_info[i].stats.delta_user_count,
+ stats->stats_info[i].is_root ? "root" : "noroot");
+}
+
+static void pr_debug_stats(struct objagg *objagg)
+{
+ const struct objagg_stats *stats;
+
+ stats = objagg_stats_get(objagg);
+ if (IS_ERR(stats))
+ return;
+ __pr_debug_stats(stats);
+ objagg_stats_put(stats);
+}
+
+static void pr_debug_hints_stats(struct objagg_hints *objagg_hints)
+{
+ const struct objagg_stats *stats;
+
+ stats = objagg_hints_stats_get(objagg_hints);
+ if (IS_ERR(stats))
+ return;
+ __pr_debug_stats(stats);
+ objagg_stats_put(stats);
+}
+
+static int check_expect_hints_stats(struct objagg_hints *objagg_hints,
+ const struct expect_stats *expect_stats,
+ const char **errmsg)
+{
+ const struct objagg_stats *stats;
+ int err;
+
+ stats = objagg_hints_stats_get(objagg_hints);
+ if (IS_ERR(stats))
+ return PTR_ERR(stats);
+ err = __check_expect_stats(stats, expect_stats, errmsg);
+ objagg_stats_put(stats);
+ return err;
+}
+
+static int test_hints_case(const struct hints_case *hints_case)
+{
+ struct objagg_obj *objagg_obj;
+ struct objagg_hints *hints;
+ struct world world2 = {};
+ struct world world = {};
+ struct objagg *objagg2;
+ struct objagg *objagg;
+ const char *errmsg;
+ int i;
+ int err;
+
+ objagg = objagg_create(&delta_ops, NULL, &world);
+ if (IS_ERR(objagg))
+ return PTR_ERR(objagg);
+
+ for (i = 0; i < hints_case->key_ids_count; i++) {
+ objagg_obj = world_obj_get(&world, objagg,
+ hints_case->key_ids[i]);
+ if (IS_ERR(objagg_obj)) {
+ err = PTR_ERR(objagg_obj);
+ goto err_world_obj_get;
+ }
+ }
+
+ pr_debug_stats(objagg);
+ err = check_expect_stats(objagg, &hints_case->expect_stats, &errmsg);
+ if (err) {
+ pr_err("Stats: %s\n", errmsg);
+ goto err_check_expect_stats;
+ }
+
+ hints = objagg_hints_get(objagg, OBJAGG_OPT_ALGO_SIMPLE_GREEDY);
+ if (IS_ERR(hints)) {
+ err = PTR_ERR(hints);
+ goto err_hints_get;
+ }
+
+ pr_debug_hints_stats(hints);
+ err = check_expect_hints_stats(hints, &hints_case->expect_stats_hints,
+ &errmsg);
+ if (err) {
+ pr_err("Hints stats: %s\n", errmsg);
+ goto err_check_expect_hints_stats;
+ }
+
+ objagg2 = objagg_create(&delta_ops, hints, &world2);
+ if (IS_ERR(objagg2)) {
+ err = PTR_ERR(objagg2);
+ goto err_check_expect_hints_stats;
+ }
+
+ for (i = 0; i < hints_case->key_ids_count; i++) {
+ objagg_obj = world_obj_get(&world2, objagg2,
+ hints_case->key_ids[i]);
+ if (IS_ERR(objagg_obj)) {
+ err = PTR_ERR(objagg_obj);
+ goto err_world2_obj_get;
+ }
+ }
+
+ pr_debug_stats(objagg2);
+ err = check_expect_stats(objagg2, &hints_case->expect_stats_hints,
+ &errmsg);
+ if (err) {
+ pr_err("Stats2: %s\n", errmsg);
+ goto err_check_expect_stats2;
+ }
+
+ err = 0;
+
+err_check_expect_stats2:
+err_world2_obj_get:
+ for (i--; i >= 0; i--)
+ world_obj_put(&world2, objagg2, hints_case->key_ids[i]);
+ i = hints_case->key_ids_count;
+ objagg_destroy(objagg2);
+err_check_expect_hints_stats:
+ objagg_hints_put(hints);
+err_hints_get:
+err_check_expect_stats:
+err_world_obj_get:
+ for (i--; i >= 0; i--)
+ world_obj_put(&world, objagg, hints_case->key_ids[i]);
+
+ objagg_destroy(objagg);
+ return err;
+}
+
+static int test_hints(void)
+{
+ return test_hints_case(&hints_case);
+}
+
+static int __init test_objagg_init(void)
+{
+ int err;
+
+ err = test_nodelta();
+ if (err)
+ return err;
+ err = test_delta();
+ if (err)
+ return err;
+ return test_hints();
+}
+
+static void __exit test_objagg_exit(void)
+{
+}
+
+module_init(test_objagg_init);
+module_exit(test_objagg_exit);
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
+MODULE_DESCRIPTION("Test module for objagg");
diff --git a/lib/test_parman.c b/lib/test_parman.c
new file mode 100644
index 000000000..35e322436
--- /dev/null
+++ b/lib/test_parman.c
@@ -0,0 +1,395 @@
+/*
+ * lib/test_parman.c - Test module for parman
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/random.h>
+#include <linux/parman.h>
+
+#define TEST_PARMAN_PRIO_SHIFT 7 /* defines number of prios for testing */
+#define TEST_PARMAN_PRIO_COUNT BIT(TEST_PARMAN_PRIO_SHIFT)
+#define TEST_PARMAN_PRIO_MASK (TEST_PARMAN_PRIO_COUNT - 1)
+
+#define TEST_PARMAN_ITEM_SHIFT 13 /* defines the total number
+ * of items for testing
+ */
+#define TEST_PARMAN_ITEM_COUNT BIT(TEST_PARMAN_ITEM_SHIFT)
+#define TEST_PARMAN_ITEM_MASK (TEST_PARMAN_ITEM_COUNT - 1)
+
+#define TEST_PARMAN_BASE_SHIFT 8
+#define TEST_PARMAN_BASE_COUNT BIT(TEST_PARMAN_BASE_SHIFT)
+#define TEST_PARMAN_RESIZE_STEP_SHIFT 7
+#define TEST_PARMAN_RESIZE_STEP_COUNT BIT(TEST_PARMAN_RESIZE_STEP_SHIFT)
+
+#define TEST_PARMAN_BULK_MAX_SHIFT (2 + TEST_PARMAN_RESIZE_STEP_SHIFT)
+#define TEST_PARMAN_BULK_MAX_COUNT BIT(TEST_PARMAN_BULK_MAX_SHIFT)
+#define TEST_PARMAN_BULK_MAX_MASK (TEST_PARMAN_BULK_MAX_COUNT - 1)
+
+#define TEST_PARMAN_RUN_BUDGET (TEST_PARMAN_ITEM_COUNT * 256)
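+
+/*
+ * With the shifts above: 128 priorities, 8192 items, a base array of 256
+ * entries, resize steps of 128 entries, random bulk phases bounded by
+ * TEST_PARMAN_BULK_MAX_COUNT (512), and a total budget of 8192 * 256
+ * operations.
+ */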
+
+struct test_parman_prio {
+ struct parman_prio parman_prio;
+ unsigned long priority;
+};
+
+struct test_parman_item {
+ struct parman_item parman_item;
+ struct test_parman_prio *prio;
+ bool used;
+};
+
+struct test_parman {
+ struct parman *parman;
+ struct test_parman_item **prio_array;
+ unsigned long prio_array_limit;
+ struct test_parman_prio prios[TEST_PARMAN_PRIO_COUNT];
+ struct test_parman_item items[TEST_PARMAN_ITEM_COUNT];
+ struct rnd_state rnd;
+ unsigned long run_budget;
+ unsigned long bulk_budget;
+ bool bulk_noop;
+ unsigned int used_items;
+};
+
+#define ITEM_PTRS_SIZE(count) (sizeof(struct test_parman_item *) * (count))
+
+static int test_parman_resize(void *priv, unsigned long new_count)
+{
+ struct test_parman *test_parman = priv;
+ struct test_parman_item **prio_array;
+ unsigned long old_count;
+
+ prio_array = krealloc(test_parman->prio_array,
+ ITEM_PTRS_SIZE(new_count), GFP_KERNEL);
+ if (new_count == 0)
+ return 0;
+ if (!prio_array)
+ return -ENOMEM;
+ old_count = test_parman->prio_array_limit;
+ if (new_count > old_count)
+ memset(&prio_array[old_count], 0,
+ ITEM_PTRS_SIZE(new_count - old_count));
+ test_parman->prio_array = prio_array;
+ test_parman->prio_array_limit = new_count;
+ return 0;
+}
+
+static void test_parman_move(void *priv, unsigned long from_index,
+ unsigned long to_index, unsigned long count)
+{
+ struct test_parman *test_parman = priv;
+ struct test_parman_item **prio_array = test_parman->prio_array;
+
+ memmove(&prio_array[to_index], &prio_array[from_index],
+ ITEM_PTRS_SIZE(count));
+ memset(&prio_array[from_index], 0, ITEM_PTRS_SIZE(count));
+}
+
+static const struct parman_ops test_parman_lsort_ops = {
+ .base_count = TEST_PARMAN_BASE_COUNT,
+ .resize_step = TEST_PARMAN_RESIZE_STEP_COUNT,
+ .resize = test_parman_resize,
+ .move = test_parman_move,
+ .algo = PARMAN_ALGO_TYPE_LSORT,
+};
+
+static void test_parman_rnd_init(struct test_parman *test_parman)
+{
+ prandom_seed_state(&test_parman->rnd, 3141592653589793238ULL);
+}
+
+static u32 test_parman_rnd_get(struct test_parman *test_parman)
+{
+ return prandom_u32_state(&test_parman->rnd);
+}
+
+static unsigned long test_parman_priority_gen(struct test_parman *test_parman)
+{
+ unsigned long priority;
+ int i;
+
+again:
+ priority = test_parman_rnd_get(test_parman);
+ if (priority == 0)
+ goto again;
+
+ for (i = 0; i < TEST_PARMAN_PRIO_COUNT; i++) {
+ struct test_parman_prio *prio = &test_parman->prios[i];
+
+ if (prio->priority == 0)
+ break;
+ if (prio->priority == priority)
+ goto again;
+ }
+ return priority;
+}
+
+static void test_parman_prios_init(struct test_parman *test_parman)
+{
+ int i;
+
+ for (i = 0; i < TEST_PARMAN_PRIO_COUNT; i++) {
+ struct test_parman_prio *prio = &test_parman->prios[i];
+
+ /* Assign a random unique priority to each prio structure */
+ prio->priority = test_parman_priority_gen(test_parman);
+ parman_prio_init(test_parman->parman, &prio->parman_prio,
+ prio->priority);
+ }
+}
+
+static void test_parman_prios_fini(struct test_parman *test_parman)
+{
+ int i;
+
+ for (i = 0; i < TEST_PARMAN_PRIO_COUNT; i++) {
+ struct test_parman_prio *prio = &test_parman->prios[i];
+
+ parman_prio_fini(&prio->parman_prio);
+ }
+}
+
+static void test_parman_items_init(struct test_parman *test_parman)
+{
+ int i;
+
+ for (i = 0; i < TEST_PARMAN_ITEM_COUNT; i++) {
+ struct test_parman_item *item = &test_parman->items[i];
+ unsigned int prio_index = test_parman_rnd_get(test_parman) &
+ TEST_PARMAN_PRIO_MASK;
+
+ /* Assign random prio to each item structure */
+ item->prio = &test_parman->prios[prio_index];
+ }
+}
+
+static void test_parman_items_fini(struct test_parman *test_parman)
+{
+ int i;
+
+ for (i = 0; i < TEST_PARMAN_ITEM_COUNT; i++) {
+ struct test_parman_item *item = &test_parman->items[i];
+
+ if (!item->used)
+ continue;
+ parman_item_remove(test_parman->parman,
+ &item->prio->parman_prio,
+ &item->parman_item);
+ }
+}
+
+static struct test_parman *test_parman_create(const struct parman_ops *ops)
+{
+ struct test_parman *test_parman;
+ int err;
+
+ test_parman = kzalloc(sizeof(*test_parman), GFP_KERNEL);
+ if (!test_parman)
+ return ERR_PTR(-ENOMEM);
+ err = test_parman_resize(test_parman, TEST_PARMAN_BASE_COUNT);
+ if (err)
+ goto err_resize;
+ test_parman->parman = parman_create(ops, test_parman);
+ if (!test_parman->parman) {
+ err = -ENOMEM;
+ goto err_parman_create;
+ }
+ test_parman_rnd_init(test_parman);
+ test_parman_prios_init(test_parman);
+ test_parman_items_init(test_parman);
+ test_parman->run_budget = TEST_PARMAN_RUN_BUDGET;
+ return test_parman;
+
+err_parman_create:
+ test_parman_resize(test_parman, 0);
+err_resize:
+ kfree(test_parman);
+ return ERR_PTR(err);
+}
+
+static void test_parman_destroy(struct test_parman *test_parman)
+{
+ test_parman_items_fini(test_parman);
+ test_parman_prios_fini(test_parman);
+ parman_destroy(test_parman->parman);
+ test_parman_resize(test_parman, 0);
+ kfree(test_parman);
+}
+
+static bool test_parman_run_check_budgets(struct test_parman *test_parman)
+{
+ if (test_parman->run_budget-- == 0)
+ return false;
+ if (test_parman->bulk_budget-- != 0)
+ return true;
+
+ test_parman->bulk_budget = test_parman_rnd_get(test_parman) &
+ TEST_PARMAN_BULK_MAX_MASK;
+ test_parman->bulk_noop = test_parman_rnd_get(test_parman) & 1;
+ return true;
+}
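+
+/*
+ * The run budget bounds the total number of iterations, while the bulk
+ * budget carves the run into random-length phases that either toggle
+ * items or, when bulk_noop is set, deliberately do nothing.
+ */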
+
+static int test_parman_run(struct test_parman *test_parman)
+{
+ unsigned int i = test_parman_rnd_get(test_parman);
+ int err;
+
+ while (test_parman_run_check_budgets(test_parman)) {
+ unsigned int item_index = i++ & TEST_PARMAN_ITEM_MASK;
+ struct test_parman_item *item = &test_parman->items[item_index];
+
+ if (test_parman->bulk_noop)
+ continue;
+
+ if (!item->used) {
+ err = parman_item_add(test_parman->parman,
+ &item->prio->parman_prio,
+ &item->parman_item);
+ if (err)
+ return err;
+ test_parman->prio_array[item->parman_item.index] = item;
+ test_parman->used_items++;
+ } else {
+ test_parman->prio_array[item->parman_item.index] = NULL;
+ parman_item_remove(test_parman->parman,
+ &item->prio->parman_prio,
+ &item->parman_item);
+ test_parman->used_items--;
+ }
+ item->used = !item->used;
+ }
+ return 0;
+}
+
+static int test_parman_check_array(struct test_parman *test_parman,
+ bool gaps_allowed)
+{
+ unsigned int last_unused_items = 0;
+ unsigned long last_priority = 0;
+ unsigned int used_items = 0;
+ int i;
+
+ if (test_parman->prio_array_limit < TEST_PARMAN_BASE_COUNT) {
+ pr_err("Array limit is lower than the base count (%lu < %lu)\n",
+ test_parman->prio_array_limit, TEST_PARMAN_BASE_COUNT);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < test_parman->prio_array_limit; i++) {
+ struct test_parman_item *item = test_parman->prio_array[i];
+
+ if (!item) {
+ last_unused_items++;
+ continue;
+ }
+ if (last_unused_items && !gaps_allowed) {
+ pr_err("Gap found in array even though they are forbidden\n");
+ return -EINVAL;
+ }
+
+ last_unused_items = 0;
+ used_items++;
+
+ if (item->prio->priority < last_priority) {
+ pr_err("Item belongs under higher priority then the last one (current: %lu, previous: %lu)\n",
+ item->prio->priority, last_priority);
+ return -EINVAL;
+ }
+ last_priority = item->prio->priority;
+
+ if (item->parman_item.index != i) {
+ pr_err("Item has different index in compare to where it actually is (%lu != %d)\n",
+ item->parman_item.index, i);
+ return -EINVAL;
+ }
+ }
+
+ if (used_items != test_parman->used_items) {
+ pr_err("Number of used items in array does not match (%u != %u)\n",
+ used_items, test_parman->used_items);
+ return -EINVAL;
+ }
+
+ if (last_unused_items >= TEST_PARMAN_RESIZE_STEP_COUNT) {
+ pr_err("Number of unused item at the end of array is bigger than resize step (%u >= %lu)\n",
+ last_unused_items, TEST_PARMAN_RESIZE_STEP_COUNT);
+ return -EINVAL;
+ }
+
+ pr_info("Priority array check successful\n");
+
+ return 0;
+}
+
+static int test_parman_lsort(void)
+{
+ struct test_parman *test_parman;
+ int err;
+
+ test_parman = test_parman_create(&test_parman_lsort_ops);
+ if (IS_ERR(test_parman))
+ return PTR_ERR(test_parman);
+
+ err = test_parman_run(test_parman);
+ if (err)
+ goto out;
+
+ err = test_parman_check_array(test_parman, false);
+ if (err)
+ goto out;
+out:
+ test_parman_destroy(test_parman);
+ return err;
+}
+
+static int __init test_parman_init(void)
+{
+ return test_parman_lsort();
+}
+
+static void __exit test_parman_exit(void)
+{
+}
+
+module_init(test_parman_init);
+module_exit(test_parman_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
+MODULE_DESCRIPTION("Test module for parman");
diff --git a/lib/test_printf.c b/lib/test_printf.c
new file mode 100644
index 000000000..69b6a5e17
--- /dev/null
+++ b/lib/test_printf.c
@@ -0,0 +1,827 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Test cases for printf facility.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/random.h>
+#include <linux/rtc.h>
+#include <linux/slab.h>
+#include <linux/sprintf.h>
+#include <linux/string.h>
+
+#include <linux/bitmap.h>
+#include <linux/dcache.h>
+#include <linux/socket.h>
+#include <linux/in.h>
+
+#include <linux/gfp.h>
+#include <linux/mm.h>
+
+#include <linux/property.h>
+
+#include "../tools/testing/selftests/kselftest_module.h"
+
+#define BUF_SIZE 256
+#define PAD_SIZE 16
+#define FILL_CHAR '$'
+
+#define NOWARN(option, comment, block) \
+ __diag_push(); \
+ __diag_ignore_all(#option, comment); \
+ block \
+ __diag_pop();
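+
+/*
+ * NOWARN() temporarily disables the named compiler diagnostic around
+ * |block|, so deliberately out-of-range conversions can be tested
+ * without tripping -Wformat.
+ */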
+
+KSTM_MODULE_GLOBALS();
+
+static char *test_buffer __initdata;
+static char *alloced_buffer __initdata;
+
+static int __printf(4, 0) __init
+do_test(int bufsize, const char *expect, int elen,
+ const char *fmt, va_list ap)
+{
+ va_list aq;
+ int ret, written;
+
+ total_tests++;
+
+ memset(alloced_buffer, FILL_CHAR, BUF_SIZE + 2*PAD_SIZE);
+ va_copy(aq, ap);
+ ret = vsnprintf(test_buffer, bufsize, fmt, aq);
+ va_end(aq);
+
+ if (ret != elen) {
+ pr_warn("vsnprintf(buf, %d, \"%s\", ...) returned %d, expected %d\n",
+ bufsize, fmt, ret, elen);
+ return 1;
+ }
+
+ if (memchr_inv(alloced_buffer, FILL_CHAR, PAD_SIZE)) {
+ pr_warn("vsnprintf(buf, %d, \"%s\", ...) wrote before buffer\n", bufsize, fmt);
+ return 1;
+ }
+
+ if (!bufsize) {
+ if (memchr_inv(test_buffer, FILL_CHAR, BUF_SIZE + PAD_SIZE)) {
+ pr_warn("vsnprintf(buf, 0, \"%s\", ...) wrote to buffer\n",
+ fmt);
+ return 1;
+ }
+ return 0;
+ }
+
+ written = min(bufsize-1, elen);
+ if (test_buffer[written]) {
+ pr_warn("vsnprintf(buf, %d, \"%s\", ...) did not nul-terminate buffer\n",
+ bufsize, fmt);
+ return 1;
+ }
+
+ if (memchr_inv(test_buffer + written + 1, FILL_CHAR, bufsize - (written + 1))) {
+ pr_warn("vsnprintf(buf, %d, \"%s\", ...) wrote beyond the nul-terminator\n",
+ bufsize, fmt);
+ return 1;
+ }
+
+ if (memchr_inv(test_buffer + bufsize, FILL_CHAR, BUF_SIZE + PAD_SIZE - bufsize)) {
+ pr_warn("vsnprintf(buf, %d, \"%s\", ...) wrote beyond buffer\n", bufsize, fmt);
+ return 1;
+ }
+
+ if (memcmp(test_buffer, expect, written)) {
+ pr_warn("vsnprintf(buf, %d, \"%s\", ...) wrote '%s', expected '%.*s'\n",
+ bufsize, fmt, test_buffer, written, expect);
+ return 1;
+ }
+ return 0;
+}
+
+static void __printf(3, 4) __init
+__test(const char *expect, int elen, const char *fmt, ...)
+{
+ va_list ap;
+ int rand;
+ char *p;
+
+ if (elen >= BUF_SIZE) {
+ pr_err("error in test suite: expected output length %d too long. Format was '%s'.\n",
+ elen, fmt);
+ failed_tests++;
+ return;
+ }
+
+ va_start(ap, fmt);
+
+ /*
+ * Every fmt+args is subjected to four tests: Three where we
+ * tell vsnprintf varying buffer sizes (plenty, not quite
+ * enough and 0), and then we also test that kvasprintf would
+ * be able to print it as expected.
+ */
+ failed_tests += do_test(BUF_SIZE, expect, elen, fmt, ap);
+ rand = get_random_u32_inclusive(1, elen + 1);
+ /* Since elen < BUF_SIZE, we have 1 <= rand <= BUF_SIZE. */
+ failed_tests += do_test(rand, expect, elen, fmt, ap);
+ failed_tests += do_test(0, expect, elen, fmt, ap);
+
+ p = kvasprintf(GFP_KERNEL, fmt, ap);
+ if (p) {
+ total_tests++;
+ if (memcmp(p, expect, elen+1)) {
+ pr_warn("kvasprintf(..., \"%s\", ...) returned '%s', expected '%s'\n",
+ fmt, p, expect);
+ failed_tests++;
+ }
+ kfree(p);
+ }
+ va_end(ap);
+}
+
+#define test(expect, fmt, ...) \
+ __test(expect, strlen(expect), fmt, ##__VA_ARGS__)
+
+static void __init
+test_basic(void)
+{
+ /* Work around annoying "warning: zero-length gnu_printf format string". */
+ char nul = '\0';
+
+ test("", &nul);
+ test("100%", "100%%");
+ test("xxx%yyy", "xxx%cyyy", '%');
+ __test("xxx\0yyy", 7, "xxx%cyyy", '\0');
+}
+
+static void __init
+test_number(void)
+{
+ test("0x1234abcd ", "%#-12x", 0x1234abcd);
+ test(" 0x1234abcd", "%#12x", 0x1234abcd);
+ test("0|001| 12|+123| 1234|-123|-1234", "%d|%03d|%3d|%+d|% d|%+d|% d", 0, 1, 12, 123, 1234, -123, -1234);
+ NOWARN(-Wformat, "Intentionally test narrowing conversion specifiers.", {
+ test("0|1|1|128|255", "%hhu|%hhu|%hhu|%hhu|%hhu", 0, 1, 257, 128, -1);
+ test("0|1|1|-128|-1", "%hhd|%hhd|%hhd|%hhd|%hhd", 0, 1, 257, 128, -1);
+ test("2015122420151225", "%ho%ho%#ho", 1037, 5282, -11627);
+ })
+ /*
+ * POSIX/C99: »The result of converting zero with an explicit
+ * precision of zero shall be no characters.« Hence the output
+ * from the below test should really be "00|0||| ". However,
+ * the kernel's printf also produces a single 0 in that
+ * case. This test case simply documents the current
+ * behaviour.
+ */
+ test("00|0|0|0|0", "%.2d|%.1d|%.0d|%.*d|%1.0d", 0, 0, 0, 0, 0, 0);
+}
+
+static void __init
+test_string(void)
+{
+ test("", "%s%.0s", "", "123");
+ test("ABCD|abc|123", "%s|%.3s|%.*s", "ABCD", "abcdef", 3, "123456");
+ test("1 | 2|3 | 4|5 ", "%-3s|%3s|%-*s|%*s|%*s", "1", "2", 3, "3", 3, "4", -3, "5");
+ test("1234 ", "%-10.4s", "123456");
+ test(" 1234", "%10.4s", "123456");
+ /*
+ * POSIX and C99 say that a negative precision (which is only
+ * possible to pass via a * argument) should be treated as if
+ * the precision wasn't present, and that if the precision is
+ * omitted (as in %.s), the precision should be taken to be
+ * 0. However, the kernel's printf behaves in exactly the opposite way,
+ * treating a negative precision as 0 and treating an omitted
+ * precision specifier as if no precision was given.
+ *
+ * These test cases document the current behaviour; should
+ * anyone ever feel the need to follow the standards more
+ * closely, this can be revisited.
+ */
+ test(" ", "%4.*s", -5, "123456");
+ test("123456", "%.s", "123456");
+ test("a||", "%.s|%.0s|%.*s", "a", "b", 0, "c");
+ test("a | | ", "%-3.s|%-3.0s|%-3.*s", "a", "b", 0, "c");
+}
+
+#define PLAIN_BUF_SIZE 64 /* leave some space so we don't oops */
+
+#if BITS_PER_LONG == 64
+
+#define PTR_WIDTH 16
+#define PTR ((void *)0xffff0123456789abUL)
+#define PTR_STR "ffff0123456789ab"
+#define PTR_VAL_NO_CRNG "(____ptrval____)"
+#define ZEROS "00000000" /* hex 32 zero bits */
+#define ONES "ffffffff" /* hex 32 one bits */
+
+static int __init
+plain_format(void)
+{
+ char buf[PLAIN_BUF_SIZE];
+ int nchars;
+
+ nchars = snprintf(buf, PLAIN_BUF_SIZE, "%p", PTR);
+
+ if (nchars != PTR_WIDTH)
+ return -1;
+
+ if (strncmp(buf, PTR_VAL_NO_CRNG, PTR_WIDTH) == 0) {
+ pr_warn("crng possibly not yet initialized. plain 'p' buffer contains \"%s\"",
+ PTR_VAL_NO_CRNG);
+ return 0;
+ }
+
+ if (strncmp(buf, ZEROS, strlen(ZEROS)) != 0)
+ return -1;
+
+ return 0;
+}
+
+#else
+
+#define PTR_WIDTH 8
+#define PTR ((void *)0x456789ab)
+#define PTR_STR "456789ab"
+#define PTR_VAL_NO_CRNG "(ptrval)"
+#define ZEROS ""
+#define ONES ""
+
+static int __init
+plain_format(void)
+{
+ /* Format is implicitly tested for 32 bit machines by plain_hash() */
+ return 0;
+}
+
+#endif /* BITS_PER_LONG == 64 */
+
+static int __init
+plain_hash_to_buffer(const void *p, char *buf, size_t len)
+{
+ int nchars;
+
+ nchars = snprintf(buf, len, "%p", p);
+
+ if (nchars != PTR_WIDTH)
+ return -1;
+
+ if (strncmp(buf, PTR_VAL_NO_CRNG, PTR_WIDTH) == 0) {
+ pr_warn("crng possibly not yet initialized. plain 'p' buffer contains \"%s\"",
+ PTR_VAL_NO_CRNG);
+ return 0;
+ }
+
+ return 0;
+}
+
+static int __init
+plain_hash(void)
+{
+ char buf[PLAIN_BUF_SIZE];
+ int ret;
+
+ ret = plain_hash_to_buffer(PTR, buf, PLAIN_BUF_SIZE);
+ if (ret)
+ return ret;
+
+ if (strncmp(buf, PTR_STR, PTR_WIDTH) == 0)
+ return -1;
+
+ return 0;
+}
+
+/*
+ * We can't use test() to test %p because we don't know what output to expect
+ * after an address is hashed.
+ */
+static void __init
+plain(void)
+{
+ int err;
+
+ if (no_hash_pointers) {
+ pr_warn("skipping plain 'p' tests");
+ skipped_tests += 2;
+ return;
+ }
+
+ err = plain_hash();
+ if (err) {
+ pr_warn("plain 'p' does not appear to be hashed\n");
+ failed_tests++;
+ return;
+ }
+
+ err = plain_format();
+ if (err) {
+ pr_warn("hashing plain 'p' has unexpected format\n");
+ failed_tests++;
+ }
+}
+
+static void __init
+test_hashed(const char *fmt, const void *p)
+{
+ char buf[PLAIN_BUF_SIZE];
+ int ret;
+
+ /*
+ * No need to increase failed test counter since this is assumed
+ * to be called after plain().
+ */
+ ret = plain_hash_to_buffer(p, buf, PLAIN_BUF_SIZE);
+ if (ret)
+ return;
+
+ test(buf, fmt, p);
+}
+
+/*
+ * NULL pointers aren't hashed.
+ */
+static void __init
+null_pointer(void)
+{
+ test(ZEROS "00000000", "%p", NULL);
+ test(ZEROS "00000000", "%px", NULL);
+ test("(null)", "%pE", NULL);
+}
+
+/*
+ * Error pointers aren't hashed.
+ */
+static void __init
+error_pointer(void)
+{
+ test(ONES "fffffff5", "%p", ERR_PTR(-11));
+ test(ONES "fffffff5", "%px", ERR_PTR(-11));
+ test("(efault)", "%pE", ERR_PTR(-11));
+}
+
+#define PTR_INVALID ((void *)0x000000ab)
+
+static void __init
+invalid_pointer(void)
+{
+ test_hashed("%p", PTR_INVALID);
+ test(ZEROS "000000ab", "%px", PTR_INVALID);
+ test("(efault)", "%pE", PTR_INVALID);
+}
+
+static void __init
+symbol_ptr(void)
+{
+}
+
+static void __init
+kernel_ptr(void)
+{
+ /* We can't test this without access to kptr_restrict. */
+}
+
+static void __init
+struct_resource(void)
+{
+}
+
+static void __init
+addr(void)
+{
+}
+
+static void __init
+escaped_str(void)
+{
+}
+
+static void __init
+hex_string(void)
+{
+ const char buf[3] = {0xc0, 0xff, 0xee};
+
+ test("c0 ff ee|c0:ff:ee|c0-ff-ee|c0ffee",
+ "%3ph|%3phC|%3phD|%3phN", buf, buf, buf, buf);
+ test("c0 ff ee|c0:ff:ee|c0-ff-ee|c0ffee",
+ "%*ph|%*phC|%*phD|%*phN", 3, buf, 3, buf, 3, buf, 3, buf);
+}
+
+static void __init
+mac(void)
+{
+ const u8 addr[6] = {0x2d, 0x48, 0xd6, 0xfc, 0x7a, 0x05};
+
+ test("2d:48:d6:fc:7a:05", "%pM", addr);
+ test("05:7a:fc:d6:48:2d", "%pMR", addr);
+ test("2d-48-d6-fc-7a-05", "%pMF", addr);
+ test("2d48d6fc7a05", "%pm", addr);
+ test("057afcd6482d", "%pmR", addr);
+}
+
+static void __init
+ip4(void)
+{
+ struct sockaddr_in sa;
+
+ sa.sin_family = AF_INET;
+ sa.sin_port = cpu_to_be16(12345);
+ sa.sin_addr.s_addr = cpu_to_be32(0x7f000001);
+
+ test("127.000.000.001|127.0.0.1", "%pi4|%pI4", &sa.sin_addr, &sa.sin_addr);
+ test("127.000.000.001|127.0.0.1", "%piS|%pIS", &sa, &sa);
+ sa.sin_addr.s_addr = cpu_to_be32(0x01020304);
+ test("001.002.003.004:12345|1.2.3.4:12345", "%piSp|%pISp", &sa, &sa);
+}
+
+static void __init
+ip6(void)
+{
+}
+
+static void __init
+ip(void)
+{
+ ip4();
+ ip6();
+}
+
+static void __init
+uuid(void)
+{
+ const char uuid[16] = {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
+ 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf};
+
+ test("00010203-0405-0607-0809-0a0b0c0d0e0f", "%pUb", uuid);
+ test("00010203-0405-0607-0809-0A0B0C0D0E0F", "%pUB", uuid);
+ test("03020100-0504-0706-0809-0a0b0c0d0e0f", "%pUl", uuid);
+ test("03020100-0504-0706-0809-0A0B0C0D0E0F", "%pUL", uuid);
+}
+
+static struct dentry test_dentry[4] __initdata = {
+ { .d_parent = &test_dentry[0],
+ .d_name = QSTR_INIT(test_dentry[0].d_iname, 3),
+ .d_iname = "foo" },
+ { .d_parent = &test_dentry[0],
+ .d_name = QSTR_INIT(test_dentry[1].d_iname, 5),
+ .d_iname = "bravo" },
+ { .d_parent = &test_dentry[1],
+ .d_name = QSTR_INIT(test_dentry[2].d_iname, 4),
+ .d_iname = "alfa" },
+ { .d_parent = &test_dentry[2],
+ .d_name = QSTR_INIT(test_dentry[3].d_iname, 5),
+ .d_iname = "romeo" },
+};
+
+static void __init
+dentry(void)
+{
+ test("foo", "%pd", &test_dentry[0]);
+ test("foo", "%pd2", &test_dentry[0]);
+
+ test("(null)", "%pd", NULL);
+ test("(efault)", "%pd", PTR_INVALID);
+ test("(null)", "%pD", NULL);
+ test("(efault)", "%pD", PTR_INVALID);
+
+ test("romeo", "%pd", &test_dentry[3]);
+ test("alfa/romeo", "%pd2", &test_dentry[3]);
+ test("bravo/alfa/romeo", "%pd3", &test_dentry[3]);
+ test("/bravo/alfa/romeo", "%pd4", &test_dentry[3]);
+ test("/bravo/alfa", "%pd4", &test_dentry[2]);
+
+ test("bravo/alfa |bravo/alfa ", "%-12pd2|%*pd2", &test_dentry[2], -12, &test_dentry[2]);
+ test(" bravo/alfa| bravo/alfa", "%12pd2|%*pd2", &test_dentry[2], 12, &test_dentry[2]);
+}
+
+static void __init
+struct_va_format(void)
+{
+}
+
+static void __init
+time_and_date(void)
+{
+ /* 1543210543 */
+ const struct rtc_time tm = {
+ .tm_sec = 43,
+ .tm_min = 35,
+ .tm_hour = 5,
+ .tm_mday = 26,
+ .tm_mon = 10,
+ .tm_year = 118,
+ };
+ /* 2019-01-04T15:32:23 */
+ time64_t t = 1546615943;
+
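+ /*
+ * The 'r' (raw) variants below print struct rtc_time fields as
+ * stored (tm_year relative to 1900, tm_mon zero-based), which is
+ * why 2018-11-26 reads back as "0118-10-26".
+ */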
+ test("(%pt?)", "%pt", &tm);
+ test("2018-11-26T05:35:43", "%ptR", &tm);
+ test("0118-10-26T05:35:43", "%ptRr", &tm);
+ test("05:35:43|2018-11-26", "%ptRt|%ptRd", &tm, &tm);
+ test("05:35:43|0118-10-26", "%ptRtr|%ptRdr", &tm, &tm);
+ test("05:35:43|2018-11-26", "%ptRttr|%ptRdtr", &tm, &tm);
+ test("05:35:43 tr|2018-11-26 tr", "%ptRt tr|%ptRd tr", &tm, &tm);
+
+ test("2019-01-04T15:32:23", "%ptT", &t);
+ test("0119-00-04T15:32:23", "%ptTr", &t);
+ test("15:32:23|2019-01-04", "%ptTt|%ptTd", &t, &t);
+ test("15:32:23|0119-00-04", "%ptTtr|%ptTdr", &t, &t);
+
+ test("2019-01-04 15:32:23", "%ptTs", &t);
+ test("0119-00-04 15:32:23", "%ptTsr", &t);
+ test("15:32:23|2019-01-04", "%ptTts|%ptTds", &t, &t);
+ test("15:32:23|0119-00-04", "%ptTtrs|%ptTdrs", &t, &t);
+}
+
+static void __init
+struct_clk(void)
+{
+}
+
+static void __init
+large_bitmap(void)
+{
+ const int nbits = 1 << 16;
+ unsigned long *bits = bitmap_zalloc(nbits, GFP_KERNEL);
+ if (!bits)
+ return;
+
+ bitmap_set(bits, 1, 20);
+ bitmap_set(bits, 60000, 15);
+ test("1-20,60000-60014", "%*pbl", nbits, bits);
+ bitmap_free(bits);
+}
+
+static void __init
+bitmap(void)
+{
+ DECLARE_BITMAP(bits, 20);
+ const int primes[] = {2,3,5,7,11,13,17,19};
+ int i;
+
+ bitmap_zero(bits, 20);
+ test("00000|00000", "%20pb|%*pb", bits, 20, bits);
+ test("|", "%20pbl|%*pbl", bits, 20, bits);
+
+ for (i = 0; i < ARRAY_SIZE(primes); ++i)
+ set_bit(primes[i], bits);
+ test("a28ac|a28ac", "%20pb|%*pb", bits, 20, bits);
+ test("2-3,5,7,11,13,17,19|2-3,5,7,11,13,17,19", "%20pbl|%*pbl", bits, 20, bits);
+
+ bitmap_fill(bits, 20);
+ test("fffff|fffff", "%20pb|%*pb", bits, 20, bits);
+ test("0-19|0-19", "%20pbl|%*pbl", bits, 20, bits);
+
+ large_bitmap();
+}
+
+static void __init
+netdev_features(void)
+{
+}
+
+struct page_flags_test {
+ int width;
+ int shift;
+ int mask;
+ const char *fmt;
+ const char *name;
+};
+
+static const struct page_flags_test pft[] = {
+ {SECTIONS_WIDTH, SECTIONS_PGSHIFT, SECTIONS_MASK,
+ "%d", "section"},
+ {NODES_WIDTH, NODES_PGSHIFT, NODES_MASK,
+ "%d", "node"},
+ {ZONES_WIDTH, ZONES_PGSHIFT, ZONES_MASK,
+ "%d", "zone"},
+ {LAST_CPUPID_WIDTH, LAST_CPUPID_PGSHIFT, LAST_CPUPID_MASK,
+ "%#x", "lastcpupid"},
+ {KASAN_TAG_WIDTH, KASAN_TAG_PGSHIFT, KASAN_TAG_MASK,
+ "%#x", "kasantag"},
+};
+
+static void __init
+page_flags_test(int section, int node, int zone, int last_cpupid,
+ int kasan_tag, unsigned long flags, const char *name,
+ char *cmp_buf)
+{
+ unsigned long values[] = {section, node, zone, last_cpupid, kasan_tag};
+ unsigned long size;
+ bool append = false;
+ int i;
+
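+ /*
+ * Each value is masked to its field width and shifted into place,
+ * mirroring how the kernel packs section/node/zone/lastcpupid/
+ * kasantag into page->flags.
+ */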
+ for (i = 0; i < ARRAY_SIZE(values); i++)
+ flags |= (values[i] & pft[i].mask) << pft[i].shift;
+
+ size = scnprintf(cmp_buf, BUF_SIZE, "%#lx(", flags);
+ if (flags & PAGEFLAGS_MASK) {
+ size += scnprintf(cmp_buf + size, BUF_SIZE - size, "%s", name);
+ append = true;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(pft); i++) {
+ if (!pft[i].width)
+ continue;
+
+ if (append)
+ size += scnprintf(cmp_buf + size, BUF_SIZE - size, "|");
+
+ size += scnprintf(cmp_buf + size, BUF_SIZE - size, "%s=",
+ pft[i].name);
+ size += scnprintf(cmp_buf + size, BUF_SIZE - size, pft[i].fmt,
+ values[i] & pft[i].mask);
+ append = true;
+ }
+
+ snprintf(cmp_buf + size, BUF_SIZE - size, ")");
+
+ test(cmp_buf, "%pGp", &flags);
+}
+
+static void __init page_type_test(unsigned int page_type, const char *name,
+ char *cmp_buf)
+{
+ unsigned long size;
+
+ size = scnprintf(cmp_buf, BUF_SIZE, "%#x(", page_type);
+ if (page_type_has_type(page_type))
+ size += scnprintf(cmp_buf + size, BUF_SIZE - size, "%s", name);
+
+ snprintf(cmp_buf + size, BUF_SIZE - size, ")");
+ test(cmp_buf, "%pGt", &page_type);
+}
+
+static void __init
+flags(void)
+{
+ unsigned long flags;
+ char *cmp_buffer;
+ gfp_t gfp;
+ unsigned int page_type;
+
+ cmp_buffer = kmalloc(BUF_SIZE, GFP_KERNEL);
+ if (!cmp_buffer)
+ return;
+
+ flags = 0;
+ page_flags_test(0, 0, 0, 0, 0, flags, "", cmp_buffer);
+
+ flags = 1UL << NR_PAGEFLAGS;
+ page_flags_test(0, 0, 0, 0, 0, flags, "", cmp_buffer);
+
+ flags |= 1UL << PG_uptodate | 1UL << PG_dirty | 1UL << PG_lru
+ | 1UL << PG_active | 1UL << PG_swapbacked;
+ page_flags_test(1, 1, 1, 0x1fffff, 1, flags,
+ "uptodate|dirty|lru|active|swapbacked",
+ cmp_buffer);
+
+ flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
+ test("read|exec|mayread|maywrite|mayexec", "%pGv", &flags);
+
+ gfp = GFP_TRANSHUGE;
+ test("GFP_TRANSHUGE", "%pGg", &gfp);
+
+ gfp = GFP_ATOMIC|__GFP_DMA;
+ test("GFP_ATOMIC|GFP_DMA", "%pGg", &gfp);
+
+ gfp = __GFP_HIGH;
+ test("__GFP_HIGH", "%pGg", &gfp);
+
+ /* Any flags not translated by the table should remain numeric */
+ gfp = ~__GFP_BITS_MASK;
+ snprintf(cmp_buffer, BUF_SIZE, "%#lx", (unsigned long) gfp);
+ test(cmp_buffer, "%pGg", &gfp);
+
+ snprintf(cmp_buffer, BUF_SIZE, "__GFP_HIGH|%#lx",
+ (unsigned long) gfp);
+ gfp |= __GFP_HIGH;
+ test(cmp_buffer, "%pGg", &gfp);
+
+ page_type = ~0;
+ page_type_test(page_type, "", cmp_buffer);
+
+ page_type = 10;
+ page_type_test(page_type, "", cmp_buffer);
+
+ page_type = ~PG_buddy;
+ page_type_test(page_type, "buddy", cmp_buffer);
+
+ page_type = ~(PG_table | PG_buddy);
+ page_type_test(page_type, "table|buddy", cmp_buffer);
+
+ kfree(cmp_buffer);
+}
+
+static void __init fwnode_pointer(void)
+{
+ const struct software_node first = { .name = "first" };
+ const struct software_node second = { .name = "second", .parent = &first };
+ const struct software_node third = { .name = "third", .parent = &second };
+ const struct software_node *group[] = { &first, &second, &third, NULL };
+ const char * const full_name_second = "first/second";
+ const char * const full_name_third = "first/second/third";
+ const char * const second_name = "second";
+ const char * const third_name = "third";
+ int rval;
+
+ rval = software_node_register_node_group(group);
+ if (rval) {
+ pr_warn("cannot register softnodes; rval %d\n", rval);
+ return;
+ }
+
+ test(full_name_second, "%pfw", software_node_fwnode(&second));
+ test(full_name_third, "%pfw", software_node_fwnode(&third));
+ test(full_name_third, "%pfwf", software_node_fwnode(&third));
+ test(second_name, "%pfwP", software_node_fwnode(&second));
+ test(third_name, "%pfwP", software_node_fwnode(&third));
+
+ software_node_unregister_node_group(group);
+}
+
+static void __init fourcc_pointer(void)
+{
+ struct {
+ u32 code;
+ char *str;
+ } const try[] = {
+ { 0x3231564e, "NV12 little-endian (0x3231564e)", },
+ { 0xb231564e, "NV12 big-endian (0xb231564e)", },
+ { 0x10111213, ".... little-endian (0x10111213)", },
+ { 0x20303159, "Y10 little-endian (0x20303159)", },
+ };
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(try); i++)
+ test(try[i].str, "%p4cc", &try[i].code);
+}
+
+static void __init
+errptr(void)
+{
+ test("-1234", "%pe", ERR_PTR(-1234));
+
+ /* Check that %pe with a non-ERR_PTR gets treated as ordinary %p. */
+ BUILD_BUG_ON(IS_ERR(PTR));
+ test_hashed("%pe", PTR);
+
+#ifdef CONFIG_SYMBOLIC_ERRNAME
+ test("(-ENOTSOCK)", "(%pe)", ERR_PTR(-ENOTSOCK));
+ test("(-EAGAIN)", "(%pe)", ERR_PTR(-EAGAIN));
+ BUILD_BUG_ON(EAGAIN != EWOULDBLOCK);
+ test("(-EAGAIN)", "(%pe)", ERR_PTR(-EWOULDBLOCK));
+ test("[-EIO ]", "[%-8pe]", ERR_PTR(-EIO));
+ test("[ -EIO]", "[%8pe]", ERR_PTR(-EIO));
+ test("-EPROBE_DEFER", "%pe", ERR_PTR(-EPROBE_DEFER));
+#endif
+}
+
+static void __init
+test_pointer(void)
+{
+ plain();
+ null_pointer();
+ error_pointer();
+ invalid_pointer();
+ symbol_ptr();
+ kernel_ptr();
+ struct_resource();
+ addr();
+ escaped_str();
+ hex_string();
+ mac();
+ ip();
+ uuid();
+ dentry();
+ struct_va_format();
+ time_and_date();
+ struct_clk();
+ bitmap();
+ netdev_features();
+ flags();
+ errptr();
+ fwnode_pointer();
+ fourcc_pointer();
+}
+
+static void __init selftest(void)
+{
+ alloced_buffer = kmalloc(BUF_SIZE + 2*PAD_SIZE, GFP_KERNEL);
+ if (!alloced_buffer)
+ return;
+ test_buffer = alloced_buffer + PAD_SIZE;
+
+ test_basic();
+ test_number();
+ test_string();
+ test_pointer();
+
+ kfree(alloced_buffer);
+}
+
+KSTM_MODULE_LOADERS(test_printf);
+MODULE_AUTHOR("Rasmus Villemoes <linux@rasmusvillemoes.dk>");
+MODULE_LICENSE("GPL");
diff --git a/lib/test_ref_tracker.c b/lib/test_ref_tracker.c
new file mode 100644
index 000000000..49970a7c9
--- /dev/null
+++ b/lib/test_ref_tracker.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Reference tracker self test.
+ *
+ * Copyright (c) 2021 Eric Dumazet <edumazet@google.com>
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/ref_tracker.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+
+static struct ref_tracker_dir ref_dir;
+static struct ref_tracker *tracker[20];
+
+#define TRT_ALLOC(X) static noinline void \
+ alloctest_ref_tracker_alloc##X(struct ref_tracker_dir *dir, \
+ struct ref_tracker **trackerp) \
+ { \
+ ref_tracker_alloc(dir, trackerp, GFP_KERNEL); \
+ }
+
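+/*
+ * Each TRT_ALLOC(X) invocation below emits a distinct noinline wrapper,
+ * e.g. TRT_ALLOC(2) defines alloctest_ref_tracker_alloc2(); separate
+ * functions presumably keep the recorded allocation stack traces
+ * distinguishable in the ref_tracker report.
+ */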
+TRT_ALLOC(1)
+TRT_ALLOC(2)
+TRT_ALLOC(3)
+TRT_ALLOC(4)
+TRT_ALLOC(5)
+TRT_ALLOC(6)
+TRT_ALLOC(7)
+TRT_ALLOC(8)
+TRT_ALLOC(9)
+TRT_ALLOC(10)
+TRT_ALLOC(11)
+TRT_ALLOC(12)
+TRT_ALLOC(13)
+TRT_ALLOC(14)
+TRT_ALLOC(15)
+TRT_ALLOC(16)
+TRT_ALLOC(17)
+TRT_ALLOC(18)
+TRT_ALLOC(19)
+
+#undef TRT_ALLOC
+
+static noinline void
+alloctest_ref_tracker_free(struct ref_tracker_dir *dir,
+ struct ref_tracker **trackerp)
+{
+ ref_tracker_free(dir, trackerp);
+}
+
+
+static struct timer_list test_ref_tracker_timer;
+static atomic_t test_ref_timer_done = ATOMIC_INIT(0);
+
+static void test_ref_tracker_timer_func(struct timer_list *t)
+{
+ ref_tracker_alloc(&ref_dir, &tracker[0], GFP_ATOMIC);
+ atomic_set(&test_ref_timer_done, 1);
+}
+
+static int __init test_ref_tracker_init(void)
+{
+ int i;
+
+ ref_tracker_dir_init(&ref_dir, 100, "selftest");
+
+ timer_setup(&test_ref_tracker_timer, test_ref_tracker_timer_func, 0);
+ mod_timer(&test_ref_tracker_timer, jiffies + 1);
+
+ alloctest_ref_tracker_alloc1(&ref_dir, &tracker[1]);
+ alloctest_ref_tracker_alloc2(&ref_dir, &tracker[2]);
+ alloctest_ref_tracker_alloc3(&ref_dir, &tracker[3]);
+ alloctest_ref_tracker_alloc4(&ref_dir, &tracker[4]);
+ alloctest_ref_tracker_alloc5(&ref_dir, &tracker[5]);
+ alloctest_ref_tracker_alloc6(&ref_dir, &tracker[6]);
+ alloctest_ref_tracker_alloc7(&ref_dir, &tracker[7]);
+ alloctest_ref_tracker_alloc8(&ref_dir, &tracker[8]);
+ alloctest_ref_tracker_alloc9(&ref_dir, &tracker[9]);
+ alloctest_ref_tracker_alloc10(&ref_dir, &tracker[10]);
+ alloctest_ref_tracker_alloc11(&ref_dir, &tracker[11]);
+ alloctest_ref_tracker_alloc12(&ref_dir, &tracker[12]);
+ alloctest_ref_tracker_alloc13(&ref_dir, &tracker[13]);
+ alloctest_ref_tracker_alloc14(&ref_dir, &tracker[14]);
+ alloctest_ref_tracker_alloc15(&ref_dir, &tracker[15]);
+ alloctest_ref_tracker_alloc16(&ref_dir, &tracker[16]);
+ alloctest_ref_tracker_alloc17(&ref_dir, &tracker[17]);
+ alloctest_ref_tracker_alloc18(&ref_dir, &tracker[18]);
+ alloctest_ref_tracker_alloc19(&ref_dir, &tracker[19]);
+
+ /* Free all trackers except tracker[0] and tracker[1]. */
+ for (i = 2; i < ARRAY_SIZE(tracker); i++)
+ alloctest_ref_tracker_free(&ref_dir, &tracker[i]);
+
+ /* Attempt to free an already freed tracker. */
+ alloctest_ref_tracker_free(&ref_dir, &tracker[2]);
+
+ while (!atomic_read(&test_ref_timer_done))
+ msleep(1);
+
+ /* This should warn about tracker[0] & tracker[1] not being freed. */
+ ref_tracker_dir_exit(&ref_dir);
+
+ return 0;
+}
+
+static void __exit test_ref_tracker_exit(void)
+{
+}
+
+module_init(test_ref_tracker_init);
+module_exit(test_ref_tracker_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
new file mode 100644
index 000000000..c20f6cb4b
--- /dev/null
+++ b/lib/test_rhashtable.c
@@ -0,0 +1,813 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Resizable, Scalable, Concurrent Hash Table
+ *
+ * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
+ * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
+ */
+
+/**************************************************************************
+ * Self Test
+ **************************************************************************/
+
+#include <linux/init.h>
+#include <linux/jhash.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/rcupdate.h>
+#include <linux/rhashtable.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/random.h>
+#include <linux/vmalloc.h>
+#include <linux/wait.h>
+
+#define MAX_ENTRIES 1000000
+#define TEST_INSERT_FAIL INT_MAX
+
+static int parm_entries = 50000;
+module_param(parm_entries, int, 0);
+MODULE_PARM_DESC(parm_entries, "Number of entries to add (default: 50000)");
+
+static int runs = 4;
+module_param(runs, int, 0);
+MODULE_PARM_DESC(runs, "Number of test runs per variant (default: 4)");
+
+static int max_size = 0;
+module_param(max_size, int, 0);
+MODULE_PARM_DESC(max_size, "Maximum table size (default: calculated)");
+
+static bool shrinking = false;
+module_param(shrinking, bool, 0);
+MODULE_PARM_DESC(shrinking, "Enable automatic shrinking (default: off)");
+
+static int size = 8;
+module_param(size, int, 0);
+MODULE_PARM_DESC(size, "Initial size hint of table (default: 8)");
+
+static int tcount = 10;
+module_param(tcount, int, 0);
+MODULE_PARM_DESC(tcount, "Number of threads to spawn (default: 10)");
+
+static bool enomem_retry = false;
+module_param(enomem_retry, bool, 0);
+MODULE_PARM_DESC(enomem_retry, "Retry insert even if -ENOMEM was returned (default: off)");
+
+struct test_obj_val {
+ int id;
+ int tid;
+};
+
+struct test_obj {
+ struct test_obj_val value;
+ struct rhash_head node;
+};
+
+struct test_obj_rhl {
+ struct test_obj_val value;
+ struct rhlist_head list_node;
+};
+
+struct thread_data {
+ unsigned int entries;
+ int id;
+ struct task_struct *task;
+ struct test_obj *objs;
+};
+
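+/*
+ * Deliberately weak hash (id modulo 10) so that many keys collide;
+ * the duplicate-insertion tests below rely on crowded bucket chains.
+ */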
+static u32 my_hashfn(const void *data, u32 len, u32 seed)
+{
+ const struct test_obj_rhl *obj = data;
+
+ return (obj->value.id % 10);
+}
+
+static int my_cmpfn(struct rhashtable_compare_arg *arg, const void *obj)
+{
+ const struct test_obj_rhl *test_obj = obj;
+ const struct test_obj_val *val = arg->key;
+
+ return test_obj->value.id - val->id;
+}
+
+static struct rhashtable_params test_rht_params = {
+ .head_offset = offsetof(struct test_obj, node),
+ .key_offset = offsetof(struct test_obj, value),
+ .key_len = sizeof(struct test_obj_val),
+ .hashfn = jhash,
+};
+
+static struct rhashtable_params test_rht_params_dup = {
+ .head_offset = offsetof(struct test_obj_rhl, list_node),
+ .key_offset = offsetof(struct test_obj_rhl, value),
+ .key_len = sizeof(struct test_obj_val),
+ .hashfn = jhash,
+ .obj_hashfn = my_hashfn,
+ .obj_cmpfn = my_cmpfn,
+ .nelem_hint = 128,
+ .automatic_shrinking = false,
+};
+
+static atomic_t startup_count;
+static DECLARE_WAIT_QUEUE_HEAD(startup_wait);
+
+static int insert_retry(struct rhashtable *ht, struct test_obj *obj,
+ const struct rhashtable_params params)
+{
+ int err, retries = -1, enomem_retries = 0;
+
+ do {
+ retries++;
+ cond_resched();
+ err = rhashtable_insert_fast(ht, &obj->node, params);
+ if (err == -ENOMEM && enomem_retry) {
+ enomem_retries++;
+ err = -EBUSY;
+ }
+ } while (err == -EBUSY);
+
+ if (enomem_retries)
+ pr_info(" %u insertions retried after -ENOMEM\n",
+ enomem_retries);
+
+ return err ? : retries;
+}
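+/*
+ * Return convention ("err ? : retries"): a negative errno on hard
+ * failure, otherwise the number of -EBUSY retries that were needed,
+ * e.g. 0 for a clean first-attempt insert.
+ */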
+
+static int __init test_rht_lookup(struct rhashtable *ht, struct test_obj *array,
+ unsigned int entries)
+{
+ unsigned int i;
+
+ for (i = 0; i < entries; i++) {
+ struct test_obj *obj;
+ bool expected = !(i % 2);
+ struct test_obj_val key = {
+ .id = i,
+ };
+
+ if (array[i / 2].value.id == TEST_INSERT_FAIL)
+ expected = false;
+
+ obj = rhashtable_lookup_fast(ht, &key, test_rht_params);
+
+ if (expected && !obj) {
+ pr_warn("Test failed: Could not find key %u\n", key.id);
+ return -ENOENT;
+ } else if (!expected && obj) {
+ pr_warn("Test failed: Unexpected entry found for key %u\n",
+ key.id);
+ return -EEXIST;
+ } else if (expected && obj) {
+ if (obj->value.id != i) {
+ pr_warn("Test failed: Lookup value mismatch %u!=%u\n",
+ obj->value.id, i);
+ return -EINVAL;
+ }
+ }
+
+ cond_resched_rcu();
+ }
+
+ return 0;
+}
+
+static void test_bucket_stats(struct rhashtable *ht, unsigned int entries)
+{
+ unsigned int total = 0, chain_len = 0;
+ struct rhashtable_iter hti;
+ struct rhash_head *pos;
+
+ rhashtable_walk_enter(ht, &hti);
+ rhashtable_walk_start(&hti);
+
+ while ((pos = rhashtable_walk_next(&hti))) {
+ if (PTR_ERR(pos) == -EAGAIN) {
+ pr_info("Info: encountered resize\n");
+ chain_len++;
+ continue;
+ } else if (IS_ERR(pos)) {
+ pr_warn("Test failed: rhashtable_walk_next() error: %ld\n",
+ PTR_ERR(pos));
+ break;
+ }
+
+ total++;
+ }
+
+ rhashtable_walk_stop(&hti);
+ rhashtable_walk_exit(&hti);
+
+ pr_info(" Traversal complete: counted=%u, nelems=%u, entries=%d, table-jumps=%u\n",
+ total, atomic_read(&ht->nelems), entries, chain_len);
+
+ if (total != atomic_read(&ht->nelems) || total != entries)
+ pr_warn("Test failed: Total count mismatch ^^^");
+}
+
+static s64 __init test_rhashtable(struct rhashtable *ht, struct test_obj *array,
+ unsigned int entries)
+{
+ struct test_obj *obj;
+ int err;
+ unsigned int i, insert_retries = 0;
+ s64 start, end;
+
+ /*
+ * Insertion Test:
+ * Insert entries into table with all keys even numbers
+ */
+ pr_info(" Adding %d keys\n", entries);
+ start = ktime_get_ns();
+ for (i = 0; i < entries; i++) {
+ struct test_obj *obj = &array[i];
+
+ obj->value.id = i * 2;
+ err = insert_retry(ht, obj, test_rht_params);
+ if (err > 0)
+ insert_retries += err;
+ else if (err)
+ return err;
+ }
+
+ if (insert_retries)
+ pr_info(" %u insertions retried due to memory pressure\n",
+ insert_retries);
+
+ test_bucket_stats(ht, entries);
+ rcu_read_lock();
+ test_rht_lookup(ht, array, entries);
+ rcu_read_unlock();
+
+ test_bucket_stats(ht, entries);
+
+ pr_info(" Deleting %d keys\n", entries);
+ for (i = 0; i < entries; i++) {
+ struct test_obj_val key = {
+ .id = i * 2,
+ };
+
+ if (array[i].value.id != TEST_INSERT_FAIL) {
+ obj = rhashtable_lookup_fast(ht, &key, test_rht_params);
+ BUG_ON(!obj);
+
+ rhashtable_remove_fast(ht, &obj->node, test_rht_params);
+ }
+
+ cond_resched();
+ }
+
+ end = ktime_get_ns();
+ pr_info(" Duration of test: %lld ns\n", end - start);
+
+ return end - start;
+}
+
+static struct rhashtable ht;
+static struct rhltable rhlt;
+
+static int __init test_rhltable(unsigned int entries)
+{
+ struct test_obj_rhl *rhl_test_objects;
+ unsigned long *obj_in_table;
+ unsigned int i, j, k;
+ int ret, err;
+
+ if (entries == 0)
+ entries = 1;
+
+ rhl_test_objects = vzalloc(array_size(entries,
+ sizeof(*rhl_test_objects)));
+ if (!rhl_test_objects)
+ return -ENOMEM;
+
+ ret = -ENOMEM;
+ obj_in_table = vzalloc(array_size(sizeof(unsigned long),
+ BITS_TO_LONGS(entries)));
+ if (!obj_in_table)
+ goto out_free;
+
+ err = rhltable_init(&rhlt, &test_rht_params);
+ if (WARN_ON(err))
+ goto out_free;
+
+ k = get_random_u32();
+ ret = 0;
+ for (i = 0; i < entries; i++) {
+ rhl_test_objects[i].value.id = k;
+ err = rhltable_insert(&rhlt, &rhl_test_objects[i].list_node,
+ test_rht_params);
+ if (WARN(err, "error %d on element %d\n", err, i))
+ break;
+ if (err == 0)
+ set_bit(i, obj_in_table);
+ }
+
+ if (err)
+ ret = err;
+
+ pr_info("test %d add/delete pairs into rhlist\n", entries);
+ for (i = 0; i < entries; i++) {
+ struct rhlist_head *h, *pos;
+ struct test_obj_rhl *obj;
+ struct test_obj_val key = {
+ .id = k,
+ };
+ bool found;
+
+ rcu_read_lock();
+ h = rhltable_lookup(&rhlt, &key, test_rht_params);
+ if (WARN(!h, "key not found during iteration %d of %d", i, entries)) {
+ rcu_read_unlock();
+ break;
+ }
+
+ if (i) {
+ j = i - 1;
+ rhl_for_each_entry_rcu(obj, pos, h, list_node) {
+ if (WARN(pos == &rhl_test_objects[j].list_node, "old element found, should be gone"))
+ break;
+ }
+ }
+
+ cond_resched_rcu();
+
+ found = false;
+
+ rhl_for_each_entry_rcu(obj, pos, h, list_node) {
+ if (pos == &rhl_test_objects[i].list_node) {
+ found = true;
+ break;
+ }
+ }
+
+ rcu_read_unlock();
+
+ if (WARN(!found, "element %d not found", i))
+ break;
+
+ err = rhltable_remove(&rhlt, &rhl_test_objects[i].list_node, test_rht_params);
+ WARN(err, "rhltable_remove: err %d for iteration %d\n", err, i);
+ if (err == 0)
+ clear_bit(i, obj_in_table);
+ }
+
+ if (ret == 0 && err)
+ ret = err;
+
+ for (i = 0; i < entries; i++) {
+ WARN(test_bit(i, obj_in_table), "elem %d allegedly still present", i);
+
+ err = rhltable_insert(&rhlt, &rhl_test_objects[i].list_node,
+ test_rht_params);
+ if (WARN(err, "error %d on element %d\n", err, i))
+ break;
+ if (err == 0)
+ set_bit(i, obj_in_table);
+ }
+
+ pr_info("test %d random rhlist add/delete operations\n", entries);
+ for (j = 0; j < entries; j++) {
+ u32 i = get_random_u32_below(entries);
+ u32 prand = get_random_u32_below(4);
+
+ cond_resched();
+
+ err = rhltable_remove(&rhlt, &rhl_test_objects[i].list_node, test_rht_params);
+ if (test_bit(i, obj_in_table)) {
+ clear_bit(i, obj_in_table);
+ if (WARN(err, "cannot remove element at slot %d", i))
+ continue;
+ } else {
+ if (WARN(err != -ENOENT, "removed non-existent element %d, error %d not %d",
+ i, err, -ENOENT))
+ continue;
+ }
+
+ if (prand & 1) {
+ err = rhltable_insert(&rhlt, &rhl_test_objects[i].list_node, test_rht_params);
+ if (err == 0) {
+ if (WARN(test_and_set_bit(i, obj_in_table), "succeeded to insert same object %d", i))
+ continue;
+ } else {
+ if (WARN(!test_bit(i, obj_in_table), "failed to insert object %d", i))
+ continue;
+ }
+ }
+
+ if (prand & 2) {
+ i = get_random_u32_below(entries);
+ if (test_bit(i, obj_in_table)) {
+ err = rhltable_remove(&rhlt, &rhl_test_objects[i].list_node, test_rht_params);
+ WARN(err, "cannot remove element at slot %d", i);
+ if (err == 0)
+ clear_bit(i, obj_in_table);
+ } else {
+ err = rhltable_insert(&rhlt, &rhl_test_objects[i].list_node, test_rht_params);
+ WARN(err, "failed to insert object %d", i);
+ if (err == 0)
+ set_bit(i, obj_in_table);
+ }
+ }
+ }
+
+ for (i = 0; i < entries; i++) {
+ cond_resched();
+ err = rhltable_remove(&rhlt, &rhl_test_objects[i].list_node, test_rht_params);
+ if (test_bit(i, obj_in_table)) {
+ if (WARN(err, "cannot remove element at slot %d", i))
+ continue;
+ } else {
+ if (WARN(err != -ENOENT, "removed non-existent element, error %d not %d",
+ err, -ENOENT))
+ continue;
+ }
+ }
+
+ rhltable_destroy(&rhlt);
+out_free:
+ vfree(rhl_test_objects);
+ vfree(obj_in_table);
+ return ret;
+}
+
+static int __init test_rhashtable_max(struct test_obj *array,
+ unsigned int entries)
+{
+ unsigned int i;
+ int err;
+
+ test_rht_params.max_size = roundup_pow_of_two(entries / 8);
+ err = rhashtable_init(&ht, &test_rht_params);
+ if (err)
+ return err;
+
+ for (i = 0; i < ht.max_elems; i++) {
+ struct test_obj *obj = &array[i];
+
+ obj->value.id = i * 2;
+ err = insert_retry(&ht, obj, test_rht_params);
+ if (err < 0)
+ return err;
+ }
+
+ err = insert_retry(&ht, &array[ht.max_elems], test_rht_params);
+ if (err == -E2BIG) {
+ err = 0;
+ } else {
+ pr_info("insert element %u should have failed with %d, got %d\n",
+ ht.max_elems, -E2BIG, err);
+ if (err == 0)
+ err = -1;
+ }
+
+ rhashtable_destroy(&ht);
+
+ return err;
+}
+
+static unsigned int __init print_ht(struct rhltable *rhlt)
+{
+ struct rhashtable *ht;
+ const struct bucket_table *tbl;
+ char buff[512] = "";
+ int offset = 0;
+ unsigned int i, cnt = 0;
+
+ ht = &rhlt->ht;
+ /* Take the mutex to avoid an RCU warning. */
+ mutex_lock(&ht->mutex);
+ tbl = rht_dereference(ht->tbl, ht);
+ for (i = 0; i < tbl->size; i++) {
+ struct rhash_head *pos, *next;
+ struct test_obj_rhl *p;
+
+ pos = rht_ptr_exclusive(tbl->buckets + i);
+ next = !rht_is_a_nulls(pos) ? rht_dereference(pos->next, ht) : NULL;
+
+ if (!rht_is_a_nulls(pos)) {
+ offset += sprintf(buff + offset, "\nbucket[%d] -> ", i);
+ }
+
+ while (!rht_is_a_nulls(pos)) {
+ struct rhlist_head *list = container_of(pos, struct rhlist_head, rhead);
+ offset += sprintf(buff + offset, "[[");
+ do {
+ pos = &list->rhead;
+ list = rht_dereference(list->next, ht);
+ p = rht_obj(ht, pos);
+
+ offset += sprintf(buff + offset, " val %d (tid=%d)%s", p->value.id, p->value.tid,
+ list? ", " : " ");
+ cnt++;
+ } while (list);
+
+ pos = next;
+ next = !rht_is_a_nulls(pos) ?
+ rht_dereference(pos->next, ht) : NULL;
+
+ offset += sprintf(buff + offset, "]]%s", !rht_is_a_nulls(pos) ? " -> " : "");
+ }
+ }
+ printk(KERN_ERR "\n---- ht: ----%s\n-------------\n", buff);
+ mutex_unlock(&ht->mutex);
+
+ return cnt;
+}
+
+static int __init test_insert_dup(struct test_obj_rhl *rhl_test_objects,
+ int cnt, bool slow)
+{
+ struct rhltable *rhlt;
+ unsigned int i, ret;
+ const char *key;
+ int err = 0;
+
+ rhlt = kmalloc(sizeof(*rhlt), GFP_KERNEL);
+ if (WARN_ON(!rhlt))
+ return -EINVAL;
+
+ err = rhltable_init(rhlt, &test_rht_params_dup);
+ if (WARN_ON(err)) {
+ kfree(rhlt);
+ return err;
+ }
+
+ for (i = 0; i < cnt; i++) {
+ rhl_test_objects[i].value.tid = i;
+ key = rht_obj(&rhlt->ht, &rhl_test_objects[i].list_node.rhead);
+ key += test_rht_params_dup.key_offset;
+
+ if (slow) {
+ err = PTR_ERR(rhashtable_insert_slow(&rhlt->ht, key,
+ &rhl_test_objects[i].list_node.rhead));
+ if (err == -EAGAIN)
+ err = 0;
+ } else
+ err = rhltable_insert(rhlt,
+ &rhl_test_objects[i].list_node,
+ test_rht_params_dup);
+ if (WARN(err, "error %d on element %d/%d (%s)\n", err, i, cnt, slow? "slow" : "fast"))
+ goto skip_print;
+ }
+
+ ret = print_ht(rhlt);
+ WARN(ret != cnt, "missing rhltable elements (%d != %d, %s)\n", ret, cnt, slow? "slow" : "fast");
+
+skip_print:
+ rhltable_destroy(rhlt);
+ kfree(rhlt);
+
+ return 0;
+}
+
+static int __init test_insert_duplicates_run(void)
+{
+ struct test_obj_rhl rhl_test_objects[3] = {};
+
+ pr_info("test inserting duplicates\n");
+
+ /* two different values that map to the same bucket */
+ rhl_test_objects[0].value.id = 1;
+ rhl_test_objects[1].value.id = 21;
+
+ /* and another duplicate with the same value as [0], which will
+ * be second on the bucket list */
+ rhl_test_objects[2].value.id = rhl_test_objects[0].value.id;
+
+ test_insert_dup(rhl_test_objects, 2, false);
+ test_insert_dup(rhl_test_objects, 3, false);
+ test_insert_dup(rhl_test_objects, 2, true);
+ test_insert_dup(rhl_test_objects, 3, true);
+
+ return 0;
+}
+
+static int thread_lookup_test(struct thread_data *tdata)
+{
+ unsigned int entries = tdata->entries;
+ int i, err = 0;
+
+ for (i = 0; i < entries; i++) {
+ struct test_obj *obj;
+ struct test_obj_val key = {
+ .id = i,
+ .tid = tdata->id,
+ };
+
+ obj = rhashtable_lookup_fast(&ht, &key, test_rht_params);
+ if (obj && (tdata->objs[i].value.id == TEST_INSERT_FAIL)) {
+ pr_err(" found unexpected object %d-%d\n", key.tid, key.id);
+ err++;
+ } else if (!obj && (tdata->objs[i].value.id != TEST_INSERT_FAIL)) {
+ pr_err(" object %d-%d not found!\n", key.tid, key.id);
+ err++;
+ } else if (obj && memcmp(&obj->value, &key, sizeof(key))) {
+ pr_err(" wrong object returned (got %d-%d, expected %d-%d)\n",
+ obj->value.tid, obj->value.id, key.tid, key.id);
+ err++;
+ }
+
+ cond_resched();
+ }
+ return err;
+}
+
+static int threadfunc(void *data)
+{
+ int i, step, err = 0, insert_retries = 0;
+ struct thread_data *tdata = data;
+
+ if (atomic_dec_and_test(&startup_count))
+ wake_up(&startup_wait);
+ if (wait_event_interruptible(startup_wait, atomic_read(&startup_count) == -1)) {
+ pr_err(" thread[%d]: interrupted\n", tdata->id);
+ goto out;
+ }
+
+ for (i = 0; i < tdata->entries; i++) {
+ tdata->objs[i].value.id = i;
+ tdata->objs[i].value.tid = tdata->id;
+ err = insert_retry(&ht, &tdata->objs[i], test_rht_params);
+ if (err > 0) {
+ insert_retries += err;
+ } else if (err) {
+ pr_err(" thread[%d]: rhashtable_insert_fast failed\n",
+ tdata->id);
+ goto out;
+ }
+ }
+ if (insert_retries)
+ pr_info(" thread[%d]: %u insertions retried due to memory pressure\n",
+ tdata->id, insert_retries);
+
+ err = thread_lookup_test(tdata);
+ if (err) {
+ pr_err(" thread[%d]: rhashtable_lookup_test failed\n",
+ tdata->id);
+ goto out;
+ }
+
+ for (step = 10; step > 0; step--) {
+ for (i = 0; i < tdata->entries; i += step) {
+ if (tdata->objs[i].value.id == TEST_INSERT_FAIL)
+ continue;
+ err = rhashtable_remove_fast(&ht, &tdata->objs[i].node,
+ test_rht_params);
+ if (err) {
+ pr_err(" thread[%d]: rhashtable_remove_fast failed\n",
+ tdata->id);
+ goto out;
+ }
+ tdata->objs[i].value.id = TEST_INSERT_FAIL;
+
+ cond_resched();
+ }
+ err = thread_lookup_test(tdata);
+ if (err) {
+ pr_err(" thread[%d]: rhashtable_lookup_test (2) failed\n",
+ tdata->id);
+ goto out;
+ }
+ }
+out:
+ while (!kthread_should_stop()) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule();
+ }
+ return err;
+}
+
+static int __init test_rht_init(void)
+{
+ unsigned int entries;
+ int i, err, started_threads = 0, failed_threads = 0;
+ u64 total_time = 0;
+ struct thread_data *tdata;
+ struct test_obj *objs;
+
+ if (parm_entries < 0)
+ parm_entries = 1;
+
+ entries = min(parm_entries, MAX_ENTRIES);
+
+ test_rht_params.automatic_shrinking = shrinking;
+ test_rht_params.max_size = max_size ? : roundup_pow_of_two(entries);
+ test_rht_params.nelem_hint = size;
+
+ objs = vzalloc(array_size(sizeof(struct test_obj),
+ test_rht_params.max_size + 1));
+ if (!objs)
+ return -ENOMEM;
+
+ pr_info("Running rhashtable test nelem=%d, max_size=%d, shrinking=%d\n",
+ size, max_size, shrinking);
+
+ for (i = 0; i < runs; i++) {
+ s64 time;
+
+ pr_info("Test %02d:\n", i);
+ memset(objs, 0, test_rht_params.max_size * sizeof(struct test_obj));
+
+ err = rhashtable_init(&ht, &test_rht_params);
+ if (err < 0) {
+ pr_warn("Test failed: Unable to initialize hashtable: %d\n",
+ err);
+ continue;
+ }
+
+ time = test_rhashtable(&ht, objs, entries);
+ rhashtable_destroy(&ht);
+ if (time < 0) {
+ vfree(objs);
+ pr_warn("Test failed: return code %lld\n", time);
+ return -EINVAL;
+ }
+
+ total_time += time;
+ }
+
+ pr_info("test if its possible to exceed max_size %d: %s\n",
+ test_rht_params.max_size, test_rhashtable_max(objs, entries) == 0 ?
+ "no, ok" : "YES, failed");
+ vfree(objs);
+
+ do_div(total_time, runs);
+ pr_info("Average test time: %llu\n", total_time);
+
+ test_insert_duplicates_run();
+
+ if (!tcount)
+ return 0;
+
+ pr_info("Testing concurrent rhashtable access from %d threads\n",
+ tcount);
+ atomic_set(&startup_count, tcount);
+ tdata = vzalloc(array_size(tcount, sizeof(struct thread_data)));
+ if (!tdata)
+ return -ENOMEM;
+ objs = vzalloc(array3_size(sizeof(struct test_obj), tcount, entries));
+ if (!objs) {
+ vfree(tdata);
+ return -ENOMEM;
+ }
+
+ test_rht_params.max_size = max_size ? :
+ roundup_pow_of_two(tcount * entries);
+ err = rhashtable_init(&ht, &test_rht_params);
+ if (err < 0) {
+ pr_warn("Test failed: Unable to initialize hashtable: %d\n",
+ err);
+ vfree(tdata);
+ vfree(objs);
+ return -EINVAL;
+ }
+ for (i = 0; i < tcount; i++) {
+ tdata[i].id = i;
+ tdata[i].entries = entries;
+ tdata[i].objs = objs + i * entries;
+ tdata[i].task = kthread_run(threadfunc, &tdata[i],
+ "rhashtable_thrad[%d]", i);
+ if (IS_ERR(tdata[i].task)) {
+ pr_err(" kthread_run failed for thread %d\n", i);
+ atomic_dec(&startup_count);
+ } else {
+ started_threads++;
+ }
+ }
+ if (wait_event_interruptible(startup_wait, atomic_read(&startup_count) == 0))
+ pr_err(" wait_event interruptible failed\n");
+ /* count is 0 now, set it to -1 and wake up all threads together */
+ atomic_dec(&startup_count);
+ wake_up_all(&startup_wait);
+ for (i = 0; i < tcount; i++) {
+ if (IS_ERR(tdata[i].task))
+ continue;
+ if ((err = kthread_stop(tdata[i].task))) {
+ pr_warn("Test failed: thread %d returned: %d\n",
+ i, err);
+ failed_threads++;
+ }
+ }
+ rhashtable_destroy(&ht);
+ vfree(tdata);
+ vfree(objs);
+
+ /*
+ * rhltable_remove is very expensive, default values can cause test
+ * to run for 2 minutes or more, use a smaller number instead.
+ */
+ err = test_rhltable(entries / 16);
+ pr_info("Started %d threads, %d failed, rhltable test returns %d\n",
+ started_threads, failed_threads, err);
+ return 0;
+}
+
+static void __exit test_rht_exit(void)
+{
+}
+
+module_init(test_rht_init);
+module_exit(test_rht_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/lib/test_scanf.c b/lib/test_scanf.c
new file mode 100644
index 000000000..a2707af29
--- /dev/null
+++ b/lib/test_scanf.c
@@ -0,0 +1,813 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Test cases for sscanf facility.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/bitops.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/overflow.h>
+#include <linux/printk.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include "../tools/testing/selftests/kselftest_module.h"
+
+#define BUF_SIZE 1024
+
+KSTM_MODULE_GLOBALS();
+static char *test_buffer __initdata;
+static char *fmt_buffer __initdata;
+static struct rnd_state rnd_state __initdata;
+
+typedef int (*check_fn)(const void *check_data, const char *string,
+ const char *fmt, int n_args, va_list ap);
+
+static void __scanf(4, 6) __init
+_test(check_fn fn, const void *check_data, const char *string, const char *fmt,
+ int n_args, ...)
+{
+ va_list ap, ap_copy;
+ int ret;
+
+ total_tests++;
+
+ va_start(ap, n_args);
+ va_copy(ap_copy, ap);
+ ret = vsscanf(string, fmt, ap_copy);
+ va_end(ap_copy);
+
+ if (ret != n_args) {
+ pr_warn("vsscanf(\"%s\", \"%s\", ...) returned %d expected %d\n",
+ string, fmt, ret, n_args);
+ goto fail;
+ }
+
+ ret = (*fn)(check_data, string, fmt, n_args, ap);
+ if (ret)
+ goto fail;
+
+ va_end(ap);
+
+ return;
+
+fail:
+ failed_tests++;
+ va_end(ap);
+}
+
+#define _check_numbers_template(arg_fmt, expect, str, fmt, n_args, ap) \
+do { \
+ pr_debug("\"%s\", \"%s\" ->\n", str, fmt); \
+ for (; n_args > 0; n_args--, expect++) { \
+ typeof(*expect) got = *va_arg(ap, typeof(expect)); \
+ pr_debug("\t" arg_fmt "\n", got); \
+ if (got != *expect) { \
+ pr_warn("vsscanf(\"%s\", \"%s\", ...) expected " arg_fmt " got " arg_fmt "\n", \
+ str, fmt, *expect, got); \
+ return 1; \
+ } \
+ } \
+ return 0; \
+} while (0)
+
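+/*
+ * Each check_* helper below is the template instantiated for one type:
+ * e.g. check_int() pulls an int * out of the va_list for every
+ * converted field and compares the stored value against the expected
+ * array using the matching printf conversion.
+ */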
+static int __init check_ull(const void *check_data, const char *string,
+ const char *fmt, int n_args, va_list ap)
+{
+ const unsigned long long *pval = check_data;
+
+ _check_numbers_template("%llu", pval, string, fmt, n_args, ap);
+}
+
+static int __init check_ll(const void *check_data, const char *string,
+ const char *fmt, int n_args, va_list ap)
+{
+ const long long *pval = check_data;
+
+ _check_numbers_template("%lld", pval, string, fmt, n_args, ap);
+}
+
+static int __init check_ulong(const void *check_data, const char *string,
+ const char *fmt, int n_args, va_list ap)
+{
+ const unsigned long *pval = check_data;
+
+ _check_numbers_template("%lu", pval, string, fmt, n_args, ap);
+}
+
+static int __init check_long(const void *check_data, const char *string,
+ const char *fmt, int n_args, va_list ap)
+{
+ const long *pval = check_data;
+
+ _check_numbers_template("%ld", pval, string, fmt, n_args, ap);
+}
+
+static int __init check_uint(const void *check_data, const char *string,
+ const char *fmt, int n_args, va_list ap)
+{
+ const unsigned int *pval = check_data;
+
+ _check_numbers_template("%u", pval, string, fmt, n_args, ap);
+}
+
+static int __init check_int(const void *check_data, const char *string,
+ const char *fmt, int n_args, va_list ap)
+{
+ const int *pval = check_data;
+
+ _check_numbers_template("%d", pval, string, fmt, n_args, ap);
+}
+
+static int __init check_ushort(const void *check_data, const char *string,
+ const char *fmt, int n_args, va_list ap)
+{
+ const unsigned short *pval = check_data;
+
+ _check_numbers_template("%hu", pval, string, fmt, n_args, ap);
+}
+
+static int __init check_short(const void *check_data, const char *string,
+ const char *fmt, int n_args, va_list ap)
+{
+ const short *pval = check_data;
+
+ _check_numbers_template("%hd", pval, string, fmt, n_args, ap);
+}
+
+static int __init check_uchar(const void *check_data, const char *string,
+ const char *fmt, int n_args, va_list ap)
+{
+ const unsigned char *pval = check_data;
+
+ _check_numbers_template("%hhu", pval, string, fmt, n_args, ap);
+}
+
+static int __init check_char(const void *check_data, const char *string,
+ const char *fmt, int n_args, va_list ap)
+{
+ const signed char *pval = check_data;
+
+ _check_numbers_template("%hhd", pval, string, fmt, n_args, ap);
+}
+
+/* Selection of interesting numbers to test, copied from test-kstrtox.c */
+static const unsigned long long numbers[] __initconst = {
+ 0x0ULL,
+ 0x1ULL,
+ 0x7fULL,
+ 0x80ULL,
+ 0x81ULL,
+ 0xffULL,
+ 0x100ULL,
+ 0x101ULL,
+ 0x7fffULL,
+ 0x8000ULL,
+ 0x8001ULL,
+ 0xffffULL,
+ 0x10000ULL,
+ 0x10001ULL,
+ 0x7fffffffULL,
+ 0x80000000ULL,
+ 0x80000001ULL,
+ 0xffffffffULL,
+ 0x100000000ULL,
+ 0x100000001ULL,
+ 0x7fffffffffffffffULL,
+ 0x8000000000000000ULL,
+ 0x8000000000000001ULL,
+ 0xfffffffffffffffeULL,
+ 0xffffffffffffffffULL,
+};
+
+#define value_representable_in_type(T, val) \
+(is_signed_type(T) \
+ ? ((long long)(val) >= type_min(T)) && ((long long)(val) <= type_max(T)) \
+ : ((unsigned long long)(val) <= type_max(T)))
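+/*
+ * Worked example: for T = signed char, 128 (0x80) exceeds type_max()
+ * of 127 and is rejected, while -128 equals type_min() and passes, so
+ * simple_numbers_loop() below exercises that entry only in negated form.
+ */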
+
+
+#define test_one_number(T, gen_fmt, scan_fmt, val, fn) \
+do { \
+ const T expect_val = (T)(val); \
+ T result = ~expect_val; /* should be overwritten */ \
+ \
+ snprintf(test_buffer, BUF_SIZE, gen_fmt, expect_val); \
+ _test(fn, &expect_val, test_buffer, "%" scan_fmt, 1, &result); \
+} while (0)
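+/*
+ * Illustrative use: test_one_number(int, "%d", "d", 42, check_int)
+ * renders "42" into test_buffer and scans it back with "%d"; result
+ * starts as ~42, so a conversion that never stores anything shows up
+ * as a mismatch instead of a false pass.
+ */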
+
+#define simple_numbers_loop(T, gen_fmt, scan_fmt, fn) \
+do { \
+ int i; \
+ \
+ for (i = 0; i < ARRAY_SIZE(numbers); i++) { \
+ if (value_representable_in_type(T, numbers[i])) \
+ test_one_number(T, gen_fmt, scan_fmt, \
+ numbers[i], fn); \
+ \
+ if (value_representable_in_type(T, -numbers[i])) \
+ test_one_number(T, gen_fmt, scan_fmt, \
+ -numbers[i], fn); \
+ } \
+} while (0)
+
+static void __init numbers_simple(void)
+{
+ simple_numbers_loop(unsigned long long, "%llu", "llu", check_ull);
+ simple_numbers_loop(long long, "%lld", "lld", check_ll);
+ simple_numbers_loop(long long, "%lld", "lli", check_ll);
+ simple_numbers_loop(unsigned long long, "%llx", "llx", check_ull);
+ simple_numbers_loop(long long, "%llx", "llx", check_ll);
+ simple_numbers_loop(long long, "0x%llx", "lli", check_ll);
+ simple_numbers_loop(unsigned long long, "0x%llx", "llx", check_ull);
+ simple_numbers_loop(long long, "0x%llx", "llx", check_ll);
+
+ simple_numbers_loop(unsigned long, "%lu", "lu", check_ulong);
+ simple_numbers_loop(long, "%ld", "ld", check_long);
+ simple_numbers_loop(long, "%ld", "li", check_long);
+ simple_numbers_loop(unsigned long, "%lx", "lx", check_ulong);
+ simple_numbers_loop(long, "%lx", "lx", check_long);
+ simple_numbers_loop(long, "0x%lx", "li", check_long);
+ simple_numbers_loop(unsigned long, "0x%lx", "lx", check_ulong);
+ simple_numbers_loop(long, "0x%lx", "lx", check_long);
+
+ simple_numbers_loop(unsigned int, "%u", "u", check_uint);
+ simple_numbers_loop(int, "%d", "d", check_int);
+ simple_numbers_loop(int, "%d", "i", check_int);
+ simple_numbers_loop(unsigned int, "%x", "x", check_uint);
+ simple_numbers_loop(int, "%x", "x", check_int);
+ simple_numbers_loop(int, "0x%x", "i", check_int);
+ simple_numbers_loop(unsigned int, "0x%x", "x", check_uint);
+ simple_numbers_loop(int, "0x%x", "x", check_int);
+
+ simple_numbers_loop(unsigned short, "%hu", "hu", check_ushort);
+ simple_numbers_loop(short, "%hd", "hd", check_short);
+ simple_numbers_loop(short, "%hd", "hi", check_short);
+ simple_numbers_loop(unsigned short, "%hx", "hx", check_ushort);
+ simple_numbers_loop(short, "%hx", "hx", check_short);
+ simple_numbers_loop(short, "0x%hx", "hi", check_short);
+ simple_numbers_loop(unsigned short, "0x%hx", "hx", check_ushort);
+ simple_numbers_loop(short, "0x%hx", "hx", check_short);
+
+ simple_numbers_loop(unsigned char, "%hhu", "hhu", check_uchar);
+ simple_numbers_loop(signed char, "%hhd", "hhd", check_char);
+ simple_numbers_loop(signed char, "%hhd", "hhi", check_char);
+ simple_numbers_loop(unsigned char, "%hhx", "hhx", check_uchar);
+ simple_numbers_loop(signed char, "%hhx", "hhx", check_char);
+ simple_numbers_loop(signed char, "0x%hhx", "hhi", check_char);
+ simple_numbers_loop(unsigned char, "0x%hhx", "hhx", check_uchar);
+ simple_numbers_loop(signed char, "0x%hhx", "hhx", check_char);
+}
+
+/*
+ * This gives a better variety of number "lengths" in a small sample than
+ * the raw prandom*() functions (not mathematically rigorous!).
+ * Variability of length and value is more important than perfect randomness.
+ */
+static u32 __init next_test_random(u32 max_bits)
+{
+ u32 n_bits = hweight32(prandom_u32_state(&rnd_state)) % (max_bits + 1);
+
+ return prandom_u32_state(&rnd_state) & GENMASK(n_bits, 0);
+}
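+/*
+ * Rough sketch of the distribution: hweight32() of a uniform 32-bit
+ * word is binomially distributed around 16, so n_bits clusters in the
+ * mid range; GENMASK(n_bits, 0) then keeps the low n_bits + 1 bits of
+ * a second random word.
+ */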
+
+static unsigned long long __init next_test_random_ull(void)
+{
+ u32 rand1 = prandom_u32_state(&rnd_state);
+ u32 n_bits = (hweight32(rand1) * 3) % 64;
+ u64 val = (u64)prandom_u32_state(&rnd_state) * rand1;
+
+ return val & GENMASK_ULL(n_bits, 0);
+}
+
+#define random_for_type(T) \
+ ((T)(sizeof(T) <= sizeof(u32) \
+ ? next_test_random(BITS_PER_TYPE(T)) \
+ : next_test_random_ull()))
+
+/*
+ * Define a pattern of negative and positive numbers to ensure we get
+ * some of both within the small number of samples in a test string.
+ */
+#define NEGATIVES_PATTERN 0x3246 /* 00110010 01000110 */
+
+#define fill_random_array(arr) \
+do { \
+ unsigned int neg_pattern = NEGATIVES_PATTERN; \
+ int i; \
+ \
+ for (i = 0; i < ARRAY_SIZE(arr); i++, neg_pattern >>= 1) { \
+ (arr)[i] = random_for_type(typeof((arr)[0])); \
+ if (is_signed_type(typeof((arr)[0])) && (neg_pattern & 1)) \
+ (arr)[i] = -(arr)[i]; \
+ } \
+} while (0)
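+/*
+ * For an 8-element array the low bits of NEGATIVES_PATTERN (...01000110)
+ * negate slots 1, 2 and 6, guaranteeing that both signs appear in every
+ * signed test string.
+ */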
+
+/*
+ * Convenience wrapper around snprintf() to append at buf_pos in buf,
+ * updating buf_pos and returning the number of characters appended.
+ * On error, buf_pos is unchanged and the return value is 0.
+ */
+static int __init __printf(4, 5)
+append_fmt(char *buf, int *buf_pos, int buf_len, const char *val_fmt, ...)
+{
+ va_list ap;
+ int field_len;
+
+ va_start(ap, val_fmt);
+ field_len = vsnprintf(buf + *buf_pos, buf_len - *buf_pos, val_fmt, ap);
+ va_end(ap);
+
+ if (field_len < 0)
+ field_len = 0;
+
+ *buf_pos += field_len;
+
+ return field_len;
+}
+
+/*
+ * Convenience function to append the field delimiter string
+ * to both the value string and format string buffers.
+ */
+static void __init append_delim(char *str_buf, int *str_buf_pos, int str_buf_len,
+ char *fmt_buf, int *fmt_buf_pos, int fmt_buf_len,
+ const char *delim_str)
+{
+ append_fmt(str_buf, str_buf_pos, str_buf_len, delim_str);
+ append_fmt(fmt_buf, fmt_buf_pos, fmt_buf_len, delim_str);
+}
+
+#define test_array_8(fn, check_data, string, fmt, arr) \
+do { \
+ BUILD_BUG_ON(ARRAY_SIZE(arr) != 8); \
+ _test(fn, check_data, string, fmt, 8, \
+ &(arr)[0], &(arr)[1], &(arr)[2], &(arr)[3], \
+ &(arr)[4], &(arr)[5], &(arr)[6], &(arr)[7]); \
+} while (0)
+
+#define numbers_list_8(T, gen_fmt, field_sep, scan_fmt, fn) \
+do { \
+ int i, pos = 0, fmt_pos = 0; \
+ T expect[8], result[8]; \
+ \
+ fill_random_array(expect); \
+ \
+ for (i = 0; i < ARRAY_SIZE(expect); i++) { \
+ if (i != 0) \
+ append_delim(test_buffer, &pos, BUF_SIZE, \
+ fmt_buffer, &fmt_pos, BUF_SIZE, \
+ field_sep); \
+ \
+ append_fmt(test_buffer, &pos, BUF_SIZE, gen_fmt, expect[i]); \
+ append_fmt(fmt_buffer, &fmt_pos, BUF_SIZE, "%%%s", scan_fmt); \
+ } \
+ \
+ test_array_8(fn, expect, test_buffer, fmt_buffer, result); \
+} while (0)
+
+#define numbers_list_fix_width(T, gen_fmt, field_sep, width, scan_fmt, fn) \
+do { \
+ char full_fmt[16]; \
+ \
+ snprintf(full_fmt, sizeof(full_fmt), "%u%s", width, scan_fmt); \
+ numbers_list_8(T, gen_fmt, field_sep, full_fmt, fn); \
+} while (0)
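+/*
+ * Illustrative expansion: numbers_list_fix_width(unsigned long long,
+ * "%llu", " ", 20, "llu", check_ull) builds full_fmt = "20llu", which
+ * numbers_list_8() turns into a "%20llu" conversion; 20 digits covers
+ * the largest unsigned long long (18446744073709551615).
+ */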
+
+#define numbers_list_val_width(T, gen_fmt, field_sep, scan_fmt, fn) \
+do { \
+ int i, val_len, pos = 0, fmt_pos = 0; \
+ T expect[8], result[8]; \
+ \
+ fill_random_array(expect); \
+ \
+ for (i = 0; i < ARRAY_SIZE(expect); i++) { \
+ if (i != 0) \
+ append_delim(test_buffer, &pos, BUF_SIZE, \
+ fmt_buffer, &fmt_pos, BUF_SIZE, field_sep);\
+ \
+ val_len = append_fmt(test_buffer, &pos, BUF_SIZE, gen_fmt, \
+ expect[i]); \
+ append_fmt(fmt_buffer, &fmt_pos, BUF_SIZE, \
+ "%%%u%s", val_len, scan_fmt); \
+ } \
+ \
+ test_array_8(fn, expect, test_buffer, fmt_buffer, result); \
+} while (0)
+
+static void __init numbers_list_ll(const char *delim)
+{
+ numbers_list_8(unsigned long long, "%llu", delim, "llu", check_ull);
+ numbers_list_8(long long, "%lld", delim, "lld", check_ll);
+ numbers_list_8(long long, "%lld", delim, "lli", check_ll);
+ numbers_list_8(unsigned long long, "%llx", delim, "llx", check_ull);
+ numbers_list_8(unsigned long long, "0x%llx", delim, "llx", check_ull);
+ numbers_list_8(long long, "0x%llx", delim, "lli", check_ll);
+}
+
+static void __init numbers_list_l(const char *delim)
+{
+ numbers_list_8(unsigned long, "%lu", delim, "lu", check_ulong);
+ numbers_list_8(long, "%ld", delim, "ld", check_long);
+ numbers_list_8(long, "%ld", delim, "li", check_long);
+ numbers_list_8(unsigned long, "%lx", delim, "lx", check_ulong);
+ numbers_list_8(unsigned long, "0x%lx", delim, "lx", check_ulong);
+ numbers_list_8(long, "0x%lx", delim, "li", check_long);
+}
+
+static void __init numbers_list_d(const char *delim)
+{
+ numbers_list_8(unsigned int, "%u", delim, "u", check_uint);
+ numbers_list_8(int, "%d", delim, "d", check_int);
+ numbers_list_8(int, "%d", delim, "i", check_int);
+ numbers_list_8(unsigned int, "%x", delim, "x", check_uint);
+ numbers_list_8(unsigned int, "0x%x", delim, "x", check_uint);
+ numbers_list_8(int, "0x%x", delim, "i", check_int);
+}
+
+static void __init numbers_list_h(const char *delim)
+{
+ numbers_list_8(unsigned short, "%hu", delim, "hu", check_ushort);
+ numbers_list_8(short, "%hd", delim, "hd", check_short);
+ numbers_list_8(short, "%hd", delim, "hi", check_short);
+ numbers_list_8(unsigned short, "%hx", delim, "hx", check_ushort);
+ numbers_list_8(unsigned short, "0x%hx", delim, "hx", check_ushort);
+ numbers_list_8(short, "0x%hx", delim, "hi", check_short);
+}
+
+static void __init numbers_list_hh(const char *delim)
+{
+ numbers_list_8(unsigned char, "%hhu", delim, "hhu", check_uchar);
+ numbers_list_8(signed char, "%hhd", delim, "hhd", check_char);
+ numbers_list_8(signed char, "%hhd", delim, "hhi", check_char);
+ numbers_list_8(unsigned char, "%hhx", delim, "hhx", check_uchar);
+ numbers_list_8(unsigned char, "0x%hhx", delim, "hhx", check_uchar);
+ numbers_list_8(signed char, "0x%hhx", delim, "hhi", check_char);
+}
+
+static void __init numbers_list(const char *delim)
+{
+ numbers_list_ll(delim);
+ numbers_list_l(delim);
+ numbers_list_d(delim);
+ numbers_list_h(delim);
+ numbers_list_hh(delim);
+}
+
+static void __init numbers_list_field_width_ll(const char *delim)
+{
+ numbers_list_fix_width(unsigned long long, "%llu", delim, 20, "llu", check_ull);
+ numbers_list_fix_width(long long, "%lld", delim, 20, "lld", check_ll);
+ numbers_list_fix_width(long long, "%lld", delim, 20, "lli", check_ll);
+ numbers_list_fix_width(unsigned long long, "%llx", delim, 16, "llx", check_ull);
+ numbers_list_fix_width(unsigned long long, "0x%llx", delim, 18, "llx", check_ull);
+ numbers_list_fix_width(long long, "0x%llx", delim, 18, "lli", check_ll);
+}
+
+static void __init numbers_list_field_width_l(const char *delim)
+{
+#if BITS_PER_LONG == 64
+ numbers_list_fix_width(unsigned long, "%lu", delim, 20, "lu", check_ulong);
+ numbers_list_fix_width(long, "%ld", delim, 20, "ld", check_long);
+ numbers_list_fix_width(long, "%ld", delim, 20, "li", check_long);
+ numbers_list_fix_width(unsigned long, "%lx", delim, 16, "lx", check_ulong);
+ numbers_list_fix_width(unsigned long, "0x%lx", delim, 18, "lx", check_ulong);
+ numbers_list_fix_width(long, "0x%lx", delim, 18, "li", check_long);
+#else
+ numbers_list_fix_width(unsigned long, "%lu", delim, 10, "lu", check_ulong);
+ numbers_list_fix_width(long, "%ld", delim, 11, "ld", check_long);
+ numbers_list_fix_width(long, "%ld", delim, 11, "li", check_long);
+ numbers_list_fix_width(unsigned long, "%lx", delim, 8, "lx", check_ulong);
+ numbers_list_fix_width(unsigned long, "0x%lx", delim, 10, "lx", check_ulong);
+ numbers_list_fix_width(long, "0x%lx", delim, 10, "li", check_long);
+#endif
+}
+
+static void __init numbers_list_field_width_d(const char *delim)
+{
+ numbers_list_fix_width(unsigned int, "%u", delim, 10, "u", check_uint);
+ numbers_list_fix_width(int, "%d", delim, 11, "d", check_int);
+ numbers_list_fix_width(int, "%d", delim, 11, "i", check_int);
+ numbers_list_fix_width(unsigned int, "%x", delim, 8, "x", check_uint);
+ numbers_list_fix_width(unsigned int, "0x%x", delim, 10, "x", check_uint);
+ numbers_list_fix_width(int, "0x%x", delim, 10, "i", check_int);
+}
+
+static void __init numbers_list_field_width_h(const char *delim)
+{
+ numbers_list_fix_width(unsigned short, "%hu", delim, 5, "hu", check_ushort);
+ numbers_list_fix_width(short, "%hd", delim, 6, "hd", check_short);
+ numbers_list_fix_width(short, "%hd", delim, 6, "hi", check_short);
+ numbers_list_fix_width(unsigned short, "%hx", delim, 4, "hx", check_ushort);
+ numbers_list_fix_width(unsigned short, "0x%hx", delim, 6, "hx", check_ushort);
+ numbers_list_fix_width(short, "0x%hx", delim, 6, "hi", check_short);
+}
+
+static void __init numbers_list_field_width_hh(const char *delim)
+{
+ numbers_list_fix_width(unsigned char, "%hhu", delim, 3, "hhu", check_uchar);
+ numbers_list_fix_width(signed char, "%hhd", delim, 4, "hhd", check_char);
+ numbers_list_fix_width(signed char, "%hhd", delim, 4, "hhi", check_char);
+ numbers_list_fix_width(unsigned char, "%hhx", delim, 2, "hhx", check_uchar);
+ numbers_list_fix_width(unsigned char, "0x%hhx", delim, 4, "hhx", check_uchar);
+ numbers_list_fix_width(signed char, "0x%hhx", delim, 4, "hhi", check_char);
+}
+
+/*
+ * List of numbers separated by delim. Each field width specifier is the
+ * maximum possible digits for the given type and base.
+ */
+static void __init numbers_list_field_width_typemax(const char *delim)
+{
+ numbers_list_field_width_ll(delim);
+ numbers_list_field_width_l(delim);
+ numbers_list_field_width_d(delim);
+ numbers_list_field_width_h(delim);
+ numbers_list_field_width_hh(delim);
+}
+
+static void __init numbers_list_field_width_val_ll(const char *delim)
+{
+ numbers_list_val_width(unsigned long long, "%llu", delim, "llu", check_ull);
+ numbers_list_val_width(long long, "%lld", delim, "lld", check_ll);
+ numbers_list_val_width(long long, "%lld", delim, "lli", check_ll);
+ numbers_list_val_width(unsigned long long, "%llx", delim, "llx", check_ull);
+ numbers_list_val_width(unsigned long long, "0x%llx", delim, "llx", check_ull);
+ numbers_list_val_width(long long, "0x%llx", delim, "lli", check_ll);
+}
+
+static void __init numbers_list_field_width_val_l(const char *delim)
+{
+ numbers_list_val_width(unsigned long, "%lu", delim, "lu", check_ulong);
+ numbers_list_val_width(long, "%ld", delim, "ld", check_long);
+ numbers_list_val_width(long, "%ld", delim, "li", check_long);
+ numbers_list_val_width(unsigned long, "%lx", delim, "lx", check_ulong);
+ numbers_list_val_width(unsigned long, "0x%lx", delim, "lx", check_ulong);
+ numbers_list_val_width(long, "0x%lx", delim, "li", check_long);
+}
+
+static void __init numbers_list_field_width_val_d(const char *delim)
+{
+ numbers_list_val_width(unsigned int, "%u", delim, "u", check_uint);
+ numbers_list_val_width(int, "%d", delim, "d", check_int);
+ numbers_list_val_width(int, "%d", delim, "i", check_int);
+ numbers_list_val_width(unsigned int, "%x", delim, "x", check_uint);
+ numbers_list_val_width(unsigned int, "0x%x", delim, "x", check_uint);
+ numbers_list_val_width(int, "0x%x", delim, "i", check_int);
+}
+
+static void __init numbers_list_field_width_val_h(const char *delim)
+{
+ numbers_list_val_width(unsigned short, "%hu", delim, "hu", check_ushort);
+ numbers_list_val_width(short, "%hd", delim, "hd", check_short);
+ numbers_list_val_width(short, "%hd", delim, "hi", check_short);
+ numbers_list_val_width(unsigned short, "%hx", delim, "hx", check_ushort);
+ numbers_list_val_width(unsigned short, "0x%hx", delim, "hx", check_ushort);
+ numbers_list_val_width(short, "0x%hx", delim, "hi", check_short);
+}
+
+static void __init numbers_list_field_width_val_hh(const char *delim)
+{
+ numbers_list_val_width(unsigned char, "%hhu", delim, "hhu", check_uchar);
+ numbers_list_val_width(signed char, "%hhd", delim, "hhd", check_char);
+ numbers_list_val_width(signed char, "%hhd", delim, "hhi", check_char);
+ numbers_list_val_width(unsigned char, "%hhx", delim, "hhx", check_uchar);
+ numbers_list_val_width(unsigned char, "0x%hhx", delim, "hhx", check_uchar);
+ numbers_list_val_width(signed char, "0x%hhx", delim, "hhi", check_char);
+}
+
+/*
+ * List of numbers separated by delim. Each field width specifier is
+ * exactly the length of the corresponding value's digits in the string
+ * being scanned.
+ */
+static void __init numbers_list_field_width_val_width(const char *delim)
+{
+ numbers_list_field_width_val_ll(delim);
+ numbers_list_field_width_val_l(delim);
+ numbers_list_field_width_val_d(delim);
+ numbers_list_field_width_val_h(delim);
+ numbers_list_field_width_val_hh(delim);
+}
+
+/*
+ * Slice a continuous string of digits without field delimiters, containing
+ * numbers of varying length, using the field widths to extract each group
+ * of digits. For example, the hex values c0, 3, bf01 and 303 have the
+ * string representation "c03bf01303" and are extracted with "%2x%1x%4x%3x".
+ */
+static void __init numbers_slice(void)
+{
+ numbers_list_field_width_val_width("");
+}
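+
+/*
+ * Illustrative sketch only (hypothetical helper, not wired into the
+ * suite): slicing the example string from the comment above with
+ * explicit field widths.
+ */
+static void __init __maybe_unused numbers_slice_example(void)
+{
+ unsigned int a, b, c, d;
+
+ /* "c03bf01303" carries the hex values c0, 3, bf01 and 303. */
+ if (sscanf("c03bf01303", "%2x%1x%4x%3x", &a, &b, &c, &d) == 4)
+ pr_debug("sliced: %x %x %x %x\n", a, b, c, d);
+}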
+
+#define test_number_prefix(T, str, scan_fmt, expect0, expect1, n_args, fn) \
+do { \
+ const T expect[2] = { expect0, expect1 }; \
+ T result[2] = { (T)~expect[0], (T)~expect[1] }; \
+ \
+ _test(fn, &expect, str, scan_fmt, n_args, &result[0], &result[1]); \
+} while (0)
+
+/*
+ * Number prefix is >= field width.
+ * Expected behaviour is derived from testing userland sscanf.
+ */
+static void __init numbers_prefix_overflow(void)
+{
+ /*
+ * Negative decimal with a field width of 1: the sign fills the whole
+ * field, so scanning quits and returns 0.
+ */
+ test_number_prefix(long long, "-1 1", "%1lld %lld", 0, 0, 0, check_ll);
+ test_number_prefix(long, "-1 1", "%1ld %ld", 0, 0, 0, check_long);
+ test_number_prefix(int, "-1 1", "%1d %d", 0, 0, 0, check_int);
+ test_number_prefix(short, "-1 1", "%1hd %hd", 0, 0, 0, check_short);
+ test_number_prefix(signed char, "-1 1", "%1hhd %hhd", 0, 0, 0, check_char);
+
+ test_number_prefix(long long, "-1 1", "%1lli %lli", 0, 0, 0, check_ll);
+ test_number_prefix(long, "-1 1", "%1li %li", 0, 0, 0, check_long);
+ test_number_prefix(int, "-1 1", "%1i %i", 0, 0, 0, check_int);
+ test_number_prefix(short, "-1 1", "%1hi %hi", 0, 0, 0, check_short);
+ test_number_prefix(signed char, "-1 1", "%1hhi %hhi", 0, 0, 0, check_char);
+
+ /*
+ * 0x prefix in a field of width 1: 0 is a valid digit so should
+ * convert. Next field scan starts at the 'x' which isn't a digit so
+ * scan quits with one field converted.
+ */
+ test_number_prefix(unsigned long long, "0xA7", "%1llx%llx", 0, 0, 1, check_ull);
+ test_number_prefix(unsigned long, "0xA7", "%1lx%lx", 0, 0, 1, check_ulong);
+ test_number_prefix(unsigned int, "0xA7", "%1x%x", 0, 0, 1, check_uint);
+ test_number_prefix(unsigned short, "0xA7", "%1hx%hx", 0, 0, 1, check_ushort);
+ test_number_prefix(unsigned char, "0xA7", "%1hhx%hhx", 0, 0, 1, check_uchar);
+ test_number_prefix(long long, "0xA7", "%1lli%llx", 0, 0, 1, check_ll);
+ test_number_prefix(long, "0xA7", "%1li%lx", 0, 0, 1, check_long);
+ test_number_prefix(int, "0xA7", "%1i%x", 0, 0, 1, check_int);
+ test_number_prefix(short, "0xA7", "%1hi%hx", 0, 0, 1, check_short);
+ test_number_prefix(char, "0xA7", "%1hhi%hhx", 0, 0, 1, check_char);
+
+ /*
+ * 0x prefix in a field of width 2 using %x conversion: first field
+ * converts to 0. Next field scan starts at the character after "0x".
+ * Both fields will convert.
+ */
+ test_number_prefix(unsigned long long, "0xA7", "%2llx%llx", 0, 0xa7, 2, check_ull);
+ test_number_prefix(unsigned long, "0xA7", "%2lx%lx", 0, 0xa7, 2, check_ulong);
+ test_number_prefix(unsigned int, "0xA7", "%2x%x", 0, 0xa7, 2, check_uint);
+ test_number_prefix(unsigned short, "0xA7", "%2hx%hx", 0, 0xa7, 2, check_ushort);
+ test_number_prefix(unsigned char, "0xA7", "%2hhx%hhx", 0, 0xa7, 2, check_uchar);
+
+ /*
+ * 0x prefix in a field of width 2 using %i conversion: first field
+ * converts to 0. Next field scan starts at the character after "0x",
+ * which will convert if it can be interpreted as decimal, but will fail
+ * if it contains any hex digits (since there is no 0x prefix).
+ */
+ test_number_prefix(long long, "0x67", "%2lli%lli", 0, 67, 2, check_ll);
+ test_number_prefix(long, "0x67", "%2li%li", 0, 67, 2, check_long);
+ test_number_prefix(int, "0x67", "%2i%i", 0, 67, 2, check_int);
+ test_number_prefix(short, "0x67", "%2hi%hi", 0, 67, 2, check_short);
+ test_number_prefix(char, "0x67", "%2hhi%hhi", 0, 67, 2, check_char);
+
+ test_number_prefix(long long, "0xA7", "%2lli%lli", 0, 0, 1, check_ll);
+ test_number_prefix(long, "0xA7", "%2li%li", 0, 0, 1, check_long);
+ test_number_prefix(int, "0xA7", "%2i%i", 0, 0, 1, check_int);
+ test_number_prefix(short, "0xA7", "%2hi%hi", 0, 0, 1, check_short);
+ test_number_prefix(char, "0xA7", "%2hhi%hhi", 0, 0, 1, check_char);
+}
+
+#define _test_simple_strtoxx(T, fn, gen_fmt, expect, base) \
+do { \
+ T got; \
+ char *endp; \
+ int len; \
+ bool fail = false; \
+ \
+ total_tests++; \
+ len = snprintf(test_buffer, BUF_SIZE, gen_fmt, expect); \
+ got = (fn)(test_buffer, &endp, base); \
+ pr_debug(#fn "(\"%s\", %d) -> " gen_fmt "\n", test_buffer, base, got); \
+ if (got != (expect)) { \
+ fail = true; \
+ pr_warn(#fn "(\"%s\", %d): got " gen_fmt " expected " gen_fmt "\n", \
+ test_buffer, base, got, expect); \
+ } else if (endp != test_buffer + len) { \
+ fail = true; \
+ pr_warn(#fn "(\"%s\", %d) startp=0x%px got endp=0x%px expected 0x%px\n", \
+ test_buffer, base, test_buffer, \
+ test_buffer + len, endp); \
+ } \
+ \
+ if (fail) \
+ failed_tests++; \
+} while (0)
+
+#define test_simple_strtoxx(T, fn, gen_fmt, base) \
+do { \
+ int i; \
+ \
+ for (i = 0; i < ARRAY_SIZE(numbers); i++) { \
+ _test_simple_strtoxx(T, fn, gen_fmt, (T)numbers[i], base); \
+ \
+ if (is_signed_type(T)) \
+ _test_simple_strtoxx(T, fn, gen_fmt, \
+ -(T)numbers[i], base); \
+ } \
+} while (0)
+
+static void __init test_simple_strtoull(void)
+{
+ test_simple_strtoxx(unsigned long long, simple_strtoull, "%llu", 10);
+ test_simple_strtoxx(unsigned long long, simple_strtoull, "%llu", 0);
+ test_simple_strtoxx(unsigned long long, simple_strtoull, "%llx", 16);
+ test_simple_strtoxx(unsigned long long, simple_strtoull, "0x%llx", 16);
+ test_simple_strtoxx(unsigned long long, simple_strtoull, "0x%llx", 0);
+}
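+
+/*
+ * A minimal sketch (hypothetical helper, not part of the suite) of the
+ * endp contract verified by _test_simple_strtoxx(): the parser consumes
+ * the whole numeric token and leaves endp just past its last digit.
+ */
+static void __init __maybe_unused simple_strtoull_endp_example(void)
+{
+ char *endp;
+ unsigned long long v = simple_strtoull("0x1f rest", &endp, 0);
+
+ /* v == 0x1f; endp points at the space before "rest". */
+ pr_debug("v=%llu tail=\"%s\"\n", v, endp);
+}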
+
+static void __init test_simple_strtoll(void)
+{
+ test_simple_strtoxx(long long, simple_strtoll, "%lld", 10);
+ test_simple_strtoxx(long long, simple_strtoll, "%lld", 0);
+ test_simple_strtoxx(long long, simple_strtoll, "%llx", 16);
+ test_simple_strtoxx(long long, simple_strtoll, "0x%llx", 16);
+ test_simple_strtoxx(long long, simple_strtoll, "0x%llx", 0);
+}
+
+static void __init test_simple_strtoul(void)
+{
+ test_simple_strtoxx(unsigned long, simple_strtoul, "%lu", 10);
+ test_simple_strtoxx(unsigned long, simple_strtoul, "%lu", 0);
+ test_simple_strtoxx(unsigned long, simple_strtoul, "%lx", 16);
+ test_simple_strtoxx(unsigned long, simple_strtoul, "0x%lx", 16);
+ test_simple_strtoxx(unsigned long, simple_strtoul, "0x%lx", 0);
+}
+
+static void __init test_simple_strtol(void)
+{
+ test_simple_strtoxx(long, simple_strtol, "%ld", 10);
+ test_simple_strtoxx(long, simple_strtol, "%ld", 0);
+ test_simple_strtoxx(long, simple_strtol, "%lx", 16);
+ test_simple_strtoxx(long, simple_strtol, "0x%lx", 16);
+ test_simple_strtoxx(long, simple_strtol, "0x%lx", 0);
+}
+
+/* Selection of common delimiters/separators between numbers in a string. */
+static const char * const number_delimiters[] __initconst = {
+ " ", ":", ",", "-", "/",
+};
+
+static void __init test_numbers(void)
+{
+ int i;
+
+ /* String containing only one number. */
+ numbers_simple();
+
+ /* String with multiple numbers separated by delimiter. */
+ for (i = 0; i < ARRAY_SIZE(number_delimiters); i++) {
+ numbers_list(number_delimiters[i]);
+
+ /* Field width may be longer than actual field digits. */
+ numbers_list_field_width_typemax(number_delimiters[i]);
+
+ /* Each field width is exactly the length of the actual field digits. */
+ numbers_list_field_width_val_width(number_delimiters[i]);
+ }
+
+ /* Slice continuous sequence of digits using field widths. */
+ numbers_slice();
+
+ numbers_prefix_overflow();
+}
+
+static void __init selftest(void)
+{
+ test_buffer = kmalloc(BUF_SIZE, GFP_KERNEL);
+ if (!test_buffer)
+ return;
+
+ fmt_buffer = kmalloc(BUF_SIZE, GFP_KERNEL);
+ if (!fmt_buffer) {
+ kfree(test_buffer);
+ return;
+ }
+
+ prandom_seed_state(&rnd_state, 3141592653589793238ULL);
+
+ test_numbers();
+
+ test_simple_strtoull();
+ test_simple_strtoll();
+ test_simple_strtoul();
+ test_simple_strtol();
+
+ kfree(fmt_buffer);
+ kfree(test_buffer);
+}
+
+KSTM_MODULE_LOADERS(test_scanf);
+MODULE_AUTHOR("Richard Fitzgerald <rf@opensource.cirrus.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/lib/test_sort.c b/lib/test_sort.c
new file mode 100644
index 000000000..be02e3a09
--- /dev/null
+++ b/lib/test_sort.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <kunit/test.h>
+
+#include <linux/sort.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+/* a simple boot-time regression test */
+
+#define TEST_LEN 1000
+
+static int cmpint(const void *a, const void *b)
+{
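+ /*
+ * Plain subtraction is safe here: the values generated below are
+ * bounded by the modulus 6599, so the difference cannot overflow.
+ */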
+ return *(int *)a - *(int *)b;
+}
+
+static void test_sort(struct kunit *test)
+{
+ int *a, i, r = 1;
+
+ a = kunit_kmalloc_array(test, TEST_LEN, sizeof(*a), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, a);
+
+ for (i = 0; i < TEST_LEN; i++) {
+ r = (r * 725861) % 6599;
+ a[i] = r;
+ }
+
+ sort(a, TEST_LEN, sizeof(*a), cmpint, NULL);
+
+ for (i = 0; i < TEST_LEN-1; i++)
+ KUNIT_ASSERT_LE(test, a[i], a[i + 1]);
+}
+
+static struct kunit_case sort_test_cases[] = {
+ KUNIT_CASE(test_sort),
+ {}
+};
+
+static struct kunit_suite sort_test_suite = {
+ .name = "lib_sort",
+ .test_cases = sort_test_cases,
+};
+
+kunit_test_suites(&sort_test_suite);
+
+MODULE_LICENSE("GPL");
diff --git a/lib/test_static_key_base.c b/lib/test_static_key_base.c
new file mode 100644
index 000000000..5089a2e2b
--- /dev/null
+++ b/lib/test_static_key_base.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Kernel module for testing static keys.
+ *
+ * Copyright 2015 Akamai Technologies Inc. All Rights Reserved
+ *
+ * Authors:
+ * Jason Baron <jbaron@akamai.com>
+ */
+
+#include <linux/module.h>
+#include <linux/jump_label.h>
+
+/* old keys */
+struct static_key base_old_true_key = STATIC_KEY_INIT_TRUE;
+EXPORT_SYMBOL_GPL(base_old_true_key);
+struct static_key base_inv_old_true_key = STATIC_KEY_INIT_TRUE;
+EXPORT_SYMBOL_GPL(base_inv_old_true_key);
+struct static_key base_old_false_key = STATIC_KEY_INIT_FALSE;
+EXPORT_SYMBOL_GPL(base_old_false_key);
+struct static_key base_inv_old_false_key = STATIC_KEY_INIT_FALSE;
+EXPORT_SYMBOL_GPL(base_inv_old_false_key);
+
+/* new keys */
+DEFINE_STATIC_KEY_TRUE(base_true_key);
+EXPORT_SYMBOL_GPL(base_true_key);
+DEFINE_STATIC_KEY_TRUE(base_inv_true_key);
+EXPORT_SYMBOL_GPL(base_inv_true_key);
+DEFINE_STATIC_KEY_FALSE(base_false_key);
+EXPORT_SYMBOL_GPL(base_false_key);
+DEFINE_STATIC_KEY_FALSE(base_inv_false_key);
+EXPORT_SYMBOL_GPL(base_inv_false_key);
+
+static void invert_key(struct static_key *key)
+{
+ if (static_key_enabled(key))
+ static_key_disable(key);
+ else
+ static_key_enable(key);
+}
+
+static int __init test_static_key_base_init(void)
+{
+ invert_key(&base_inv_old_true_key);
+ invert_key(&base_inv_old_false_key);
+ invert_key(&base_inv_true_key.key);
+ invert_key(&base_inv_false_key.key);
+
+ return 0;
+}
+
+static void __exit test_static_key_base_exit(void)
+{
+}
+
+module_init(test_static_key_base_init);
+module_exit(test_static_key_base_exit);
+
+MODULE_AUTHOR("Jason Baron <jbaron@akamai.com>");
+MODULE_LICENSE("GPL");
diff --git a/lib/test_static_keys.c b/lib/test_static_keys.c
new file mode 100644
index 000000000..42daa74be
--- /dev/null
+++ b/lib/test_static_keys.c
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Kernel module for testing static keys.
+ *
+ * Copyright 2015 Akamai Technologies Inc. All Rights Reserved
+ *
+ * Authors:
+ * Jason Baron <jbaron@akamai.com>
+ */
+
+#include <linux/module.h>
+#include <linux/jump_label.h>
+
+/* old keys */
+struct static_key old_true_key = STATIC_KEY_INIT_TRUE;
+struct static_key old_false_key = STATIC_KEY_INIT_FALSE;
+
+/* new api */
+DEFINE_STATIC_KEY_TRUE(true_key);
+DEFINE_STATIC_KEY_FALSE(false_key);
+
+/* external */
+extern struct static_key base_old_true_key;
+extern struct static_key base_inv_old_true_key;
+extern struct static_key base_old_false_key;
+extern struct static_key base_inv_old_false_key;
+
+/* new api */
+extern struct static_key_true base_true_key;
+extern struct static_key_true base_inv_true_key;
+extern struct static_key_false base_false_key;
+extern struct static_key_false base_inv_false_key;
+
+
+struct test_key {
+ bool init_state;
+ struct static_key *key;
+ bool (*test_key)(void);
+};
+
+#define test_key_func(key, branch) \
+static bool key ## _ ## branch(void) \
+{ \
+ return branch(&key); \
+}
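+
+/*
+ * For example, test_key_func(old_true_key, static_key_true) expands to:
+ *
+ * static bool old_true_key_static_key_true(void)
+ * {
+ * return static_key_true(&old_true_key);
+ * }
+ */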
+
+static void invert_key(struct static_key *key)
+{
+ if (static_key_enabled(key))
+ static_key_disable(key);
+ else
+ static_key_enable(key);
+}
+
+static void invert_keys(struct test_key *keys, int size)
+{
+ struct static_key *previous = NULL;
+ int i;
+
+ for (i = 0; i < size; i++) {
+ if (previous != keys[i].key) {
+ invert_key(keys[i].key);
+ previous = keys[i].key;
+ }
+ }
+}
+
+static int verify_keys(struct test_key *keys, int size, bool invert)
+{
+ int i;
+ bool ret, init;
+
+ for (i = 0; i < size; i++) {
+ ret = static_key_enabled(keys[i].key);
+ init = keys[i].init_state;
+ if (ret != (invert ? !init : init))
+ return -EINVAL;
+ ret = keys[i].test_key();
+ if (static_key_enabled(keys[i].key)) {
+ if (!ret)
+ return -EINVAL;
+ } else {
+ if (ret)
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+test_key_func(old_true_key, static_key_true)
+test_key_func(old_false_key, static_key_false)
+test_key_func(true_key, static_branch_likely)
+test_key_func(true_key, static_branch_unlikely)
+test_key_func(false_key, static_branch_likely)
+test_key_func(false_key, static_branch_unlikely)
+test_key_func(base_old_true_key, static_key_true)
+test_key_func(base_inv_old_true_key, static_key_true)
+test_key_func(base_old_false_key, static_key_false)
+test_key_func(base_inv_old_false_key, static_key_false)
+test_key_func(base_true_key, static_branch_likely)
+test_key_func(base_true_key, static_branch_unlikely)
+test_key_func(base_inv_true_key, static_branch_likely)
+test_key_func(base_inv_true_key, static_branch_unlikely)
+test_key_func(base_false_key, static_branch_likely)
+test_key_func(base_false_key, static_branch_unlikely)
+test_key_func(base_inv_false_key, static_branch_likely)
+test_key_func(base_inv_false_key, static_branch_unlikely)
+
+static int __init test_static_key_init(void)
+{
+ int ret;
+ int size;
+
+ struct test_key static_key_tests[] = {
+ /* internal keys - old keys */
+ {
+ .init_state = true,
+ .key = &old_true_key,
+ .test_key = &old_true_key_static_key_true,
+ },
+ {
+ .init_state = false,
+ .key = &old_false_key,
+ .test_key = &old_false_key_static_key_false,
+ },
+ /* internal keys - new keys */
+ {
+ .init_state = true,
+ .key = &true_key.key,
+ .test_key = &true_key_static_branch_likely,
+ },
+ {
+ .init_state = true,
+ .key = &true_key.key,
+ .test_key = &true_key_static_branch_unlikely,
+ },
+ {
+ .init_state = false,
+ .key = &false_key.key,
+ .test_key = &false_key_static_branch_likely,
+ },
+ {
+ .init_state = false,
+ .key = &false_key.key,
+ .test_key = &false_key_static_branch_unlikely,
+ },
+ /* external keys - old keys */
+ {
+ .init_state = true,
+ .key = &base_old_true_key,
+ .test_key = &base_old_true_key_static_key_true,
+ },
+ {
+ .init_state = false,
+ .key = &base_inv_old_true_key,
+ .test_key = &base_inv_old_true_key_static_key_true,
+ },
+ {
+ .init_state = false,
+ .key = &base_old_false_key,
+ .test_key = &base_old_false_key_static_key_false,
+ },
+ {
+ .init_state = true,
+ .key = &base_inv_old_false_key,
+ .test_key = &base_inv_old_false_key_static_key_false,
+ },
+ /* external keys - new keys */
+ {
+ .init_state = true,
+ .key = &base_true_key.key,
+ .test_key = &base_true_key_static_branch_likely,
+ },
+ {
+ .init_state = true,
+ .key = &base_true_key.key,
+ .test_key = &base_true_key_static_branch_unlikely,
+ },
+ {
+ .init_state = false,
+ .key = &base_inv_true_key.key,
+ .test_key = &base_inv_true_key_static_branch_likely,
+ },
+ {
+ .init_state = false,
+ .key = &base_inv_true_key.key,
+ .test_key = &base_inv_true_key_static_branch_unlikely,
+ },
+ {
+ .init_state = false,
+ .key = &base_false_key.key,
+ .test_key = &base_false_key_static_branch_likely,
+ },
+ {
+ .init_state = false,
+ .key = &base_false_key.key,
+ .test_key = &base_false_key_static_branch_unlikely,
+ },
+ {
+ .init_state = true,
+ .key = &base_inv_false_key.key,
+ .test_key = &base_inv_false_key_static_branch_likely,
+ },
+ {
+ .init_state = true,
+ .key = &base_inv_false_key.key,
+ .test_key = &base_inv_false_key_static_branch_unlikely,
+ },
+ };
+
+ size = ARRAY_SIZE(static_key_tests);
+
+ ret = verify_keys(static_key_tests, size, false);
+ if (ret)
+ goto out;
+
+ invert_keys(static_key_tests, size);
+ ret = verify_keys(static_key_tests, size, true);
+ if (ret)
+ goto out;
+
+ invert_keys(static_key_tests, size);
+ ret = verify_keys(static_key_tests, size, false);
+ if (ret)
+ goto out;
+ return 0;
+out:
+ return ret;
+}
+
+static void __exit test_static_key_exit(void)
+{
+}
+
+module_init(test_static_key_init);
+module_exit(test_static_key_exit);
+
+MODULE_AUTHOR("Jason Baron <jbaron@akamai.com>");
+MODULE_LICENSE("GPL");
diff --git a/lib/test_string.c b/lib/test_string.c
new file mode 100644
index 000000000..c5cb92fb7
--- /dev/null
+++ b/lib/test_string.c
@@ -0,0 +1,257 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+static __init int memset16_selftest(void)
+{
+ unsigned i, j, k;
+ u16 v, *p;
+
+ p = kmalloc(256 * 2 * 2, GFP_KERNEL);
+ if (!p)
+ return -1;
+
+ for (i = 0; i < 256; i++) {
+ for (j = 0; j < 256; j++) {
+ memset(p, 0xa1, 256 * 2 * sizeof(v));
+ memset16(p + i, 0xb1b2, j);
+ for (k = 0; k < 512; k++) {
+ v = p[k];
+ if (k < i) {
+ if (v != 0xa1a1)
+ goto fail;
+ } else if (k < i + j) {
+ if (v != 0xb1b2)
+ goto fail;
+ } else {
+ if (v != 0xa1a1)
+ goto fail;
+ }
+ }
+ }
+ }
+
+fail:
+ kfree(p);
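+ /* Encode the failing (i, j, k) position; i == 256 means all passed. */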
+ if (i < 256)
+ return (i << 24) | (j << 16) | k | 0x8000;
+ return 0;
+}
+
+static __init int memset32_selftest(void)
+{
+ unsigned i, j, k;
+ u32 v, *p;
+
+ p = kmalloc(256 * 2 * 4, GFP_KERNEL);
+ if (!p)
+ return -1;
+
+ for (i = 0; i < 256; i++) {
+ for (j = 0; j < 256; j++) {
+ memset(p, 0xa1, 256 * 2 * sizeof(v));
+ memset32(p + i, 0xb1b2b3b4, j);
+ for (k = 0; k < 512; k++) {
+ v = p[k];
+ if (k < i) {
+ if (v != 0xa1a1a1a1)
+ goto fail;
+ } else if (k < i + j) {
+ if (v != 0xb1b2b3b4)
+ goto fail;
+ } else {
+ if (v != 0xa1a1a1a1)
+ goto fail;
+ }
+ }
+ }
+ }
+
+fail:
+ kfree(p);
+ if (i < 256)
+ return (i << 24) | (j << 16) | k | 0x8000;
+ return 0;
+}
+
+static __init int memset64_selftest(void)
+{
+ unsigned i, j, k;
+ u64 v, *p;
+
+ p = kmalloc(256 * 2 * 8, GFP_KERNEL);
+ if (!p)
+ return -1;
+
+ for (i = 0; i < 256; i++) {
+ for (j = 0; j < 256; j++) {
+ memset(p, 0xa1, 256 * 2 * sizeof(v));
+ memset64(p + i, 0xb1b2b3b4b5b6b7b8ULL, j);
+ for (k = 0; k < 512; k++) {
+ v = p[k];
+ if (k < i) {
+ if (v != 0xa1a1a1a1a1a1a1a1ULL)
+ goto fail;
+ } else if (k < i + j) {
+ if (v != 0xb1b2b3b4b5b6b7b8ULL)
+ goto fail;
+ } else {
+ if (v != 0xa1a1a1a1a1a1a1a1ULL)
+ goto fail;
+ }
+ }
+ }
+ }
+
+fail:
+ kfree(p);
+ if (i < 256)
+ return (i << 24) | (j << 16) | k | 0x8000;
+ return 0;
+}
+
+static __init int strchr_selftest(void)
+{
+ const char *test_string = "abcdefghijkl";
+ const char *empty_string = "";
+ char *result;
+ int i;
+
+ for (i = 0; i < strlen(test_string) + 1; i++) {
+ result = strchr(test_string, test_string[i]);
+ if (result - test_string != i)
+ return i + 'a';
+ }
+
+ result = strchr(empty_string, '\0');
+ if (result != empty_string)
+ return 0x101;
+
+ result = strchr(empty_string, 'a');
+ if (result)
+ return 0x102;
+
+ result = strchr(test_string, 'z');
+ if (result)
+ return 0x103;
+
+ return 0;
+}
+
+static __init int strnchr_selftest(void)
+{
+ const char *test_string = "abcdefghijkl";
+ const char *empty_string = "";
+ char *result;
+ int i, j;
+
+ for (i = 0; i < strlen(test_string) + 1; i++) {
+ for (j = 0; j < strlen(test_string) + 2; j++) {
+ result = strnchr(test_string, j, test_string[i]);
+ if (j <= i) {
+ if (!result)
+ continue;
+ return ((i + 'a') << 8) | j;
+ }
+ if (result - test_string != i)
+ return ((i + 'a') << 8) | j;
+ }
+ }
+
+ result = strnchr(empty_string, 0, '\0');
+ if (result)
+ return 0x10001;
+
+ result = strnchr(empty_string, 1, '\0');
+ if (result != empty_string)
+ return 0x10002;
+
+ result = strnchr(empty_string, 1, 'a');
+ if (result)
+ return 0x10003;
+
+ result = strnchr(NULL, 0, '\0');
+ if (result)
+ return 0x10004;
+
+ return 0;
+}
+
+static __init int strspn_selftest(void)
+{
+ static const struct strspn_test {
+ const char str[16];
+ const char accept[16];
+ const char reject[16];
+ unsigned a;
+ unsigned r;
+ } tests[] __initconst = {
+ { "foobar", "", "", 0, 6 },
+ { "abba", "abc", "ABBA", 4, 4 },
+ { "abba", "a", "b", 1, 1 },
+ { "", "abc", "abc", 0, 0},
+ };
+ const struct strspn_test *s = tests;
+ size_t i, res;
+
+ for (i = 0; i < ARRAY_SIZE(tests); ++i, ++s) {
+ res = strspn(s->str, s->accept);
+ if (res != s->a)
+ return 0x100 + 2*i;
+ res = strcspn(s->str, s->reject);
+ if (res != s->r)
+ return 0x100 + 2*i + 1;
+ }
+ return 0;
+}
+
+static __exit void string_selftest_remove(void)
+{
+}
+
+static __init int string_selftest_init(void)
+{
+ int test, subtest;
+
+ test = 1;
+ subtest = memset16_selftest();
+ if (subtest)
+ goto fail;
+
+ test = 2;
+ subtest = memset32_selftest();
+ if (subtest)
+ goto fail;
+
+ test = 3;
+ subtest = memset64_selftest();
+ if (subtest)
+ goto fail;
+
+ test = 4;
+ subtest = strchr_selftest();
+ if (subtest)
+ goto fail;
+
+ test = 5;
+ subtest = strnchr_selftest();
+ if (subtest)
+ goto fail;
+
+ test = 6;
+ subtest = strspn_selftest();
+ if (subtest)
+ goto fail;
+
+ pr_info("String selftests succeeded\n");
+ return 0;
+fail:
+ pr_crit("String selftest failure %d.%08x\n", test, subtest);
+ return 0;
+}
+
+module_init(string_selftest_init);
+module_exit(string_selftest_remove);
+MODULE_LICENSE("GPL v2");
diff --git a/lib/test_sysctl.c b/lib/test_sysctl.c
new file mode 100644
index 000000000..8036aa91a
--- /dev/null
+++ b/lib/test_sysctl.c
@@ -0,0 +1,256 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR copyleft-next-0.3.1
+/*
+ * proc sysctl test driver
+ *
+ * Copyright (C) 2017 Luis R. Rodriguez <mcgrof@kernel.org>
+ */
+
+/*
+ * This module exercises the proc sysctl interfaces. This
+ * driver requires CONFIG_PROC_SYSCTL. It will not normally be loaded by the
+ * system unless explicitly requested by name. You can also build this driver
+ * into your kernel.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/async.h>
+#include <linux/delay.h>
+#include <linux/vmalloc.h>
+
+static int i_zero;
+static int i_one_hundred = 100;
+static int match_int_ok = 1;
+
+
+static struct {
+ struct ctl_table_header *test_h_setup_node;
+ struct ctl_table_header *test_h_mnt;
+ struct ctl_table_header *test_h_mnterror;
+} sysctl_test_headers;
+
+struct test_sysctl_data {
+ int int_0001;
+ int int_0002;
+ int int_0003[4];
+
+ int boot_int;
+
+ unsigned int uint_0001;
+
+ char string_0001[65];
+
+#define SYSCTL_TEST_BITMAP_SIZE 65536
+ unsigned long *bitmap_0001;
+};
+
+static struct test_sysctl_data test_data = {
+ .int_0001 = 60,
+ .int_0002 = 1,
+
+ .int_0003[0] = 0,
+ .int_0003[1] = 1,
+ .int_0003[2] = 2,
+ .int_0003[3] = 3,
+
+ .boot_int = 0,
+
+ .uint_0001 = 314,
+
+ .string_0001 = "(none)",
+};
+
+/* These are all under /proc/sys/debug/test_sysctl/ */
+static struct ctl_table test_table[] = {
+ {
+ .procname = "int_0001",
+ .data = &test_data.int_0001,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &i_zero,
+ .extra2 = &i_one_hundred,
+ },
+ {
+ .procname = "int_0002",
+ .data = &test_data.int_0002,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "int_0003",
+ .data = &test_data.int_0003,
+ .maxlen = sizeof(test_data.int_0003),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "match_int",
+ .data = &match_int_ok,
+ .maxlen = sizeof(match_int_ok),
+ .mode = 0444,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "boot_int",
+ .data = &test_data.boot_int,
+ .maxlen = sizeof(test_data.boot_int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ },
+ {
+ .procname = "uint_0001",
+ .data = &test_data.uint_0001,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_douintvec,
+ },
+ {
+ .procname = "string_0001",
+ .data = &test_data.string_0001,
+ .maxlen = sizeof(test_data.string_0001),
+ .mode = 0644,
+ .proc_handler = proc_dostring,
+ },
+ {
+ .procname = "bitmap_0001",
+ .data = &test_data.bitmap_0001,
+ .maxlen = SYSCTL_TEST_BITMAP_SIZE,
+ .mode = 0644,
+ .proc_handler = proc_do_large_bitmap,
+ },
+ { }
+};
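+
+/*
+ * For reference: the table above surfaces as files such as
+ * /proc/sys/debug/test_sysctl/int_0001, for which proc_dointvec_minmax
+ * rejects writes outside [0, 100] via extra1/extra2.
+ */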
+
+static void test_sysctl_calc_match_int_ok(void)
+{
+ int i;
+
+ struct {
+ int defined;
+ int wanted;
+ } match_int[] = {
+ {.defined = *(int *)SYSCTL_ZERO, .wanted = 0},
+ {.defined = *(int *)SYSCTL_ONE, .wanted = 1},
+ {.defined = *(int *)SYSCTL_TWO, .wanted = 2},
+ {.defined = *(int *)SYSCTL_THREE, .wanted = 3},
+ {.defined = *(int *)SYSCTL_FOUR, .wanted = 4},
+ {.defined = *(int *)SYSCTL_ONE_HUNDRED, .wanted = 100},
+ {.defined = *(int *)SYSCTL_TWO_HUNDRED, .wanted = 200},
+ {.defined = *(int *)SYSCTL_ONE_THOUSAND, .wanted = 1000},
+ {.defined = *(int *)SYSCTL_THREE_THOUSAND, .wanted = 3000},
+ {.defined = *(int *)SYSCTL_INT_MAX, .wanted = INT_MAX},
+ {.defined = *(int *)SYSCTL_MAXOLDUID, .wanted = 65535},
+ {.defined = *(int *)SYSCTL_NEG_ONE, .wanted = -1},
+ };
+
+ for (i = 0; i < ARRAY_SIZE(match_int); i++)
+ if (match_int[i].defined != match_int[i].wanted)
+ match_int_ok = 0;
+}
+
+static int test_sysctl_setup_node_tests(void)
+{
+ test_sysctl_calc_match_int_ok();
+ test_data.bitmap_0001 = kzalloc(SYSCTL_TEST_BITMAP_SIZE/8, GFP_KERNEL);
+ if (!test_data.bitmap_0001)
+ return -ENOMEM;
+ sysctl_test_headers.test_h_setup_node = register_sysctl("debug/test_sysctl", test_table);
+ if (!sysctl_test_headers.test_h_setup_node) {
+ kfree(test_data.bitmap_0001);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/* Used to test that unregister actually removes the directory */
+static struct ctl_table test_table_unregister[] = {
+ {
+ .procname = "unregister_error",
+ .data = &test_data.int_0001,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ },
+ {}
+};
+
+static int test_sysctl_run_unregister_nested(void)
+{
+ struct ctl_table_header *unregister;
+
+ unregister = register_sysctl("debug/test_sysctl/unregister_error",
+ test_table_unregister);
+ if (!unregister)
+ return -ENOMEM;
+
+ unregister_sysctl_table(unregister);
+ return 0;
+}
+
+static int test_sysctl_run_register_mount_point(void)
+{
+ sysctl_test_headers.test_h_mnt
+ = register_sysctl_mount_point("debug/test_sysctl/mnt");
+ if (!sysctl_test_headers.test_h_mnt)
+ return -ENOMEM;
+
+ sysctl_test_headers.test_h_mnterror
+ = register_sysctl("debug/test_sysctl/mnt/mnt_error",
+ test_table_unregister);
+ /*
+ * Don't check the result:
+ * if it fails (the expected behavior), return 0.
+ * If it succeeds (a misbehavior of the mount point registration), we
+ * want to see mnt_error when we run the sysctl test script.
+ */
+
+ return 0;
+}
+
+static int __init test_sysctl_init(void)
+{
+ int err;
+
+ err = test_sysctl_setup_node_tests();
+ if (err)
+ goto out;
+
+ err = test_sysctl_run_unregister_nested();
+ if (err)
+ goto out;
+
+ err = test_sysctl_run_register_mount_point();
+
+out:
+ return err;
+}
+module_init(test_sysctl_init);
+
+static void __exit test_sysctl_exit(void)
+{
+ kfree(test_data.bitmap_0001);
+ if (sysctl_test_headers.test_h_setup_node)
+ unregister_sysctl_table(sysctl_test_headers.test_h_setup_node);
+ if (sysctl_test_headers.test_h_mnt)
+ unregister_sysctl_table(sysctl_test_headers.test_h_mnt);
+ if (sysctl_test_headers.test_h_mnterror)
+ unregister_sysctl_table(sysctl_test_headers.test_h_mnterror);
+}
+
+module_exit(test_sysctl_exit);
+
+MODULE_AUTHOR("Luis R. Rodriguez <mcgrof@kernel.org>");
+MODULE_LICENSE("GPL");
diff --git a/lib/test_ubsan.c b/lib/test_ubsan.c
new file mode 100644
index 000000000..2062be1f2
--- /dev/null
+++ b/lib/test_ubsan.c
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+typedef void (*test_ubsan_fp)(void);
+
+#define UBSAN_TEST(config, ...) do { \
+ pr_info("%s " __VA_ARGS__ "%s(%s=%s)\n", __func__, \
+ sizeof(" " __VA_ARGS__) > 2 ? " " : "", \
+ #config, IS_ENABLED(config) ? "y" : "n"); \
+ } while (0)
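+
+/*
+ * For example, with CONFIG_UBSAN_SHIFT=y, the call
+ * UBSAN_TEST(CONFIG_UBSAN_SHIFT, "negative exponent") prints:
+ *
+ * test_ubsan_shift_out_of_bounds negative exponent (CONFIG_UBSAN_SHIFT=y)
+ */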
+
+static void test_ubsan_divrem_overflow(void)
+{
+ volatile int val = 16;
+ volatile int val2 = 0;
+
+ UBSAN_TEST(CONFIG_UBSAN_DIV_ZERO);
+ val /= val2;
+}
+
+static void test_ubsan_shift_out_of_bounds(void)
+{
+ volatile int neg = -1, wrap = 4;
+ int val1 = 10;
+ int val2 = INT_MAX;
+
+ UBSAN_TEST(CONFIG_UBSAN_SHIFT, "negative exponent");
+ val1 <<= neg;
+
+ UBSAN_TEST(CONFIG_UBSAN_SHIFT, "left overflow");
+ val2 <<= wrap;
+}
+
+static void test_ubsan_out_of_bounds(void)
+{
+ volatile int i = 4, j = 5, k = -1;
+ volatile char above[4] = { }; /* Protect surrounding memory. */
+ volatile int arr[4];
+ volatile char below[4] = { }; /* Protect surrounding memory. */
+
+ above[0] = below[0];
+
+ UBSAN_TEST(CONFIG_UBSAN_BOUNDS, "above");
+ arr[j] = i;
+
+ UBSAN_TEST(CONFIG_UBSAN_BOUNDS, "below");
+ arr[k] = i;
+}
+
+enum ubsan_test_enum {
+ UBSAN_TEST_ZERO = 0,
+ UBSAN_TEST_ONE,
+ UBSAN_TEST_MAX,
+};
+
+static void test_ubsan_load_invalid_value(void)
+{
+ volatile char *dst, *src;
+ bool val, val2, *ptr;
+ enum ubsan_test_enum eval, eval2, *eptr;
+ unsigned char c = 0xff;
+
+ UBSAN_TEST(CONFIG_UBSAN_BOOL, "bool");
+ dst = (char *)&val;
+ src = &c;
+ *dst = *src;
+
+ ptr = &val2;
+ val2 = val;
+
+ UBSAN_TEST(CONFIG_UBSAN_ENUM, "enum");
+ dst = (char *)&eval;
+ src = &c;
+ *dst = *src;
+
+ eptr = &eval2;
+ eval2 = eval;
+}
+
+static void test_ubsan_misaligned_access(void)
+{
+ volatile char arr[5] __aligned(4) = {1, 2, 3, 4, 5};
+ volatile int *ptr, val = 6;
+
+ UBSAN_TEST(CONFIG_UBSAN_ALIGNMENT);
+ ptr = (int *)(arr + 1);
+ *ptr = val;
+}
+
+static const test_ubsan_fp test_ubsan_array[] = {
+ test_ubsan_shift_out_of_bounds,
+ test_ubsan_out_of_bounds,
+ test_ubsan_load_invalid_value,
+ test_ubsan_misaligned_access,
+};
+
+/* Excluded because they Oops the module. */
+static const test_ubsan_fp skip_ubsan_array[] = {
+ test_ubsan_divrem_overflow,
+};
+
+static int __init test_ubsan_init(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(test_ubsan_array); i++)
+ test_ubsan_array[i]();
+
+ return 0;
+}
+module_init(test_ubsan_init);
+
+static void __exit test_ubsan_exit(void)
+{
+ /* do nothing */
+}
+module_exit(test_ubsan_exit);
+
+MODULE_AUTHOR("Jinbum Park <jinb.park7@gmail.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/lib/test_user_copy.c b/lib/test_user_copy.c
new file mode 100644
index 000000000..5ff04d8fe
--- /dev/null
+++ b/lib/test_user_copy.c
@@ -0,0 +1,331 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Kernel module for testing copy_to/from_user infrastructure.
+ *
+ * Copyright 2013 Google Inc. All Rights Reserved
+ *
+ * Authors:
+ * Kees Cook <keescook@chromium.org>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/mman.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+
+/*
+ * Several 32-bit architectures support 64-bit {get,put}_user() calls.
+ * As there doesn't appear to be anything that can safely determine
+ * their capability at compile time, we just have to opt out certain archs.
+ */
+#if BITS_PER_LONG == 64 || (!(defined(CONFIG_ARM) && !defined(MMU)) && \
+ !defined(CONFIG_M68K) && \
+ !defined(CONFIG_MICROBLAZE) && \
+ !defined(CONFIG_NIOS2) && \
+ !defined(CONFIG_PPC32) && \
+ !defined(CONFIG_SUPERH))
+# define TEST_U64
+#endif
+
+#define test(condition, msg, ...) \
+({ \
+ int cond = (condition); \
+ if (cond) \
+ pr_warn("[%d] " msg "\n", __LINE__, ##__VA_ARGS__); \
+ cond; \
+})
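+
+/*
+ * For example, test(ret, "copy failed") logs a warning tagged with the
+ * line number when the condition is non-zero, and evaluates to the
+ * condition itself so failures can be OR-ed into an accumulator.
+ */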
+
+static bool is_zeroed(void *from, size_t size)
+{
+ return memchr_inv(from, 0x0, size) == NULL;
+}
+
+static int test_check_nonzero_user(char *kmem, char __user *umem, size_t size)
+{
+ int ret = 0;
+ size_t start, end, i, zero_start, zero_end;
+
+ if (test(size < 2 * PAGE_SIZE, "buffer too small"))
+ return -EINVAL;
+
+ /*
+ * We want to cross a page boundary to exercise the code more
+ * effectively. We also don't want to make the size we scan too large,
+ * otherwise the test can take a long time and cause soft lockups. So
+ * scan a 1024 byte region across the page boundary.
+ */
+ size = 1024;
+ start = PAGE_SIZE - (size / 2);
+
+ kmem += start;
+ umem += start;
+
+ zero_start = size / 4;
+ zero_end = size - zero_start;
+
+ /*
+ * We conduct a series of check_nonzero_user() tests on a block of
+ * memory with the following byte-pattern (trying every possible
+ * [start,end] pair):
+ *
+ * [ 00 ff 00 ff ... 00 00 00 00 ... ff 00 ff 00 ]
+ *
+ * And we verify that check_nonzero_user() acts identically to
+ * memchr_inv().
+ */
+
+ memset(kmem, 0x0, size);
+ for (i = 1; i < zero_start; i += 2)
+ kmem[i] = 0xff;
+ for (i = zero_end; i < size; i += 2)
+ kmem[i] = 0xff;
+
+ ret |= test(copy_to_user(umem, kmem, size),
+ "legitimate copy_to_user failed");
+
+ for (start = 0; start <= size; start++) {
+ for (end = start; end <= size; end++) {
+ size_t len = end - start;
+ int retval = check_zeroed_user(umem + start, len);
+ int expected = is_zeroed(kmem + start, len);
+
+ ret |= test(retval != expected,
+ "check_nonzero_user(=%d) != memchr_inv(=%d) mismatch (start=%zu, end=%zu)",
+ retval, expected, start, end);
+ }
+ }
+
+ return ret;
+}
+
+static int test_copy_struct_from_user(char *kmem, char __user *umem,
+ size_t size)
+{
+ int ret = 0;
+ char *umem_src = NULL, *expected = NULL;
+ size_t ksize, usize;
+
+ umem_src = kmalloc(size, GFP_KERNEL);
+ ret = test(umem_src == NULL, "kmalloc failed");
+ if (ret)
+ goto out_free;
+
+ expected = kmalloc(size, GFP_KERNEL);
+ ret = test(expected == NULL, "kmalloc failed");
+ if (ret)
+ goto out_free;
+
+ /* Fill umem with a fixed byte pattern. */
+ memset(umem_src, 0x3e, size);
+ ret |= test(copy_to_user(umem, umem_src, size),
+ "legitimate copy_to_user failed");
+
+ /* Check basic case -- (usize == ksize). */
+ ksize = size;
+ usize = size;
+
+ memcpy(expected, umem_src, ksize);
+
+ memset(kmem, 0x0, size);
+ ret |= test(copy_struct_from_user(kmem, ksize, umem, usize),
+ "copy_struct_from_user(usize == ksize) failed");
+ ret |= test(memcmp(kmem, expected, ksize),
+ "copy_struct_from_user(usize == ksize) gives unexpected copy");
+
+ /* Old userspace case -- (usize < ksize). */
+ ksize = size;
+ usize = size / 2;
+
+ memcpy(expected, umem_src, usize);
+ memset(expected + usize, 0x0, ksize - usize);
+
+ memset(kmem, 0x0, size);
+ ret |= test(copy_struct_from_user(kmem, ksize, umem, usize),
+ "copy_struct_from_user(usize < ksize) failed");
+ ret |= test(memcmp(kmem, expected, ksize),
+ "copy_struct_from_user(usize < ksize) gives unexpected copy");
+
+ /* New userspace (-E2BIG) case -- (usize > ksize). */
+ ksize = size / 2;
+ usize = size;
+
+ memset(kmem, 0x0, size);
+ ret |= test(copy_struct_from_user(kmem, ksize, umem, usize) != -E2BIG,
+ "copy_struct_from_user(usize > ksize) didn't give E2BIG");
+
+ /* New userspace (success) case -- (usize > ksize). */
+ ksize = size / 2;
+ usize = size;
+
+ memcpy(expected, umem_src, ksize);
+ ret |= test(clear_user(umem + ksize, usize - ksize),
+ "legitimate clear_user failed");
+
+ memset(kmem, 0x0, size);
+ ret |= test(copy_struct_from_user(kmem, ksize, umem, usize),
+ "copy_struct_from_user(usize > ksize) failed");
+ ret |= test(memcmp(kmem, expected, ksize),
+ "copy_struct_from_user(usize > ksize) gives unexpected copy");
+
+out_free:
+ kfree(expected);
+ kfree(umem_src);
+ return ret;
+}
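+
+/*
+ * For context, the calling convention exercised above (sketch only;
+ * the struct name is hypothetical):
+ *
+ * struct my_args args; // ksize == sizeof(args)
+ *
+ * err = copy_struct_from_user(&args, sizeof(args), uarg, usize);
+ * if (err) // -E2BIG when userspace passes a larger,
+ * return err; // non-zero-padded struct
+ */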
+
+static int __init test_user_copy_init(void)
+{
+ int ret = 0;
+ char *kmem;
+ char __user *usermem;
+ char *bad_usermem;
+ unsigned long user_addr;
+ u8 val_u8;
+ u16 val_u16;
+ u32 val_u32;
+#ifdef TEST_U64
+ u64 val_u64;
+#endif
+
+ kmem = kmalloc(PAGE_SIZE * 2, GFP_KERNEL);
+ if (!kmem)
+ return -ENOMEM;
+
+ user_addr = vm_mmap(NULL, 0, PAGE_SIZE * 2,
+ PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_ANONYMOUS | MAP_PRIVATE, 0);
+ if (user_addr >= (unsigned long)(TASK_SIZE)) {
+ pr_warn("Failed to allocate user memory\n");
+ kfree(kmem);
+ return -ENOMEM;
+ }
+
+ usermem = (char __user *)user_addr;
+ bad_usermem = (char *)user_addr;
+
+ /*
+ * Legitimate usage: none of these copies should fail.
+ */
+ memset(kmem, 0x3a, PAGE_SIZE * 2);
+ ret |= test(copy_to_user(usermem, kmem, PAGE_SIZE),
+ "legitimate copy_to_user failed");
+ memset(kmem, 0x0, PAGE_SIZE);
+ ret |= test(copy_from_user(kmem, usermem, PAGE_SIZE),
+ "legitimate copy_from_user failed");
+ ret |= test(memcmp(kmem, kmem + PAGE_SIZE, PAGE_SIZE),
+ "legitimate usercopy failed to copy data");
+
+#define test_legit(size, check) \
+ do { \
+ val_##size = check; \
+ ret |= test(put_user(val_##size, (size __user *)usermem), \
+ "legitimate put_user (" #size ") failed"); \
+ val_##size = 0; \
+ ret |= test(get_user(val_##size, (size __user *)usermem), \
+ "legitimate get_user (" #size ") failed"); \
+ ret |= test(val_##size != check, \
+ "legitimate get_user (" #size ") failed to do copy"); \
+ if (val_##size != check) { \
+ pr_info("0x%llx != 0x%llx\n", \
+ (unsigned long long)val_##size, \
+ (unsigned long long)check); \
+ } \
+ } while (0)
+
+ test_legit(u8, 0x5a);
+ test_legit(u16, 0x5a5b);
+ test_legit(u32, 0x5a5b5c5d);
+#ifdef TEST_U64
+ test_legit(u64, 0x5a5b5c5d6a6b6c6d);
+#endif
+#undef test_legit
+
+ /* Test usage of check_nonzero_user(). */
+ ret |= test_check_nonzero_user(kmem, usermem, 2 * PAGE_SIZE);
+ /* Test usage of copy_struct_from_user(). */
+ ret |= test_copy_struct_from_user(kmem, usermem, 2 * PAGE_SIZE);
+
+ /*
+ * Invalid usage: none of these copies should succeed.
+ */
+
+ /* Prepare kernel memory with check values. */
+ memset(kmem, 0x5a, PAGE_SIZE);
+ memset(kmem + PAGE_SIZE, 0, PAGE_SIZE);
+
+ /* Reject kernel-to-kernel copies through copy_from_user(). */
+ ret |= test(!copy_from_user(kmem, (char __user *)(kmem + PAGE_SIZE),
+ PAGE_SIZE),
+ "illegal all-kernel copy_from_user passed");
+
+ /* Destination half of buffer should have been zeroed. */
+ ret |= test(memcmp(kmem + PAGE_SIZE, kmem, PAGE_SIZE),
+ "zeroing failure for illegal all-kernel copy_from_user");
+
+#if 0
+ /*
+ * When running with SMAP/PAN/etc, this will Oops the kernel
+ * due to the zeroing of userspace memory on failure. This needs
+ * to be tested in LKDTM instead, since this test module does not
+ * expect to explode.
+ */
+ ret |= test(!copy_from_user(bad_usermem, (char __user *)kmem,
+ PAGE_SIZE),
+ "illegal reversed copy_from_user passed");
+#endif
+ ret |= test(!copy_to_user((char __user *)kmem, kmem + PAGE_SIZE,
+ PAGE_SIZE),
+ "illegal all-kernel copy_to_user passed");
+ ret |= test(!copy_to_user((char __user *)kmem, bad_usermem,
+ PAGE_SIZE),
+ "illegal reversed copy_to_user passed");
+
+#define test_illegal(size, check) \
+ do { \
+ val_##size = (check); \
+ ret |= test(!get_user(val_##size, (size __user *)kmem), \
+ "illegal get_user (" #size ") passed"); \
+ ret |= test(val_##size != (size)0, \
+ "zeroing failure for illegal get_user (" #size ")"); \
+ if (val_##size != (size)0) { \
+ pr_info("0x%llx != 0\n", \
+ (unsigned long long)val_##size); \
+ } \
+ ret |= test(!put_user(val_##size, (size __user *)kmem), \
+ "illegal put_user (" #size ") passed"); \
+ } while (0)
+
+ test_illegal(u8, 0x5a);
+ test_illegal(u16, 0x5a5b);
+ test_illegal(u32, 0x5a5b5c5d);
+#ifdef TEST_U64
+ test_illegal(u64, 0x5a5b5c5d6a6b6c6d);
+#endif
+#undef test_illegal
+
+ vm_munmap(user_addr, PAGE_SIZE * 2);
+ kfree(kmem);
+
+ if (ret == 0) {
+ pr_info("tests passed.\n");
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+module_init(test_user_copy_init);
+
+static void __exit test_user_copy_exit(void)
+{
+ pr_info("unloaded.\n");
+}
+
+module_exit(test_user_copy_exit);
+
+MODULE_AUTHOR("Kees Cook <keescook@chromium.org>");
+MODULE_LICENSE("GPL");
diff --git a/lib/test_uuid.c b/lib/test_uuid.c
new file mode 100644
index 000000000..cd819c397
--- /dev/null
+++ b/lib/test_uuid.c
@@ -0,0 +1,133 @@
+/*
+ * Test cases for the lib/uuid.c module.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/uuid.h>
+
+struct test_uuid_data {
+ const char *uuid;
+ guid_t le;
+ uuid_t be;
+};
+
+static const struct test_uuid_data test_uuid_test_data[] = {
+ {
+ .uuid = "c33f4995-3701-450e-9fbf-206a2e98e576",
+ .le = GUID_INIT(0xc33f4995, 0x3701, 0x450e, 0x9f, 0xbf, 0x20, 0x6a, 0x2e, 0x98, 0xe5, 0x76),
+ .be = UUID_INIT(0xc33f4995, 0x3701, 0x450e, 0x9f, 0xbf, 0x20, 0x6a, 0x2e, 0x98, 0xe5, 0x76),
+ },
+ {
+ .uuid = "64b4371c-77c1-48f9-8221-29f054fc023b",
+ .le = GUID_INIT(0x64b4371c, 0x77c1, 0x48f9, 0x82, 0x21, 0x29, 0xf0, 0x54, 0xfc, 0x02, 0x3b),
+ .be = UUID_INIT(0x64b4371c, 0x77c1, 0x48f9, 0x82, 0x21, 0x29, 0xf0, 0x54, 0xfc, 0x02, 0x3b),
+ },
+ {
+ .uuid = "0cb4ddff-a545-4401-9d06-688af53e7f84",
+ .le = GUID_INIT(0x0cb4ddff, 0xa545, 0x4401, 0x9d, 0x06, 0x68, 0x8a, 0xf5, 0x3e, 0x7f, 0x84),
+ .be = UUID_INIT(0x0cb4ddff, 0xa545, 0x4401, 0x9d, 0x06, 0x68, 0x8a, 0xf5, 0x3e, 0x7f, 0x84),
+ },
+};
+
+static const char * const test_uuid_wrong_data[] = {
+ "c33f4995-3701-450e-9fbf206a2e98e576 ", /* no hyphen(s) */
+ "64b4371c-77c1-48f9-8221-29f054XX023b", /* invalid character(s) */
+ "0cb4ddff-a545-4401-9d06-688af53e", /* not enough data */
+};
+
+static unsigned total_tests __initdata;
+static unsigned failed_tests __initdata;
+
+static void __init test_uuid_failed(const char *prefix, bool wrong, bool be,
+ const char *data, const char *actual)
+{
+ pr_err("%s test #%u %s %s data: '%s'\n",
+ prefix,
+ total_tests,
+ wrong ? "passed on wrong" : "failed on",
+ be ? "BE" : "LE",
+ data);
+ if (actual && *actual)
+ pr_err("%s test #%u actual data: '%s'\n",
+ prefix,
+ total_tests,
+ actual);
+ failed_tests++;
+}
+
+static void __init test_uuid_test(const struct test_uuid_data *data)
+{
+ guid_t le;
+ uuid_t be;
+ char buf[48];
+
+ /* LE */
+ total_tests++;
+ if (guid_parse(data->uuid, &le))
+ test_uuid_failed("conversion", false, false, data->uuid, NULL);
+
+ total_tests++;
+ if (!guid_equal(&data->le, &le)) {
+ sprintf(buf, "%pUl", &le);
+ test_uuid_failed("cmp", false, false, data->uuid, buf);
+ }
+
+ /* BE */
+ total_tests++;
+ if (uuid_parse(data->uuid, &be))
+ test_uuid_failed("conversion", false, true, data->uuid, NULL);
+
+ total_tests++;
+ if (!uuid_equal(&data->be, &be)) {
+ sprintf(buf, "%pUb", &be);
+ test_uuid_failed("cmp", false, true, data->uuid, buf);
+ }
+}
+
+static void __init test_uuid_wrong(const char *data)
+{
+ guid_t le;
+ uuid_t be;
+
+ /* LE */
+ total_tests++;
+ if (!guid_parse(data, &le))
+ test_uuid_failed("negative", true, false, data, NULL);
+
+ /* BE */
+ total_tests++;
+ if (!uuid_parse(data, &be))
+ test_uuid_failed("negative", true, true, data, NULL);
+}
+
+static int __init test_uuid_init(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(test_uuid_test_data); i++)
+ test_uuid_test(&test_uuid_test_data[i]);
+
+ for (i = 0; i < ARRAY_SIZE(test_uuid_wrong_data); i++)
+ test_uuid_wrong(test_uuid_wrong_data[i]);
+
+ if (failed_tests == 0)
+ pr_info("all %u tests passed\n", total_tests);
+ else
+ pr_err("failed %u out of %u tests\n", failed_tests, total_tests);
+
+ return failed_tests ? -EINVAL : 0;
+}
+module_init(test_uuid_init);
+
+static void __exit test_uuid_exit(void)
+{
+ /* do nothing */
+}
+module_exit(test_uuid_exit);
+
+MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/lib/test_vmalloc.c b/lib/test_vmalloc.c
new file mode 100644
index 000000000..3718d9886
--- /dev/null
+++ b/lib/test_vmalloc.c
@@ -0,0 +1,612 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Test module to stress and analyze the performance of the vmalloc allocator.
+ * (C) 2018 Uladzislau Rezki (Sony) <urezki@gmail.com>
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/random.h>
+#include <linux/kthread.h>
+#include <linux/moduleparam.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/rwsem.h>
+#include <linux/mm.h>
+#include <linux/rcupdate.h>
+#include <linux/slab.h>
+
+#define __param(type, name, init, msg) \
+ static type name = init; \
+ module_param(name, type, 0444); \
+ MODULE_PARM_DESC(name, msg) \
+
+__param(int, nr_threads, 0,
+ "Number of workers to perform tests(min: 1 max: USHRT_MAX)");
+
+__param(bool, sequential_test_order, false,
+ "Use sequential stress tests order");
+
+__param(int, test_repeat_count, 1,
+ "Set test repeat counter");
+
+__param(int, test_loop_count, 1000000,
+ "Set test loop counter");
+
+__param(int, nr_pages, 0,
+ "Set number of pages for fix_size_alloc_test(default: 1)");
+
+__param(bool, use_huge, false,
+ "Use vmalloc_huge in fix_size_alloc_test");
+
+__param(int, run_test_mask, INT_MAX,
+ "Set tests specified in the mask.\n\n"
+ "\t\tid: 1, name: fix_size_alloc_test\n"
+ "\t\tid: 2, name: full_fit_alloc_test\n"
+ "\t\tid: 4, name: long_busy_list_alloc_test\n"
+ "\t\tid: 8, name: random_size_alloc_test\n"
+ "\t\tid: 16, name: fix_align_alloc_test\n"
+ "\t\tid: 32, name: random_size_align_alloc_test\n"
+ "\t\tid: 64, name: align_shift_alloc_test\n"
+ "\t\tid: 128, name: pcpu_alloc_test\n"
+ "\t\tid: 256, name: kvfree_rcu_1_arg_vmalloc_test\n"
+ "\t\tid: 512, name: kvfree_rcu_2_arg_vmalloc_test\n"
+ "\t\tid: 1024, name: vm_map_ram_test\n"
+ /* Add a new test case description here. */
+);
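+
+/*
+ * For example (hypothetical invocation): run_test_mask=9 selects
+ * fix_size_alloc_test (1) plus random_size_alloc_test (8) and skips
+ * the rest.
+ */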
+
+/*
+ * Read write semaphore for synchronization of setup
+ * phase that is done in main thread and workers.
+ */
+static DECLARE_RWSEM(prepare_for_test_rwsem);
+
+/*
+ * Completion tracking for worker threads.
+ */
+static DECLARE_COMPLETION(test_all_done_comp);
+static atomic_t test_n_undone = ATOMIC_INIT(0);
+
+static inline void
+test_report_one_done(void)
+{
+ if (atomic_dec_and_test(&test_n_undone))
+ complete(&test_all_done_comp);
+}
+
+static int random_size_align_alloc_test(void)
+{
+ unsigned long size, align;
+ unsigned int rnd;
+ void *ptr;
+ int i;
+
+ for (i = 0; i < test_loop_count; i++) {
+ rnd = get_random_u8();
+
+ /*
+ * Maximum 1024 pages, if PAGE_SIZE is 4096.
+ */
+ align = 1 << (rnd % 23);
+
+ /*
+ * Maximum 10 pages.
+ */
+ size = ((rnd % 10) + 1) * PAGE_SIZE;
+
+ ptr = __vmalloc_node(size, align, GFP_KERNEL | __GFP_ZERO, 0,
+ __builtin_return_address(0));
+ if (!ptr)
+ return -1;
+
+ vfree(ptr);
+ }
+
+ return 0;
+}
+
+/*
+ * This test case is supposed to fail.
+ */
+static int align_shift_alloc_test(void)
+{
+ unsigned long align;
+ void *ptr;
+ int i;
+
+ for (i = 0; i < BITS_PER_LONG; i++) {
+ align = ((unsigned long) 1) << i;
+
+ ptr = __vmalloc_node(PAGE_SIZE, align, GFP_KERNEL|__GFP_ZERO, 0,
+ __builtin_return_address(0));
+ if (!ptr)
+ return -1;
+
+ vfree(ptr);
+ }
+
+ return 0;
+}
+
+static int fix_align_alloc_test(void)
+{
+ void *ptr;
+ int i;
+
+ for (i = 0; i < test_loop_count; i++) {
+ ptr = __vmalloc_node(5 * PAGE_SIZE, THREAD_ALIGN << 1,
+ GFP_KERNEL | __GFP_ZERO, 0,
+ __builtin_return_address(0));
+ if (!ptr)
+ return -1;
+
+ vfree(ptr);
+ }
+
+ return 0;
+}
+
+static int random_size_alloc_test(void)
+{
+ unsigned int n;
+ void *p;
+ int i;
+
+ for (i = 0; i < test_loop_count; i++) {
+ n = get_random_u32_inclusive(1, 100);
+ p = vmalloc(n * PAGE_SIZE);
+
+ if (!p)
+ return -1;
+
+ *((__u8 *)p) = 1;
+ vfree(p);
+ }
+
+ return 0;
+}
+
+static int long_busy_list_alloc_test(void)
+{
+ void *ptr_1, *ptr_2;
+ void **ptr;
+ int rv = -1;
+ int i;
+
+ ptr = vmalloc(sizeof(void *) * 15000);
+ if (!ptr)
+ return rv;
+
+ for (i = 0; i < 15000; i++)
+ ptr[i] = vmalloc(1 * PAGE_SIZE);
+
+ for (i = 0; i < test_loop_count; i++) {
+ ptr_1 = vmalloc(100 * PAGE_SIZE);
+ if (!ptr_1)
+ goto leave;
+
+ ptr_2 = vmalloc(1 * PAGE_SIZE);
+ if (!ptr_2) {
+ vfree(ptr_1);
+ goto leave;
+ }
+
+ *((__u8 *)ptr_1) = 0;
+ *((__u8 *)ptr_2) = 1;
+
+ vfree(ptr_1);
+ vfree(ptr_2);
+ }
+
+ /* Success */
+ rv = 0;
+
+leave:
+ for (i = 0; i < 15000; i++)
+ vfree(ptr[i]);
+
+ vfree(ptr);
+ return rv;
+}
+
+static int full_fit_alloc_test(void)
+{
+ void **ptr, **junk_ptr, *tmp;
+ int junk_length;
+ int rv = -1;
+ int i;
+
+ junk_length = fls(num_online_cpus());
+ junk_length *= (32 * 1024 * 1024 / PAGE_SIZE);
+
+ ptr = vmalloc(sizeof(void *) * junk_length);
+ if (!ptr)
+ return rv;
+
+ junk_ptr = vmalloc(sizeof(void *) * junk_length);
+ if (!junk_ptr) {
+ vfree(ptr);
+ return rv;
+ }
+
+ for (i = 0; i < junk_length; i++) {
+ ptr[i] = vmalloc(1 * PAGE_SIZE);
+ junk_ptr[i] = vmalloc(1 * PAGE_SIZE);
+ }
+
+ for (i = 0; i < junk_length; i++)
+ vfree(junk_ptr[i]);
+
+ for (i = 0; i < test_loop_count; i++) {
+ tmp = vmalloc(1 * PAGE_SIZE);
+
+ if (!tmp)
+ goto error;
+
+ *((__u8 *)tmp) = 1;
+ vfree(tmp);
+ }
+
+ /* Success */
+ rv = 0;
+
+error:
+ for (i = 0; i < junk_length; i++)
+ vfree(ptr[i]);
+
+ vfree(ptr);
+ vfree(junk_ptr);
+
+ return rv;
+}
+
+static int fix_size_alloc_test(void)
+{
+ void *ptr;
+ int i;
+
+ for (i = 0; i < test_loop_count; i++) {
+ if (use_huge)
+ ptr = vmalloc_huge((nr_pages > 0 ? nr_pages:1) * PAGE_SIZE, GFP_KERNEL);
+ else
+ ptr = vmalloc((nr_pages > 0 ? nr_pages:1) * PAGE_SIZE);
+
+ if (!ptr)
+ return -1;
+
+ *((__u8 *)ptr) = 0;
+
+ vfree(ptr);
+ }
+
+ return 0;
+}
+
+static int
+pcpu_alloc_test(void)
+{
+ int rv = 0;
+#ifndef CONFIG_NEED_PER_CPU_KM
+ void __percpu **pcpu;
+ size_t size, align;
+ int i;
+
+ pcpu = vmalloc(sizeof(void __percpu *) * 35000);
+ if (!pcpu)
+ return -1;
+
+ for (i = 0; i < 35000; i++) {
+ size = get_random_u32_inclusive(1, PAGE_SIZE / 4);
+
+ /*
+ * Maximum alignment is 1 << 11 == 2048 bytes.
+ */
+ align = 1 << get_random_u32_inclusive(1, 11);
+
+ pcpu[i] = __alloc_percpu(size, align);
+ if (!pcpu[i])
+ rv = -1;
+ }
+
+ for (i = 0; i < 35000; i++)
+ free_percpu(pcpu[i]);
+
+ vfree(pcpu);
+#endif
+ return rv;
+}
+
+struct test_kvfree_rcu {
+ struct rcu_head rcu;
+ unsigned char array[20];
+};
+
+static int
+kvfree_rcu_1_arg_vmalloc_test(void)
+{
+ struct test_kvfree_rcu *p;
+ int i;
+
+ for (i = 0; i < test_loop_count; i++) {
+ p = vmalloc(1 * PAGE_SIZE);
+ if (!p)
+ return -1;
+
+ p->array[0] = 'a';
+ kvfree_rcu_mightsleep(p);
+ }
+
+ return 0;
+}
+
+static int
+kvfree_rcu_2_arg_vmalloc_test(void)
+{
+ struct test_kvfree_rcu *p;
+ int i;
+
+ for (i = 0; i < test_loop_count; i++) {
+ p = vmalloc(1 * PAGE_SIZE);
+ if (!p)
+ return -1;
+
+ p->array[0] = 'a';
+ kvfree_rcu(p, rcu);
+ }
+
+ return 0;
+}
+
+static int
+vm_map_ram_test(void)
+{
+ unsigned long nr_allocated;
+ unsigned int map_nr_pages;
+ unsigned char *v_ptr;
+ struct page **pages;
+ int i;
+
+ map_nr_pages = nr_pages > 0 ? nr_pages:1;
+ pages = kcalloc(map_nr_pages, sizeof(struct page *), GFP_KERNEL);
+ if (!pages)
+ return -1;
+
+ nr_allocated = alloc_pages_bulk_array(GFP_KERNEL, map_nr_pages, pages);
+ if (nr_allocated != map_nr_pages)
+ goto cleanup;
+
+ /* Run the test loop. */
+ for (i = 0; i < test_loop_count; i++) {
+ v_ptr = vm_map_ram(pages, map_nr_pages, NUMA_NO_NODE);
+ *v_ptr = 'a';
+ vm_unmap_ram(v_ptr, map_nr_pages);
+ }
+
+cleanup:
+ for (i = 0; i < nr_allocated; i++)
+ __free_page(pages[i]);
+
+ kfree(pages);
+
+ /* 0 indicates success. */
+ return nr_allocated != map_nr_pages;
+}
+
+struct test_case_desc {
+ const char *test_name;
+ int (*test_func)(void);
+};
+
+static struct test_case_desc test_case_array[] = {
+ { "fix_size_alloc_test", fix_size_alloc_test },
+ { "full_fit_alloc_test", full_fit_alloc_test },
+ { "long_busy_list_alloc_test", long_busy_list_alloc_test },
+ { "random_size_alloc_test", random_size_alloc_test },
+ { "fix_align_alloc_test", fix_align_alloc_test },
+ { "random_size_align_alloc_test", random_size_align_alloc_test },
+ { "align_shift_alloc_test", align_shift_alloc_test },
+ { "pcpu_alloc_test", pcpu_alloc_test },
+ { "kvfree_rcu_1_arg_vmalloc_test", kvfree_rcu_1_arg_vmalloc_test },
+ { "kvfree_rcu_2_arg_vmalloc_test", kvfree_rcu_2_arg_vmalloc_test },
+ { "vm_map_ram_test", vm_map_ram_test },
+ /* Add a new test case here. */
+};
+
+struct test_case_data {
+ int test_failed;
+ int test_passed;
+ u64 time;
+};
+
+static struct test_driver {
+ struct task_struct *task;
+ struct test_case_data data[ARRAY_SIZE(test_case_array)];
+
+ unsigned long start;
+ unsigned long stop;
+} *tdriver;
+
+static void shuffle_array(int *arr, int n)
+{
+ int i, j;
+
+ for (i = n - 1; i > 0; i--) {
+ /* Pick a random index in [0, i). */
+ j = get_random_u32_below(i);
+
+ /* Swap indexes. */
+ swap(arr[i], arr[j]);
+ }
+}
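+
+/*
+ * Note: drawing j from [0, i) rather than [0, i] makes this Sattolo's
+ * variant of the Fisher-Yates shuffle (every element moves). Uniformity
+ * is not required here; any scrambling of the test order will do.
+ */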
+
+static int test_func(void *private)
+{
+ struct test_driver *t = private;
+ int random_array[ARRAY_SIZE(test_case_array)];
+ int index, i, j;
+ ktime_t kt;
+ u64 delta;
+
+ for (i = 0; i < ARRAY_SIZE(test_case_array); i++)
+ random_array[i] = i;
+
+ if (!sequential_test_order)
+ shuffle_array(random_array, ARRAY_SIZE(test_case_array));
+
+ /*
+ * Block until initialization is done.
+ */
+ down_read(&prepare_for_test_rwsem);
+
+ t->start = get_cycles();
+ for (i = 0; i < ARRAY_SIZE(test_case_array); i++) {
+ index = random_array[i];
+
+ /*
+ * Skip tests if run_test_mask has been specified.
+ */
+ if (!((run_test_mask & (1 << index)) >> index))
+ continue;
+
+ kt = ktime_get();
+ for (j = 0; j < test_repeat_count; j++) {
+ if (!test_case_array[index].test_func())
+ t->data[index].test_passed++;
+ else
+ t->data[index].test_failed++;
+ }
+
+ /*
+ * Compute the average time one repetition of the test took.
+ */
+ delta = (u64) ktime_us_delta(ktime_get(), kt);
+ do_div(delta, (u32) test_repeat_count);
+
+ t->data[index].time = delta;
+ }
+ t->stop = get_cycles();
+
+ up_read(&prepare_for_test_rwsem);
+ test_report_one_done();
+
+ /*
+ * Wait for the kthread_stop() call.
+ */
+ while (!kthread_should_stop())
+ msleep(10);
+
+ return 0;
+}
+
+static int
+init_test_configuration(void)
+{
+ /*
+ * The maximum number of workers is hard-coded to USHRT_MAX,
+ * leaving plenty of headroom for heavy stress runs.
+ */
+ nr_threads = clamp(nr_threads, 1, (int) USHRT_MAX);
+
+ /* Allocate the space for test instances. */
+ tdriver = kvcalloc(nr_threads, sizeof(*tdriver), GFP_KERNEL);
+ if (tdriver == NULL)
+ return -1;
+
+ if (test_repeat_count <= 0)
+ test_repeat_count = 1;
+
+ if (test_loop_count <= 0)
+ test_loop_count = 1;
+
+ return 0;
+}
+
+static void do_concurrent_test(void)
+{
+ int i, ret;
+
+ /*
+ * Apply the basic configuration and sanity-check the parameters.
+ */
+ ret = init_test_configuration();
+ if (ret < 0)
+ return;
+
+ /*
+ * Hold all workers until setup is complete.
+ */
+ down_write(&prepare_for_test_rwsem);
+
+ for (i = 0; i < nr_threads; i++) {
+ struct test_driver *t = &tdriver[i];
+
+ t->task = kthread_run(test_func, t, "vmalloc_test/%d", i);
+
+ if (!IS_ERR(t->task))
+ /* Success. */
+ atomic_inc(&test_n_undone);
+ else
+ pr_err("Failed to start %d kthread\n", i);
+ }
+
+ /*
+ * Now let the workers do their job.
+ */
+ up_write(&prepare_for_test_rwsem);
+
+ /*
+ * Sleep quietly until all workers are done, polling at a one
+ * second interval. Since the test can take a long time, a plain
+ * wait_for_completion() could trigger a hung-task stack trace;
+ * that is why we use wait_for_completion_timeout() with an HZ
+ * timeout instead.
+ */
+ do {
+ ret = wait_for_completion_timeout(&test_all_done_comp, HZ);
+ } while (!ret);
+
+ for (i = 0; i < nr_threads; i++) {
+ struct test_driver *t = &tdriver[i];
+ int j;
+
+ if (!IS_ERR(t->task))
+ kthread_stop(t->task);
+
+ for (j = 0; j < ARRAY_SIZE(test_case_array); j++) {
+ if (!(run_test_mask & (1 << j)))
+ continue;
+
+ pr_info(
+ "Summary: %s passed: %d failed: %d repeat: %d loops: %d avg: %llu usec\n",
+ test_case_array[j].test_name,
+ t->data[j].test_passed,
+ t->data[j].test_failed,
+ test_repeat_count, test_loop_count,
+ t->data[j].time);
+ }
+
+ pr_info("All test took worker%d=%lu cycles\n",
+ i, t->stop - t->start);
+ }
+
+ kvfree(tdriver);
+}
+
+static int vmalloc_test_init(void)
+{
+ do_concurrent_test();
+ return -EAGAIN; /* Failing the init unloads the module right away. */
+}
+
+static void vmalloc_test_exit(void)
+{
+}
+
+module_init(vmalloc_test_init)
+module_exit(vmalloc_test_exit)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Uladzislau Rezki");
+MODULE_DESCRIPTION("vmalloc test module");
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
new file mode 100644
index 000000000..e77d48564
--- /dev/null
+++ b/lib/test_xarray.c
@@ -0,0 +1,1836 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * test_xarray.c: Test the XArray API
+ * Copyright (c) 2017-2018 Microsoft Corporation
+ * Copyright (c) 2019-2020 Oracle
+ * Author: Matthew Wilcox <willy@infradead.org>
+ */
+
+#include <linux/xarray.h>
+#include <linux/module.h>
+
+static unsigned int tests_run;
+static unsigned int tests_passed;
+
+static const unsigned int order_limit =
+ IS_ENABLED(CONFIG_XARRAY_MULTI) ? BITS_PER_LONG : 1;
+
+#ifndef XA_DEBUG
+# ifdef __KERNEL__
+void xa_dump(const struct xarray *xa) { }
+# endif
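+
+/*
+ * Count every check; on failure, log the location and dump the
+ * array instead of BUG()ing, so the rest of the suite still runs.
+ */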
+#undef XA_BUG_ON
+#define XA_BUG_ON(xa, x) do { \
+ tests_run++; \
+ if (x) { \
+ printk("BUG at %s:%d\n", __func__, __LINE__); \
+ xa_dump(xa); \
+ dump_stack(); \
+ } else { \
+ tests_passed++; \
+ } \
+} while (0)
+#endif
+
+static void *xa_mk_index(unsigned long index)
+{
+ return xa_mk_value(index & LONG_MAX);
+}
+
+static void *xa_store_index(struct xarray *xa, unsigned long index, gfp_t gfp)
+{
+ return xa_store(xa, index, xa_mk_index(index), gfp);
+}
+
+static void xa_insert_index(struct xarray *xa, unsigned long index)
+{
+ XA_BUG_ON(xa, xa_insert(xa, index, xa_mk_index(index),
+ GFP_KERNEL) != 0);
+}
+
+static void xa_alloc_index(struct xarray *xa, unsigned long index, gfp_t gfp)
+{
+ u32 id;
+
+ XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(index), xa_limit_32b,
+ gfp) != 0);
+ XA_BUG_ON(xa, id != index);
+}
+
+static void xa_erase_index(struct xarray *xa, unsigned long index)
+{
+ XA_BUG_ON(xa, xa_erase(xa, index) != xa_mk_index(index));
+ XA_BUG_ON(xa, xa_load(xa, index) != NULL);
+}
+
+/*
+ * If anyone needs this, please move it to xarray.c. We have no current
+ * users outside the test suite because all current multislot users want
+ * to use the advanced API.
+ */
+static void *xa_store_order(struct xarray *xa, unsigned long index,
+ unsigned order, void *entry, gfp_t gfp)
+{
+ XA_STATE_ORDER(xas, xa, index, order);
+ void *curr;
+
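+ /* Drop the lock between attempts so xas_nomem() can allocate. */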
+ do {
+ xas_lock(&xas);
+ curr = xas_store(&xas, entry);
+ xas_unlock(&xas);
+ } while (xas_nomem(&xas, gfp));
+
+ return curr;
+}
+
+static noinline void check_xa_err(struct xarray *xa)
+{
+ XA_BUG_ON(xa, xa_err(xa_store_index(xa, 0, GFP_NOWAIT)) != 0);
+ XA_BUG_ON(xa, xa_err(xa_erase(xa, 0)) != 0);
+#ifndef __KERNEL__
+ /* The kernel does not fail GFP_NOWAIT allocations */
+ XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_NOWAIT)) != -ENOMEM);
+ XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_NOWAIT)) != -ENOMEM);
+#endif
+ XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_KERNEL)) != 0);
+ XA_BUG_ON(xa, xa_err(xa_store(xa, 1, xa_mk_value(0), GFP_KERNEL)) != 0);
+ XA_BUG_ON(xa, xa_err(xa_erase(xa, 1)) != 0);
+// kills the test-suite :-(
+// XA_BUG_ON(xa, xa_err(xa_store(xa, 0, xa_mk_internal(0), 0)) != -EINVAL);
+}
+
+static noinline void check_xas_retry(struct xarray *xa)
+{
+ XA_STATE(xas, xa, 0);
+ void *entry;
+
+ xa_store_index(xa, 0, GFP_KERNEL);
+ xa_store_index(xa, 1, GFP_KERNEL);
+
+ rcu_read_lock();
+ XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_value(0));
+ xa_erase_index(xa, 1);
+ XA_BUG_ON(xa, !xa_is_retry(xas_reload(&xas)));
+ XA_BUG_ON(xa, xas_retry(&xas, NULL));
+ XA_BUG_ON(xa, xas_retry(&xas, xa_mk_value(0)));
+ xas_reset(&xas);
+ XA_BUG_ON(xa, xas.xa_node != XAS_RESTART);
+ XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0));
+ XA_BUG_ON(xa, xas.xa_node != NULL);
+ rcu_read_unlock();
+
+ XA_BUG_ON(xa, xa_store_index(xa, 1, GFP_KERNEL) != NULL);
+
+ rcu_read_lock();
+ XA_BUG_ON(xa, !xa_is_internal(xas_reload(&xas)));
+ xas.xa_node = XAS_RESTART;
+ XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0));
+ rcu_read_unlock();
+
+ /* Make sure we can iterate through retry entries */
+ xas_lock(&xas);
+ xas_set(&xas, 0);
+ xas_store(&xas, XA_RETRY_ENTRY);
+ xas_set(&xas, 1);
+ xas_store(&xas, XA_RETRY_ENTRY);
+
+ xas_set(&xas, 0);
+ xas_for_each(&xas, entry, ULONG_MAX) {
+ xas_store(&xas, xa_mk_index(xas.xa_index));
+ }
+ xas_unlock(&xas);
+
+ xa_erase_index(xa, 0);
+ xa_erase_index(xa, 1);
+}
+
+static noinline void check_xa_load(struct xarray *xa)
+{
+ unsigned long i, j;
+
+ for (i = 0; i < 1024; i++) {
+ for (j = 0; j < 1024; j++) {
+ void *entry = xa_load(xa, j);
+ if (j < i)
+ XA_BUG_ON(xa, xa_to_value(entry) != j);
+ else
+ XA_BUG_ON(xa, entry);
+ }
+ XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);
+ }
+
+ for (i = 0; i < 1024; i++) {
+ for (j = 0; j < 1024; j++) {
+ void *entry = xa_load(xa, j);
+ if (j >= i)
+ XA_BUG_ON(xa, xa_to_value(entry) != j);
+ else
+ XA_BUG_ON(xa, entry);
+ }
+ xa_erase_index(xa, i);
+ }
+ XA_BUG_ON(xa, !xa_empty(xa));
+}
+
+static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index)
+{
+ unsigned int order;
+ unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 8 : 1;
+
+ /* NULL elements have no marks set */
+ XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
+ xa_set_mark(xa, index, XA_MARK_0);
+ XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
+
+ /* Storing a pointer will not make a mark appear */
+ XA_BUG_ON(xa, xa_store_index(xa, index, GFP_KERNEL) != NULL);
+ XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
+ xa_set_mark(xa, index, XA_MARK_0);
+ XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0));
+
+ /* Setting one mark will not set another mark */
+ XA_BUG_ON(xa, xa_get_mark(xa, index + 1, XA_MARK_0));
+ XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_1));
+
+ /* Storing NULL clears marks, and they can't be set again */
+ xa_erase_index(xa, index);
+ XA_BUG_ON(xa, !xa_empty(xa));
+ XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
+ xa_set_mark(xa, index, XA_MARK_0);
+ XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
+
+ /*
+ * Storing a multi-index entry over entries with marks gives the
+ * entire entry the union of the marks
+ */
+ BUG_ON((index % 4) != 0);
+ for (order = 2; order < max_order; order++) {
+ unsigned long base = round_down(index, 1UL << order);
+ unsigned long next = base + (1UL << order);
+ unsigned long i;
+
+ XA_BUG_ON(xa, xa_store_index(xa, index + 1, GFP_KERNEL));
+ xa_set_mark(xa, index + 1, XA_MARK_0);
+ XA_BUG_ON(xa, xa_store_index(xa, index + 2, GFP_KERNEL));
+ xa_set_mark(xa, index + 2, XA_MARK_2);
+ XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL));
+ xa_store_order(xa, index, order, xa_mk_index(index),
+ GFP_KERNEL);
+ for (i = base; i < next; i++) {
+ XA_STATE(xas, xa, i);
+ unsigned int seen = 0;
+ void *entry;
+
+ XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0));
+ XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_1));
+ XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_2));
+
+ /* We should see two elements in the array */
+ rcu_read_lock();
+ xas_for_each(&xas, entry, ULONG_MAX)
+ seen++;
+ rcu_read_unlock();
+ XA_BUG_ON(xa, seen != 2);
+
+ /* One of which is marked */
+ xas_set(&xas, 0);
+ seen = 0;
+ rcu_read_lock();
+ xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0)
+ seen++;
+ rcu_read_unlock();
+ XA_BUG_ON(xa, seen != 1);
+ }
+ XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_0));
+ XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_1));
+ XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_2));
+ xa_erase_index(xa, index);
+ xa_erase_index(xa, next);
+ XA_BUG_ON(xa, !xa_empty(xa));
+ }
+ XA_BUG_ON(xa, !xa_empty(xa));
+}
+
+static noinline void check_xa_mark_2(struct xarray *xa)
+{
+ XA_STATE(xas, xa, 0);
+ unsigned long index;
+ unsigned int count = 0;
+ void *entry;
+
+ xa_store_index(xa, 0, GFP_KERNEL);
+ xa_set_mark(xa, 0, XA_MARK_0);
+ xas_lock(&xas);
+ xas_load(&xas);
+ xas_init_marks(&xas);
+ xas_unlock(&xas);
+ XA_BUG_ON(xa, xa_get_mark(xa, 0, XA_MARK_0));
+
+ for (index = 3500; index < 4500; index++) {
+ xa_store_index(xa, index, GFP_KERNEL);
+ xa_set_mark(xa, index, XA_MARK_0);
+ }
+
+ xas_reset(&xas);
+ rcu_read_lock();
+ xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0)
+ count++;
+ rcu_read_unlock();
+ XA_BUG_ON(xa, count != 1000);
+
+ xas_lock(&xas);
+ xas_for_each(&xas, entry, ULONG_MAX) {
+ xas_init_marks(&xas);
+ XA_BUG_ON(xa, !xa_get_mark(xa, xas.xa_index, XA_MARK_0));
+ XA_BUG_ON(xa, !xas_get_mark(&xas, XA_MARK_0));
+ }
+ xas_unlock(&xas);
+
+ xa_destroy(xa);
+}
+
+static noinline void check_xa_mark_3(struct xarray *xa)
+{
+#ifdef CONFIG_XARRAY_MULTI
+ XA_STATE(xas, xa, 0x41);
+ void *entry;
+ int count = 0;
+
+ xa_store_order(xa, 0x40, 2, xa_mk_index(0x40), GFP_KERNEL);
+ xa_set_mark(xa, 0x41, XA_MARK_0);
+
+ rcu_read_lock();
+ xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0) {
+ count++;
+ XA_BUG_ON(xa, entry != xa_mk_index(0x40));
+ }
+ XA_BUG_ON(xa, count != 1);
+ rcu_read_unlock();
+ xa_destroy(xa);
+#endif
+}
+
+static noinline void check_xa_mark(struct xarray *xa)
+{
+ unsigned long index;
+
+ for (index = 0; index < 16384; index += 4)
+ check_xa_mark_1(xa, index);
+
+ check_xa_mark_2(xa);
+ check_xa_mark_3(xa);
+}
+
+static noinline void check_xa_shrink(struct xarray *xa)
+{
+ XA_STATE(xas, xa, 1);
+ struct xa_node *node;
+ unsigned int order;
+ unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 15 : 1;
+
+ XA_BUG_ON(xa, !xa_empty(xa));
+ XA_BUG_ON(xa, xa_store_index(xa, 0, GFP_KERNEL) != NULL);
+ XA_BUG_ON(xa, xa_store_index(xa, 1, GFP_KERNEL) != NULL);
+
+ /*
+ * Check that erasing the entry at 1 shrinks the tree and properly
+ * marks the node as being deleted.
+ */
+ xas_lock(&xas);
+ XA_BUG_ON(xa, xas_load(&xas) != xa_mk_value(1));
+ node = xas.xa_node;
+ XA_BUG_ON(xa, xa_entry_locked(xa, node, 0) != xa_mk_value(0));
+ XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1));
+ XA_BUG_ON(xa, xa_load(xa, 1) != NULL);
+ XA_BUG_ON(xa, xas.xa_node != XAS_BOUNDS);
+ XA_BUG_ON(xa, xa_entry_locked(xa, node, 0) != XA_RETRY_ENTRY);
+ XA_BUG_ON(xa, xas_load(&xas) != NULL);
+ xas_unlock(&xas);
+ XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
+ xa_erase_index(xa, 0);
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+ for (order = 0; order < max_order; order++) {
+ unsigned long max = (1UL << order) - 1;
+ xa_store_order(xa, 0, order, xa_mk_value(0), GFP_KERNEL);
+ XA_BUG_ON(xa, xa_load(xa, max) != xa_mk_value(0));
+ XA_BUG_ON(xa, xa_load(xa, max + 1) != NULL);
+ rcu_read_lock();
+ node = xa_head(xa);
+ rcu_read_unlock();
+ XA_BUG_ON(xa, xa_store_index(xa, ULONG_MAX, GFP_KERNEL) !=
+ NULL);
+ rcu_read_lock();
+ XA_BUG_ON(xa, xa_head(xa) == node);
+ rcu_read_unlock();
+ XA_BUG_ON(xa, xa_load(xa, max + 1) != NULL);
+ xa_erase_index(xa, ULONG_MAX);
+ XA_BUG_ON(xa, xa->xa_head != node);
+ xa_erase_index(xa, 0);
+ }
+}
+
+static noinline void check_insert(struct xarray *xa)
+{
+ unsigned long i;
+
+ for (i = 0; i < 1024; i++) {
+ xa_insert_index(xa, i);
+ XA_BUG_ON(xa, xa_load(xa, i - 1) != NULL);
+ XA_BUG_ON(xa, xa_load(xa, i + 1) != NULL);
+ xa_erase_index(xa, i);
+ }
+
+ for (i = 10; i < BITS_PER_LONG; i++) {
+ xa_insert_index(xa, 1UL << i);
+ XA_BUG_ON(xa, xa_load(xa, (1UL << i) - 1) != NULL);
+ XA_BUG_ON(xa, xa_load(xa, (1UL << i) + 1) != NULL);
+ xa_erase_index(xa, 1UL << i);
+
+ xa_insert_index(xa, (1UL << i) - 1);
+ XA_BUG_ON(xa, xa_load(xa, (1UL << i) - 2) != NULL);
+ XA_BUG_ON(xa, xa_load(xa, 1UL << i) != NULL);
+ xa_erase_index(xa, (1UL << i) - 1);
+ }
+
+ xa_insert_index(xa, ~0UL);
+ XA_BUG_ON(xa, xa_load(xa, 0UL) != NULL);
+ XA_BUG_ON(xa, xa_load(xa, ~1UL) != NULL);
+ xa_erase_index(xa, ~0UL);
+
+ XA_BUG_ON(xa, !xa_empty(xa));
+}
+
+static noinline void check_cmpxchg(struct xarray *xa)
+{
+ void *FIVE = xa_mk_value(5);
+ void *SIX = xa_mk_value(6);
+ void *LOTS = xa_mk_value(12345678);
+
+ XA_BUG_ON(xa, !xa_empty(xa));
+ XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_KERNEL) != NULL);
+ XA_BUG_ON(xa, xa_insert(xa, 12345678, xa, GFP_KERNEL) != -EBUSY);
+ XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, SIX, FIVE, GFP_KERNEL) != LOTS);
+ XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, LOTS, FIVE, GFP_KERNEL) != LOTS);
+ XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, FIVE, LOTS, GFP_KERNEL) != FIVE);
+ XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != NULL);
+ XA_BUG_ON(xa, xa_cmpxchg(xa, 5, NULL, FIVE, GFP_KERNEL) != NULL);
+ XA_BUG_ON(xa, xa_insert(xa, 5, FIVE, GFP_KERNEL) != -EBUSY);
+ XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != FIVE);
+ XA_BUG_ON(xa, xa_insert(xa, 5, FIVE, GFP_KERNEL) == -EBUSY);
+ xa_erase_index(xa, 12345678);
+ xa_erase_index(xa, 5);
+ XA_BUG_ON(xa, !xa_empty(xa));
+}
+
+static noinline void check_reserve(struct xarray *xa)
+{
+ void *entry;
+ unsigned long index;
+ int count;
+
+ /* An array with a reserved entry is not empty */
+ XA_BUG_ON(xa, !xa_empty(xa));
+ XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
+ XA_BUG_ON(xa, xa_empty(xa));
+ XA_BUG_ON(xa, xa_load(xa, 12345678));
+ xa_release(xa, 12345678);
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+ /* Releasing a used entry does nothing */
+ XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
+ XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_NOWAIT) != NULL);
+ xa_release(xa, 12345678);
+ xa_erase_index(xa, 12345678);
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+ /* cmpxchg sees a reserved entry as ZERO */
+ XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
+ XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, XA_ZERO_ENTRY,
+ xa_mk_value(12345678), GFP_NOWAIT) != NULL);
+ xa_release(xa, 12345678);
+ xa_erase_index(xa, 12345678);
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+ /* xa_insert treats it as busy */
+ XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
+ XA_BUG_ON(xa, xa_insert(xa, 12345678, xa_mk_value(12345678), 0) !=
+ -EBUSY);
+ XA_BUG_ON(xa, xa_empty(xa));
+ XA_BUG_ON(xa, xa_erase(xa, 12345678) != NULL);
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+ /* Can iterate through a reserved entry */
+ xa_store_index(xa, 5, GFP_KERNEL);
+ XA_BUG_ON(xa, xa_reserve(xa, 6, GFP_KERNEL) != 0);
+ xa_store_index(xa, 7, GFP_KERNEL);
+
+ count = 0;
+ xa_for_each(xa, index, entry) {
+ XA_BUG_ON(xa, index != 5 && index != 7);
+ count++;
+ }
+ XA_BUG_ON(xa, count != 2);
+
+ /* If we free a reserved entry, we should be able to allocate it */
+ if (xa->xa_flags & XA_FLAGS_ALLOC) {
+ u32 id;
+
+ XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_value(8),
+ XA_LIMIT(5, 10), GFP_KERNEL) != 0);
+ XA_BUG_ON(xa, id != 8);
+
+ xa_release(xa, 6);
+ XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_value(6),
+ XA_LIMIT(5, 10), GFP_KERNEL) != 0);
+ XA_BUG_ON(xa, id != 6);
+ }
+
+ xa_destroy(xa);
+}
+
+static noinline void check_xas_erase(struct xarray *xa)
+{
+ XA_STATE(xas, xa, 0);
+ void *entry;
+ unsigned long i, j;
+
+ for (i = 0; i < 200; i++) {
+ for (j = i; j < 2 * i + 17; j++) {
+ xas_set(&xas, j);
+ do {
+ xas_lock(&xas);
+ xas_store(&xas, xa_mk_index(j));
+ xas_unlock(&xas);
+ } while (xas_nomem(&xas, GFP_KERNEL));
+ }
+
+ xas_set(&xas, ULONG_MAX);
+ do {
+ xas_lock(&xas);
+ xas_store(&xas, xa_mk_value(0));
+ xas_unlock(&xas);
+ } while (xas_nomem(&xas, GFP_KERNEL));
+
+ xas_lock(&xas);
+ xas_store(&xas, NULL);
+
+ xas_set(&xas, 0);
+ j = i;
+ xas_for_each(&xas, entry, ULONG_MAX) {
+ XA_BUG_ON(xa, entry != xa_mk_index(j));
+ xas_store(&xas, NULL);
+ j++;
+ }
+ xas_unlock(&xas);
+ XA_BUG_ON(xa, !xa_empty(xa));
+ }
+}
+
+#ifdef CONFIG_XARRAY_MULTI
+static noinline void check_multi_store_1(struct xarray *xa, unsigned long index,
+ unsigned int order)
+{
+ XA_STATE(xas, xa, index);
+ unsigned long min = index & ~((1UL << order) - 1);
+ unsigned long max = min + (1UL << order);
+
+ xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);
+ XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(index));
+ XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(index));
+ XA_BUG_ON(xa, xa_load(xa, max) != NULL);
+ XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);
+
+ xas_lock(&xas);
+ XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(min)) != xa_mk_index(index));
+ xas_unlock(&xas);
+ XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(min));
+ XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(min));
+ XA_BUG_ON(xa, xa_load(xa, max) != NULL);
+ XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);
+
+ xa_erase_index(xa, min);
+ XA_BUG_ON(xa, !xa_empty(xa));
+}
+
+static noinline void check_multi_store_2(struct xarray *xa, unsigned long index,
+ unsigned int order)
+{
+ XA_STATE(xas, xa, index);
+ xa_store_order(xa, index, order, xa_mk_value(0), GFP_KERNEL);
+
+ xas_lock(&xas);
+ XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(1)) != xa_mk_value(0));
+ XA_BUG_ON(xa, xas.xa_index != index);
+ XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1));
+ xas_unlock(&xas);
+ XA_BUG_ON(xa, !xa_empty(xa));
+}
+
+static noinline void check_multi_store_3(struct xarray *xa, unsigned long index,
+ unsigned int order)
+{
+ XA_STATE(xas, xa, 0);
+ void *entry;
+ int n = 0;
+
+ xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);
+
+ xas_lock(&xas);
+ xas_for_each(&xas, entry, ULONG_MAX) {
+ XA_BUG_ON(xa, entry != xa_mk_index(index));
+ n++;
+ }
+ XA_BUG_ON(xa, n != 1);
+ xas_set(&xas, index + 1);
+ xas_for_each(&xas, entry, ULONG_MAX) {
+ XA_BUG_ON(xa, entry != xa_mk_index(index));
+ n++;
+ }
+ XA_BUG_ON(xa, n != 2);
+ xas_unlock(&xas);
+
+ xa_destroy(xa);
+}
+#endif
+
+static noinline void check_multi_store(struct xarray *xa)
+{
+#ifdef CONFIG_XARRAY_MULTI
+ unsigned long i, j, k;
+ unsigned int max_order = (sizeof(long) == 4) ? 30 : 60;
+
+ /* Loading from any position returns the same value */
+ xa_store_order(xa, 0, 1, xa_mk_value(0), GFP_KERNEL);
+ XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
+ XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(0));
+ XA_BUG_ON(xa, xa_load(xa, 2) != NULL);
+ rcu_read_lock();
+ XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 2);
+ XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 2);
+ rcu_read_unlock();
+
+ /* Storing adjacent to the value does not alter the value */
+ xa_store(xa, 3, xa, GFP_KERNEL);
+ XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
+ XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(0));
+ XA_BUG_ON(xa, xa_load(xa, 2) != NULL);
+ rcu_read_lock();
+ XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 3);
+ XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 2);
+ rcu_read_unlock();
+
+ /* Overwriting multiple indexes works */
+ xa_store_order(xa, 0, 2, xa_mk_value(1), GFP_KERNEL);
+ XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(1));
+ XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(1));
+ XA_BUG_ON(xa, xa_load(xa, 2) != xa_mk_value(1));
+ XA_BUG_ON(xa, xa_load(xa, 3) != xa_mk_value(1));
+ XA_BUG_ON(xa, xa_load(xa, 4) != NULL);
+ rcu_read_lock();
+ XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 4);
+ XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 4);
+ rcu_read_unlock();
+
+ /* We can erase multiple values with a single store */
+ xa_store_order(xa, 0, BITS_PER_LONG - 1, NULL, GFP_KERNEL);
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+ /* Even when the first slot is empty but the others aren't */
+ xa_store_index(xa, 1, GFP_KERNEL);
+ xa_store_index(xa, 2, GFP_KERNEL);
+ xa_store_order(xa, 0, 2, NULL, GFP_KERNEL);
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+ for (i = 0; i < max_order; i++) {
+ for (j = 0; j < max_order; j++) {
+ xa_store_order(xa, 0, i, xa_mk_index(i), GFP_KERNEL);
+ xa_store_order(xa, 0, j, xa_mk_index(j), GFP_KERNEL);
+
+ for (k = 0; k < max_order; k++) {
+ void *entry = xa_load(xa, (1UL << k) - 1);
+ if ((i < k) && (j < k))
+ XA_BUG_ON(xa, entry != NULL);
+ else
+ XA_BUG_ON(xa, entry != xa_mk_index(j));
+ }
+
+ xa_erase(xa, 0);
+ XA_BUG_ON(xa, !xa_empty(xa));
+ }
+ }
+
+ for (i = 0; i < 20; i++) {
+ check_multi_store_1(xa, 200, i);
+ check_multi_store_1(xa, 0, i);
+ check_multi_store_1(xa, (1UL << i) + 1, i);
+ }
+ check_multi_store_2(xa, 4095, 9);
+
+ for (i = 1; i < 20; i++) {
+ check_multi_store_3(xa, 0, i);
+ check_multi_store_3(xa, 1UL << i, i);
+ }
+#endif
+}
+
+static noinline void check_xa_alloc_1(struct xarray *xa, unsigned int base)
+{
+ int i;
+ u32 id;
+
+ XA_BUG_ON(xa, !xa_empty(xa));
+ /* An empty array should assign %base to the first alloc */
+ xa_alloc_index(xa, base, GFP_KERNEL);
+
+ /* Erasing it should make the array empty again */
+ xa_erase_index(xa, base);
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+ /* And it should assign %base again */
+ xa_alloc_index(xa, base, GFP_KERNEL);
+
+ /* Allocating and then erasing a lot should not lose base */
+ for (i = base + 1; i < 2 * XA_CHUNK_SIZE; i++)
+ xa_alloc_index(xa, i, GFP_KERNEL);
+ for (i = base; i < 2 * XA_CHUNK_SIZE; i++)
+ xa_erase_index(xa, i);
+ xa_alloc_index(xa, base, GFP_KERNEL);
+
+ /* Destroying the array should do the same as erasing */
+ xa_destroy(xa);
+
+ /* And it should assign %base again */
+ xa_alloc_index(xa, base, GFP_KERNEL);
+
+ /* The next assigned ID should be base+1 */
+ xa_alloc_index(xa, base + 1, GFP_KERNEL);
+ xa_erase_index(xa, base + 1);
+
+ /* Storing a value should mark it used */
+ xa_store_index(xa, base + 1, GFP_KERNEL);
+ xa_alloc_index(xa, base + 2, GFP_KERNEL);
+
+ /* If we then erase base, it should be free */
+ xa_erase_index(xa, base);
+ xa_alloc_index(xa, base, GFP_KERNEL);
+
+ xa_erase_index(xa, base + 1);
+ xa_erase_index(xa, base + 2);
+
+ for (i = 1; i < 5000; i++) {
+ xa_alloc_index(xa, base + i, GFP_KERNEL);
+ }
+
+ xa_destroy(xa);
+
+ /* Check that we fail properly at the limit of allocation */
+ XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(UINT_MAX - 1),
+ XA_LIMIT(UINT_MAX - 1, UINT_MAX),
+ GFP_KERNEL) != 0);
+ XA_BUG_ON(xa, id != 0xfffffffeU);
+ XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(UINT_MAX),
+ XA_LIMIT(UINT_MAX - 1, UINT_MAX),
+ GFP_KERNEL) != 0);
+ XA_BUG_ON(xa, id != 0xffffffffU);
+ id = 3;
+ XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(0),
+ XA_LIMIT(UINT_MAX - 1, UINT_MAX),
+ GFP_KERNEL) != -EBUSY);
+ XA_BUG_ON(xa, id != 3);
+ xa_destroy(xa);
+
+ XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(10), XA_LIMIT(10, 5),
+ GFP_KERNEL) != -EBUSY);
+ XA_BUG_ON(xa, xa_store_index(xa, 3, GFP_KERNEL) != 0);
+ XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(10), XA_LIMIT(10, 5),
+ GFP_KERNEL) != -EBUSY);
+ xa_erase_index(xa, 3);
+ XA_BUG_ON(xa, !xa_empty(xa));
+}
+
+static noinline void check_xa_alloc_2(struct xarray *xa, unsigned int base)
+{
+ unsigned int i, id;
+ unsigned long index;
+ void *entry;
+
+ /* Allocate and free a NULL and check xa_empty() behaves */
+ XA_BUG_ON(xa, !xa_empty(xa));
+ XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0);
+ XA_BUG_ON(xa, id != base);
+ XA_BUG_ON(xa, xa_empty(xa));
+ XA_BUG_ON(xa, xa_erase(xa, id) != NULL);
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+ /* Ditto, but check destroy instead of erase */
+ XA_BUG_ON(xa, !xa_empty(xa));
+ XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0);
+ XA_BUG_ON(xa, id != base);
+ XA_BUG_ON(xa, xa_empty(xa));
+ xa_destroy(xa);
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+ for (i = base; i < base + 10; i++) {
+ XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b,
+ GFP_KERNEL) != 0);
+ XA_BUG_ON(xa, id != i);
+ }
+
+ XA_BUG_ON(xa, xa_store(xa, 3, xa_mk_index(3), GFP_KERNEL) != NULL);
+ XA_BUG_ON(xa, xa_store(xa, 4, xa_mk_index(4), GFP_KERNEL) != NULL);
+ XA_BUG_ON(xa, xa_store(xa, 4, NULL, GFP_KERNEL) != xa_mk_index(4));
+ XA_BUG_ON(xa, xa_erase(xa, 5) != NULL);
+ XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0);
+ XA_BUG_ON(xa, id != 5);
+
+ xa_for_each(xa, index, entry) {
+ xa_erase_index(xa, index);
+ }
+
+ for (i = base; i < base + 9; i++) {
+ XA_BUG_ON(xa, xa_erase(xa, i) != NULL);
+ XA_BUG_ON(xa, xa_empty(xa));
+ }
+ XA_BUG_ON(xa, xa_erase(xa, 8) != NULL);
+ XA_BUG_ON(xa, xa_empty(xa));
+ XA_BUG_ON(xa, xa_erase(xa, base + 9) != NULL);
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+ xa_destroy(xa);
+}
+
+static noinline void check_xa_alloc_3(struct xarray *xa, unsigned int base)
+{
+ struct xa_limit limit = XA_LIMIT(1, 0x3fff);
+ u32 next = 0;
+ unsigned int i, id;
+ unsigned long index;
+ void *entry;
+
+ XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(1), limit,
+ &next, GFP_KERNEL) != 0);
+ XA_BUG_ON(xa, id != 1);
+
+ next = 0x3ffd;
+ XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(0x3ffd), limit,
+ &next, GFP_KERNEL) != 0);
+ XA_BUG_ON(xa, id != 0x3ffd);
+ xa_erase_index(xa, 0x3ffd);
+ xa_erase_index(xa, 1);
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+ for (i = 0x3ffe; i < 0x4003; i++) {
+ if (i < 0x4000)
+ entry = xa_mk_index(i);
+ else
+ entry = xa_mk_index(i - 0x3fff);
+ XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, entry, limit,
+ &next, GFP_KERNEL) != (id == 1));
+ XA_BUG_ON(xa, xa_mk_index(id) != entry);
+ }
+
+ /* Check wrap-around is handled correctly */
+ if (base != 0)
+ xa_erase_index(xa, base);
+ xa_erase_index(xa, base + 1);
+ next = UINT_MAX;
+ XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(UINT_MAX),
+ xa_limit_32b, &next, GFP_KERNEL) != 0);
+ XA_BUG_ON(xa, id != UINT_MAX);
+ XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(base),
+ xa_limit_32b, &next, GFP_KERNEL) != 1);
+ XA_BUG_ON(xa, id != base);
+ XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(base + 1),
+ xa_limit_32b, &next, GFP_KERNEL) != 0);
+ XA_BUG_ON(xa, id != base + 1);
+
+ xa_for_each(xa, index, entry)
+ xa_erase_index(xa, index);
+
+ XA_BUG_ON(xa, !xa_empty(xa));
+}
+
+static DEFINE_XARRAY_ALLOC(xa0);
+static DEFINE_XARRAY_ALLOC1(xa1);
+
+static noinline void check_xa_alloc(void)
+{
+ check_xa_alloc_1(&xa0, 0);
+ check_xa_alloc_1(&xa1, 1);
+ check_xa_alloc_2(&xa0, 0);
+ check_xa_alloc_2(&xa1, 1);
+ check_xa_alloc_3(&xa0, 0);
+ check_xa_alloc_3(&xa1, 1);
+}
+
+static noinline void __check_store_iter(struct xarray *xa, unsigned long start,
+ unsigned int order, unsigned int present)
+{
+ XA_STATE_ORDER(xas, xa, start, order);
+ void *entry;
+ unsigned int count = 0;
+
+retry:
+ xas_lock(&xas);
+ xas_for_each_conflict(&xas, entry) {
+ XA_BUG_ON(xa, !xa_is_value(entry));
+ XA_BUG_ON(xa, entry < xa_mk_index(start));
+ XA_BUG_ON(xa, entry > xa_mk_index(start + (1UL << order) - 1));
+ count++;
+ }
+ xas_store(&xas, xa_mk_index(start));
+ xas_unlock(&xas);
+ if (xas_nomem(&xas, GFP_KERNEL)) {
+ count = 0;
+ goto retry;
+ }
+ XA_BUG_ON(xa, xas_error(&xas));
+ XA_BUG_ON(xa, count != present);
+ XA_BUG_ON(xa, xa_load(xa, start) != xa_mk_index(start));
+ XA_BUG_ON(xa, xa_load(xa, start + (1UL << order) - 1) !=
+ xa_mk_index(start));
+ xa_erase_index(xa, start);
+}
+
+static noinline void check_store_iter(struct xarray *xa)
+{
+ unsigned int i, j;
+ unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;
+
+ for (i = 0; i < max_order; i++) {
+ unsigned int min = 1 << i;
+ unsigned int max = (2 << i) - 1;
+ __check_store_iter(xa, 0, i, 0);
+ XA_BUG_ON(xa, !xa_empty(xa));
+ __check_store_iter(xa, min, i, 0);
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+ xa_store_index(xa, min, GFP_KERNEL);
+ __check_store_iter(xa, min, i, 1);
+ XA_BUG_ON(xa, !xa_empty(xa));
+ xa_store_index(xa, max, GFP_KERNEL);
+ __check_store_iter(xa, min, i, 1);
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+ for (j = 0; j < min; j++)
+ xa_store_index(xa, j, GFP_KERNEL);
+ __check_store_iter(xa, 0, i, min);
+ XA_BUG_ON(xa, !xa_empty(xa));
+ for (j = 0; j < min; j++)
+ xa_store_index(xa, min + j, GFP_KERNEL);
+ __check_store_iter(xa, min, i, min);
+ XA_BUG_ON(xa, !xa_empty(xa));
+ }
+#ifdef CONFIG_XARRAY_MULTI
+ xa_store_index(xa, 63, GFP_KERNEL);
+ xa_store_index(xa, 65, GFP_KERNEL);
+ __check_store_iter(xa, 64, 2, 1);
+ xa_erase_index(xa, 63);
+#endif
+ XA_BUG_ON(xa, !xa_empty(xa));
+}
+
+static noinline void check_multi_find_1(struct xarray *xa, unsigned order)
+{
+#ifdef CONFIG_XARRAY_MULTI
+ unsigned long multi = 3 << order;
+ unsigned long next = 4 << order;
+ unsigned long index;
+
+ xa_store_order(xa, multi, order, xa_mk_value(multi), GFP_KERNEL);
+ XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL) != NULL);
+ XA_BUG_ON(xa, xa_store_index(xa, next + 1, GFP_KERNEL) != NULL);
+
+ index = 0;
+ XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
+ xa_mk_value(multi));
+ XA_BUG_ON(xa, index != multi);
+ index = multi + 1;
+ XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
+ xa_mk_value(multi));
+ XA_BUG_ON(xa, (index < multi) || (index >= next));
+ XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT) !=
+ xa_mk_value(next));
+ XA_BUG_ON(xa, index != next);
+ XA_BUG_ON(xa, xa_find_after(xa, &index, next, XA_PRESENT) != NULL);
+ XA_BUG_ON(xa, index != next);
+
+ xa_erase_index(xa, multi);
+ xa_erase_index(xa, next);
+ xa_erase_index(xa, next + 1);
+ XA_BUG_ON(xa, !xa_empty(xa));
+#endif
+}
+
+static noinline void check_multi_find_2(struct xarray *xa)
+{
+ unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 10 : 1;
+ unsigned int i, j;
+ void *entry;
+
+ for (i = 0; i < max_order; i++) {
+ unsigned long index = 1UL << i;
+ for (j = 0; j < index; j++) {
+ XA_STATE(xas, xa, j + index);
+ xa_store_index(xa, index - 1, GFP_KERNEL);
+ xa_store_order(xa, index, i, xa_mk_index(index),
+ GFP_KERNEL);
+ rcu_read_lock();
+ xas_for_each(&xas, entry, ULONG_MAX) {
+ xa_erase_index(xa, index);
+ }
+ rcu_read_unlock();
+ xa_erase_index(xa, index - 1);
+ XA_BUG_ON(xa, !xa_empty(xa));
+ }
+ }
+}
+
+static noinline void check_multi_find_3(struct xarray *xa)
+{
+ unsigned int order;
+
+ for (order = 5; order < order_limit; order++) {
+ unsigned long index = 1UL << (order - 5);
+
+ XA_BUG_ON(xa, !xa_empty(xa));
+ xa_store_order(xa, 0, order - 4, xa_mk_index(0), GFP_KERNEL);
+ XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT));
+ xa_erase_index(xa, 0);
+ }
+}
+
+static noinline void check_find_1(struct xarray *xa)
+{
+ unsigned long i, j, k;
+
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+ /*
+ * Check xa_find with all pairs between 0 and 99 inclusive,
+ * starting at every index between 0 and 99
+ */
+ for (i = 0; i < 100; i++) {
+ XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);
+ xa_set_mark(xa, i, XA_MARK_0);
+ for (j = 0; j < i; j++) {
+ XA_BUG_ON(xa, xa_store_index(xa, j, GFP_KERNEL) !=
+ NULL);
+ xa_set_mark(xa, j, XA_MARK_0);
+ for (k = 0; k < 100; k++) {
+ unsigned long index = k;
+ void *entry = xa_find(xa, &index, ULONG_MAX,
+ XA_PRESENT);
+ if (k <= j)
+ XA_BUG_ON(xa, index != j);
+ else if (k <= i)
+ XA_BUG_ON(xa, index != i);
+ else
+ XA_BUG_ON(xa, entry != NULL);
+
+ index = k;
+ entry = xa_find(xa, &index, ULONG_MAX,
+ XA_MARK_0);
+ if (k <= j)
+ XA_BUG_ON(xa, index != j);
+ else if (k <= i)
+ XA_BUG_ON(xa, index != i);
+ else
+ XA_BUG_ON(xa, entry != NULL);
+ }
+ xa_erase_index(xa, j);
+ XA_BUG_ON(xa, xa_get_mark(xa, j, XA_MARK_0));
+ XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0));
+ }
+ xa_erase_index(xa, i);
+ XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_0));
+ }
+ XA_BUG_ON(xa, !xa_empty(xa));
+}
+
+static noinline void check_find_2(struct xarray *xa)
+{
+ void *entry;
+ unsigned long i, j, index;
+
+ xa_for_each(xa, index, entry) {
+ XA_BUG_ON(xa, true);
+ }
+
+ for (i = 0; i < 1024; i++) {
+ xa_store_index(xa, i, GFP_KERNEL);
+ j = 0;
+ xa_for_each(xa, index, entry) {
+ XA_BUG_ON(xa, xa_mk_index(index) != entry);
+ XA_BUG_ON(xa, index != j++);
+ }
+ }
+
+ xa_destroy(xa);
+}
+
+static noinline void check_find_3(struct xarray *xa)
+{
+ XA_STATE(xas, xa, 0);
+ unsigned long i, j, k;
+ void *entry;
+
+ for (i = 0; i < 100; i++) {
+ for (j = 0; j < 100; j++) {
+ rcu_read_lock();
+ for (k = 0; k < 100; k++) {
+ xas_set(&xas, j);
+ xas_for_each_marked(&xas, entry, k, XA_MARK_0)
+ ;
+ if (j > k)
+ XA_BUG_ON(xa,
+ xas.xa_node != XAS_RESTART);
+ }
+ rcu_read_unlock();
+ }
+ xa_store_index(xa, i, GFP_KERNEL);
+ xa_set_mark(xa, i, XA_MARK_0);
+ }
+ xa_destroy(xa);
+}
+
+static noinline void check_find_4(struct xarray *xa)
+{
+ unsigned long index = 0;
+ void *entry;
+
+ xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
+
+ entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT);
+ XA_BUG_ON(xa, entry != xa_mk_index(ULONG_MAX));
+
+ entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT);
+ XA_BUG_ON(xa, entry);
+
+ xa_erase_index(xa, ULONG_MAX);
+}
+
+static noinline void check_find(struct xarray *xa)
+{
+ unsigned i;
+
+ check_find_1(xa);
+ check_find_2(xa);
+ check_find_3(xa);
+ check_find_4(xa);
+
+ for (i = 2; i < 10; i++)
+ check_multi_find_1(xa, i);
+ check_multi_find_2(xa);
+ check_multi_find_3(xa);
+}
+
+/* See find_swap_entry() in mm/shmem.c */
+static noinline unsigned long xa_find_entry(struct xarray *xa, void *item)
+{
+ XA_STATE(xas, xa, 0);
+ unsigned int checked = 0;
+ void *entry;
+
+ rcu_read_lock();
+ xas_for_each(&xas, entry, ULONG_MAX) {
+ if (xas_retry(&xas, entry))
+ continue;
+ if (entry == item)
+ break;
+ checked++;
+ if ((checked % 4) != 0)
+ continue;
+ xas_pause(&xas);
+ }
+ rcu_read_unlock();
+
+ return entry ? xas.xa_index : -1;
+}
+
+static noinline void check_find_entry(struct xarray *xa)
+{
+#ifdef CONFIG_XARRAY_MULTI
+ unsigned int order;
+ unsigned long offset, index;
+
+ for (order = 0; order < 20; order++) {
+ for (offset = 0; offset < (1UL << (order + 3));
+ offset += (1UL << order)) {
+ for (index = 0; index < (1UL << (order + 5));
+ index += (1UL << order)) {
+ xa_store_order(xa, index, order,
+ xa_mk_index(index), GFP_KERNEL);
+ XA_BUG_ON(xa, xa_load(xa, index) !=
+ xa_mk_index(index));
+ XA_BUG_ON(xa, xa_find_entry(xa,
+ xa_mk_index(index)) != index);
+ }
+ XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
+ xa_destroy(xa);
+ }
+ }
+#endif
+
+ XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
+ xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
+ XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
+ XA_BUG_ON(xa, xa_find_entry(xa, xa_mk_index(ULONG_MAX)) != -1);
+ xa_erase_index(xa, ULONG_MAX);
+ XA_BUG_ON(xa, !xa_empty(xa));
+}
+
+static noinline void check_pause(struct xarray *xa)
+{
+ XA_STATE(xas, xa, 0);
+ void *entry;
+ unsigned int order;
+ unsigned long index = 1;
+ unsigned int count = 0;
+
+ for (order = 0; order < order_limit; order++) {
+ XA_BUG_ON(xa, xa_store_order(xa, index, order,
+ xa_mk_index(index), GFP_KERNEL));
+ index += 1UL << order;
+ }
+
+ rcu_read_lock();
+ xas_for_each(&xas, entry, ULONG_MAX) {
+ XA_BUG_ON(xa, entry != xa_mk_index(1UL << count));
+ count++;
+ }
+ rcu_read_unlock();
+ XA_BUG_ON(xa, count != order_limit);
+
+ count = 0;
+ xas_set(&xas, 0);
+ rcu_read_lock();
+ xas_for_each(&xas, entry, ULONG_MAX) {
+ XA_BUG_ON(xa, entry != xa_mk_index(1UL << count));
+ count++;
+ xas_pause(&xas);
+ }
+ rcu_read_unlock();
+ XA_BUG_ON(xa, count != order_limit);
+
+ xa_destroy(xa);
+}
+
+static noinline void check_move_tiny(struct xarray *xa)
+{
+ XA_STATE(xas, xa, 0);
+
+ XA_BUG_ON(xa, !xa_empty(xa));
+ rcu_read_lock();
+ XA_BUG_ON(xa, xas_next(&xas) != NULL);
+ XA_BUG_ON(xa, xas_next(&xas) != NULL);
+ rcu_read_unlock();
+ xa_store_index(xa, 0, GFP_KERNEL);
+ rcu_read_lock();
+ xas_set(&xas, 0);
+ XA_BUG_ON(xa, xas_next(&xas) != xa_mk_index(0));
+ XA_BUG_ON(xa, xas_next(&xas) != NULL);
+ xas_set(&xas, 0);
+ XA_BUG_ON(xa, xas_prev(&xas) != xa_mk_index(0));
+ XA_BUG_ON(xa, xas_prev(&xas) != NULL);
+ rcu_read_unlock();
+ xa_erase_index(xa, 0);
+ XA_BUG_ON(xa, !xa_empty(xa));
+}
+
+static noinline void check_move_max(struct xarray *xa)
+{
+ XA_STATE(xas, xa, 0);
+
+ xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
+ rcu_read_lock();
+ XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_index(ULONG_MAX));
+ XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != NULL);
+ rcu_read_unlock();
+
+ xas_set(&xas, 0);
+ rcu_read_lock();
+ XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_index(ULONG_MAX));
+ xas_pause(&xas);
+ XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != NULL);
+ rcu_read_unlock();
+
+ xa_erase_index(xa, ULONG_MAX);
+ XA_BUG_ON(xa, !xa_empty(xa));
+}
+
+static noinline void check_move_small(struct xarray *xa, unsigned long idx)
+{
+ XA_STATE(xas, xa, 0);
+ unsigned long i;
+
+ xa_store_index(xa, 0, GFP_KERNEL);
+ xa_store_index(xa, idx, GFP_KERNEL);
+
+ rcu_read_lock();
+ for (i = 0; i < idx * 4; i++) {
+ void *entry = xas_next(&xas);
+ if (i <= idx)
+ XA_BUG_ON(xa, xas.xa_node == XAS_RESTART);
+ XA_BUG_ON(xa, xas.xa_index != i);
+ if (i == 0 || i == idx)
+ XA_BUG_ON(xa, entry != xa_mk_index(i));
+ else
+ XA_BUG_ON(xa, entry != NULL);
+ }
+ xas_next(&xas);
+ XA_BUG_ON(xa, xas.xa_index != i);
+
+ do {
+ void *entry = xas_prev(&xas);
+ i--;
+ if (i <= idx)
+ XA_BUG_ON(xa, xas.xa_node == XAS_RESTART);
+ XA_BUG_ON(xa, xas.xa_index != i);
+ if (i == 0 || i == idx)
+ XA_BUG_ON(xa, entry != xa_mk_index(i));
+ else
+ XA_BUG_ON(xa, entry != NULL);
+ } while (i > 0);
+
+ xas_set(&xas, ULONG_MAX);
+ XA_BUG_ON(xa, xas_next(&xas) != NULL);
+ XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);
+ XA_BUG_ON(xa, xas_next(&xas) != xa_mk_value(0));
+ XA_BUG_ON(xa, xas.xa_index != 0);
+ XA_BUG_ON(xa, xas_prev(&xas) != NULL);
+ XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);
+ rcu_read_unlock();
+
+ xa_erase_index(xa, 0);
+ xa_erase_index(xa, idx);
+ XA_BUG_ON(xa, !xa_empty(xa));
+}
+
+static noinline void check_move(struct xarray *xa)
+{
+ XA_STATE(xas, xa, (1 << 16) - 1);
+ unsigned long i;
+
+ for (i = 0; i < (1 << 16); i++)
+ XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);
+
+ rcu_read_lock();
+ do {
+ void *entry = xas_prev(&xas);
+ i--;
+ XA_BUG_ON(xa, entry != xa_mk_index(i));
+ XA_BUG_ON(xa, i != xas.xa_index);
+ } while (i != 0);
+
+ XA_BUG_ON(xa, xas_prev(&xas) != NULL);
+ XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);
+
+ do {
+ void *entry = xas_next(&xas);
+ XA_BUG_ON(xa, entry != xa_mk_index(i));
+ XA_BUG_ON(xa, i != xas.xa_index);
+ i++;
+ } while (i < (1 << 16));
+ rcu_read_unlock();
+
+ for (i = (1 << 8); i < (1 << 15); i++)
+ xa_erase_index(xa, i);
+
+ i = xas.xa_index;
+
+ rcu_read_lock();
+ do {
+ void *entry = xas_prev(&xas);
+ i--;
+ if ((i < (1 << 8)) || (i >= (1 << 15)))
+ XA_BUG_ON(xa, entry != xa_mk_index(i));
+ else
+ XA_BUG_ON(xa, entry != NULL);
+ XA_BUG_ON(xa, i != xas.xa_index);
+ } while (i != 0);
+
+ XA_BUG_ON(xa, xas_prev(&xas) != NULL);
+ XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);
+
+ do {
+ void *entry = xas_next(&xas);
+ if ((i < (1 << 8)) || (i >= (1 << 15)))
+ XA_BUG_ON(xa, entry != xa_mk_index(i));
+ else
+ XA_BUG_ON(xa, entry != NULL);
+ XA_BUG_ON(xa, i != xas.xa_index);
+ i++;
+ } while (i < (1 << 16));
+ rcu_read_unlock();
+
+ xa_destroy(xa);
+
+ check_move_tiny(xa);
+ check_move_max(xa);
+
+ for (i = 0; i < 16; i++)
+ check_move_small(xa, 1UL << i);
+
+ for (i = 2; i < 16; i++)
+ check_move_small(xa, (1UL << i) - 1);
+}
+
+static noinline void xa_store_many_order(struct xarray *xa,
+ unsigned long index, unsigned order)
+{
+ XA_STATE_ORDER(xas, xa, index, order);
+ unsigned int i = 0;
+
+ do {
+ xas_lock(&xas);
+ XA_BUG_ON(xa, xas_find_conflict(&xas));
+ xas_create_range(&xas);
+ if (xas_error(&xas))
+ goto unlock;
+ for (i = 0; i < (1U << order); i++) {
+ XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(index + i)));
+ xas_next(&xas);
+ }
+unlock:
+ xas_unlock(&xas);
+ } while (xas_nomem(&xas, GFP_KERNEL));
+
+ XA_BUG_ON(xa, xas_error(&xas));
+}
+
+static noinline void check_create_range_1(struct xarray *xa,
+ unsigned long index, unsigned order)
+{
+ unsigned long i;
+
+ xa_store_many_order(xa, index, order);
+ for (i = index; i < index + (1UL << order); i++)
+ xa_erase_index(xa, i);
+ XA_BUG_ON(xa, !xa_empty(xa));
+}
+
+static noinline void check_create_range_2(struct xarray *xa, unsigned order)
+{
+ unsigned long i;
+ unsigned long nr = 1UL << order;
+
+ for (i = 0; i < nr * nr; i += nr)
+ xa_store_many_order(xa, i, order);
+ for (i = 0; i < nr * nr; i++)
+ xa_erase_index(xa, i);
+ XA_BUG_ON(xa, !xa_empty(xa));
+}
+
+static noinline void check_create_range_3(void)
+{
+ XA_STATE(xas, NULL, 0);
+ xas_set_err(&xas, -EEXIST);
+ xas_create_range(&xas);
+ XA_BUG_ON(NULL, xas_error(&xas) != -EEXIST);
+}
+
+static noinline void check_create_range_4(struct xarray *xa,
+ unsigned long index, unsigned order)
+{
+ XA_STATE_ORDER(xas, xa, index, order);
+ unsigned long base = xas.xa_index;
+ unsigned long i = 0;
+
+ xa_store_index(xa, index, GFP_KERNEL);
+ do {
+ xas_lock(&xas);
+ xas_create_range(&xas);
+ if (xas_error(&xas))
+ goto unlock;
+ for (i = 0; i < (1UL << order); i++) {
+ void *old = xas_store(&xas, xa_mk_index(base + i));
+ if (xas.xa_index == index)
+ XA_BUG_ON(xa, old != xa_mk_index(base + i));
+ else
+ XA_BUG_ON(xa, old != NULL);
+ xas_next(&xas);
+ }
+unlock:
+ xas_unlock(&xas);
+ } while (xas_nomem(&xas, GFP_KERNEL));
+
+ XA_BUG_ON(xa, xas_error(&xas));
+
+ for (i = base; i < base + (1UL << order); i++)
+ xa_erase_index(xa, i);
+ XA_BUG_ON(xa, !xa_empty(xa));
+}
+
+static noinline void check_create_range_5(struct xarray *xa,
+ unsigned long index, unsigned int order)
+{
+ XA_STATE_ORDER(xas, xa, index, order);
+ unsigned int i;
+
+ xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);
+
+ for (i = 0; i < order + 10; i++) {
+ do {
+ xas_lock(&xas);
+ xas_create_range(&xas);
+ xas_unlock(&xas);
+ } while (xas_nomem(&xas, GFP_KERNEL));
+ }
+
+ xa_destroy(xa);
+}
+
+static noinline void check_create_range(struct xarray *xa)
+{
+ unsigned int order;
+ unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 12 : 1;
+
+ for (order = 0; order < max_order; order++) {
+ check_create_range_1(xa, 0, order);
+ check_create_range_1(xa, 1U << order, order);
+ check_create_range_1(xa, 2U << order, order);
+ check_create_range_1(xa, 3U << order, order);
+ check_create_range_1(xa, 1U << 24, order);
+ if (order < 10)
+ check_create_range_2(xa, order);
+
+ check_create_range_4(xa, 0, order);
+ check_create_range_4(xa, 1U << order, order);
+ check_create_range_4(xa, 2U << order, order);
+ check_create_range_4(xa, 3U << order, order);
+ check_create_range_4(xa, 1U << 24, order);
+
+ check_create_range_4(xa, 1, order);
+ check_create_range_4(xa, (1U << order) + 1, order);
+ check_create_range_4(xa, (2U << order) + 1, order);
+ check_create_range_4(xa, (2U << order) - 1, order);
+ check_create_range_4(xa, (3U << order) + 1, order);
+ check_create_range_4(xa, (3U << order) - 1, order);
+ check_create_range_4(xa, (1U << 24) + 1, order);
+
+ check_create_range_5(xa, 0, order);
+ check_create_range_5(xa, (1U << order), order);
+ }
+
+ check_create_range_3();
+}
+
+static noinline void __check_store_range(struct xarray *xa, unsigned long first,
+ unsigned long last)
+{
+#ifdef CONFIG_XARRAY_MULTI
+ xa_store_range(xa, first, last, xa_mk_index(first), GFP_KERNEL);
+
+ XA_BUG_ON(xa, xa_load(xa, first) != xa_mk_index(first));
+ XA_BUG_ON(xa, xa_load(xa, last) != xa_mk_index(first));
+ XA_BUG_ON(xa, xa_load(xa, first - 1) != NULL);
+ XA_BUG_ON(xa, xa_load(xa, last + 1) != NULL);
+
+ xa_store_range(xa, first, last, NULL, GFP_KERNEL);
+#endif
+
+ XA_BUG_ON(xa, !xa_empty(xa));
+}
+
+static noinline void check_store_range(struct xarray *xa)
+{
+ unsigned long i, j;
+
+ for (i = 0; i < 128; i++) {
+ for (j = i; j < 128; j++) {
+ __check_store_range(xa, i, j);
+ __check_store_range(xa, 128 + i, 128 + j);
+ __check_store_range(xa, 4095 + i, 4095 + j);
+ __check_store_range(xa, 4096 + i, 4096 + j);
+ __check_store_range(xa, 123456 + i, 123456 + j);
+ __check_store_range(xa, (1 << 24) + i, (1 << 24) + j);
+ }
+ }
+}
+
+#ifdef CONFIG_XARRAY_MULTI
+static void check_split_1(struct xarray *xa, unsigned long index,
+ unsigned int order, unsigned int new_order)
+{
+ XA_STATE_ORDER(xas, xa, index, new_order);
+ unsigned int i;
+
+ xa_store_order(xa, index, order, xa, GFP_KERNEL);
+
+ xas_split_alloc(&xas, xa, order, GFP_KERNEL);
+ xas_lock(&xas);
+ xas_split(&xas, xa, order);
+ for (i = 0; i < (1 << order); i += (1 << new_order))
+ __xa_store(xa, index + i, xa_mk_index(index + i), 0);
+ xas_unlock(&xas);
+
+ for (i = 0; i < (1 << order); i++) {
+ unsigned int val = index + (i & ~((1 << new_order) - 1));
+ XA_BUG_ON(xa, xa_load(xa, index + i) != xa_mk_index(val));
+ }
+
+ xa_set_mark(xa, index, XA_MARK_0);
+ XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0));
+
+ xa_destroy(xa);
+}
+
+static noinline void check_split(struct xarray *xa)
+{
+ unsigned int order, new_order;
+
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+ for (order = 1; order < 2 * XA_CHUNK_SHIFT; order++) {
+ for (new_order = 0; new_order < order; new_order++) {
+ check_split_1(xa, 0, order, new_order);
+ check_split_1(xa, 1UL << order, order, new_order);
+ check_split_1(xa, 3UL << order, order, new_order);
+ }
+ }
+}
+#else
+static void check_split(struct xarray *xa) { }
+#endif
+
+static void check_align_1(struct xarray *xa, char *name)
+{
+ int i;
+ unsigned int id;
+ unsigned long index;
+ void *entry;
+
+ for (i = 0; i < 8; i++) {
+ XA_BUG_ON(xa, xa_alloc(xa, &id, name + i, xa_limit_32b,
+ GFP_KERNEL) != 0);
+ XA_BUG_ON(xa, id != i);
+ }
+ xa_for_each(xa, index, entry)
+ XA_BUG_ON(xa, xa_is_err(entry));
+ xa_destroy(xa);
+}
+
+/*
+ * We should always be able to store without allocating memory after
+ * reserving a slot.
+ */
+static void check_align_2(struct xarray *xa, char *name)
+{
+ int i;
+
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+ for (i = 0; i < 8; i++) {
+ XA_BUG_ON(xa, xa_store(xa, 0, name + i, GFP_KERNEL) != NULL);
+ xa_erase(xa, 0);
+ }
+
+ for (i = 0; i < 8; i++) {
+ XA_BUG_ON(xa, xa_reserve(xa, 0, GFP_KERNEL) != 0);
+ XA_BUG_ON(xa, xa_store(xa, 0, name + i, 0) != NULL);
+ xa_erase(xa, 0);
+ }
+
+ XA_BUG_ON(xa, !xa_empty(xa));
+}
+
+static noinline void check_align(struct xarray *xa)
+{
+ char name[] = "Motorola 68000";
+
+ check_align_1(xa, name);
+ check_align_1(xa, name + 1);
+ check_align_1(xa, name + 2);
+ check_align_1(xa, name + 3);
+ check_align_2(xa, name);
+}
+
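+/*
+ * Nodes whose slots contain only value entries are kept on this list,
+ * loosely mimicking the shadow node bookkeeping in mm/workingset.c,
+ * so that xa_delete_node() can be exercised by shadow_remove().
+ */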
+static LIST_HEAD(shadow_nodes);
+
+static void test_update_node(struct xa_node *node)
+{
+ if (node->count && node->count == node->nr_values) {
+ if (list_empty(&node->private_list))
+ list_add(&shadow_nodes, &node->private_list);
+ } else {
+ if (!list_empty(&node->private_list))
+ list_del_init(&node->private_list);
+ }
+}
+
+static noinline void shadow_remove(struct xarray *xa)
+{
+ struct xa_node *node;
+
+ xa_lock(xa);
+ while ((node = list_first_entry_or_null(&shadow_nodes,
+ struct xa_node, private_list))) {
+ XA_BUG_ON(xa, node->array != xa);
+ list_del_init(&node->private_list);
+ xa_delete_node(node, test_update_node);
+ }
+ xa_unlock(xa);
+}
+
+static noinline void check_workingset(struct xarray *xa, unsigned long index)
+{
+ XA_STATE(xas, xa, index);
+ xas_set_update(&xas, test_update_node);
+
+ do {
+ xas_lock(&xas);
+ xas_store(&xas, xa_mk_value(0));
+ xas_next(&xas);
+ xas_store(&xas, xa_mk_value(1));
+ xas_unlock(&xas);
+ } while (xas_nomem(&xas, GFP_KERNEL));
+
+ XA_BUG_ON(xa, list_empty(&shadow_nodes));
+
+ xas_lock(&xas);
+ xas_next(&xas);
+ xas_store(&xas, &xas);
+ XA_BUG_ON(xa, !list_empty(&shadow_nodes));
+
+ xas_store(&xas, xa_mk_value(2));
+ xas_unlock(&xas);
+ XA_BUG_ON(xa, list_empty(&shadow_nodes));
+
+ shadow_remove(xa);
+ XA_BUG_ON(xa, !list_empty(&shadow_nodes));
+ XA_BUG_ON(xa, !xa_empty(xa));
+}
+
+/*
+ * Check that the pointer / value / sibling entries are accounted the
+ * way we expect them to be.
+ */
+static noinline void check_account(struct xarray *xa)
+{
+#ifdef CONFIG_XARRAY_MULTI
+ unsigned int order;
+
+ for (order = 1; order < 12; order++) {
+ XA_STATE(xas, xa, 1 << order);
+
+ xa_store_order(xa, 0, order, xa, GFP_KERNEL);
+ rcu_read_lock();
+ xas_load(&xas);
+ XA_BUG_ON(xa, xas.xa_node->count == 0);
+ XA_BUG_ON(xa, xas.xa_node->count > (1 << order));
+ XA_BUG_ON(xa, xas.xa_node->nr_values != 0);
+ rcu_read_unlock();
+
+ xa_store_order(xa, 1 << order, order, xa_mk_index(1UL << order),
+ GFP_KERNEL);
+ XA_BUG_ON(xa, xas.xa_node->count != xas.xa_node->nr_values * 2);
+
+ xa_erase(xa, 1 << order);
+ XA_BUG_ON(xa, xas.xa_node->nr_values != 0);
+
+ xa_erase(xa, 0);
+ XA_BUG_ON(xa, !xa_empty(xa));
+ }
+#endif
+}
+
+static noinline void check_get_order(struct xarray *xa)
+{
+ unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;
+ unsigned int order;
+ unsigned long i, j;
+
+ for (i = 0; i < 3; i++)
+ XA_BUG_ON(xa, xa_get_order(xa, i) != 0);
+
+ for (order = 0; order < max_order; order++) {
+ for (i = 0; i < 10; i++) {
+ xa_store_order(xa, i << order, order,
+ xa_mk_index(i << order), GFP_KERNEL);
+ for (j = i << order; j < (i + 1) << order; j++)
+ XA_BUG_ON(xa, xa_get_order(xa, j) != order);
+ xa_erase(xa, i << order);
+ }
+ }
+}
+
+static noinline void check_destroy(struct xarray *xa)
+{
+ unsigned long index;
+
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+ /* Destroying an empty array is a no-op */
+ xa_destroy(xa);
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+ /* Destroying an array with a single entry */
+ for (index = 0; index < 1000; index++) {
+ xa_store_index(xa, index, GFP_KERNEL);
+ XA_BUG_ON(xa, xa_empty(xa));
+ xa_destroy(xa);
+ XA_BUG_ON(xa, !xa_empty(xa));
+ }
+
+ /* Destroying an array with a single entry at ULONG_MAX */
+ xa_store(xa, ULONG_MAX, xa, GFP_KERNEL);
+ XA_BUG_ON(xa, xa_empty(xa));
+ xa_destroy(xa);
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+#ifdef CONFIG_XARRAY_MULTI
+ /* Destroying an array with a multi-index entry */
+ xa_store_order(xa, 1 << 11, 11, xa, GFP_KERNEL);
+ XA_BUG_ON(xa, xa_empty(xa));
+ xa_destroy(xa);
+ XA_BUG_ON(xa, !xa_empty(xa));
+#endif
+}
+
+static DEFINE_XARRAY(array);
+
+static int xarray_checks(void)
+{
+ check_xa_err(&array);
+ check_xas_retry(&array);
+ check_xa_load(&array);
+ check_xa_mark(&array);
+ check_xa_shrink(&array);
+ check_xas_erase(&array);
+ check_insert(&array);
+ check_cmpxchg(&array);
+ check_reserve(&array);
+ check_reserve(&xa0);
+ check_multi_store(&array);
+ check_get_order(&array);
+ check_xa_alloc();
+ check_find(&array);
+ check_find_entry(&array);
+ check_pause(&array);
+ check_account(&array);
+ check_destroy(&array);
+ check_move(&array);
+ check_create_range(&array);
+ check_store_range(&array);
+ check_store_iter(&array);
+ check_align(&xa0);
+ check_split(&array);
+
+ check_workingset(&array, 0);
+ check_workingset(&array, 64);
+ check_workingset(&array, 4096);
+
+ printk("XArray: %u of %u tests passed\n", tests_passed, tests_run);
+ return (tests_run == tests_passed) ? 0 : -EINVAL;
+}
+
+static void xarray_exit(void)
+{
+}
+
+module_init(xarray_checks);
+module_exit(xarray_exit);
+MODULE_AUTHOR("Matthew Wilcox <willy@infradead.org>");
+MODULE_LICENSE("GPL");
diff --git a/lib/textsearch.c b/lib/textsearch.c
new file mode 100644
index 000000000..f68dea880
--- /dev/null
+++ b/lib/textsearch.c
@@ -0,0 +1,323 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * lib/textsearch.c Generic text search interface
+ *
+ * Authors: Thomas Graf <tgraf@suug.ch>
+ * Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * ==========================================================================
+ */
+
+/**
+ * DOC: ts_intro
+ * INTRODUCTION
+ *
+ * The textsearch infrastructure provides text searching facilities for
+ * both linear and non-linear data. Individual search algorithms are
+ * implemented in modules and chosen by the user.
+ *
+ * ARCHITECTURE
+ *
+ * .. code-block:: none
+ *
+ * User
+ * +----------------+
+ * | finish()|<--------------(6)-----------------+
+ * |get_next_block()|<--------------(5)---------------+ |
+ * | | Algorithm | |
+ * | | +------------------------------+
+ * | | | init() find() destroy() |
+ * | | +------------------------------+
+ * | | Core API ^ ^ ^
+ * | | +---------------+ (2) (4) (8)
+ * | (1)|----->| prepare() |---+ | |
+ * | (3)|----->| find()/next() |-----------+ |
+ * | (7)|----->| destroy() |----------------------+
+ * +----------------+ +---------------+
+ *
+ * (1) User configures a search by calling textsearch_prepare() specifying
+ * the search parameters such as the pattern and algorithm name.
+ * (2) Core requests the algorithm to allocate and initialize a search
+ * configuration according to the specified parameters.
+ * (3) User starts the search(es) by calling textsearch_find() or
+ * textsearch_next() to fetch subsequent occurrences. A state variable
+ * is provided to the algorithm to store persistent variables.
+ * (4) Core eventually resets the search offset and forwards the find()
+ * request to the algorithm.
+ * (5) Algorithm calls get_next_block() provided by the user continuously
+ * to fetch the data to be searched, block by block.
+ * (6) Algorithm invokes finish() after the last call to get_next_block
+ * to clean up any leftovers from get_next_block. (Optional)
+ * (7) User destroys the configuration by calling textsearch_destroy().
+ * (8) Core notifies the algorithm to destroy algorithm-specific
+ * allocations. (Optional)
+ *
+ * USAGE
+ *
+ * Before a search can be performed, a configuration must be created
+ * by calling textsearch_prepare() specifying the searching algorithm,
+ * the pattern to look for and flags. As a flag, you can set TS_IGNORECASE
+ * to perform case-insensitive matching, but note that it may slow the
+ * algorithm down, so use it at your own risk.
+ * The returned configuration may then be reused an arbitrary number
+ * of times, even in parallel, as long as a separate struct
+ * ts_state variable is provided to every instance.
+ *
+ * The actual search is performed by either calling
+ * textsearch_find_continuous() for linear data or by providing
+ * your own get_next_block() implementation and
+ * calling textsearch_find(). Both functions return
+ * the position of the first occurrence of the pattern or UINT_MAX if
+ * no match was found. Subsequent occurrences can be found by calling
+ * textsearch_next() regardless of the linearity of the data.
+ *
+ * Once you are done with a configuration, it must be given back via
+ * textsearch_destroy().
+ *
+ * EXAMPLE::
+ *
+ * int pos;
+ * struct ts_config *conf;
+ * struct ts_state state;
+ * const char *pattern = "chicken";
+ * const char *example = "We dance the funky chicken";
+ *
+ * conf = textsearch_prepare("kmp", pattern, strlen(pattern),
+ * GFP_KERNEL, TS_AUTOLOAD);
+ * if (IS_ERR(conf)) {
+ * err = PTR_ERR(conf);
+ * goto errout;
+ * }
+ *
+ * pos = textsearch_find_continuous(conf, &state, example, strlen(example));
+ * if (pos != UINT_MAX)
+ * panic("Oh my god, dancing chickens at %d\n", pos);
+ *
+ * textsearch_destroy(conf);
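+ *
+ * Every match, not just the first, can be retrieved by following up
+ * with textsearch_next() before destroying the configuration; a
+ * minimal sketch reusing conf, state and pos from above (the printk
+ * format is illustrative only)::
+ *
+ * while (pos != UINT_MAX) {
+ * printk("chicken at %d\n", pos);
+ * pos = textsearch_next(conf, &state);
+ * }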
+ */
+/* ========================================================================== */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+#include <linux/err.h>
+#include <linux/textsearch.h>
+#include <linux/slab.h>
+
+static LIST_HEAD(ts_ops);
+static DEFINE_SPINLOCK(ts_mod_lock);
+
+static inline struct ts_ops *lookup_ts_algo(const char *name)
+{
+ struct ts_ops *o;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(o, &ts_ops, list) {
+ if (!strcmp(name, o->name)) {
+ if (!try_module_get(o->owner))
+ o = NULL;
+ rcu_read_unlock();
+ return o;
+ }
+ }
+ rcu_read_unlock();
+
+ return NULL;
+}
+
+/**
+ * textsearch_register - register a textsearch module
+ * @ops: operations lookup table
+ *
+ * This function must be called by textsearch modules to announce
+ * their presence. The specified @ops must have its name set to a
+ * unique identifier and the callbacks find(), init(), get_pattern(),
+ * and get_pattern_len() must be implemented.
+ *
+ * Returns 0 on success, or -EEXIST if another module has already
+ * registered with the same name.
+ */
+int textsearch_register(struct ts_ops *ops)
+{
+ int err = -EEXIST;
+ struct ts_ops *o;
+
+ if (ops->name == NULL || ops->find == NULL || ops->init == NULL ||
+ ops->get_pattern == NULL || ops->get_pattern_len == NULL)
+ return -EINVAL;
+
+ spin_lock(&ts_mod_lock);
+ list_for_each_entry(o, &ts_ops, list) {
+ if (!strcmp(ops->name, o->name))
+ goto errout;
+ }
+
+ list_add_tail_rcu(&ops->list, &ts_ops);
+ err = 0;
+errout:
+ spin_unlock(&ts_mod_lock);
+ return err;
+}
+EXPORT_SYMBOL(textsearch_register);
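+
+/*
+ * A minimal registration sketch (illustrative only; the "dummy"
+ * callbacks are assumptions, not part of this file):
+ *
+ * static struct ts_ops dummy_ops = {
+ * .name = "dummy",
+ * .find = dummy_find,
+ * .init = dummy_init,
+ * .get_pattern = dummy_get_pattern,
+ * .get_pattern_len = dummy_get_pattern_len,
+ * .owner = THIS_MODULE,
+ * };
+ *
+ * err = textsearch_register(&dummy_ops);
+ */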
+
+/**
+ * textsearch_unregister - unregister a textsearch module
+ * @ops: operations lookup table
+ *
+ * This function must be called by textsearch modules to announce
+ * their disappearance, for example when the module gets unloaded.
+ * The @ops parameter must be the same as the one used during
+ * registration.
+ *
+ * Returns 0 on success or -ENOENT if no matching textsearch
+ * registration was found.
+ */
+int textsearch_unregister(struct ts_ops *ops)
+{
+ int err = 0;
+ struct ts_ops *o;
+
+ spin_lock(&ts_mod_lock);
+ list_for_each_entry(o, &ts_ops, list) {
+ if (o == ops) {
+ list_del_rcu(&o->list);
+ goto out;
+ }
+ }
+
+ err = -ENOENT;
+out:
+ spin_unlock(&ts_mod_lock);
+ return err;
+}
+EXPORT_SYMBOL(textsearch_unregister);
+
+struct ts_linear_state
+{
+ unsigned int len;
+ const void *data;
+};
+
+static unsigned int get_linear_data(unsigned int consumed, const u8 **dst,
+ struct ts_config *conf,
+ struct ts_state *state)
+{
+ struct ts_linear_state *st = (struct ts_linear_state *) state->cb;
+
+ if (likely(consumed < st->len)) {
+ *dst = st->data + consumed;
+ return st->len - consumed;
+ }
+
+ return 0;
+}
+
+/**
+ * textsearch_find_continuous - search a pattern in continuous/linear data
+ * @conf: search configuration
+ * @state: search state
+ * @data: data to search in
+ * @len: length of data
+ *
+ * A simplified version of textsearch_find() for continuous/linear data.
+ * Call textsearch_next() to retrieve subsequent matches.
+ *
+ * Returns the position of first occurrence of the pattern or
+ * %UINT_MAX if no occurrence was found.
+ */
+unsigned int textsearch_find_continuous(struct ts_config *conf,
+ struct ts_state *state,
+ const void *data, unsigned int len)
+{
+ struct ts_linear_state *st = (struct ts_linear_state *) state->cb;
+
+ conf->get_next_block = get_linear_data;
+ st->data = data;
+ st->len = len;
+
+ return textsearch_find(conf, state);
+}
+EXPORT_SYMBOL(textsearch_find_continuous);
+
+/**
+ * textsearch_prepare - Prepare a search
+ * @algo: name of search algorithm
+ * @pattern: pattern data
+ * @len: length of pattern
+ * @gfp_mask: allocation mask
+ * @flags: search flags
+ *
+ * Looks up the search algorithm module and creates a new textsearch
+ * configuration for the specified pattern.
+ *
+ * Note: The format of the pattern may not be compatible between
+ * the various search algorithms.
+ *
+ * Returns a new textsearch configuration according to the specified
+ * parameters or an ERR_PTR(). If a zero length pattern is passed,
+ * this function returns ERR_PTR(-EINVAL).
+ */
+struct ts_config *textsearch_prepare(const char *algo, const void *pattern,
+ unsigned int len, gfp_t gfp_mask, int flags)
+{
+ int err = -ENOENT;
+ struct ts_config *conf;
+ struct ts_ops *ops;
+
+ if (len == 0)
+ return ERR_PTR(-EINVAL);
+
+ ops = lookup_ts_algo(algo);
+#ifdef CONFIG_MODULES
+ /*
+ * Why not always autoload, you may ask. Some users are
+ * in a situation where requesting a module may deadlock,
+ * especially when the module is located on an NFS mount.
+ */
+ if (ops == NULL && flags & TS_AUTOLOAD) {
+ request_module("ts_%s", algo);
+ ops = lookup_ts_algo(algo);
+ }
+#endif
+
+ if (ops == NULL)
+ goto errout;
+
+ conf = ops->init(pattern, len, gfp_mask, flags);
+ if (IS_ERR(conf)) {
+ err = PTR_ERR(conf);
+ goto errout;
+ }
+
+ conf->ops = ops;
+ return conf;
+
+errout:
+ if (ops)
+ module_put(ops->owner);
+
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL(textsearch_prepare);
+
+/**
+ * textsearch_destroy - destroy a search configuration
+ * @conf: search configuration
+ *
+ * Releases all references of the configuration and frees
+ * up the memory.
+ */
+void textsearch_destroy(struct ts_config *conf)
+{
+ if (conf->ops) {
+ if (conf->ops->destroy)
+ conf->ops->destroy(conf);
+ module_put(conf->ops->owner);
+ }
+
+ kfree(conf);
+}
+EXPORT_SYMBOL(textsearch_destroy);
diff --git a/lib/timerqueue.c b/lib/timerqueue.c
new file mode 100644
index 000000000..cdb9c7658
--- /dev/null
+++ b/lib/timerqueue.c
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Generic Timer-queue
+ *
+ * Manages a simple queue of timers, ordered by expiration time.
+ * Uses rbtrees for quick list adds and expiration.
+ *
+ * NOTE: All of the following functions need to be serialized
+ * to avoid races. No locking is done by this library code.
+ */
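+
+/*
+ * Usage sketch (editorial addition, assuming the inline helpers
+ * timerqueue_init_head(), timerqueue_init() and timerqueue_getnext()
+ * from <linux/timerqueue.h>); all calls run under a caller-held lock:
+ *
+ *	struct timerqueue_head q;
+ *	struct timerqueue_node n;
+ *
+ *	timerqueue_init_head(&q);
+ *	timerqueue_init(&n);
+ *	n.expires = ktime_add_ns(ktime_get(), NSEC_PER_SEC);
+ *	timerqueue_add(&q, &n);		- true: first expiring timer
+ *	timerqueue_getnext(&q);		- returns &n
+ *	timerqueue_del(&q, &n);		- false: queue now empty
+ */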
+
+#include <linux/bug.h>
+#include <linux/timerqueue.h>
+#include <linux/rbtree.h>
+#include <linux/export.h>
+
+#define __node_2_tq(_n) \
+ rb_entry((_n), struct timerqueue_node, node)
+
+static inline bool __timerqueue_less(struct rb_node *a, const struct rb_node *b)
+{
+ return __node_2_tq(a)->expires < __node_2_tq(b)->expires;
+}
+
+/**
+ * timerqueue_add - Adds timer to timerqueue.
+ *
+ * @head: head of timerqueue
+ * @node: timer node to be added
+ *
+ * Adds the timer node to the timerqueue, sorted by the node's expires
+ * value. Returns true if the newly added timer is the first expiring timer in
+ * the queue.
+ */
+bool timerqueue_add(struct timerqueue_head *head, struct timerqueue_node *node)
+{
+ /* Make sure we don't add nodes that are already added */
+ WARN_ON_ONCE(!RB_EMPTY_NODE(&node->node));
+
+ return rb_add_cached(&node->node, &head->rb_root, __timerqueue_less);
+}
+EXPORT_SYMBOL_GPL(timerqueue_add);
+
+/**
+ * timerqueue_del - Removes a timer from the timerqueue.
+ *
+ * @head: head of timerqueue
+ * @node: timer node to be removed
+ *
+ * Removes the timer node from the timerqueue. Returns true if the queue is
+ * not empty after the remove.
+ */
+bool timerqueue_del(struct timerqueue_head *head, struct timerqueue_node *node)
+{
+ WARN_ON_ONCE(RB_EMPTY_NODE(&node->node));
+
+ rb_erase_cached(&node->node, &head->rb_root);
+ RB_CLEAR_NODE(&node->node);
+
+ return !RB_EMPTY_ROOT(&head->rb_root.rb_root);
+}
+EXPORT_SYMBOL_GPL(timerqueue_del);
+
+/**
+ * timerqueue_iterate_next - Returns the timer after the provided timer
+ *
+ * @node: Pointer to a timer.
+ *
+ * Provides the timer that is after the given node. This is used, when
+ * necessary, to iterate through the list of timers in a timer list
+ * without modifying the list.
+ */
+struct timerqueue_node *timerqueue_iterate_next(struct timerqueue_node *node)
+{
+ struct rb_node *next;
+
+ if (!node)
+ return NULL;
+ next = rb_next(&node->node);
+ if (!next)
+ return NULL;
+ return container_of(next, struct timerqueue_node, node);
+}
+EXPORT_SYMBOL_GPL(timerqueue_iterate_next);
diff --git a/lib/trace_readwrite.c b/lib/trace_readwrite.c
new file mode 100644
index 000000000..62b4e8b3c
--- /dev/null
+++ b/lib/trace_readwrite.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Register read and write tracepoints
+ *
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/ftrace.h>
+#include <linux/module.h>
+#include <asm-generic/io.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/rwmmio.h>
+
+#ifdef CONFIG_TRACE_MMIO_ACCESS
+void log_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
+ unsigned long caller_addr, unsigned long caller_addr0)
+{
+ trace_rwmmio_write(caller_addr, caller_addr0, val, width, addr);
+}
+EXPORT_SYMBOL_GPL(log_write_mmio);
+EXPORT_TRACEPOINT_SYMBOL_GPL(rwmmio_write);
+
+void log_post_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
+ unsigned long caller_addr, unsigned long caller_addr0)
+{
+ trace_rwmmio_post_write(caller_addr, caller_addr0, val, width, addr);
+}
+EXPORT_SYMBOL_GPL(log_post_write_mmio);
+EXPORT_TRACEPOINT_SYMBOL_GPL(rwmmio_post_write);
+
+void log_read_mmio(u8 width, const volatile void __iomem *addr,
+ unsigned long caller_addr, unsigned long caller_addr0)
+{
+ trace_rwmmio_read(caller_addr, caller_addr0, width, addr);
+}
+EXPORT_SYMBOL_GPL(log_read_mmio);
+EXPORT_TRACEPOINT_SYMBOL_GPL(rwmmio_read);
+
+void log_post_read_mmio(u64 val, u8 width, const volatile void __iomem *addr,
+ unsigned long caller_addr, unsigned long caller_addr0)
+{
+ trace_rwmmio_post_read(caller_addr, caller_addr0, val, width, addr);
+}
+EXPORT_SYMBOL_GPL(log_post_read_mmio);
+EXPORT_TRACEPOINT_SYMBOL_GPL(rwmmio_post_read);
+#endif /* CONFIG_TRACE_MMIO_ACCESS */
diff --git a/lib/ts_bm.c b/lib/ts_bm.c
new file mode 100644
index 000000000..e5f30f917
--- /dev/null
+++ b/lib/ts_bm.c
@@ -0,0 +1,222 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * lib/ts_bm.c Boyer-Moore text search implementation
+ *
+ * Authors: Pablo Neira Ayuso <pablo@eurodev.net>
+ *
+ * ==========================================================================
+ *
+ * Implements Boyer-Moore string matching algorithm:
+ *
+ * [1] A Fast String Searching Algorithm, R.S. Boyer and Moore.
+ * Communications of the Association for Computing Machinery,
+ * 20(10), 1977, pp. 762-772.
+ * https://www.cs.utexas.edu/users/moore/publications/fstrpos.pdf
+ *
+ * [2] Handbook of Exact String Matching Algorithms, Thierry Lecroq, 2004
+ * http://www-igm.univ-mlv.fr/~lecroq/string/string.pdf
+ *
+ * Note: Since Boyer-Moore (BM) performs matching from right to left,
+ * a match may be spread over multiple blocks, in which case this
+ * algorithm will not find it.
+ *
+ * If you need to ensure that such a match is never missed, use the
+ * Knuth-Morris-Pratt (KMP) implementation instead. In short, choose
+ * the proper string search algorithm depending on your setting.
+ *
+ * Say you're using the textsearch infrastructure for filtering, NIDS or
+ * any similar security focused purpose, then go KMP. Otherwise, if you
+ * really care about performance, say you're classifying packets to apply
+ * Quality of Service (QoS) policies, and you don't mind possible
+ * matches being spread over multiple fragments, then go BM.
+ */
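+
+/*
+ * Worked example (editorial addition): for the pattern "chicken"
+ * (patlen = 7), compute_prefix_tbl() below sets, for each character
+ * at position i < patlen - 1, bad_shift[c] = patlen - 1 - i, with the
+ * rightmost occurrence winning:
+ *
+ *	bad_shift['c'] = 3	(rightmost 'c' is at i = 3)
+ *	bad_shift['h'] = 5
+ *	bad_shift['i'] = 4
+ *	bad_shift['k'] = 2
+ *	bad_shift['e'] = 1
+ *	any other character = 7 (the full pattern length)
+ */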
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/textsearch.h>
+
+/* Alphabet size, use ASCII */
+#define ASIZE 256
+
+#if 0
+#define DEBUGP printk
+#else
+#define DEBUGP(args, format...)
+#endif
+
+struct ts_bm
+{
+ u8 * pattern;
+ unsigned int patlen;
+ unsigned int bad_shift[ASIZE];
+ unsigned int good_shift[];
+};
+
+static unsigned int matchpat(const u8 *pattern, unsigned int patlen,
+ const u8 *text, bool icase)
+{
+ unsigned int i;
+
+ for (i = 0; i < patlen; i++) {
+ u8 t = *(text-i);
+
+ if (icase)
+ t = toupper(t);
+
+ if (t != *(pattern-i))
+ break;
+ }
+
+ return i;
+}
+
+static unsigned int bm_find(struct ts_config *conf, struct ts_state *state)
+{
+ struct ts_bm *bm = ts_config_priv(conf);
+ unsigned int i, text_len, consumed = state->offset;
+ const u8 *text;
+ int bs;
+ const u8 icase = conf->flags & TS_IGNORECASE;
+
+ for (;;) {
+ int shift = bm->patlen - 1;
+
+ text_len = conf->get_next_block(consumed, &text, conf, state);
+
+ if (unlikely(text_len == 0))
+ break;
+
+ while (shift < text_len) {
+ DEBUGP("Searching in position %d (%c)\n",
+ shift, text[shift]);
+
+ i = matchpat(&bm->pattern[bm->patlen-1], bm->patlen,
+ &text[shift], icase);
+ if (i == bm->patlen) {
+ /* London calling... */
+ DEBUGP("found!\n");
+ return consumed + (shift-(bm->patlen-1));
+ }
+
+ bs = bm->bad_shift[text[shift-i]];
+
+ /* Now jumping to... */
+ shift = max_t(int, shift-i+bs, shift+bm->good_shift[i]);
+ }
+ consumed += text_len;
+ }
+
+ return UINT_MAX;
+}
+
+static int subpattern(u8 *pattern, int i, int j, int g)
+{
+ int x = i+g-1, y = j+g-1, ret = 0;
+
+ while(pattern[x--] == pattern[y--]) {
+ if (y < 0) {
+ ret = 1;
+ break;
+ }
+ if (--g == 0) {
+ ret = pattern[i-1] != pattern[j-1];
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static void compute_prefix_tbl(struct ts_bm *bm, int flags)
+{
+ int i, j, g;
+
+ for (i = 0; i < ASIZE; i++)
+ bm->bad_shift[i] = bm->patlen;
+ for (i = 0; i < bm->patlen - 1; i++) {
+ bm->bad_shift[bm->pattern[i]] = bm->patlen - 1 - i;
+ if (flags & TS_IGNORECASE)
+ bm->bad_shift[tolower(bm->pattern[i])]
+ = bm->patlen - 1 - i;
+ }
+
+ /* Compute the good shift array, used to match recurrences
+ * of a subpattern */
+ bm->good_shift[0] = 1;
+ for (i = 1; i < bm->patlen; i++)
+ bm->good_shift[i] = bm->patlen;
+ for (i = bm->patlen-1, g = 1; i > 0; g++, i--) {
+ for (j = i-1; j >= 1-g ; j--)
+ if (subpattern(bm->pattern, i, j, g)) {
+ bm->good_shift[g] = bm->patlen-j-g;
+ break;
+ }
+ }
+}
+
+static struct ts_config *bm_init(const void *pattern, unsigned int len,
+ gfp_t gfp_mask, int flags)
+{
+ struct ts_config *conf;
+ struct ts_bm *bm;
+ int i;
+ unsigned int prefix_tbl_len = len * sizeof(unsigned int);
+ size_t priv_size = sizeof(*bm) + len + prefix_tbl_len;
+
+ conf = alloc_ts_config(priv_size, gfp_mask);
+ if (IS_ERR(conf))
+ return conf;
+
+ conf->flags = flags;
+ bm = ts_config_priv(conf);
+ bm->patlen = len;
+ bm->pattern = (u8 *) bm->good_shift + prefix_tbl_len;
+ if (flags & TS_IGNORECASE)
+ for (i = 0; i < len; i++)
+ bm->pattern[i] = toupper(((u8 *)pattern)[i]);
+ else
+ memcpy(bm->pattern, pattern, len);
+ compute_prefix_tbl(bm, flags);
+
+ return conf;
+}
+
+static void *bm_get_pattern(struct ts_config *conf)
+{
+ struct ts_bm *bm = ts_config_priv(conf);
+ return bm->pattern;
+}
+
+static unsigned int bm_get_pattern_len(struct ts_config *conf)
+{
+ struct ts_bm *bm = ts_config_priv(conf);
+ return bm->patlen;
+}
+
+static struct ts_ops bm_ops = {
+ .name = "bm",
+ .find = bm_find,
+ .init = bm_init,
+ .get_pattern = bm_get_pattern,
+ .get_pattern_len = bm_get_pattern_len,
+ .owner = THIS_MODULE,
+ .list = LIST_HEAD_INIT(bm_ops.list)
+};
+
+static int __init init_bm(void)
+{
+ return textsearch_register(&bm_ops);
+}
+
+static void __exit exit_bm(void)
+{
+ textsearch_unregister(&bm_ops);
+}
+
+MODULE_LICENSE("GPL");
+
+module_init(init_bm);
+module_exit(exit_bm);
diff --git a/lib/ts_fsm.c b/lib/ts_fsm.c
new file mode 100644
index 000000000..64fd9015a
--- /dev/null
+++ b/lib/ts_fsm.c
@@ -0,0 +1,337 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * lib/ts_fsm.c A naive finite state machine text search approach
+ *
+ * Authors: Thomas Graf <tgraf@suug.ch>
+ *
+ * ==========================================================================
+ *
+ * A finite state machine consists of n states (struct ts_fsm_token)
+ * representing the pattern as a finite automaton. The data is read
+ * sequentially on an octet basis. Every state token specifies the number
+ * of recurrences and the type of value accepted, which can be either
+ * a specific character or a ctype based set of characters. The
+ * available recurrence types are 1, (0|1), [0,n], and [1,n].
+ *
+ * The algorithm differs between strict/non-strict mode specifying
+ * whether the pattern has to start at the first octet. Strict mode
+ * is enabled by default and can be disabled by inserting
+ * TS_FSM_HEAD_IGNORE as the first token in the chain.
+ *
+ * The runtime performance of the algorithm should be around O(n);
+ * in strict mode the average runtime can be better.
+ */
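+
+/*
+ * Example (editorial sketch): a pattern matching one or more digits
+ * anywhere in the data can be expressed with two tokens, the first of
+ * which disables strict mode:
+ *
+ *	struct ts_fsm_token pattern[] = {
+ *		{ .type = TS_FSM_WILDCARD, .recur = TS_FSM_HEAD_IGNORE },
+ *		{ .type = TS_FSM_DIGIT,    .recur = TS_FSM_MULTI },
+ *	};
+ *
+ *	conf = textsearch_prepare("fsm", pattern, sizeof(pattern),
+ *				  GFP_KERNEL, TS_AUTOLOAD);
+ */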
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/textsearch.h>
+#include <linux/textsearch_fsm.h>
+
+struct ts_fsm
+{
+ unsigned int ntokens;
+ struct ts_fsm_token tokens[];
+};
+
+/* other values derived from ctype.h */
+#define _A 0x100 /* ascii */
+#define _W 0x200 /* wildcard */
+
+/* Map to _ctype flags and some magic numbers */
+static const u16 token_map[TS_FSM_TYPE_MAX+1] = {
+ [TS_FSM_SPECIFIC] = 0,
+ [TS_FSM_WILDCARD] = _W,
+ [TS_FSM_CNTRL] = _C,
+ [TS_FSM_LOWER] = _L,
+ [TS_FSM_UPPER] = _U,
+ [TS_FSM_PUNCT] = _P,
+ [TS_FSM_SPACE] = _S,
+ [TS_FSM_DIGIT] = _D,
+ [TS_FSM_XDIGIT] = _D | _X,
+ [TS_FSM_ALPHA] = _U | _L,
+ [TS_FSM_ALNUM] = _U | _L | _D,
+ [TS_FSM_PRINT] = _P | _U | _L | _D | _SP,
+ [TS_FSM_GRAPH] = _P | _U | _L | _D,
+ [TS_FSM_ASCII] = _A,
+};
+
+static const u16 token_lookup_tbl[256] = {
+_W|_A|_C, _W|_A|_C, _W|_A|_C, _W|_A|_C, /* 0- 3 */
+_W|_A|_C, _W|_A|_C, _W|_A|_C, _W|_A|_C, /* 4- 7 */
+_W|_A|_C, _W|_A|_C|_S, _W|_A|_C|_S, _W|_A|_C|_S, /* 8- 11 */
+_W|_A|_C|_S, _W|_A|_C|_S, _W|_A|_C, _W|_A|_C, /* 12- 15 */
+_W|_A|_C, _W|_A|_C, _W|_A|_C, _W|_A|_C, /* 16- 19 */
+_W|_A|_C, _W|_A|_C, _W|_A|_C, _W|_A|_C, /* 20- 23 */
+_W|_A|_C, _W|_A|_C, _W|_A|_C, _W|_A|_C, /* 24- 27 */
+_W|_A|_C, _W|_A|_C, _W|_A|_C, _W|_A|_C, /* 28- 31 */
+_W|_A|_S|_SP, _W|_A|_P, _W|_A|_P, _W|_A|_P, /* 32- 35 */
+_W|_A|_P, _W|_A|_P, _W|_A|_P, _W|_A|_P, /* 36- 39 */
+_W|_A|_P, _W|_A|_P, _W|_A|_P, _W|_A|_P, /* 40- 43 */
+_W|_A|_P, _W|_A|_P, _W|_A|_P, _W|_A|_P, /* 44- 47 */
+_W|_A|_D, _W|_A|_D, _W|_A|_D, _W|_A|_D, /* 48- 51 */
+_W|_A|_D, _W|_A|_D, _W|_A|_D, _W|_A|_D, /* 52- 55 */
+_W|_A|_D, _W|_A|_D, _W|_A|_P, _W|_A|_P, /* 56- 59 */
+_W|_A|_P, _W|_A|_P, _W|_A|_P, _W|_A|_P, /* 60- 63 */
+_W|_A|_P, _W|_A|_U|_X, _W|_A|_U|_X, _W|_A|_U|_X, /* 64- 67 */
+_W|_A|_U|_X, _W|_A|_U|_X, _W|_A|_U|_X, _W|_A|_U, /* 68- 71 */
+_W|_A|_U, _W|_A|_U, _W|_A|_U, _W|_A|_U, /* 72- 75 */
+_W|_A|_U, _W|_A|_U, _W|_A|_U, _W|_A|_U, /* 76- 79 */
+_W|_A|_U, _W|_A|_U, _W|_A|_U, _W|_A|_U, /* 80- 83 */
+_W|_A|_U, _W|_A|_U, _W|_A|_U, _W|_A|_U, /* 84- 87 */
+_W|_A|_U, _W|_A|_U, _W|_A|_U, _W|_A|_P, /* 88- 91 */
+_W|_A|_P, _W|_A|_P, _W|_A|_P, _W|_A|_P, /* 92- 95 */
+_W|_A|_P, _W|_A|_L|_X, _W|_A|_L|_X, _W|_A|_L|_X, /* 96- 99 */
+_W|_A|_L|_X, _W|_A|_L|_X, _W|_A|_L|_X, _W|_A|_L, /* 100-103 */
+_W|_A|_L, _W|_A|_L, _W|_A|_L, _W|_A|_L, /* 104-107 */
+_W|_A|_L, _W|_A|_L, _W|_A|_L, _W|_A|_L, /* 108-111 */
+_W|_A|_L, _W|_A|_L, _W|_A|_L, _W|_A|_L, /* 112-115 */
+_W|_A|_L, _W|_A|_L, _W|_A|_L, _W|_A|_L, /* 116-119 */
+_W|_A|_L, _W|_A|_L, _W|_A|_L, _W|_A|_P, /* 120-123 */
+_W|_A|_P, _W|_A|_P, _W|_A|_P, _W|_A|_C, /* 124-127 */
+_W, _W, _W, _W, /* 128-131 */
+_W, _W, _W, _W, /* 132-135 */
+_W, _W, _W, _W, /* 136-139 */
+_W, _W, _W, _W, /* 140-143 */
+_W, _W, _W, _W, /* 144-147 */
+_W, _W, _W, _W, /* 148-151 */
+_W, _W, _W, _W, /* 152-155 */
+_W, _W, _W, _W, /* 156-159 */
+_W|_S|_SP, _W|_P, _W|_P, _W|_P, /* 160-163 */
+_W|_P, _W|_P, _W|_P, _W|_P, /* 164-167 */
+_W|_P, _W|_P, _W|_P, _W|_P, /* 168-171 */
+_W|_P, _W|_P, _W|_P, _W|_P, /* 172-175 */
+_W|_P, _W|_P, _W|_P, _W|_P, /* 176-179 */
+_W|_P, _W|_P, _W|_P, _W|_P, /* 180-183 */
+_W|_P, _W|_P, _W|_P, _W|_P, /* 184-187 */
+_W|_P, _W|_P, _W|_P, _W|_P, /* 188-191 */
+_W|_U, _W|_U, _W|_U, _W|_U, /* 192-195 */
+_W|_U, _W|_U, _W|_U, _W|_U, /* 196-199 */
+_W|_U, _W|_U, _W|_U, _W|_U, /* 200-203 */
+_W|_U, _W|_U, _W|_U, _W|_U, /* 204-207 */
+_W|_U, _W|_U, _W|_U, _W|_U, /* 208-211 */
+_W|_U, _W|_U, _W|_U, _W|_P, /* 212-215 */
+_W|_U, _W|_U, _W|_U, _W|_U, /* 216-219 */
+_W|_U, _W|_U, _W|_U, _W|_L, /* 220-223 */
+_W|_L, _W|_L, _W|_L, _W|_L, /* 224-227 */
+_W|_L, _W|_L, _W|_L, _W|_L, /* 228-231 */
+_W|_L, _W|_L, _W|_L, _W|_L, /* 232-235 */
+_W|_L, _W|_L, _W|_L, _W|_L, /* 236-239 */
+_W|_L, _W|_L, _W|_L, _W|_L, /* 240-243 */
+_W|_L, _W|_L, _W|_L, _W|_P, /* 244-247 */
+_W|_L, _W|_L, _W|_L, _W|_L, /* 248-251 */
+_W|_L, _W|_L, _W|_L, _W|_L}; /* 252-255 */
+
+static inline int match_token(struct ts_fsm_token *t, u8 d)
+{
+ if (t->type)
+ return (token_lookup_tbl[d] & t->type) != 0;
+ else
+ return t->value == d;
+}
+
+static unsigned int fsm_find(struct ts_config *conf, struct ts_state *state)
+{
+ struct ts_fsm *fsm = ts_config_priv(conf);
+ struct ts_fsm_token *cur = NULL, *next;
+ unsigned int match_start, block_idx = 0, tok_idx;
+ unsigned block_len = 0, strict, consumed = state->offset;
+ const u8 *data;
+
+#define GET_NEXT_BLOCK() \
+({ consumed += block_idx; \
+ block_idx = 0; \
+ block_len = conf->get_next_block(consumed, &data, conf, state); })
+
+#define TOKEN_MISMATCH() \
+ do { \
+ if (strict) \
+ goto no_match; \
+ block_idx++; \
+ goto startover; \
+ } while(0)
+
+#define end_of_data() unlikely(block_idx >= block_len && !GET_NEXT_BLOCK())
+
+ if (end_of_data())
+ goto no_match;
+
+ strict = fsm->tokens[0].recur != TS_FSM_HEAD_IGNORE;
+
+startover:
+ match_start = consumed + block_idx;
+
+ for (tok_idx = 0; tok_idx < fsm->ntokens; tok_idx++) {
+ cur = &fsm->tokens[tok_idx];
+
+ if (likely(tok_idx < (fsm->ntokens - 1)))
+ next = &fsm->tokens[tok_idx + 1];
+ else
+ next = NULL;
+
+ switch (cur->recur) {
+ case TS_FSM_SINGLE:
+ if (end_of_data())
+ goto no_match;
+
+ if (!match_token(cur, data[block_idx]))
+ TOKEN_MISMATCH();
+ break;
+
+ case TS_FSM_PERHAPS:
+ if (end_of_data() ||
+ !match_token(cur, data[block_idx]))
+ continue;
+ break;
+
+ case TS_FSM_MULTI:
+ if (end_of_data())
+ goto no_match;
+
+ if (!match_token(cur, data[block_idx]))
+ TOKEN_MISMATCH();
+
+ block_idx++;
+ fallthrough;
+
+ case TS_FSM_ANY:
+ if (next == NULL)
+ goto found_match;
+
+ if (end_of_data())
+ continue;
+
+ while (!match_token(next, data[block_idx])) {
+ if (!match_token(cur, data[block_idx]))
+ TOKEN_MISMATCH();
+ block_idx++;
+ if (end_of_data())
+ goto no_match;
+ }
+ continue;
+
+ /*
+ * Optimization: Prefer small local loop over jumping
+ * back and forth until garbage at head is munched.
+ */
+ case TS_FSM_HEAD_IGNORE:
+ if (end_of_data())
+ continue;
+
+ while (!match_token(next, data[block_idx])) {
+ /*
+ * Special case, don't start over upon
+ * a mismatch, give the user the
+ * chance to specify the type of data
+ * allowed to be ignored.
+ */
+ if (!match_token(cur, data[block_idx]))
+ goto no_match;
+
+ block_idx++;
+ if (end_of_data())
+ goto no_match;
+ }
+
+ match_start = consumed + block_idx;
+ continue;
+ }
+
+ block_idx++;
+ }
+
+ if (end_of_data())
+ goto found_match;
+
+no_match:
+ return UINT_MAX;
+
+found_match:
+ state->offset = consumed + block_idx;
+ return match_start;
+}
+
+static struct ts_config *fsm_init(const void *pattern, unsigned int len,
+ gfp_t gfp_mask, int flags)
+{
+ int i, err = -EINVAL;
+ struct ts_config *conf;
+ struct ts_fsm *fsm;
+ struct ts_fsm_token *tokens = (struct ts_fsm_token *) pattern;
+ unsigned int ntokens = len / sizeof(*tokens);
+ size_t priv_size = sizeof(*fsm) + len;
+
+ if (len % sizeof(struct ts_fsm_token) || ntokens < 1)
+ goto errout;
+
+ if (flags & TS_IGNORECASE)
+ goto errout;
+
+ for (i = 0; i < ntokens; i++) {
+ struct ts_fsm_token *t = &tokens[i];
+
+ if (t->type > TS_FSM_TYPE_MAX || t->recur > TS_FSM_RECUR_MAX)
+ goto errout;
+
+ if (t->recur == TS_FSM_HEAD_IGNORE &&
+ (i != 0 || i == (ntokens - 1)))
+ goto errout;
+ }
+
+ conf = alloc_ts_config(priv_size, gfp_mask);
+ if (IS_ERR(conf))
+ return conf;
+
+ conf->flags = flags;
+ fsm = ts_config_priv(conf);
+ fsm->ntokens = ntokens;
+ memcpy(fsm->tokens, pattern, len);
+
+ for (i = 0; i < fsm->ntokens; i++) {
+ struct ts_fsm_token *t = &fsm->tokens[i];
+ t->type = token_map[t->type];
+ }
+
+ return conf;
+
+errout:
+ return ERR_PTR(err);
+}
+
+static void *fsm_get_pattern(struct ts_config *conf)
+{
+ struct ts_fsm *fsm = ts_config_priv(conf);
+ return fsm->tokens;
+}
+
+static unsigned int fsm_get_pattern_len(struct ts_config *conf)
+{
+ struct ts_fsm *fsm = ts_config_priv(conf);
+ return fsm->ntokens * sizeof(struct ts_fsm_token);
+}
+
+static struct ts_ops fsm_ops = {
+ .name = "fsm",
+ .find = fsm_find,
+ .init = fsm_init,
+ .get_pattern = fsm_get_pattern,
+ .get_pattern_len = fsm_get_pattern_len,
+ .owner = THIS_MODULE,
+ .list = LIST_HEAD_INIT(fsm_ops.list)
+};
+
+static int __init init_fsm(void)
+{
+ return textsearch_register(&fsm_ops);
+}
+
+static void __exit exit_fsm(void)
+{
+ textsearch_unregister(&fsm_ops);
+}
+
+MODULE_LICENSE("GPL");
+
+module_init(init_fsm);
+module_exit(exit_fsm);
diff --git a/lib/ts_kmp.c b/lib/ts_kmp.c
new file mode 100644
index 000000000..c77a3d537
--- /dev/null
+++ b/lib/ts_kmp.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * lib/ts_kmp.c Knuth-Morris-Pratt text search implementation
+ *
+ * Authors: Thomas Graf <tgraf@suug.ch>
+ *
+ * ==========================================================================
+ *
+ * Implements a linear-time string-matching algorithm due to Knuth,
+ * Morris, and Pratt [1]. Their algorithm avoids the explicit
+ * computation of the transition function DELTA altogether. Its
+ * matching time is O(n), for n being length(text), using just an
+ * auxiliary function PI[1..m], for m being length(pattern),
+ * precomputed from the pattern in time O(m). The array PI allows
+ * the transition function DELTA to be computed efficiently
+ * "on the fly" as needed. Roughly speaking, for any state
+ * "q" = 0,1,...,m and any character "a" in SIGMA, the value
+ * PI["q"] contains the information that is independent of "a" and
+ * is needed to compute DELTA("q", "a") [2]. Since the array PI
+ * has only m entries, whereas DELTA has O(m|SIGMA|) entries, we
+ * save a factor of |SIGMA| in the preprocessing time by computing
+ * PI rather than DELTA.
+ *
+ * [1] Cormen, Leiserson, Rivest, Stein
+ * Introduction to Algorithms, 2nd Edition, MIT Press
+ * [2] See finite automaton theory
+ */
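+
+/*
+ * Worked example (editorial addition): for the pattern "ababaca",
+ * compute_prefix_tbl() below yields
+ *
+ *	q:          0  1  2  3  4  5  6
+ *	pattern[q]: a  b  a  b  a  c  a
+ *	PI[q]:      0  0  1  2  3  0  1
+ *
+ * i.e. PI[q] is the length of the longest proper prefix of the
+ * pattern that is also a suffix of pattern[0..q].
+ */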
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/textsearch.h>
+
+struct ts_kmp
+{
+ u8 * pattern;
+ unsigned int pattern_len;
+ unsigned int prefix_tbl[];
+};
+
+static unsigned int kmp_find(struct ts_config *conf, struct ts_state *state)
+{
+ struct ts_kmp *kmp = ts_config_priv(conf);
+ unsigned int i, q = 0, text_len, consumed = state->offset;
+ const u8 *text;
+ const int icase = conf->flags & TS_IGNORECASE;
+
+ for (;;) {
+ text_len = conf->get_next_block(consumed, &text, conf, state);
+
+ if (unlikely(text_len == 0))
+ break;
+
+ for (i = 0; i < text_len; i++) {
+ while (q > 0 && kmp->pattern[q]
+ != (icase ? toupper(text[i]) : text[i]))
+ q = kmp->prefix_tbl[q - 1];
+ if (kmp->pattern[q]
+ == (icase ? toupper(text[i]) : text[i]))
+ q++;
+ if (unlikely(q == kmp->pattern_len)) {
+ state->offset = consumed + i + 1;
+ return state->offset - kmp->pattern_len;
+ }
+ }
+
+ consumed += text_len;
+ }
+
+ return UINT_MAX;
+}
+
+static inline void compute_prefix_tbl(const u8 *pattern, unsigned int len,
+ unsigned int *prefix_tbl, int flags)
+{
+ unsigned int k, q;
+ const u8 icase = flags & TS_IGNORECASE;
+
+ for (k = 0, q = 1; q < len; q++) {
+ while (k > 0 && (icase ? toupper(pattern[k]) : pattern[k])
+ != (icase ? toupper(pattern[q]) : pattern[q]))
+ k = prefix_tbl[k-1];
+ if ((icase ? toupper(pattern[k]) : pattern[k])
+ == (icase ? toupper(pattern[q]) : pattern[q]))
+ k++;
+ prefix_tbl[q] = k;
+ }
+}
+
+static struct ts_config *kmp_init(const void *pattern, unsigned int len,
+ gfp_t gfp_mask, int flags)
+{
+ struct ts_config *conf;
+ struct ts_kmp *kmp;
+ int i;
+ unsigned int prefix_tbl_len = len * sizeof(unsigned int);
+ size_t priv_size = sizeof(*kmp) + len + prefix_tbl_len;
+
+ conf = alloc_ts_config(priv_size, gfp_mask);
+ if (IS_ERR(conf))
+ return conf;
+
+ conf->flags = flags;
+ kmp = ts_config_priv(conf);
+ kmp->pattern_len = len;
+ compute_prefix_tbl(pattern, len, kmp->prefix_tbl, flags);
+ kmp->pattern = (u8 *) kmp->prefix_tbl + prefix_tbl_len;
+ if (flags & TS_IGNORECASE)
+ for (i = 0; i < len; i++)
+ kmp->pattern[i] = toupper(((u8 *)pattern)[i]);
+ else
+ memcpy(kmp->pattern, pattern, len);
+
+ return conf;
+}
+
+static void *kmp_get_pattern(struct ts_config *conf)
+{
+ struct ts_kmp *kmp = ts_config_priv(conf);
+ return kmp->pattern;
+}
+
+static unsigned int kmp_get_pattern_len(struct ts_config *conf)
+{
+ struct ts_kmp *kmp = ts_config_priv(conf);
+ return kmp->pattern_len;
+}
+
+static struct ts_ops kmp_ops = {
+ .name = "kmp",
+ .find = kmp_find,
+ .init = kmp_init,
+ .get_pattern = kmp_get_pattern,
+ .get_pattern_len = kmp_get_pattern_len,
+ .owner = THIS_MODULE,
+ .list = LIST_HEAD_INIT(kmp_ops.list)
+};
+
+static int __init init_kmp(void)
+{
+ return textsearch_register(&kmp_ops);
+}
+
+static void __exit exit_kmp(void)
+{
+ textsearch_unregister(&kmp_ops);
+}
+
+MODULE_LICENSE("GPL");
+
+module_init(init_kmp);
+module_exit(exit_kmp);
diff --git a/lib/ubsan.c b/lib/ubsan.c
new file mode 100644
index 000000000..3f90810f9
--- /dev/null
+++ b/lib/ubsan.c
@@ -0,0 +1,454 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * UBSAN error reporting functions
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/bug.h>
+#include <linux/ctype.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+#include <linux/ubsan.h>
+#include <kunit/test-bug.h>
+
+#include "ubsan.h"
+
+#ifdef CONFIG_UBSAN_TRAP
+/*
+ * Only include matches for UBSAN checks that are actually compiled in.
+ * The mappings of struct SanitizerKind (the -fsanitize=xxx args) to
+ * enum SanitizerHandler (the traps) in Clang is in clang/lib/CodeGen/.
+ */
+const char *report_ubsan_failure(struct pt_regs *regs, u32 check_type)
+{
+ switch (check_type) {
+#ifdef CONFIG_UBSAN_BOUNDS
+ /*
+ * SanitizerKind::ArrayBounds and SanitizerKind::LocalBounds
+ * emit SanitizerHandler::OutOfBounds.
+ */
+ case ubsan_out_of_bounds:
+ return "UBSAN: array index out of bounds";
+#endif
+#ifdef CONFIG_UBSAN_SHIFT
+ /*
+ * SanitizerKind::ShiftBase and SanitizerKind::ShiftExponent
+ * emit SanitizerHandler::ShiftOutOfBounds.
+ */
+ case ubsan_shift_out_of_bounds:
+ return "UBSAN: shift out of bounds";
+#endif
+#ifdef CONFIG_UBSAN_DIV_ZERO
+ /*
+ * SanitizerKind::IntegerDivideByZero emits
+ * SanitizerHandler::DivremOverflow.
+ */
+ case ubsan_divrem_overflow:
+ return "UBSAN: divide/remainder overflow";
+#endif
+#ifdef CONFIG_UBSAN_UNREACHABLE
+ /*
+ * SanitizerKind::Unreachable emits
+ * SanitizerHandler::BuiltinUnreachable.
+ */
+ case ubsan_builtin_unreachable:
+ return "UBSAN: unreachable code";
+#endif
+#if defined(CONFIG_UBSAN_BOOL) || defined(CONFIG_UBSAN_ENUM)
+ /*
+ * SanitizerKind::Bool and SanitizerKind::Enum emit
+ * SanitizerHandler::LoadInvalidValue.
+ */
+ case ubsan_load_invalid_value:
+ return "UBSAN: loading invalid value";
+#endif
+#ifdef CONFIG_UBSAN_ALIGNMENT
+ /*
+ * SanitizerKind::Alignment emits SanitizerHandler::TypeMismatch
+ * or SanitizerHandler::AlignmentAssumption.
+ */
+ case ubsan_alignment_assumption:
+ return "UBSAN: alignment assumption";
+ case ubsan_type_mismatch:
+ return "UBSAN: type mismatch";
+#endif
+ default:
+ return "UBSAN: unrecognized failure code";
+ }
+}
+
+#else
+static const char * const type_check_kinds[] = {
+ "load of",
+ "store to",
+ "reference binding to",
+ "member access within",
+ "member call on",
+ "constructor call on",
+ "downcast of",
+ "downcast of"
+};
+
+#define REPORTED_BIT 31
+
+#if (BITS_PER_LONG == 64) && defined(__BIG_ENDIAN)
+#define COLUMN_MASK (~(1U << REPORTED_BIT))
+#define LINE_MASK (~0U)
+#else
+#define COLUMN_MASK (~0U)
+#define LINE_MASK (~(1U << REPORTED_BIT))
+#endif
+
+#define VALUE_LENGTH 40
+
+static bool was_reported(struct source_location *location)
+{
+ return test_and_set_bit(REPORTED_BIT, &location->reported);
+}
+
+static bool suppress_report(struct source_location *loc)
+{
+ return current->in_ubsan || was_reported(loc);
+}
+
+static bool type_is_int(struct type_descriptor *type)
+{
+ return type->type_kind == type_kind_int;
+}
+
+static bool type_is_signed(struct type_descriptor *type)
+{
+ WARN_ON(!type_is_int(type));
+ return type->type_info & 1;
+}
+
+static unsigned type_bit_width(struct type_descriptor *type)
+{
+ return 1 << (type->type_info >> 1);
+}
+
+static bool is_inline_int(struct type_descriptor *type)
+{
+ unsigned inline_bits = sizeof(unsigned long)*8;
+ unsigned bits = type_bit_width(type);
+
+ WARN_ON(!type_is_int(type));
+
+ return bits <= inline_bits;
+}
+
+static s_max get_signed_val(struct type_descriptor *type, void *val)
+{
+ if (is_inline_int(type)) {
+ unsigned extra_bits = sizeof(s_max)*8 - type_bit_width(type);
+ unsigned long ulong_val = (unsigned long)val;
+
+ return ((s_max)ulong_val) << extra_bits >> extra_bits;
+ }
+
+ if (type_bit_width(type) == 64)
+ return *(s64 *)val;
+
+ return *(s_max *)val;
+}
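+
+/*
+ * Editorial note with a worked example: for an inline 8-bit value
+ * 0xff and a 64-bit s_max, extra_bits is 56; shifting left by 56 and
+ * arithmetically right by 56 sign-extends the value to -1. The cast
+ * to s_max makes the right shift arithmetic instead of logical.
+ */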
+
+static bool val_is_negative(struct type_descriptor *type, void *val)
+{
+ return type_is_signed(type) && get_signed_val(type, val) < 0;
+}
+
+static u_max get_unsigned_val(struct type_descriptor *type, void *val)
+{
+ if (is_inline_int(type))
+ return (unsigned long)val;
+
+ if (type_bit_width(type) == 64)
+ return *(u64 *)val;
+
+ return *(u_max *)val;
+}
+
+static void val_to_string(char *str, size_t size, struct type_descriptor *type,
+ void *value)
+{
+ if (type_is_int(type)) {
+ if (type_bit_width(type) == 128) {
+#if defined(CONFIG_ARCH_SUPPORTS_INT128)
+ u_max val = get_unsigned_val(type, value);
+
+ scnprintf(str, size, "0x%08x%08x%08x%08x",
+ (u32)(val >> 96),
+ (u32)(val >> 64),
+ (u32)(val >> 32),
+ (u32)(val));
+#else
+ WARN_ON(1);
+#endif
+ } else if (type_is_signed(type)) {
+ scnprintf(str, size, "%lld",
+ (s64)get_signed_val(type, value));
+ } else {
+ scnprintf(str, size, "%llu",
+ (u64)get_unsigned_val(type, value));
+ }
+ }
+}
+
+static void ubsan_prologue(struct source_location *loc, const char *reason)
+{
+ current->in_ubsan++;
+
+ pr_err("========================================"
+ "========================================\n");
+ pr_err("UBSAN: %s in %s:%d:%d\n", reason, loc->file_name,
+ loc->line & LINE_MASK, loc->column & COLUMN_MASK);
+
+ kunit_fail_current_test("%s in %s", reason, loc->file_name);
+}
+
+static void ubsan_epilogue(void)
+{
+ dump_stack();
+ pr_err("========================================"
+ "========================================\n");
+
+ current->in_ubsan--;
+
+ check_panic_on_warn("UBSAN");
+}
+
+void __ubsan_handle_divrem_overflow(void *_data, void *lhs, void *rhs)
+{
+ struct overflow_data *data = _data;
+ char rhs_val_str[VALUE_LENGTH];
+
+ if (suppress_report(&data->location))
+ return;
+
+ ubsan_prologue(&data->location, "division-overflow");
+
+ val_to_string(rhs_val_str, sizeof(rhs_val_str), data->type, rhs);
+
+ if (type_is_signed(data->type) && get_signed_val(data->type, rhs) == -1)
+ pr_err("division of %s by -1 cannot be represented in type %s\n",
+ rhs_val_str, data->type->type_name);
+ else
+ pr_err("division by zero\n");
+
+ ubsan_epilogue();
+}
+EXPORT_SYMBOL(__ubsan_handle_divrem_overflow);
+
+static void handle_null_ptr_deref(struct type_mismatch_data_common *data)
+{
+ if (suppress_report(data->location))
+ return;
+
+ ubsan_prologue(data->location, "null-ptr-deref");
+
+ pr_err("%s null pointer of type %s\n",
+ type_check_kinds[data->type_check_kind],
+ data->type->type_name);
+
+ ubsan_epilogue();
+}
+
+static void handle_misaligned_access(struct type_mismatch_data_common *data,
+ unsigned long ptr)
+{
+ if (suppress_report(data->location))
+ return;
+
+ ubsan_prologue(data->location, "misaligned-access");
+
+ pr_err("%s misaligned address %p for type %s\n",
+ type_check_kinds[data->type_check_kind],
+ (void *)ptr, data->type->type_name);
+ pr_err("which requires %ld byte alignment\n", data->alignment);
+
+ ubsan_epilogue();
+}
+
+static void handle_object_size_mismatch(struct type_mismatch_data_common *data,
+ unsigned long ptr)
+{
+ if (suppress_report(data->location))
+ return;
+
+ ubsan_prologue(data->location, "object-size-mismatch");
+ pr_err("%s address %p with insufficient space\n",
+ type_check_kinds[data->type_check_kind],
+ (void *) ptr);
+ pr_err("for an object of type %s\n", data->type->type_name);
+ ubsan_epilogue();
+}
+
+static void ubsan_type_mismatch_common(struct type_mismatch_data_common *data,
+ unsigned long ptr)
+{
+ unsigned long flags = user_access_save();
+
+ if (!ptr)
+ handle_null_ptr_deref(data);
+ else if (data->alignment && !IS_ALIGNED(ptr, data->alignment))
+ handle_misaligned_access(data, ptr);
+ else
+ handle_object_size_mismatch(data, ptr);
+
+ user_access_restore(flags);
+}
+
+void __ubsan_handle_type_mismatch(struct type_mismatch_data *data,
+ void *ptr)
+{
+ struct type_mismatch_data_common common_data = {
+ .location = &data->location,
+ .type = data->type,
+ .alignment = data->alignment,
+ .type_check_kind = data->type_check_kind
+ };
+
+ ubsan_type_mismatch_common(&common_data, (unsigned long)ptr);
+}
+EXPORT_SYMBOL(__ubsan_handle_type_mismatch);
+
+void __ubsan_handle_type_mismatch_v1(void *_data, void *ptr)
+{
+ struct type_mismatch_data_v1 *data = _data;
+ struct type_mismatch_data_common common_data = {
+ .location = &data->location,
+ .type = data->type,
+ .alignment = 1UL << data->log_alignment,
+ .type_check_kind = data->type_check_kind
+ };
+
+ ubsan_type_mismatch_common(&common_data, (unsigned long)ptr);
+}
+EXPORT_SYMBOL(__ubsan_handle_type_mismatch_v1);
+
+void __ubsan_handle_out_of_bounds(void *_data, void *index)
+{
+ struct out_of_bounds_data *data = _data;
+ char index_str[VALUE_LENGTH];
+
+ if (suppress_report(&data->location))
+ return;
+
+ ubsan_prologue(&data->location, "array-index-out-of-bounds");
+
+ val_to_string(index_str, sizeof(index_str), data->index_type, index);
+ pr_err("index %s is out of range for type %s\n", index_str,
+ data->array_type->type_name);
+ ubsan_epilogue();
+}
+EXPORT_SYMBOL(__ubsan_handle_out_of_bounds);
+
+void __ubsan_handle_shift_out_of_bounds(void *_data, void *lhs, void *rhs)
+{
+ struct shift_out_of_bounds_data *data = _data;
+ struct type_descriptor *rhs_type = data->rhs_type;
+ struct type_descriptor *lhs_type = data->lhs_type;
+ char rhs_str[VALUE_LENGTH];
+ char lhs_str[VALUE_LENGTH];
+ unsigned long ua_flags = user_access_save();
+
+ if (suppress_report(&data->location))
+ goto out;
+
+ ubsan_prologue(&data->location, "shift-out-of-bounds");
+
+ val_to_string(rhs_str, sizeof(rhs_str), rhs_type, rhs);
+ val_to_string(lhs_str, sizeof(lhs_str), lhs_type, lhs);
+
+ if (val_is_negative(rhs_type, rhs))
+ pr_err("shift exponent %s is negative\n", rhs_str);
+
+ else if (get_unsigned_val(rhs_type, rhs) >=
+ type_bit_width(lhs_type))
+ pr_err("shift exponent %s is too large for %u-bit type %s\n",
+ rhs_str,
+ type_bit_width(lhs_type),
+ lhs_type->type_name);
+ else if (val_is_negative(lhs_type, lhs))
+ pr_err("left shift of negative value %s\n",
+ lhs_str);
+ else
+ pr_err("left shift of %s by %s places cannot be"
+ " represented in type %s\n",
+ lhs_str, rhs_str,
+ lhs_type->type_name);
+
+ ubsan_epilogue();
+out:
+ user_access_restore(ua_flags);
+}
+EXPORT_SYMBOL(__ubsan_handle_shift_out_of_bounds);
+
+
+void __ubsan_handle_builtin_unreachable(void *_data)
+{
+ struct unreachable_data *data = _data;
+ ubsan_prologue(&data->location, "unreachable");
+ pr_err("calling __builtin_unreachable()\n");
+ ubsan_epilogue();
+ panic("can't return from __builtin_unreachable()");
+}
+EXPORT_SYMBOL(__ubsan_handle_builtin_unreachable);
+
+void __ubsan_handle_load_invalid_value(void *_data, void *val)
+{
+ struct invalid_value_data *data = _data;
+ char val_str[VALUE_LENGTH];
+ unsigned long ua_flags = user_access_save();
+
+ if (suppress_report(&data->location))
+ goto out;
+
+ ubsan_prologue(&data->location, "invalid-load");
+
+ val_to_string(val_str, sizeof(val_str), data->type, val);
+
+ pr_err("load of value %s is not a valid value for type %s\n",
+ val_str, data->type->type_name);
+
+ ubsan_epilogue();
+out:
+ user_access_restore(ua_flags);
+}
+EXPORT_SYMBOL(__ubsan_handle_load_invalid_value);
+
+void __ubsan_handle_alignment_assumption(void *_data, unsigned long ptr,
+ unsigned long align,
+ unsigned long offset)
+{
+ struct alignment_assumption_data *data = _data;
+ unsigned long real_ptr;
+
+ if (suppress_report(&data->location))
+ return;
+
+ ubsan_prologue(&data->location, "alignment-assumption");
+
+ if (offset)
+ pr_err("assumption of %lu byte alignment (with offset of %lu byte) for pointer of type %s failed",
+ align, offset, data->type->type_name);
+ else
+ pr_err("assumption of %lu byte alignment for pointer of type %s failed",
+ align, data->type->type_name);
+
+ real_ptr = ptr - offset;
+ pr_err("%saddress is %lu aligned, misalignment offset is %lu bytes",
+ offset ? "offset " : "", BIT(real_ptr ? __ffs(real_ptr) : 0),
+ real_ptr & (align - 1));
+
+ ubsan_epilogue();
+}
+EXPORT_SYMBOL(__ubsan_handle_alignment_assumption);
+
+#endif /* !CONFIG_UBSAN_TRAP */
diff --git a/lib/ubsan.h b/lib/ubsan.h
new file mode 100644
index 000000000..5d99ab819
--- /dev/null
+++ b/lib/ubsan.h
@@ -0,0 +1,138 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LIB_UBSAN_H
+#define _LIB_UBSAN_H
+
+/*
+ * ABI defined by Clang's UBSAN enum SanitizerHandler:
+ * https://github.com/llvm/llvm-project/blob/release/16.x/clang/lib/CodeGen/CodeGenFunction.h#L113
+ */
+enum ubsan_checks {
+ ubsan_add_overflow,
+ ubsan_builtin_unreachable,
+ ubsan_cfi_check_fail,
+ ubsan_divrem_overflow,
+ ubsan_dynamic_type_cache_miss,
+ ubsan_float_cast_overflow,
+ ubsan_function_type_mismatch,
+ ubsan_implicit_conversion,
+ ubsan_invalid_builtin,
+ ubsan_invalid_objc_cast,
+ ubsan_load_invalid_value,
+ ubsan_missing_return,
+ ubsan_mul_overflow,
+ ubsan_negate_overflow,
+ ubsan_nullability_arg,
+ ubsan_nullability_return,
+ ubsan_nonnull_arg,
+ ubsan_nonnull_return,
+ ubsan_out_of_bounds,
+ ubsan_pointer_overflow,
+ ubsan_shift_out_of_bounds,
+ ubsan_sub_overflow,
+ ubsan_type_mismatch,
+ ubsan_alignment_assumption,
+ ubsan_vla_bound_not_positive,
+};
+
+enum {
+ type_kind_int = 0,
+ type_kind_float = 1,
+ type_unknown = 0xffff
+};
+
+struct type_descriptor {
+ u16 type_kind;
+ u16 type_info;
+ char type_name[1];
+};
+
+struct source_location {
+ const char *file_name;
+ union {
+ unsigned long reported;
+ struct {
+ u32 line;
+ u32 column;
+ };
+ };
+};
+
+struct overflow_data {
+ struct source_location location;
+ struct type_descriptor *type;
+};
+
+struct type_mismatch_data {
+ struct source_location location;
+ struct type_descriptor *type;
+ unsigned long alignment;
+ unsigned char type_check_kind;
+};
+
+struct type_mismatch_data_v1 {
+ struct source_location location;
+ struct type_descriptor *type;
+ unsigned char log_alignment;
+ unsigned char type_check_kind;
+};
+
+struct type_mismatch_data_common {
+ struct source_location *location;
+ struct type_descriptor *type;
+ unsigned long alignment;
+ unsigned char type_check_kind;
+};
+
+struct nonnull_arg_data {
+ struct source_location location;
+ struct source_location attr_location;
+ int arg_index;
+};
+
+struct out_of_bounds_data {
+ struct source_location location;
+ struct type_descriptor *array_type;
+ struct type_descriptor *index_type;
+};
+
+struct shift_out_of_bounds_data {
+ struct source_location location;
+ struct type_descriptor *lhs_type;
+ struct type_descriptor *rhs_type;
+};
+
+struct unreachable_data {
+ struct source_location location;
+};
+
+struct invalid_value_data {
+ struct source_location location;
+ struct type_descriptor *type;
+};
+
+struct alignment_assumption_data {
+ struct source_location location;
+ struct source_location assumption_location;
+ struct type_descriptor *type;
+};
+
+#if defined(CONFIG_ARCH_SUPPORTS_INT128)
+typedef __int128 s_max;
+typedef unsigned __int128 u_max;
+#else
+typedef s64 s_max;
+typedef u64 u_max;
+#endif
+
+void __ubsan_handle_divrem_overflow(void *_data, void *lhs, void *rhs);
+void __ubsan_handle_type_mismatch(struct type_mismatch_data *data, void *ptr);
+void __ubsan_handle_type_mismatch_v1(void *_data, void *ptr);
+void __ubsan_handle_out_of_bounds(void *_data, void *index);
+void __ubsan_handle_shift_out_of_bounds(void *_data, void *lhs, void *rhs);
+void __ubsan_handle_builtin_unreachable(void *_data);
+void __ubsan_handle_load_invalid_value(void *_data, void *val);
+void __ubsan_handle_alignment_assumption(void *_data, unsigned long ptr,
+ unsigned long align,
+ unsigned long offset);
+
+#endif
diff --git a/lib/ucmpdi2.c b/lib/ucmpdi2.c
new file mode 100644
index 000000000..726e394c9
--- /dev/null
+++ b/lib/ucmpdi2.c
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
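+ * __ucmpdi2() - compare two unsigned 64-bit values (libgcc helper).
+ * Returns 0 if a < b, 1 if a == b, 2 if a > b.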
+ */
+
+#include <linux/module.h>
+#include <linux/libgcc.h>
+
+word_type notrace __ucmpdi2(unsigned long long a, unsigned long long b)
+{
+ const DWunion au = {.ll = a};
+ const DWunion bu = {.ll = b};
+
+ if ((unsigned int) au.s.high < (unsigned int) bu.s.high)
+ return 0;
+ else if ((unsigned int) au.s.high > (unsigned int) bu.s.high)
+ return 2;
+ if ((unsigned int) au.s.low < (unsigned int) bu.s.low)
+ return 0;
+ else if ((unsigned int) au.s.low > (unsigned int) bu.s.low)
+ return 2;
+ return 1;
+}
+EXPORT_SYMBOL(__ucmpdi2);
diff --git a/lib/ucs2_string.c b/lib/ucs2_string.c
new file mode 100644
index 000000000..0a559a423
--- /dev/null
+++ b/lib/ucs2_string.c
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/ucs2_string.h>
+#include <linux/module.h>
+
+/* Return the number of unicode characters in data */
+unsigned long
+ucs2_strnlen(const ucs2_char_t *s, size_t maxlength)
+{
+ unsigned long length = 0;
+
+ while (*s++ != 0 && length < maxlength)
+ length++;
+ return length;
+}
+EXPORT_SYMBOL(ucs2_strnlen);
+
+unsigned long
+ucs2_strlen(const ucs2_char_t *s)
+{
+ return ucs2_strnlen(s, ~0UL);
+}
+EXPORT_SYMBOL(ucs2_strlen);
+
+/*
+ * Return the length of this string in bytes.
+ * Note: this is NOT the same as the number of unicode characters.
+ */
+unsigned long
+ucs2_strsize(const ucs2_char_t *data, unsigned long maxlength)
+{
+ return ucs2_strnlen(data, maxlength/sizeof(ucs2_char_t)) * sizeof(ucs2_char_t);
+}
+EXPORT_SYMBOL(ucs2_strsize);
+
+int
+ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len)
+{
+ while (1) {
+ if (len == 0)
+ return 0;
+ if (*a < *b)
+ return -1;
+ if (*a > *b)
+ return 1;
+ if (*a == 0) /* implies *b == 0 */
+ return 0;
+ a++;
+ b++;
+ len--;
+ }
+}
+EXPORT_SYMBOL(ucs2_strncmp);
+
+unsigned long
+ucs2_utf8size(const ucs2_char_t *src)
+{
+ unsigned long i;
+ unsigned long j = 0;
+
+ for (i = 0; src[i]; i++) {
+ u16 c = src[i];
+
+ if (c >= 0x800)
+ j += 3;
+ else if (c >= 0x80)
+ j += 2;
+ else
+ j += 1;
+ }
+
+ return j;
+}
+EXPORT_SYMBOL(ucs2_utf8size);
+
+/*
+ * copy at most maxlength bytes of whole utf8 characters to dest from the
+ * ucs2 string src.
+ *
+ * The return value is the number of characters copied, not including the
+ * final NUL character.
+ */
+unsigned long
+ucs2_as_utf8(u8 *dest, const ucs2_char_t *src, unsigned long maxlength)
+{
+ unsigned int i;
+ unsigned long j = 0;
+ unsigned long limit = ucs2_strnlen(src, maxlength);
+
+ for (i = 0; maxlength && i < limit; i++) {
+ u16 c = src[i];
+
+ if (c >= 0x800) {
+ if (maxlength < 3)
+ break;
+ maxlength -= 3;
+ dest[j++] = 0xe0 | (c & 0xf000) >> 12;
+ dest[j++] = 0x80 | (c & 0x0fc0) >> 6;
+ dest[j++] = 0x80 | (c & 0x003f);
+ } else if (c >= 0x80) {
+ if (maxlength < 2)
+ break;
+ maxlength -= 2;
+ dest[j++] = 0xc0 | (c & 0x7c0) >> 6;
+ dest[j++] = 0x80 | (c & 0x03f);
+ } else {
+ maxlength -= 1;
+ dest[j++] = c & 0x7f;
+ }
+ }
+ if (maxlength)
+ dest[j] = '\0';
+ return j;
+}
+EXPORT_SYMBOL(ucs2_as_utf8);
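+
+/*
+ * Worked example (editorial addition): the UCS-2 character U+00E9
+ * (LATIN SMALL LETTER E WITH ACUTE) is >= 0x80 and < 0x800, so it is
+ * emitted as two UTF-8 bytes:
+ *
+ *	0xc0 | (0x00e9 & 0x7c0) >> 6  =  0xc3
+ *	0x80 | (0x00e9 & 0x03f)       =  0xa9
+ */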
+
+MODULE_LICENSE("GPL v2");
diff --git a/lib/usercopy.c b/lib/usercopy.c
new file mode 100644
index 000000000..d29fe29c6
--- /dev/null
+++ b/lib/usercopy.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bitops.h>
+#include <linux/fault-inject-usercopy.h>
+#include <linux/instrumented.h>
+#include <linux/uaccess.h>
+#include <linux/nospec.h>
+
+/* out-of-line parts */
+
+#ifndef INLINE_COPY_FROM_USER
+unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ unsigned long res = n;
+ might_fault();
+ if (!should_fail_usercopy() && likely(access_ok(from, n))) {
+ /*
+ * Ensure that bad access_ok() speculation will not
+ * lead to nasty side effects *after* the copy is
+ * finished:
+ */
+ barrier_nospec();
+ instrument_copy_from_user_before(to, from, n);
+ res = raw_copy_from_user(to, from, n);
+ instrument_copy_from_user_after(to, from, n, res);
+ }
+ if (unlikely(res))
+ memset(to + (n - res), 0, res);
+ return res;
+}
+EXPORT_SYMBOL(_copy_from_user);
+#endif
+
+#ifndef INLINE_COPY_TO_USER
+unsigned long _copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ might_fault();
+ if (should_fail_usercopy())
+ return n;
+ if (likely(access_ok(to, n))) {
+ instrument_copy_to_user(to, from, n);
+ n = raw_copy_to_user(to, from, n);
+ }
+ return n;
+}
+EXPORT_SYMBOL(_copy_to_user);
+#endif
+
+/**
+ * check_zeroed_user: check if a userspace buffer only contains zero bytes
+ * @from: Source address, in userspace.
+ * @size: Size of buffer.
+ *
+ * This is effectively shorthand for "memchr_inv(from, 0, size) == NULL" for
+ * userspace addresses (and is more efficient because we don't care where the
+ * first non-zero byte is).
+ *
+ * Returns:
+ * * 0: There were non-zero bytes present in the buffer.
+ * * 1: The buffer was full of zero bytes.
+ * * -EFAULT: access to userspace failed.
+ */
+int check_zeroed_user(const void __user *from, size_t size)
+{
+ unsigned long val;
+ uintptr_t align = (uintptr_t) from % sizeof(unsigned long);
+
+ if (unlikely(size == 0))
+ return 1;
+
+ from -= align;
+ size += align;
+
+ if (!user_read_access_begin(from, size))
+ return -EFAULT;
+
+ unsafe_get_user(val, (unsigned long __user *) from, err_fault);
+ if (align)
+ val &= ~aligned_byte_mask(align);
+
+ while (size > sizeof(unsigned long)) {
+ if (unlikely(val))
+ goto done;
+
+ from += sizeof(unsigned long);
+ size -= sizeof(unsigned long);
+
+ unsafe_get_user(val, (unsigned long __user *) from, err_fault);
+ }
+
+ if (size < sizeof(unsigned long))
+ val &= aligned_byte_mask(size);
+
+done:
+ user_read_access_end();
+ return (val == 0);
+err_fault:
+ user_read_access_end();
+ return -EFAULT;
+}
+EXPORT_SYMBOL(check_zeroed_user);
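+
+/*
+ * Example (editorial sketch): extensible syscall structs check that
+ * the userspace tail beyond the kernel's known struct size contains
+ * only zeroes, in the style of copy_struct_from_user():
+ *
+ *	if (usize > ksize) {
+ *		int ret = check_zeroed_user(src + ksize, usize - ksize);
+ *
+ *		if (ret < 0)
+ *			return ret;
+ *		if (ret == 0)
+ *			return -E2BIG;
+ *	}
+ */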
diff --git a/lib/uuid.c b/lib/uuid.c
new file mode 100644
index 000000000..e309b4c5b
--- /dev/null
+++ b/lib/uuid.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Unified UUID/GUID definition
+ *
+ * Copyright (C) 2009, 2016 Intel Corp.
+ * Huang Ying <ying.huang@intel.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/ctype.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/uuid.h>
+#include <linux/random.h>
+
+const guid_t guid_null;
+EXPORT_SYMBOL(guid_null);
+const uuid_t uuid_null;
+EXPORT_SYMBOL(uuid_null);
+
+const u8 guid_index[16] = {3,2,1,0,5,4,7,6,8,9,10,11,12,13,14,15};
+const u8 uuid_index[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
+
+/**
+ * generate_random_uuid - generate a random UUID
+ * @uuid: where to put the generated UUID
+ *
+ * Random UUID interface
+ *
+ * Used to create a Boot ID or a filesystem UUID/GUID, but can be
+ * useful for other kernel drivers.
+ */
+void generate_random_uuid(unsigned char uuid[16])
+{
+ get_random_bytes(uuid, 16);
+ /* Set UUID version to 4 --- truly random generation */
+ uuid[6] = (uuid[6] & 0x0F) | 0x40;
+ /* Set the UUID variant to DCE */
+ uuid[8] = (uuid[8] & 0x3F) | 0x80;
+}
+EXPORT_SYMBOL(generate_random_uuid);
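+
+/*
+ * Editorial note: with the version and variant bits set above, the
+ * string form of a generated UUID always matches
+ *
+ *	xxxxxxxx-xxxx-4xxx-Nxxx-xxxxxxxxxxxx
+ *
+ * where N is 8, 9, a or b (the top two bits of uuid[8] are 10).
+ */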
+
+void generate_random_guid(unsigned char guid[16])
+{
+ get_random_bytes(guid, 16);
+ /* Set GUID version to 4 --- truly random generation */
+ guid[7] = (guid[7] & 0x0F) | 0x40;
+ /* Set the GUID variant to DCE */
+ guid[8] = (guid[8] & 0x3F) | 0x80;
+}
+EXPORT_SYMBOL(generate_random_guid);
+
+static void __uuid_gen_common(__u8 b[16])
+{
+ get_random_bytes(b, 16);
+ /* variant bits: 0b10 (RFC 4122 / DCE) */
+ b[8] = (b[8] & 0x3F) | 0x80;
+}
+
+void guid_gen(guid_t *lu)
+{
+ __uuid_gen_common(lu->b);
+ /* version 4 : random generation */
+ lu->b[7] = (lu->b[7] & 0x0F) | 0x40;
+}
+EXPORT_SYMBOL_GPL(guid_gen);
+
+void uuid_gen(uuid_t *bu)
+{
+ __uuid_gen_common(bu->b);
+ /* version 4 : random generation */
+ bu->b[6] = (bu->b[6] & 0x0F) | 0x40;
+}
+EXPORT_SYMBOL_GPL(uuid_gen);
+
+/**
+ * uuid_is_valid - checks if a UUID string is valid
+ * @uuid: UUID string to check
+ *
+ * Description:
+ * It checks if the UUID string is following the format:
+ * xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+ *
+ * where x is a hex digit.
+ *
+ * Return: true if input is valid UUID string.
+ */
+bool uuid_is_valid(const char *uuid)
+{
+ unsigned int i;
+
+ for (i = 0; i < UUID_STRING_LEN; i++) {
+ if (i == 8 || i == 13 || i == 18 || i == 23) {
+ if (uuid[i] != '-')
+ return false;
+ } else if (!isxdigit(uuid[i])) {
+ return false;
+ }
+ }
+
+ return true;
+}
+EXPORT_SYMBOL(uuid_is_valid);
+
+static int __uuid_parse(const char *uuid, __u8 b[16], const u8 ei[16])
+{
+ static const u8 si[16] = {0,2,4,6,9,11,14,16,19,21,24,26,28,30,32,34};
+ unsigned int i;
+
+ if (!uuid_is_valid(uuid))
+ return -EINVAL;
+
+ for (i = 0; i < 16; i++) {
+ int hi = hex_to_bin(uuid[si[i] + 0]);
+ int lo = hex_to_bin(uuid[si[i] + 1]);
+
+ b[ei[i]] = (hi << 4) | lo;
+ }
+
+ return 0;
+}
+
+int guid_parse(const char *uuid, guid_t *u)
+{
+ return __uuid_parse(uuid, u->b, guid_index);
+}
+EXPORT_SYMBOL(guid_parse);
+
+int uuid_parse(const char *uuid, uuid_t *u)
+{
+ return __uuid_parse(uuid, u->b, uuid_index);
+}
+EXPORT_SYMBOL(uuid_parse);
diff --git a/lib/vdso/Kconfig b/lib/vdso/Kconfig
new file mode 100644
index 000000000..d883ac299
--- /dev/null
+++ b/lib/vdso/Kconfig
@@ -0,0 +1,33 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config HAVE_GENERIC_VDSO
+ bool
+
+if HAVE_GENERIC_VDSO
+
+config GENERIC_GETTIMEOFDAY
+ bool
+ help
+ This is a generic implementation of the gettimeofday vdso.
+ Each architecture that enables this feature has to
+ provide the fallback implementation.
+
+config GENERIC_VDSO_32
+ bool
+ depends on GENERIC_GETTIMEOFDAY && !64BIT
+ help
+ This config option helps to avoid possible performance issues
+ in 32-bit-only architectures.
+
+config GENERIC_COMPAT_VDSO
+ bool
+ help
+ This config option enables the compat VDSO layer.
+
+config GENERIC_VDSO_TIME_NS
+ bool
+ help
+ Selected by architectures which support time namespaces in the
+ VDSO.
+
+endif
diff --git a/lib/vdso/Makefile b/lib/vdso/Makefile
new file mode 100644
index 000000000..9f031eafc
--- /dev/null
+++ b/lib/vdso/Makefile
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0
+
+GENERIC_VDSO_MK_PATH := $(abspath $(lastword $(MAKEFILE_LIST)))
+GENERIC_VDSO_DIR := $(dir $(GENERIC_VDSO_MK_PATH))
+
+c-gettimeofday-$(CONFIG_GENERIC_GETTIMEOFDAY) := $(addprefix $(GENERIC_VDSO_DIR), gettimeofday.c)
+
+# This cmd checks that the vdso library does not contain dynamic relocations.
+# It has to be called after the linking of the vdso library and requires it
+# as a parameter.
+#
+# As a workaround for some GNU ld ports which produce unneeded R_*_NONE
+# dynamic relocations, ignore R_*_NONE.
+quiet_cmd_vdso_check = VDSOCHK $@
+ cmd_vdso_check = if $(READELF) -rW $@ | grep -v _NONE | grep -q " R_\w*_"; \
+ then (echo >&2 "$@: dynamic relocations are not supported"; \
+ rm -f $@; /bin/false); fi
diff --git a/lib/vdso/gettimeofday.c b/lib/vdso/gettimeofday.c
new file mode 100644
index 000000000..ce2f69552
--- /dev/null
+++ b/lib/vdso/gettimeofday.c
@@ -0,0 +1,441 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Generic userspace implementations of gettimeofday() and similar.
+ */
+#include <vdso/datapage.h>
+#include <vdso/helpers.h>
+
+#ifndef vdso_calc_delta
+/*
+ * Default implementation which works for all sane clocksources. That
+ * obviously excludes x86/TSC.
+ */
+static __always_inline
+u64 vdso_calc_delta(u64 cycles, u64 last, u64 mask, u32 mult)
+{
+ return ((cycles - last) & mask) * mult;
+}
+#endif
+
+#ifndef vdso_shift_ns
+static __always_inline u64 vdso_shift_ns(u64 ns, u32 shift)
+{
+ return ns >> shift;
+}
+#endif
+
+#ifndef __arch_vdso_hres_capable
+static inline bool __arch_vdso_hres_capable(void)
+{
+ return true;
+}
+#endif
+
+#ifndef vdso_clocksource_ok
+static inline bool vdso_clocksource_ok(const struct vdso_data *vd)
+{
+ return vd->clock_mode != VDSO_CLOCKMODE_NONE;
+}
+#endif
+
+#ifndef vdso_cycles_ok
+static inline bool vdso_cycles_ok(u64 cycles)
+{
+ return true;
+}
+#endif
+
+#ifdef CONFIG_TIME_NS
+static __always_inline int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
+ struct __kernel_timespec *ts)
+{
+ const struct vdso_data *vd;
+ const struct timens_offset *offs = &vdns->offset[clk];
+ const struct vdso_timestamp *vdso_ts;
+ u64 cycles, last, ns;
+ u32 seq;
+ s64 sec;
+
+ vd = vdns - (clk == CLOCK_MONOTONIC_RAW ? CS_RAW : CS_HRES_COARSE);
+ vd = __arch_get_timens_vdso_data(vd);
+ if (clk != CLOCK_MONOTONIC_RAW)
+ vd = &vd[CS_HRES_COARSE];
+ else
+ vd = &vd[CS_RAW];
+ vdso_ts = &vd->basetime[clk];
+
+ do {
+ seq = vdso_read_begin(vd);
+
+ if (unlikely(!vdso_clocksource_ok(vd)))
+ return -1;
+
+ cycles = __arch_get_hw_counter(vd->clock_mode, vd);
+ if (unlikely(!vdso_cycles_ok(cycles)))
+ return -1;
+ ns = vdso_ts->nsec;
+ last = vd->cycle_last;
+ ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
+ ns = vdso_shift_ns(ns, vd->shift);
+ sec = vdso_ts->sec;
+ } while (unlikely(vdso_read_retry(vd, seq)));
+
+ /* Add the namespace offset */
+ sec += offs->sec;
+ ns += offs->nsec;
+
+ /*
+ * Do this outside the loop: a race inside the loop could result
+ * in __iter_div_u64_rem() being extremely slow.
+ */
+ ts->tv_sec = sec + __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
+ ts->tv_nsec = ns;
+
+ return 0;
+}
+#else
+static __always_inline
+const struct vdso_data *__arch_get_timens_vdso_data(const struct vdso_data *vd)
+{
+ return NULL;
+}
+
+static __always_inline int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
+ struct __kernel_timespec *ts)
+{
+ return -EINVAL;
+}
+#endif
+
+static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk,
+ struct __kernel_timespec *ts)
+{
+ const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
+ u64 cycles, last, sec, ns;
+ u32 seq;
+
+ /* Allows the high resolution parts to be compiled out */
+ if (!__arch_vdso_hres_capable())
+ return -1;
+
+ do {
+ /*
+ * Open coded to handle VDSO_CLOCKMODE_TIMENS. Time namespace
+ * enabled tasks have a special VVAR page installed which
+ * has vd->seq set to 1 and vd->clock_mode set to
+ * VDSO_CLOCKMODE_TIMENS. For tasks outside a time namespace
+ * this does not hurt performance: if vd->seq is odd, i.e. a
+ * concurrent update is in progress, the extra check for
+ * vd->clock_mode costs only a few extra instructions while
+ * spin-waiting for vd->seq to become even again.
+ */
+ while (unlikely((seq = READ_ONCE(vd->seq)) & 1)) {
+ if (IS_ENABLED(CONFIG_TIME_NS) &&
+ vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
+ return do_hres_timens(vd, clk, ts);
+ cpu_relax();
+ }
+ smp_rmb();
+
+ if (unlikely(!vdso_clocksource_ok(vd)))
+ return -1;
+
+ cycles = __arch_get_hw_counter(vd->clock_mode, vd);
+ if (unlikely(!vdso_cycles_ok(cycles)))
+ return -1;
+ ns = vdso_ts->nsec;
+ last = vd->cycle_last;
+ ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
+ ns = vdso_shift_ns(ns, vd->shift);
+ sec = vdso_ts->sec;
+ } while (unlikely(vdso_read_retry(vd, seq)));
+
+ /*
+ * Do this outside the loop: a race inside the loop could result
+ * in __iter_div_u64_rem() being extremely slow.
+ */
+ ts->tv_sec = sec + __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
+ ts->tv_nsec = ns;
+
+ return 0;
+}
+
+#ifdef CONFIG_TIME_NS
+static __always_inline int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
+ struct __kernel_timespec *ts)
+{
+ const struct vdso_data *vd = __arch_get_timens_vdso_data(vdns);
+ const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
+ const struct timens_offset *offs = &vdns->offset[clk];
+ u64 nsec;
+ s64 sec;
+ s32 seq;
+
+ do {
+ seq = vdso_read_begin(vd);
+ sec = vdso_ts->sec;
+ nsec = vdso_ts->nsec;
+ } while (unlikely(vdso_read_retry(vd, seq)));
+
+ /* Add the namespace offset */
+ sec += offs->sec;
+ nsec += offs->nsec;
+
+ /*
+ * Do this outside the loop: a race inside the loop could result
+ * in __iter_div_u64_rem() being extremely slow.
+ */
+ ts->tv_sec = sec + __iter_div_u64_rem(nsec, NSEC_PER_SEC, &nsec);
+ ts->tv_nsec = nsec;
+ return 0;
+}
+#else
+static __always_inline int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
+ struct __kernel_timespec *ts)
+{
+ return -1;
+}
+#endif
+
+static __always_inline int do_coarse(const struct vdso_data *vd, clockid_t clk,
+ struct __kernel_timespec *ts)
+{
+ const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
+ u32 seq;
+
+ do {
+ /*
+ * Open coded to handle VDSO_CLOCKMODE_TIMENS. See comment in
+ * do_hres().
+ */
+ while ((seq = READ_ONCE(vd->seq)) & 1) {
+ if (IS_ENABLED(CONFIG_TIME_NS) &&
+ vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
+ return do_coarse_timens(vd, clk, ts);
+ cpu_relax();
+ }
+ smp_rmb();
+
+ ts->tv_sec = vdso_ts->sec;
+ ts->tv_nsec = vdso_ts->nsec;
+ } while (unlikely(vdso_read_retry(vd, seq)));
+
+ return 0;
+}
+
+static __always_inline int
+__cvdso_clock_gettime_common(const struct vdso_data *vd, clockid_t clock,
+ struct __kernel_timespec *ts)
+{
+ u32 msk;
+
+ /* Check for negative values or invalid clocks */
+ if (unlikely((u32) clock >= MAX_CLOCKS))
+ return -1;
+
+ /*
+ * Convert the clockid to a bitmask and use it to check which
+ * clocks are handled in the VDSO directly.
+ */
+ msk = 1U << clock;
+ if (likely(msk & VDSO_HRES))
+ vd = &vd[CS_HRES_COARSE];
+ else if (msk & VDSO_COARSE)
+ return do_coarse(&vd[CS_HRES_COARSE], clock, ts);
+ else if (msk & VDSO_RAW)
+ vd = &vd[CS_RAW];
+ else
+ return -1;
+
+ return do_hres(vd, clock, ts);
+}
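+
+/*
+ * For example, CLOCK_MONOTONIC (1) yields msk = 0x02, which is in
+ * VDSO_HRES and is served by do_hres(), while CLOCK_MONOTONIC_COARSE (6)
+ * yields msk = 0x40, which is in VDSO_COARSE and is served by
+ * do_coarse(). Clocks outside all three masks take the syscall fallback
+ * in the callers below.
+ */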
+
+static __maybe_unused int
+__cvdso_clock_gettime_data(const struct vdso_data *vd, clockid_t clock,
+ struct __kernel_timespec *ts)
+{
+ int ret = __cvdso_clock_gettime_common(vd, clock, ts);
+
+ if (unlikely(ret))
+ return clock_gettime_fallback(clock, ts);
+ return 0;
+}
+
+static __maybe_unused int
+__cvdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
+{
+ return __cvdso_clock_gettime_data(__arch_get_vdso_data(), clock, ts);
+}
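+
+/*
+ * Architectures expose this as the user-visible vDSO entry point via a
+ * thin wrapper, roughly like the following sketch (the exact symbol name
+ * varies by architecture):
+ *
+ *	int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
+ *	{
+ *		return __cvdso_clock_gettime(clock, ts);
+ *	}
+ */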
+
+#ifdef BUILD_VDSO32
+static __maybe_unused int
+__cvdso_clock_gettime32_data(const struct vdso_data *vd, clockid_t clock,
+ struct old_timespec32 *res)
+{
+ struct __kernel_timespec ts;
+ int ret;
+
+ ret = __cvdso_clock_gettime_common(vd, clock, &ts);
+
+ if (unlikely(ret))
+ return clock_gettime32_fallback(clock, res);
+
+ /* For ret == 0 */
+ res->tv_sec = ts.tv_sec;
+ res->tv_nsec = ts.tv_nsec;
+
+ return ret;
+}
+
+static __maybe_unused int
+__cvdso_clock_gettime32(clockid_t clock, struct old_timespec32 *res)
+{
+ return __cvdso_clock_gettime32_data(__arch_get_vdso_data(), clock, res);
+}
+#endif /* BUILD_VDSO32 */
+
+static __maybe_unused int
+__cvdso_gettimeofday_data(const struct vdso_data *vd,
+ struct __kernel_old_timeval *tv, struct timezone *tz)
+{
+
+ if (likely(tv != NULL)) {
+ struct __kernel_timespec ts;
+
+ if (do_hres(&vd[CS_HRES_COARSE], CLOCK_REALTIME, &ts))
+ return gettimeofday_fallback(tv, tz);
+
+ tv->tv_sec = ts.tv_sec;
+ tv->tv_usec = (u32)ts.tv_nsec / NSEC_PER_USEC;
+ }
+
+ if (unlikely(tz != NULL)) {
+ if (IS_ENABLED(CONFIG_TIME_NS) &&
+ vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
+ vd = __arch_get_timens_vdso_data(vd);
+
+ tz->tz_minuteswest = vd[CS_HRES_COARSE].tz_minuteswest;
+ tz->tz_dsttime = vd[CS_HRES_COARSE].tz_dsttime;
+ }
+
+ return 0;
+}
+
+static __maybe_unused int
+__cvdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
+{
+ return __cvdso_gettimeofday_data(__arch_get_vdso_data(), tv, tz);
+}
+
+#ifdef VDSO_HAS_TIME
+static __maybe_unused __kernel_old_time_t
+__cvdso_time_data(const struct vdso_data *vd, __kernel_old_time_t *time)
+{
+ __kernel_old_time_t t;
+
+ if (IS_ENABLED(CONFIG_TIME_NS) &&
+ vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
+ vd = __arch_get_timens_vdso_data(vd);
+
+ t = READ_ONCE(vd[CS_HRES_COARSE].basetime[CLOCK_REALTIME].sec);
+
+ if (time)
+ *time = t;
+
+ return t;
+}
+
+static __maybe_unused __kernel_old_time_t __cvdso_time(__kernel_old_time_t *time)
+{
+ return __cvdso_time_data(__arch_get_vdso_data(), time);
+}
+#endif /* VDSO_HAS_TIME */
+
+#ifdef VDSO_HAS_CLOCK_GETRES
+static __maybe_unused
+int __cvdso_clock_getres_common(const struct vdso_data *vd, clockid_t clock,
+ struct __kernel_timespec *res)
+{
+ u32 msk;
+ u64 ns;
+
+ /* Check for negative values or invalid clocks */
+ if (unlikely((u32) clock >= MAX_CLOCKS))
+ return -1;
+
+ if (IS_ENABLED(CONFIG_TIME_NS) &&
+ vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
+ vd = __arch_get_timens_vdso_data(vd);
+
+ /*
+ * Convert the clockid to a bitmask and use it to check which
+ * clocks are handled in the VDSO directly.
+ */
+ msk = 1U << clock;
+ if (msk & (VDSO_HRES | VDSO_RAW)) {
+ /*
+ * Preserves the behaviour of posix_get_hrtimer_res().
+ */
+ ns = READ_ONCE(vd[CS_HRES_COARSE].hrtimer_res);
+ } else if (msk & VDSO_COARSE) {
+ /*
+ * Preserves the behaviour of posix_get_coarse_res().
+ */
+ ns = LOW_RES_NSEC;
+ } else {
+ return -1;
+ }
+
+ if (likely(res)) {
+ res->tv_sec = 0;
+ res->tv_nsec = ns;
+ }
+ return 0;
+}
+
+static __maybe_unused
+int __cvdso_clock_getres_data(const struct vdso_data *vd, clockid_t clock,
+ struct __kernel_timespec *res)
+{
+ int ret = __cvdso_clock_getres_common(vd, clock, res);
+
+ if (unlikely(ret))
+ return clock_getres_fallback(clock, res);
+ return 0;
+}
+
+static __maybe_unused
+int __cvdso_clock_getres(clockid_t clock, struct __kernel_timespec *res)
+{
+ return __cvdso_clock_getres_data(__arch_get_vdso_data(), clock, res);
+}
+
+#ifdef BUILD_VDSO32
+static __maybe_unused int
+__cvdso_clock_getres_time32_data(const struct vdso_data *vd, clockid_t clock,
+ struct old_timespec32 *res)
+{
+ struct __kernel_timespec ts;
+ int ret;
+
+ ret = __cvdso_clock_getres_common(vd, clock, &ts);
+
+ if (unlikely(ret))
+ return clock_getres32_fallback(clock, res);
+
+ if (likely(res)) {
+ res->tv_sec = ts.tv_sec;
+ res->tv_nsec = ts.tv_nsec;
+ }
+ return ret;
+}
+
+static __maybe_unused int
+__cvdso_clock_getres_time32(clockid_t clock, struct old_timespec32 *res)
+{
+ return __cvdso_clock_getres_time32_data(__arch_get_vdso_data(),
+ clock, res);
+}
+#endif /* BUILD_VDSO32 */
+#endif /* VDSO_HAS_CLOCK_GETRES */
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
new file mode 100644
index 000000000..2aa408441
--- /dev/null
+++ b/lib/vsprintf.c
@@ -0,0 +1,3727 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * linux/lib/vsprintf.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
+/* vsprintf.c -- Lars Wirzenius & Linus Torvalds. */
+/*
+ * Wirzenius wrote this portably, Torvalds fucked it up :-)
+ */
+
+/*
+ * Fri Jul 13 2001 Crutcher Dunnavant <crutcher+kernel@datastacks.com>
+ * - changed to provide snprintf and vsnprintf functions
+ * So Feb 1 16:51:32 CET 2004 Juergen Quade <quade@hsnr.de>
+ * - scnprintf and vscnprintf
+ */
+
+#include <linux/stdarg.h>
+#include <linux/build_bug.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/errname.h>
+#include <linux/module.h> /* for KSYM_SYMBOL_LEN */
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/kernel.h>
+#include <linux/kallsyms.h>
+#include <linux/math64.h>
+#include <linux/uaccess.h>
+#include <linux/ioport.h>
+#include <linux/dcache.h>
+#include <linux/cred.h>
+#include <linux/rtc.h>
+#include <linux/sprintf.h>
+#include <linux/time.h>
+#include <linux/uuid.h>
+#include <linux/of.h>
+#include <net/addrconf.h>
+#include <linux/siphash.h>
+#include <linux/compiler.h>
+#include <linux/property.h>
+#include <linux/notifier.h>
+#ifdef CONFIG_BLOCK
+#include <linux/blkdev.h>
+#endif
+
+#include "../mm/internal.h" /* For the trace_print_flags arrays */
+
+#include <asm/page.h> /* for PAGE_SIZE */
+#include <asm/byteorder.h> /* cpu_to_le16 */
+#include <asm/unaligned.h>
+
+#include <linux/string_helpers.h>
+#include "kstrtox.h"
+
+/* Disable pointer hashing if requested */
+bool no_hash_pointers __ro_after_init;
+EXPORT_SYMBOL_GPL(no_hash_pointers);
+
+static noinline unsigned long long simple_strntoull(const char *startp, size_t max_chars, char **endp, unsigned int base)
+{
+ const char *cp;
+ unsigned long long result = 0ULL;
+ size_t prefix_chars;
+ unsigned int rv;
+
+ cp = _parse_integer_fixup_radix(startp, &base);
+ prefix_chars = cp - startp;
+ if (prefix_chars < max_chars) {
+ rv = _parse_integer_limit(cp, base, &result, max_chars - prefix_chars);
+ /* FIXME */
+ cp += (rv & ~KSTRTOX_OVERFLOW);
+ } else {
+ /* Field too short for prefix + digit, skip over without converting */
+ cp = startp + max_chars;
+ }
+
+ if (endp)
+ *endp = (char *)cp;
+
+ return result;
+}
+
+/**
+ * simple_strtoull - convert a string to an unsigned long long
+ * @cp: The start of the string
+ * @endp: A pointer to the end of the parsed string will be placed here
+ * @base: The number base to use
+ *
+ * This function has caveats. Please use kstrtoull instead.
+ */
+noinline
+unsigned long long simple_strtoull(const char *cp, char **endp, unsigned int base)
+{
+ return simple_strntoull(cp, INT_MAX, endp, base);
+}
+EXPORT_SYMBOL(simple_strtoull);
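+
+/*
+ * For example, simple_strtoull("0x1fg", &end, 0) auto-detects base 16,
+ * returns 0x1f and leaves end pointing at "g". Unlike kstrtoull(), an
+ * overflow is silently ignored rather than reported, hence the caveat
+ * above.
+ */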
+
+/**
+ * simple_strtoul - convert a string to an unsigned long
+ * @cp: The start of the string
+ * @endp: A pointer to the end of the parsed string will be placed here
+ * @base: The number base to use
+ *
+ * This function has caveats. Please use kstrtoul instead.
+ */
+unsigned long simple_strtoul(const char *cp, char **endp, unsigned int base)
+{
+ return simple_strtoull(cp, endp, base);
+}
+EXPORT_SYMBOL(simple_strtoul);
+
+/**
+ * simple_strtol - convert a string to a signed long
+ * @cp: The start of the string
+ * @endp: A pointer to the end of the parsed string will be placed here
+ * @base: The number base to use
+ *
+ * This function has caveats. Please use kstrtol instead.
+ */
+long simple_strtol(const char *cp, char **endp, unsigned int base)
+{
+ if (*cp == '-')
+ return -simple_strtoul(cp + 1, endp, base);
+
+ return simple_strtoul(cp, endp, base);
+}
+EXPORT_SYMBOL(simple_strtol);
+
+static long long simple_strntoll(const char *cp, size_t max_chars, char **endp,
+ unsigned int base)
+{
+ /*
+ * If cp[0] == '-' && max_chars == 1, the recursive call below receives
+ * max_chars == 0, which simple_strntoull() handles safely.
+ * If max_chars == 0 on entry, we fall through to simple_strntoull()
+ * directly and the content of *cp is irrelevant.
+ */
+ if (*cp == '-' && max_chars > 0)
+ return -simple_strntoull(cp + 1, max_chars - 1, endp, base);
+
+ return simple_strntoull(cp, max_chars, endp, base);
+}
+
+/**
+ * simple_strtoll - convert a string to a signed long long
+ * @cp: The start of the string
+ * @endp: A pointer to the end of the parsed string will be placed here
+ * @base: The number base to use
+ *
+ * This function has caveats. Please use kstrtoll instead.
+ */
+long long simple_strtoll(const char *cp, char **endp, unsigned int base)
+{
+ return simple_strntoll(cp, INT_MAX, endp, base);
+}
+EXPORT_SYMBOL(simple_strtoll);
+
+static noinline_for_stack
+int skip_atoi(const char **s)
+{
+ int i = 0;
+
+ do {
+ i = i*10 + *((*s)++) - '0';
+ } while (isdigit(**s));
+
+ return i;
+}
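+
+/*
+ * Note the do-while: the caller must ensure **s is a digit on entry.
+ * Given s pointing at "42px", this returns 42 and advances s to "px".
+ */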
+
+/*
+ * Decimal conversion is by far the most typical, and is used for
+ * /proc and /sys data. This directly impacts e.g. top performance
+ * with many processes running. We optimize it for speed by emitting
+ * two characters at a time, using a 200 byte lookup table. This
+ * roughly halves the number of multiplications compared to computing
+ * the digits one at a time. Implementation strongly inspired by the
+ * previous version, which in turn used ideas described at
+ * <http://www.cs.uiowa.edu/~jones/bcd/divide.html> (with permission
+ * from the author, Douglas W. Jones).
+ *
+ * It turns out there is precisely one 26 bit fixed-point
+ * approximation a of 64/100 for which x/100 == (x * (u64)a) >> 32
+ * holds for all x in [0, 10^8-1], namely a = 0x28f5c29. The actual
+ * range happens to be somewhat larger (x <= 1073741898), but that's
+ * irrelevant for our purpose.
+ *
+ * For dividing a number in the range [10^4, 10^6-1] by 100, we still
+ * need a 32x32->64 bit multiply, so we simply use the same constant.
+ *
+ * For dividing a number in the range [100, 10^4-1] by 100, there are
+ * several options. The simplest is (x * 0x147b) >> 19, which is valid
+ * for all x <= 43698.
+ */
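+
+/*
+ * Worked example for the constant above: for x = 12345,
+ * (12345 * 0x28f5c29) >> 32 = 530213713185 >> 32 = 123 = 12345 / 100.
+ */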
+
+static const u16 decpair[100] = {
+#define _(x) (__force u16) cpu_to_le16(((x % 10) | ((x / 10) << 8)) + 0x3030)
+ _( 0), _( 1), _( 2), _( 3), _( 4), _( 5), _( 6), _( 7), _( 8), _( 9),
+ _(10), _(11), _(12), _(13), _(14), _(15), _(16), _(17), _(18), _(19),
+ _(20), _(21), _(22), _(23), _(24), _(25), _(26), _(27), _(28), _(29),
+ _(30), _(31), _(32), _(33), _(34), _(35), _(36), _(37), _(38), _(39),
+ _(40), _(41), _(42), _(43), _(44), _(45), _(46), _(47), _(48), _(49),
+ _(50), _(51), _(52), _(53), _(54), _(55), _(56), _(57), _(58), _(59),
+ _(60), _(61), _(62), _(63), _(64), _(65), _(66), _(67), _(68), _(69),
+ _(70), _(71), _(72), _(73), _(74), _(75), _(76), _(77), _(78), _(79),
+ _(80), _(81), _(82), _(83), _(84), _(85), _(86), _(87), _(88), _(89),
+ _(90), _(91), _(92), _(93), _(94), _(95), _(96), _(97), _(98), _(99),
+#undef _
+};
+
+/*
+ * This will print a single '0' even if r == 0, since we would
+ * immediately jump to out_r where two 0s would be written but only
+ * one of them accounted for in buf. This is needed by ip4_string
+ * below. All other callers pass a non-zero value of r.
+ */
+static noinline_for_stack
+char *put_dec_trunc8(char *buf, unsigned r)
+{
+ unsigned q;
+
+ /* 1 <= r < 10^8 */
+ if (r < 100)
+ goto out_r;
+
+ /* 100 <= r < 10^8 */
+ q = (r * (u64)0x28f5c29) >> 32;
+ *((u16 *)buf) = decpair[r - 100*q];
+ buf += 2;
+
+ /* 1 <= q < 10^6 */
+ if (q < 100)
+ goto out_q;
+
+ /* 100 <= q < 10^6 */
+ r = (q * (u64)0x28f5c29) >> 32;
+ *((u16 *)buf) = decpair[q - 100*r];
+ buf += 2;
+
+ /* 1 <= r < 10^4 */
+ if (r < 100)
+ goto out_r;
+
+ /* 100 <= r < 10^4 */
+ q = (r * 0x147b) >> 19;
+ *((u16 *)buf) = decpair[r - 100*q];
+ buf += 2;
+out_q:
+ /* 1 <= q < 100 */
+ r = q;
+out_r:
+ /* 1 <= r < 100 */
+ *((u16 *)buf) = decpair[r];
+ buf += r < 10 ? 1 : 2;
+ return buf;
+}
+
+#if BITS_PER_LONG == 64 && BITS_PER_LONG_LONG == 64
+static noinline_for_stack
+char *put_dec_full8(char *buf, unsigned r)
+{
+ unsigned q;
+
+ /* 0 <= r < 10^8 */
+ q = (r * (u64)0x28f5c29) >> 32;
+ *((u16 *)buf) = decpair[r - 100*q];
+ buf += 2;
+
+ /* 0 <= q < 10^6 */
+ r = (q * (u64)0x28f5c29) >> 32;
+ *((u16 *)buf) = decpair[q - 100*r];
+ buf += 2;
+
+ /* 0 <= r < 10^4 */
+ q = (r * 0x147b) >> 19;
+ *((u16 *)buf) = decpair[r - 100*q];
+ buf += 2;
+
+ /* 0 <= q < 100 */
+ *((u16 *)buf) = decpair[q];
+ buf += 2;
+ return buf;
+}
+
+static noinline_for_stack
+char *put_dec(char *buf, unsigned long long n)
+{
+ if (n >= 100*1000*1000)
+ buf = put_dec_full8(buf, do_div(n, 100*1000*1000));
+ /* 1 <= n <= 1.8e11 */
+ if (n >= 100*1000*1000)
+ buf = put_dec_full8(buf, do_div(n, 100*1000*1000));
+ /* 1 <= n < 1e8 */
+ return put_dec_trunc8(buf, n);
+}
+
+#elif BITS_PER_LONG == 32 && BITS_PER_LONG_LONG == 64
+
+static void
+put_dec_full4(char *buf, unsigned r)
+{
+ unsigned q;
+
+ /* 0 <= r < 10^4 */
+ q = (r * 0x147b) >> 19;
+ *((u16 *)buf) = decpair[r - 100*q];
+ buf += 2;
+ /* 0 <= q < 100 */
+ *((u16 *)buf) = decpair[q];
+}
+
+/*
+ * Call put_dec_full4 on x % 10000, return x / 10000.
+ * The approximation x/10000 == (x * 0x346DC5D7) >> 43
+ * holds for all x < 1,128,869,999. The largest value this
+ * helper will ever be asked to convert is 1,125,520,955.
+ * (second call in the put_dec code, assuming n is all-ones).
+ */
+static noinline_for_stack
+unsigned put_dec_helper4(char *buf, unsigned x)
+{
+ uint32_t q = (x * (uint64_t)0x346DC5D7) >> 43;
+
+ put_dec_full4(buf, x - q * 10000);
+ return q;
+}
+
+/* Based on code by Douglas W. Jones found at
+ * <http://www.cs.uiowa.edu/~jones/bcd/decimal.html#sixtyfour>
+ * (with permission from the author).
+ * Performs no 64-bit division and hence should be fast on 32-bit machines.
+ */
+static
+char *put_dec(char *buf, unsigned long long n)
+{
+ uint32_t d3, d2, d1, q, h;
+
+ if (n < 100*1000*1000)
+ return put_dec_trunc8(buf, n);
+
+ d1 = ((uint32_t)n >> 16); /* implicit "& 0xffff" */
+ h = (n >> 32);
+ d2 = (h ) & 0xffff;
+ d3 = (h >> 16); /* implicit "& 0xffff" */
+
+ /* n = 2^48 d3 + 2^32 d2 + 2^16 d1 + d0
+ = 281_4749_7671_0656 d3 + 42_9496_7296 d2 + 6_5536 d1 + d0 */
+ q = 656 * d3 + 7296 * d2 + 5536 * d1 + ((uint32_t)n & 0xffff);
+ q = put_dec_helper4(buf, q);
+
+ q += 7671 * d3 + 9496 * d2 + 6 * d1;
+ q = put_dec_helper4(buf+4, q);
+
+ q += 4749 * d3 + 42 * d2;
+ q = put_dec_helper4(buf+8, q);
+
+ q += 281 * d3;
+ buf += 12;
+ if (q)
+ buf = put_dec_trunc8(buf, q);
+ else while (buf[-1] == '0')
+ --buf;
+
+ return buf;
+}
+
+#endif
+
+/*
+ * Convert passed number to decimal string.
+ * Returns the length of string. On buffer overflow, returns 0.
+ *
+ * If speed is not important, use snprintf(). It's easy to read the code.
+ */
+int num_to_str(char *buf, int size, unsigned long long num, unsigned int width)
+{
+ /* put_dec requires 2-byte alignment of the buffer. */
+ char tmp[sizeof(num) * 3] __aligned(2);
+ int idx, len;
+
+ /* put_dec() may work incorrectly for num = 0 (generating "", not "0") */
+ if (num <= 9) {
+ tmp[0] = '0' + num;
+ len = 1;
+ } else {
+ len = put_dec(tmp, num) - tmp;
+ }
+
+ if (len > size || width > size)
+ return 0;
+
+ if (width > len) {
+ width = width - len;
+ for (idx = 0; idx < width; idx++)
+ buf[idx] = ' ';
+ } else {
+ width = 0;
+ }
+
+ for (idx = 0; idx < len; ++idx)
+ buf[idx + width] = tmp[len - idx - 1];
+
+ return len + width;
+}
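+
+/*
+ * For example, num_to_str(buf, 8, 42, 4) writes "  42" (two spaces of
+ * width padding, then the digits) and returns 4. The result is not
+ * NUL-terminated.
+ */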
+
+#define SIGN 1 /* unsigned/signed, must be 1 */
+#define LEFT 2 /* left justified */
+#define PLUS 4 /* show plus */
+#define SPACE 8 /* space if plus */
+#define ZEROPAD 16 /* pad with zero, must be 16 == '0' - ' ' */
+#define SMALL 32 /* use lowercase in hex (must be 32 == 0x20) */
+#define SPECIAL 64 /* prefix hex with "0x", octal with "0" */
+
+static_assert(SIGN == 1);
+static_assert(ZEROPAD == ('0' - ' '));
+static_assert(SMALL == ('a' ^ 'A'));
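+
+/*
+ * The numeric values above are load-bearing: ZEROPAD == '0' - ' ' lets
+ * number() pick the pad character branchlessly as ' ' + (flags & ZEROPAD),
+ * and SMALL == 0x20 is the ASCII case bit, so ORing a digit or uppercase
+ * hex letter with it yields the lowercase form and leaves digits intact.
+ */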
+
+enum format_type {
+ FORMAT_TYPE_NONE, /* Just a string part */
+ FORMAT_TYPE_WIDTH,
+ FORMAT_TYPE_PRECISION,
+ FORMAT_TYPE_CHAR,
+ FORMAT_TYPE_STR,
+ FORMAT_TYPE_PTR,
+ FORMAT_TYPE_PERCENT_CHAR,
+ FORMAT_TYPE_INVALID,
+ FORMAT_TYPE_LONG_LONG,
+ FORMAT_TYPE_ULONG,
+ FORMAT_TYPE_LONG,
+ FORMAT_TYPE_UBYTE,
+ FORMAT_TYPE_BYTE,
+ FORMAT_TYPE_USHORT,
+ FORMAT_TYPE_SHORT,
+ FORMAT_TYPE_UINT,
+ FORMAT_TYPE_INT,
+ FORMAT_TYPE_SIZE_T,
+ FORMAT_TYPE_PTRDIFF
+};
+
+struct printf_spec {
+ unsigned int type:8; /* format_type enum */
+ signed int field_width:24; /* width of output field */
+ unsigned int flags:8; /* flags to number() */
+ unsigned int base:8; /* number base, 8, 10 or 16 only */
+ signed int precision:16; /* # of digits/chars */
+} __packed;
+static_assert(sizeof(struct printf_spec) == 8);
+
+#define FIELD_WIDTH_MAX ((1 << 23) - 1)
+#define PRECISION_MAX ((1 << 15) - 1)
+
+static noinline_for_stack
+char *number(char *buf, char *end, unsigned long long num,
+ struct printf_spec spec)
+{
+ /* put_dec requires 2-byte alignment of the buffer. */
+ char tmp[3 * sizeof(num)] __aligned(2);
+ char sign;
+ char locase;
+ int need_pfx = ((spec.flags & SPECIAL) && spec.base != 10);
+ int i;
+ bool is_zero = num == 0LL;
+ int field_width = spec.field_width;
+ int precision = spec.precision;
+
+ /* locase = 0 or 0x20. ORing digits or letters with 'locase'
+ * produces the same digits or (possibly lowercased) letters */
+ locase = (spec.flags & SMALL);
+ if (spec.flags & LEFT)
+ spec.flags &= ~ZEROPAD;
+ sign = 0;
+ if (spec.flags & SIGN) {
+ if ((signed long long)num < 0) {
+ sign = '-';
+ num = -(signed long long)num;
+ field_width--;
+ } else if (spec.flags & PLUS) {
+ sign = '+';
+ field_width--;
+ } else if (spec.flags & SPACE) {
+ sign = ' ';
+ field_width--;
+ }
+ }
+ if (need_pfx) {
+ if (spec.base == 16)
+ field_width -= 2;
+ else if (!is_zero)
+ field_width--;
+ }
+
+ /* generate full string in tmp[], in reverse order */
+ i = 0;
+ if (num < spec.base)
+ tmp[i++] = hex_asc_upper[num] | locase;
+ else if (spec.base != 10) { /* 8 or 16 */
+ int mask = spec.base - 1;
+ int shift = 3;
+
+ if (spec.base == 16)
+ shift = 4;
+ do {
+ tmp[i++] = (hex_asc_upper[((unsigned char)num) & mask] | locase);
+ num >>= shift;
+ } while (num);
+ } else { /* base 10 */
+ i = put_dec(tmp, num) - tmp;
+ }
+
+ /* printing 100 using %2d gives "100", not "00" */
+ if (i > precision)
+ precision = i;
+ /* leading space padding */
+ field_width -= precision;
+ if (!(spec.flags & (ZEROPAD | LEFT))) {
+ while (--field_width >= 0) {
+ if (buf < end)
+ *buf = ' ';
+ ++buf;
+ }
+ }
+ /* sign */
+ if (sign) {
+ if (buf < end)
+ *buf = sign;
+ ++buf;
+ }
+ /* "0x" / "0" prefix */
+ if (need_pfx) {
+ if (spec.base == 16 || !is_zero) {
+ if (buf < end)
+ *buf = '0';
+ ++buf;
+ }
+ if (spec.base == 16) {
+ if (buf < end)
+ *buf = ('X' | locase);
+ ++buf;
+ }
+ }
+ /* zero or space padding */
+ if (!(spec.flags & LEFT)) {
+ char c = ' ' + (spec.flags & ZEROPAD);
+
+ while (--field_width >= 0) {
+ if (buf < end)
+ *buf = c;
+ ++buf;
+ }
+ }
+ /* hmm even more zero padding? */
+ while (i <= --precision) {
+ if (buf < end)
+ *buf = '0';
+ ++buf;
+ }
+ /* actual digits of result */
+ while (--i >= 0) {
+ if (buf < end)
+ *buf = tmp[i];
+ ++buf;
+ }
+ /* trailing space padding */
+ while (--field_width >= 0) {
+ if (buf < end)
+ *buf = ' ';
+ ++buf;
+ }
+
+ return buf;
+}
+
+static noinline_for_stack
+char *special_hex_number(char *buf, char *end, unsigned long long num, int size)
+{
+ struct printf_spec spec;
+
+ spec.type = FORMAT_TYPE_PTR;
+ spec.field_width = 2 + 2 * size; /* 0x + hex */
+ spec.flags = SPECIAL | SMALL | ZEROPAD;
+ spec.base = 16;
+ spec.precision = -1;
+
+ return number(buf, end, num, spec);
+}
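+
+/*
+ * For example, special_hex_number(buf, end, 0x12, 2) emits "0x0012":
+ * the field width is 2 + 2 * size and the "0x" prefix counts towards it,
+ * with zero padding in between.
+ */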
+
+static void move_right(char *buf, char *end, unsigned len, unsigned spaces)
+{
+ size_t size;
+ if (buf >= end) /* nowhere to put anything */
+ return;
+ size = end - buf;
+ if (size <= spaces) {
+ memset(buf, ' ', size);
+ return;
+ }
+ if (len) {
+ if (len > size - spaces)
+ len = size - spaces;
+ memmove(buf + spaces, buf, len);
+ }
+ memset(buf, ' ', spaces);
+}
+
+/*
+ * Handle field width padding for a string.
+ * @buf: current buffer position
+ * @n: length of string
+ * @end: end of output buffer
+ * @spec: for field width and flags
+ * Returns: new buffer position after padding.
+ */
+static noinline_for_stack
+char *widen_string(char *buf, int n, char *end, struct printf_spec spec)
+{
+ unsigned spaces;
+
+ if (likely(n >= spec.field_width))
+ return buf;
+ /* we want to pad the sucker */
+ spaces = spec.field_width - n;
+ if (!(spec.flags & LEFT)) {
+ move_right(buf - n, end, n, spaces);
+ return buf + spaces;
+ }
+ while (spaces--) {
+ if (buf < end)
+ *buf = ' ';
+ ++buf;
+ }
+ return buf;
+}
+
+/* Handle string from a well known address. */
+static char *string_nocheck(char *buf, char *end, const char *s,
+ struct printf_spec spec)
+{
+ int len = 0;
+ int lim = spec.precision;
+
+ while (lim--) {
+ char c = *s++;
+ if (!c)
+ break;
+ if (buf < end)
+ *buf = c;
+ ++buf;
+ ++len;
+ }
+ return widen_string(buf, len, end, spec);
+}
+
+static char *err_ptr(char *buf, char *end, void *ptr,
+ struct printf_spec spec)
+{
+ int err = PTR_ERR(ptr);
+ const char *sym = errname(err);
+
+ if (sym)
+ return string_nocheck(buf, end, sym, spec);
+
+ /*
+ * Somebody passed ERR_PTR(-1234) or some other non-existing
+ * Efoo - or perhaps CONFIG_SYMBOLIC_ERRNAME=n. Fall back to
+ * printing it as its decimal representation.
+ */
+ spec.flags |= SIGN;
+ spec.base = 10;
+ return number(buf, end, err, spec);
+}
+
+/* Be careful: error messages must fit into the given buffer. */
+static char *error_string(char *buf, char *end, const char *s,
+ struct printf_spec spec)
+{
+ /*
+ * Hard limit to avoid completely insane messages. It actually
+ * works pretty well because most error strings are printed via
+ * the many pointer format modifiers, which expect short output.
+ */
+ if (spec.precision == -1)
+ spec.precision = 2 * sizeof(void *);
+
+ return string_nocheck(buf, end, s, spec);
+}
+
+/*
+ * Do not call any complex external code here. Nested printk()/vsprintf()
+ * might cause infinite loops. Failures might break printk() and would
+ * be hard to debug.
+ */
+static const char *check_pointer_msg(const void *ptr)
+{
+ if (!ptr)
+ return "(null)";
+
+ if ((unsigned long)ptr < PAGE_SIZE || IS_ERR_VALUE(ptr))
+ return "(efault)";
+
+ return NULL;
+}
+
+static int check_pointer(char **buf, char *end, const void *ptr,
+ struct printf_spec spec)
+{
+ const char *err_msg;
+
+ err_msg = check_pointer_msg(ptr);
+ if (err_msg) {
+ *buf = error_string(*buf, end, err_msg, spec);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static noinline_for_stack
+char *string(char *buf, char *end, const char *s,
+ struct printf_spec spec)
+{
+ if (check_pointer(&buf, end, s, spec))
+ return buf;
+
+ return string_nocheck(buf, end, s, spec);
+}
+
+static char *pointer_string(char *buf, char *end,
+ const void *ptr,
+ struct printf_spec spec)
+{
+ spec.base = 16;
+ spec.flags |= SMALL;
+ if (spec.field_width == -1) {
+ spec.field_width = 2 * sizeof(ptr);
+ spec.flags |= ZEROPAD;
+ }
+
+ return number(buf, end, (unsigned long int)ptr, spec);
+}
+
+/* Make pointers available for printing early in the boot sequence. */
+static int debug_boot_weak_hash __ro_after_init;
+
+static int __init debug_boot_weak_hash_enable(char *str)
+{
+ debug_boot_weak_hash = 1;
+ pr_info("debug_boot_weak_hash enabled\n");
+ return 0;
+}
+early_param("debug_boot_weak_hash", debug_boot_weak_hash_enable);
+
+static bool filled_random_ptr_key __read_mostly;
+static siphash_key_t ptr_key __read_mostly;
+
+static int fill_ptr_key(struct notifier_block *nb, unsigned long action, void *data)
+{
+ get_random_bytes(&ptr_key, sizeof(ptr_key));
+
+ /* Pairs with smp_rmb() before reading ptr_key. */
+ smp_wmb();
+ WRITE_ONCE(filled_random_ptr_key, true);
+ return NOTIFY_DONE;
+}
+
+static int __init vsprintf_init_hashval(void)
+{
+ static struct notifier_block fill_ptr_key_nb = { .notifier_call = fill_ptr_key };
+ execute_with_initialized_rng(&fill_ptr_key_nb);
+ return 0;
+}
+subsys_initcall(vsprintf_init_hashval)
+
+/* Maps a pointer to a 32 bit unique identifier. */
+static inline int __ptr_to_hashval(const void *ptr, unsigned long *hashval_out)
+{
+ unsigned long hashval;
+
+ if (!READ_ONCE(filled_random_ptr_key))
+ return -EBUSY;
+
+ /* Pairs with smp_wmb() after writing ptr_key. */
+ smp_rmb();
+
+#ifdef CONFIG_64BIT
+ hashval = (unsigned long)siphash_1u64((u64)ptr, &ptr_key);
+ /*
+ * Mask off the first 32 bits, this makes explicit that we have
+ * modified the address (and 32 bits is plenty for a unique ID).
+ */
+ hashval = hashval & 0xffffffff;
+#else
+ hashval = (unsigned long)siphash_1u32((u32)ptr, &ptr_key);
+#endif
+ *hashval_out = hashval;
+ return 0;
+}
+
+int ptr_to_hashval(const void *ptr, unsigned long *hashval_out)
+{
+ return __ptr_to_hashval(ptr, hashval_out);
+}
+
+static char *ptr_to_id(char *buf, char *end, const void *ptr,
+ struct printf_spec spec)
+{
+ const char *str = sizeof(ptr) == 8 ? "(____ptrval____)" : "(ptrval)";
+ unsigned long hashval;
+ int ret;
+
+ /*
+ * Print the real pointer value for NULL and error pointers,
+ * as they are not actual addresses.
+ */
+ if (IS_ERR_OR_NULL(ptr))
+ return pointer_string(buf, end, ptr, spec);
+
+ /* When debugging early boot use non-cryptographically secure hash. */
+ if (unlikely(debug_boot_weak_hash)) {
+ hashval = hash_long((unsigned long)ptr, 32);
+ return pointer_string(buf, end, (const void *)hashval, spec);
+ }
+
+ ret = __ptr_to_hashval(ptr, &hashval);
+ if (ret) {
+ spec.field_width = 2 * sizeof(ptr);
+ /* string length must be less than default_width */
+ return error_string(buf, end, str, spec);
+ }
+
+ return pointer_string(buf, end, (const void *)hashval, spec);
+}
+
+static char *default_pointer(char *buf, char *end, const void *ptr,
+ struct printf_spec spec)
+{
+ /*
+ * default is to _not_ leak addresses, so hash before printing,
+ * unless no_hash_pointers is specified on the command line.
+ */
+ if (unlikely(no_hash_pointers))
+ return pointer_string(buf, end, ptr, spec);
+
+ return ptr_to_id(buf, end, ptr, spec);
+}
+
+int kptr_restrict __read_mostly;
+
+static noinline_for_stack
+char *restricted_pointer(char *buf, char *end, const void *ptr,
+ struct printf_spec spec)
+{
+ switch (kptr_restrict) {
+ case 0:
+ /* Handle as %p, hash and do _not_ leak addresses. */
+ return default_pointer(buf, end, ptr, spec);
+ case 1: {
+ const struct cred *cred;
+
+ /*
+ * kptr_restrict==1 cannot be used in IRQ context
+ * because its test for CAP_SYSLOG would be meaningless.
+ */
+ if (in_hardirq() || in_serving_softirq() || in_nmi()) {
+ if (spec.field_width == -1)
+ spec.field_width = 2 * sizeof(ptr);
+ return error_string(buf, end, "pK-error", spec);
+ }
+
+ /*
+ * Only print the real pointer value if the current
+ * process has CAP_SYSLOG and is running with the
+ * same credentials it started with. This is because
+ * access to files is checked at open() time, but %pK
+ * checks permission at read() time. We don't want to
+ * leak pointer values if a binary opens a file using
+ * %pK and then elevates privileges before reading it.
+ */
+ cred = current_cred();
+ if (!has_capability_noaudit(current, CAP_SYSLOG) ||
+ !uid_eq(cred->euid, cred->uid) ||
+ !gid_eq(cred->egid, cred->gid))
+ ptr = NULL;
+ break;
+ }
+ case 2:
+ default:
+ /* Always print 0's for %pK */
+ ptr = NULL;
+ break;
+ }
+
+ return pointer_string(buf, end, ptr, spec);
+}
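+
+/*
+ * Summary of the %pK behaviour implemented above: kptr_restrict == 0
+ * hashes like plain %p, == 1 prints the real pointer only for CAP_SYSLOG
+ * holders whose credentials have not changed (and prints "pK-error" from
+ * IRQ/NMI context), and >= 2 always prints zeroes.
+ */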
+
+static noinline_for_stack
+char *dentry_name(char *buf, char *end, const struct dentry *d, struct printf_spec spec,
+ const char *fmt)
+{
+ const char *array[4], *s;
+ const struct dentry *p;
+ int depth;
+ int i, n;
+
+ switch (fmt[1]) {
+ case '2': case '3': case '4':
+ depth = fmt[1] - '0';
+ break;
+ default:
+ depth = 1;
+ }
+
+ rcu_read_lock();
+ for (i = 0; i < depth; i++, d = p) {
+ if (check_pointer(&buf, end, d, spec)) {
+ rcu_read_unlock();
+ return buf;
+ }
+
+ p = READ_ONCE(d->d_parent);
+ array[i] = READ_ONCE(d->d_name.name);
+ if (p == d) {
+ if (i)
+ array[i] = "";
+ i++;
+ break;
+ }
+ }
+ s = array[--i];
+ for (n = 0; n != spec.precision; n++, buf++) {
+ char c = *s++;
+ if (!c) {
+ if (!i)
+ break;
+ c = '/';
+ s = array[--i];
+ }
+ if (buf < end)
+ *buf = c;
+ }
+ rcu_read_unlock();
+ return widen_string(buf, n, end, spec);
+}
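+
+/*
+ * For example, for a dentry backing /usr/bin/ls, %pd prints "ls" and
+ * %pd3 prints "usr/bin/ls": %pd<n> emits up to the last n path
+ * components, n in 2..4.
+ */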
+
+static noinline_for_stack
+char *file_dentry_name(char *buf, char *end, const struct file *f,
+ struct printf_spec spec, const char *fmt)
+{
+ if (check_pointer(&buf, end, f, spec))
+ return buf;
+
+ return dentry_name(buf, end, f->f_path.dentry, spec, fmt);
+}
+#ifdef CONFIG_BLOCK
+static noinline_for_stack
+char *bdev_name(char *buf, char *end, struct block_device *bdev,
+ struct printf_spec spec, const char *fmt)
+{
+ struct gendisk *hd;
+
+ if (check_pointer(&buf, end, bdev, spec))
+ return buf;
+
+ hd = bdev->bd_disk;
+ buf = string(buf, end, hd->disk_name, spec);
+ if (bdev->bd_partno) {
+ if (isdigit(hd->disk_name[strlen(hd->disk_name)-1])) {
+ if (buf < end)
+ *buf = 'p';
+ buf++;
+ }
+ buf = number(buf, end, bdev->bd_partno, spec);
+ }
+ return buf;
+}
+#endif
+
+static noinline_for_stack
+char *symbol_string(char *buf, char *end, void *ptr,
+ struct printf_spec spec, const char *fmt)
+{
+ unsigned long value;
+#ifdef CONFIG_KALLSYMS
+ char sym[KSYM_SYMBOL_LEN];
+#endif
+
+ if (fmt[1] == 'R')
+ ptr = __builtin_extract_return_addr(ptr);
+ value = (unsigned long)ptr;
+
+#ifdef CONFIG_KALLSYMS
+ if (*fmt == 'B' && fmt[1] == 'b')
+ sprint_backtrace_build_id(sym, value);
+ else if (*fmt == 'B')
+ sprint_backtrace(sym, value);
+ else if (*fmt == 'S' && (fmt[1] == 'b' || (fmt[1] == 'R' && fmt[2] == 'b')))
+ sprint_symbol_build_id(sym, value);
+ else if (*fmt != 's')
+ sprint_symbol(sym, value);
+ else
+ sprint_symbol_no_offset(sym, value);
+
+ return string_nocheck(buf, end, sym, spec);
+#else
+ return special_hex_number(buf, end, value, sizeof(void *));
+#endif
+}
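+
+/*
+ * With CONFIG_KALLSYMS, %pS resolves an address to "symbol+offset/size"
+ * (%ps omits the offset, %pSR first untangles the return address and
+ * %pB is tailored for backtrace entries); without it, the raw value is
+ * printed as a fixed-width hex number.
+ */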
+
+static const struct printf_spec default_str_spec = {
+ .field_width = -1,
+ .precision = -1,
+};
+
+static const struct printf_spec default_flag_spec = {
+ .base = 16,
+ .precision = -1,
+ .flags = SPECIAL | SMALL,
+};
+
+static const struct printf_spec default_dec_spec = {
+ .base = 10,
+ .precision = -1,
+};
+
+static const struct printf_spec default_dec02_spec = {
+ .base = 10,
+ .field_width = 2,
+ .precision = -1,
+ .flags = ZEROPAD,
+};
+
+static const struct printf_spec default_dec04_spec = {
+ .base = 10,
+ .field_width = 4,
+ .precision = -1,
+ .flags = ZEROPAD,
+};
+
+static noinline_for_stack
+char *resource_string(char *buf, char *end, struct resource *res,
+ struct printf_spec spec, const char *fmt)
+{
+#ifndef IO_RSRC_PRINTK_SIZE
+#define IO_RSRC_PRINTK_SIZE 6
+#endif
+
+#ifndef MEM_RSRC_PRINTK_SIZE
+#define MEM_RSRC_PRINTK_SIZE 10
+#endif
+ static const struct printf_spec io_spec = {
+ .base = 16,
+ .field_width = IO_RSRC_PRINTK_SIZE,
+ .precision = -1,
+ .flags = SPECIAL | SMALL | ZEROPAD,
+ };
+ static const struct printf_spec mem_spec = {
+ .base = 16,
+ .field_width = MEM_RSRC_PRINTK_SIZE,
+ .precision = -1,
+ .flags = SPECIAL | SMALL | ZEROPAD,
+ };
+ static const struct printf_spec bus_spec = {
+ .base = 16,
+ .field_width = 2,
+ .precision = -1,
+ .flags = SMALL | ZEROPAD,
+ };
+ static const struct printf_spec str_spec = {
+ .field_width = -1,
+ .precision = 10,
+ .flags = LEFT,
+ };
+
+ /* 32-bit res (sizeof==4): 10 chars in dec, 10 in hex ("0x" + 8)
+ * 64-bit res (sizeof==8): 20 chars in dec, 18 in hex ("0x" + 16) */
+#define RSRC_BUF_SIZE ((2 * sizeof(resource_size_t)) + 4)
+#define FLAG_BUF_SIZE (2 * sizeof(res->flags))
+#define DECODED_BUF_SIZE sizeof("[mem - 64bit pref window disabled]")
+#define RAW_BUF_SIZE sizeof("[mem - flags 0x]")
+ char sym[max(2*RSRC_BUF_SIZE + DECODED_BUF_SIZE,
+ 2*RSRC_BUF_SIZE + FLAG_BUF_SIZE + RAW_BUF_SIZE)];
+
+ char *p = sym, *pend = sym + sizeof(sym);
+ int decode = (fmt[0] == 'R') ? 1 : 0;
+ const struct printf_spec *specp;
+
+ if (check_pointer(&buf, end, res, spec))
+ return buf;
+
+ *p++ = '[';
+ if (res->flags & IORESOURCE_IO) {
+ p = string_nocheck(p, pend, "io ", str_spec);
+ specp = &io_spec;
+ } else if (res->flags & IORESOURCE_MEM) {
+ p = string_nocheck(p, pend, "mem ", str_spec);
+ specp = &mem_spec;
+ } else if (res->flags & IORESOURCE_IRQ) {
+ p = string_nocheck(p, pend, "irq ", str_spec);
+ specp = &default_dec_spec;
+ } else if (res->flags & IORESOURCE_DMA) {
+ p = string_nocheck(p, pend, "dma ", str_spec);
+ specp = &default_dec_spec;
+ } else if (res->flags & IORESOURCE_BUS) {
+ p = string_nocheck(p, pend, "bus ", str_spec);
+ specp = &bus_spec;
+ } else {
+ p = string_nocheck(p, pend, "??? ", str_spec);
+ specp = &mem_spec;
+ decode = 0;
+ }
+ if (decode && res->flags & IORESOURCE_UNSET) {
+ p = string_nocheck(p, pend, "size ", str_spec);
+ p = number(p, pend, resource_size(res), *specp);
+ } else {
+ p = number(p, pend, res->start, *specp);
+ if (res->start != res->end) {
+ *p++ = '-';
+ p = number(p, pend, res->end, *specp);
+ }
+ }
+ if (decode) {
+ if (res->flags & IORESOURCE_MEM_64)
+ p = string_nocheck(p, pend, " 64bit", str_spec);
+ if (res->flags & IORESOURCE_PREFETCH)
+ p = string_nocheck(p, pend, " pref", str_spec);
+ if (res->flags & IORESOURCE_WINDOW)
+ p = string_nocheck(p, pend, " window", str_spec);
+ if (res->flags & IORESOURCE_DISABLED)
+ p = string_nocheck(p, pend, " disabled", str_spec);
+ } else {
+ p = string_nocheck(p, pend, " flags ", str_spec);
+ p = number(p, pend, res->flags, default_flag_spec);
+ }
+ *p++ = ']';
+ *p = '\0';
+
+ return string_nocheck(buf, end, sym, spec);
+}
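+
+/*
+ * Example outputs: %pR decodes the flags, e.g.
+ * "[mem 0xfe000000-0xfe0fffff 64bit pref]", while %pr prints them raw,
+ * e.g. "[mem 0xfe000000-0xfe0fffff flags 0x200]".
+ */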
+
+static noinline_for_stack
+char *hex_string(char *buf, char *end, u8 *addr, struct printf_spec spec,
+ const char *fmt)
+{
+ int i, len = 1; /* if '%ph[CDN]' is passed without a field width,
+ it stays negative; fall back to printing one byte */
+ char separator;
+
+ if (spec.field_width == 0)
+ /* nothing to print */
+ return buf;
+
+ if (check_pointer(&buf, end, addr, spec))
+ return buf;
+
+ switch (fmt[1]) {
+ case 'C':
+ separator = ':';
+ break;
+ case 'D':
+ separator = '-';
+ break;
+ case 'N':
+ separator = 0;
+ break;
+ default:
+ separator = ' ';
+ break;
+ }
+
+ if (spec.field_width > 0)
+ len = min_t(int, spec.field_width, 64);
+
+ for (i = 0; i < len; ++i) {
+ if (buf < end)
+ *buf = hex_asc_hi(addr[i]);
+ ++buf;
+ if (buf < end)
+ *buf = hex_asc_lo(addr[i]);
+ ++buf;
+
+ if (separator && i != len - 1) {
+ if (buf < end)
+ *buf = separator;
+ ++buf;
+ }
+ }
+
+ return buf;
+}
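+
+/*
+ * For example, with u8 buf[] = { 0x01, 0x02, 0x03, 0x04 }, "%4ph" prints
+ * "01 02 03 04", "%4phC" prints "01:02:03:04" and "%4phN" prints
+ * "01020304". At most 64 bytes are printed regardless of field width.
+ */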
+
+static noinline_for_stack
+char *bitmap_string(char *buf, char *end, const unsigned long *bitmap,
+ struct printf_spec spec, const char *fmt)
+{
+ const int CHUNKSZ = 32;
+ int nr_bits = max_t(int, spec.field_width, 0);
+ int i, chunksz;
+ bool first = true;
+
+ if (check_pointer(&buf, end, bitmap, spec))
+ return buf;
+
+ /* reused to print numbers */
+ spec = (struct printf_spec){ .flags = SMALL | ZEROPAD, .base = 16 };
+
+ chunksz = nr_bits & (CHUNKSZ - 1);
+ if (chunksz == 0)
+ chunksz = CHUNKSZ;
+
+ i = ALIGN(nr_bits, CHUNKSZ) - CHUNKSZ;
+ for (; i >= 0; i -= CHUNKSZ) {
+ u32 chunkmask, val;
+ int word, bit;
+
+ chunkmask = ((1ULL << chunksz) - 1);
+ word = i / BITS_PER_LONG;
+ bit = i % BITS_PER_LONG;
+ val = (bitmap[word] >> bit) & chunkmask;
+
+ if (!first) {
+ if (buf < end)
+ *buf = ',';
+ buf++;
+ }
+ first = false;
+
+ spec.field_width = DIV_ROUND_UP(chunksz, 4);
+ buf = number(buf, end, val, spec);
+
+ chunksz = CHUNKSZ;
+ }
+ return buf;
+}
+
+static noinline_for_stack
+char *bitmap_list_string(char *buf, char *end, const unsigned long *bitmap,
+ struct printf_spec spec, const char *fmt)
+{
+ int nr_bits = max_t(int, spec.field_width, 0);
+ bool first = true;
+ int rbot, rtop;
+
+ if (check_pointer(&buf, end, bitmap, spec))
+ return buf;
+
+ for_each_set_bitrange(rbot, rtop, bitmap, nr_bits) {
+ if (!first) {
+ if (buf < end)
+ *buf = ',';
+ buf++;
+ }
+ first = false;
+
+ buf = number(buf, end, rbot, default_dec_spec);
+ if (rtop == rbot + 1)
+ continue;
+
+ if (buf < end)
+ *buf = '-';
+ buf = number(++buf, end, rtop - 1, default_dec_spec);
+ }
+ return buf;
+}
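+
+/*
+ * For example, a bitmap with bits 0, 3, 4 and 5 set prints as "0,3-5"
+ * with %*pbl (the field width carries the number of valid bits), and as
+ * the hex chunk "39" with %*pb.
+ */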
+
+static noinline_for_stack
+char *mac_address_string(char *buf, char *end, u8 *addr,
+ struct printf_spec spec, const char *fmt)
+{
+ char mac_addr[sizeof("xx:xx:xx:xx:xx:xx")];
+ char *p = mac_addr;
+ int i;
+ char separator;
+ bool reversed = false;
+
+ if (check_pointer(&buf, end, addr, spec))
+ return buf;
+
+ switch (fmt[1]) {
+ case 'F':
+ separator = '-';
+ break;
+
+ case 'R':
+ reversed = true;
+ fallthrough;
+
+ default:
+ separator = ':';
+ break;
+ }
+
+ for (i = 0; i < 6; i++) {
+ if (reversed)
+ p = hex_byte_pack(p, addr[5 - i]);
+ else
+ p = hex_byte_pack(p, addr[i]);
+
+ if (fmt[0] == 'M' && i != 5)
+ *p++ = separator;
+ }
+ *p = '\0';
+
+ return string_nocheck(buf, end, mac_addr, spec);
+}
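+
+/*
+ * For example, the address {0x00,0x1b,0x21,0x0a,0x0b,0x0c} prints as
+ * "00:1b:21:0a:0b:0c" with %pM, with '-' separators for %pMF, in
+ * reversed (Bluetooth) byte order for %pMR and without separators
+ * for %pm.
+ */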
+
+static noinline_for_stack
+char *ip4_string(char *p, const u8 *addr, const char *fmt)
+{
+ int i;
+ bool leading_zeros = (fmt[0] == 'i');
+ int index;
+ int step;
+
+ switch (fmt[2]) {
+ case 'h':
+#ifdef __BIG_ENDIAN
+ index = 0;
+ step = 1;
+#else
+ index = 3;
+ step = -1;
+#endif
+ break;
+ case 'l':
+ index = 3;
+ step = -1;
+ break;
+ case 'n':
+ case 'b':
+ default:
+ index = 0;
+ step = 1;
+ break;
+ }
+ for (i = 0; i < 4; i++) {
+ char temp[4] __aligned(2); /* hold each IP quad in reverse order */
+ int digits = put_dec_trunc8(temp, addr[index]) - temp;
+ if (leading_zeros) {
+ if (digits < 3)
+ *p++ = '0';
+ if (digits < 2)
+ *p++ = '0';
+ }
+ /* reverse the digits in the quad */
+ while (digits--)
+ *p++ = temp[digits];
+ if (i < 3)
+ *p++ = '.';
+ index += step;
+ }
+ *p = '\0';
+
+ return p;
+}
+
+static noinline_for_stack
+char *ip6_compressed_string(char *p, const char *addr)
+{
+ int i, j, range;
+ unsigned char zerolength[8];
+ int longest = 1;
+ int colonpos = -1;
+ u16 word;
+ u8 hi, lo;
+ bool needcolon = false;
+ bool useIPv4;
+ struct in6_addr in6;
+
+ memcpy(&in6, addr, sizeof(struct in6_addr));
+
+ useIPv4 = ipv6_addr_v4mapped(&in6) || ipv6_addr_is_isatap(&in6);
+
+ memset(zerolength, 0, sizeof(zerolength));
+
+ if (useIPv4)
+ range = 6;
+ else
+ range = 8;
+
+ /* find position of longest 0 run */
+ for (i = 0; i < range; i++) {
+ for (j = i; j < range; j++) {
+ if (in6.s6_addr16[j] != 0)
+ break;
+ zerolength[i]++;
+ }
+ }
+ for (i = 0; i < range; i++) {
+ if (zerolength[i] > longest) {
+ longest = zerolength[i];
+ colonpos = i;
+ }
+ }
+ if (longest == 1) /* don't compress a single 0 */
+ colonpos = -1;
+
+ /* emit address */
+ for (i = 0; i < range; i++) {
+ if (i == colonpos) {
+ if (needcolon || i == 0)
+ *p++ = ':';
+ *p++ = ':';
+ needcolon = false;
+ i += longest - 1;
+ continue;
+ }
+ if (needcolon) {
+ *p++ = ':';
+ needcolon = false;
+ }
+ /* hex u16 without leading 0s */
+ word = ntohs(in6.s6_addr16[i]);
+ hi = word >> 8;
+ lo = word & 0xff;
+ if (hi) {
+ if (hi > 0x0f)
+ p = hex_byte_pack(p, hi);
+ else
+ *p++ = hex_asc_lo(hi);
+ p = hex_byte_pack(p, lo);
+ }
+ else if (lo > 0x0f)
+ p = hex_byte_pack(p, lo);
+ else
+ *p++ = hex_asc_lo(lo);
+ needcolon = true;
+ }
+
+ if (useIPv4) {
+ if (needcolon)
+ *p++ = ':';
+ p = ip4_string(p, &in6.s6_addr[12], "I4");
+ }
+ *p = '\0';
+
+ return p;
+}
+
+static noinline_for_stack
+char *ip6_string(char *p, const char *addr, const char *fmt)
+{
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ p = hex_byte_pack(p, *addr++);
+ p = hex_byte_pack(p, *addr++);
+ if (fmt[0] == 'I' && i != 7)
+ *p++ = ':';
+ }
+ *p = '\0';
+
+ return p;
+}
+
+static noinline_for_stack
+char *ip6_addr_string(char *buf, char *end, const u8 *addr,
+ struct printf_spec spec, const char *fmt)
+{
+ char ip6_addr[sizeof("xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:255.255.255.255")];
+
+ if (fmt[0] == 'I' && fmt[2] == 'c')
+ ip6_compressed_string(ip6_addr, addr);
+ else
+ ip6_string(ip6_addr, addr, fmt);
+
+ return string_nocheck(buf, end, ip6_addr, spec);
+}
+
+static noinline_for_stack
+char *ip4_addr_string(char *buf, char *end, const u8 *addr,
+ struct printf_spec spec, const char *fmt)
+{
+ char ip4_addr[sizeof("255.255.255.255")];
+
+ ip4_string(ip4_addr, addr, fmt);
+
+ return string_nocheck(buf, end, ip4_addr, spec);
+}
+
+static noinline_for_stack
+char *ip6_addr_string_sa(char *buf, char *end, const struct sockaddr_in6 *sa,
+ struct printf_spec spec, const char *fmt)
+{
+ bool have_p = false, have_s = false, have_f = false, have_c = false;
+ char ip6_addr[sizeof("[xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:255.255.255.255]") +
+ sizeof(":12345") + sizeof("/123456789") +
+ sizeof("%1234567890")];
+ char *p = ip6_addr, *pend = ip6_addr + sizeof(ip6_addr);
+ const u8 *addr = (const u8 *) &sa->sin6_addr;
+ char fmt6[2] = { fmt[0], '6' };
+ u8 off = 0;
+
+ fmt++;
+ while (isalpha(*++fmt)) {
+ switch (*fmt) {
+ case 'p':
+ have_p = true;
+ break;
+ case 'f':
+ have_f = true;
+ break;
+ case 's':
+ have_s = true;
+ break;
+ case 'c':
+ have_c = true;
+ break;
+ }
+ }
+
+ if (have_p || have_s || have_f) {
+ *p = '[';
+ off = 1;
+ }
+
+ if (fmt6[0] == 'I' && have_c)
+ p = ip6_compressed_string(ip6_addr + off, addr);
+ else
+ p = ip6_string(ip6_addr + off, addr, fmt6);
+
+ if (have_p || have_s || have_f)
+ *p++ = ']';
+
+ if (have_p) {
+ *p++ = ':';
+ p = number(p, pend, ntohs(sa->sin6_port), spec);
+ }
+ if (have_f) {
+ *p++ = '/';
+ p = number(p, pend, ntohl(sa->sin6_flowinfo &
+ IPV6_FLOWINFO_MASK), spec);
+ }
+ if (have_s) {
+ *p++ = '%';
+ p = number(p, pend, sa->sin6_scope_id, spec);
+ }
+ *p = '\0';
+
+ return string_nocheck(buf, end, ip6_addr, spec);
+}
+
+static noinline_for_stack
+char *ip4_addr_string_sa(char *buf, char *end, const struct sockaddr_in *sa,
+ struct printf_spec spec, const char *fmt)
+{
+ bool have_p = false;
+ char *p, ip4_addr[sizeof("255.255.255.255") + sizeof(":12345")];
+ char *pend = ip4_addr + sizeof(ip4_addr);
+ const u8 *addr = (const u8 *) &sa->sin_addr.s_addr;
+ char fmt4[3] = { fmt[0], '4', 0 };
+
+ fmt++;
+ while (isalpha(*++fmt)) {
+ switch (*fmt) {
+ case 'p':
+ have_p = true;
+ break;
+ case 'h':
+ case 'l':
+ case 'n':
+ case 'b':
+ fmt4[2] = *fmt;
+ break;
+ }
+ }
+
+ p = ip4_string(ip4_addr, addr, fmt4);
+ if (have_p) {
+ *p++ = ':';
+ p = number(p, pend, ntohs(sa->sin_port), spec);
+ }
+ *p = '\0';
+
+ return string_nocheck(buf, end, ip4_addr, spec);
+}
+
+static noinline_for_stack
+char *ip_addr_string(char *buf, char *end, const void *ptr,
+ struct printf_spec spec, const char *fmt)
+{
+ char *err_fmt_msg;
+
+ if (check_pointer(&buf, end, ptr, spec))
+ return buf;
+
+ switch (fmt[1]) {
+ case '6':
+ return ip6_addr_string(buf, end, ptr, spec, fmt);
+ case '4':
+ return ip4_addr_string(buf, end, ptr, spec, fmt);
+ case 'S': {
+ const union {
+ struct sockaddr raw;
+ struct sockaddr_in v4;
+ struct sockaddr_in6 v6;
+ } *sa = ptr;
+
+ switch (sa->raw.sa_family) {
+ case AF_INET:
+ return ip4_addr_string_sa(buf, end, &sa->v4, spec, fmt);
+ case AF_INET6:
+ return ip6_addr_string_sa(buf, end, &sa->v6, spec, fmt);
+ default:
+ return error_string(buf, end, "(einval)", spec);
+ }}
+ }
+
+ err_fmt_msg = fmt[0] == 'i' ? "(%pi?)" : "(%pI?)";
+ return error_string(buf, end, err_fmt_msg, spec);
+}
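+
+/*
+ * For example, a big-endian 32-bit address prints as "127.0.0.1" with
+ * %pI4 ("127.000.000.001" with %pi4), the IPv6 loopback prints as "::1"
+ * with %pI6c, and %pISpc takes a struct sockaddr and appends the port,
+ * e.g. "[::1]:1234".
+ */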
+
+static noinline_for_stack
+char *escaped_string(char *buf, char *end, u8 *addr, struct printf_spec spec,
+ const char *fmt)
+{
+ bool found = true;
+ int count = 1;
+ unsigned int flags = 0;
+ int len;
+
+ if (spec.field_width == 0)
+ return buf; /* nothing to print */
+
+ if (check_pointer(&buf, end, addr, spec))
+ return buf;
+
+ do {
+ switch (fmt[count++]) {
+ case 'a':
+ flags |= ESCAPE_ANY;
+ break;
+ case 'c':
+ flags |= ESCAPE_SPECIAL;
+ break;
+ case 'h':
+ flags |= ESCAPE_HEX;
+ break;
+ case 'n':
+ flags |= ESCAPE_NULL;
+ break;
+ case 'o':
+ flags |= ESCAPE_OCTAL;
+ break;
+ case 'p':
+ flags |= ESCAPE_NP;
+ break;
+ case 's':
+ flags |= ESCAPE_SPACE;
+ break;
+ default:
+ found = false;
+ break;
+ }
+ } while (found);
+
+ if (!flags)
+ flags = ESCAPE_ANY_NP;
+
+ len = spec.field_width < 0 ? 1 : spec.field_width;
+
+ /*
+ * string_escape_mem() writes as many characters as it can to
+ * the given buffer, and returns the total size of the output
+ * had the buffer been big enough.
+ */
+ buf += string_escape_mem(addr, len, buf, buf < end ? end - buf : 0, flags, NULL);
+
+ return buf;
+}
+
+static char *va_format(char *buf, char *end, struct va_format *va_fmt,
+ struct printf_spec spec, const char *fmt)
+{
+ va_list va;
+
+ if (check_pointer(&buf, end, va_fmt, spec))
+ return buf;
+
+ va_copy(va, *va_fmt->va);
+ buf += vsnprintf(buf, end > buf ? end - buf : 0, va_fmt->fmt, va);
+ va_end(va);
+
+ return buf;
+}
+
+static noinline_for_stack
+char *uuid_string(char *buf, char *end, const u8 *addr,
+ struct printf_spec spec, const char *fmt)
+{
+ char uuid[UUID_STRING_LEN + 1];
+ char *p = uuid;
+ int i;
+ const u8 *index = uuid_index;
+ bool uc = false;
+
+ if (check_pointer(&buf, end, addr, spec))
+ return buf;
+
+ switch (*(++fmt)) {
+ case 'L':
+ uc = true;
+ fallthrough;
+ case 'l':
+ index = guid_index;
+ break;
+ case 'B':
+ uc = true;
+ break;
+ }
+
+ for (i = 0; i < 16; i++) {
+ if (uc)
+ p = hex_byte_pack_upper(p, addr[index[i]]);
+ else
+ p = hex_byte_pack(p, addr[index[i]]);
+ switch (i) {
+ case 3:
+ case 5:
+ case 7:
+ case 9:
+ *p++ = '-';
+ break;
+ }
+ }
+
+ *p = 0;
+
+ return string_nocheck(buf, end, uuid, spec);
+}
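+
+/*
+ * For example, the bytes 00 01 ... 0f print with %pUb as
+ * "00010203-0405-0607-0809-0a0b0c0d0e0f"; %pUl swaps the first three
+ * groups into little-endian byte order and %pUB/%pUL select uppercase.
+ */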
+
+static noinline_for_stack
+char *netdev_bits(char *buf, char *end, const void *addr,
+ struct printf_spec spec, const char *fmt)
+{
+ unsigned long long num;
+ int size;
+
+ if (check_pointer(&buf, end, addr, spec))
+ return buf;
+
+ switch (fmt[1]) {
+ case 'F':
+ num = *(const netdev_features_t *)addr;
+ size = sizeof(netdev_features_t);
+ break;
+ default:
+ return error_string(buf, end, "(%pN?)", spec);
+ }
+
+ return special_hex_number(buf, end, num, size);
+}
+
+static noinline_for_stack
+char *fourcc_string(char *buf, char *end, const u32 *fourcc,
+ struct printf_spec spec, const char *fmt)
+{
+ char output[sizeof("0123 little-endian (0x01234567)")];
+ char *p = output;
+ unsigned int i;
+ u32 orig, val;
+
+ if (fmt[1] != 'c' || fmt[2] != 'c')
+ return error_string(buf, end, "(%p4?)", spec);
+
+ if (check_pointer(&buf, end, fourcc, spec))
+ return buf;
+
+ orig = get_unaligned(fourcc);
+ val = orig & ~BIT(31);
+
+ for (i = 0; i < sizeof(u32); i++) {
+ unsigned char c = val >> (i * 8);
+
+ /* Print non-control ASCII characters as-is, dot otherwise */
+ *p++ = isascii(c) && isprint(c) ? c : '.';
+ }
+
+ *p++ = ' ';
+ strcpy(p, orig & BIT(31) ? "big-endian" : "little-endian");
+ p += strlen(p);
+
+ *p++ = ' ';
+ *p++ = '(';
+ p = special_hex_number(p, output + sizeof(output) - 2, orig, sizeof(u32));
+ *p++ = ')';
+ *p = '\0';
+
+ return string(buf, end, output, spec);
+}
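+
+/*
+ * For example, the DRM fourcc XR24 (0x34325258) prints with %p4cc as
+ * "XR24 little-endian (0x34325258)"; a set bit 31 selects the
+ * "big-endian" label instead.
+ */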
+
+static noinline_for_stack
+char *address_val(char *buf, char *end, const void *addr,
+ struct printf_spec spec, const char *fmt)
+{
+ unsigned long long num;
+ int size;
+
+ if (check_pointer(&buf, end, addr, spec))
+ return buf;
+
+ switch (fmt[1]) {
+ case 'd':
+ num = *(const dma_addr_t *)addr;
+ size = sizeof(dma_addr_t);
+ break;
+ case 'p':
+ default:
+ num = *(const phys_addr_t *)addr;
+ size = sizeof(phys_addr_t);
+ break;
+ }
+
+ return special_hex_number(buf, end, num, size);
+}
+
+static noinline_for_stack
+char *date_str(char *buf, char *end, const struct rtc_time *tm, bool r)
+{
+ int year = tm->tm_year + (r ? 0 : 1900);
+ int mon = tm->tm_mon + (r ? 0 : 1);
+
+ buf = number(buf, end, year, default_dec04_spec);
+ if (buf < end)
+ *buf = '-';
+ buf++;
+
+ buf = number(buf, end, mon, default_dec02_spec);
+ if (buf < end)
+ *buf = '-';
+ buf++;
+
+ return number(buf, end, tm->tm_mday, default_dec02_spec);
+}
+
+static noinline_for_stack
+char *time_str(char *buf, char *end, const struct rtc_time *tm, bool r)
+{
+ buf = number(buf, end, tm->tm_hour, default_dec02_spec);
+ if (buf < end)
+ *buf = ':';
+ buf++;
+
+ buf = number(buf, end, tm->tm_min, default_dec02_spec);
+ if (buf < end)
+ *buf = ':';
+ buf++;
+
+ return number(buf, end, tm->tm_sec, default_dec02_spec);
+}
+
+static noinline_for_stack
+char *rtc_str(char *buf, char *end, const struct rtc_time *tm,
+ struct printf_spec spec, const char *fmt)
+{
+ bool have_t = true, have_d = true;
+ bool raw = false, iso8601_separator = true;
+ bool found = true;
+ int count = 2;
+
+ if (check_pointer(&buf, end, tm, spec))
+ return buf;
+
+ switch (fmt[count]) {
+ case 'd':
+ have_t = false;
+ count++;
+ break;
+ case 't':
+ have_d = false;
+ count++;
+ break;
+ }
+
+ do {
+ switch (fmt[count++]) {
+ case 'r':
+ raw = true;
+ break;
+ case 's':
+ iso8601_separator = false;
+ break;
+ default:
+ found = false;
+ break;
+ }
+ } while (found);
+
+ if (have_d)
+ buf = date_str(buf, end, tm, raw);
+ if (have_d && have_t) {
+ if (buf < end)
+ *buf = iso8601_separator ? 'T' : ' ';
+ buf++;
+ }
+ if (have_t)
+ buf = time_str(buf, end, tm, raw);
+
+ return buf;
+}
+
+static noinline_for_stack
+char *time64_str(char *buf, char *end, const time64_t time,
+ struct printf_spec spec, const char *fmt)
+{
+ struct rtc_time rtc_time;
+ struct tm tm;
+
+ time64_to_tm(time, 0, &tm);
+
+ rtc_time.tm_sec = tm.tm_sec;
+ rtc_time.tm_min = tm.tm_min;
+ rtc_time.tm_hour = tm.tm_hour;
+ rtc_time.tm_mday = tm.tm_mday;
+ rtc_time.tm_mon = tm.tm_mon;
+ rtc_time.tm_year = tm.tm_year;
+ rtc_time.tm_wday = tm.tm_wday;
+ rtc_time.tm_yday = tm.tm_yday;
+
+ rtc_time.tm_isdst = 0;
+
+ return rtc_str(buf, end, &rtc_time, spec, fmt);
+}
+
+static noinline_for_stack
+char *time_and_date(char *buf, char *end, void *ptr, struct printf_spec spec,
+ const char *fmt)
+{
+ switch (fmt[1]) {
+ case 'R':
+ return rtc_str(buf, end, (const struct rtc_time *)ptr, spec, fmt);
+ case 'T':
+ return time64_str(buf, end, *(const time64_t *)ptr, spec, fmt);
+ default:
+ return error_string(buf, end, "(%pt?)", spec);
+ }
+}
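+
+/*
+ * For example, %ptR on a struct rtc_time prints "2024-04-11T08:27:49".
+ * %ptRd and %ptRt select the date or time part only, a trailing 's'
+ * swaps the ISO 8601 'T' for a space, 'r' prints the raw (unadjusted)
+ * tm_year/tm_mon values, and %ptT accepts a time64_t instead.
+ */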
+
+static noinline_for_stack
+char *clock(char *buf, char *end, struct clk *clk, struct printf_spec spec,
+ const char *fmt)
+{
+ if (!IS_ENABLED(CONFIG_HAVE_CLK))
+ return error_string(buf, end, "(%pC?)", spec);
+
+ if (check_pointer(&buf, end, clk, spec))
+ return buf;
+
+ switch (fmt[1]) {
+ case 'n':
+ default:
+#ifdef CONFIG_COMMON_CLK
+ return string(buf, end, __clk_get_name(clk), spec);
+#else
+ return ptr_to_id(buf, end, clk, spec);
+#endif
+ }
+}
+
+static
+char *format_flags(char *buf, char *end, unsigned long flags,
+ const struct trace_print_flags *names)
+{
+ unsigned long mask;
+
+ for ( ; flags && names->name; names++) {
+ mask = names->mask;
+ if ((flags & mask) != mask)
+ continue;
+
+ buf = string(buf, end, names->name, default_str_spec);
+
+ flags &= ~mask;
+ if (flags) {
+ if (buf < end)
+ *buf = '|';
+ buf++;
+ }
+ }
+
+ if (flags)
+ buf = number(buf, end, flags, default_flag_spec);
+
+ return buf;
+}
+
+struct page_flags_fields {
+ int width;
+ int shift;
+ int mask;
+ const struct printf_spec *spec;
+ const char *name;
+};
+
+static const struct page_flags_fields pff[] = {
+ {SECTIONS_WIDTH, SECTIONS_PGSHIFT, SECTIONS_MASK,
+ &default_dec_spec, "section"},
+ {NODES_WIDTH, NODES_PGSHIFT, NODES_MASK,
+ &default_dec_spec, "node"},
+ {ZONES_WIDTH, ZONES_PGSHIFT, ZONES_MASK,
+ &default_dec_spec, "zone"},
+ {LAST_CPUPID_WIDTH, LAST_CPUPID_PGSHIFT, LAST_CPUPID_MASK,
+ &default_flag_spec, "lastcpupid"},
+ {KASAN_TAG_WIDTH, KASAN_TAG_PGSHIFT, KASAN_TAG_MASK,
+ &default_flag_spec, "kasantag"},
+};
+
+static
+char *format_page_flags(char *buf, char *end, unsigned long flags)
+{
+ unsigned long main_flags = flags & PAGEFLAGS_MASK;
+ bool append = false;
+ int i;
+
+ buf = number(buf, end, flags, default_flag_spec);
+ if (buf < end)
+ *buf = '(';
+ buf++;
+
+ /* Page flags from the main area. */
+ if (main_flags) {
+ buf = format_flags(buf, end, main_flags, pageflag_names);
+ append = true;
+ }
+
+ /* Page flags from the fields area */
+ for (i = 0; i < ARRAY_SIZE(pff); i++) {
+ /* Skip undefined fields. */
+ if (!pff[i].width)
+ continue;
+
+ /* Format: Flag Name + '=' (equals sign) + Number + '|' (separator) */
+ if (append) {
+ if (buf < end)
+ *buf = '|';
+ buf++;
+ }
+
+ buf = string(buf, end, pff[i].name, default_str_spec);
+ if (buf < end)
+ *buf = '=';
+ buf++;
+ buf = number(buf, end, (flags >> pff[i].shift) & pff[i].mask,
+ *pff[i].spec);
+
+ append = true;
+ }
+ if (buf < end)
+ *buf = ')';
+ buf++;
+
+ return buf;
+}
+
+static
+char *format_page_type(char *buf, char *end, unsigned int page_type)
+{
+ buf = number(buf, end, page_type, default_flag_spec);
+
+ if (buf < end)
+ *buf = '(';
+ buf++;
+
+ if (page_type_has_type(page_type))
+ buf = format_flags(buf, end, ~page_type, pagetype_names);
+
+ if (buf < end)
+ *buf = ')';
+ buf++;
+
+ return buf;
+}
+
+static noinline_for_stack
+char *flags_string(char *buf, char *end, void *flags_ptr,
+ struct printf_spec spec, const char *fmt)
+{
+ unsigned long flags;
+ const struct trace_print_flags *names;
+
+ if (check_pointer(&buf, end, flags_ptr, spec))
+ return buf;
+
+ switch (fmt[1]) {
+ case 'p':
+ return format_page_flags(buf, end, *(unsigned long *)flags_ptr);
+ case 't':
+ return format_page_type(buf, end, *(unsigned int *)flags_ptr);
+ case 'v':
+ flags = *(unsigned long *)flags_ptr;
+ names = vmaflag_names;
+ break;
+ case 'g':
+ flags = (__force unsigned long)(*(gfp_t *)flags_ptr);
+ names = gfpflag_names;
+ break;
+ default:
+ return error_string(buf, end, "(%pG?)", spec);
+ }
+
+ return format_flags(buf, end, flags, names);
+}
+
+static noinline_for_stack
+char *fwnode_full_name_string(struct fwnode_handle *fwnode, char *buf,
+ char *end)
+{
+ int depth;
+
+ /* Loop starting from the root node to the current node. */
+ for (depth = fwnode_count_parents(fwnode); depth >= 0; depth--) {
+ /*
+ * Only get a reference for other nodes (i.e. parent nodes).
+ * fwnode refcount may be 0 here.
+ */
+ struct fwnode_handle *__fwnode = depth ?
+ fwnode_get_nth_parent(fwnode, depth) : fwnode;
+
+ buf = string(buf, end, fwnode_get_name_prefix(__fwnode),
+ default_str_spec);
+ buf = string(buf, end, fwnode_get_name(__fwnode),
+ default_str_spec);
+
+ if (depth)
+ fwnode_handle_put(__fwnode);
+ }
+
+ return buf;
+}
+
+static noinline_for_stack
+char *device_node_string(char *buf, char *end, struct device_node *dn,
+ struct printf_spec spec, const char *fmt)
+{
+ char tbuf[sizeof("xxxx") + 1];
+ const char *p;
+ int ret;
+ char *buf_start = buf;
+ struct property *prop;
+ bool has_mult, pass;
+
+ struct printf_spec str_spec = spec;
+ str_spec.field_width = -1;
+
+ if (fmt[0] != 'F')
+ return error_string(buf, end, "(%pO?)", spec);
+
+ if (!IS_ENABLED(CONFIG_OF))
+ return error_string(buf, end, "(%pOF?)", spec);
+
+ if (check_pointer(&buf, end, dn, spec))
+ return buf;
+
+ /* simple case without any more format specifiers */
+ fmt++;
+ if (fmt[0] == '\0' || strcspn(fmt, "fnpPFcC") > 0)
+ fmt = "f";
+
+ for (pass = false; strspn(fmt, "fnpPFcC"); fmt++, pass = true) {
+ int precision;
+ if (pass) {
+ if (buf < end)
+ *buf = ':';
+ buf++;
+ }
+
+ switch (*fmt) {
+ case 'f': /* full_name */
+ buf = fwnode_full_name_string(of_fwnode_handle(dn), buf,
+ end);
+ break;
+ case 'n': /* name */
+ p = fwnode_get_name(of_fwnode_handle(dn));
+ precision = str_spec.precision;
+ str_spec.precision = strchrnul(p, '@') - p;
+ buf = string(buf, end, p, str_spec);
+ str_spec.precision = precision;
+ break;
+ case 'p': /* phandle */
+ buf = number(buf, end, (unsigned int)dn->phandle, default_dec_spec);
+ break;
+ case 'P': /* path-spec */
+ p = fwnode_get_name(of_fwnode_handle(dn));
+ if (!p[1])
+ p = "/";
+ buf = string(buf, end, p, str_spec);
+ break;
+ case 'F': /* flags */
+ tbuf[0] = of_node_check_flag(dn, OF_DYNAMIC) ? 'D' : '-';
+ tbuf[1] = of_node_check_flag(dn, OF_DETACHED) ? 'd' : '-';
+ tbuf[2] = of_node_check_flag(dn, OF_POPULATED) ? 'P' : '-';
+ tbuf[3] = of_node_check_flag(dn, OF_POPULATED_BUS) ? 'B' : '-';
+ tbuf[4] = 0;
+ buf = string_nocheck(buf, end, tbuf, str_spec);
+ break;
+ case 'c': /* major compatible string */
+ ret = of_property_read_string(dn, "compatible", &p);
+ if (!ret)
+ buf = string(buf, end, p, str_spec);
+ break;
+ case 'C': /* full compatible string */
+ has_mult = false;
+ of_property_for_each_string(dn, "compatible", prop, p) {
+ if (has_mult)
+ buf = string_nocheck(buf, end, ",", str_spec);
+ buf = string_nocheck(buf, end, "\"", str_spec);
+ buf = string(buf, end, p, str_spec);
+ buf = string_nocheck(buf, end, "\"", str_spec);
+
+ has_mult = true;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ return widen_string(buf, buf - buf_start, end, spec);
+}
+
+static noinline_for_stack
+char *fwnode_string(char *buf, char *end, struct fwnode_handle *fwnode,
+ struct printf_spec spec, const char *fmt)
+{
+ struct printf_spec str_spec = spec;
+ char *buf_start = buf;
+
+ str_spec.field_width = -1;
+
+ if (*fmt != 'w')
+ return error_string(buf, end, "(%pf?)", spec);
+
+ if (check_pointer(&buf, end, fwnode, spec))
+ return buf;
+
+ fmt++;
+
+ switch (*fmt) {
+ case 'P': /* name */
+ buf = string(buf, end, fwnode_get_name(fwnode), str_spec);
+ break;
+ case 'f': /* full_name */
+ default:
+ buf = fwnode_full_name_string(fwnode, buf, end);
+ break;
+ }
+
+ return widen_string(buf, buf - buf_start, end, spec);
+}
+
+int __init no_hash_pointers_enable(char *str)
+{
+ if (no_hash_pointers)
+ return 0;
+
+ no_hash_pointers = true;
+
+ pr_warn("**********************************************************\n");
+ pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
+ pr_warn("** **\n");
+ pr_warn("** This system shows unhashed kernel memory addresses **\n");
+ pr_warn("** via the console, logs, and other interfaces. This **\n");
+ pr_warn("** might reduce the security of your system. **\n");
+ pr_warn("** **\n");
+ pr_warn("** If you see this message and you are not debugging **\n");
+ pr_warn("** the kernel, report this immediately to your system **\n");
+ pr_warn("** administrator! **\n");
+ pr_warn("** **\n");
+ pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
+ pr_warn("**********************************************************\n");
+
+ return 0;
+}
+early_param("no_hash_pointers", no_hash_pointers_enable);
+
+/* Used for Rust formatting ('%pA'). */
+char *rust_fmt_argument(char *buf, char *end, void *ptr);
+
+/*
+ * Show a '%p' thing. A kernel extension is that the '%p' is followed
+ * by an extra set of alphanumeric characters that are extended format
+ * specifiers.
+ *
+ * Please update scripts/checkpatch.pl when adding/removing conversion
+ * characters. (Search for "check for vsprintf extension").
+ *
+ * Right now we handle:
+ *
+ * - 'S' For symbolic direct pointers (or function descriptors) with offset
+ * - 's' For symbolic direct pointers (or function descriptors) without offset
+ * - '[Ss]R' as above with __builtin_extract_return_addr() translation
+ * - 'S[R]b' as above with module build ID (for use in backtraces)
+ * - '[Ff]' %pf and %pF were obsoleted and later removed in favor of
+ * %ps and %pS. Be careful when re-using these specifiers.
+ * - 'B' For backtraced symbolic direct pointers with offset
+ * - 'Bb' as above with module build ID (for use in backtraces)
+ * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
+ * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
+ * - 'b[l]' For a bitmap, the number of bits is determined by the field
+ * width which must be explicitly specified either as part of the
+ * format string '%32b[l]' or through '%*b[l]', [l] selects
+ * range-list format instead of hex format
+ * - 'M' For a 6-byte MAC address, it prints the address in the
+ * usual colon-separated hex notation
+ * - 'm' For a 6-byte MAC address, it prints the hex address without colons
+ * - 'MF' For a 6-byte MAC FDDI address, it prints the address
+ * with a dash-separated hex notation
+ * - '[mM]R' For a 6-byte MAC address, Reverse order (Bluetooth)
+ * - 'I' [46] for IPv4/IPv6 addresses printed in the usual way
+ * IPv4 uses dot-separated decimal without leading 0's (1.2.3.4)
+ * IPv6 uses colon separated network-order 16 bit hex with leading 0's
+ * [S][pfs]
+ * Generic IPv4/IPv6 address (struct sockaddr *) that falls back to
+ * [4] or [6] and is able to print port [p], flowinfo [f], scope [s]
+ * - 'i' [46] for 'raw' IPv4/IPv6 addresses
+ * IPv6 omits the colons (01020304...0f)
+ * IPv4 uses dot-separated decimal with leading 0's (010.123.045.006)
+ * [S][pfs]
+ * Generic IPv4/IPv6 address (struct sockaddr *) that falls back to
+ * [4] or [6] and is able to print port [p], flowinfo [f], scope [s]
+ * - '[Ii][4S][hnbl]' IPv4 addresses in host, network, big or little endian order
+ * - 'I[6S]c' for IPv6 addresses printed as specified by
+ * https://tools.ietf.org/html/rfc5952
+ * - 'E[achnops]' For an escaped buffer, where rules are defined by combination
+ * of the following flags (see string_escape_mem() for the
+ * details):
+ * a - ESCAPE_ANY
+ * c - ESCAPE_SPECIAL
+ * h - ESCAPE_HEX
+ * n - ESCAPE_NULL
+ * o - ESCAPE_OCTAL
+ * p - ESCAPE_NP
+ * s - ESCAPE_SPACE
+ * By default ESCAPE_ANY_NP is used.
+ * - 'U' For a 16 byte UUID/GUID, it prints the UUID/GUID in the form
+ * "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
+ * Options for %pU are:
+ * b big endian lower case hex (default)
+ * B big endian UPPER case hex
+ * l little endian lower case hex
+ * L little endian UPPER case hex
+ * big endian output byte order is:
+ * [0][1][2][3]-[4][5]-[6][7]-[8][9]-[10][11][12][13][14][15]
+ * little endian output byte order is:
+ * [3][2][1][0]-[5][4]-[7][6]-[8][9]-[10][11][12][13][14][15]
+ * - 'V' For a struct va_format which contains a format string * and va_list *,
+ * call vsnprintf(->format, *->va_list).
+ * Implements a "recursive vsnprintf".
+ * Do not use this feature without some mechanism to verify the
+ * correctness of the format string and va_list arguments.
+ * - 'K' For a kernel pointer that should be hidden from unprivileged users.
+ * Use only for procfs, sysfs and similar files, not printk(); please
+ * read the documentation (path below) first.
+ * - 'NF' For a netdev_features_t
+ * - '4cc' V4L2 or DRM FourCC code, with endianness and raw numerical value.
+ * - 'h[CDN]' For a variable-length buffer, it prints it as a hex string with
+ * a certain separator (' ' by default):
+ * C colon
+ * D dash
+ * N no separator
+ * The maximum supported length is 64 bytes of the input. Consider
+ * using print_hex_dump() for larger input.
+ * - 'a[pd]' For address types [p] phys_addr_t, [d] dma_addr_t and derivatives
+ * (default assumed to be phys_addr_t, passed by reference)
+ * - 'd[234]' For a dentry name (optionally 2-4 last components)
+ * - 'D[234]' Same as 'd' but for a struct file
+ * - 'g' For block_device name (gendisk + partition number)
+ * - 't[RT][dt][r][s]' For time and date as represented by:
+ * R struct rtc_time
+ * T time64_t
+ * - 'C[n]' For a clock, it prints the name (Common Clock Framework) or
+ * address (legacy clock framework) of the clock; the optional 'n'
+ * suffix is accepted and handled identically
+ * - 'G' For flags to be printed as a collection of symbolic strings that would
+ * construct the specific value. Supported flags given by option:
+ * p page flags (see struct page) given as pointer to unsigned long
+ * g gfp flags (GFP_* and __GFP_*) given as pointer to gfp_t
+ * v vma flags (VM_*) given as pointer to unsigned long
+ * - 'OF[fnpPcCF]' For a device tree object
+ * Without any optional arguments prints the full_name
+ * f device node full_name
+ * n device node name
+ * p device node phandle
+ * P device node path spec (name + @unit)
+ * F device node flags
+ * c major compatible string
+ * C full compatible string
+ * - 'fw[fP]' For a firmware node (struct fwnode_handle) pointer
+ * Without an option prints the full name of the node
+ * f full name
+ * P node name, including a possible unit address
+ * - 'x' For printing the address unmodified. Equivalent to "%lx".
+ * Please read the documentation (path below) before using!
+ * - '[ku]s' For a BPF/tracing related format specifier, e.g. used out of
+ * bpf_trace_printk() where [ku] prefix specifies either kernel (k)
+ * or user (u) memory to probe, and:
+ * s a string, equivalent to "%s" on direct vsnprintf() use
+ *
+ * ** When making changes please also update:
+ * Documentation/core-api/printk-formats.rst
+ *
+ * Note: The default behaviour (unadorned %p) is to hash the address,
+ * rendering it useful as a unique identifier.
+ *
+ * There is also a '%pA' format specifier, but it is only intended to be used
+ * from Rust code to format core::fmt::Arguments. Do *not* use it from C.
+ * See rust/kernel/print.rs for details.
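+ *
+ * Illustrative uses (sketches; the variables are hypothetical):
+ *
+ *     pr_info("range: %pR\n", res);   // e.g. [mem 0x0-0x1f 64bit pref]
+ *     pr_info("hw:    %pM\n", addr);  // e.g. 00:01:02:03:04:05
+ *     pr_info("node:  %pOFf\n", np);  // full device tree path of np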
+ */
+static noinline_for_stack
+char *pointer(const char *fmt, char *buf, char *end, void *ptr,
+ struct printf_spec spec)
+{
+ switch (*fmt) {
+ case 'S':
+ case 's':
+ ptr = dereference_symbol_descriptor(ptr);
+ fallthrough;
+ case 'B':
+ return symbol_string(buf, end, ptr, spec, fmt);
+ case 'R':
+ case 'r':
+ return resource_string(buf, end, ptr, spec, fmt);
+ case 'h':
+ return hex_string(buf, end, ptr, spec, fmt);
+ case 'b':
+ switch (fmt[1]) {
+ case 'l':
+ return bitmap_list_string(buf, end, ptr, spec, fmt);
+ default:
+ return bitmap_string(buf, end, ptr, spec, fmt);
+ }
+ case 'M': /* Colon separated: 00:01:02:03:04:05 */
+ case 'm': /* Contiguous: 000102030405 */
+ /* [mM]F (FDDI) */
+ /* [mM]R (Reverse order; Bluetooth) */
+ return mac_address_string(buf, end, ptr, spec, fmt);
+ case 'I': /* Formatted IP supported
+ * 4: 1.2.3.4
+ * 6: 0001:0203:...:0708
+ * 6c: 1::708 or 1::1.2.3.4
+ */
+ case 'i': /* Contiguous:
+ * 4: 001.002.003.004
+ * 6: 000102...0f
+ */
+ return ip_addr_string(buf, end, ptr, spec, fmt);
+ case 'E':
+ return escaped_string(buf, end, ptr, spec, fmt);
+ case 'U':
+ return uuid_string(buf, end, ptr, spec, fmt);
+ case 'V':
+ return va_format(buf, end, ptr, spec, fmt);
+ case 'K':
+ return restricted_pointer(buf, end, ptr, spec);
+ case 'N':
+ return netdev_bits(buf, end, ptr, spec, fmt);
+ case '4':
+ return fourcc_string(buf, end, ptr, spec, fmt);
+ case 'a':
+ return address_val(buf, end, ptr, spec, fmt);
+ case 'd':
+ return dentry_name(buf, end, ptr, spec, fmt);
+ case 't':
+ return time_and_date(buf, end, ptr, spec, fmt);
+ case 'C':
+ return clock(buf, end, ptr, spec, fmt);
+ case 'D':
+ return file_dentry_name(buf, end, ptr, spec, fmt);
+#ifdef CONFIG_BLOCK
+ case 'g':
+ return bdev_name(buf, end, ptr, spec, fmt);
+#endif
+
+ case 'G':
+ return flags_string(buf, end, ptr, spec, fmt);
+ case 'O':
+ return device_node_string(buf, end, ptr, spec, fmt + 1);
+ case 'f':
+ return fwnode_string(buf, end, ptr, spec, fmt + 1);
+ case 'A':
+ if (!IS_ENABLED(CONFIG_RUST)) {
+ WARN_ONCE(1, "Please remove %%pA from non-Rust code\n");
+ return error_string(buf, end, "(%pA?)", spec);
+ }
+ return rust_fmt_argument(buf, end, ptr);
+ case 'x':
+ return pointer_string(buf, end, ptr, spec);
+ case 'e':
+ /* %pe with a non-ERR_PTR gets treated as plain %p */
+ if (!IS_ERR(ptr))
+ return default_pointer(buf, end, ptr, spec);
+ return err_ptr(buf, end, ptr, spec);
+ case 'u':
+ case 'k':
+ switch (fmt[1]) {
+ case 's':
+ return string(buf, end, ptr, spec);
+ default:
+ return error_string(buf, end, "(einval)", spec);
+ }
+ default:
+ return default_pointer(buf, end, ptr, spec);
+ }
+}
+
+/*
+ * Helper function to decode a printf-style format.
+ * Each call decodes a token from the format and returns the
+ * number of characters read (or likely the delta where it wants
+ * to go on the next call).
+ * The decoded token is returned through the parameters.
+ *
+ * 'h', 'l', or 'L' for integer fields
+ * 'z' support added 23/7/1999 S.H.
+ * 'z' changed to 'Z' --davidm 1/25/99
+ * 'Z' changed to 'z' --adobriyan 2017-01-25
+ * 't' added for ptrdiff_t
+ *
+ * @fmt: the format string
+ * @type: the type of the token returned
+ * @flags: various flags such as the +, -, # tokens
+ * @field_width: overwritten width
+ * @base: base of the number (octal, hex, ...)
+ * @precision: precision of a number
+ * @qualifier: qualifier of a number (long, size_t, ...)
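+ *
+ * Worked example (illustrative): decoding "%-8.3lx" sets the LEFT and
+ * SMALL flags, field_width 8, precision 3, qualifier 'l', base 16, and
+ * spec->type FORMAT_TYPE_ULONG.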
+ */
+static noinline_for_stack
+int format_decode(const char *fmt, struct printf_spec *spec)
+{
+ const char *start = fmt;
+ char qualifier;
+
+ /* we finished early by reading the field width */
+ if (spec->type == FORMAT_TYPE_WIDTH) {
+ if (spec->field_width < 0) {
+ spec->field_width = -spec->field_width;
+ spec->flags |= LEFT;
+ }
+ spec->type = FORMAT_TYPE_NONE;
+ goto precision;
+ }
+
+ /* we finished early by reading the precision */
+ if (spec->type == FORMAT_TYPE_PRECISION) {
+ if (spec->precision < 0)
+ spec->precision = 0;
+
+ spec->type = FORMAT_TYPE_NONE;
+ goto qualifier;
+ }
+
+ /* By default */
+ spec->type = FORMAT_TYPE_NONE;
+
+ for (; *fmt ; ++fmt) {
+ if (*fmt == '%')
+ break;
+ }
+
+ /* Return the current non-format string */
+ if (fmt != start || !*fmt)
+ return fmt - start;
+
+ /* Process flags */
+ spec->flags = 0;
+
+ while (1) { /* this also skips first '%' */
+ bool found = true;
+
+ ++fmt;
+
+ switch (*fmt) {
+ case '-': spec->flags |= LEFT; break;
+ case '+': spec->flags |= PLUS; break;
+ case ' ': spec->flags |= SPACE; break;
+ case '#': spec->flags |= SPECIAL; break;
+ case '0': spec->flags |= ZEROPAD; break;
+ default: found = false;
+ }
+
+ if (!found)
+ break;
+ }
+
+ /* get field width */
+ spec->field_width = -1;
+
+ if (isdigit(*fmt))
+ spec->field_width = skip_atoi(&fmt);
+ else if (*fmt == '*') {
+ /* it's the next argument */
+ spec->type = FORMAT_TYPE_WIDTH;
+ return ++fmt - start;
+ }
+
+precision:
+ /* get the precision */
+ spec->precision = -1;
+ if (*fmt == '.') {
+ ++fmt;
+ if (isdigit(*fmt)) {
+ spec->precision = skip_atoi(&fmt);
+ if (spec->precision < 0)
+ spec->precision = 0;
+ } else if (*fmt == '*') {
+ /* it's the next argument */
+ spec->type = FORMAT_TYPE_PRECISION;
+ return ++fmt - start;
+ }
+ }
+
+qualifier:
+ /* get the conversion qualifier */
+ qualifier = 0;
+ if (*fmt == 'h' || _tolower(*fmt) == 'l' ||
+ *fmt == 'z' || *fmt == 't') {
+ qualifier = *fmt++;
+ if (unlikely(qualifier == *fmt)) {
+ if (qualifier == 'l') {
+ qualifier = 'L';
+ ++fmt;
+ } else if (qualifier == 'h') {
+ qualifier = 'H';
+ ++fmt;
+ }
+ }
+ }
+
+ /* default base */
+ spec->base = 10;
+ switch (*fmt) {
+ case 'c':
+ spec->type = FORMAT_TYPE_CHAR;
+ return ++fmt - start;
+
+ case 's':
+ spec->type = FORMAT_TYPE_STR;
+ return ++fmt - start;
+
+ case 'p':
+ spec->type = FORMAT_TYPE_PTR;
+ return ++fmt - start;
+
+ case '%':
+ spec->type = FORMAT_TYPE_PERCENT_CHAR;
+ return ++fmt - start;
+
+ /* integer number formats - set up the flags and "break" */
+ case 'o':
+ spec->base = 8;
+ break;
+
+ case 'x':
+ spec->flags |= SMALL;
+ fallthrough;
+
+ case 'X':
+ spec->base = 16;
+ break;
+
+ case 'd':
+ case 'i':
+ spec->flags |= SIGN;
+ break;
+ case 'u':
+ break;
+
+ case 'n':
+ /*
+ * Since %n poses a greater security risk than
+ * utility, treat it as any other invalid or
+ * unsupported format specifier.
+ */
+ fallthrough;
+
+ default:
+ WARN_ONCE(1, "Please remove unsupported %%%c in format string\n", *fmt);
+ spec->type = FORMAT_TYPE_INVALID;
+ return fmt - start;
+ }
+
+ if (qualifier == 'L')
+ spec->type = FORMAT_TYPE_LONG_LONG;
+ else if (qualifier == 'l') {
+ BUILD_BUG_ON(FORMAT_TYPE_ULONG + SIGN != FORMAT_TYPE_LONG);
+ spec->type = FORMAT_TYPE_ULONG + (spec->flags & SIGN);
+ } else if (qualifier == 'z') {
+ spec->type = FORMAT_TYPE_SIZE_T;
+ } else if (qualifier == 't') {
+ spec->type = FORMAT_TYPE_PTRDIFF;
+ } else if (qualifier == 'H') {
+ BUILD_BUG_ON(FORMAT_TYPE_UBYTE + SIGN != FORMAT_TYPE_BYTE);
+ spec->type = FORMAT_TYPE_UBYTE + (spec->flags & SIGN);
+ } else if (qualifier == 'h') {
+ BUILD_BUG_ON(FORMAT_TYPE_USHORT + SIGN != FORMAT_TYPE_SHORT);
+ spec->type = FORMAT_TYPE_USHORT + (spec->flags & SIGN);
+ } else {
+ BUILD_BUG_ON(FORMAT_TYPE_UINT + SIGN != FORMAT_TYPE_INT);
+ spec->type = FORMAT_TYPE_UINT + (spec->flags & SIGN);
+ }
+
+ return ++fmt - start;
+}
+
+static void
+set_field_width(struct printf_spec *spec, int width)
+{
+ spec->field_width = width;
+ if (WARN_ONCE(spec->field_width != width, "field width %d too large", width)) {
+ spec->field_width = clamp(width, -FIELD_WIDTH_MAX, FIELD_WIDTH_MAX);
+ }
+}
+
+static void
+set_precision(struct printf_spec *spec, int prec)
+{
+ spec->precision = prec;
+ if (WARN_ONCE(spec->precision != prec, "precision %d too large", prec)) {
+ spec->precision = clamp(prec, 0, PRECISION_MAX);
+ }
+}
+
+/**
+ * vsnprintf - Format a string and place it in a buffer
+ * @buf: The buffer to place the result into
+ * @size: The size of the buffer, including the trailing null space
+ * @fmt: The format string to use
+ * @args: Arguments for the format string
+ *
+ * This function generally follows C99 vsnprintf, but has some
+ * extensions and a few limitations:
+ *
+ * - ``%n`` is unsupported
+ * - ``%p*`` is handled by pointer()
+ *
+ * See pointer() or Documentation/core-api/printk-formats.rst for more
+ * extensive description.
+ *
+ * **Please update the documentation in both places when making changes**
+ *
+ * The return value is the number of characters which would
+ * be generated for the given input, excluding the trailing
+ * '\0', as per ISO C99. If you want to have the exact
+ * number of characters written into @buf as return value
+ * (not including the trailing '\0'), use vscnprintf(). If the
+ * return is greater than or equal to @size, the resulting
+ * string is truncated.
+ *
+ * If you're not already dealing with a va_list consider using snprintf().
+ */
+int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
+{
+ unsigned long long num;
+ char *str, *end;
+ struct printf_spec spec = {0};
+
+ /*
+ * Reject out-of-range values early. Large positive sizes are
+ * used for unknown buffer sizes.
+ */
+ if (WARN_ON_ONCE(size > INT_MAX))
+ return 0;
+
+ str = buf;
+ end = buf + size;
+
+ /* Make sure end is always >= buf */
+ if (end < buf) {
+ end = ((void *)-1);
+ size = end - buf;
+ }
+
+ while (*fmt) {
+ const char *old_fmt = fmt;
+ int read = format_decode(fmt, &spec);
+
+ fmt += read;
+
+ switch (spec.type) {
+ case FORMAT_TYPE_NONE: {
+ int copy = read;
+ if (str < end) {
+ if (copy > end - str)
+ copy = end - str;
+ memcpy(str, old_fmt, copy);
+ }
+ str += read;
+ break;
+ }
+
+ case FORMAT_TYPE_WIDTH:
+ set_field_width(&spec, va_arg(args, int));
+ break;
+
+ case FORMAT_TYPE_PRECISION:
+ set_precision(&spec, va_arg(args, int));
+ break;
+
+ case FORMAT_TYPE_CHAR: {
+ char c;
+
+ if (!(spec.flags & LEFT)) {
+ while (--spec.field_width > 0) {
+ if (str < end)
+ *str = ' ';
+ ++str;
+
+ }
+ }
+ c = (unsigned char) va_arg(args, int);
+ if (str < end)
+ *str = c;
+ ++str;
+ while (--spec.field_width > 0) {
+ if (str < end)
+ *str = ' ';
+ ++str;
+ }
+ break;
+ }
+
+ case FORMAT_TYPE_STR:
+ str = string(str, end, va_arg(args, char *), spec);
+ break;
+
+ case FORMAT_TYPE_PTR:
+ str = pointer(fmt, str, end, va_arg(args, void *),
+ spec);
+ while (isalnum(*fmt))
+ fmt++;
+ break;
+
+ case FORMAT_TYPE_PERCENT_CHAR:
+ if (str < end)
+ *str = '%';
+ ++str;
+ break;
+
+ case FORMAT_TYPE_INVALID:
+ /*
+ * Presumably the arguments passed gcc's type
+ * checking, but there is no safe or sane way
+ * for us to continue parsing the format and
+ * fetching from the va_list; the remaining
+ * specifiers and arguments would be out of
+ * sync.
+ */
+ goto out;
+
+ default:
+ switch (spec.type) {
+ case FORMAT_TYPE_LONG_LONG:
+ num = va_arg(args, long long);
+ break;
+ case FORMAT_TYPE_ULONG:
+ num = va_arg(args, unsigned long);
+ break;
+ case FORMAT_TYPE_LONG:
+ num = va_arg(args, long);
+ break;
+ case FORMAT_TYPE_SIZE_T:
+ if (spec.flags & SIGN)
+ num = va_arg(args, ssize_t);
+ else
+ num = va_arg(args, size_t);
+ break;
+ case FORMAT_TYPE_PTRDIFF:
+ num = va_arg(args, ptrdiff_t);
+ break;
+ case FORMAT_TYPE_UBYTE:
+ num = (unsigned char) va_arg(args, int);
+ break;
+ case FORMAT_TYPE_BYTE:
+ num = (signed char) va_arg(args, int);
+ break;
+ case FORMAT_TYPE_USHORT:
+ num = (unsigned short) va_arg(args, int);
+ break;
+ case FORMAT_TYPE_SHORT:
+ num = (short) va_arg(args, int);
+ break;
+ case FORMAT_TYPE_INT:
+ num = (int) va_arg(args, int);
+ break;
+ default:
+ num = va_arg(args, unsigned int);
+ }
+
+ str = number(str, end, num, spec);
+ }
+ }
+
+out:
+ if (size > 0) {
+ if (str < end)
+ *str = '\0';
+ else
+ end[-1] = '\0';
+ }
+
+ /* the trailing null byte doesn't count towards the total */
+ return str - buf;
+}
+EXPORT_SYMBOL(vsnprintf);
+
+/**
+ * vscnprintf - Format a string and place it in a buffer
+ * @buf: The buffer to place the result into
+ * @size: The size of the buffer, including the trailing null space
+ * @fmt: The format string to use
+ * @args: Arguments for the format string
+ *
+ * The return value is the number of characters which have been written into
+ * the @buf, not including the trailing '\0'. If @size is 0 the function
+ * returns 0.
+ *
+ * If you're not already dealing with a va_list consider using scnprintf().
+ *
+ * See the vsnprintf() documentation for format string extensions over C99.
+ */
+int vscnprintf(char *buf, size_t size, const char *fmt, va_list args)
+{
+ int i;
+
+ if (unlikely(!size))
+ return 0;
+
+ i = vsnprintf(buf, size, fmt, args);
+
+ if (likely(i < size))
+ return i;
+
+ return size - 1;
+}
+EXPORT_SYMBOL(vscnprintf);
+
+/**
+ * snprintf - Format a string and place it in a buffer
+ * @buf: The buffer to place the result into
+ * @size: The size of the buffer, including the trailing null space
+ * @fmt: The format string to use
+ * @...: Arguments for the format string
+ *
+ * The return value is the number of characters which would be
+ * generated for the given input, excluding the trailing null,
+ * as per ISO C99. If the return is greater than or equal to
+ * @size, the resulting string is truncated.
+ *
+ * See the vsnprintf() documentation for format string extensions over C99.
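+ *
+ * Example (illustrative):
+ *
+ *     char buf[8];
+ *     int n = snprintf(buf, sizeof(buf), "%s", "truncated");
+ *     // buf now holds "truncat"; n == 9, the untruncated length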
+ */
+int snprintf(char *buf, size_t size, const char *fmt, ...)
+{
+ va_list args;
+ int i;
+
+ va_start(args, fmt);
+ i = vsnprintf(buf, size, fmt, args);
+ va_end(args);
+
+ return i;
+}
+EXPORT_SYMBOL(snprintf);
+
+/**
+ * scnprintf - Format a string and place it in a buffer
+ * @buf: The buffer to place the result into
+ * @size: The size of the buffer, including the trailing null space
+ * @fmt: The format string to use
+ * @...: Arguments for the format string
+ *
+ * The return value is the number of characters written into @buf, not
+ * including the trailing '\0'. If @size is 0 the function returns 0.
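+ *
+ * Example (illustrative): with an 8-byte @buf,
+ * scnprintf(buf, sizeof(buf), "%s", "truncated") stores "truncat" and
+ * returns 7, where snprintf() would return 9.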
+ */
+int scnprintf(char *buf, size_t size, const char *fmt, ...)
+{
+ va_list args;
+ int i;
+
+ va_start(args, fmt);
+ i = vscnprintf(buf, size, fmt, args);
+ va_end(args);
+
+ return i;
+}
+EXPORT_SYMBOL(scnprintf);
+
+/**
+ * vsprintf - Format a string and place it in a buffer
+ * @buf: The buffer to place the result into
+ * @fmt: The format string to use
+ * @args: Arguments for the format string
+ *
+ * The function returns the number of characters written
+ * into @buf. Use vsnprintf() or vscnprintf() in order to avoid
+ * buffer overflows.
+ *
+ * If you're not already dealing with a va_list consider using sprintf().
+ *
+ * See the vsnprintf() documentation for format string extensions over C99.
+ */
+int vsprintf(char *buf, const char *fmt, va_list args)
+{
+ return vsnprintf(buf, INT_MAX, fmt, args);
+}
+EXPORT_SYMBOL(vsprintf);
+
+/**
+ * sprintf - Format a string and place it in a buffer
+ * @buf: The buffer to place the result into
+ * @fmt: The format string to use
+ * @...: Arguments for the format string
+ *
+ * The function returns the number of characters written
+ * into @buf. Use snprintf() or scnprintf() in order to avoid
+ * buffer overflows.
+ *
+ * See the vsnprintf() documentation for format string extensions over C99.
+ */
+int sprintf(char *buf, const char *fmt, ...)
+{
+ va_list args;
+ int i;
+
+ va_start(args, fmt);
+ i = vsnprintf(buf, INT_MAX, fmt, args);
+ va_end(args);
+
+ return i;
+}
+EXPORT_SYMBOL(sprintf);
+
+#ifdef CONFIG_BINARY_PRINTF
+/*
+ * bprintf service:
+ * vbin_printf() - VA arguments to binary data
+ * bstr_printf() - Binary data to text string
+ */
+
+/**
+ * vbin_printf - Parse a format string and place args' binary value in a buffer
+ * @bin_buf: The buffer to place args' binary value
+ * @size: The size of the buffer (in 32-bit words, not characters)
+ * @fmt: The format string to use
+ * @args: Arguments for the format string
+ *
+ * The format follows C99 vsnprintf, except %n is ignored, and its argument
+ * is skipped.
+ *
+ * The return value is the number of 32-bit words which would be generated for
+ * the given input.
+ *
+ * NOTE:
+ * If the return value is greater than @size, the resulting bin_buf is NOT
+ * valid for bstr_printf().
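+ *
+ * Typical round trip (sketch; the buffer sizes are hypothetical and the
+ * same @fmt must be passed to both calls):
+ *
+ *     u32 bin[64];
+ *     char out[128];
+ *
+ *     vbin_printf(bin, ARRAY_SIZE(bin), fmt, args);
+ *     bstr_printf(out, sizeof(out), fmt, bin);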
+ */
+int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args)
+{
+ struct printf_spec spec = {0};
+ char *str, *end;
+ int width;
+
+ str = (char *)bin_buf;
+ end = (char *)(bin_buf + size);
+
+#define save_arg(type) \
+({ \
+ unsigned long long value; \
+ if (sizeof(type) == 8) { \
+ unsigned long long val8; \
+ str = PTR_ALIGN(str, sizeof(u32)); \
+ val8 = va_arg(args, unsigned long long); \
+ if (str + sizeof(type) <= end) { \
+ *(u32 *)str = *(u32 *)&val8; \
+ *(u32 *)(str + 4) = *((u32 *)&val8 + 1); \
+ } \
+ value = val8; \
+ } else { \
+ unsigned int val4; \
+ str = PTR_ALIGN(str, sizeof(type)); \
+ val4 = va_arg(args, int); \
+ if (str + sizeof(type) <= end) \
+ *(typeof(type) *)str = (type)(long)val4; \
+ value = (unsigned long long)val4; \
+ } \
+ str += sizeof(type); \
+ value; \
+})
+
+ while (*fmt) {
+ int read = format_decode(fmt, &spec);
+
+ fmt += read;
+
+ switch (spec.type) {
+ case FORMAT_TYPE_NONE:
+ case FORMAT_TYPE_PERCENT_CHAR:
+ break;
+ case FORMAT_TYPE_INVALID:
+ goto out;
+
+ case FORMAT_TYPE_WIDTH:
+ case FORMAT_TYPE_PRECISION:
+ width = (int)save_arg(int);
+ /* Pointers may require the width */
+ if (*fmt == 'p')
+ set_field_width(&spec, width);
+ break;
+
+ case FORMAT_TYPE_CHAR:
+ save_arg(char);
+ break;
+
+ case FORMAT_TYPE_STR: {
+ const char *save_str = va_arg(args, char *);
+ const char *err_msg;
+ size_t len;
+
+ err_msg = check_pointer_msg(save_str);
+ if (err_msg)
+ save_str = err_msg;
+
+ len = strlen(save_str) + 1;
+ if (str + len < end)
+ memcpy(str, save_str, len);
+ str += len;
+ break;
+ }
+
+ case FORMAT_TYPE_PTR:
+ /* Dereferenced pointers must be done now */
+ switch (*fmt) {
+ /* Dereference of functions is still OK */
+ case 'S':
+ case 's':
+ case 'x':
+ case 'K':
+ case 'e':
+ save_arg(void *);
+ break;
+ default:
+ if (!isalnum(*fmt)) {
+ save_arg(void *);
+ break;
+ }
+ str = pointer(fmt, str, end, va_arg(args, void *),
+ spec);
+ if (str + 1 < end)
+ *str++ = '\0';
+ else
+ end[-1] = '\0'; /* Must be nul terminated */
+ }
+ /* skip all alphanumeric pointer suffixes */
+ while (isalnum(*fmt))
+ fmt++;
+ break;
+
+ default:
+ switch (spec.type) {
+
+ case FORMAT_TYPE_LONG_LONG:
+ save_arg(long long);
+ break;
+ case FORMAT_TYPE_ULONG:
+ case FORMAT_TYPE_LONG:
+ save_arg(unsigned long);
+ break;
+ case FORMAT_TYPE_SIZE_T:
+ save_arg(size_t);
+ break;
+ case FORMAT_TYPE_PTRDIFF:
+ save_arg(ptrdiff_t);
+ break;
+ case FORMAT_TYPE_UBYTE:
+ case FORMAT_TYPE_BYTE:
+ save_arg(char);
+ break;
+ case FORMAT_TYPE_USHORT:
+ case FORMAT_TYPE_SHORT:
+ save_arg(short);
+ break;
+ default:
+ save_arg(int);
+ }
+ }
+ }
+
+out:
+ return (u32 *)(PTR_ALIGN(str, sizeof(u32))) - bin_buf;
+#undef save_arg
+}
+EXPORT_SYMBOL_GPL(vbin_printf);
+
+/**
+ * bstr_printf - Format a string from binary arguments and place it in a buffer
+ * @buf: The buffer to place the result into
+ * @size: The size of the buffer, including the trailing null space
+ * @fmt: The format string to use
+ * @bin_buf: Binary arguments for the format string
+ *
+ * This function is like C99 vsnprintf, except that vsnprintf gets its
+ * arguments from the stack while bstr_printf gets them from @bin_buf,
+ * a binary buffer generated by vbin_printf().
+ *
+ * The format follows C99 vsnprintf, but has some extensions:
+ * see vsnprintf comment for details.
+ *
+ * The return value is the number of characters which would
+ * be generated for the given input, excluding the trailing
+ * '\0', as per ISO C99. If you want to have the exact
+ * number of characters written into @buf as return value
+ * (not including the trailing '\0'), use vscnprintf(). If the
+ * return is greater than or equal to @size, the resulting
+ * string is truncated.
+ */
+int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
+{
+ struct printf_spec spec = {0};
+ char *str, *end;
+ const char *args = (const char *)bin_buf;
+
+ if (WARN_ON_ONCE(size > INT_MAX))
+ return 0;
+
+ str = buf;
+ end = buf + size;
+
+#define get_arg(type) \
+({ \
+ typeof(type) value; \
+ if (sizeof(type) == 8) { \
+ args = PTR_ALIGN(args, sizeof(u32)); \
+ *(u32 *)&value = *(u32 *)args; \
+ *((u32 *)&value + 1) = *(u32 *)(args + 4); \
+ } else { \
+ args = PTR_ALIGN(args, sizeof(type)); \
+ value = *(typeof(type) *)args; \
+ } \
+ args += sizeof(type); \
+ value; \
+})
+
+ /* Make sure end is always >= buf */
+ if (end < buf) {
+ end = ((void *)-1);
+ size = end - buf;
+ }
+
+ while (*fmt) {
+ const char *old_fmt = fmt;
+ int read = format_decode(fmt, &spec);
+
+ fmt += read;
+
+ switch (spec.type) {
+ case FORMAT_TYPE_NONE: {
+ int copy = read;
+ if (str < end) {
+ if (copy > end - str)
+ copy = end - str;
+ memcpy(str, old_fmt, copy);
+ }
+ str += read;
+ break;
+ }
+
+ case FORMAT_TYPE_WIDTH:
+ set_field_width(&spec, get_arg(int));
+ break;
+
+ case FORMAT_TYPE_PRECISION:
+ set_precision(&spec, get_arg(int));
+ break;
+
+ case FORMAT_TYPE_CHAR: {
+ char c;
+
+ if (!(spec.flags & LEFT)) {
+ while (--spec.field_width > 0) {
+ if (str < end)
+ *str = ' ';
+ ++str;
+ }
+ }
+ c = (unsigned char) get_arg(char);
+ if (str < end)
+ *str = c;
+ ++str;
+ while (--spec.field_width > 0) {
+ if (str < end)
+ *str = ' ';
+ ++str;
+ }
+ break;
+ }
+
+ case FORMAT_TYPE_STR: {
+ const char *str_arg = args;
+ args += strlen(str_arg) + 1;
+ str = string(str, end, (char *)str_arg, spec);
+ break;
+ }
+
+ case FORMAT_TYPE_PTR: {
+ bool process = false;
+ int copy, len;
+ /* Non function dereferences were already done */
+ switch (*fmt) {
+ case 'S':
+ case 's':
+ case 'x':
+ case 'K':
+ case 'e':
+ process = true;
+ break;
+ default:
+ if (!isalnum(*fmt)) {
+ process = true;
+ break;
+ }
+ /* Pointer dereference was already processed */
+ if (str < end) {
+ len = copy = strlen(args);
+ if (copy > end - str)
+ copy = end - str;
+ memcpy(str, args, copy);
+ str += len;
+ args += len + 1;
+ }
+ }
+ if (process)
+ str = pointer(fmt, str, end, get_arg(void *), spec);
+
+ while (isalnum(*fmt))
+ fmt++;
+ break;
+ }
+
+ case FORMAT_TYPE_PERCENT_CHAR:
+ if (str < end)
+ *str = '%';
+ ++str;
+ break;
+
+ case FORMAT_TYPE_INVALID:
+ goto out;
+
+ default: {
+ unsigned long long num;
+
+ switch (spec.type) {
+
+ case FORMAT_TYPE_LONG_LONG:
+ num = get_arg(long long);
+ break;
+ case FORMAT_TYPE_ULONG:
+ case FORMAT_TYPE_LONG:
+ num = get_arg(unsigned long);
+ break;
+ case FORMAT_TYPE_SIZE_T:
+ num = get_arg(size_t);
+ break;
+ case FORMAT_TYPE_PTRDIFF:
+ num = get_arg(ptrdiff_t);
+ break;
+ case FORMAT_TYPE_UBYTE:
+ num = get_arg(unsigned char);
+ break;
+ case FORMAT_TYPE_BYTE:
+ num = get_arg(signed char);
+ break;
+ case FORMAT_TYPE_USHORT:
+ num = get_arg(unsigned short);
+ break;
+ case FORMAT_TYPE_SHORT:
+ num = get_arg(short);
+ break;
+ case FORMAT_TYPE_UINT:
+ num = get_arg(unsigned int);
+ break;
+ default:
+ num = get_arg(int);
+ }
+
+ str = number(str, end, num, spec);
+ } /* default: */
+ } /* switch(spec.type) */
+ } /* while(*fmt) */
+
+out:
+ if (size > 0) {
+ if (str < end)
+ *str = '\0';
+ else
+ end[-1] = '\0';
+ }
+
+#undef get_arg
+
+ /* the trailing null byte doesn't count towards the total */
+ return str - buf;
+}
+EXPORT_SYMBOL_GPL(bstr_printf);
+
+/**
+ * bprintf - Parse a format string and place args' binary value in a buffer
+ * @bin_buf: The buffer to place args' binary value
+ * @size: The size of the buffer (in 32-bit words, not characters)
+ * @fmt: The format string to use
+ * @...: Arguments for the format string
+ *
+ * The function returns the number of 32-bit words (u32) written
+ * into @bin_buf.
+ */
+int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...)
+{
+ va_list args;
+ int ret;
+
+ va_start(args, fmt);
+ ret = vbin_printf(bin_buf, size, fmt, args);
+ va_end(args);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(bprintf);
+
+#endif /* CONFIG_BINARY_PRINTF */
+
+/**
+ * vsscanf - Unformat a buffer into a list of arguments
+ * @buf: input buffer
+ * @fmt: format of buffer
+ * @args: arguments
+ */
+int vsscanf(const char *buf, const char *fmt, va_list args)
+{
+ const char *str = buf;
+ char *next;
+ char digit;
+ int num = 0;
+ u8 qualifier;
+ unsigned int base;
+ union {
+ long long s;
+ unsigned long long u;
+ } val;
+ s16 field_width;
+ bool is_sign;
+
+ while (*fmt) {
+ /*
+ * Skip any white space in the format; white space in the
+ * format matches any amount of white space, including none,
+ * in the input.
+ */
+ if (isspace(*fmt)) {
+ fmt = skip_spaces(++fmt);
+ str = skip_spaces(str);
+ }
+
+ /* anything that is not a conversion must match exactly */
+ if (*fmt != '%' && *fmt) {
+ if (*fmt++ != *str++)
+ break;
+ continue;
+ }
+
+ if (!*fmt)
+ break;
+ ++fmt;
+
+ /*
+ * Skip this conversion; advance both strings to the next
+ * white space.
+ */
+ if (*fmt == '*') {
+ if (!*str)
+ break;
+ while (!isspace(*fmt) && *fmt != '%' && *fmt) {
+ /* '%*[' not yet supported, invalid format */
+ if (*fmt == '[')
+ return num;
+ fmt++;
+ }
+ while (!isspace(*str) && *str)
+ str++;
+ continue;
+ }
+
+ /* get field width */
+ field_width = -1;
+ if (isdigit(*fmt)) {
+ field_width = skip_atoi(&fmt);
+ if (field_width <= 0)
+ break;
+ }
+
+ /* get conversion qualifier */
+ qualifier = -1;
+ if (*fmt == 'h' || _tolower(*fmt) == 'l' ||
+ *fmt == 'z') {
+ qualifier = *fmt++;
+ if (unlikely(qualifier == *fmt)) {
+ if (qualifier == 'h') {
+ qualifier = 'H';
+ fmt++;
+ } else if (qualifier == 'l') {
+ qualifier = 'L';
+ fmt++;
+ }
+ }
+ }
+
+ if (!*fmt)
+ break;
+
+ if (*fmt == 'n') {
+ /* return number of characters read so far */
+ *va_arg(args, int *) = str - buf;
+ ++fmt;
+ continue;
+ }
+
+ if (!*str)
+ break;
+
+ base = 10;
+ is_sign = false;
+
+ switch (*fmt++) {
+ case 'c':
+ {
+ char *s = (char *)va_arg(args, char *);
+ if (field_width == -1)
+ field_width = 1;
+ do {
+ *s++ = *str++;
+ } while (--field_width > 0 && *str);
+ num++;
+ }
+ continue;
+ case 's':
+ {
+ char *s = (char *)va_arg(args, char *);
+ if (field_width == -1)
+ field_width = SHRT_MAX;
+ /* first, skip leading white space in buffer */
+ str = skip_spaces(str);
+
+ /* now copy until next white space */
+ while (*str && !isspace(*str) && field_width--)
+ *s++ = *str++;
+ *s = '\0';
+ num++;
+ }
+ continue;
+ /*
+ * Warning: This implementation of the '[' conversion specifier
+ * deviates from its glibc counterpart in the following ways:
+ * (1) It does NOT support ranges i.e. '-' is NOT a special
+ * character
+ * (2) It cannot match the closing bracket ']' itself
+ * (3) A field width is required
+ * (4) '%*[' (discard matching input) is currently not supported
+ *
+ * Example usage:
+ * ret = sscanf("00:0a:95","%2[^:]:%2[^:]:%2[^:]",
+ * buf1, buf2, buf3);
+ * if (ret < 3)
+ * // etc..
+ */
+ case '[':
+ {
+ char *s = (char *)va_arg(args, char *);
+ DECLARE_BITMAP(set, 256) = {0};
+ unsigned int len = 0;
+ bool negate = (*fmt == '^');
+
+ /* field width is required */
+ if (field_width == -1)
+ return num;
+
+ if (negate)
+ ++fmt;
+
+ for ( ; *fmt && *fmt != ']'; ++fmt, ++len)
+ __set_bit((u8)*fmt, set);
+
+ /* no ']' or no character set found */
+ if (!*fmt || !len)
+ return num;
+ ++fmt;
+
+ if (negate) {
+ bitmap_complement(set, set, 256);
+ /* exclude null '\0' byte */
+ __clear_bit(0, set);
+ }
+
+ /* match must be non-empty */
+ if (!test_bit((u8)*str, set))
+ return num;
+
+ while (test_bit((u8)*str, set) && field_width--)
+ *s++ = *str++;
+ *s = '\0';
+ ++num;
+ }
+ continue;
+ case 'o':
+ base = 8;
+ break;
+ case 'x':
+ case 'X':
+ base = 16;
+ break;
+ case 'i':
+ base = 0;
+ fallthrough;
+ case 'd':
+ is_sign = true;
+ fallthrough;
+ case 'u':
+ break;
+ case '%':
+ /* looking for '%' in str */
+ if (*str++ != '%')
+ return num;
+ continue;
+ default:
+ /* invalid format; stop here */
+ return num;
+ }
+
+ /* have some sort of integer conversion.
+ * first, skip white space in buffer.
+ */
+ str = skip_spaces(str);
+
+ digit = *str;
+ if (is_sign && digit == '-') {
+ if (field_width == 1)
+ break;
+
+ digit = *(str + 1);
+ }
+
+ if (!digit
+ || (base == 16 && !isxdigit(digit))
+ || (base == 10 && !isdigit(digit))
+ || (base == 8 && !isodigit(digit))
+ || (base == 0 && !isdigit(digit)))
+ break;
+
+ if (is_sign)
+ val.s = simple_strntoll(str,
+ field_width >= 0 ? field_width : INT_MAX,
+ &next, base);
+ else
+ val.u = simple_strntoull(str,
+ field_width >= 0 ? field_width : INT_MAX,
+ &next, base);
+
+ switch (qualifier) {
+ case 'H': /* that's 'hh' in format */
+ if (is_sign)
+ *va_arg(args, signed char *) = val.s;
+ else
+ *va_arg(args, unsigned char *) = val.u;
+ break;
+ case 'h':
+ if (is_sign)
+ *va_arg(args, short *) = val.s;
+ else
+ *va_arg(args, unsigned short *) = val.u;
+ break;
+ case 'l':
+ if (is_sign)
+ *va_arg(args, long *) = val.s;
+ else
+ *va_arg(args, unsigned long *) = val.u;
+ break;
+ case 'L':
+ if (is_sign)
+ *va_arg(args, long long *) = val.s;
+ else
+ *va_arg(args, unsigned long long *) = val.u;
+ break;
+ case 'z':
+ *va_arg(args, size_t *) = val.u;
+ break;
+ default:
+ if (is_sign)
+ *va_arg(args, int *) = val.s;
+ else
+ *va_arg(args, unsigned int *) = val.u;
+ break;
+ }
+ num++;
+
+ if (!next)
+ break;
+ str = next;
+ }
+
+ return num;
+}
+EXPORT_SYMBOL(vsscanf);
+
+/**
+ * sscanf - Unformat a buffer into a list of arguments
+ * @buf: input buffer
+ * @fmt: formatting of buffer
+ * @...: resulting arguments
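+ *
+ * Example (illustrative; use_version() is a hypothetical consumer):
+ *
+ *     int maj, min;
+ *
+ *     if (sscanf("4.19", "%d.%d", &maj, &min) == 2)
+ *         use_version(maj, min);  // maj == 4, min == 19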
+ */
+int sscanf(const char *buf, const char *fmt, ...)
+{
+ va_list args;
+ int i;
+
+ va_start(args, fmt);
+ i = vsscanf(buf, fmt, args);
+ va_end(args);
+
+ return i;
+}
+EXPORT_SYMBOL(sscanf);
diff --git a/lib/win_minmax.c b/lib/win_minmax.c
new file mode 100644
index 000000000..ec1050683
--- /dev/null
+++ b/lib/win_minmax.c
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * lib/win_minmax.c: windowed min/max tracker
+ *
+ * Kathleen Nichols' algorithm for tracking the minimum (or maximum)
+ * value of a data stream over some fixed time interval. (E.g.,
+ * the minimum RTT over the past five minutes.) It uses constant
+ * space and constant time per update yet almost always delivers
+ * the same minimum as an implementation that has to keep all the
+ * data in the window.
+ *
+ * The algorithm keeps track of the best, 2nd best & 3rd best min
+ * values, maintaining an invariant that the measurement time of
+ * the n'th best >= n-1'th best. It also makes sure that the three
+ * values are widely separated in the time window since that bounds
+ * the worst-case error when the data is monotonically increasing
+ * over the window.
+ *
+ * Upon getting a new min, we can forget everything earlier because
+ * it has no value - the new min is <= everything else in the window
+ * by definition and it's the most recent. So we restart fresh on
+ * every new min and overwrite the 2nd & 3rd choices. The same property
+ * holds for 2nd & 3rd best.
+ */
+#include <linux/module.h>
+#include <linux/win_minmax.h>
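+
+/*
+ * Illustrative usage (sketch; the caller and variable names are
+ * hypothetical, minmax_reset() comes from <linux/win_minmax.h>):
+ *
+ *     struct minmax rtt;
+ *
+ *     minmax_reset(&rtt, now, first_sample);
+ *     ...
+ *     cur_min = minmax_running_min(&rtt, window, now, new_sample);
+ */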
+
+/* As time advances, update the 1st, 2nd, and 3rd choices. */
+static u32 minmax_subwin_update(struct minmax *m, u32 win,
+ const struct minmax_sample *val)
+{
+ u32 dt = val->t - m->s[0].t;
+
+ if (unlikely(dt > win)) {
+ /*
+ * Passed entire window without a new val so make 2nd
+ * choice the new val & 3rd choice the new 2nd choice.
+ * We may have to iterate this since our 2nd choice
+ * may also be outside the window (we checked on entry
+ * that the third choice was in the window).
+ */
+ m->s[0] = m->s[1];
+ m->s[1] = m->s[2];
+ m->s[2] = *val;
+ if (unlikely(val->t - m->s[0].t > win)) {
+ m->s[0] = m->s[1];
+ m->s[1] = m->s[2];
+ m->s[2] = *val;
+ }
+ } else if (unlikely(m->s[1].t == m->s[0].t) && dt > win/4) {
+ /*
+ * We've passed a quarter of the window without a new val
+ * so take a 2nd choice from the 2nd quarter of the window.
+ */
+ m->s[2] = m->s[1] = *val;
+ } else if (unlikely(m->s[2].t == m->s[1].t) && dt > win/2) {
+ /*
+ * We've passed half the window without finding a new val
+ * so take a 3rd choice from the last half of the window.
+ */
+ m->s[2] = *val;
+ }
+ return m->s[0].v;
+}
+
+/* Check if new measurement updates the 1st, 2nd or 3rd choice max. */
+u32 minmax_running_max(struct minmax *m, u32 win, u32 t, u32 meas)
+{
+ struct minmax_sample val = { .t = t, .v = meas };
+
+ if (unlikely(val.v >= m->s[0].v) || /* found new max? */
+ unlikely(val.t - m->s[2].t > win)) /* nothing left in window? */
+ return minmax_reset(m, t, meas); /* forget earlier samples */
+
+ if (unlikely(val.v >= m->s[1].v))
+ m->s[2] = m->s[1] = val;
+ else if (unlikely(val.v >= m->s[2].v))
+ m->s[2] = val;
+
+ return minmax_subwin_update(m, win, &val);
+}
+EXPORT_SYMBOL(minmax_running_max);
+
+/* Check if new measurement updates the 1st, 2nd or 3rd choice min. */
+u32 minmax_running_min(struct minmax *m, u32 win, u32 t, u32 meas)
+{
+ struct minmax_sample val = { .t = t, .v = meas };
+
+ if (unlikely(val.v <= m->s[0].v) || /* found new min? */
+ unlikely(val.t - m->s[2].t > win)) /* nothing left in window? */
+ return minmax_reset(m, t, meas); /* forget earlier samples */
+
+ if (unlikely(val.v <= m->s[1].v))
+ m->s[2] = m->s[1] = val;
+ else if (unlikely(val.v <= m->s[2].v))
+ m->s[2] = val;
+
+ return minmax_subwin_update(m, win, &val);
+}
diff --git a/lib/xarray.c b/lib/xarray.c
new file mode 100644
index 000000000..39f07bfc4
--- /dev/null
+++ b/lib/xarray.c
@@ -0,0 +1,2309 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * XArray implementation
+ * Copyright (c) 2017-2018 Microsoft Corporation
+ * Copyright (c) 2018-2020 Oracle
+ * Author: Matthew Wilcox <willy@infradead.org>
+ */
+
+#include <linux/bitmap.h>
+#include <linux/export.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/xarray.h>
+
+#include "radix-tree.h"
+
+/*
+ * Coding conventions in this file:
+ *
+ * @xa is used to refer to the entire xarray.
+ * @xas is the 'xarray operation state'. It may be either a pointer to
+ * an xa_state, or an xa_state stored on the stack. This is an unfortunate
+ * ambiguity.
+ * @index is the index of the entry being operated on
+ * @mark is an xa_mark_t; a small number indicating one of the mark bits.
+ * @node refers to an xa_node; usually the primary one being operated on by
+ * this function.
+ * @offset is the index into the slots array inside an xa_node.
+ * @parent refers to the @xa_node closer to the head than @node.
+ * @entry refers to something stored in a slot in the xarray
+ */
+
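+/*
+ * The bottom two bits of xa_flags encode the lock type used by
+ * xas_lock_type() (plain, XA_LOCK_IRQ or XA_LOCK_BH), hence the mask
+ * of 3 below.
+ */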
+static inline unsigned int xa_lock_type(const struct xarray *xa)
+{
+ return (__force unsigned int)xa->xa_flags & 3;
+}
+
+static inline void xas_lock_type(struct xa_state *xas, unsigned int lock_type)
+{
+ if (lock_type == XA_LOCK_IRQ)
+ xas_lock_irq(xas);
+ else if (lock_type == XA_LOCK_BH)
+ xas_lock_bh(xas);
+ else
+ xas_lock(xas);
+}
+
+static inline void xas_unlock_type(struct xa_state *xas, unsigned int lock_type)
+{
+ if (lock_type == XA_LOCK_IRQ)
+ xas_unlock_irq(xas);
+ else if (lock_type == XA_LOCK_BH)
+ xas_unlock_bh(xas);
+ else
+ xas_unlock(xas);
+}
+
+static inline bool xa_track_free(const struct xarray *xa)
+{
+ return xa->xa_flags & XA_FLAGS_TRACK_FREE;
+}
+
+static inline bool xa_zero_busy(const struct xarray *xa)
+{
+ return xa->xa_flags & XA_FLAGS_ZERO_BUSY;
+}
+
+static inline void xa_mark_set(struct xarray *xa, xa_mark_t mark)
+{
+ if (!(xa->xa_flags & XA_FLAGS_MARK(mark)))
+ xa->xa_flags |= XA_FLAGS_MARK(mark);
+}
+
+static inline void xa_mark_clear(struct xarray *xa, xa_mark_t mark)
+{
+ if (xa->xa_flags & XA_FLAGS_MARK(mark))
+ xa->xa_flags &= ~(XA_FLAGS_MARK(mark));
+}
+
+static inline unsigned long *node_marks(struct xa_node *node, xa_mark_t mark)
+{
+ return node->marks[(__force unsigned)mark];
+}
+
+static inline bool node_get_mark(struct xa_node *node,
+ unsigned int offset, xa_mark_t mark)
+{
+ return test_bit(offset, node_marks(node, mark));
+}
+
+/* returns true if the bit was set */
+static inline bool node_set_mark(struct xa_node *node, unsigned int offset,
+ xa_mark_t mark)
+{
+ return __test_and_set_bit(offset, node_marks(node, mark));
+}
+
+/* returns true if the bit was set */
+static inline bool node_clear_mark(struct xa_node *node, unsigned int offset,
+ xa_mark_t mark)
+{
+ return __test_and_clear_bit(offset, node_marks(node, mark));
+}
+
+static inline bool node_any_mark(struct xa_node *node, xa_mark_t mark)
+{
+ return !bitmap_empty(node_marks(node, mark), XA_CHUNK_SIZE);
+}
+
+static inline void node_mark_all(struct xa_node *node, xa_mark_t mark)
+{
+ bitmap_fill(node_marks(node, mark), XA_CHUNK_SIZE);
+}
+
+#define mark_inc(mark) do { \
+ mark = (__force xa_mark_t)((__force unsigned)(mark) + 1); \
+} while (0)
+
+/*
+ * xas_squash_marks() - Merge all marks to the first entry
+ * @xas: Array operation state.
+ *
+ * Set a mark on the first entry if any entry has it set. Clear marks on
+ * all sibling entries.
+ */
+static void xas_squash_marks(const struct xa_state *xas)
+{
+ unsigned int mark = 0;
+ unsigned int limit = xas->xa_offset + xas->xa_sibs + 1;
+
+ if (!xas->xa_sibs)
+ return;
+
+ do {
+ unsigned long *marks = xas->xa_node->marks[mark];
+ if (find_next_bit(marks, limit, xas->xa_offset + 1) == limit)
+ continue;
+ __set_bit(xas->xa_offset, marks);
+ bitmap_clear(marks, xas->xa_offset + 1, xas->xa_sibs);
+ } while (mark++ != (__force unsigned)XA_MARK_MAX);
+}
+
+/* extracts the offset within this node from the index */
+static unsigned int get_offset(unsigned long index, struct xa_node *node)
+{
+ return (index >> node->shift) & XA_CHUNK_MASK;
+}
+
+static void xas_set_offset(struct xa_state *xas)
+{
+ xas->xa_offset = get_offset(xas->xa_index, xas->xa_node);
+}
+
+/* move the index either forwards (find) or backwards (sibling slot) */
+static void xas_move_index(struct xa_state *xas, unsigned long offset)
+{
+ unsigned int shift = xas->xa_node->shift;
+ xas->xa_index &= ~XA_CHUNK_MASK << shift;
+ xas->xa_index += offset << shift;
+}
+
+static void xas_next_offset(struct xa_state *xas)
+{
+ xas->xa_offset++;
+ xas_move_index(xas, xas->xa_offset);
+}
+
+static void *set_bounds(struct xa_state *xas)
+{
+ xas->xa_node = XAS_BOUNDS;
+ return NULL;
+}
+
+/*
+ * Starts a walk. If the @xas is already valid, we assume that it's on
+ * the right path and just return where we've got to. If we're in an
+ * error state, return NULL. If the index is outside the current scope
+ * of the xarray, return NULL without changing @xas->xa_node. Otherwise
+ * set @xas->xa_node to NULL and return the current head of the array.
+ */
+static void *xas_start(struct xa_state *xas)
+{
+ void *entry;
+
+ if (xas_valid(xas))
+ return xas_reload(xas);
+ if (xas_error(xas))
+ return NULL;
+
+ entry = xa_head(xas->xa);
+ if (!xa_is_node(entry)) {
+ if (xas->xa_index)
+ return set_bounds(xas);
+ } else {
+ if ((xas->xa_index >> xa_to_node(entry)->shift) > XA_CHUNK_MASK)
+ return set_bounds(xas);
+ }
+
+ xas->xa_node = NULL;
+ return entry;
+}
+
+static void *xas_descend(struct xa_state *xas, struct xa_node *node)
+{
+ unsigned int offset = get_offset(xas->xa_index, node);
+ void *entry = xa_entry(xas->xa, node, offset);
+
+ xas->xa_node = node;
+ while (xa_is_sibling(entry)) {
+ offset = xa_to_sibling(entry);
+ entry = xa_entry(xas->xa, node, offset);
+ if (node->shift && xa_is_node(entry))
+ entry = XA_RETRY_ENTRY;
+ }
+
+ xas->xa_offset = offset;
+ return entry;
+}
+
+/**
+ * xas_load() - Load an entry from the XArray (advanced).
+ * @xas: XArray operation state.
+ *
+ * Usually walks the @xas to the appropriate state to load the entry
+ * stored at xa_index. However, it will do nothing and return %NULL if
+ * @xas is in an error state. xas_load() will never expand the tree.
+ *
+ * If the xa_state is set up to operate on a multi-index entry, xas_load()
+ * may return %NULL or an internal entry, even if there are entries
+ * present within the range specified by @xas.
+ *
+ * Context: Any context. The caller should hold the xa_lock or the RCU lock.
+ * Return: Usually an entry in the XArray, but see description for exceptions.
+ */
+void *xas_load(struct xa_state *xas)
+{
+ void *entry = xas_start(xas);
+
+ while (xa_is_node(entry)) {
+ struct xa_node *node = xa_to_node(entry);
+
+ if (xas->xa_shift > node->shift)
+ break;
+ entry = xas_descend(xas, node);
+ if (node->shift == 0)
+ break;
+ }
+ return entry;
+}
+EXPORT_SYMBOL_GPL(xas_load);
+
+#define XA_RCU_FREE ((struct xarray *)1)
+
+static void xa_node_free(struct xa_node *node)
+{
+ XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
+ node->array = XA_RCU_FREE;
+ call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
+}
+
+/*
+ * xas_destroy() - Free any resources allocated during the XArray operation.
+ * @xas: XArray operation state.
+ *
+ * Most users will not need to call this function; it is called for you
+ * by xas_nomem().
+ */
+void xas_destroy(struct xa_state *xas)
+{
+ struct xa_node *next, *node = xas->xa_alloc;
+
+ while (node) {
+ XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
+ next = rcu_dereference_raw(node->parent);
+ radix_tree_node_rcu_free(&node->rcu_head);
+ xas->xa_alloc = node = next;
+ }
+}
+
+/**
+ * xas_nomem() - Allocate memory if needed.
+ * @xas: XArray operation state.
+ * @gfp: Memory allocation flags.
+ *
+ * If we need to add new nodes to the XArray, we try to allocate memory
+ * with GFP_NOWAIT while holding the lock, which will usually succeed.
+ * If it fails, @xas is flagged as needing memory to continue. The caller
+ * should drop the lock and call xas_nomem(). If xas_nomem() succeeds,
+ * the caller should retry the operation.
+ *
+ * Forward progress is guaranteed as one node is allocated here and
+ * stored in the xa_state where it will be found by xas_alloc(). More
+ * nodes will likely be found in the slab allocator, but we do not tie
+ * them up here.
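+ *
+ * A typical caller retries under the lock (sketch, following
+ * Documentation/core-api/xarray.rst):
+ *
+ *     do {
+ *         xas_lock(&xas);
+ *         xas_store(&xas, item);
+ *         xas_unlock(&xas);
+ *     } while (xas_nomem(&xas, GFP_KERNEL));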
+ *
+ * Return: true if memory was needed, and was successfully allocated.
+ */
+bool xas_nomem(struct xa_state *xas, gfp_t gfp)
+{
+ if (xas->xa_node != XA_ERROR(-ENOMEM)) {
+ xas_destroy(xas);
+ return false;
+ }
+ if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
+ gfp |= __GFP_ACCOUNT;
+ xas->xa_alloc = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
+ if (!xas->xa_alloc)
+ return false;
+ xas->xa_alloc->parent = NULL;
+ XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
+ xas->xa_node = XAS_RESTART;
+ return true;
+}
+EXPORT_SYMBOL_GPL(xas_nomem);
+
+/*
+ * __xas_nomem() - Drop locks and allocate memory if needed.
+ * @xas: XArray operation state.
+ * @gfp: Memory allocation flags.
+ *
+ * Internal variant of xas_nomem().
+ *
+ * Return: true if memory was needed, and was successfully allocated.
+ */
+static bool __xas_nomem(struct xa_state *xas, gfp_t gfp)
+ __must_hold(xas->xa->xa_lock)
+{
+ unsigned int lock_type = xa_lock_type(xas->xa);
+
+ if (xas->xa_node != XA_ERROR(-ENOMEM)) {
+ xas_destroy(xas);
+ return false;
+ }
+ if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
+ gfp |= __GFP_ACCOUNT;
+ if (gfpflags_allow_blocking(gfp)) {
+ xas_unlock_type(xas, lock_type);
+ xas->xa_alloc = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
+ xas_lock_type(xas, lock_type);
+ } else {
+ xas->xa_alloc = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
+ }
+ if (!xas->xa_alloc)
+ return false;
+ xas->xa_alloc->parent = NULL;
+ XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
+ xas->xa_node = XAS_RESTART;
+ return true;
+}
+
+static void xas_update(struct xa_state *xas, struct xa_node *node)
+{
+ if (xas->xa_update)
+ xas->xa_update(node);
+ else
+ XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
+}
+
+static void *xas_alloc(struct xa_state *xas, unsigned int shift)
+{
+ struct xa_node *parent = xas->xa_node;
+ struct xa_node *node = xas->xa_alloc;
+
+ if (xas_invalid(xas))
+ return NULL;
+
+ if (node) {
+ xas->xa_alloc = NULL;
+ } else {
+ gfp_t gfp = GFP_NOWAIT | __GFP_NOWARN;
+
+ if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
+ gfp |= __GFP_ACCOUNT;
+
+ node = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
+ if (!node) {
+ xas_set_err(xas, -ENOMEM);
+ return NULL;
+ }
+ }
+
+ if (parent) {
+ node->offset = xas->xa_offset;
+ parent->count++;
+ XA_NODE_BUG_ON(node, parent->count > XA_CHUNK_SIZE);
+ xas_update(xas, parent);
+ }
+ XA_NODE_BUG_ON(node, shift > BITS_PER_LONG);
+ XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
+ node->shift = shift;
+ node->count = 0;
+ node->nr_values = 0;
+ RCU_INIT_POINTER(node->parent, xas->xa_node);
+ node->array = xas->xa;
+
+ return node;
+}
+
+#ifdef CONFIG_XARRAY_MULTI
+/* Returns the number of indices covered by a given xa_state */
+static unsigned long xas_size(const struct xa_state *xas)
+{
+ return (xas->xa_sibs + 1UL) << xas->xa_shift;
+}
+#endif
+
+/*
+ * Use this to calculate the maximum index that will need to be created
+ * in order to add the entry described by @xas. Because we cannot store a
+ * multi-index entry at index 0, the calculation is a little more complex
+ * than you might expect.
+ */
+static unsigned long xas_max(struct xa_state *xas)
+{
+ unsigned long max = xas->xa_index;
+
+#ifdef CONFIG_XARRAY_MULTI
+ if (xas->xa_shift || xas->xa_sibs) {
+ unsigned long mask = xas_size(xas) - 1;
+ max |= mask;
+ if (mask == max)
+ max++;
+ }
+#endif
+
+ return max;
+}
+
+/* The maximum index that can be contained in the array without expanding it */
+static unsigned long max_index(void *entry)
+{
+ if (!xa_is_node(entry))
+ return 0;
+ return (XA_CHUNK_SIZE << xa_to_node(entry)->shift) - 1;
+}
+
+static void xas_shrink(struct xa_state *xas)
+{
+ struct xarray *xa = xas->xa;
+ struct xa_node *node = xas->xa_node;
+
+ for (;;) {
+ void *entry;
+
+ XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
+ if (node->count != 1)
+ break;
+ entry = xa_entry_locked(xa, node, 0);
+ if (!entry)
+ break;
+ if (!xa_is_node(entry) && node->shift)
+ break;
+ if (xa_is_zero(entry) && xa_zero_busy(xa))
+ entry = NULL;
+ xas->xa_node = XAS_BOUNDS;
+
+ RCU_INIT_POINTER(xa->xa_head, entry);
+ if (xa_track_free(xa) && !node_get_mark(node, 0, XA_FREE_MARK))
+ xa_mark_clear(xa, XA_FREE_MARK);
+
+ node->count = 0;
+ node->nr_values = 0;
+ if (!xa_is_node(entry))
+ RCU_INIT_POINTER(node->slots[0], XA_RETRY_ENTRY);
+ xas_update(xas, node);
+ xa_node_free(node);
+ if (!xa_is_node(entry))
+ break;
+ node = xa_to_node(entry);
+ node->parent = NULL;
+ }
+}
+
+/*
+ * xas_delete_node() - Attempt to delete an xa_node
+ * @xas: Array operation state.
+ *
+ * Attempts to delete the @xas->xa_node. This will fail if @xas->xa_node has
+ * a non-zero reference count.
+ */
+static void xas_delete_node(struct xa_state *xas)
+{
+ struct xa_node *node = xas->xa_node;
+
+ for (;;) {
+ struct xa_node *parent;
+
+ XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
+ if (node->count)
+ break;
+
+ parent = xa_parent_locked(xas->xa, node);
+ xas->xa_node = parent;
+ xas->xa_offset = node->offset;
+ xa_node_free(node);
+
+ if (!parent) {
+ xas->xa->xa_head = NULL;
+ xas->xa_node = XAS_BOUNDS;
+ return;
+ }
+
+ parent->slots[xas->xa_offset] = NULL;
+ parent->count--;
+ XA_NODE_BUG_ON(parent, parent->count > XA_CHUNK_SIZE);
+ node = parent;
+ xas_update(xas, node);
+ }
+
+ if (!node->parent)
+ xas_shrink(xas);
+}
+
+/**
+ * xas_free_nodes() - Free this node and all nodes that it references
+ * @xas: Array operation state.
+ * @top: Node to free
+ *
+ * This node has been removed from the tree. We must now free it and all
+ * of its subnodes. There may be RCU walkers with references into the tree,
+ * so we must replace all entries with retry markers.
+ */
+static void xas_free_nodes(struct xa_state *xas, struct xa_node *top)
+{
+ unsigned int offset = 0;
+ struct xa_node *node = top;
+
+ for (;;) {
+ void *entry = xa_entry_locked(xas->xa, node, offset);
+
+ if (node->shift && xa_is_node(entry)) {
+ node = xa_to_node(entry);
+ offset = 0;
+ continue;
+ }
+ if (entry)
+ RCU_INIT_POINTER(node->slots[offset], XA_RETRY_ENTRY);
+ offset++;
+ while (offset == XA_CHUNK_SIZE) {
+ struct xa_node *parent;
+
+ parent = xa_parent_locked(xas->xa, node);
+ offset = node->offset + 1;
+ node->count = 0;
+ node->nr_values = 0;
+ xas_update(xas, node);
+ xa_node_free(node);
+ if (node == top)
+ return;
+ node = parent;
+ }
+ }
+}
+
+/*
+ * xas_expand adds nodes to the head of the tree until it has reached
+ * sufficient height to contain @xas->xa_index.
+ */
+static int xas_expand(struct xa_state *xas, void *head)
+{
+ struct xarray *xa = xas->xa;
+ struct xa_node *node = NULL;
+ unsigned int shift = 0;
+ unsigned long max = xas_max(xas);
+
+ if (!head) {
+ if (max == 0)
+ return 0;
+ while ((max >> shift) >= XA_CHUNK_SIZE)
+ shift += XA_CHUNK_SHIFT;
+ return shift + XA_CHUNK_SHIFT;
+ } else if (xa_is_node(head)) {
+ node = xa_to_node(head);
+ shift = node->shift + XA_CHUNK_SHIFT;
+ }
+ xas->xa_node = NULL;
+
+ while (max > max_index(head)) {
+ xa_mark_t mark = 0;
+
+ XA_NODE_BUG_ON(node, shift > BITS_PER_LONG);
+ node = xas_alloc(xas, shift);
+ if (!node)
+ return -ENOMEM;
+
+ node->count = 1;
+ if (xa_is_value(head))
+ node->nr_values = 1;
+ RCU_INIT_POINTER(node->slots[0], head);
+
+ /* Propagate the aggregated mark info to the new child */
+ for (;;) {
+ if (xa_track_free(xa) && mark == XA_FREE_MARK) {
+ node_mark_all(node, XA_FREE_MARK);
+ if (!xa_marked(xa, XA_FREE_MARK)) {
+ node_clear_mark(node, 0, XA_FREE_MARK);
+ xa_mark_set(xa, XA_FREE_MARK);
+ }
+ } else if (xa_marked(xa, mark)) {
+ node_set_mark(node, 0, mark);
+ }
+ if (mark == XA_MARK_MAX)
+ break;
+ mark_inc(mark);
+ }
+
+ /*
+ * Now that the new node is fully initialised, we can add
+ * it to the tree
+ */
+ if (xa_is_node(head)) {
+ xa_to_node(head)->offset = 0;
+ rcu_assign_pointer(xa_to_node(head)->parent, node);
+ }
+ head = xa_mk_node(node);
+ rcu_assign_pointer(xa->xa_head, head);
+ xas_update(xas, node);
+
+ shift += XA_CHUNK_SHIFT;
+ }
+
+ xas->xa_node = node;
+ return shift;
+}
+
+/*
+ * xas_create() - Create a slot to store an entry in.
+ * @xas: XArray operation state.
+ * @allow_root: %true if we can store the entry in the root directly
+ *
+ * Most users will not need to call this function directly, as it is called
+ * by xas_store(). It is useful for doing conditional store operations
+ * (see the xa_cmpxchg() implementation for an example).
+ *
+ * Return: If the slot already existed, returns the contents of this slot.
+ * If the slot was newly created, returns %NULL. If it failed to create the
+ * slot, returns %NULL and indicates the error in @xas.
+ */
+static void *xas_create(struct xa_state *xas, bool allow_root)
+{
+ struct xarray *xa = xas->xa;
+ void *entry;
+ void __rcu **slot;
+ struct xa_node *node = xas->xa_node;
+ int shift;
+ unsigned int order = xas->xa_shift;
+
+ if (xas_top(node)) {
+ entry = xa_head_locked(xa);
+ xas->xa_node = NULL;
+ if (!entry && xa_zero_busy(xa))
+ entry = XA_ZERO_ENTRY;
+ shift = xas_expand(xas, entry);
+ if (shift < 0)
+ return NULL;
+ if (!shift && !allow_root)
+ shift = XA_CHUNK_SHIFT;
+ entry = xa_head_locked(xa);
+ slot = &xa->xa_head;
+ } else if (xas_error(xas)) {
+ return NULL;
+ } else if (node) {
+ unsigned int offset = xas->xa_offset;
+
+ shift = node->shift;
+ entry = xa_entry_locked(xa, node, offset);
+ slot = &node->slots[offset];
+ } else {
+ shift = 0;
+ entry = xa_head_locked(xa);
+ slot = &xa->xa_head;
+ }
+
+ while (shift > order) {
+ shift -= XA_CHUNK_SHIFT;
+ if (!entry) {
+ node = xas_alloc(xas, shift);
+ if (!node)
+ break;
+ if (xa_track_free(xa))
+ node_mark_all(node, XA_FREE_MARK);
+ rcu_assign_pointer(*slot, xa_mk_node(node));
+ } else if (xa_is_node(entry)) {
+ node = xa_to_node(entry);
+ } else {
+ break;
+ }
+ entry = xas_descend(xas, node);
+ slot = &node->slots[xas->xa_offset];
+ }
+
+ return entry;
+}
+
+/**
+ * xas_create_range() - Ensure that stores to this range will succeed
+ * @xas: XArray operation state.
+ *
+ * Creates all of the slots in the range covered by @xas. Sets @xas to
+ * create single-index entries and positions it at the beginning of the
+ * range. This is for the benefit of users that have not yet been
+ * converted to use multi-index entries.
+ */
+void xas_create_range(struct xa_state *xas)
+{
+ unsigned long index = xas->xa_index;
+ unsigned char shift = xas->xa_shift;
+ unsigned char sibs = xas->xa_sibs;
+
+ xas->xa_index |= ((sibs + 1UL) << shift) - 1;
+ if (xas_is_node(xas) && xas->xa_node->shift == xas->xa_shift)
+ xas->xa_offset |= sibs;
+ xas->xa_shift = 0;
+ xas->xa_sibs = 0;
+
+ for (;;) {
+ xas_create(xas, true);
+ if (xas_error(xas))
+ goto restore;
+ if (xas->xa_index <= (index | XA_CHUNK_MASK))
+ goto success;
+ xas->xa_index -= XA_CHUNK_SIZE;
+
+ for (;;) {
+ struct xa_node *node = xas->xa_node;
+ if (node->shift >= shift)
+ break;
+ xas->xa_node = xa_parent_locked(xas->xa, node);
+ xas->xa_offset = node->offset - 1;
+ if (node->offset != 0)
+ break;
+ }
+ }
+
+restore:
+ xas->xa_shift = shift;
+ xas->xa_sibs = sibs;
+ xas->xa_index = index;
+ return;
+success:
+ xas->xa_index = index;
+ if (xas->xa_node)
+ xas_set_offset(xas);
+}
+EXPORT_SYMBOL_GPL(xas_create_range);
+
+static void update_node(struct xa_state *xas, struct xa_node *node,
+ int count, int values)
+{
+ if (!node || (!count && !values))
+ return;
+
+ node->count += count;
+ node->nr_values += values;
+ XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
+ XA_NODE_BUG_ON(node, node->nr_values > XA_CHUNK_SIZE);
+ xas_update(xas, node);
+ if (count < 0)
+ xas_delete_node(xas);
+}
+
+/**
+ * xas_store() - Store this entry in the XArray.
+ * @xas: XArray operation state.
+ * @entry: New entry.
+ *
+ * If @xas is operating on a multi-index entry, the entry returned by this
+ * function is essentially meaningless (it may be an internal entry or it
+ * may be %NULL, even if there are non-NULL entries at some of the indices
+ * covered by the range). This is not a problem for any current users,
+ * and can be changed if needed.
+ *
+ * Return: The old entry at this index.
+ */
+void *xas_store(struct xa_state *xas, void *entry)
+{
+ struct xa_node *node;
+ void __rcu **slot = &xas->xa->xa_head;
+ unsigned int offset, max;
+ int count = 0;
+ int values = 0;
+ void *first, *next;
+ bool value = xa_is_value(entry);
+
+ if (entry) {
+ bool allow_root = !xa_is_node(entry) && !xa_is_zero(entry);
+ first = xas_create(xas, allow_root);
+ } else {
+ first = xas_load(xas);
+ }
+
+ if (xas_invalid(xas))
+ return first;
+ node = xas->xa_node;
+ if (node && (xas->xa_shift < node->shift))
+ xas->xa_sibs = 0;
+ if ((first == entry) && !xas->xa_sibs)
+ return first;
+
+ next = first;
+ offset = xas->xa_offset;
+ max = xas->xa_offset + xas->xa_sibs;
+ if (node) {
+ slot = &node->slots[offset];
+ if (xas->xa_sibs)
+ xas_squash_marks(xas);
+ }
+ if (!entry)
+ xas_init_marks(xas);
+
+ for (;;) {
+ /*
+ * Must clear the marks before setting the entry to NULL,
+ * otherwise xas_for_each_marked may find a NULL entry and
+ * stop early. rcu_assign_pointer contains a release barrier
+ * so the mark clearing will appear to happen before the
+ * entry is set to NULL.
+ */
+ rcu_assign_pointer(*slot, entry);
+ if (xa_is_node(next) && (!node || node->shift))
+ xas_free_nodes(xas, xa_to_node(next));
+ if (!node)
+ break;
+ count += !next - !entry;
+ values += !xa_is_value(first) - !value;
+ if (entry) {
+ if (offset == max)
+ break;
+ if (!xa_is_sibling(entry))
+ entry = xa_mk_sibling(xas->xa_offset);
+ } else {
+ if (offset == XA_CHUNK_MASK)
+ break;
+ }
+ next = xa_entry_locked(xas->xa, node, ++offset);
+ if (!xa_is_sibling(next)) {
+ if (!entry && (offset > max))
+ break;
+ first = next;
+ }
+ slot++;
+ }
+
+ update_node(xas, node, count, values);
+ return first;
+}
+EXPORT_SYMBOL_GPL(xas_store);
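+
+/*
+ * Example (illustrative): erasing an entry with the advanced API. Storing
+ * %NULL never allocates memory, so no xas_nomem() loop is needed here.
+ * "array" and "index" are placeholder names.
+ *
+ *   XA_STATE(xas, &array, index);
+ *   void *old;
+ *
+ *   xas_lock(&xas);
+ *   old = xas_store(&xas, NULL);
+ *   xas_unlock(&xas);
+ */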
+
+/**
+ * xas_get_mark() - Returns the state of this mark.
+ * @xas: XArray operation state.
+ * @mark: Mark number.
+ *
+ * Return: true if the mark is set, false if the mark is clear or @xas
+ * is in an error state.
+ */
+bool xas_get_mark(const struct xa_state *xas, xa_mark_t mark)
+{
+ if (xas_invalid(xas))
+ return false;
+ if (!xas->xa_node)
+ return xa_marked(xas->xa, mark);
+ return node_get_mark(xas->xa_node, xas->xa_offset, mark);
+}
+EXPORT_SYMBOL_GPL(xas_get_mark);
+
+/**
+ * xas_set_mark() - Sets the mark on this entry and its parents.
+ * @xas: XArray operation state.
+ * @mark: Mark number.
+ *
+ * Sets the specified mark on this entry, and walks up the tree setting it
+ * on all the ancestor entries. Does nothing if @xas has not been walked to
+ * an entry, or is in an error state.
+ */
+void xas_set_mark(const struct xa_state *xas, xa_mark_t mark)
+{
+ struct xa_node *node = xas->xa_node;
+ unsigned int offset = xas->xa_offset;
+
+ if (xas_invalid(xas))
+ return;
+
+ while (node) {
+ if (node_set_mark(node, offset, mark))
+ return;
+ offset = node->offset;
+ node = xa_parent_locked(xas->xa, node);
+ }
+
+ if (!xa_marked(xas->xa, mark))
+ xa_mark_set(xas->xa, mark);
+}
+EXPORT_SYMBOL_GPL(xas_set_mark);
+
+/**
+ * xas_clear_mark() - Clears the mark on this entry and its parents.
+ * @xas: XArray operation state.
+ * @mark: Mark number.
+ *
+ * Clears the specified mark on this entry, and walks back to the head
+ * attempting to clear it on all the ancestor entries. Does nothing if
+ * @xas has not been walked to an entry, or is in an error state.
+ */
+void xas_clear_mark(const struct xa_state *xas, xa_mark_t mark)
+{
+ struct xa_node *node = xas->xa_node;
+ unsigned int offset = xas->xa_offset;
+
+ if (xas_invalid(xas))
+ return;
+
+ while (node) {
+ if (!node_clear_mark(node, offset, mark))
+ return;
+ if (node_any_mark(node, mark))
+ return;
+
+ offset = node->offset;
+ node = xa_parent_locked(xas->xa, node);
+ }
+
+ if (xa_marked(xas->xa, mark))
+ xa_mark_clear(xas->xa, mark);
+}
+EXPORT_SYMBOL_GPL(xas_clear_mark);
+
+/**
+ * xas_init_marks() - Initialise all marks for the entry
+ * @xas: Array operations state.
+ *
+ * Initialise all marks for the entry specified by @xas. If we're tracking
+ * free entries with a mark, we need to set it on all entries. All other
+ * marks are cleared.
+ *
+ * This implementation is not as efficient as it could be; we may walk
+ * up the tree multiple times.
+ */
+void xas_init_marks(const struct xa_state *xas)
+{
+ xa_mark_t mark = 0;
+
+ for (;;) {
+ if (xa_track_free(xas->xa) && mark == XA_FREE_MARK)
+ xas_set_mark(xas, mark);
+ else
+ xas_clear_mark(xas, mark);
+ if (mark == XA_MARK_MAX)
+ break;
+ mark_inc(mark);
+ }
+}
+EXPORT_SYMBOL_GPL(xas_init_marks);
+
+#ifdef CONFIG_XARRAY_MULTI
+static unsigned int node_get_marks(struct xa_node *node, unsigned int offset)
+{
+ unsigned int marks = 0;
+ xa_mark_t mark = XA_MARK_0;
+
+ for (;;) {
+ if (node_get_mark(node, offset, mark))
+ marks |= 1 << (__force unsigned int)mark;
+ if (mark == XA_MARK_MAX)
+ break;
+ mark_inc(mark);
+ }
+
+ return marks;
+}
+
+static void node_set_marks(struct xa_node *node, unsigned int offset,
+ struct xa_node *child, unsigned int marks)
+{
+ xa_mark_t mark = XA_MARK_0;
+
+ for (;;) {
+ if (marks & (1 << (__force unsigned int)mark)) {
+ node_set_mark(node, offset, mark);
+ if (child)
+ node_mark_all(child, mark);
+ }
+ if (mark == XA_MARK_MAX)
+ break;
+ mark_inc(mark);
+ }
+}
+
+/**
+ * xas_split_alloc() - Allocate memory for splitting an entry.
+ * @xas: XArray operation state.
+ * @entry: New entry which will be stored in the array.
+ * @order: Current entry order.
+ * @gfp: Memory allocation flags.
+ *
+ * This function should be called before calling xas_split().
+ * If necessary, it will allocate new nodes (and fill them with @entry)
+ * to prepare for the upcoming split of an entry of @order size into
+ * entries of the order stored in the @xas.
+ *
+ * Context: May sleep if @gfp flags permit.
+ */
+void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order,
+ gfp_t gfp)
+{
+ unsigned int sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
+ unsigned int mask = xas->xa_sibs;
+
+ /* XXX: no support for splitting really large entries yet */
+ if (WARN_ON(xas->xa_shift + 2 * XA_CHUNK_SHIFT < order))
+ goto nomem;
+ if (xas->xa_shift + XA_CHUNK_SHIFT > order)
+ return;
+
+ do {
+ unsigned int i;
+ void *sibling = NULL;
+ struct xa_node *node;
+
+ node = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
+ if (!node)
+ goto nomem;
+ node->array = xas->xa;
+ for (i = 0; i < XA_CHUNK_SIZE; i++) {
+ if ((i & mask) == 0) {
+ RCU_INIT_POINTER(node->slots[i], entry);
+ sibling = xa_mk_sibling(i);
+ } else {
+ RCU_INIT_POINTER(node->slots[i], sibling);
+ }
+ }
+ RCU_INIT_POINTER(node->parent, xas->xa_alloc);
+ xas->xa_alloc = node;
+ } while (sibs-- > 0);
+
+ return;
+nomem:
+ xas_destroy(xas);
+ xas_set_err(xas, -ENOMEM);
+}
+EXPORT_SYMBOL_GPL(xas_split_alloc);
+
+/**
+ * xas_split() - Split a multi-index entry into smaller entries.
+ * @xas: XArray operation state.
+ * @entry: New entry to store in the array.
+ * @order: Current entry order.
+ *
+ * The size of the new entries is set in @xas. The value in @entry is
+ * copied to all the replacement entries.
+ *
+ * Context: Any context. The caller should hold the xa_lock.
+ */
+void xas_split(struct xa_state *xas, void *entry, unsigned int order)
+{
+ unsigned int sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
+ unsigned int offset, marks;
+ struct xa_node *node;
+ void *curr = xas_load(xas);
+ int values = 0;
+
+ node = xas->xa_node;
+ if (xas_top(node))
+ return;
+
+ marks = node_get_marks(node, xas->xa_offset);
+
+ offset = xas->xa_offset + sibs;
+ do {
+ if (xas->xa_shift < node->shift) {
+ struct xa_node *child = xas->xa_alloc;
+
+ xas->xa_alloc = rcu_dereference_raw(child->parent);
+ child->shift = node->shift - XA_CHUNK_SHIFT;
+ child->offset = offset;
+ child->count = XA_CHUNK_SIZE;
+ child->nr_values = xa_is_value(entry) ?
+ XA_CHUNK_SIZE : 0;
+ RCU_INIT_POINTER(child->parent, node);
+ node_set_marks(node, offset, child, marks);
+ rcu_assign_pointer(node->slots[offset],
+ xa_mk_node(child));
+ if (xa_is_value(curr))
+ values--;
+ xas_update(xas, child);
+ } else {
+ unsigned int canon = offset - xas->xa_sibs;
+
+ node_set_marks(node, canon, NULL, marks);
+ rcu_assign_pointer(node->slots[canon], entry);
+ while (offset > canon)
+ rcu_assign_pointer(node->slots[offset--],
+ xa_mk_sibling(canon));
+ values += (xa_is_value(entry) - xa_is_value(curr)) *
+ (xas->xa_sibs + 1);
+ }
+ } while (offset-- > xas->xa_offset);
+
+ node->nr_values += values;
+ xas_update(xas, node);
+}
+EXPORT_SYMBOL_GPL(xas_split);
+#endif
+
+/**
+ * xas_pause() - Pause a walk to drop a lock.
+ * @xas: XArray operation state.
+ *
+ * Some users need to pause a walk and drop the lock they're holding in
+ * order to yield to a higher priority thread or carry out an operation
+ * on an entry. Those users should call this function before they drop
+ * the lock. It resets the @xas to be suitable for the next iteration
+ * of the loop after the user has reacquired the lock. If most entries
+ * found during a walk require you to call xas_pause(), the xa_for_each()
+ * iterator may be more appropriate.
+ *
+ * Note that xas_pause() only works for forward iteration. If a user needs
+ * to pause a reverse iteration, an xas_pause_rev() will need to be added.
+ */
+void xas_pause(struct xa_state *xas)
+{
+ struct xa_node *node = xas->xa_node;
+
+ if (xas_invalid(xas))
+ return;
+
+ xas->xa_node = XAS_RESTART;
+ if (node) {
+ unsigned long offset = xas->xa_offset;
+ while (++offset < XA_CHUNK_SIZE) {
+ if (!xa_is_sibling(xa_entry(xas->xa, node, offset)))
+ break;
+ }
+ xas->xa_index += (offset - xas->xa_offset) << node->shift;
+ if (xas->xa_index == 0)
+ xas->xa_node = XAS_BOUNDS;
+ } else {
+ xas->xa_index++;
+ }
+}
+EXPORT_SYMBOL_GPL(xas_pause);
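+
+/*
+ * Example (illustrative): yielding the CPU during a long walk. "array"
+ * and process_entry() are placeholder names.
+ *
+ *   XA_STATE(xas, &array, 0);
+ *   void *entry;
+ *
+ *   rcu_read_lock();
+ *   xas_for_each(&xas, entry, ULONG_MAX) {
+ *           if (xas_retry(&xas, entry))
+ *                   continue;
+ *           process_entry(entry);
+ *           if (need_resched()) {
+ *                   xas_pause(&xas);
+ *                   rcu_read_unlock();
+ *                   cond_resched();
+ *                   rcu_read_lock();
+ *           }
+ *   }
+ *   rcu_read_unlock();
+ */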
+
+/*
+ * __xas_prev() - Find the previous entry in the XArray.
+ * @xas: XArray operation state.
+ *
+ * Helper function for xas_prev() which handles all the complex cases
+ * out of line.
+ */
+void *__xas_prev(struct xa_state *xas)
+{
+ void *entry;
+
+ if (!xas_frozen(xas->xa_node))
+ xas->xa_index--;
+ if (!xas->xa_node)
+ return set_bounds(xas);
+ if (xas_not_node(xas->xa_node))
+ return xas_load(xas);
+
+ if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node))
+ xas->xa_offset--;
+
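+ /* xa_offset is an unsigned char; decrementing 0 wraps it to 255 */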
+ while (xas->xa_offset == 255) {
+ xas->xa_offset = xas->xa_node->offset - 1;
+ xas->xa_node = xa_parent(xas->xa, xas->xa_node);
+ if (!xas->xa_node)
+ return set_bounds(xas);
+ }
+
+ for (;;) {
+ entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
+ if (!xa_is_node(entry))
+ return entry;
+
+ xas->xa_node = xa_to_node(entry);
+ xas_set_offset(xas);
+ }
+}
+EXPORT_SYMBOL_GPL(__xas_prev);
+
+/*
+ * __xas_next() - Find the next entry in the XArray.
+ * @xas: XArray operation state.
+ *
+ * Helper function for xas_next() which handles all the complex cases
+ * out of line.
+ */
+void *__xas_next(struct xa_state *xas)
+{
+ void *entry;
+
+ if (!xas_frozen(xas->xa_node))
+ xas->xa_index++;
+ if (!xas->xa_node)
+ return set_bounds(xas);
+ if (xas_not_node(xas->xa_node))
+ return xas_load(xas);
+
+ if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node))
+ xas->xa_offset++;
+
+ while (xas->xa_offset == XA_CHUNK_SIZE) {
+ xas->xa_offset = xas->xa_node->offset + 1;
+ xas->xa_node = xa_parent(xas->xa, xas->xa_node);
+ if (!xas->xa_node)
+ return set_bounds(xas);
+ }
+
+ for (;;) {
+ entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
+ if (!xa_is_node(entry))
+ return entry;
+
+ xas->xa_node = xa_to_node(entry);
+ xas_set_offset(xas);
+ }
+}
+EXPORT_SYMBOL_GPL(__xas_next);
+
+/**
+ * xas_find() - Find the next present entry in the XArray.
+ * @xas: XArray operation state.
+ * @max: Highest index to return.
+ *
+ * If the @xas has not yet been walked to an entry, return the entry
+ * which has an index >= xas.xa_index. If it has been walked, the entry
+ * currently being pointed at has been processed, and so we move to the
+ * next entry.
+ *
+ * If no entry is found and the array is smaller than @max, the iterator
+ * is set to the smallest index not yet in the array. This allows @xas
+ * to be immediately passed to xas_store().
+ *
+ * Return: The entry, if found, otherwise %NULL.
+ */
+void *xas_find(struct xa_state *xas, unsigned long max)
+{
+ void *entry;
+
+ if (xas_error(xas) || xas->xa_node == XAS_BOUNDS)
+ return NULL;
+ if (xas->xa_index > max)
+ return set_bounds(xas);
+
+ if (!xas->xa_node) {
+ xas->xa_index = 1;
+ return set_bounds(xas);
+ } else if (xas->xa_node == XAS_RESTART) {
+ entry = xas_load(xas);
+ if (entry || xas_not_node(xas->xa_node))
+ return entry;
+ } else if (!xas->xa_node->shift &&
+ xas->xa_offset != (xas->xa_index & XA_CHUNK_MASK)) {
+ xas->xa_offset = ((xas->xa_index - 1) & XA_CHUNK_MASK) + 1;
+ }
+
+ xas_next_offset(xas);
+
+ while (xas->xa_node && (xas->xa_index <= max)) {
+ if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
+ xas->xa_offset = xas->xa_node->offset + 1;
+ xas->xa_node = xa_parent(xas->xa, xas->xa_node);
+ continue;
+ }
+
+ entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
+ if (xa_is_node(entry)) {
+ xas->xa_node = xa_to_node(entry);
+ xas->xa_offset = 0;
+ continue;
+ }
+ if (entry && !xa_is_sibling(entry))
+ return entry;
+
+ xas_next_offset(xas);
+ }
+
+ if (!xas->xa_node)
+ xas->xa_node = XAS_BOUNDS;
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(xas_find);
+
+/**
+ * xas_find_marked() - Find the next marked entry in the XArray.
+ * @xas: XArray operation state.
+ * @max: Highest index to return.
+ * @mark: Mark number to search for.
+ *
+ * If the @xas has not yet been walked to an entry, return the marked entry
+ * which has an index >= xas.xa_index. If it has been walked, the entry
+ * currently being pointed at has been processed, and so we return the
+ * first marked entry with an index > xas.xa_index.
+ *
+ * If no marked entry is found and the array is smaller than @max, @xas is
+ * set to the bounds state and xas->xa_index is set to the smallest index
+ * not yet in the array. This allows @xas to be immediately passed to
+ * xas_store().
+ *
+ * If no entry is found before @max is reached, @xas is set to the restart
+ * state.
+ *
+ * Return: The entry, if found, otherwise %NULL.
+ */
+void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark)
+{
+ bool advance = true;
+ unsigned int offset;
+ void *entry;
+
+ if (xas_error(xas))
+ return NULL;
+ if (xas->xa_index > max)
+ goto max;
+
+ if (!xas->xa_node) {
+ xas->xa_index = 1;
+ goto out;
+ } else if (xas_top(xas->xa_node)) {
+ advance = false;
+ entry = xa_head(xas->xa);
+ xas->xa_node = NULL;
+ if (xas->xa_index > max_index(entry))
+ goto out;
+ if (!xa_is_node(entry)) {
+ if (xa_marked(xas->xa, mark))
+ return entry;
+ xas->xa_index = 1;
+ goto out;
+ }
+ xas->xa_node = xa_to_node(entry);
+ xas->xa_offset = xas->xa_index >> xas->xa_node->shift;
+ }
+
+ while (xas->xa_index <= max) {
+ if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
+ xas->xa_offset = xas->xa_node->offset + 1;
+ xas->xa_node = xa_parent(xas->xa, xas->xa_node);
+ if (!xas->xa_node)
+ break;
+ advance = false;
+ continue;
+ }
+
+ if (!advance) {
+ entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
+ if (xa_is_sibling(entry)) {
+ xas->xa_offset = xa_to_sibling(entry);
+ xas_move_index(xas, xas->xa_offset);
+ }
+ }
+
+ offset = xas_find_chunk(xas, advance, mark);
+ if (offset > xas->xa_offset) {
+ advance = false;
+ xas_move_index(xas, offset);
+ /* Mind the wrap */
+ if ((xas->xa_index - 1) >= max)
+ goto max;
+ xas->xa_offset = offset;
+ if (offset == XA_CHUNK_SIZE)
+ continue;
+ }
+
+ entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
+ if (!entry && !(xa_track_free(xas->xa) && mark == XA_FREE_MARK))
+ continue;
+ if (!xa_is_node(entry))
+ return entry;
+ xas->xa_node = xa_to_node(entry);
+ xas_set_offset(xas);
+ }
+
+out:
+ if (xas->xa_index > max)
+ goto max;
+ return set_bounds(xas);
+max:
+ xas->xa_node = XAS_RESTART;
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(xas_find_marked);
+
+/**
+ * xas_find_conflict() - Find the next present entry in a range.
+ * @xas: XArray operation state.
+ *
+ * The @xas describes both a range and a position within that range.
+ *
+ * Context: Any context. Expects xa_lock to be held.
+ * Return: The next entry in the range covered by @xas or %NULL.
+ */
+void *xas_find_conflict(struct xa_state *xas)
+{
+ void *curr;
+
+ if (xas_error(xas))
+ return NULL;
+
+ if (!xas->xa_node)
+ return NULL;
+
+ if (xas_top(xas->xa_node)) {
+ curr = xas_start(xas);
+ if (!curr)
+ return NULL;
+ while (xa_is_node(curr)) {
+ struct xa_node *node = xa_to_node(curr);
+ curr = xas_descend(xas, node);
+ }
+ if (curr)
+ return curr;
+ }
+
+ if (xas->xa_node->shift > xas->xa_shift)
+ return NULL;
+
+ for (;;) {
+ if (xas->xa_node->shift == xas->xa_shift) {
+ if ((xas->xa_offset & xas->xa_sibs) == xas->xa_sibs)
+ break;
+ } else if (xas->xa_offset == XA_CHUNK_MASK) {
+ xas->xa_offset = xas->xa_node->offset;
+ xas->xa_node = xa_parent_locked(xas->xa, xas->xa_node);
+ if (!xas->xa_node)
+ break;
+ continue;
+ }
+ curr = xa_entry_locked(xas->xa, xas->xa_node, ++xas->xa_offset);
+ if (xa_is_sibling(curr))
+ continue;
+ while (xa_is_node(curr)) {
+ xas->xa_node = xa_to_node(curr);
+ xas->xa_offset = 0;
+ curr = xa_entry_locked(xas->xa, xas->xa_node, 0);
+ }
+ if (curr)
+ return curr;
+ }
+ xas->xa_offset -= xas->xa_sibs;
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(xas_find_conflict);
+
+/**
+ * xa_load() - Load an entry from an XArray.
+ * @xa: XArray.
+ * @index: index into array.
+ *
+ * Context: Any context. Takes and releases the RCU lock.
+ * Return: The entry at @index in @xa.
+ */
+void *xa_load(struct xarray *xa, unsigned long index)
+{
+ XA_STATE(xas, xa, index);
+ void *entry;
+
+ rcu_read_lock();
+ do {
+ entry = xas_load(&xas);
+ if (xa_is_zero(entry))
+ entry = NULL;
+ } while (xas_retry(&xas, entry));
+ rcu_read_unlock();
+
+ return entry;
+}
+EXPORT_SYMBOL(xa_load);
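+
+/*
+ * Example (illustrative): a plain lookup. "my_objects", "struct my_obj"
+ * and "id" are placeholder names.
+ *
+ *   struct my_obj *obj = xa_load(&my_objects, id);
+ *
+ *   if (!obj)
+ *           return -ENOENT;
+ */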
+
+static void *xas_result(struct xa_state *xas, void *curr)
+{
+ if (xa_is_zero(curr))
+ return NULL;
+ if (xas_error(xas))
+ curr = xas->xa_node;
+ return curr;
+}
+
+/**
+ * __xa_erase() - Erase this entry from the XArray while locked.
+ * @xa: XArray.
+ * @index: Index into array.
+ *
+ * After this function returns, loading from @index will return %NULL.
+ * If the index is part of a multi-index entry, all indices will be erased
+ * and none of the entries will be part of a multi-index entry.
+ *
+ * Context: Any context. Expects xa_lock to be held on entry.
+ * Return: The entry which used to be at this index.
+ */
+void *__xa_erase(struct xarray *xa, unsigned long index)
+{
+ XA_STATE(xas, xa, index);
+ return xas_result(&xas, xas_store(&xas, NULL));
+}
+EXPORT_SYMBOL(__xa_erase);
+
+/**
+ * xa_erase() - Erase this entry from the XArray.
+ * @xa: XArray.
+ * @index: Index of entry.
+ *
+ * After this function returns, loading from @index will return %NULL.
+ * If the index is part of a multi-index entry, all indices will be erased
+ * and none of the entries will be part of a multi-index entry.
+ *
+ * Context: Any context. Takes and releases the xa_lock.
+ * Return: The entry which used to be at this index.
+ */
+void *xa_erase(struct xarray *xa, unsigned long index)
+{
+ void *entry;
+
+ xa_lock(xa);
+ entry = __xa_erase(xa, index);
+ xa_unlock(xa);
+
+ return entry;
+}
+EXPORT_SYMBOL(xa_erase);
+
+/**
+ * __xa_store() - Store this entry in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * You must already be holding the xa_lock when calling this function.
+ * It will drop the lock if needed to allocate memory, and then reacquire
+ * it afterwards.
+ *
+ * Context: Any context. Expects xa_lock to be held on entry. May
+ * release and reacquire xa_lock if @gfp flags permit.
+ * Return: The old entry at this index or xa_err() if an error happened.
+ */
+void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
+{
+ XA_STATE(xas, xa, index);
+ void *curr;
+
+ if (WARN_ON_ONCE(xa_is_advanced(entry)))
+ return XA_ERROR(-EINVAL);
+ if (xa_track_free(xa) && !entry)
+ entry = XA_ZERO_ENTRY;
+
+ do {
+ curr = xas_store(&xas, entry);
+ if (xa_track_free(xa))
+ xas_clear_mark(&xas, XA_FREE_MARK);
+ } while (__xas_nomem(&xas, gfp));
+
+ return xas_result(&xas, curr);
+}
+EXPORT_SYMBOL(__xa_store);
+
+/**
+ * xa_store() - Store this entry in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * After this function returns, loads from this index will return @entry.
+ * Storing into an existing multi-index entry updates the entry of every index.
+ * The marks associated with @index are unaffected unless @entry is %NULL.
+ *
+ * Context: Any context. Takes and releases the xa_lock.
+ * May sleep if the @gfp flags permit.
+ * Return: The old entry at this index on success, xa_err(-EINVAL) if @entry
+ * cannot be stored in an XArray, or xa_err(-ENOMEM) if memory allocation
+ * failed.
+ */
+void *xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
+{
+ void *curr;
+
+ xa_lock(xa);
+ curr = __xa_store(xa, index, entry, gfp);
+ xa_unlock(xa);
+
+ return curr;
+}
+EXPORT_SYMBOL(xa_store);
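+
+/*
+ * Example (illustrative): storing an entry and checking for failure.
+ * Errors are encoded in the returned pointer rather than a separate
+ * errno. "array", "index" and "item" are placeholder names.
+ *
+ *   void *old = xa_store(&array, index, item, GFP_KERNEL);
+ *
+ *   if (xa_is_err(old))
+ *           return xa_err(old);
+ */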
+
+/**
+ * __xa_cmpxchg() - Store this entry in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @old: Old value to test against.
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * You must already be holding the xa_lock when calling this function.
+ * It will drop the lock if needed to allocate memory, and then reacquire
+ * it afterwards.
+ *
+ * Context: Any context. Expects xa_lock to be held on entry. May
+ * release and reacquire xa_lock if @gfp flags permit.
+ * Return: The old entry at this index or xa_err() if an error happened.
+ */
+void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
+ void *old, void *entry, gfp_t gfp)
+{
+ XA_STATE(xas, xa, index);
+ void *curr;
+
+ if (WARN_ON_ONCE(xa_is_advanced(entry)))
+ return XA_ERROR(-EINVAL);
+
+ do {
+ curr = xas_load(&xas);
+ if (curr == old) {
+ xas_store(&xas, entry);
+ if (xa_track_free(xa) && entry && !curr)
+ xas_clear_mark(&xas, XA_FREE_MARK);
+ }
+ } while (__xas_nomem(&xas, gfp));
+
+ return xas_result(&xas, curr);
+}
+EXPORT_SYMBOL(__xa_cmpxchg);
+
+/**
+ * __xa_insert() - Store this entry in the XArray if no entry is present.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * Inserting a NULL entry will store a reserved entry (like xa_reserve())
+ * if no entry is present. Inserting will fail if a reserved entry is
+ * present, even though loading from this index will return NULL.
+ *
+ * Context: Any context. Expects xa_lock to be held on entry. May
+ * release and reacquire xa_lock if @gfp flags permit.
+ * Return: 0 if the store succeeded. -EBUSY if another entry was present.
+ * -ENOMEM if memory could not be allocated.
+ */
+int __xa_insert(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
+{
+ XA_STATE(xas, xa, index);
+ void *curr;
+
+ if (WARN_ON_ONCE(xa_is_advanced(entry)))
+ return -EINVAL;
+ if (!entry)
+ entry = XA_ZERO_ENTRY;
+
+ do {
+ curr = xas_load(&xas);
+ if (!curr) {
+ xas_store(&xas, entry);
+ if (xa_track_free(xa))
+ xas_clear_mark(&xas, XA_FREE_MARK);
+ } else {
+ xas_set_err(&xas, -EBUSY);
+ }
+ } while (__xas_nomem(&xas, gfp));
+
+ return xas_error(&xas);
+}
+EXPORT_SYMBOL(__xa_insert);
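+
+/*
+ * Example (illustrative): the locking wrapper xa_insert() stores only
+ * into an empty slot, making "insert if absent" a single call. "array",
+ * "index" and "item" are placeholder names.
+ *
+ *   int err = xa_insert(&array, index, item, GFP_KERNEL);
+ *
+ *   if (err == -EBUSY)
+ *           return err; /* the index was already in use */
+ */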
+
+#ifdef CONFIG_XARRAY_MULTI
+static void xas_set_range(struct xa_state *xas, unsigned long first,
+ unsigned long last)
+{
+ unsigned int shift = 0;
+ unsigned long sibs = last - first;
+ unsigned int offset = XA_CHUNK_MASK;
+
+ xas_set(xas, first);
+
+ while ((first & XA_CHUNK_MASK) == 0) {
+ if (sibs < XA_CHUNK_MASK)
+ break;
+ if ((sibs == XA_CHUNK_MASK) && (offset < XA_CHUNK_MASK))
+ break;
+ shift += XA_CHUNK_SHIFT;
+ if (offset == XA_CHUNK_MASK)
+ offset = sibs & XA_CHUNK_MASK;
+ sibs >>= XA_CHUNK_SHIFT;
+ first >>= XA_CHUNK_SHIFT;
+ }
+
+ offset = first & XA_CHUNK_MASK;
+ if (offset + sibs > XA_CHUNK_MASK)
+ sibs = XA_CHUNK_MASK - offset;
+ if ((((first + sibs + 1) << shift) - 1) > last)
+ sibs -= 1;
+
+ xas->xa_shift = shift;
+ xas->xa_sibs = sibs;
+}
+
+/**
+ * xa_store_range() - Store this entry at a range of indices in the XArray.
+ * @xa: XArray.
+ * @first: First index to affect.
+ * @last: Last index to affect.
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * After this function returns, loads from any index between @first and @last,
+ * inclusive, will return @entry.
+ * Storing into an existing multi-index entry updates the entry of every index.
+ * The marks associated with the affected indices are unaffected unless
+ * @entry is %NULL.
+ *
+ * Context: Process context. Takes and releases the xa_lock. May sleep
+ * if the @gfp flags permit.
+ * Return: %NULL on success, xa_err(-EINVAL) if @entry cannot be stored in
+ * an XArray, or xa_err(-ENOMEM) if memory allocation failed.
+ */
+void *xa_store_range(struct xarray *xa, unsigned long first,
+ unsigned long last, void *entry, gfp_t gfp)
+{
+ XA_STATE(xas, xa, 0);
+
+ if (WARN_ON_ONCE(xa_is_internal(entry)))
+ return XA_ERROR(-EINVAL);
+ if (last < first)
+ return XA_ERROR(-EINVAL);
+
+ do {
+ xas_lock(&xas);
+ if (entry) {
+ unsigned int order = BITS_PER_LONG;
+ if (last + 1)
+ order = __ffs(last + 1);
+ xas_set_order(&xas, last, order);
+ xas_create(&xas, true);
+ if (xas_error(&xas))
+ goto unlock;
+ }
+ do {
+ xas_set_range(&xas, first, last);
+ xas_store(&xas, entry);
+ if (xas_error(&xas))
+ goto unlock;
+ first += xas_size(&xas);
+ } while (first <= last);
+unlock:
+ xas_unlock(&xas);
+ } while (xas_nomem(&xas, gfp));
+
+ return xas_result(&xas, NULL);
+}
+EXPORT_SYMBOL(xa_store_range);
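+
+/*
+ * Example (illustrative): storing one entry over indices 0 to 15
+ * inclusive. "array" and "item" are placeholder names.
+ *
+ *   void *ret = xa_store_range(&array, 0, 15, item, GFP_KERNEL);
+ *
+ *   if (xa_is_err(ret))
+ *           return xa_err(ret);
+ */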
+
+/**
+ * xa_get_order() - Get the order of an entry.
+ * @xa: XArray.
+ * @index: Index of the entry.
+ *
+ * Return: A number between 0 and 63 indicating the order of the entry.
+ */
+int xa_get_order(struct xarray *xa, unsigned long index)
+{
+ XA_STATE(xas, xa, index);
+ void *entry;
+ int order = 0;
+
+ rcu_read_lock();
+ entry = xas_load(&xas);
+
+ if (!entry)
+ goto unlock;
+
+ if (!xas.xa_node)
+ goto unlock;
+
+ for (;;) {
+ unsigned int slot = xas.xa_offset + (1 << order);
+
+ if (slot >= XA_CHUNK_SIZE)
+ break;
+ if (!xa_is_sibling(xas.xa_node->slots[slot]))
+ break;
+ order++;
+ }
+
+ order += xas.xa_node->shift;
+unlock:
+ rcu_read_unlock();
+
+ return order;
+}
+EXPORT_SYMBOL(xa_get_order);
+#endif /* CONFIG_XARRAY_MULTI */
+
+/**
+ * __xa_alloc() - Find somewhere to store this entry in the XArray.
+ * @xa: XArray.
+ * @id: Pointer to ID.
+ * @limit: Range for allocated ID.
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * Finds an empty entry in @xa between @limit.min and @limit.max,
+ * stores the index into the @id pointer, then stores the entry at
+ * that index. A concurrent lookup will not see an uninitialised @id.
+ *
+ * Must only be used on an XArray initialized with the XA_FLAGS_ALLOC flag
+ * set in xa_init_flags().
+ *
+ * Context: Any context. Expects xa_lock to be held on entry. May
+ * release and reacquire xa_lock if @gfp flags permit.
+ * Return: 0 on success, -ENOMEM if memory could not be allocated or
+ * -EBUSY if there are no free entries in @limit.
+ */
+int __xa_alloc(struct xarray *xa, u32 *id, void *entry,
+ struct xa_limit limit, gfp_t gfp)
+{
+ XA_STATE(xas, xa, 0);
+
+ if (WARN_ON_ONCE(xa_is_advanced(entry)))
+ return -EINVAL;
+ if (WARN_ON_ONCE(!xa_track_free(xa)))
+ return -EINVAL;
+
+ if (!entry)
+ entry = XA_ZERO_ENTRY;
+
+ do {
+ xas.xa_index = limit.min;
+ xas_find_marked(&xas, limit.max, XA_FREE_MARK);
+ if (xas.xa_node == XAS_RESTART)
+ xas_set_err(&xas, -EBUSY);
+ else
+ *id = xas.xa_index;
+ xas_store(&xas, entry);
+ xas_clear_mark(&xas, XA_FREE_MARK);
+ } while (__xas_nomem(&xas, gfp));
+
+ return xas_error(&xas);
+}
+EXPORT_SYMBOL(__xa_alloc);
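+
+/*
+ * Example (illustrative): allocating an ID with the locking wrapper
+ * xa_alloc() on an array initialized with XA_FLAGS_ALLOC; xa_limit_32b
+ * allows any index that fits in a u32. "array" and "item" are
+ * placeholder names.
+ *
+ *   u32 id;
+ *   int err = xa_alloc(&array, &id, item, xa_limit_32b, GFP_KERNEL);
+ *
+ *   if (!err)
+ *           pr_debug("stored item at %u\n", id);
+ */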
+
+/**
+ * __xa_alloc_cyclic() - Find somewhere to store this entry in the XArray.
+ * @xa: XArray.
+ * @id: Pointer to ID.
+ * @entry: New entry.
+ * @limit: Range of allocated ID.
+ * @next: Pointer to next ID to allocate.
+ * @gfp: Memory allocation flags.
+ *
+ * Finds an empty entry in @xa between @limit.min and @limit.max,
+ * stores the index into the @id pointer, then stores the entry at
+ * that index. A concurrent lookup will not see an uninitialised @id.
+ * The search for an empty entry will start at @next and will wrap
+ * around if necessary.
+ *
+ * Must only be used on an XArray initialized with the XA_FLAGS_ALLOC flag
+ * set in xa_init_flags().
+ *
+ * Context: Any context. Expects xa_lock to be held on entry. May
+ * release and reacquire xa_lock if @gfp flags permit.
+ * Return: 0 if the allocation succeeded without wrapping. 1 if the
+ * allocation succeeded after wrapping, -ENOMEM if memory could not be
+ * allocated or -EBUSY if there are no free entries in @limit.
+ */
+int __xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry,
+ struct xa_limit limit, u32 *next, gfp_t gfp)
+{
+ u32 min = limit.min;
+ int ret;
+
+ limit.min = max(min, *next);
+ ret = __xa_alloc(xa, id, entry, limit, gfp);
+ if ((xa->xa_flags & XA_FLAGS_ALLOC_WRAPPED) && ret == 0) {
+ xa->xa_flags &= ~XA_FLAGS_ALLOC_WRAPPED;
+ ret = 1;
+ }
+
+ if (ret < 0 && limit.min > min) {
+ limit.min = min;
+ ret = __xa_alloc(xa, id, entry, limit, gfp);
+ if (ret == 0)
+ ret = 1;
+ }
+
+ if (ret >= 0) {
+ *next = *id + 1;
+ if (*next == 0)
+ xa->xa_flags |= XA_FLAGS_ALLOC_WRAPPED;
+ }
+ return ret;
+}
+EXPORT_SYMBOL(__xa_alloc_cyclic);
+
+/**
+ * __xa_set_mark() - Set this mark on this entry while locked.
+ * @xa: XArray.
+ * @index: Index of entry.
+ * @mark: Mark number.
+ *
+ * Attempting to set a mark on a %NULL entry does not succeed.
+ *
+ * Context: Any context. Expects xa_lock to be held on entry.
+ */
+void __xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
+{
+ XA_STATE(xas, xa, index);
+ void *entry = xas_load(&xas);
+
+ if (entry)
+ xas_set_mark(&xas, mark);
+}
+EXPORT_SYMBOL(__xa_set_mark);
+
+/**
+ * __xa_clear_mark() - Clear this mark on this entry while locked.
+ * @xa: XArray.
+ * @index: Index of entry.
+ * @mark: Mark number.
+ *
+ * Context: Any context. Expects xa_lock to be held on entry.
+ */
+void __xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
+{
+ XA_STATE(xas, xa, index);
+ void *entry = xas_load(&xas);
+
+ if (entry)
+ xas_clear_mark(&xas, mark);
+}
+EXPORT_SYMBOL(__xa_clear_mark);
+
+/**
+ * xa_get_mark() - Inquire whether this mark is set on this entry.
+ * @xa: XArray.
+ * @index: Index of entry.
+ * @mark: Mark number.
+ *
+ * This function uses the RCU read lock, so the result may be out of date
+ * by the time it returns. If you need the result to be stable, use a lock.
+ *
+ * Context: Any context. Takes and releases the RCU lock.
+ * Return: True if the entry at @index has this mark set, false if it doesn't.
+ */
+bool xa_get_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
+{
+ XA_STATE(xas, xa, index);
+ void *entry;
+
+ rcu_read_lock();
+ entry = xas_start(&xas);
+ while (xas_get_mark(&xas, mark)) {
+ if (!xa_is_node(entry))
+ goto found;
+ entry = xas_descend(&xas, xa_to_node(entry));
+ }
+ rcu_read_unlock();
+ return false;
+ found:
+ rcu_read_unlock();
+ return true;
+}
+EXPORT_SYMBOL(xa_get_mark);
+
+/**
+ * xa_set_mark() - Set this mark on this entry.
+ * @xa: XArray.
+ * @index: Index of entry.
+ * @mark: Mark number.
+ *
+ * Attempting to set a mark on a %NULL entry does not succeed.
+ *
+ * Context: Process context. Takes and releases the xa_lock.
+ */
+void xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
+{
+ xa_lock(xa);
+ __xa_set_mark(xa, index, mark);
+ xa_unlock(xa);
+}
+EXPORT_SYMBOL(xa_set_mark);
+
+/**
+ * xa_clear_mark() - Clear this mark on this entry.
+ * @xa: XArray.
+ * @index: Index of entry.
+ * @mark: Mark number.
+ *
+ * Clearing a mark always succeeds.
+ *
+ * Context: Process context. Takes and releases the xa_lock.
+ */
+void xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
+{
+ xa_lock(xa);
+ __xa_clear_mark(xa, index, mark);
+ xa_unlock(xa);
+}
+EXPORT_SYMBOL(xa_clear_mark);
+
+/**
+ * xa_find() - Search the XArray for an entry.
+ * @xa: XArray.
+ * @indexp: Pointer to an index.
+ * @max: Maximum index to search to.
+ * @filter: Selection criterion.
+ *
+ * Finds the entry in @xa which matches the @filter, and has the lowest
+ * index that is at least @indexp and no more than @max.
+ * If an entry is found, @indexp is updated to be the index of the entry.
+ * This function is protected by the RCU read lock, so it may not find
+ * entries which are being simultaneously added. It will not return an
+ * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find().
+ *
+ * Context: Any context. Takes and releases the RCU lock.
+ * Return: The entry, if found, otherwise %NULL.
+ */
+void *xa_find(struct xarray *xa, unsigned long *indexp,
+ unsigned long max, xa_mark_t filter)
+{
+ XA_STATE(xas, xa, *indexp);
+ void *entry;
+
+ rcu_read_lock();
+ do {
+ if ((__force unsigned int)filter < XA_MAX_MARKS)
+ entry = xas_find_marked(&xas, max, filter);
+ else
+ entry = xas_find(&xas, max);
+ } while (xas_retry(&xas, entry));
+ rcu_read_unlock();
+
+ if (entry)
+ *indexp = xas.xa_index;
+ return entry;
+}
+EXPORT_SYMBOL(xa_find);
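+
+/*
+ * Example (illustrative): finding the first present entry at or above
+ * index 0. "array" is a placeholder name.
+ *
+ *   unsigned long index = 0;
+ *   void *entry = xa_find(&array, &index, ULONG_MAX, XA_PRESENT);
+ *
+ *   if (entry)
+ *           pr_debug("first entry is at %lu\n", index);
+ */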
+
+static bool xas_sibling(struct xa_state *xas)
+{
+ struct xa_node *node = xas->xa_node;
+ unsigned long mask;
+
+ if (!IS_ENABLED(CONFIG_XARRAY_MULTI) || !node)
+ return false;
+ mask = (XA_CHUNK_SIZE << node->shift) - 1;
+ return (xas->xa_index & mask) >
+ ((unsigned long)xas->xa_offset << node->shift);
+}
+
+/**
+ * xa_find_after() - Search the XArray for a present entry.
+ * @xa: XArray.
+ * @indexp: Pointer to an index.
+ * @max: Maximum index to search to.
+ * @filter: Selection criterion.
+ *
+ * Finds the entry in @xa which matches the @filter and has the lowest
+ * index that is above @indexp and no more than @max.
+ * If an entry is found, @indexp is updated to be the index of the entry.
+ * This function is protected by the RCU read lock, so it may miss entries
+ * which are being simultaneously added. It will not return an
+ * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find().
+ *
+ * Context: Any context. Takes and releases the RCU lock.
+ * Return: The pointer, if found, otherwise %NULL.
+ */
+void *xa_find_after(struct xarray *xa, unsigned long *indexp,
+ unsigned long max, xa_mark_t filter)
+{
+ XA_STATE(xas, xa, *indexp + 1);
+ void *entry;
+
+ if (xas.xa_index == 0)
+ return NULL;
+
+ rcu_read_lock();
+ for (;;) {
+ if ((__force unsigned int)filter < XA_MAX_MARKS)
+ entry = xas_find_marked(&xas, max, filter);
+ else
+ entry = xas_find(&xas, max);
+
+ if (xas_invalid(&xas))
+ break;
+ if (xas_sibling(&xas))
+ continue;
+ if (!xas_retry(&xas, entry))
+ break;
+ }
+ rcu_read_unlock();
+
+ if (entry)
+ *indexp = xas.xa_index;
+ return entry;
+}
+EXPORT_SYMBOL(xa_find_after);
+
+static unsigned int xas_extract_present(struct xa_state *xas, void **dst,
+ unsigned long max, unsigned int n)
+{
+ void *entry;
+ unsigned int i = 0;
+
+ rcu_read_lock();
+ xas_for_each(xas, entry, max) {
+ if (xas_retry(xas, entry))
+ continue;
+ dst[i++] = entry;
+ if (i == n)
+ break;
+ }
+ rcu_read_unlock();
+
+ return i;
+}
+
+static unsigned int xas_extract_marked(struct xa_state *xas, void **dst,
+ unsigned long max, unsigned int n, xa_mark_t mark)
+{
+ void *entry;
+ unsigned int i = 0;
+
+ rcu_read_lock();
+ xas_for_each_marked(xas, entry, max, mark) {
+ if (xas_retry(xas, entry))
+ continue;
+ dst[i++] = entry;
+ if (i == n)
+ break;
+ }
+ rcu_read_unlock();
+
+ return i;
+}
+
+/**
+ * xa_extract() - Copy selected entries from the XArray into a normal array.
+ * @xa: The source XArray to copy from.
+ * @dst: The buffer to copy entries into.
+ * @start: The first index in the XArray eligible to be selected.
+ * @max: The last index in the XArray eligible to be selected.
+ * @n: The maximum number of entries to copy.
+ * @filter: Selection criterion.
+ *
+ * Copies up to @n entries that match @filter from the XArray. The
+ * copied entries will have indices between @start and @max, inclusive.
+ *
+ * The @filter may be an XArray mark value, in which case entries which are
+ * marked with that mark will be copied. It may also be %XA_PRESENT, in
+ * which case all entries which are not %NULL will be copied.
+ *
+ * The entries returned may not represent a snapshot of the XArray at a
+ * moment in time. For example, if another thread stores to index 5, then
+ * index 10, calling xa_extract() may return the old contents of index 5
+ * and the new contents of index 10. Indices not modified while this
+ * function is running will not be skipped.
+ *
+ * If you need stronger guarantees, holding the xa_lock across calls to this
+ * function will prevent concurrent modification.
+ *
+ * Context: Any context. Takes and releases the RCU lock.
+ * Return: The number of entries copied.
+ */
+unsigned int xa_extract(struct xarray *xa, void **dst, unsigned long start,
+ unsigned long max, unsigned int n, xa_mark_t filter)
+{
+ XA_STATE(xas, xa, start);
+
+ if (!n)
+ return 0;
+
+ if ((__force unsigned int)filter < XA_MAX_MARKS)
+ return xas_extract_marked(&xas, dst, max, n, filter);
+ return xas_extract_present(&xas, dst, max, n);
+}
+EXPORT_SYMBOL(xa_extract);
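+
+/*
+ * Example (illustrative): copying up to 16 entries bearing XA_MARK_0
+ * into a local buffer. "array" is a placeholder name.
+ *
+ *   void *batch[16];
+ *   unsigned int count;
+ *
+ *   count = xa_extract(&array, batch, 0, ULONG_MAX, 16, XA_MARK_0);
+ */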
+
+/**
+ * xa_delete_node() - Private interface for workingset code.
+ * @node: Node to be removed from the tree.
+ * @update: Function to call to update ancestor nodes.
+ *
+ * Context: xa_lock must be held on entry and will not be released.
+ */
+void xa_delete_node(struct xa_node *node, xa_update_node_t update)
+{
+ struct xa_state xas = {
+ .xa = node->array,
+ .xa_index = (unsigned long)node->offset <<
+ (node->shift + XA_CHUNK_SHIFT),
+ .xa_shift = node->shift + XA_CHUNK_SHIFT,
+ .xa_offset = node->offset,
+ .xa_node = xa_parent_locked(node->array, node),
+ .xa_update = update,
+ };
+
+ xas_store(&xas, NULL);
+}
+EXPORT_SYMBOL_GPL(xa_delete_node); /* For the benefit of the test suite */
+
+/**
+ * xa_destroy() - Free all internal data structures.
+ * @xa: XArray.
+ *
+ * After calling this function, the XArray is empty and has freed all memory
+ * allocated for its internal data structures. You are responsible for
+ * freeing the objects referenced by the XArray.
+ *
+ * Context: Any context. Takes and releases the xa_lock, interrupt-safe.
+ */
+void xa_destroy(struct xarray *xa)
+{
+ XA_STATE(xas, xa, 0);
+ unsigned long flags;
+ void *entry;
+
+ xas.xa_node = NULL;
+ xas_lock_irqsave(&xas, flags);
+ entry = xa_head_locked(xa);
+ RCU_INIT_POINTER(xa->xa_head, NULL);
+ xas_init_marks(&xas);
+ if (xa_zero_busy(xa))
+ xa_mark_clear(xa, XA_FREE_MARK);
+ /* lockdep checks we're still holding the lock in xas_free_nodes() */
+ if (xa_is_node(entry))
+ xas_free_nodes(&xas, xa_to_node(entry));
+ xas_unlock_irqrestore(&xas, flags);
+}
+EXPORT_SYMBOL(xa_destroy);
+
+#ifdef XA_DEBUG
+void xa_dump_node(const struct xa_node *node)
+{
+ unsigned i, j;
+
+ if (!node)
+ return;
+ if ((unsigned long)node & 3) {
+ pr_cont("node %px\n", node);
+ return;
+ }
+
+ pr_cont("node %px %s %d parent %px shift %d count %d values %d "
+ "array %px list %px %px marks",
+ node, node->parent ? "offset" : "max", node->offset,
+ node->parent, node->shift, node->count, node->nr_values,
+ node->array, node->private_list.prev, node->private_list.next);
+ for (i = 0; i < XA_MAX_MARKS; i++)
+ for (j = 0; j < XA_MARK_LONGS; j++)
+ pr_cont(" %lx", node->marks[i][j]);
+ pr_cont("\n");
+}
+
+void xa_dump_index(unsigned long index, unsigned int shift)
+{
+ if (!shift)
+ pr_info("%lu: ", index);
+ else if (shift >= BITS_PER_LONG)
+ pr_info("0-%lu: ", ~0UL);
+ else
+ pr_info("%lu-%lu: ", index, index | ((1UL << shift) - 1));
+}
+
+void xa_dump_entry(const void *entry, unsigned long index, unsigned long shift)
+{
+ if (!entry)
+ return;
+
+ xa_dump_index(index, shift);
+
+ if (xa_is_node(entry)) {
+ if (shift == 0) {
+ pr_cont("%px\n", entry);
+ } else {
+ unsigned long i;
+ struct xa_node *node = xa_to_node(entry);
+ xa_dump_node(node);
+ for (i = 0; i < XA_CHUNK_SIZE; i++)
+ xa_dump_entry(node->slots[i],
+ index + (i << node->shift), node->shift);
+ }
+ } else if (xa_is_value(entry))
+ pr_cont("value %ld (0x%lx) [%px]\n", xa_to_value(entry),
+ xa_to_value(entry), entry);
+ else if (!xa_is_internal(entry))
+ pr_cont("%px\n", entry);
+ else if (xa_is_retry(entry))
+ pr_cont("retry (%ld)\n", xa_to_internal(entry));
+ else if (xa_is_sibling(entry))
+ pr_cont("sibling (slot %ld)\n", xa_to_sibling(entry));
+ else if (xa_is_zero(entry))
+ pr_cont("zero (%ld)\n", xa_to_internal(entry));
+ else
+ pr_cont("UNKNOWN ENTRY (%px)\n", entry);
+}
+
+void xa_dump(const struct xarray *xa)
+{
+ void *entry = xa->xa_head;
+ unsigned int shift = 0;
+
+ pr_info("xarray: %px head %px flags %x marks %d %d %d\n", xa, entry,
+ xa->xa_flags, xa_marked(xa, XA_MARK_0),
+ xa_marked(xa, XA_MARK_1), xa_marked(xa, XA_MARK_2));
+ if (xa_is_node(entry))
+ shift = xa_to_node(entry)->shift + XA_CHUNK_SHIFT;
+ xa_dump_entry(entry, 0, shift);
+}
+#endif
diff --git a/lib/xxhash.c b/lib/xxhash.c
new file mode 100644
index 000000000..d5bb9ff10
--- /dev/null
+++ b/lib/xxhash.c
@@ -0,0 +1,500 @@
+/*
+ * xxHash - Extremely Fast Hash algorithm
+ * Copyright (C) 2012-2016, Yann Collet.
+ *
+ * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation. This program is dual-licensed; you may select
+ * either version 2 of the GNU General Public License ("GPL") or BSD license
+ * ("BSD").
+ *
+ * You can contact the author at:
+ * - xxHash homepage: https://cyan4973.github.io/xxHash/
+ * - xxHash source repository: https://github.com/Cyan4973/xxHash
+ */
+
+#include <asm/unaligned.h>
+#include <linux/errno.h>
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/xxhash.h>
+
+/*-*************************************
+ * Macros
+ **************************************/
+#define xxh_rotl32(x, r) ((x << r) | (x >> (32 - r)))
+#define xxh_rotl64(x, r) ((x << r) | (x >> (64 - r)))
+
+#ifdef __LITTLE_ENDIAN
+# define XXH_CPU_LITTLE_ENDIAN 1
+#else
+# define XXH_CPU_LITTLE_ENDIAN 0
+#endif
+
+/*-*************************************
+ * Constants
+ **************************************/
+static const uint32_t PRIME32_1 = 2654435761U;
+static const uint32_t PRIME32_2 = 2246822519U;
+static const uint32_t PRIME32_3 = 3266489917U;
+static const uint32_t PRIME32_4 = 668265263U;
+static const uint32_t PRIME32_5 = 374761393U;
+
+static const uint64_t PRIME64_1 = 11400714785074694791ULL;
+static const uint64_t PRIME64_2 = 14029467366897019727ULL;
+static const uint64_t PRIME64_3 = 1609587929392839161ULL;
+static const uint64_t PRIME64_4 = 9650029242287828579ULL;
+static const uint64_t PRIME64_5 = 2870177450012600261ULL;
+
+/*-**************************
+ * Utils
+ ***************************/
+void xxh32_copy_state(struct xxh32_state *dst, const struct xxh32_state *src)
+{
+ memcpy(dst, src, sizeof(*dst));
+}
+EXPORT_SYMBOL(xxh32_copy_state);
+
+void xxh64_copy_state(struct xxh64_state *dst, const struct xxh64_state *src)
+{
+ memcpy(dst, src, sizeof(*dst));
+}
+EXPORT_SYMBOL(xxh64_copy_state);
+
+/*-***************************
+ * Simple Hash Functions
+ ****************************/
+static uint32_t xxh32_round(uint32_t seed, const uint32_t input)
+{
+ seed += input * PRIME32_2;
+ seed = xxh_rotl32(seed, 13);
+ seed *= PRIME32_1;
+ return seed;
+}
+
+uint32_t xxh32(const void *input, const size_t len, const uint32_t seed)
+{
+ const uint8_t *p = (const uint8_t *)input;
+ const uint8_t *b_end = p + len;
+ uint32_t h32;
+
+ if (len >= 16) {
+ const uint8_t *const limit = b_end - 16;
+ uint32_t v1 = seed + PRIME32_1 + PRIME32_2;
+ uint32_t v2 = seed + PRIME32_2;
+ uint32_t v3 = seed + 0;
+ uint32_t v4 = seed - PRIME32_1;
+
+ do {
+ v1 = xxh32_round(v1, get_unaligned_le32(p));
+ p += 4;
+ v2 = xxh32_round(v2, get_unaligned_le32(p));
+ p += 4;
+ v3 = xxh32_round(v3, get_unaligned_le32(p));
+ p += 4;
+ v4 = xxh32_round(v4, get_unaligned_le32(p));
+ p += 4;
+ } while (p <= limit);
+
+ h32 = xxh_rotl32(v1, 1) + xxh_rotl32(v2, 7) +
+ xxh_rotl32(v3, 12) + xxh_rotl32(v4, 18);
+ } else {
+ h32 = seed + PRIME32_5;
+ }
+
+ h32 += (uint32_t)len;
+
+ while (p + 4 <= b_end) {
+ h32 += get_unaligned_le32(p) * PRIME32_3;
+ h32 = xxh_rotl32(h32, 17) * PRIME32_4;
+ p += 4;
+ }
+
+ while (p < b_end) {
+ h32 += (*p) * PRIME32_5;
+ h32 = xxh_rotl32(h32, 11) * PRIME32_1;
+ p++;
+ }
+
+ h32 ^= h32 >> 15;
+ h32 *= PRIME32_2;
+ h32 ^= h32 >> 13;
+ h32 *= PRIME32_3;
+ h32 ^= h32 >> 16;
+
+ return h32;
+}
+EXPORT_SYMBOL(xxh32);
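+
+/*
+ * Example (illustrative): one-shot hashing of a buffer. "buf" and "len"
+ * are placeholder names; a fixed seed keeps the hash stable for a given
+ * input.
+ *
+ *   uint32_t hash = xxh32(buf, len, 0);
+ */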
+
+static uint64_t xxh64_round(uint64_t acc, const uint64_t input)
+{
+ acc += input * PRIME64_2;
+ acc = xxh_rotl64(acc, 31);
+ acc *= PRIME64_1;
+ return acc;
+}
+
+static uint64_t xxh64_merge_round(uint64_t acc, uint64_t val)
+{
+ val = xxh64_round(0, val);
+ acc ^= val;
+ acc = acc * PRIME64_1 + PRIME64_4;
+ return acc;
+}
+
+uint64_t xxh64(const void *input, const size_t len, const uint64_t seed)
+{
+ const uint8_t *p = (const uint8_t *)input;
+ const uint8_t *const b_end = p + len;
+ uint64_t h64;
+
+ if (len >= 32) {
+ const uint8_t *const limit = b_end - 32;
+ uint64_t v1 = seed + PRIME64_1 + PRIME64_2;
+ uint64_t v2 = seed + PRIME64_2;
+ uint64_t v3 = seed + 0;
+ uint64_t v4 = seed - PRIME64_1;
+
+ do {
+ v1 = xxh64_round(v1, get_unaligned_le64(p));
+ p += 8;
+ v2 = xxh64_round(v2, get_unaligned_le64(p));
+ p += 8;
+ v3 = xxh64_round(v3, get_unaligned_le64(p));
+ p += 8;
+ v4 = xxh64_round(v4, get_unaligned_le64(p));
+ p += 8;
+ } while (p <= limit);
+
+ h64 = xxh_rotl64(v1, 1) + xxh_rotl64(v2, 7) +
+ xxh_rotl64(v3, 12) + xxh_rotl64(v4, 18);
+ h64 = xxh64_merge_round(h64, v1);
+ h64 = xxh64_merge_round(h64, v2);
+ h64 = xxh64_merge_round(h64, v3);
+ h64 = xxh64_merge_round(h64, v4);
+
+ } else {
+ h64 = seed + PRIME64_5;
+ }
+
+ h64 += (uint64_t)len;
+
+ while (p + 8 <= b_end) {
+ const uint64_t k1 = xxh64_round(0, get_unaligned_le64(p));
+
+ h64 ^= k1;
+ h64 = xxh_rotl64(h64, 27) * PRIME64_1 + PRIME64_4;
+ p += 8;
+ }
+
+ if (p + 4 <= b_end) {
+ h64 ^= (uint64_t)(get_unaligned_le32(p)) * PRIME64_1;
+ h64 = xxh_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
+ p += 4;
+ }
+
+ while (p < b_end) {
+ h64 ^= (*p) * PRIME64_5;
+ h64 = xxh_rotl64(h64, 11) * PRIME64_1;
+ p++;
+ }
+
+ h64 ^= h64 >> 33;
+ h64 *= PRIME64_2;
+ h64 ^= h64 >> 29;
+ h64 *= PRIME64_3;
+ h64 ^= h64 >> 32;
+
+ return h64;
+}
+EXPORT_SYMBOL(xxh64);
+
+/*-**************************************************
+ * Advanced Hash Functions
+ ***************************************************/
+void xxh32_reset(struct xxh32_state *statePtr, const uint32_t seed)
+{
+ /* use a local state for memcpy() to avoid strict-aliasing warnings */
+ struct xxh32_state state;
+
+ memset(&state, 0, sizeof(state));
+ state.v1 = seed + PRIME32_1 + PRIME32_2;
+ state.v2 = seed + PRIME32_2;
+ state.v3 = seed + 0;
+ state.v4 = seed - PRIME32_1;
+ memcpy(statePtr, &state, sizeof(state));
+}
+EXPORT_SYMBOL(xxh32_reset);
+
+void xxh64_reset(struct xxh64_state *statePtr, const uint64_t seed)
+{
+ /* use a local state for memcpy() to avoid strict-aliasing warnings */
+ struct xxh64_state state;
+
+ memset(&state, 0, sizeof(state));
+ state.v1 = seed + PRIME64_1 + PRIME64_2;
+ state.v2 = seed + PRIME64_2;
+ state.v3 = seed + 0;
+ state.v4 = seed - PRIME64_1;
+ memcpy(statePtr, &state, sizeof(state));
+}
+EXPORT_SYMBOL(xxh64_reset);
+
+int xxh32_update(struct xxh32_state *state, const void *input, const size_t len)
+{
+ const uint8_t *p = (const uint8_t *)input;
+ const uint8_t *const b_end = p + len;
+
+ if (input == NULL)
+ return -EINVAL;
+
+ state->total_len_32 += (uint32_t)len;
+ state->large_len |= (len >= 16) | (state->total_len_32 >= 16);
+
+ if (state->memsize + len < 16) { /* fill in tmp buffer */
+ memcpy((uint8_t *)(state->mem32) + state->memsize, input, len);
+ state->memsize += (uint32_t)len;
+ return 0;
+ }
+
+ if (state->memsize) { /* some data left from previous update */
+ const uint32_t *p32 = state->mem32;
+
+ memcpy((uint8_t *)(state->mem32) + state->memsize, input,
+ 16 - state->memsize);
+
+ state->v1 = xxh32_round(state->v1, get_unaligned_le32(p32));
+ p32++;
+ state->v2 = xxh32_round(state->v2, get_unaligned_le32(p32));
+ p32++;
+ state->v3 = xxh32_round(state->v3, get_unaligned_le32(p32));
+ p32++;
+ state->v4 = xxh32_round(state->v4, get_unaligned_le32(p32));
+ p32++;
+
+ p += 16 - state->memsize;
+ state->memsize = 0;
+ }
+
+ if (p <= b_end - 16) {
+ const uint8_t *const limit = b_end - 16;
+ uint32_t v1 = state->v1;
+ uint32_t v2 = state->v2;
+ uint32_t v3 = state->v3;
+ uint32_t v4 = state->v4;
+
+ do {
+ v1 = xxh32_round(v1, get_unaligned_le32(p));
+ p += 4;
+ v2 = xxh32_round(v2, get_unaligned_le32(p));
+ p += 4;
+ v3 = xxh32_round(v3, get_unaligned_le32(p));
+ p += 4;
+ v4 = xxh32_round(v4, get_unaligned_le32(p));
+ p += 4;
+ } while (p <= limit);
+
+ state->v1 = v1;
+ state->v2 = v2;
+ state->v3 = v3;
+ state->v4 = v4;
+ }
+
+ if (p < b_end) {
+ memcpy(state->mem32, p, (size_t)(b_end - p));
+ state->memsize = (uint32_t)(b_end - p);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(xxh32_update);
+
+uint32_t xxh32_digest(const struct xxh32_state *state)
+{
+ const uint8_t *p = (const uint8_t *)state->mem32;
+ const uint8_t *const b_end = (const uint8_t *)(state->mem32) +
+ state->memsize;
+ uint32_t h32;
+
+ if (state->large_len) {
+ h32 = xxh_rotl32(state->v1, 1) + xxh_rotl32(state->v2, 7) +
+ xxh_rotl32(state->v3, 12) + xxh_rotl32(state->v4, 18);
+ } else {
+ h32 = state->v3 /* == seed */ + PRIME32_5;
+ }
+
+ h32 += state->total_len_32;
+
+ while (p + 4 <= b_end) {
+ h32 += get_unaligned_le32(p) * PRIME32_3;
+ h32 = xxh_rotl32(h32, 17) * PRIME32_4;
+ p += 4;
+ }
+
+ while (p < b_end) {
+ h32 += (*p) * PRIME32_5;
+ h32 = xxh_rotl32(h32, 11) * PRIME32_1;
+ p++;
+ }
+
+ h32 ^= h32 >> 15;
+ h32 *= PRIME32_2;
+ h32 ^= h32 >> 13;
+ h32 *= PRIME32_3;
+ h32 ^= h32 >> 16;
+
+ return h32;
+}
+EXPORT_SYMBOL(xxh32_digest);
+
+int xxh64_update(struct xxh64_state *state, const void *input, const size_t len)
+{
+ const uint8_t *p = (const uint8_t *)input;
+ const uint8_t *const b_end = p + len;
+
+ if (input == NULL)
+ return -EINVAL;
+
+ state->total_len += len;
+
+ if (state->memsize + len < 32) { /* fill in tmp buffer */
+ memcpy(((uint8_t *)state->mem64) + state->memsize, input, len);
+ state->memsize += (uint32_t)len;
+ return 0;
+ }
+
+ if (state->memsize) { /* tmp buffer is full */
+ uint64_t *p64 = state->mem64;
+
+ memcpy(((uint8_t *)p64) + state->memsize, input,
+ 32 - state->memsize);
+
+ state->v1 = xxh64_round(state->v1, get_unaligned_le64(p64));
+ p64++;
+ state->v2 = xxh64_round(state->v2, get_unaligned_le64(p64));
+ p64++;
+ state->v3 = xxh64_round(state->v3, get_unaligned_le64(p64));
+ p64++;
+ state->v4 = xxh64_round(state->v4, get_unaligned_le64(p64));
+
+ p += 32 - state->memsize;
+ state->memsize = 0;
+ }
+
+ if (p + 32 <= b_end) {
+ const uint8_t *const limit = b_end - 32;
+ uint64_t v1 = state->v1;
+ uint64_t v2 = state->v2;
+ uint64_t v3 = state->v3;
+ uint64_t v4 = state->v4;
+
+ do {
+ v1 = xxh64_round(v1, get_unaligned_le64(p));
+ p += 8;
+ v2 = xxh64_round(v2, get_unaligned_le64(p));
+ p += 8;
+ v3 = xxh64_round(v3, get_unaligned_le64(p));
+ p += 8;
+ v4 = xxh64_round(v4, get_unaligned_le64(p));
+ p += 8;
+ } while (p <= limit);
+
+ state->v1 = v1;
+ state->v2 = v2;
+ state->v3 = v3;
+ state->v4 = v4;
+ }
+
+ if (p < b_end) {
+ memcpy(state->mem64, p, (size_t)(b_end - p));
+ state->memsize = (uint32_t)(b_end - p);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(xxh64_update);
+
+uint64_t xxh64_digest(const struct xxh64_state *state)
+{
+ const uint8_t *p = (const uint8_t *)state->mem64;
+ const uint8_t *const b_end = (const uint8_t *)state->mem64 +
+ state->memsize;
+ uint64_t h64;
+
+ if (state->total_len >= 32) {
+ const uint64_t v1 = state->v1;
+ const uint64_t v2 = state->v2;
+ const uint64_t v3 = state->v3;
+ const uint64_t v4 = state->v4;
+
+ h64 = xxh_rotl64(v1, 1) + xxh_rotl64(v2, 7) +
+ xxh_rotl64(v3, 12) + xxh_rotl64(v4, 18);
+ h64 = xxh64_merge_round(h64, v1);
+ h64 = xxh64_merge_round(h64, v2);
+ h64 = xxh64_merge_round(h64, v3);
+ h64 = xxh64_merge_round(h64, v4);
+ } else {
+ h64 = state->v3 + PRIME64_5;
+ }
+
+ h64 += (uint64_t)state->total_len;
+
+ while (p + 8 <= b_end) {
+ const uint64_t k1 = xxh64_round(0, get_unaligned_le64(p));
+
+ h64 ^= k1;
+ h64 = xxh_rotl64(h64, 27) * PRIME64_1 + PRIME64_4;
+ p += 8;
+ }
+
+ if (p + 4 <= b_end) {
+ h64 ^= (uint64_t)(get_unaligned_le32(p)) * PRIME64_1;
+ h64 = xxh_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
+ p += 4;
+ }
+
+ while (p < b_end) {
+ h64 ^= (*p) * PRIME64_5;
+ h64 = xxh_rotl64(h64, 11) * PRIME64_1;
+ p++;
+ }
+
+ h64 ^= h64 >> 33;
+ h64 *= PRIME64_2;
+ h64 ^= h64 >> 29;
+ h64 *= PRIME64_3;
+ h64 ^= h64 >> 32;
+
+ return h64;
+}
+EXPORT_SYMBOL(xxh64_digest);
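+
+/*
+ * Example (hypothetical caller): streaming use of the advanced API for
+ * data that arrives in pieces:
+ *
+ *	struct xxh64_state st;
+ *	uint64_t hash;
+ *
+ *	xxh64_reset(&st, 0);
+ *	xxh64_update(&st, part1, len1);
+ *	xxh64_update(&st, part2, len2);
+ *	hash = xxh64_digest(&st);
+ */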
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("xxHash");
diff --git a/lib/xz/Kconfig b/lib/xz/Kconfig
new file mode 100644
index 000000000..adce22ac1
--- /dev/null
+++ b/lib/xz/Kconfig
@@ -0,0 +1,71 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config XZ_DEC
+ tristate "XZ decompression support"
+ select CRC32
+ help
+ The LZMA2 compression algorithm and BCJ filters are supported, using
+ the .xz file format as the container. For integrity checking,
+ CRC32 is supported. See Documentation/staging/xz.rst for more information.
+
+if XZ_DEC
+
+config XZ_DEC_X86
+ bool "x86 BCJ filter decoder" if EXPERT
+ default y
+ select XZ_DEC_BCJ
+
+config XZ_DEC_POWERPC
+ bool "PowerPC BCJ filter decoder" if EXPERT
+ default y
+ select XZ_DEC_BCJ
+
+config XZ_DEC_IA64
+ bool "IA-64 BCJ filter decoder" if EXPERT
+ default y
+ select XZ_DEC_BCJ
+
+config XZ_DEC_ARM
+ bool "ARM BCJ filter decoder" if EXPERT
+ default y
+ select XZ_DEC_BCJ
+
+config XZ_DEC_ARMTHUMB
+ bool "ARM-Thumb BCJ filter decoder" if EXPERT
+ default y
+ select XZ_DEC_BCJ
+
+config XZ_DEC_SPARC
+ bool "SPARC BCJ filter decoder" if EXPERT
+ default y
+ select XZ_DEC_BCJ
+
+config XZ_DEC_MICROLZMA
+ bool "MicroLZMA decoder"
+ default n
+ help
+ MicroLZMA is a header format variant where the first byte
+ of a raw LZMA stream (without the end-of-stream marker) has
+ been replaced with a bitwise negation of the lc/lp/pb
+ properties byte. MicroLZMA was created for use in EROFS,
+ but it can be used elsewhere too when keeping header
+ overhead to a minimum is important.
+
+ Unless you know that you need this, say N.
+
+endif
+
+config XZ_DEC_BCJ
+ bool
+ default n
+
+config XZ_DEC_TEST
+ tristate "XZ decompressor tester"
+ default n
+ depends on XZ_DEC
+ help
+ This allows passing .xz files to the in-kernel XZ decoder via
+ a character special file. It calculates CRC32 of the decompressed
+ data and writes diagnostics to the system log.
+
+ Unless you are developing the XZ decoder, you don't need this
+ and should say N.
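+
+# Example (illustrative; the device name comes from the test module):
+# with XZ_DEC_TEST enabled, an .xz file can be fed to the in-kernel
+# decoder with:
+#	cat foo.xz > /dev/xz_dec_test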
diff --git a/lib/xz/Makefile b/lib/xz/Makefile
new file mode 100644
index 000000000..fa6af814a
--- /dev/null
+++ b/lib/xz/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_XZ_DEC) += xz_dec.o
+xz_dec-y := xz_dec_syms.o xz_dec_stream.o xz_dec_lzma2.o
+xz_dec-$(CONFIG_XZ_DEC_BCJ) += xz_dec_bcj.o
+
+obj-$(CONFIG_XZ_DEC_TEST) += xz_dec_test.o
diff --git a/lib/xz/xz_crc32.c b/lib/xz/xz_crc32.c
new file mode 100644
index 000000000..88a2c35e1
--- /dev/null
+++ b/lib/xz/xz_crc32.c
@@ -0,0 +1,59 @@
+/*
+ * CRC32 using the polynomial from IEEE-802.3
+ *
+ * Authors: Lasse Collin <lasse.collin@tukaani.org>
+ * Igor Pavlov <https://7-zip.org/>
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ */
+
+/*
+ * This is not the fastest implementation, but it is pretty compact.
+ * The fastest versions of xz_crc32() on modern CPUs without a
+ * hardware-accelerated CRC instruction are 3-5 times as fast as this version,
+ * but they are bigger and use more memory for the lookup table.
+ */
+
+#include "xz_private.h"
+
+/*
+ * STATIC_RW_DATA is used in the pre-boot environment on some architectures.
+ * See <linux/decompress/mm.h> for details.
+ */
+#ifndef STATIC_RW_DATA
+# define STATIC_RW_DATA static
+#endif
+
+STATIC_RW_DATA uint32_t xz_crc32_table[256];
+
+XZ_EXTERN void xz_crc32_init(void)
+{
+ const uint32_t poly = CRC32_POLY_LE;
+
+ uint32_t i;
+ uint32_t j;
+ uint32_t r;
+
+ for (i = 0; i < 256; ++i) {
+ r = i;
+ for (j = 0; j < 8; ++j)
+ r = (r >> 1) ^ (poly & ~((r & 1) - 1));
+
+ xz_crc32_table[i] = r;
+ }
+}
+
+XZ_EXTERN uint32_t xz_crc32(const uint8_t *buf, size_t size, uint32_t crc)
+{
+ crc = ~crc;
+
+ while (size != 0) {
+ crc = xz_crc32_table[*buf++ ^ (crc & 0xFF)] ^ (crc >> 8);
+ --size;
+ }
+
+ return ~crc;
+}
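+
+/*
+ * Example (hypothetical caller): the crc argument lets the CRC be chained
+ * across several buffers; pass 0 for the first call:
+ *
+ *	uint32_t crc = xz_crc32(buf1, size1, 0);
+ *	crc = xz_crc32(buf2, size2, crc);
+ */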
diff --git a/lib/xz/xz_dec_bcj.c b/lib/xz/xz_dec_bcj.c
new file mode 100644
index 000000000..ef449e97d
--- /dev/null
+++ b/lib/xz/xz_dec_bcj.c
@@ -0,0 +1,574 @@
+/*
+ * Branch/Call/Jump (BCJ) filter decoders
+ *
+ * Authors: Lasse Collin <lasse.collin@tukaani.org>
+ * Igor Pavlov <https://7-zip.org/>
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ */
+
+#include "xz_private.h"
+
+/*
+ * The rest of the file is inside this ifdef. It makes things a little more
+ * convenient when building without support for any BCJ filters.
+ */
+#ifdef XZ_DEC_BCJ
+
+struct xz_dec_bcj {
+ /* Type of the BCJ filter being used */
+ enum {
+ BCJ_X86 = 4, /* x86 or x86-64 */
+ BCJ_POWERPC = 5, /* Big endian only */
+ BCJ_IA64 = 6, /* Big or little endian */
+ BCJ_ARM = 7, /* Little endian only */
+ BCJ_ARMTHUMB = 8, /* Little endian only */
+ BCJ_SPARC = 9 /* Big or little endian */
+ } type;
+
+ /*
+ * Return value of the next filter in the chain. We need to preserve
+ * this information across calls, because we must not call the next
+ * filter anymore once it has returned XZ_STREAM_END.
+ */
+ enum xz_ret ret;
+
+ /* True if we are operating in single-call mode. */
+ bool single_call;
+
+ /*
+ * Absolute position relative to the beginning of the uncompressed
+ * data (in a single .xz Block). We care only about the lowest 32
+ * bits so this doesn't need to be uint64_t even with big files.
+ */
+ uint32_t pos;
+
+ /* x86 filter state */
+ uint32_t x86_prev_mask;
+
+ /* Temporary space to hold the variables from struct xz_buf */
+ uint8_t *out;
+ size_t out_pos;
+ size_t out_size;
+
+ struct {
+ /* Amount of already filtered data in the beginning of buf */
+ size_t filtered;
+
+ /* Total amount of data currently stored in buf */
+ size_t size;
+
+ /*
+ * Buffer to hold a mix of filtered and unfiltered data. This
+ * needs to be big enough to hold Alignment + 2 * Look-ahead:
+ *
+ * Type Alignment Look-ahead
+ * x86 1 4
+ * PowerPC 4 0
+ * IA-64 16 0
+ * ARM 4 0
+ * ARM-Thumb 2 2
+ * SPARC 4 0
+ */
+ uint8_t buf[16];
+ } temp;
+};
+
+#ifdef XZ_DEC_X86
+/*
+ * This is used to test the most significant byte of a memory address
+ * in an x86 instruction.
+ */
+static inline int bcj_x86_test_msbyte(uint8_t b)
+{
+ return b == 0x00 || b == 0xFF;
+}
+
+static size_t bcj_x86(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
+{
+ static const bool mask_to_allowed_status[8]
+ = { true, true, true, false, true, false, false, false };
+
+ static const uint8_t mask_to_bit_num[8] = { 0, 1, 2, 2, 3, 3, 3, 3 };
+
+ size_t i;
+ size_t prev_pos = (size_t)-1;
+ uint32_t prev_mask = s->x86_prev_mask;
+ uint32_t src;
+ uint32_t dest;
+ uint32_t j;
+ uint8_t b;
+
+ if (size <= 4)
+ return 0;
+
+ size -= 4;
+ for (i = 0; i < size; ++i) {
+ if ((buf[i] & 0xFE) != 0xE8)
+ continue;
+
+ prev_pos = i - prev_pos;
+ if (prev_pos > 3) {
+ prev_mask = 0;
+ } else {
+ prev_mask = (prev_mask << (prev_pos - 1)) & 7;
+ if (prev_mask != 0) {
+ b = buf[i + 4 - mask_to_bit_num[prev_mask]];
+ if (!mask_to_allowed_status[prev_mask]
+ || bcj_x86_test_msbyte(b)) {
+ prev_pos = i;
+ prev_mask = (prev_mask << 1) | 1;
+ continue;
+ }
+ }
+ }
+
+ prev_pos = i;
+
+ if (bcj_x86_test_msbyte(buf[i + 4])) {
+ src = get_unaligned_le32(buf + i + 1);
+ while (true) {
+ dest = src - (s->pos + (uint32_t)i + 5);
+ if (prev_mask == 0)
+ break;
+
+ j = mask_to_bit_num[prev_mask] * 8;
+ b = (uint8_t)(dest >> (24 - j));
+ if (!bcj_x86_test_msbyte(b))
+ break;
+
+ src = dest ^ (((uint32_t)1 << (32 - j)) - 1);
+ }
+
+ dest &= 0x01FFFFFF;
+ dest |= (uint32_t)0 - (dest & 0x01000000);
+ put_unaligned_le32(dest, buf + i + 1);
+ i += 4;
+ } else {
+ prev_mask = (prev_mask << 1) | 1;
+ }
+ }
+
+ prev_pos = i - prev_pos;
+ s->x86_prev_mask = prev_pos > 3 ? 0 : prev_mask << (prev_pos - 1);
+ return i;
+}
+#endif
+
+#ifdef XZ_DEC_POWERPC
+static size_t bcj_powerpc(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
+{
+ size_t i;
+ uint32_t instr;
+
+ for (i = 0; i + 4 <= size; i += 4) {
+ instr = get_unaligned_be32(buf + i);
+ if ((instr & 0xFC000003) == 0x48000001) {
+ instr &= 0x03FFFFFC;
+ instr -= s->pos + (uint32_t)i;
+ instr &= 0x03FFFFFC;
+ instr |= 0x48000001;
+ put_unaligned_be32(instr, buf + i);
+ }
+ }
+
+ return i;
+}
+#endif
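+
+/*
+ * Worked example (illustrative numbers): bcj_powerpc() above matches "bl"
+ * instructions (0x48xxxxxx with the two low bits 01). If the encoder turned
+ * a branch at uncompressed offset 0x100 into the absolute target 0x1234,
+ * the subtraction restores the relative displacement 0x1234 - 0x100 =
+ * 0x1134.
+ */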
+
+#ifdef XZ_DEC_IA64
+static size_t bcj_ia64(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
+{
+ static const uint8_t branch_table[32] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 4, 4, 6, 6, 0, 0, 7, 7,
+ 4, 4, 0, 0, 4, 4, 0, 0
+ };
+
+ /*
+ * The local variables take a bit of stack space, but it's less
+ * than what the LZMA2 decoder takes, so it doesn't make sense to reduce
+ * stack usage here without doing the same for the LZMA2 decoder too.
+ */
+
+ /* Loop counters */
+ size_t i;
+ size_t j;
+
+ /* Instruction slot (0, 1, or 2) in the 128-bit instruction word */
+ uint32_t slot;
+
+ /* Bitwise offset of the instruction indicated by slot */
+ uint32_t bit_pos;
+
+ /* bit_pos split into byte and bit parts */
+ uint32_t byte_pos;
+ uint32_t bit_res;
+
+ /* Address part of an instruction */
+ uint32_t addr;
+
+ /* Mask used to detect which instructions to convert */
+ uint32_t mask;
+
+ /* 41-bit instruction stored somewhere in the lowest 48 bits */
+ uint64_t instr;
+
+ /* Instruction normalized with bit_res for easier manipulation */
+ uint64_t norm;
+
+ for (i = 0; i + 16 <= size; i += 16) {
+ mask = branch_table[buf[i] & 0x1F];
+ for (slot = 0, bit_pos = 5; slot < 3; ++slot, bit_pos += 41) {
+ if (((mask >> slot) & 1) == 0)
+ continue;
+
+ byte_pos = bit_pos >> 3;
+ bit_res = bit_pos & 7;
+ instr = 0;
+ for (j = 0; j < 6; ++j)
+ instr |= (uint64_t)(buf[i + j + byte_pos])
+ << (8 * j);
+
+ norm = instr >> bit_res;
+
+ if (((norm >> 37) & 0x0F) == 0x05
+ && ((norm >> 9) & 0x07) == 0) {
+ addr = (norm >> 13) & 0x0FFFFF;
+ addr |= ((uint32_t)(norm >> 36) & 1) << 20;
+ addr <<= 4;
+ addr -= s->pos + (uint32_t)i;
+ addr >>= 4;
+
+ norm &= ~((uint64_t)0x8FFFFF << 13);
+ norm |= (uint64_t)(addr & 0x0FFFFF) << 13;
+ norm |= (uint64_t)(addr & 0x100000)
+ << (36 - 20);
+
+ instr &= (1 << bit_res) - 1;
+ instr |= norm << bit_res;
+
+ for (j = 0; j < 6; j++)
+ buf[i + j + byte_pos]
+ = (uint8_t)(instr >> (8 * j));
+ }
+ }
+ }
+
+ return i;
+}
+#endif
+
+#ifdef XZ_DEC_ARM
+static size_t bcj_arm(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
+{
+ size_t i;
+ uint32_t addr;
+
+ for (i = 0; i + 4 <= size; i += 4) {
+ if (buf[i + 3] == 0xEB) {
+ addr = (uint32_t)buf[i] | ((uint32_t)buf[i + 1] << 8)
+ | ((uint32_t)buf[i + 2] << 16);
+ addr <<= 2;
+ addr -= s->pos + (uint32_t)i + 8;
+ addr >>= 2;
+ buf[i] = (uint8_t)addr;
+ buf[i + 1] = (uint8_t)(addr >> 8);
+ buf[i + 2] = (uint8_t)(addr >> 16);
+ }
+ }
+
+ return i;
+}
+#endif
+
+#ifdef XZ_DEC_ARMTHUMB
+static size_t bcj_armthumb(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
+{
+ size_t i;
+ uint32_t addr;
+
+ for (i = 0; i + 4 <= size; i += 2) {
+ if ((buf[i + 1] & 0xF8) == 0xF0
+ && (buf[i + 3] & 0xF8) == 0xF8) {
+ addr = (((uint32_t)buf[i + 1] & 0x07) << 19)
+ | ((uint32_t)buf[i] << 11)
+ | (((uint32_t)buf[i + 3] & 0x07) << 8)
+ | (uint32_t)buf[i + 2];
+ addr <<= 1;
+ addr -= s->pos + (uint32_t)i + 4;
+ addr >>= 1;
+ buf[i + 1] = (uint8_t)(0xF0 | ((addr >> 19) & 0x07));
+ buf[i] = (uint8_t)(addr >> 11);
+ buf[i + 3] = (uint8_t)(0xF8 | ((addr >> 8) & 0x07));
+ buf[i + 2] = (uint8_t)addr;
+ i += 2;
+ }
+ }
+
+ return i;
+}
+#endif
+
+#ifdef XZ_DEC_SPARC
+static size_t bcj_sparc(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
+{
+ size_t i;
+ uint32_t instr;
+
+ for (i = 0; i + 4 <= size; i += 4) {
+ instr = get_unaligned_be32(buf + i);
+ if ((instr >> 22) == 0x100 || (instr >> 22) == 0x1FF) {
+ instr <<= 2;
+ instr -= s->pos + (uint32_t)i;
+ instr >>= 2;
+ instr = ((uint32_t)0x40000000 - (instr & 0x400000))
+ | 0x40000000 | (instr & 0x3FFFFF);
+ put_unaligned_be32(instr, buf + i);
+ }
+ }
+
+ return i;
+}
+#endif
+
+/*
+ * Apply the selected BCJ filter. Update *pos and s->pos to match the amount
+ * of data that got filtered.
+ *
+ * NOTE: This is implemented as a switch statement to avoid using function
+ * pointers, which could be problematic in the kernel boot code, where
+ * pointers to static data must be avoided (at least on x86).
+ */
+static void bcj_apply(struct xz_dec_bcj *s,
+ uint8_t *buf, size_t *pos, size_t size)
+{
+ size_t filtered;
+
+ buf += *pos;
+ size -= *pos;
+
+ switch (s->type) {
+#ifdef XZ_DEC_X86
+ case BCJ_X86:
+ filtered = bcj_x86(s, buf, size);
+ break;
+#endif
+#ifdef XZ_DEC_POWERPC
+ case BCJ_POWERPC:
+ filtered = bcj_powerpc(s, buf, size);
+ break;
+#endif
+#ifdef XZ_DEC_IA64
+ case BCJ_IA64:
+ filtered = bcj_ia64(s, buf, size);
+ break;
+#endif
+#ifdef XZ_DEC_ARM
+ case BCJ_ARM:
+ filtered = bcj_arm(s, buf, size);
+ break;
+#endif
+#ifdef XZ_DEC_ARMTHUMB
+ case BCJ_ARMTHUMB:
+ filtered = bcj_armthumb(s, buf, size);
+ break;
+#endif
+#ifdef XZ_DEC_SPARC
+ case BCJ_SPARC:
+ filtered = bcj_sparc(s, buf, size);
+ break;
+#endif
+ default:
+ /* Never reached but silence compiler warnings. */
+ filtered = 0;
+ break;
+ }
+
+ *pos += filtered;
+ s->pos += filtered;
+}
+
+/*
+ * Flush pending filtered data from temp to the output buffer.
+ * Move the remaining mixture of possibly filtered and unfiltered
+ * data to the beginning of temp.
+ */
+static void bcj_flush(struct xz_dec_bcj *s, struct xz_buf *b)
+{
+ size_t copy_size;
+
+ copy_size = min_t(size_t, s->temp.filtered, b->out_size - b->out_pos);
+ memcpy(b->out + b->out_pos, s->temp.buf, copy_size);
+ b->out_pos += copy_size;
+
+ s->temp.filtered -= copy_size;
+ s->temp.size -= copy_size;
+ memmove(s->temp.buf, s->temp.buf + copy_size, s->temp.size);
+}
+
+/*
+ * The BCJ filter functions are primitive in the sense that they process
+ * the data in chunks of 1-16 bytes. To hide this, this function does
+ * some buffering.
+ */
+XZ_EXTERN enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s,
+ struct xz_dec_lzma2 *lzma2,
+ struct xz_buf *b)
+{
+ size_t out_start;
+
+ /*
+ * Flush pending already filtered data to the output buffer. Return
+ * immediately if we couldn't flush everything, or if the next
+ * filter in the chain had already returned XZ_STREAM_END.
+ */
+ if (s->temp.filtered > 0) {
+ bcj_flush(s, b);
+ if (s->temp.filtered > 0)
+ return XZ_OK;
+
+ if (s->ret == XZ_STREAM_END)
+ return XZ_STREAM_END;
+ }
+
+ /*
+ * If we have more output space than what is currently pending in
+ * temp, copy the unfiltered data from temp to the output buffer
+ * and try to fill the output buffer by decoding more data from the
+ * next filter in the chain. Apply the BCJ filter on the new data
+ * in the output buffer. If everything cannot be filtered, copy it
+ * to temp and rewind the output buffer position accordingly.
+ *
+ * This must always be run when temp.size == 0 to handle a special
+ * case where the output buffer is full and the next filter has no
+ * more output coming but hasn't returned XZ_STREAM_END yet.
+ */
+ if (s->temp.size < b->out_size - b->out_pos || s->temp.size == 0) {
+ out_start = b->out_pos;
+ memcpy(b->out + b->out_pos, s->temp.buf, s->temp.size);
+ b->out_pos += s->temp.size;
+
+ s->ret = xz_dec_lzma2_run(lzma2, b);
+ if (s->ret != XZ_STREAM_END
+ && (s->ret != XZ_OK || s->single_call))
+ return s->ret;
+
+ bcj_apply(s, b->out, &out_start, b->out_pos);
+
+ /*
+ * As an exception, if the next filter returned XZ_STREAM_END,
+ * we can do that too, since the last few bytes that remain
+ * unfiltered are meant to remain unfiltered.
+ */
+ if (s->ret == XZ_STREAM_END)
+ return XZ_STREAM_END;
+
+ s->temp.size = b->out_pos - out_start;
+ b->out_pos -= s->temp.size;
+ memcpy(s->temp.buf, b->out + b->out_pos, s->temp.size);
+
+ /*
+ * If there wasn't enough input to the next filter to fill
+ * the output buffer with unfiltered data, there's no point
+ * in trying to decode more data into temp.
+ */
+ if (b->out_pos + s->temp.size < b->out_size)
+ return XZ_OK;
+ }
+
+ /*
+ * We have unfiltered data in temp. If the output buffer isn't full
+ * yet, try to fill the temp buffer by decoding more data from the
+ * next filter. Apply the BCJ filter on temp. Then we hopefully can
+ * fill the actual output buffer by copying filtered data from temp.
+ * A mix of filtered and unfiltered data may be left in temp; it will
+ * be taken care of on the next call to this function.
+ */
+ if (b->out_pos < b->out_size) {
+ /* Make b->out{,_pos,_size} temporarily point to s->temp. */
+ s->out = b->out;
+ s->out_pos = b->out_pos;
+ s->out_size = b->out_size;
+ b->out = s->temp.buf;
+ b->out_pos = s->temp.size;
+ b->out_size = sizeof(s->temp.buf);
+
+ s->ret = xz_dec_lzma2_run(lzma2, b);
+
+ s->temp.size = b->out_pos;
+ b->out = s->out;
+ b->out_pos = s->out_pos;
+ b->out_size = s->out_size;
+
+ if (s->ret != XZ_OK && s->ret != XZ_STREAM_END)
+ return s->ret;
+
+ bcj_apply(s, s->temp.buf, &s->temp.filtered, s->temp.size);
+
+ /*
+ * If the next filter returned XZ_STREAM_END, we mark that
+ * everything is filtered, since the last unfiltered bytes
+ * of the stream are meant to be left as is.
+ */
+ if (s->ret == XZ_STREAM_END)
+ s->temp.filtered = s->temp.size;
+
+ bcj_flush(s, b);
+ if (s->temp.filtered > 0)
+ return XZ_OK;
+ }
+
+ return s->ret;
+}
+
+XZ_EXTERN struct xz_dec_bcj *xz_dec_bcj_create(bool single_call)
+{
+ struct xz_dec_bcj *s = kmalloc(sizeof(*s), GFP_KERNEL);
+ if (s != NULL)
+ s->single_call = single_call;
+
+ return s;
+}
+
+XZ_EXTERN enum xz_ret xz_dec_bcj_reset(struct xz_dec_bcj *s, uint8_t id)
+{
+ switch (id) {
+#ifdef XZ_DEC_X86
+ case BCJ_X86:
+#endif
+#ifdef XZ_DEC_POWERPC
+ case BCJ_POWERPC:
+#endif
+#ifdef XZ_DEC_IA64
+ case BCJ_IA64:
+#endif
+#ifdef XZ_DEC_ARM
+ case BCJ_ARM:
+#endif
+#ifdef XZ_DEC_ARMTHUMB
+ case BCJ_ARMTHUMB:
+#endif
+#ifdef XZ_DEC_SPARC
+ case BCJ_SPARC:
+#endif
+ break;
+
+ default:
+ /* Unsupported Filter ID */
+ return XZ_OPTIONS_ERROR;
+ }
+
+ s->type = id;
+ s->ret = XZ_OK;
+ s->pos = 0;
+ s->x86_prev_mask = 0;
+ s->temp.filtered = 0;
+ s->temp.size = 0;
+
+ return XZ_OK;
+}
+
+#endif
diff --git a/lib/xz/xz_dec_lzma2.c b/lib/xz/xz_dec_lzma2.c
new file mode 100644
index 000000000..27ce34520
--- /dev/null
+++ b/lib/xz/xz_dec_lzma2.c
@@ -0,0 +1,1344 @@
+/*
+ * LZMA2 decoder
+ *
+ * Authors: Lasse Collin <lasse.collin@tukaani.org>
+ * Igor Pavlov <https://7-zip.org/>
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ */
+
+#include "xz_private.h"
+#include "xz_lzma2.h"
+
+/*
+ * Range decoder initialization eats the first five bytes of each LZMA chunk.
+ */
+#define RC_INIT_BYTES 5
+
+/*
+ * Minimum number of usable input bytes needed to safely decode one LZMA
+ * symbol. The worst case is that we decode 22 bits using probabilities
+ * and 26 direct bits. This may consume at most 20 bytes of input. However,
+ * lzma_main() does an extra normalization before returning, thus we
+ * need to put 21 here.
+ */
+#define LZMA_IN_REQUIRED 21
+
+/*
+ * Dictionary (history buffer)
+ *
+ * These are always true:
+ * start <= pos <= full <= end
+ * pos <= limit <= end
+ *
+ * In multi-call mode, also these are true:
+ * end == size
+ * size <= size_max
+ * allocated <= size
+ *
+ * Most of these variables are size_t to support single-call mode,
+ * in which the dictionary variables address the actual output
+ * buffer directly.
+ */
+struct dictionary {
+ /* Beginning of the history buffer */
+ uint8_t *buf;
+
+ /* Old position in buf (before decoding more data) */
+ size_t start;
+
+ /* Position in buf */
+ size_t pos;
+
+ /*
+ * How full the dictionary is. This is used to detect corrupt input that
+ * would read beyond the beginning of the uncompressed stream.
+ */
+ size_t full;
+
+ /* Write limit; we don't write to buf[limit] or later bytes. */
+ size_t limit;
+
+ /*
+ * End of the dictionary buffer. In multi-call mode, this is
+ * the same as the dictionary size. In single-call mode, this
+ * indicates the size of the output buffer.
+ */
+ size_t end;
+
+ /*
+ * Size of the dictionary as specified in Block Header. This is used
+ * together with "full" to detect corrupt input that would make us
+ * read beyond the beginning of the uncompressed stream.
+ */
+ uint32_t size;
+
+ /*
+ * Maximum allowed dictionary size in multi-call mode.
+ * This is ignored in single-call mode.
+ */
+ uint32_t size_max;
+
+ /*
+ * Amount of memory currently allocated for the dictionary.
+ * This is used only with XZ_DYNALLOC. (With XZ_PREALLOC,
+ * size_max is always the same as the allocated size.)
+ */
+ uint32_t allocated;
+
+ /* Operation mode */
+ enum xz_mode mode;
+};
+
+/* Range decoder */
+struct rc_dec {
+ uint32_t range;
+ uint32_t code;
+
+ /*
+ * Number of initializing bytes remaining to be read
+ * by rc_read_init().
+ */
+ uint32_t init_bytes_left;
+
+ /*
+ * Buffer from which we read our input. It can be either
+ * temp.buf or the caller-provided input buffer.
+ */
+ const uint8_t *in;
+ size_t in_pos;
+ size_t in_limit;
+};
+
+/* Probabilities for a length decoder. */
+struct lzma_len_dec {
+ /* Probability of match length being at least 10 */
+ uint16_t choice;
+
+ /* Probability of match length being at least 18 */
+ uint16_t choice2;
+
+ /* Probabilities for match lengths 2-9 */
+ uint16_t low[POS_STATES_MAX][LEN_LOW_SYMBOLS];
+
+ /* Probabilities for match lengths 10-17 */
+ uint16_t mid[POS_STATES_MAX][LEN_MID_SYMBOLS];
+
+ /* Probabilities for match lengths 18-273 */
+ uint16_t high[LEN_HIGH_SYMBOLS];
+};
+
+struct lzma_dec {
+ /* Distances of latest four matches */
+ uint32_t rep0;
+ uint32_t rep1;
+ uint32_t rep2;
+ uint32_t rep3;
+
+ /* Types of the most recently seen LZMA symbols */
+ enum lzma_state state;
+
+ /*
+ * Length of a match. This is updated so that dict_repeat can
+ * be called again to finish repeating the whole match.
+ */
+ uint32_t len;
+
+ /*
+ * LZMA properties or related bit masks (number of literal
+ * context bits, a mask derived from the number of literal
+ * position bits, and a mask derived from the number of
+ * position bits)
+ */
+ uint32_t lc;
+ uint32_t literal_pos_mask; /* (1 << lp) - 1 */
+ uint32_t pos_mask; /* (1 << pb) - 1 */
+
+ /* If 1, it's a match. Otherwise it's a single 8-bit literal. */
+ uint16_t is_match[STATES][POS_STATES_MAX];
+
+ /* If 1, it's a repeated match. The distance is one of rep0 .. rep3. */
+ uint16_t is_rep[STATES];
+
+ /*
+ * If 0, distance of a repeated match is rep0.
+ * Otherwise check is_rep1.
+ */
+ uint16_t is_rep0[STATES];
+
+ /*
+ * If 0, distance of a repeated match is rep1.
+ * Otherwise check is_rep2.
+ */
+ uint16_t is_rep1[STATES];
+
+ /* If 0, distance of a repeated match is rep2. Otherwise it is rep3. */
+ uint16_t is_rep2[STATES];
+
+ /*
+ * If 1, the repeated match has length of one byte. Otherwise
+ * the length is decoded from rep_len_decoder.
+ */
+ uint16_t is_rep0_long[STATES][POS_STATES_MAX];
+
+ /*
+ * Probability tree for the highest two bits of the match
+ * distance. There is a separate probability tree for match
+ * lengths of 2 (i.e. MATCH_LEN_MIN), 3, 4, and [5, 273].
+ */
+ uint16_t dist_slot[DIST_STATES][DIST_SLOTS];
+
+ /*
+ * Probability trees for additional bits for match distance
+ * when the distance is in the range [4, 127].
+ */
+ uint16_t dist_special[FULL_DISTANCES - DIST_MODEL_END];
+
+ /*
+ * Probability tree for the lowest four bits of a match
+ * distance that is equal to or greater than 128.
+ */
+ uint16_t dist_align[ALIGN_SIZE];
+
+ /* Length of a normal match */
+ struct lzma_len_dec match_len_dec;
+
+ /* Length of a repeated match */
+ struct lzma_len_dec rep_len_dec;
+
+ /* Probabilities of literals */
+ uint16_t literal[LITERAL_CODERS_MAX][LITERAL_CODER_SIZE];
+};
+
+struct lzma2_dec {
+ /* Position in xz_dec_lzma2_run(). */
+ enum lzma2_seq {
+ SEQ_CONTROL,
+ SEQ_UNCOMPRESSED_1,
+ SEQ_UNCOMPRESSED_2,
+ SEQ_COMPRESSED_0,
+ SEQ_COMPRESSED_1,
+ SEQ_PROPERTIES,
+ SEQ_LZMA_PREPARE,
+ SEQ_LZMA_RUN,
+ SEQ_COPY
+ } sequence;
+
+ /* Next position after decoding the compressed size of the chunk. */
+ enum lzma2_seq next_sequence;
+
+ /* Uncompressed size of LZMA chunk (2 MiB at maximum) */
+ uint32_t uncompressed;
+
+ /*
+ * Compressed size of LZMA chunk or compressed/uncompressed
+ * size of uncompressed chunk (64 KiB at maximum)
+ */
+ uint32_t compressed;
+
+ /*
+ * True if dictionary reset is needed. This is false before
+ * the first chunk (LZMA or uncompressed).
+ */
+ bool need_dict_reset;
+
+ /*
+ * True if new LZMA properties are needed. This is false
+ * before the first LZMA chunk.
+ */
+ bool need_props;
+
+#ifdef XZ_DEC_MICROLZMA
+ bool pedantic_microlzma;
+#endif
+};
+
+struct xz_dec_lzma2 {
+ /*
+ * The order below is important on x86 to reduce code size and
+ * it shouldn't hurt on other platforms. Everything up to and
+ * including lzma.pos_mask are in the first 128 bytes on x86-32,
+ * which allows using smaller instructions to access those
+ * variables. On x86-64, fewer variables fit into the first 128
+ * bytes, but this is still the best order without sacrificing
+ * the readability by splitting the structures.
+ */
+ struct rc_dec rc;
+ struct dictionary dict;
+ struct lzma2_dec lzma2;
+ struct lzma_dec lzma;
+
+ /*
+ * Temporary buffer which holds a small number of input bytes between
+ * decoder calls. See lzma2_lzma() for details.
+ */
+ struct {
+ uint32_t size;
+ uint8_t buf[3 * LZMA_IN_REQUIRED];
+ } temp;
+};
+
+/**************
+ * Dictionary *
+ **************/
+
+/*
+ * Reset the dictionary state. When in single-call mode, set up the beginning
+ * of the dictionary to point to the actual output buffer.
+ */
+static void dict_reset(struct dictionary *dict, struct xz_buf *b)
+{
+ if (DEC_IS_SINGLE(dict->mode)) {
+ dict->buf = b->out + b->out_pos;
+ dict->end = b->out_size - b->out_pos;
+ }
+
+ dict->start = 0;
+ dict->pos = 0;
+ dict->limit = 0;
+ dict->full = 0;
+}
+
+/* Set dictionary write limit */
+static void dict_limit(struct dictionary *dict, size_t out_max)
+{
+ if (dict->end - dict->pos <= out_max)
+ dict->limit = dict->end;
+ else
+ dict->limit = dict->pos + out_max;
+}
+
+/* Return true if at least one byte can be written into the dictionary. */
+static inline bool dict_has_space(const struct dictionary *dict)
+{
+ return dict->pos < dict->limit;
+}
+
+/*
+ * Get a byte from the dictionary at the given distance. The distance is
+ * assumed to be valid or, as a special case, zero when the dictionary is
+ * still empty. This special case is needed for single-call decoding to
+ * avoid writing a '\0' to the end of the destination buffer.
+ */
+static inline uint32_t dict_get(const struct dictionary *dict, uint32_t dist)
+{
+ size_t offset = dict->pos - dist - 1;
+
+ if (dist >= dict->pos)
+ offset += dict->end;
+
+ return dict->full > 0 ? dict->buf[offset] : 0;
+}
+
+/*
+ * Put one byte into the dictionary. It is assumed that there is space for it.
+ */
+static inline void dict_put(struct dictionary *dict, uint8_t byte)
+{
+ dict->buf[dict->pos++] = byte;
+
+ if (dict->full < dict->pos)
+ dict->full = dict->pos;
+}
+
+/*
+ * Repeat given number of bytes from the given distance. If the distance is
+ * invalid, false is returned. On success, true is returned and *len is
+ * updated to indicate how many bytes were left to be repeated.
+ */
+static bool dict_repeat(struct dictionary *dict, uint32_t *len, uint32_t dist)
+{
+ size_t back;
+ uint32_t left;
+
+ if (dist >= dict->full || dist >= dict->size)
+ return false;
+
+ left = min_t(size_t, dict->limit - dict->pos, *len);
+ *len -= left;
+
+ back = dict->pos - dist - 1;
+ if (dist >= dict->pos)
+ back += dict->end;
+
+ do {
+ dict->buf[dict->pos++] = dict->buf[back++];
+ if (back == dict->end)
+ back = 0;
+ } while (--left > 0);
+
+ if (dict->full < dict->pos)
+ dict->full = dict->pos;
+
+ return true;
+}
+
+/* Copy uncompressed data as is from input to dictionary and output buffers. */
+static void dict_uncompressed(struct dictionary *dict, struct xz_buf *b,
+ uint32_t *left)
+{
+ size_t copy_size;
+
+ while (*left > 0 && b->in_pos < b->in_size
+ && b->out_pos < b->out_size) {
+ copy_size = min(b->in_size - b->in_pos,
+ b->out_size - b->out_pos);
+ if (copy_size > dict->end - dict->pos)
+ copy_size = dict->end - dict->pos;
+ if (copy_size > *left)
+ copy_size = *left;
+
+ *left -= copy_size;
+
+ /*
+ * If doing in-place decompression in single-call mode and the
+ * uncompressed size of the file is larger than the caller
+ * thought (i.e. it is invalid input!), the buffers below may
+ * overlap and cause undefined behavior with memcpy().
+ * With valid inputs memcpy() would be fine here.
+ */
+ memmove(dict->buf + dict->pos, b->in + b->in_pos, copy_size);
+ dict->pos += copy_size;
+
+ if (dict->full < dict->pos)
+ dict->full = dict->pos;
+
+ if (DEC_IS_MULTI(dict->mode)) {
+ if (dict->pos == dict->end)
+ dict->pos = 0;
+
+ /*
+ * Like above but for multi-call mode: use memmove()
+ * to avoid undefined behavior with invalid input.
+ */
+ memmove(b->out + b->out_pos, b->in + b->in_pos,
+ copy_size);
+ }
+
+ dict->start = dict->pos;
+
+ b->out_pos += copy_size;
+ b->in_pos += copy_size;
+ }
+}
+
+#ifdef XZ_DEC_MICROLZMA
+# define DICT_FLUSH_SUPPORTS_SKIPPING true
+#else
+# define DICT_FLUSH_SUPPORTS_SKIPPING false
+#endif
+
+/*
+ * Flush pending data from dictionary to b->out. It is assumed that there is
+ * enough space in b->out. This is guaranteed because the caller uses dict_limit()
+ * before decoding data into the dictionary.
+ */
+static uint32_t dict_flush(struct dictionary *dict, struct xz_buf *b)
+{
+ size_t copy_size = dict->pos - dict->start;
+
+ if (DEC_IS_MULTI(dict->mode)) {
+ if (dict->pos == dict->end)
+ dict->pos = 0;
+
+ /*
+ * These buffers cannot overlap even if doing in-place
+ * decompression because in multi-call mode dict->buf
+ * has been allocated by us in this file; it's not
+ * provided by the caller like in single-call mode.
+ *
+ * With MicroLZMA, b->out can be NULL to skip bytes that
+ * the caller doesn't need. This cannot be done with XZ
+ * because it would break BCJ filters.
+ */
+ if (!DICT_FLUSH_SUPPORTS_SKIPPING || b->out != NULL)
+ memcpy(b->out + b->out_pos, dict->buf + dict->start,
+ copy_size);
+ }
+
+ dict->start = dict->pos;
+ b->out_pos += copy_size;
+ return copy_size;
+}
+
+/*****************
+ * Range decoder *
+ *****************/
+
+/* Reset the range decoder. */
+static void rc_reset(struct rc_dec *rc)
+{
+ rc->range = (uint32_t)-1;
+ rc->code = 0;
+ rc->init_bytes_left = RC_INIT_BYTES;
+}
+
+/*
+ * Read the first five initial bytes into rc->code if they haven't been
+ * read already. (Yes, the first byte gets completely ignored.)
+ */
+static bool rc_read_init(struct rc_dec *rc, struct xz_buf *b)
+{
+ while (rc->init_bytes_left > 0) {
+ if (b->in_pos == b->in_size)
+ return false;
+
+ rc->code = (rc->code << 8) + b->in[b->in_pos++];
+ --rc->init_bytes_left;
+ }
+
+ return true;
+}
+
+/* Return true if there may not be enough input for the next decoding loop. */
+static inline bool rc_limit_exceeded(const struct rc_dec *rc)
+{
+ return rc->in_pos > rc->in_limit;
+}
+
+/*
+ * Return true if it is possible (from point of view of range decoder) that
+ * we have reached the end of the LZMA chunk.
+ */
+static inline bool rc_is_finished(const struct rc_dec *rc)
+{
+ return rc->code == 0;
+}
+
+/* Read the next input byte if needed. */
+static __always_inline void rc_normalize(struct rc_dec *rc)
+{
+ if (rc->range < RC_TOP_VALUE) {
+ rc->range <<= RC_SHIFT_BITS;
+ rc->code = (rc->code << RC_SHIFT_BITS) + rc->in[rc->in_pos++];
+ }
+}
+
+/*
+ * Decode one bit. In some versions, this function has been split into three
+ * functions so that the compiler can more easily avoid an extra branch.
+ * In this particular version of the LZMA decoder, this
+ * doesn't seem to be a good idea (tested with GCC 3.3.6, 3.4.6, and 4.3.3
+ * on x86). Using a non-split version results in nicer looking code too.
+ *
+ * NOTE: This must return an int. Do not make it return a bool or the speed
+ * of the code generated by GCC 3.x decreases 10-15 %. (GCC 4.3 doesn't care,
+ * and it generates 10-20 % faster code than GCC 3.x from this file anyway.)
+ */
+static __always_inline int rc_bit(struct rc_dec *rc, uint16_t *prob)
+{
+ uint32_t bound;
+ int bit;
+
+ rc_normalize(rc);
+ bound = (rc->range >> RC_BIT_MODEL_TOTAL_BITS) * *prob;
+ if (rc->code < bound) {
+ rc->range = bound;
+ *prob += (RC_BIT_MODEL_TOTAL - *prob) >> RC_MOVE_BITS;
+ bit = 0;
+ } else {
+ rc->range -= bound;
+ rc->code -= bound;
+ *prob -= *prob >> RC_MOVE_BITS;
+ bit = 1;
+ }
+
+ return bit;
+}
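+
+/*
+ * Worked example (illustrative): with range == 0x80000000 and
+ * *prob == RC_BIT_MODEL_TOTAL / 2 == 1024, bound becomes
+ * (0x80000000 >> 11) * 1024 == 0x40000000, i.e. an even probability
+ * splits the range exactly in half.
+ */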
+
+/* Decode a bittree starting from the most significant bit. */
+static __always_inline uint32_t rc_bittree(struct rc_dec *rc,
+ uint16_t *probs, uint32_t limit)
+{
+ uint32_t symbol = 1;
+
+ do {
+ if (rc_bit(rc, &probs[symbol]))
+ symbol = (symbol << 1) + 1;
+ else
+ symbol <<= 1;
+ } while (symbol < limit);
+
+ return symbol;
+}
+
+/* Decode a bittree starting from the least significant bit. */
+static __always_inline void rc_bittree_reverse(struct rc_dec *rc,
+ uint16_t *probs,
+ uint32_t *dest, uint32_t limit)
+{
+ uint32_t symbol = 1;
+ uint32_t i = 0;
+
+ do {
+ if (rc_bit(rc, &probs[symbol])) {
+ symbol = (symbol << 1) + 1;
+ *dest += 1 << i;
+ } else {
+ symbol <<= 1;
+ }
+ } while (++i < limit);
+}
+
+/* Decode direct bits (fixed fifty-fifty probability) */
+static inline void rc_direct(struct rc_dec *rc, uint32_t *dest, uint32_t limit)
+{
+ uint32_t mask;
+
+ do {
+ rc_normalize(rc);
+ rc->range >>= 1;
+ rc->code -= rc->range;
+ mask = (uint32_t)0 - (rc->code >> 31);
+ rc->code += rc->range & mask;
+ *dest = (*dest << 1) + (mask + 1);
+ } while (--limit > 0);
+}
+
+/********
+ * LZMA *
+ ********/
+
+/* Get pointer to literal coder probability array. */
+static uint16_t *lzma_literal_probs(struct xz_dec_lzma2 *s)
+{
+ uint32_t prev_byte = dict_get(&s->dict, 0);
+ uint32_t low = prev_byte >> (8 - s->lzma.lc);
+ uint32_t high = (s->dict.pos & s->lzma.literal_pos_mask) << s->lzma.lc;
+ return s->lzma.literal[low + high];
+}
+
+/* Decode a literal (one 8-bit byte) */
+static void lzma_literal(struct xz_dec_lzma2 *s)
+{
+ uint16_t *probs;
+ uint32_t symbol;
+ uint32_t match_byte;
+ uint32_t match_bit;
+ uint32_t offset;
+ uint32_t i;
+
+ probs = lzma_literal_probs(s);
+
+ if (lzma_state_is_literal(s->lzma.state)) {
+ symbol = rc_bittree(&s->rc, probs, 0x100);
+ } else {
+ symbol = 1;
+ match_byte = dict_get(&s->dict, s->lzma.rep0) << 1;
+ offset = 0x100;
+
+ do {
+ match_bit = match_byte & offset;
+ match_byte <<= 1;
+ i = offset + match_bit + symbol;
+
+ if (rc_bit(&s->rc, &probs[i])) {
+ symbol = (symbol << 1) + 1;
+ offset &= match_bit;
+ } else {
+ symbol <<= 1;
+ offset &= ~match_bit;
+ }
+ } while (symbol < 0x100);
+ }
+
+ dict_put(&s->dict, (uint8_t)symbol);
+ lzma_state_literal(&s->lzma.state);
+}
+
+/* Decode the length of the match into s->lzma.len. */
+static void lzma_len(struct xz_dec_lzma2 *s, struct lzma_len_dec *l,
+ uint32_t pos_state)
+{
+ uint16_t *probs;
+ uint32_t limit;
+
+ if (!rc_bit(&s->rc, &l->choice)) {
+ probs = l->low[pos_state];
+ limit = LEN_LOW_SYMBOLS;
+ s->lzma.len = MATCH_LEN_MIN;
+ } else {
+ if (!rc_bit(&s->rc, &l->choice2)) {
+ probs = l->mid[pos_state];
+ limit = LEN_MID_SYMBOLS;
+ s->lzma.len = MATCH_LEN_MIN + LEN_LOW_SYMBOLS;
+ } else {
+ probs = l->high;
+ limit = LEN_HIGH_SYMBOLS;
+ s->lzma.len = MATCH_LEN_MIN + LEN_LOW_SYMBOLS
+ + LEN_MID_SYMBOLS;
+ }
+ }
+
+ s->lzma.len += rc_bittree(&s->rc, probs, limit) - limit;
+}
+
+/* Decode a match. The distance will be stored in s->lzma.rep0. */
+static void lzma_match(struct xz_dec_lzma2 *s, uint32_t pos_state)
+{
+ uint16_t *probs;
+ uint32_t dist_slot;
+ uint32_t limit;
+
+ lzma_state_match(&s->lzma.state);
+
+ s->lzma.rep3 = s->lzma.rep2;
+ s->lzma.rep2 = s->lzma.rep1;
+ s->lzma.rep1 = s->lzma.rep0;
+
+ lzma_len(s, &s->lzma.match_len_dec, pos_state);
+
+ probs = s->lzma.dist_slot[lzma_get_dist_state(s->lzma.len)];
+ dist_slot = rc_bittree(&s->rc, probs, DIST_SLOTS) - DIST_SLOTS;
+
+ if (dist_slot < DIST_MODEL_START) {
+ s->lzma.rep0 = dist_slot;
+ } else {
+ limit = (dist_slot >> 1) - 1;
+ s->lzma.rep0 = 2 + (dist_slot & 1);
+
+ if (dist_slot < DIST_MODEL_END) {
+ s->lzma.rep0 <<= limit;
+ probs = s->lzma.dist_special + s->lzma.rep0
+ - dist_slot - 1;
+ rc_bittree_reverse(&s->rc, probs,
+ &s->lzma.rep0, limit);
+ } else {
+ rc_direct(&s->rc, &s->lzma.rep0, limit - ALIGN_BITS);
+ s->lzma.rep0 <<= ALIGN_BITS;
+ rc_bittree_reverse(&s->rc, s->lzma.dist_align,
+ &s->lzma.rep0, ALIGN_BITS);
+ }
+ }
+}
+
+/*
+ * Decode a repeated match. The distance is one of the four most recently
+ * seen matches. The distance will be stored in s->lzma.rep0.
+ */
+static void lzma_rep_match(struct xz_dec_lzma2 *s, uint32_t pos_state)
+{
+ uint32_t tmp;
+
+ if (!rc_bit(&s->rc, &s->lzma.is_rep0[s->lzma.state])) {
+ if (!rc_bit(&s->rc, &s->lzma.is_rep0_long[
+ s->lzma.state][pos_state])) {
+ lzma_state_short_rep(&s->lzma.state);
+ s->lzma.len = 1;
+ return;
+ }
+ } else {
+ if (!rc_bit(&s->rc, &s->lzma.is_rep1[s->lzma.state])) {
+ tmp = s->lzma.rep1;
+ } else {
+ if (!rc_bit(&s->rc, &s->lzma.is_rep2[s->lzma.state])) {
+ tmp = s->lzma.rep2;
+ } else {
+ tmp = s->lzma.rep3;
+ s->lzma.rep3 = s->lzma.rep2;
+ }
+
+ s->lzma.rep2 = s->lzma.rep1;
+ }
+
+ s->lzma.rep1 = s->lzma.rep0;
+ s->lzma.rep0 = tmp;
+ }
+
+ lzma_state_long_rep(&s->lzma.state);
+ lzma_len(s, &s->lzma.rep_len_dec, pos_state);
+}
+
+/* LZMA decoder core */
+static bool lzma_main(struct xz_dec_lzma2 *s)
+{
+ uint32_t pos_state;
+
+ /*
+ * If the dictionary limit was reached during the previous call, try to
+ * finish the possibly pending repeat in the dictionary.
+ */
+ if (dict_has_space(&s->dict) && s->lzma.len > 0)
+ dict_repeat(&s->dict, &s->lzma.len, s->lzma.rep0);
+
+ /*
+ * Decode more LZMA symbols. One iteration may consume up to
+ * LZMA_IN_REQUIRED - 1 bytes.
+ */
+ while (dict_has_space(&s->dict) && !rc_limit_exceeded(&s->rc)) {
+ pos_state = s->dict.pos & s->lzma.pos_mask;
+
+ if (!rc_bit(&s->rc, &s->lzma.is_match[
+ s->lzma.state][pos_state])) {
+ lzma_literal(s);
+ } else {
+ if (rc_bit(&s->rc, &s->lzma.is_rep[s->lzma.state]))
+ lzma_rep_match(s, pos_state);
+ else
+ lzma_match(s, pos_state);
+
+ if (!dict_repeat(&s->dict, &s->lzma.len, s->lzma.rep0))
+ return false;
+ }
+ }
+
+ /*
+ * Having the range decoder always normalized when we are outside
+ * this function makes it easier to correctly handle end of the chunk.
+ */
+ rc_normalize(&s->rc);
+
+ return true;
+}
+
+/*
+ * Reset the LZMA decoder and range decoder state. Dictionary is not reset
+ * here, because LZMA state may be reset without resetting the dictionary.
+ */
+static void lzma_reset(struct xz_dec_lzma2 *s)
+{
+ uint16_t *probs;
+ size_t i;
+
+ s->lzma.state = STATE_LIT_LIT;
+ s->lzma.rep0 = 0;
+ s->lzma.rep1 = 0;
+ s->lzma.rep2 = 0;
+ s->lzma.rep3 = 0;
+ s->lzma.len = 0;
+
+ /*
+ * All probabilities are initialized to the same value. This hack
+ * makes the code smaller by avoiding a separate loop for each
+ * probability array.
+ *
+ * This could be optimized so that only the part of the literal
+ * probabilities that is actually required gets initialized. In
+ * the common case we would write 12 KiB less.
+ */
+ probs = s->lzma.is_match[0];
+ for (i = 0; i < PROBS_TOTAL; ++i)
+ probs[i] = RC_BIT_MODEL_TOTAL / 2;
+
+ rc_reset(&s->rc);
+}
+
+/*
+ * Decode and validate LZMA properties (lc/lp/pb) and calculate the bit masks
+ * from the decoded lp and pb values. On success, the LZMA decoder state is
+ * reset and true is returned.
+ */
+static bool lzma_props(struct xz_dec_lzma2 *s, uint8_t props)
+{
+ if (props > (4 * 5 + 4) * 9 + 8)
+ return false;
+
+ s->lzma.pos_mask = 0;
+ while (props >= 9 * 5) {
+ props -= 9 * 5;
+ ++s->lzma.pos_mask;
+ }
+
+ s->lzma.pos_mask = (1 << s->lzma.pos_mask) - 1;
+
+ s->lzma.literal_pos_mask = 0;
+ while (props >= 9) {
+ props -= 9;
+ ++s->lzma.literal_pos_mask;
+ }
+
+ s->lzma.lc = props;
+
+ if (s->lzma.lc + s->lzma.literal_pos_mask > 4)
+ return false;
+
+ s->lzma.literal_pos_mask = (1 << s->lzma.literal_pos_mask) - 1;
+
+ lzma_reset(s);
+
+ return true;
+}
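+
+/*
+ * Worked example (illustrative): props == 93 (0x5D, the classic LZMA
+ * default) decodes as pb = 2 (93 - 2 * 45 = 3), lp = 0, and lc = 3,
+ * giving pos_mask = 3 and literal_pos_mask = 0.
+ */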
+
+/*********
+ * LZMA2 *
+ *********/
+
+/*
+ * The LZMA decoder assumes that if the input limit (s->rc.in_limit) hasn't
+ * been exceeded, it is safe to read up to LZMA_IN_REQUIRED bytes. This
+ * wrapper function takes care of making the LZMA decoder's assumption safe.
+ *
+ * As long as there is plenty of input left to be decoded in the current LZMA
+ * chunk, we decode directly from the caller-supplied input buffer until
+ * there's LZMA_IN_REQUIRED bytes left. Those remaining bytes are copied into
+ * s->temp.buf, which (hopefully) gets filled on the next call to this
+ * function. We decode a few bytes from the temporary buffer so that we can
+ * continue decoding from the caller-supplied input buffer again.
+ */
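+/*
+ * For example (illustrative numbers): with LZMA_IN_REQUIRED == 21, if 100
+ * bytes of the current chunk remain in b->in, in_limit is set so that
+ * decoding stops while at least 21 of those bytes are still unread; the
+ * leftover bytes are copied to temp.buf and prepended to the next call's
+ * input.
+ */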
+static bool lzma2_lzma(struct xz_dec_lzma2 *s, struct xz_buf *b)
+{
+ size_t in_avail;
+ uint32_t tmp;
+
+ in_avail = b->in_size - b->in_pos;
+ if (s->temp.size > 0 || s->lzma2.compressed == 0) {
+ tmp = 2 * LZMA_IN_REQUIRED - s->temp.size;
+ if (tmp > s->lzma2.compressed - s->temp.size)
+ tmp = s->lzma2.compressed - s->temp.size;
+ if (tmp > in_avail)
+ tmp = in_avail;
+
+ memcpy(s->temp.buf + s->temp.size, b->in + b->in_pos, tmp);
+
+ if (s->temp.size + tmp == s->lzma2.compressed) {
+ memzero(s->temp.buf + s->temp.size + tmp,
+ sizeof(s->temp.buf)
+ - s->temp.size - tmp);
+ s->rc.in_limit = s->temp.size + tmp;
+ } else if (s->temp.size + tmp < LZMA_IN_REQUIRED) {
+ s->temp.size += tmp;
+ b->in_pos += tmp;
+ return true;
+ } else {
+ s->rc.in_limit = s->temp.size + tmp - LZMA_IN_REQUIRED;
+ }
+
+ s->rc.in = s->temp.buf;
+ s->rc.in_pos = 0;
+
+ if (!lzma_main(s) || s->rc.in_pos > s->temp.size + tmp)
+ return false;
+
+ s->lzma2.compressed -= s->rc.in_pos;
+
+ if (s->rc.in_pos < s->temp.size) {
+ s->temp.size -= s->rc.in_pos;
+ memmove(s->temp.buf, s->temp.buf + s->rc.in_pos,
+ s->temp.size);
+ return true;
+ }
+
+ b->in_pos += s->rc.in_pos - s->temp.size;
+ s->temp.size = 0;
+ }
+
+ in_avail = b->in_size - b->in_pos;
+ if (in_avail >= LZMA_IN_REQUIRED) {
+ s->rc.in = b->in;
+ s->rc.in_pos = b->in_pos;
+
+ if (in_avail >= s->lzma2.compressed + LZMA_IN_REQUIRED)
+ s->rc.in_limit = b->in_pos + s->lzma2.compressed;
+ else
+ s->rc.in_limit = b->in_size - LZMA_IN_REQUIRED;
+
+ if (!lzma_main(s))
+ return false;
+
+ in_avail = s->rc.in_pos - b->in_pos;
+ if (in_avail > s->lzma2.compressed)
+ return false;
+
+ s->lzma2.compressed -= in_avail;
+ b->in_pos = s->rc.in_pos;
+ }
+
+ in_avail = b->in_size - b->in_pos;
+ if (in_avail < LZMA_IN_REQUIRED) {
+ if (in_avail > s->lzma2.compressed)
+ in_avail = s->lzma2.compressed;
+
+ memcpy(s->temp.buf, b->in + b->in_pos, in_avail);
+ s->temp.size = in_avail;
+ b->in_pos += in_avail;
+ }
+
+ return true;
+}
+
+/*
+ * Take care of the LZMA2 control layer, and forward the job of actual LZMA
+ * decoding or copying of uncompressed chunks to other functions.
+ */
+XZ_EXTERN enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s,
+ struct xz_buf *b)
+{
+ uint32_t tmp;
+
+ while (b->in_pos < b->in_size || s->lzma2.sequence == SEQ_LZMA_RUN) {
+ switch (s->lzma2.sequence) {
+ case SEQ_CONTROL:
+ /*
+ * LZMA2 control byte
+ *
+ * Exact values:
+ * 0x00 End marker
+ * 0x01 Dictionary reset followed by
+ * an uncompressed chunk
+ * 0x02 Uncompressed chunk (no dictionary reset)
+ *
+ * Highest three bits (s->control & 0xE0):
+ * 0xE0 Dictionary reset, new properties and state
+ * reset, followed by LZMA compressed chunk
+ * 0xC0 New properties and state reset, followed
+ * by LZMA compressed chunk (no dictionary
+ * reset)
+ * 0xA0 State reset using old properties,
+ * followed by LZMA compressed chunk (no
+ * dictionary reset)
+ * 0x80 LZMA chunk (no dictionary or state reset)
+ *
+ * For LZMA compressed chunks, the lowest five bits
+ * (s->control & 0x1F) are the highest bits of the
+ * uncompressed size (bits 16-20).
+ *
+ * A new LZMA2 stream must begin with a dictionary
+ * reset. The first LZMA chunk must set new
+ * properties and reset the LZMA state.
+ *
+ * Values that don't match anything described above
+ * are invalid and we return XZ_DATA_ERROR.
+ */
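+ /*
+ * Example (illustrative): control byte 0x80 followed by
+ * size bytes 0x12 0x34 starts an LZMA chunk of
+ * (0x12 << 8) + 0x34 + 1 = 0x1235 uncompressed bytes
+ * (the low five control bits contribute 0 here), with no
+ * dictionary or state reset.
+ */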
+ tmp = b->in[b->in_pos++];
+
+ if (tmp == 0x00)
+ return XZ_STREAM_END;
+
+ if (tmp >= 0xE0 || tmp == 0x01) {
+ s->lzma2.need_props = true;
+ s->lzma2.need_dict_reset = false;
+ dict_reset(&s->dict, b);
+ } else if (s->lzma2.need_dict_reset) {
+ return XZ_DATA_ERROR;
+ }
+
+ if (tmp >= 0x80) {
+ s->lzma2.uncompressed = (tmp & 0x1F) << 16;
+ s->lzma2.sequence = SEQ_UNCOMPRESSED_1;
+
+ if (tmp >= 0xC0) {
+ /*
+ * When there are new properties,
+ * state reset is done at
+ * SEQ_PROPERTIES.
+ */
+ s->lzma2.need_props = false;
+ s->lzma2.next_sequence
+ = SEQ_PROPERTIES;
+
+ } else if (s->lzma2.need_props) {
+ return XZ_DATA_ERROR;
+
+ } else {
+ s->lzma2.next_sequence
+ = SEQ_LZMA_PREPARE;
+ if (tmp >= 0xA0)
+ lzma_reset(s);
+ }
+ } else {
+ if (tmp > 0x02)
+ return XZ_DATA_ERROR;
+
+ s->lzma2.sequence = SEQ_COMPRESSED_0;
+ s->lzma2.next_sequence = SEQ_COPY;
+ }
+
+ break;
+
+ case SEQ_UNCOMPRESSED_1:
+ s->lzma2.uncompressed
+ += (uint32_t)b->in[b->in_pos++] << 8;
+ s->lzma2.sequence = SEQ_UNCOMPRESSED_2;
+ break;
+
+ case SEQ_UNCOMPRESSED_2:
+ s->lzma2.uncompressed
+ += (uint32_t)b->in[b->in_pos++] + 1;
+ s->lzma2.sequence = SEQ_COMPRESSED_0;
+ break;
+
+ case SEQ_COMPRESSED_0:
+ s->lzma2.compressed
+ = (uint32_t)b->in[b->in_pos++] << 8;
+ s->lzma2.sequence = SEQ_COMPRESSED_1;
+ break;
+
+ case SEQ_COMPRESSED_1:
+ s->lzma2.compressed
+ += (uint32_t)b->in[b->in_pos++] + 1;
+ s->lzma2.sequence = s->lzma2.next_sequence;
+ break;
+
+ case SEQ_PROPERTIES:
+ if (!lzma_props(s, b->in[b->in_pos++]))
+ return XZ_DATA_ERROR;
+
+ s->lzma2.sequence = SEQ_LZMA_PREPARE;
+
+ fallthrough;
+
+ case SEQ_LZMA_PREPARE:
+ if (s->lzma2.compressed < RC_INIT_BYTES)
+ return XZ_DATA_ERROR;
+
+ if (!rc_read_init(&s->rc, b))
+ return XZ_OK;
+
+ s->lzma2.compressed -= RC_INIT_BYTES;
+ s->lzma2.sequence = SEQ_LZMA_RUN;
+
+ fallthrough;
+
+ case SEQ_LZMA_RUN:
+ /*
+ * Set dictionary limit to indicate how much we want
+ * to be decoded at maximum. Decode new data into the
+ * dictionary. Flush the new data from dictionary to
+ * b->out. Check if we finished decoding this chunk.
+ * In case the dictionary got full but we didn't fill
+ * the output buffer yet, we may run this loop
+ * multiple times without changing s->lzma2.sequence.
+ */
+ dict_limit(&s->dict, min_t(size_t,
+ b->out_size - b->out_pos,
+ s->lzma2.uncompressed));
+ if (!lzma2_lzma(s, b))
+ return XZ_DATA_ERROR;
+
+ s->lzma2.uncompressed -= dict_flush(&s->dict, b);
+
+ if (s->lzma2.uncompressed == 0) {
+ if (s->lzma2.compressed > 0 || s->lzma.len > 0
+ || !rc_is_finished(&s->rc))
+ return XZ_DATA_ERROR;
+
+ rc_reset(&s->rc);
+ s->lzma2.sequence = SEQ_CONTROL;
+
+ } else if (b->out_pos == b->out_size
+ || (b->in_pos == b->in_size
+ && s->temp.size
+ < s->lzma2.compressed)) {
+ return XZ_OK;
+ }
+
+ break;
+
+ case SEQ_COPY:
+ dict_uncompressed(&s->dict, b, &s->lzma2.compressed);
+ if (s->lzma2.compressed > 0)
+ return XZ_OK;
+
+ s->lzma2.sequence = SEQ_CONTROL;
+ break;
+ }
+ }
+
+ return XZ_OK;
+}
+
+XZ_EXTERN struct xz_dec_lzma2 *xz_dec_lzma2_create(enum xz_mode mode,
+ uint32_t dict_max)
+{
+ struct xz_dec_lzma2 *s = kmalloc(sizeof(*s), GFP_KERNEL);
+ if (s == NULL)
+ return NULL;
+
+ s->dict.mode = mode;
+ s->dict.size_max = dict_max;
+
+ if (DEC_IS_PREALLOC(mode)) {
+ s->dict.buf = vmalloc(dict_max);
+ if (s->dict.buf == NULL) {
+ kfree(s);
+ return NULL;
+ }
+ } else if (DEC_IS_DYNALLOC(mode)) {
+ s->dict.buf = NULL;
+ s->dict.allocated = 0;
+ }
+
+ return s;
+}
+
+XZ_EXTERN enum xz_ret xz_dec_lzma2_reset(struct xz_dec_lzma2 *s, uint8_t props)
+{
+ /* This limits dictionary size to 3 GiB to keep parsing simpler. */
+ if (props > 39)
+ return XZ_OPTIONS_ERROR;
+
+ s->dict.size = 2 + (props & 1);
+ s->dict.size <<= (props >> 1) + 11;
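+
+ /*
+ * Example (illustrative): props == 19 gives (2 + 1) << 20 = 3 MiB,
+ * props == 0 gives the 4 KiB minimum, and props == 39 gives the
+ * 3 GiB maximum mentioned above.
+ */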
+
+ if (DEC_IS_MULTI(s->dict.mode)) {
+ if (s->dict.size > s->dict.size_max)
+ return XZ_MEMLIMIT_ERROR;
+
+ s->dict.end = s->dict.size;
+
+ if (DEC_IS_DYNALLOC(s->dict.mode)) {
+ if (s->dict.allocated < s->dict.size) {
+ s->dict.allocated = s->dict.size;
+ vfree(s->dict.buf);
+ s->dict.buf = vmalloc(s->dict.size);
+ if (s->dict.buf == NULL) {
+ s->dict.allocated = 0;
+ return XZ_MEM_ERROR;
+ }
+ }
+ }
+ }
+
+ s->lzma2.sequence = SEQ_CONTROL;
+ s->lzma2.need_dict_reset = true;
+
+ s->temp.size = 0;
+
+ return XZ_OK;
+}
+
+XZ_EXTERN void xz_dec_lzma2_end(struct xz_dec_lzma2 *s)
+{
+ if (DEC_IS_MULTI(s->dict.mode))
+ vfree(s->dict.buf);
+
+ kfree(s);
+}
+
+#ifdef XZ_DEC_MICROLZMA
+/* This is a wrapper struct to have a nice struct name in the public API. */
+struct xz_dec_microlzma {
+ struct xz_dec_lzma2 s;
+};
+
+enum xz_ret xz_dec_microlzma_run(struct xz_dec_microlzma *s_ptr,
+ struct xz_buf *b)
+{
+ struct xz_dec_lzma2 *s = &s_ptr->s;
+
+ /*
+ * sequence is SEQ_PROPERTIES before the first input byte,
+ * SEQ_LZMA_PREPARE until a total of five bytes have been read,
+ * and SEQ_LZMA_RUN for the rest of the input stream.
+ */
+ if (s->lzma2.sequence != SEQ_LZMA_RUN) {
+ if (s->lzma2.sequence == SEQ_PROPERTIES) {
+ /* One byte is needed for the props. */
+ if (b->in_pos >= b->in_size)
+ return XZ_OK;
+
+ /*
+ * Don't increment b->in_pos here. The same byte is
+ * also passed to rc_read_init() which will ignore it.
+ */
+ if (!lzma_props(s, ~b->in[b->in_pos]))
+ return XZ_DATA_ERROR;
+
+ s->lzma2.sequence = SEQ_LZMA_PREPARE;
+ }
+
+ /*
+ * xz_dec_microlzma_reset() doesn't validate the compressed
+ * size so we do it here. We have to limit the maximum size
+ * to avoid integer overflows in lzma2_lzma(). 3 GiB is a nice
+ * round number and much more than users of this code should
+ * ever need.
+ */
+ if (s->lzma2.compressed < RC_INIT_BYTES
+ || s->lzma2.compressed > (3U << 30))
+ return XZ_DATA_ERROR;
+
+ if (!rc_read_init(&s->rc, b))
+ return XZ_OK;
+
+ s->lzma2.compressed -= RC_INIT_BYTES;
+ s->lzma2.sequence = SEQ_LZMA_RUN;
+
+ dict_reset(&s->dict, b);
+ }
+
+ /* This is to allow increasing b->out_size between calls. */
+ if (DEC_IS_SINGLE(s->dict.mode))
+ s->dict.end = b->out_size - b->out_pos;
+
+ while (true) {
+ dict_limit(&s->dict, min_t(size_t, b->out_size - b->out_pos,
+ s->lzma2.uncompressed));
+
+ if (!lzma2_lzma(s, b))
+ return XZ_DATA_ERROR;
+
+ s->lzma2.uncompressed -= dict_flush(&s->dict, b);
+
+ if (s->lzma2.uncompressed == 0) {
+ if (s->lzma2.pedantic_microlzma) {
+ if (s->lzma2.compressed > 0 || s->lzma.len > 0
+ || !rc_is_finished(&s->rc))
+ return XZ_DATA_ERROR;
+ }
+
+ return XZ_STREAM_END;
+ }
+
+ if (b->out_pos == b->out_size)
+ return XZ_OK;
+
+ if (b->in_pos == b->in_size
+ && s->temp.size < s->lzma2.compressed)
+ return XZ_OK;
+ }
+}
+
+struct xz_dec_microlzma *xz_dec_microlzma_alloc(enum xz_mode mode,
+ uint32_t dict_size)
+{
+ struct xz_dec_microlzma *s;
+
+ /* Restrict dict_size to the same range as in the LZMA2 code. */
+ if (dict_size < 4096 || dict_size > (3U << 30))
+ return NULL;
+
+ s = kmalloc(sizeof(*s), GFP_KERNEL);
+ if (s == NULL)
+ return NULL;
+
+ s->s.dict.mode = mode;
+ s->s.dict.size = dict_size;
+
+ if (DEC_IS_MULTI(mode)) {
+ s->s.dict.end = dict_size;
+
+ s->s.dict.buf = vmalloc(dict_size);
+ if (s->s.dict.buf == NULL) {
+ kfree(s);
+ return NULL;
+ }
+ }
+
+ return s;
+}
+
+void xz_dec_microlzma_reset(struct xz_dec_microlzma *s, uint32_t comp_size,
+ uint32_t uncomp_size, int uncomp_size_is_exact)
+{
+ /*
+ * comp_size is validated in xz_dec_microlzma_run().
+ * uncomp_size can safely be anything.
+ */
+ s->s.lzma2.compressed = comp_size;
+ s->s.lzma2.uncompressed = uncomp_size;
+ s->s.lzma2.pedantic_microlzma = uncomp_size_is_exact;
+
+ s->s.lzma2.sequence = SEQ_PROPERTIES;
+ s->s.temp.size = 0;
+}
+
+void xz_dec_microlzma_end(struct xz_dec_microlzma *s)
+{
+ if (DEC_IS_MULTI(s->s.dict.mode))
+ vfree(s->s.dict.buf);
+
+ kfree(s);
+}
+#endif
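+
+/*
+ * Minimal usage sketch of the MicroLZMA API above (illustrative
+ * comment; comp_buf, comp_len, out_buf, and out_len are hypothetical
+ * names supplied by the caller):
+ *
+ *   struct xz_buf b = {
+ *           .in = comp_buf, .in_pos = 0, .in_size = comp_len,
+ *           .out = out_buf, .out_pos = 0, .out_size = out_len
+ *   };
+ *   struct xz_dec_microlzma *s;
+ *   enum xz_ret ret;
+ *
+ *   s = xz_dec_microlzma_alloc(XZ_SINGLE, 1 << 16);
+ *   if (s == NULL)
+ *           return -ENOMEM;
+ *   xz_dec_microlzma_reset(s, comp_len, out_len, true);
+ *   ret = xz_dec_microlzma_run(s, &b);
+ *   xz_dec_microlzma_end(s);
+ *   return ret == XZ_STREAM_END ? 0 : -EINVAL;
+ */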
diff --git a/lib/xz/xz_dec_stream.c b/lib/xz/xz_dec_stream.c
new file mode 100644
index 000000000..683570b93
--- /dev/null
+++ b/lib/xz/xz_dec_stream.c
@@ -0,0 +1,837 @@
+/*
+ * .xz Stream decoder
+ *
+ * Author: Lasse Collin <lasse.collin@tukaani.org>
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ */
+
+#include "xz_private.h"
+#include "xz_stream.h"
+
+/* Hash used to validate the Index field */
+struct xz_dec_hash {
+ vli_type unpadded;
+ vli_type uncompressed;
+ uint32_t crc32;
+};
+
+struct xz_dec {
+ /* Position in dec_main() */
+ enum {
+ SEQ_STREAM_HEADER,
+ SEQ_BLOCK_START,
+ SEQ_BLOCK_HEADER,
+ SEQ_BLOCK_UNCOMPRESS,
+ SEQ_BLOCK_PADDING,
+ SEQ_BLOCK_CHECK,
+ SEQ_INDEX,
+ SEQ_INDEX_PADDING,
+ SEQ_INDEX_CRC32,
+ SEQ_STREAM_FOOTER
+ } sequence;
+
+ /* Position in variable-length integers and Check fields */
+ uint32_t pos;
+
+ /* Variable-length integer decoded by dec_vli() */
+ vli_type vli;
+
+ /* Saved in_pos and out_pos */
+ size_t in_start;
+ size_t out_start;
+
+ /* CRC32 value in Block or Index */
+ uint32_t crc32;
+
+ /* Type of the integrity check calculated from uncompressed data */
+ enum xz_check check_type;
+
+ /* Operation mode */
+ enum xz_mode mode;
+
+ /*
+ * True if the next call to xz_dec_run() is allowed to return
+ * XZ_BUF_ERROR.
+ */
+ bool allow_buf_error;
+
+ /* Information stored in Block Header */
+ struct {
+ /*
+ * Value stored in the Compressed Size field, or
+ * VLI_UNKNOWN if Compressed Size is not present.
+ */
+ vli_type compressed;
+
+ /*
+ * Value stored in the Uncompressed Size field, or
+ * VLI_UNKNOWN if Uncompressed Size is not present.
+ */
+ vli_type uncompressed;
+
+ /* Size of the Block Header field */
+ uint32_t size;
+ } block_header;
+
+ /* Information collected when decoding Blocks */
+ struct {
+ /* Observed compressed size of the current Block */
+ vli_type compressed;
+
+ /* Observed uncompressed size of the current Block */
+ vli_type uncompressed;
+
+ /* Number of Blocks decoded so far */
+ vli_type count;
+
+ /*
+ * Hash calculated from the Block sizes. This is used to
+ * validate the Index field.
+ */
+ struct xz_dec_hash hash;
+ } block;
+
+ /* Variables needed when verifying the Index field */
+ struct {
+ /* Position in dec_index() */
+ enum {
+ SEQ_INDEX_COUNT,
+ SEQ_INDEX_UNPADDED,
+ SEQ_INDEX_UNCOMPRESSED
+ } sequence;
+
+ /* Size of the Index in bytes */
+ vli_type size;
+
+ /* Number of Records (matches block.count in valid files) */
+ vli_type count;
+
+ /*
+ * Hash calculated from the Records (matches block.hash in
+ * valid files).
+ */
+ struct xz_dec_hash hash;
+ } index;
+
+ /*
+ * Temporary buffer needed to hold Stream Header, Block Header,
+ * and Stream Footer. The Block Header is the biggest (1 KiB)
+ * so we reserve space according to that. buf[] has to be aligned
+ * to a multiple of four bytes; the size_t variables before it
+ * should guarantee this.
+ */
+ struct {
+ size_t pos;
+ size_t size;
+ uint8_t buf[1024];
+ } temp;
+
+ struct xz_dec_lzma2 *lzma2;
+
+#ifdef XZ_DEC_BCJ
+ struct xz_dec_bcj *bcj;
+ bool bcj_active;
+#endif
+};
+
+#ifdef XZ_DEC_ANY_CHECK
+/* Sizes of the Check field with different Check IDs */
+static const uint8_t check_sizes[16] = {
+ 0,
+ 4, 4, 4,
+ 8, 8, 8,
+ 16, 16, 16,
+ 32, 32, 32,
+ 64, 64, 64
+};
+#endif
+
+/*
+ * Fill s->temp by copying data starting from b->in[b->in_pos]. The caller
+ * must have set s->temp.size to indicate how much data we are supposed
+ * to copy into s->temp.buf. Return true once s->temp.pos has reached
+ * s->temp.size.
+ */
+static bool fill_temp(struct xz_dec *s, struct xz_buf *b)
+{
+ size_t copy_size = min_t(size_t,
+ b->in_size - b->in_pos, s->temp.size - s->temp.pos);
+
+ memcpy(s->temp.buf + s->temp.pos, b->in + b->in_pos, copy_size);
+ b->in_pos += copy_size;
+ s->temp.pos += copy_size;
+
+ if (s->temp.pos == s->temp.size) {
+ s->temp.pos = 0;
+ return true;
+ }
+
+ return false;
+}
+
+/* Decode a variable-length integer (little-endian base-128 encoding) */
+static enum xz_ret dec_vli(struct xz_dec *s, const uint8_t *in,
+ size_t *in_pos, size_t in_size)
+{
+ uint8_t byte;
+
+ if (s->pos == 0)
+ s->vli = 0;
+
+ while (*in_pos < in_size) {
+ byte = in[*in_pos];
+ ++*in_pos;
+
+ s->vli |= (vli_type)(byte & 0x7F) << s->pos;
+
+ if ((byte & 0x80) == 0) {
+ /* Don't allow non-minimal encodings. */
+ if (byte == 0 && s->pos != 0)
+ return XZ_DATA_ERROR;
+
+ s->pos = 0;
+ return XZ_STREAM_END;
+ }
+
+ s->pos += 7;
+ if (s->pos == 7 * VLI_BYTES_MAX)
+ return XZ_DATA_ERROR;
+ }
+
+ return XZ_OK;
+}
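+
+/*
+ * Worked example of the encoding decoded above (illustrative comment):
+ * the value 300 (binary 1_0010_1100) is stored as the two bytes
+ * 0xAC 0x02. 0xAC carries the low seven bits (0x2C) with the
+ * continuation bit (0x80) set; 0x02 carries the remaining two bits
+ * with the continuation bit clear.
+ */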
+
+/*
+ * Decode the Compressed Data field from a Block. Update and validate
+ * the observed compressed and uncompressed sizes of the Block so that
+ * they don't exceed the values possibly stored in the Block Header
+ * (validation assumes that no integer overflow occurs, since vli_type
+ * is normally uint64_t). Update the CRC32 if presence of the CRC32
+ * field was indicated in Stream Header.
+ *
+ * Once the decoding is finished, validate that the observed sizes match
+ * the sizes possibly stored in the Block Header. Update the hash and
+ * Block count, which are later used to validate the Index field.
+ */
+static enum xz_ret dec_block(struct xz_dec *s, struct xz_buf *b)
+{
+ enum xz_ret ret;
+
+ s->in_start = b->in_pos;
+ s->out_start = b->out_pos;
+
+#ifdef XZ_DEC_BCJ
+ if (s->bcj_active)
+ ret = xz_dec_bcj_run(s->bcj, s->lzma2, b);
+ else
+#endif
+ ret = xz_dec_lzma2_run(s->lzma2, b);
+
+ s->block.compressed += b->in_pos - s->in_start;
+ s->block.uncompressed += b->out_pos - s->out_start;
+
+ /*
+ * There is no need to separately check for VLI_UNKNOWN, since
+ * the observed sizes are always smaller than VLI_UNKNOWN.
+ */
+ if (s->block.compressed > s->block_header.compressed
+ || s->block.uncompressed
+ > s->block_header.uncompressed)
+ return XZ_DATA_ERROR;
+
+ if (s->check_type == XZ_CHECK_CRC32)
+ s->crc32 = xz_crc32(b->out + s->out_start,
+ b->out_pos - s->out_start, s->crc32);
+
+ if (ret == XZ_STREAM_END) {
+ if (s->block_header.compressed != VLI_UNKNOWN
+ && s->block_header.compressed
+ != s->block.compressed)
+ return XZ_DATA_ERROR;
+
+ if (s->block_header.uncompressed != VLI_UNKNOWN
+ && s->block_header.uncompressed
+ != s->block.uncompressed)
+ return XZ_DATA_ERROR;
+
+ s->block.hash.unpadded += s->block_header.size
+ + s->block.compressed;
+
+#ifdef XZ_DEC_ANY_CHECK
+ s->block.hash.unpadded += check_sizes[s->check_type];
+#else
+ if (s->check_type == XZ_CHECK_CRC32)
+ s->block.hash.unpadded += 4;
+#endif
+
+ s->block.hash.uncompressed += s->block.uncompressed;
+ s->block.hash.crc32 = xz_crc32(
+ (const uint8_t *)&s->block.hash,
+ sizeof(s->block.hash), s->block.hash.crc32);
+
+ ++s->block.count;
+ }
+
+ return ret;
+}
+
+/* Update the Index size and the CRC32 value. */
+static void index_update(struct xz_dec *s, const struct xz_buf *b)
+{
+ size_t in_used = b->in_pos - s->in_start;
+ s->index.size += in_used;
+ s->crc32 = xz_crc32(b->in + s->in_start, in_used, s->crc32);
+}
+
+/*
+ * Decode the Number of Records, Unpadded Size, and Uncompressed Size
+ * fields from the Index field. That is, Index Padding and CRC32 are not
+ * decoded by this function.
+ *
+ * This can return XZ_OK (more input needed), XZ_STREAM_END (everything
+ * successfully decoded), or XZ_DATA_ERROR (input is corrupt).
+ */
+static enum xz_ret dec_index(struct xz_dec *s, struct xz_buf *b)
+{
+ enum xz_ret ret;
+
+ do {
+ ret = dec_vli(s, b->in, &b->in_pos, b->in_size);
+ if (ret != XZ_STREAM_END) {
+ index_update(s, b);
+ return ret;
+ }
+
+ switch (s->index.sequence) {
+ case SEQ_INDEX_COUNT:
+ s->index.count = s->vli;
+
+ /*
+ * Validate that the Number of Records field
+ * indicates the same number of Records as
+ * there were Blocks in the Stream.
+ */
+ if (s->index.count != s->block.count)
+ return XZ_DATA_ERROR;
+
+ s->index.sequence = SEQ_INDEX_UNPADDED;
+ break;
+
+ case SEQ_INDEX_UNPADDED:
+ s->index.hash.unpadded += s->vli;
+ s->index.sequence = SEQ_INDEX_UNCOMPRESSED;
+ break;
+
+ case SEQ_INDEX_UNCOMPRESSED:
+ s->index.hash.uncompressed += s->vli;
+ s->index.hash.crc32 = xz_crc32(
+ (const uint8_t *)&s->index.hash,
+ sizeof(s->index.hash),
+ s->index.hash.crc32);
+ --s->index.count;
+ s->index.sequence = SEQ_INDEX_UNPADDED;
+ break;
+ }
+ } while (s->index.count > 0);
+
+ return XZ_STREAM_END;
+}
+
+/*
+ * Validate that the next four input bytes match the value of s->crc32.
+ * s->pos must be zero when starting to validate the first byte.
+ */
+static enum xz_ret crc32_validate(struct xz_dec *s, struct xz_buf *b)
+{
+ do {
+ if (b->in_pos == b->in_size)
+ return XZ_OK;
+
+ if (((s->crc32 >> s->pos) & 0xFF) != b->in[b->in_pos++])
+ return XZ_DATA_ERROR;
+
+ s->pos += 8;
+
+ } while (s->pos < 32);
+
+ s->crc32 = 0;
+ s->pos = 0;
+
+ return XZ_STREAM_END;
+}
+
+#ifdef XZ_DEC_ANY_CHECK
+/*
+ * Skip over the Check field when the Check ID is not supported.
+ * Returns true once the whole Check field has been skipped over.
+ */
+static bool check_skip(struct xz_dec *s, struct xz_buf *b)
+{
+ while (s->pos < check_sizes[s->check_type]) {
+ if (b->in_pos == b->in_size)
+ return false;
+
+ ++b->in_pos;
+ ++s->pos;
+ }
+
+ s->pos = 0;
+
+ return true;
+}
+#endif
+
+/* Decode the Stream Header field (the first 12 bytes of the .xz Stream). */
+static enum xz_ret dec_stream_header(struct xz_dec *s)
+{
+ if (!memeq(s->temp.buf, HEADER_MAGIC, HEADER_MAGIC_SIZE))
+ return XZ_FORMAT_ERROR;
+
+ if (xz_crc32(s->temp.buf + HEADER_MAGIC_SIZE, 2, 0)
+ != get_le32(s->temp.buf + HEADER_MAGIC_SIZE + 2))
+ return XZ_DATA_ERROR;
+
+ if (s->temp.buf[HEADER_MAGIC_SIZE] != 0)
+ return XZ_OPTIONS_ERROR;
+
+ /*
+ * Of integrity checks, we support only none (Check ID = 0) and
+ * CRC32 (Check ID = 1). However, if XZ_DEC_ANY_CHECK is defined,
+ * we will accept other check types too, but then the check won't
+ * be verified and a warning (XZ_UNSUPPORTED_CHECK) will be given.
+ */
+ if (s->temp.buf[HEADER_MAGIC_SIZE + 1] > XZ_CHECK_MAX)
+ return XZ_OPTIONS_ERROR;
+
+ s->check_type = s->temp.buf[HEADER_MAGIC_SIZE + 1];
+
+#ifdef XZ_DEC_ANY_CHECK
+ if (s->check_type > XZ_CHECK_CRC32)
+ return XZ_UNSUPPORTED_CHECK;
+#else
+ if (s->check_type > XZ_CHECK_CRC32)
+ return XZ_OPTIONS_ERROR;
+#endif
+
+ return XZ_OK;
+}
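+
+/*
+ * For reference (illustrative comment, following the .xz format
+ * specification): the 12-byte Stream Header decoded above consists of
+ * the Header Magic (6 bytes: 0xFD "7zXZ" 0x00), the Stream Flags
+ * (2 bytes: a null byte followed by the Check ID), and the CRC32 of
+ * the Stream Flags (4 bytes, little endian).
+ */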
+
+/* Decode the Stream Footer field (the last 12 bytes of the .xz Stream) */
+static enum xz_ret dec_stream_footer(struct xz_dec *s)
+{
+ if (!memeq(s->temp.buf + 10, FOOTER_MAGIC, FOOTER_MAGIC_SIZE))
+ return XZ_DATA_ERROR;
+
+ if (xz_crc32(s->temp.buf + 4, 6, 0) != get_le32(s->temp.buf))
+ return XZ_DATA_ERROR;
+
+ /*
+ * Validate Backward Size. Note that we never added the size of the
+ * Index CRC32 field to s->index.size, thus we use s->index.size / 4
+ * instead of s->index.size / 4 - 1.
+ */
+ if ((s->index.size >> 2) != get_le32(s->temp.buf + 4))
+ return XZ_DATA_ERROR;
+
+ if (s->temp.buf[8] != 0 || s->temp.buf[9] != s->check_type)
+ return XZ_DATA_ERROR;
+
+ /*
+ * Use XZ_STREAM_END instead of XZ_OK to be more convenient
+ * for the caller.
+ */
+ return XZ_STREAM_END;
+}
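+
+/*
+ * For reference (illustrative comment): the 12-byte Stream Footer
+ * checked above consists of the CRC32 of the following six bytes
+ * (4 bytes, little endian), the Backward Size (4 bytes, storing
+ * Index size / 4 - 1), the Stream Flags (2 bytes, matching the
+ * Stream Header), and the Footer Magic "YZ" (2 bytes).
+ */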
+
+/* Decode the Block Header and initialize the filter chain. */
+static enum xz_ret dec_block_header(struct xz_dec *s)
+{
+ enum xz_ret ret;
+
+ /*
+ * Validate the CRC32. We know that the temp buffer is at least
+ * eight bytes so this is safe.
+ */
+ s->temp.size -= 4;
+ if (xz_crc32(s->temp.buf, s->temp.size, 0)
+ != get_le32(s->temp.buf + s->temp.size))
+ return XZ_DATA_ERROR;
+
+ s->temp.pos = 2;
+
+ /*
+ * Catch unsupported Block Flags. We support only one or two filters
+ * in the chain, so we catch that with the same test.
+ */
+#ifdef XZ_DEC_BCJ
+ if (s->temp.buf[1] & 0x3E)
+#else
+ if (s->temp.buf[1] & 0x3F)
+#endif
+ return XZ_OPTIONS_ERROR;
+
+ /* Compressed Size */
+ if (s->temp.buf[1] & 0x40) {
+ if (dec_vli(s, s->temp.buf, &s->temp.pos, s->temp.size)
+ != XZ_STREAM_END)
+ return XZ_DATA_ERROR;
+
+ s->block_header.compressed = s->vli;
+ } else {
+ s->block_header.compressed = VLI_UNKNOWN;
+ }
+
+ /* Uncompressed Size */
+ if (s->temp.buf[1] & 0x80) {
+ if (dec_vli(s, s->temp.buf, &s->temp.pos, s->temp.size)
+ != XZ_STREAM_END)
+ return XZ_DATA_ERROR;
+
+ s->block_header.uncompressed = s->vli;
+ } else {
+ s->block_header.uncompressed = VLI_UNKNOWN;
+ }
+
+#ifdef XZ_DEC_BCJ
+ /* If there are two filters, the first one must be a BCJ filter. */
+ s->bcj_active = s->temp.buf[1] & 0x01;
+ if (s->bcj_active) {
+ if (s->temp.size - s->temp.pos < 2)
+ return XZ_OPTIONS_ERROR;
+
+ ret = xz_dec_bcj_reset(s->bcj, s->temp.buf[s->temp.pos++]);
+ if (ret != XZ_OK)
+ return ret;
+
+ /*
+ * We don't support a custom start offset,
+ * so Size of Properties must be zero.
+ */
+ if (s->temp.buf[s->temp.pos++] != 0x00)
+ return XZ_OPTIONS_ERROR;
+ }
+#endif
+
+ /* Valid Filter Flags always take at least two bytes. */
+ if (s->temp.size - s->temp.pos < 2)
+ return XZ_DATA_ERROR;
+
+ /* Filter ID = LZMA2 */
+ if (s->temp.buf[s->temp.pos++] != 0x21)
+ return XZ_OPTIONS_ERROR;
+
+ /* Size of Properties = 1-byte Filter Properties */
+ if (s->temp.buf[s->temp.pos++] != 0x01)
+ return XZ_OPTIONS_ERROR;
+
+ /* Filter Properties contains LZMA2 dictionary size. */
+ if (s->temp.size - s->temp.pos < 1)
+ return XZ_DATA_ERROR;
+
+ ret = xz_dec_lzma2_reset(s->lzma2, s->temp.buf[s->temp.pos++]);
+ if (ret != XZ_OK)
+ return ret;
+
+ /* The rest must be Header Padding. */
+ while (s->temp.pos < s->temp.size)
+ if (s->temp.buf[s->temp.pos++] != 0x00)
+ return XZ_OPTIONS_ERROR;
+
+ s->temp.pos = 0;
+ s->block.compressed = 0;
+ s->block.uncompressed = 0;
+
+ return XZ_OK;
+}
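+
+/*
+ * Worked example of a minimal Block Header accepted above
+ * (illustrative comment): 0x02 (size byte: (2 + 1) * 4 = 12 bytes in
+ * total), 0x00 (flags: one filter, no optional size fields), 0x21
+ * (Filter ID = LZMA2), 0x01 (Size of Properties), one dictionary-size
+ * props byte, three 0x00 Header Padding bytes, and a four-byte CRC32
+ * of the preceding eight bytes.
+ */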
+
+static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b)
+{
+ enum xz_ret ret;
+
+ /*
+ * Store the start position for the case when we are in the middle
+ * of the Index field.
+ */
+ s->in_start = b->in_pos;
+
+ while (true) {
+ switch (s->sequence) {
+ case SEQ_STREAM_HEADER:
+ /*
+ * The Stream Header is copied to s->temp and then
+ * decoded from there. This way, if the caller
+ * gives us only a little input at a time, we can
+ * still keep the Stream Header decoding code
+ * simple. A similar approach is used in many
+ * places in this file.
+ */
+ if (!fill_temp(s, b))
+ return XZ_OK;
+
+ /*
+ * If dec_stream_header() returns
+ * XZ_UNSUPPORTED_CHECK, it is still possible
+ * to continue decoding if working in multi-call
+ * mode. Thus, update s->sequence before calling
+ * dec_stream_header().
+ */
+ s->sequence = SEQ_BLOCK_START;
+
+ ret = dec_stream_header(s);
+ if (ret != XZ_OK)
+ return ret;
+
+ fallthrough;
+
+ case SEQ_BLOCK_START:
+ /* We need one byte of input to continue. */
+ if (b->in_pos == b->in_size)
+ return XZ_OK;
+
+ /* See if this is the beginning of the Index field. */
+ if (b->in[b->in_pos] == 0) {
+ s->in_start = b->in_pos++;
+ s->sequence = SEQ_INDEX;
+ break;
+ }
+
+ /*
+ * Calculate the size of the Block Header and
+ * prepare to decode it.
+ */
+ s->block_header.size
+ = ((uint32_t)b->in[b->in_pos] + 1) * 4;
+
+ s->temp.size = s->block_header.size;
+ s->temp.pos = 0;
+ s->sequence = SEQ_BLOCK_HEADER;
+
+ fallthrough;
+
+ case SEQ_BLOCK_HEADER:
+ if (!fill_temp(s, b))
+ return XZ_OK;
+
+ ret = dec_block_header(s);
+ if (ret != XZ_OK)
+ return ret;
+
+ s->sequence = SEQ_BLOCK_UNCOMPRESS;
+
+ fallthrough;
+
+ case SEQ_BLOCK_UNCOMPRESS:
+ ret = dec_block(s, b);
+ if (ret != XZ_STREAM_END)
+ return ret;
+
+ s->sequence = SEQ_BLOCK_PADDING;
+
+ fallthrough;
+
+ case SEQ_BLOCK_PADDING:
+ /*
+ * Size of Compressed Data + Block Padding
+ * must be a multiple of four. We don't need
+ * s->block.compressed for anything else
+ * anymore, so we use it here to test the size
+ * of the Block Padding field.
+ */
+ while (s->block.compressed & 3) {
+ if (b->in_pos == b->in_size)
+ return XZ_OK;
+
+ if (b->in[b->in_pos++] != 0)
+ return XZ_DATA_ERROR;
+
+ ++s->block.compressed;
+ }
+
+ s->sequence = SEQ_BLOCK_CHECK;
+
+ fallthrough;
+
+ case SEQ_BLOCK_CHECK:
+ if (s->check_type == XZ_CHECK_CRC32) {
+ ret = crc32_validate(s, b);
+ if (ret != XZ_STREAM_END)
+ return ret;
+ }
+#ifdef XZ_DEC_ANY_CHECK
+ else if (!check_skip(s, b)) {
+ return XZ_OK;
+ }
+#endif
+
+ s->sequence = SEQ_BLOCK_START;
+ break;
+
+ case SEQ_INDEX:
+ ret = dec_index(s, b);
+ if (ret != XZ_STREAM_END)
+ return ret;
+
+ s->sequence = SEQ_INDEX_PADDING;
+
+ fallthrough;
+
+ case SEQ_INDEX_PADDING:
+ while ((s->index.size + (b->in_pos - s->in_start))
+ & 3) {
+ if (b->in_pos == b->in_size) {
+ index_update(s, b);
+ return XZ_OK;
+ }
+
+ if (b->in[b->in_pos++] != 0)
+ return XZ_DATA_ERROR;
+ }
+
+ /* Finish the CRC32 value and Index size. */
+ index_update(s, b);
+
+ /* Compare the hashes to validate the Index field. */
+ if (!memeq(&s->block.hash, &s->index.hash,
+ sizeof(s->block.hash)))
+ return XZ_DATA_ERROR;
+
+ s->sequence = SEQ_INDEX_CRC32;
+
+ fallthrough;
+
+ case SEQ_INDEX_CRC32:
+ ret = crc32_validate(s, b);
+ if (ret != XZ_STREAM_END)
+ return ret;
+
+ s->temp.size = STREAM_HEADER_SIZE;
+ s->sequence = SEQ_STREAM_FOOTER;
+
+ fallthrough;
+
+ case SEQ_STREAM_FOOTER:
+ if (!fill_temp(s, b))
+ return XZ_OK;
+
+ return dec_stream_footer(s);
+ }
+ }
+
+ /* Never reached */
+}
+
+/*
+ * xz_dec_run() is a wrapper for dec_main() to handle some special cases in
+ * multi-call and single-call decoding.
+ *
+ * In multi-call mode, we must return XZ_BUF_ERROR when it seems clear that we
+ * are not going to make any progress anymore. This is to prevent the caller
+ * from calling us infinitely when the input file is truncated or otherwise
+ * corrupt. Since the zlib-style API allows the caller to fill the input buffer
+ * only when the decoder isn't producing any new output, we have to be careful
+ * to avoid returning XZ_BUF_ERROR too easily: XZ_BUF_ERROR is returned only
+ * after the second consecutive call to xz_dec_run() that makes no progress.
+ *
+ * In single-call mode, if we couldn't decode everything and no error
+ * occurred, either the input is truncated or the output buffer is too small.
+ * Since we know that the last input byte never produces any output, we know
+ * that if all the input was consumed and decoding wasn't finished, the file
+ * must be corrupt. Otherwise the output buffer has to be too small, or the
+ * file is corrupt in a way that decoding it produces output that is too big.
+ *
+ * If single-call decoding fails, we reset b->in_pos and b->out_pos back to
+ * their original values. This is because with some filter chains there won't
+ * be any valid uncompressed data in the output buffer unless the decoding
+ * actually succeeds (that's the price to pay for using the output buffer as
+ * the workspace).
+ */
+XZ_EXTERN enum xz_ret xz_dec_run(struct xz_dec *s, struct xz_buf *b)
+{
+ size_t in_start;
+ size_t out_start;
+ enum xz_ret ret;
+
+ if (DEC_IS_SINGLE(s->mode))
+ xz_dec_reset(s);
+
+ in_start = b->in_pos;
+ out_start = b->out_pos;
+ ret = dec_main(s, b);
+
+ if (DEC_IS_SINGLE(s->mode)) {
+ if (ret == XZ_OK)
+ ret = b->in_pos == b->in_size
+ ? XZ_DATA_ERROR : XZ_BUF_ERROR;
+
+ if (ret != XZ_STREAM_END) {
+ b->in_pos = in_start;
+ b->out_pos = out_start;
+ }
+
+ } else if (ret == XZ_OK && in_start == b->in_pos
+ && out_start == b->out_pos) {
+ if (s->allow_buf_error)
+ ret = XZ_BUF_ERROR;
+
+ s->allow_buf_error = true;
+ } else {
+ s->allow_buf_error = false;
+ }
+
+ return ret;
+}
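+
+/*
+ * Minimal multi-call usage sketch (illustrative comment; refill() and
+ * consume() are hypothetical helpers supplied by the caller):
+ *
+ *   struct xz_dec *s = xz_dec_init(XZ_DYNALLOC, 1 << 26);
+ *   struct xz_buf b = {
+ *           .in = in_buf, .in_pos = 0, .in_size = 0,
+ *           .out = out_buf, .out_pos = 0, .out_size = sizeof(out_buf)
+ *   };
+ *   enum xz_ret ret;
+ *
+ *   do {
+ *           if (b.in_pos == b.in_size) {
+ *                   b.in_size = refill(in_buf, sizeof(in_buf));
+ *                   b.in_pos = 0;
+ *           }
+ *           ret = xz_dec_run(s, &b);
+ *           consume(out_buf, b.out_pos);
+ *           b.out_pos = 0;
+ *   } while (ret == XZ_OK);
+ *
+ *   xz_dec_end(s);
+ */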
+
+XZ_EXTERN struct xz_dec *xz_dec_init(enum xz_mode mode, uint32_t dict_max)
+{
+ struct xz_dec *s = kmalloc(sizeof(*s), GFP_KERNEL);
+ if (s == NULL)
+ return NULL;
+
+ s->mode = mode;
+
+#ifdef XZ_DEC_BCJ
+ s->bcj = xz_dec_bcj_create(DEC_IS_SINGLE(mode));
+ if (s->bcj == NULL)
+ goto error_bcj;
+#endif
+
+ s->lzma2 = xz_dec_lzma2_create(mode, dict_max);
+ if (s->lzma2 == NULL)
+ goto error_lzma2;
+
+ xz_dec_reset(s);
+ return s;
+
+error_lzma2:
+#ifdef XZ_DEC_BCJ
+ xz_dec_bcj_end(s->bcj);
+error_bcj:
+#endif
+ kfree(s);
+ return NULL;
+}
+
+XZ_EXTERN void xz_dec_reset(struct xz_dec *s)
+{
+ s->sequence = SEQ_STREAM_HEADER;
+ s->allow_buf_error = false;
+ s->pos = 0;
+ s->crc32 = 0;
+ memzero(&s->block, sizeof(s->block));
+ memzero(&s->index, sizeof(s->index));
+ s->temp.pos = 0;
+ s->temp.size = STREAM_HEADER_SIZE;
+}
+
+XZ_EXTERN void xz_dec_end(struct xz_dec *s)
+{
+ if (s != NULL) {
+ xz_dec_lzma2_end(s->lzma2);
+#ifdef XZ_DEC_BCJ
+ xz_dec_bcj_end(s->bcj);
+#endif
+ kfree(s);
+ }
+}
diff --git a/lib/xz/xz_dec_syms.c b/lib/xz/xz_dec_syms.c
new file mode 100644
index 000000000..61098c67a
--- /dev/null
+++ b/lib/xz/xz_dec_syms.c
@@ -0,0 +1,33 @@
+/*
+ * XZ decoder module information
+ *
+ * Author: Lasse Collin <lasse.collin@tukaani.org>
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ */
+
+#include <linux/module.h>
+#include <linux/xz.h>
+
+EXPORT_SYMBOL(xz_dec_init);
+EXPORT_SYMBOL(xz_dec_reset);
+EXPORT_SYMBOL(xz_dec_run);
+EXPORT_SYMBOL(xz_dec_end);
+
+#ifdef CONFIG_XZ_DEC_MICROLZMA
+EXPORT_SYMBOL(xz_dec_microlzma_alloc);
+EXPORT_SYMBOL(xz_dec_microlzma_reset);
+EXPORT_SYMBOL(xz_dec_microlzma_run);
+EXPORT_SYMBOL(xz_dec_microlzma_end);
+#endif
+
+MODULE_DESCRIPTION("XZ decompressor");
+MODULE_VERSION("1.1");
+MODULE_AUTHOR("Lasse Collin <lasse.collin@tukaani.org> and Igor Pavlov");
+
+/*
+ * This code is in the public domain, but in Linux it's simplest to just
+ * say it's GPL and consider the authors as the copyright holders.
+ */
+MODULE_LICENSE("GPL");
diff --git a/lib/xz/xz_dec_test.c b/lib/xz/xz_dec_test.c
new file mode 100644
index 000000000..da28a19d6
--- /dev/null
+++ b/lib/xz/xz_dec_test.c
@@ -0,0 +1,220 @@
+/*
+ * XZ decoder tester
+ *
+ * Author: Lasse Collin <lasse.collin@tukaani.org>
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/crc32.h>
+#include <linux/xz.h>
+
+/* Maximum supported dictionary size */
+#define DICT_MAX (1 << 20)
+
+/* Device name to pass to register_chrdev(). */
+#define DEVICE_NAME "xz_dec_test"
+
+/* Dynamically allocated device major number */
+static int device_major;
+
+/*
+ * We reuse the same decoder state, and thus can decode only one
+ * file at a time.
+ */
+static bool device_is_open;
+
+/* XZ decoder state */
+static struct xz_dec *state;
+
+/*
+ * Return value of xz_dec_run(). We need to avoid calling xz_dec_run() after
+ * it has returned XZ_STREAM_END, so we make this static.
+ */
+static enum xz_ret ret;
+
+/*
+ * Input and output buffers. The input buffer is used as a temporary safe
+ * place for the data coming from userspace.
+ */
+static uint8_t buffer_in[1024];
+static uint8_t buffer_out[1024];
+
+/*
+ * Structure to pass the input and output buffers to the XZ decoder.
+ * A few of the fields are never modified so we initialize them here.
+ */
+static struct xz_buf buffers = {
+ .in = buffer_in,
+ .out = buffer_out,
+ .out_size = sizeof(buffer_out)
+};
+
+/*
+ * CRC32 of uncompressed data. This is used to give the user a simple way
+ * to check that the decoder produces correct output.
+ */
+static uint32_t crc;
+
+static int xz_dec_test_open(struct inode *i, struct file *f)
+{
+ if (device_is_open)
+ return -EBUSY;
+
+ device_is_open = true;
+
+ xz_dec_reset(state);
+ ret = XZ_OK;
+ crc = 0xFFFFFFFF;
+
+ buffers.in_pos = 0;
+ buffers.in_size = 0;
+ buffers.out_pos = 0;
+
+ printk(KERN_INFO DEVICE_NAME ": opened\n");
+ return 0;
+}
+
+static int xz_dec_test_release(struct inode *i, struct file *f)
+{
+ device_is_open = false;
+
+ if (ret == XZ_OK)
+ printk(KERN_INFO DEVICE_NAME ": input was truncated\n");
+
+ printk(KERN_INFO DEVICE_NAME ": closed\n");
+ return 0;
+}
+
+/*
+ * Decode the data given to us from userspace. The CRC32 of the uncompressed
+ * data is calculated and printed at the end of successful decoding. The
+ * uncompressed data isn't stored anywhere for further use.
+ *
+ * The .xz file must have exactly one Stream and no Stream Padding. The data
+ * after the first Stream is considered to be garbage.
+ */
+static ssize_t xz_dec_test_write(struct file *file, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ size_t remaining;
+
+ if (ret != XZ_OK) {
+ if (size > 0)
+ printk(KERN_INFO DEVICE_NAME ": %zu bytes of "
+ "garbage at the end of the file\n",
+ size);
+
+ return -ENOSPC;
+ }
+
+ printk(KERN_INFO DEVICE_NAME ": decoding %zu bytes of input\n",
+ size);
+
+ remaining = size;
+ while ((remaining > 0 || buffers.out_pos == buffers.out_size)
+ && ret == XZ_OK) {
+ if (buffers.in_pos == buffers.in_size) {
+ buffers.in_pos = 0;
+ buffers.in_size = min(remaining, sizeof(buffer_in));
+ if (copy_from_user(buffer_in, buf, buffers.in_size))
+ return -EFAULT;
+
+ buf += buffers.in_size;
+ remaining -= buffers.in_size;
+ }
+
+ buffers.out_pos = 0;
+ ret = xz_dec_run(state, &buffers);
+ crc = crc32(crc, buffer_out, buffers.out_pos);
+ }
+
+ switch (ret) {
+ case XZ_OK:
+ printk(KERN_INFO DEVICE_NAME ": XZ_OK\n");
+ return size;
+
+ case XZ_STREAM_END:
+ printk(KERN_INFO DEVICE_NAME ": XZ_STREAM_END, "
+ "CRC32 = 0x%08X\n", ~crc);
+ return size - remaining - (buffers.in_size - buffers.in_pos);
+
+ case XZ_MEMLIMIT_ERROR:
+ printk(KERN_INFO DEVICE_NAME ": XZ_MEMLIMIT_ERROR\n");
+ break;
+
+ case XZ_FORMAT_ERROR:
+ printk(KERN_INFO DEVICE_NAME ": XZ_FORMAT_ERROR\n");
+ break;
+
+ case XZ_OPTIONS_ERROR:
+ printk(KERN_INFO DEVICE_NAME ": XZ_OPTIONS_ERROR\n");
+ break;
+
+ case XZ_DATA_ERROR:
+ printk(KERN_INFO DEVICE_NAME ": XZ_DATA_ERROR\n");
+ break;
+
+ case XZ_BUF_ERROR:
+ printk(KERN_INFO DEVICE_NAME ": XZ_BUF_ERROR\n");
+ break;
+
+ default:
+ printk(KERN_INFO DEVICE_NAME ": Bug detected!\n");
+ break;
+ }
+
+ return -EIO;
+}
+
+/* Allocate the XZ decoder state and register the character device. */
+static int __init xz_dec_test_init(void)
+{
+ static const struct file_operations fileops = {
+ .owner = THIS_MODULE,
+ .open = &xz_dec_test_open,
+ .release = &xz_dec_test_release,
+ .write = &xz_dec_test_write
+ };
+
+ state = xz_dec_init(XZ_PREALLOC, DICT_MAX);
+ if (state == NULL)
+ return -ENOMEM;
+
+ device_major = register_chrdev(0, DEVICE_NAME, &fileops);
+ if (device_major < 0) {
+ xz_dec_end(state);
+ return device_major;
+ }
+
+ printk(KERN_INFO DEVICE_NAME ": module loaded\n");
+ printk(KERN_INFO DEVICE_NAME ": Create a device node with "
+ "'mknod " DEVICE_NAME " c %d 0' and write .xz files "
+ "to it.\n", device_major);
+ return 0;
+}
+
+static void __exit xz_dec_test_exit(void)
+{
+ unregister_chrdev(device_major, DEVICE_NAME);
+ xz_dec_end(state);
+ printk(KERN_INFO DEVICE_NAME ": module unloaded\n");
+}
+
+module_init(xz_dec_test_init);
+module_exit(xz_dec_test_exit);
+
+MODULE_DESCRIPTION("XZ decompressor tester");
+MODULE_VERSION("1.0");
+MODULE_AUTHOR("Lasse Collin <lasse.collin@tukaani.org>");
+
+/*
+ * This code is in the public domain, but in Linux it's simplest to just
+ * say it's GPL and consider the authors as the copyright holders.
+ */
+MODULE_LICENSE("GPL");
diff --git a/lib/xz/xz_lzma2.h b/lib/xz/xz_lzma2.h
new file mode 100644
index 000000000..92d852d4f
--- /dev/null
+++ b/lib/xz/xz_lzma2.h
@@ -0,0 +1,204 @@
+/*
+ * LZMA2 definitions
+ *
+ * Authors: Lasse Collin <lasse.collin@tukaani.org>
+ * Igor Pavlov <https://7-zip.org/>
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ */
+
+#ifndef XZ_LZMA2_H
+#define XZ_LZMA2_H
+
+/* Range coder constants */
+#define RC_SHIFT_BITS 8
+#define RC_TOP_BITS 24
+#define RC_TOP_VALUE (1 << RC_TOP_BITS)
+#define RC_BIT_MODEL_TOTAL_BITS 11
+#define RC_BIT_MODEL_TOTAL (1 << RC_BIT_MODEL_TOTAL_BITS)
+#define RC_MOVE_BITS 5
+
+/*
+ * Maximum number of position states. A position state is the lowest pb
+ * number of bits of the current uncompressed offset. In some places there
+ * are different sets of probabilities for different position states.
+ */
+#define POS_STATES_MAX (1 << 4)
+
+/*
+ * This enum is used to track which LZMA symbols have occurred most recently
+ * and in which order. This information is used to predict the next symbol.
+ *
+ * Symbols:
+ * - Literal: One 8-bit byte
+ * - Match: Repeat a chunk of data at some distance
+ * - Long repeat: Multi-byte match at a recently seen distance
+ * - Short repeat: One-byte repeat at a recently seen distance
+ *
+ * The symbol names are in the form STATE_oldest_older_previous. REP means
+ * either a short or long repeated match, and NONLIT means any non-literal.
+ */
+enum lzma_state {
+ STATE_LIT_LIT,
+ STATE_MATCH_LIT_LIT,
+ STATE_REP_LIT_LIT,
+ STATE_SHORTREP_LIT_LIT,
+ STATE_MATCH_LIT,
+ STATE_REP_LIT,
+ STATE_SHORTREP_LIT,
+ STATE_LIT_MATCH,
+ STATE_LIT_LONGREP,
+ STATE_LIT_SHORTREP,
+ STATE_NONLIT_MATCH,
+ STATE_NONLIT_REP
+};
+
+/* Total number of states */
+#define STATES 12
+
+/* The lowest 7 states indicate that the previous state was a literal. */
+#define LIT_STATES 7
+
+/* Indicate that the latest symbol was a literal. */
+static inline void lzma_state_literal(enum lzma_state *state)
+{
+ if (*state <= STATE_SHORTREP_LIT_LIT)
+ *state = STATE_LIT_LIT;
+ else if (*state <= STATE_LIT_SHORTREP)
+ *state -= 3;
+ else
+ *state -= 6;
+}
+
+/* Indicate that the latest symbol was a match. */
+static inline void lzma_state_match(enum lzma_state *state)
+{
+ *state = *state < LIT_STATES ? STATE_LIT_MATCH : STATE_NONLIT_MATCH;
+}
+
+/* Indicate that the latest state was a long repeated match. */
+static inline void lzma_state_long_rep(enum lzma_state *state)
+{
+ *state = *state < LIT_STATES ? STATE_LIT_LONGREP : STATE_NONLIT_REP;
+}
+
+/* Indicate that the latest symbol was a short match. */
+static inline void lzma_state_short_rep(enum lzma_state *state)
+{
+ *state = *state < LIT_STATES ? STATE_LIT_SHORTREP : STATE_NONLIT_REP;
+}
+
+/* Test if the previous symbol was a literal. */
+static inline bool lzma_state_is_literal(enum lzma_state state)
+{
+ return state < LIT_STATES;
+}
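+
+/*
+ * Worked example of the transitions above (illustrative comment):
+ * from STATE_LIT_LIT, decoding a match moves the state to
+ * STATE_LIT_MATCH. Three literals in a row then walk it back down:
+ * STATE_LIT_MATCH -> STATE_MATCH_LIT -> STATE_MATCH_LIT_LIT ->
+ * STATE_LIT_LIT.
+ */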
+
+/* Each literal coder is divided into three sections:
+ * - 0x001-0x0FF: Without match byte
+ * - 0x101-0x1FF: With match byte; match bit is 0
+ * - 0x201-0x2FF: With match byte; match bit is 1
+ *
+ * The match byte is used when the previous LZMA symbol was something other
+ * than a literal (that is, it was some kind of match).
+ */
+#define LITERAL_CODER_SIZE 0x300
+
+/* Maximum number of literal coders */
+#define LITERAL_CODERS_MAX (1 << 4)
+
+/* Minimum length of a match is two bytes. */
+#define MATCH_LEN_MIN 2
+
+/* Match length is encoded with 4, 5, or 10 bits.
+ *
+ * Length Bits
+ * 2-9 4 = Choice=0 + 3 bits
+ * 10-17 5 = Choice=1 + Choice2=0 + 3 bits
+ * 18-273 10 = Choice=1 + Choice2=1 + 8 bits
+ */
+#define LEN_LOW_BITS 3
+#define LEN_LOW_SYMBOLS (1 << LEN_LOW_BITS)
+#define LEN_MID_BITS 3
+#define LEN_MID_SYMBOLS (1 << LEN_MID_BITS)
+#define LEN_HIGH_BITS 8
+#define LEN_HIGH_SYMBOLS (1 << LEN_HIGH_BITS)
+#define LEN_SYMBOLS (LEN_LOW_SYMBOLS + LEN_MID_SYMBOLS + LEN_HIGH_SYMBOLS)
+
+/*
+ * Maximum length of a match is 273 which is a result of the encoding
+ * described above.
+ */
+#define MATCH_LEN_MAX (MATCH_LEN_MIN + LEN_SYMBOLS - 1)
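+
+/*
+ * Worked examples of the length encoding above (illustrative comment):
+ * length 5 is coded as Choice = 0 plus the three bits for 5 - 2 = 3;
+ * length 12 as Choice = 1, Choice2 = 0 plus the three bits for
+ * 12 - 10 = 2; and length 100 as Choice = 1, Choice2 = 1 plus the
+ * eight bits for 100 - 18 = 82.
+ */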
+
+/*
+ * Different sets of probabilities are used for match distances that have
+ * very short match length: Lengths of 2, 3, and 4 bytes have a separate
+ * set of probabilities for each length. The matches with longer length
+ * use a shared set of probabilities.
+ */
+#define DIST_STATES 4
+
+/*
+ * Get the index of the appropriate probability array for decoding
+ * the distance slot.
+ */
+static inline uint32_t lzma_get_dist_state(uint32_t len)
+{
+ return len < DIST_STATES + MATCH_LEN_MIN
+ ? len - MATCH_LEN_MIN : DIST_STATES - 1;
+}
+
+/*
+ * The highest two bits of a 32-bit match distance are encoded using six bits.
+ * This six-bit value is called a distance slot. This way encoding a 32-bit
+ * value takes 6-36 bits, larger values taking more bits.
+ */
+#define DIST_SLOT_BITS 6
+#define DIST_SLOTS (1 << DIST_SLOT_BITS)
+
+/* Match distances up to 127 are fully encoded using probabilities. Since
+ * the highest two bits (distance slot) are always encoded using six bits,
+ * the distances 0-3 don't need any additional bits to encode, since the
+ * distance slot itself is the same as the actual distance. DIST_MODEL_START
+ * indicates the first distance slot where at least one additional bit is
+ * needed.
+ */
+#define DIST_MODEL_START 4
+
+/*
+ * Match distances greater than 127 are encoded in three pieces:
+ * - distance slot: the highest two bits
+ * - direct bits: 2-26 bits below the highest two bits
+ * - alignment bits: four lowest bits
+ *
+ * Direct bits don't use any probabilities.
+ *
+ * The distance slot value of 14 is for distances 128-191.
+ */
+#define DIST_MODEL_END 14
+
+/* Distance slots that indicate a distance <= 127. */
+#define FULL_DISTANCES_BITS (DIST_MODEL_END / 2)
+#define FULL_DISTANCES (1 << FULL_DISTANCES_BITS)
+
+/*
+ * For match distances greater than 127, only the highest two bits and the
+ * lowest four bits (alignment) are encoded using probabilities.
+ */
+#define ALIGN_BITS 4
+#define ALIGN_SIZE (1 << ALIGN_BITS)
+#define ALIGN_MASK (ALIGN_SIZE - 1)
+
+/* Total number of all probability variables */
+#define PROBS_TOTAL (1846 + LITERAL_CODERS_MAX * LITERAL_CODER_SIZE)
+
+/*
+ * LZMA remembers the four most recent match distances. Reusing these
+ * distances tends to take less space than re-encoding the actual
+ * distance value.
+ */
+#define REPS 4
+
+#endif
diff --git a/lib/xz/xz_private.h b/lib/xz/xz_private.h
new file mode 100644
index 000000000..bf1e94ec7
--- /dev/null
+++ b/lib/xz/xz_private.h
@@ -0,0 +1,163 @@
+/*
+ * Private includes and definitions
+ *
+ * Author: Lasse Collin <lasse.collin@tukaani.org>
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ */
+
+#ifndef XZ_PRIVATE_H
+#define XZ_PRIVATE_H
+
+#ifdef __KERNEL__
+# include <linux/xz.h>
+# include <linux/kernel.h>
+# include <asm/unaligned.h>
+ /* XZ_PREBOOT may be defined only via decompress_unxz.c. */
+# ifndef XZ_PREBOOT
+# include <linux/slab.h>
+# include <linux/vmalloc.h>
+# include <linux/string.h>
+# ifdef CONFIG_XZ_DEC_X86
+# define XZ_DEC_X86
+# endif
+# ifdef CONFIG_XZ_DEC_POWERPC
+# define XZ_DEC_POWERPC
+# endif
+# ifdef CONFIG_XZ_DEC_IA64
+# define XZ_DEC_IA64
+# endif
+# ifdef CONFIG_XZ_DEC_ARM
+# define XZ_DEC_ARM
+# endif
+# ifdef CONFIG_XZ_DEC_ARMTHUMB
+# define XZ_DEC_ARMTHUMB
+# endif
+# ifdef CONFIG_XZ_DEC_SPARC
+# define XZ_DEC_SPARC
+# endif
+# ifdef CONFIG_XZ_DEC_MICROLZMA
+# define XZ_DEC_MICROLZMA
+# endif
+# define memeq(a, b, size) (memcmp(a, b, size) == 0)
+# define memzero(buf, size) memset(buf, 0, size)
+# endif
+# define get_le32(p) le32_to_cpup((const uint32_t *)(p))
+#else
+ /*
+ * For userspace builds, use a separate header to define the required
+ * macros and functions. This makes it easier to adapt the code into
+ * different environments and avoids clutter in the Linux kernel tree.
+ */
+# include "xz_config.h"
+#endif
+
+/* If no specific decoding mode is requested, enable support for all modes. */
+#if !defined(XZ_DEC_SINGLE) && !defined(XZ_DEC_PREALLOC) \
+ && !defined(XZ_DEC_DYNALLOC)
+# define XZ_DEC_SINGLE
+# define XZ_DEC_PREALLOC
+# define XZ_DEC_DYNALLOC
+#endif
+
+/*
+ * The DEC_IS_foo(mode) macros are used in "if" statements. If only some
+ * of the supported modes are enabled, these macros will evaluate to true or
+ * false at compile time and thus allow the compiler to omit unneeded code.
+ */
+#ifdef XZ_DEC_SINGLE
+# define DEC_IS_SINGLE(mode) ((mode) == XZ_SINGLE)
+#else
+# define DEC_IS_SINGLE(mode) (false)
+#endif
+
+#ifdef XZ_DEC_PREALLOC
+# define DEC_IS_PREALLOC(mode) ((mode) == XZ_PREALLOC)
+#else
+# define DEC_IS_PREALLOC(mode) (false)
+#endif
+
+#ifdef XZ_DEC_DYNALLOC
+# define DEC_IS_DYNALLOC(mode) ((mode) == XZ_DYNALLOC)
+#else
+# define DEC_IS_DYNALLOC(mode) (false)
+#endif
+
+#if !defined(XZ_DEC_SINGLE)
+# define DEC_IS_MULTI(mode) (true)
+#elif defined(XZ_DEC_PREALLOC) || defined(XZ_DEC_DYNALLOC)
+# define DEC_IS_MULTI(mode) ((mode) != XZ_SINGLE)
+#else
+# define DEC_IS_MULTI(mode) (false)
+#endif
+
+/*
+ * If any of the BCJ filter decoders are wanted, define XZ_DEC_BCJ.
+ * XZ_DEC_BCJ is used to enable generic support for BCJ decoders.
+ */
+#ifndef XZ_DEC_BCJ
+# if defined(XZ_DEC_X86) || defined(XZ_DEC_POWERPC) \
+ || defined(XZ_DEC_IA64) || defined(XZ_DEC_ARM) \
+ || defined(XZ_DEC_ARMTHUMB) || defined(XZ_DEC_SPARC)
+# define XZ_DEC_BCJ
+# endif
+#endif
+
+#ifndef CRC32_POLY_LE
+#define CRC32_POLY_LE 0xedb88320
+#endif
+
+/*
+ * Allocate memory for LZMA2 decoder. xz_dec_lzma2_reset() must be used
+ * before calling xz_dec_lzma2_run().
+ */
+XZ_EXTERN struct xz_dec_lzma2 *xz_dec_lzma2_create(enum xz_mode mode,
+ uint32_t dict_max);
+
+/*
+ * Decode the LZMA2 properties (one byte) and reset the decoder. Return
+ * XZ_OK on success, XZ_MEMLIMIT_ERROR if the preallocated dictionary is not
+ * big enough, and XZ_OPTIONS_ERROR if props indicates something that this
+ * decoder doesn't support.
+ */
+XZ_EXTERN enum xz_ret xz_dec_lzma2_reset(struct xz_dec_lzma2 *s,
+ uint8_t props);
+
+/* Decode raw LZMA2 stream from b->in to b->out. */
+XZ_EXTERN enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s,
+ struct xz_buf *b);
+
+/* Free the memory allocated for the LZMA2 decoder. */
+XZ_EXTERN void xz_dec_lzma2_end(struct xz_dec_lzma2 *s);
+
+#ifdef XZ_DEC_BCJ
+/*
+ * Allocate memory for BCJ decoders. xz_dec_bcj_reset() must be used before
+ * calling xz_dec_bcj_run().
+ */
+XZ_EXTERN struct xz_dec_bcj *xz_dec_bcj_create(bool single_call);
+
+/*
+ * Decode the Filter ID of a BCJ filter. This implementation doesn't
+ * support custom start offsets, so no decoding of Filter Properties
+ * is needed. Returns XZ_OK if the given Filter ID is supported.
+ * Otherwise XZ_OPTIONS_ERROR is returned.
+ */
+XZ_EXTERN enum xz_ret xz_dec_bcj_reset(struct xz_dec_bcj *s, uint8_t id);
+
+/*
+ * Decode raw BCJ + LZMA2 stream. This must be used only if there actually is
+ * a BCJ filter in the chain. If the chain has only LZMA2, xz_dec_lzma2_run()
+ * must be called directly.
+ */
+XZ_EXTERN enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s,
+ struct xz_dec_lzma2 *lzma2,
+ struct xz_buf *b);
+
+/* Free the memory allocated for the BCJ filters. */
+#define xz_dec_bcj_end(s) kfree(s)
+#endif
+
+#endif
diff --git a/lib/xz/xz_stream.h b/lib/xz/xz_stream.h
new file mode 100644
index 000000000..430bb3a0d
--- /dev/null
+++ b/lib/xz/xz_stream.h
@@ -0,0 +1,62 @@
+/*
+ * Definitions for handling the .xz file format
+ *
+ * Author: Lasse Collin <lasse.collin@tukaani.org>
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ */
+
+#ifndef XZ_STREAM_H
+#define XZ_STREAM_H
+
+#if defined(__KERNEL__) && !XZ_INTERNAL_CRC32
+# include <linux/crc32.h>
+# undef crc32
+# define xz_crc32(buf, size, crc) \
+ (~crc32_le(~(uint32_t)(crc), buf, size))
+#endif
+
+/*
+ * See the .xz file format specification at
+ * https://tukaani.org/xz/xz-file-format.txt
+ * to understand the container format.
+ */
+
+#define STREAM_HEADER_SIZE 12
+
+#define HEADER_MAGIC "\3757zXZ"
+#define HEADER_MAGIC_SIZE 6
+
+#define FOOTER_MAGIC "YZ"
+#define FOOTER_MAGIC_SIZE 2
+
+/*
+ * Variable-length integer can hold a 63-bit unsigned integer or a special
+ * value indicating that the value is unknown.
+ *
+ * Experimental: vli_type can be defined to uint32_t to save a few bytes
+ * in code size (no effect on speed). Doing so limits the uncompressed and
+ * compressed size of the file to less than 256 MiB and may also weaken
+ * error detection slightly.
+ */
+typedef uint64_t vli_type;
+
+#define VLI_MAX ((vli_type)-1 / 2)
+#define VLI_UNKNOWN ((vli_type)-1)
+
+/* Maximum encoded size of a VLI */
+#define VLI_BYTES_MAX (sizeof(vli_type) * 8 / 7)
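+
+/*
+ * With the default 64-bit vli_type this evaluates to 64 / 7 = 9
+ * (illustrative note): nine 7-bit groups are enough to carry the
+ * 63 usable bits of a VLI.
+ */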
+
+/* Integrity Check types */
+enum xz_check {
+ XZ_CHECK_NONE = 0,
+ XZ_CHECK_CRC32 = 1,
+ XZ_CHECK_CRC64 = 4,
+ XZ_CHECK_SHA256 = 10
+};
+
+/* Maximum possible Check ID */
+#define XZ_CHECK_MAX 15
+
+#endif
diff --git a/lib/zlib_deflate/Makefile b/lib/zlib_deflate/Makefile
new file mode 100644
index 000000000..2622e03c0
--- /dev/null
+++ b/lib/zlib_deflate/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# This is a modified version of zlib, which does all memory
+# allocation ahead of time.
+#
+# This is the compression code, see zlib_inflate for the
+# decompression code.
+#
+
+obj-$(CONFIG_ZLIB_DEFLATE) += zlib_deflate.o
+
+zlib_deflate-objs := deflate.o deftree.o deflate_syms.o
diff --git a/lib/zlib_deflate/deflate.c b/lib/zlib_deflate/deflate.c
new file mode 100644
index 000000000..3a1d8d341
--- /dev/null
+++ b/lib/zlib_deflate/deflate.c
@@ -0,0 +1,1153 @@
+/* +++ deflate.c */
+/* deflate.c -- compress data using the deflation algorithm
+ * Copyright (C) 1995-1996 Jean-loup Gailly.
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/*
+ * ALGORITHM
+ *
+ * The "deflation" process depends on being able to identify portions
+ * of the input text which are identical to earlier input (within a
+ * sliding window trailing behind the input currently being processed).
+ *
+ * The most straightforward technique turns out to be the fastest for
+ * most input files: try all possible matches and select the longest.
+ * The key feature of this algorithm is that insertions into the string
+ * dictionary are very simple and thus fast, and deletions are avoided
+ * completely. Insertions are performed at each input character, whereas
+ * string matches are performed only when the previous match ends. So it
+ * is preferable to spend more time in matches to allow very fast string
+ * insertions and avoid deletions. The matching algorithm for small
+ * strings is inspired from that of Rabin & Karp. A brute force approach
+ * is used to find longer strings when a small match has been found.
+ * A similar algorithm is used in comic (by Jan-Mark Wams) and freeze
+ * (by Leonid Broukhis).
+ * A previous version of this file used a more sophisticated algorithm
+ * (by Fiala and Greene) which is guaranteed to run in linear amortized
+ * time, but has a larger average cost, uses more memory and is patented.
+ * However the F&G algorithm may be faster for some highly redundant
+ * files if the parameter max_chain_length (described below) is too large.
+ *
+ * ACKNOWLEDGEMENTS
+ *
+ * The idea of lazy evaluation of matches is due to Jan-Mark Wams, and
+ * I found it in 'freeze' written by Leonid Broukhis.
+ * Thanks to many people for bug reports and testing.
+ *
+ * REFERENCES
+ *
+ * Deutsch, L.P.,"DEFLATE Compressed Data Format Specification".
+ * Available in ftp://ds.internic.net/rfc/rfc1951.txt
+ *
+ * A description of the Rabin and Karp algorithm is given in the book
+ * "Algorithms" by R. Sedgewick, Addison-Wesley, p252.
+ *
+ * Fiala,E.R., and Greene,D.H.
+ * Data Compression with Finite Windows, Comm.ACM, 32,4 (1989) 490-505
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/zutil.h>
+#include "defutil.h"
+
+/* architecture-specific bits */
+#ifdef CONFIG_ZLIB_DFLTCC
+# include "../zlib_dfltcc/dfltcc_deflate.h"
+#else
+#define DEFLATE_RESET_HOOK(strm) do {} while (0)
+#define DEFLATE_HOOK(strm, flush, bstate) 0
+#define DEFLATE_NEED_CHECKSUM(strm) 1
+#define DEFLATE_DFLTCC_ENABLED() 0
+#endif
+
+/* ===========================================================================
+ * Function prototypes.
+ */
+
+typedef block_state (*compress_func) (deflate_state *s, int flush);
+/* Compression function. Returns the block state after the call. */
+
+static void fill_window (deflate_state *s);
+static block_state deflate_stored (deflate_state *s, int flush);
+static block_state deflate_fast (deflate_state *s, int flush);
+static block_state deflate_slow (deflate_state *s, int flush);
+static void lm_init (deflate_state *s);
+static void putShortMSB (deflate_state *s, uInt b);
+static int read_buf (z_streamp strm, Byte *buf, unsigned size);
+static uInt longest_match (deflate_state *s, IPos cur_match);
+
+#ifdef DEBUG_ZLIB
+static void check_match (deflate_state *s, IPos start, IPos match,
+ int length);
+#endif
+
+/* ===========================================================================
+ * Local data
+ */
+
+#define NIL 0
+/* Tail of hash chains */
+
+#ifndef TOO_FAR
+# define TOO_FAR 4096
+#endif
+/* Matches of length 3 are discarded if their distance exceeds TOO_FAR */
+
+#define MIN_LOOKAHEAD (MAX_MATCH+MIN_MATCH+1)
+/* Minimum amount of lookahead, except at the end of the input file.
+ * See deflate.c for comments about the MIN_MATCH+1.
+ */
+
+/* Workspace to be allocated for deflate processing */
+typedef struct deflate_workspace {
+ /* State memory for the deflator */
+ deflate_state deflate_memory;
+#ifdef CONFIG_ZLIB_DFLTCC
+ /* State memory for s390 hardware deflate */
+ struct dfltcc_deflate_state dfltcc_memory;
+#endif
+ Byte *window_memory;
+ Pos *prev_memory;
+ Pos *head_memory;
+ char *overlay_memory;
+} deflate_workspace;
+
+#ifdef CONFIG_ZLIB_DFLTCC
+/* dfltcc_state must be doubleword aligned for DFLTCC call */
+static_assert(offsetof(struct deflate_workspace, dfltcc_memory) % 8 == 0);
+#endif
+
+/* Values for max_lazy_match, good_match and max_chain_length, depending on
+ * the desired pack level (0..9). The values given below have been tuned to
+ * exclude worst case performance for pathological files. Better values may be
+ * found for specific files.
+ */
+typedef struct config_s {
+ ush good_length; /* reduce lazy search above this match length */
+ ush max_lazy; /* do not perform lazy search above this match length */
+ ush nice_length; /* quit search above this match length */
+ ush max_chain;
+ compress_func func;
+} config;
+
+static const config configuration_table[10] = {
+/* good lazy nice chain */
+/* 0 */ {0, 0, 0, 0, deflate_stored}, /* store only */
+/* 1 */ {4, 4, 8, 4, deflate_fast}, /* maximum speed, no lazy matches */
+/* 2 */ {4, 5, 16, 8, deflate_fast},
+/* 3 */ {4, 6, 32, 32, deflate_fast},
+
+/* 4 */ {4, 4, 16, 16, deflate_slow}, /* lazy matches */
+/* 5 */ {8, 16, 32, 32, deflate_slow},
+/* 6 */ {8, 16, 128, 128, deflate_slow},
+/* 7 */ {8, 32, 128, 256, deflate_slow},
+/* 8 */ {32, 128, 258, 1024, deflate_slow},
+/* 9 */ {32, 258, 258, 4096, deflate_slow}}; /* maximum compression */
+
+/* Note: the deflate() code requires max_lazy >= MIN_MATCH and max_chain >= 4
+ * For deflate_fast() (levels <= 3) good is ignored and lazy has a different
+ * meaning.
+ */
+
+#define EQUAL 0
+/* result of memcmp for equal strings */
+
+/* ===========================================================================
+ * Update a hash value with the given input byte
+ * IN assertion: all calls to UPDATE_HASH are made with consecutive
+ * input characters, so that a running hash key can be computed from the
+ * previous key instead of complete recalculation each time.
+ */
+#define UPDATE_HASH(s,h,c) (h = (((h)<<s->hash_shift) ^ (c)) & s->hash_mask)
+
+
+/* ===========================================================================
+ * Insert string str in the dictionary and set match_head to the previous head
+ * of the hash chain (the most recent string with same hash key). Return
+ * the previous length of the hash chain.
+ * IN assertion: all calls to INSERT_STRING are made with consecutive
+ * input characters and the first MIN_MATCH bytes of str are valid
+ * (except for the last MIN_MATCH-1 bytes of the input file).
+ */
+#define INSERT_STRING(s, str, match_head) \
+ (UPDATE_HASH(s, s->ins_h, s->window[(str) + (MIN_MATCH-1)]), \
+ s->prev[(str) & s->w_mask] = match_head = s->head[s->ins_h], \
+ s->head[s->ins_h] = (Pos)(str))
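+
+/*
+ * Illustrative note (not part of the original source): hash_shift is
+ * chosen as (hash_bits + MIN_MATCH - 1) / MIN_MATCH, so after
+ * MIN_MATCH (= 3) updates a byte has been shifted completely out of
+ * the key. s->ins_h therefore always covers exactly the MIN_MATCH
+ * most recent input bytes without any recomputation.
+ */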
+
+/* ===========================================================================
+ * Initialize the hash table (avoiding 64K overflow for 16 bit systems).
+ * prev[] will be initialized on the fly.
+ */
+#define CLEAR_HASH(s) \
+ s->head[s->hash_size-1] = NIL; \
+ memset((char *)s->head, 0, (unsigned)(s->hash_size-1)*sizeof(*s->head));
+
+/* ========================================================================= */
+int zlib_deflateInit2(
+ z_streamp strm,
+ int level,
+ int method,
+ int windowBits,
+ int memLevel,
+ int strategy
+)
+{
+ deflate_state *s;
+ int noheader = 0;
+ deflate_workspace *mem;
+ char *next;
+
+ ush *overlay;
+ /* We overlay pending_buf and d_buf+l_buf. This works since the average
+ * output size for (length,distance) codes is <= 24 bits.
+ */
+
+ if (strm == NULL) return Z_STREAM_ERROR;
+
+ strm->msg = NULL;
+
+ if (level == Z_DEFAULT_COMPRESSION) level = 6;
+
+ mem = (deflate_workspace *) strm->workspace;
+
+ if (windowBits < 0) { /* undocumented feature: suppress zlib header */
+ noheader = 1;
+ windowBits = -windowBits;
+ }
+ if (memLevel < 1 || memLevel > MAX_MEM_LEVEL || method != Z_DEFLATED ||
+ windowBits < 9 || windowBits > 15 || level < 0 || level > 9 ||
+ strategy < 0 || strategy > Z_HUFFMAN_ONLY) {
+ return Z_STREAM_ERROR;
+ }
+
+ /*
+ * Direct the workspace's pointers to the chunks that were allocated
+ * along with the deflate_workspace struct.
+ */
+ next = (char *) mem;
+ next += sizeof(*mem);
+#ifdef CONFIG_ZLIB_DFLTCC
+ /*
+ * DFLTCC requires the window to be page aligned.
+ * Thus, we overallocate and take the aligned portion of the buffer.
+ */
+ mem->window_memory = (Byte *) PTR_ALIGN(next, PAGE_SIZE);
+#else
+ mem->window_memory = (Byte *) next;
+#endif
+ next += zlib_deflate_window_memsize(windowBits);
+ mem->prev_memory = (Pos *) next;
+ next += zlib_deflate_prev_memsize(windowBits);
+ mem->head_memory = (Pos *) next;
+ next += zlib_deflate_head_memsize(memLevel);
+ mem->overlay_memory = next;
+
+ s = (deflate_state *) &(mem->deflate_memory);
+ strm->state = (struct internal_state *)s;
+ s->strm = strm;
+
+ s->noheader = noheader;
+ s->w_bits = windowBits;
+ s->w_size = 1 << s->w_bits;
+ s->w_mask = s->w_size - 1;
+
+ s->hash_bits = memLevel + 7;
+ s->hash_size = 1 << s->hash_bits;
+ s->hash_mask = s->hash_size - 1;
+ s->hash_shift = ((s->hash_bits+MIN_MATCH-1)/MIN_MATCH);
+
+ s->window = (Byte *) mem->window_memory;
+ s->prev = (Pos *) mem->prev_memory;
+ s->head = (Pos *) mem->head_memory;
+
+ s->lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */
+
+ overlay = (ush *) mem->overlay_memory;
+ s->pending_buf = (uch *) overlay;
+ s->pending_buf_size = (ulg)s->lit_bufsize * (sizeof(ush)+2L);
+
+ s->d_buf = overlay + s->lit_bufsize/sizeof(ush);
+ s->l_buf = s->pending_buf + (1+sizeof(ush))*s->lit_bufsize;
+
+ s->level = level;
+ s->strategy = strategy;
+ s->method = (Byte)method;
+
+ return zlib_deflateReset(strm);
+}
+
+/* ========================================================================= */
+int zlib_deflateReset(
+ z_streamp strm
+)
+{
+ deflate_state *s;
+
+ if (strm == NULL || strm->state == NULL)
+ return Z_STREAM_ERROR;
+
+ strm->total_in = strm->total_out = 0;
+ strm->msg = NULL;
+ strm->data_type = Z_UNKNOWN;
+
+ s = (deflate_state *)strm->state;
+ s->pending = 0;
+ s->pending_out = s->pending_buf;
+
+ if (s->noheader < 0) {
+ s->noheader = 0; /* was set to -1 by deflate(..., Z_FINISH); */
+ }
+ s->status = s->noheader ? BUSY_STATE : INIT_STATE;
+ strm->adler = 1;
+ s->last_flush = Z_NO_FLUSH;
+
+ zlib_tr_init(s);
+ lm_init(s);
+
+ DEFLATE_RESET_HOOK(strm);
+
+ return Z_OK;
+}
+
+/* =========================================================================
+ * Put a short in the pending buffer. The 16-bit value is put in MSB order.
+ * IN assertion: the stream state is correct and there is enough room in
+ * pending_buf.
+ */
+static void putShortMSB(
+ deflate_state *s,
+ uInt b
+)
+{
+ put_byte(s, (Byte)(b >> 8));
+ put_byte(s, (Byte)(b & 0xff));
+}
+
+/* ========================================================================= */
+int zlib_deflate(
+ z_streamp strm,
+ int flush
+)
+{
+ int old_flush; /* value of flush param for previous deflate call */
+ deflate_state *s;
+
+ if (strm == NULL || strm->state == NULL ||
+ flush > Z_FINISH || flush < 0) {
+ return Z_STREAM_ERROR;
+ }
+ s = (deflate_state *) strm->state;
+
+ if ((strm->next_in == NULL && strm->avail_in != 0) ||
+ (s->status == FINISH_STATE && flush != Z_FINISH)) {
+ return Z_STREAM_ERROR;
+ }
+ if (strm->avail_out == 0) return Z_BUF_ERROR;
+
+ s->strm = strm; /* just in case */
+ old_flush = s->last_flush;
+ s->last_flush = flush;
+
+ /* Write the zlib header */
+ if (s->status == INIT_STATE) {
+
+ uInt header = (Z_DEFLATED + ((s->w_bits-8)<<4)) << 8;
+ uInt level_flags = (s->level-1) >> 1;
+
+ if (level_flags > 3) level_flags = 3;
+ header |= (level_flags << 6);
+ if (s->strstart != 0) header |= PRESET_DICT;
+ header += 31 - (header % 31);
+
+ s->status = BUSY_STATE;
+ putShortMSB(s, header);
+
+ /* Save the adler32 of the preset dictionary: */
+ if (s->strstart != 0) {
+ putShortMSB(s, (uInt)(strm->adler >> 16));
+ putShortMSB(s, (uInt)(strm->adler & 0xffff));
+ }
+ strm->adler = 1L;
+ }
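+
+ /*
+ * Worked example (illustrative comment): with windowBits = 15 and
+ * level = 6, header starts as 0x7800; level_flags = 2 raises it to
+ * 0x7880, and the check bits round it up to 0x789C, which is
+ * divisible by 31, i.e. the familiar 0x78 0x9C zlib header bytes.
+ */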
+
+ /* Flush as much pending output as possible */
+ if (s->pending != 0) {
+ flush_pending(strm);
+ if (strm->avail_out == 0) {
+ /* Since avail_out is 0, deflate will be called again with
+ * more output space, but possibly with both pending and
+ * avail_in equal to zero. There won't be anything to do,
+ * but this is not an error situation so make sure we
+ * return OK instead of BUF_ERROR at next call of deflate:
+ */
+ s->last_flush = -1;
+ return Z_OK;
+ }
+
+ /* Make sure there is something to do and avoid duplicate consecutive
+ * flushes. For repeated and useless calls with Z_FINISH, we keep
+ * returning Z_STREAM_END instead of Z_BUF_ERROR.
+ */
+ } else if (strm->avail_in == 0 && flush <= old_flush &&
+ flush != Z_FINISH) {
+ return Z_BUF_ERROR;
+ }
+
+ /* User must not provide more input after the first FINISH: */
+ if (s->status == FINISH_STATE && strm->avail_in != 0) {
+ return Z_BUF_ERROR;
+ }
+
+ /* Start a new block or continue the current one.
+ */
+ if (strm->avail_in != 0 || s->lookahead != 0 ||
+ (flush != Z_NO_FLUSH && s->status != FINISH_STATE)) {
+ block_state bstate;
+
+ bstate = DEFLATE_HOOK(strm, flush, &bstate) ? bstate :
+ (*(configuration_table[s->level].func))(s, flush);
+
+ if (bstate == finish_started || bstate == finish_done) {
+ s->status = FINISH_STATE;
+ }
+ if (bstate == need_more || bstate == finish_started) {
+ if (strm->avail_out == 0) {
+ s->last_flush = -1; /* avoid BUF_ERROR next call, see above */
+ }
+ return Z_OK;
+ /* If flush != Z_NO_FLUSH && avail_out == 0, the next call
+ * of deflate should use the same flush parameter to make sure
+ * that the flush is complete. So we don't have to output an
+ * empty block here, this will be done at next call. This also
+ * ensures that for a very small output buffer, we emit at most
+ * one empty block.
+ */
+ }
+ if (bstate == block_done) {
+ if (flush == Z_PARTIAL_FLUSH) {
+ zlib_tr_align(s);
+ } else if (flush == Z_PACKET_FLUSH) {
+ /* Output just the 3-bit `stored' block type value,
+ but not a zero length. */
+ zlib_tr_stored_type_only(s);
+ } else { /* FULL_FLUSH or SYNC_FLUSH */
+ zlib_tr_stored_block(s, (char*)0, 0L, 0);
+ /* For a full flush, this empty block will be recognized
+ * as a special marker by inflate_sync().
+ */
+ if (flush == Z_FULL_FLUSH) {
+ CLEAR_HASH(s); /* forget history */
+ }
+ }
+ flush_pending(strm);
+ if (strm->avail_out == 0) {
+ s->last_flush = -1; /* avoid BUF_ERROR at next call, see above */
+ return Z_OK;
+ }
+ }
+ }
+ Assert(strm->avail_out > 0, "bug2");
+
+ if (flush != Z_FINISH) return Z_OK;
+
+ if (!s->noheader) {
+ /* Write zlib trailer (adler32) */
+ putShortMSB(s, (uInt)(strm->adler >> 16));
+ putShortMSB(s, (uInt)(strm->adler & 0xffff));
+ }
+ flush_pending(strm);
+ /* If avail_out is zero, the application will call deflate again
+ * to flush the rest.
+ */
+ if (!s->noheader) {
+ s->noheader = -1; /* write the trailer only once! */
+ }
+ if (s->pending == 0) {
+ Assert(s->bi_valid == 0, "bi_buf not flushed");
+ return Z_STREAM_END;
+ }
+ return Z_OK;
+}
+
+/* ========================================================================= */
+int zlib_deflateEnd(
+ z_streamp strm
+)
+{
+ int status;
+ deflate_state *s;
+
+ if (strm == NULL || strm->state == NULL) return Z_STREAM_ERROR;
+ s = (deflate_state *) strm->state;
+
+ status = s->status;
+ if (status != INIT_STATE && status != BUSY_STATE &&
+ status != FINISH_STATE) {
+ return Z_STREAM_ERROR;
+ }
+
+ strm->state = NULL;
+
+ return status == BUSY_STATE ? Z_DATA_ERROR : Z_OK;
+}
+
+/* ===========================================================================
+ * Read a new buffer from the current input stream, update the adler32
+ * and total number of bytes read. All deflate() input goes through
+ * this function so some applications may wish to modify it to avoid
+ * allocating a large strm->next_in buffer and copying from it.
+ * (See also flush_pending()).
+ */
+static int read_buf(
+ z_streamp strm,
+ Byte *buf,
+ unsigned size
+)
+{
+ unsigned len = strm->avail_in;
+
+ if (len > size) len = size;
+ if (len == 0) return 0;
+
+ strm->avail_in -= len;
+
+ if (DEFLATE_NEED_CHECKSUM(strm) &&
+ !((deflate_state *)(strm->state))->noheader) {
+ strm->adler = zlib_adler32(strm->adler, strm->next_in, len);
+ }
+ memcpy(buf, strm->next_in, len);
+ strm->next_in += len;
+ strm->total_in += len;
+
+ return (int)len;
+}
+
+/* ===========================================================================
+ * Initialize the "longest match" routines for a new zlib stream
+ */
+static void lm_init(
+ deflate_state *s
+)
+{
+ s->window_size = (ulg)2L*s->w_size;
+
+ CLEAR_HASH(s);
+
+ /* Set the default configuration parameters:
+ */
+ s->max_lazy_match = configuration_table[s->level].max_lazy;
+ s->good_match = configuration_table[s->level].good_length;
+ s->nice_match = configuration_table[s->level].nice_length;
+ s->max_chain_length = configuration_table[s->level].max_chain;
+
+ s->strstart = 0;
+ s->block_start = 0L;
+ s->lookahead = 0;
+ s->match_length = s->prev_length = MIN_MATCH-1;
+ s->match_available = 0;
+ s->ins_h = 0;
+}
+
+/* ===========================================================================
+ * Set match_start to the longest match starting at the given string and
+ * return its length. Matches shorter or equal to prev_length are discarded,
+ * in which case the result is equal to prev_length and match_start is
+ * garbage.
+ * IN assertions: cur_match is the head of the hash chain for the current
+ * string (strstart) and its distance is <= MAX_DIST, and prev_length >= 1
+ * OUT assertion: the match length is not greater than s->lookahead.
+ */
+/* For 80x86 and 680x0, an optimized version will be provided in match.asm or
+ * match.S. The code will be functionally equivalent.
+ */
+static uInt longest_match(
+ deflate_state *s,
+ IPos cur_match /* current match */
+)
+{
+ unsigned chain_length = s->max_chain_length;/* max hash chain length */
+ register Byte *scan = s->window + s->strstart; /* current string */
+ register Byte *match; /* matched string */
+ register int len; /* length of current match */
+ int best_len = s->prev_length; /* best match length so far */
+ int nice_match = s->nice_match; /* stop if match long enough */
+ IPos limit = s->strstart > (IPos)MAX_DIST(s) ?
+ s->strstart - (IPos)MAX_DIST(s) : NIL;
+ /* Stop when cur_match becomes <= limit. To simplify the code,
+ * we prevent matches with the string of window index 0.
+ */
+ Pos *prev = s->prev;
+ uInt wmask = s->w_mask;
+
+#ifdef UNALIGNED_OK
+ /* Compare two bytes at a time. Note: this is not always beneficial.
+ * Try with and without -DUNALIGNED_OK to check.
+ */
+ register Byte *strend = s->window + s->strstart + MAX_MATCH - 1;
+ register ush scan_start = *(ush*)scan;
+ register ush scan_end = *(ush*)(scan+best_len-1);
+#else
+ register Byte *strend = s->window + s->strstart + MAX_MATCH;
+ register Byte scan_end1 = scan[best_len-1];
+ register Byte scan_end = scan[best_len];
+#endif
+
+ /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16.
+ * It is easy to get rid of this optimization if necessary.
+ */
+ Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever");
+
+ /* Do not waste too much time if we already have a good match: */
+ if (s->prev_length >= s->good_match) {
+ chain_length >>= 2;
+ }
+ /* Do not look for matches beyond the end of the input. This is necessary
+ * to make deflate deterministic.
+ */
+ if ((uInt)nice_match > s->lookahead) nice_match = s->lookahead;
+
+ Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead");
+
+ do {
+ Assert(cur_match < s->strstart, "no future");
+ match = s->window + cur_match;
+
+ /* Skip to next match if the match length cannot increase
+ * or if the match length is less than 2:
+ */
+#if (defined(UNALIGNED_OK) && MAX_MATCH == 258)
+ /* This code assumes sizeof(unsigned short) == 2. Do not use
+ * UNALIGNED_OK if your compiler uses a different size.
+ */
+ if (*(ush*)(match+best_len-1) != scan_end ||
+ *(ush*)match != scan_start) continue;
+
+ /* It is not necessary to compare scan[2] and match[2] since they are
+ * always equal when the other bytes match, given that the hash keys
+ * are equal and that HASH_BITS >= 8. Compare 2 bytes at a time at
+ * strstart+3, +5, ... up to strstart+257. We check for insufficient
+ * lookahead only every 4th comparison; the 128th check will be made
+ * at strstart+257. If MAX_MATCH-2 is not a multiple of 8, it is
+ * necessary to put more guard bytes at the end of the window, or
+ * to check more often for insufficient lookahead.
+ */
+ Assert(scan[2] == match[2], "scan[2]?");
+ scan++, match++;
+ do {
+ } while (*(ush*)(scan+=2) == *(ush*)(match+=2) &&
+ *(ush*)(scan+=2) == *(ush*)(match+=2) &&
+ *(ush*)(scan+=2) == *(ush*)(match+=2) &&
+ *(ush*)(scan+=2) == *(ush*)(match+=2) &&
+ scan < strend);
+ /* The funny "do {}" generates better code on most compilers */
+
+ /* Here, scan <= window+strstart+257 */
+ Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
+ if (*scan == *match) scan++;
+
+ len = (MAX_MATCH - 1) - (int)(strend-scan);
+ scan = strend - (MAX_MATCH-1);
+
+#else /* UNALIGNED_OK */
+
+ if (match[best_len] != scan_end ||
+ match[best_len-1] != scan_end1 ||
+ *match != *scan ||
+ *++match != scan[1]) continue;
+
+ /* The check at best_len-1 can be removed because it will be made
+ * again later. (This heuristic is not always a win.)
+ * It is not necessary to compare scan[2] and match[2] since they
+ * are always equal when the other bytes match, given that
+ * the hash keys are equal and that HASH_BITS >= 8.
+ */
+ scan += 2, match++;
+ Assert(*scan == *match, "match[2]?");
+
+ /* We check for insufficient lookahead only every 8th comparison;
+ * the 256th check will be made at strstart+258.
+ */
+ do {
+ } while (*++scan == *++match && *++scan == *++match &&
+ *++scan == *++match && *++scan == *++match &&
+ *++scan == *++match && *++scan == *++match &&
+ *++scan == *++match && *++scan == *++match &&
+ scan < strend);
+
+ Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
+
+ len = MAX_MATCH - (int)(strend - scan);
+ scan = strend - MAX_MATCH;
+
+#endif /* UNALIGNED_OK */
+
+ if (len > best_len) {
+ s->match_start = cur_match;
+ best_len = len;
+ if (len >= nice_match) break;
+#ifdef UNALIGNED_OK
+ scan_end = *(ush*)(scan+best_len-1);
+#else
+ scan_end1 = scan[best_len-1];
+ scan_end = scan[best_len];
+#endif
+ }
+ } while ((cur_match = prev[cur_match & wmask]) > limit
+ && --chain_length != 0);
+
+ if ((uInt)best_len <= s->lookahead) return best_len;
+ return s->lookahead;
+}
+
+#ifdef DEBUG_ZLIB
+/* ===========================================================================
+ * Check that the match at match_start is indeed a match.
+ */
+static void check_match(
+ deflate_state *s,
+ IPos start,
+ IPos match,
+ int length
+)
+{
+ /* check that the match is indeed a match */
+ if (memcmp((char *)s->window + match,
+ (char *)s->window + start, length) != EQUAL) {
+ fprintf(stderr, " start %u, match %u, length %d\n",
+ start, match, length);
+ do {
+ fprintf(stderr, "%c%c", s->window[match++], s->window[start++]);
+ } while (--length != 0);
+ z_error("invalid match");
+ }
+ if (z_verbose > 1) {
+ fprintf(stderr,"\\[%d,%d]", start-match, length);
+ do { putc(s->window[start++], stderr); } while (--length != 0);
+ }
+}
+#else
+# define check_match(s, start, match, length)
+#endif
+
+/* ===========================================================================
+ * Fill the window when the lookahead becomes insufficient.
+ * Updates strstart and lookahead.
+ *
+ * IN assertion: lookahead < MIN_LOOKAHEAD
+ * OUT assertions: strstart <= window_size-MIN_LOOKAHEAD
+ * At least one byte has been read, or avail_in == 0; reads are
+ * performed for at least two bytes (required for the zip translate_eol
+ * option -- not supported here).
+ */
+static void fill_window(
+ deflate_state *s
+)
+{
+ register unsigned n, m;
+ register Pos *p;
+ unsigned more; /* Amount of free space at the end of the window. */
+ uInt wsize = s->w_size;
+
+ do {
+ more = (unsigned)(s->window_size -(ulg)s->lookahead -(ulg)s->strstart);
+
+ /* Deal with !@#$% 64K limit: */
+ if (more == 0 && s->strstart == 0 && s->lookahead == 0) {
+ more = wsize;
+
+ } else if (more == (unsigned)(-1)) {
+ /* Very unlikely, but possible on 16 bit machine if strstart == 0
+ * and lookahead == 1 (input done one byte at a time)
+ */
+ more--;
+
+ /* If the window is almost full and there is insufficient lookahead,
+ * move the upper half to the lower one to make room in the upper half.
+ */
+ } else if (s->strstart >= wsize+MAX_DIST(s)) {
+
+ memcpy((char *)s->window, (char *)s->window+wsize,
+ (unsigned)wsize);
+ s->match_start -= wsize;
+ s->strstart -= wsize; /* we now have strstart >= MAX_DIST */
+ s->block_start -= (long) wsize;
+
+ /* Slide the hash table (could be avoided with 32 bit values
+ at the expense of memory usage). We slide even when level == 0
+ to keep the hash table consistent if we switch back to level > 0
+ later. (Using level 0 permanently is not an optimal usage of
+ zlib, so we don't care about this pathological case.)
+ */
+ n = s->hash_size;
+ p = &s->head[n];
+ do {
+ m = *--p;
+ *p = (Pos)(m >= wsize ? m-wsize : NIL);
+ } while (--n);
+
+ n = wsize;
+ p = &s->prev[n];
+ do {
+ m = *--p;
+ *p = (Pos)(m >= wsize ? m-wsize : NIL);
+ /* If n is not on any hash chain, prev[n] is garbage but
+ * its value will never be used.
+ */
+ } while (--n);
+ more += wsize;
+ }
+ if (s->strm->avail_in == 0) return;
+
+ /* If there was no sliding:
+ * strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 &&
+ * more == window_size - lookahead - strstart
+ * => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1)
+ * => more >= window_size - 2*WSIZE + 2
+ * In the BIG_MEM or MMAP case (not yet supported),
+ * window_size == input_size + MIN_LOOKAHEAD &&
+ * strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD.
+ * Otherwise, window_size == 2*WSIZE so more >= 2.
+ * If there was sliding, more >= WSIZE. So in all cases, more >= 2.
+ */
+ Assert(more >= 2, "more < 2");
+
+ n = read_buf(s->strm, s->window + s->strstart + s->lookahead, more);
+ s->lookahead += n;
+
+ /* Initialize the hash value now that we have some input: */
+ if (s->lookahead >= MIN_MATCH) {
+ s->ins_h = s->window[s->strstart];
+ UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]);
+#if MIN_MATCH != 3
+ Call UPDATE_HASH() MIN_MATCH-3 more times
+#endif
+ }
+ /* If the whole input has less than MIN_MATCH bytes, ins_h is garbage,
+ * but this is not important since only literal bytes will be emitted.
+ */
+
+ } while (s->lookahead < MIN_LOOKAHEAD && s->strm->avail_in != 0);
+}
+
+/* ===========================================================================
+ * Flush the current block, with given end-of-file flag.
+ * IN assertion: strstart is set to the end of the current match.
+ */
+#define FLUSH_BLOCK_ONLY(s, eof) { \
+ zlib_tr_flush_block(s, (s->block_start >= 0L ? \
+ (char *)&s->window[(unsigned)s->block_start] : \
+ NULL), \
+ (ulg)((long)s->strstart - s->block_start), \
+ (eof)); \
+ s->block_start = s->strstart; \
+ flush_pending(s->strm); \
+ Tracev((stderr,"[FLUSH]")); \
+}
+
+/* Same but force premature exit if necessary. */
+#define FLUSH_BLOCK(s, eof) { \
+ FLUSH_BLOCK_ONLY(s, eof); \
+ if (s->strm->avail_out == 0) return (eof) ? finish_started : need_more; \
+}
+
+/* ===========================================================================
+ * Copy without compression as much as possible from the input stream, return
+ * the current block state.
+ * This function does not insert new strings in the dictionary since
+ * uncompressible data is probably not useful. This function is used
+ * only for the level=0 compression option.
+ * NOTE: this function should be optimized to avoid extra copying from
+ * window to pending_buf.
+ */
+static block_state deflate_stored(
+ deflate_state *s,
+ int flush
+)
+{
+ /* Stored blocks are limited to 0xffff bytes, pending_buf is limited
+ * to pending_buf_size, and each stored block has a 5 byte header:
+ */
+ ulg max_block_size = 0xffff;
+ ulg max_start;
+
+ if (max_block_size > s->pending_buf_size - 5) {
+ max_block_size = s->pending_buf_size - 5;
+ }
+
+ /* Copy as much as possible from input to output: */
+ for (;;) {
+ /* Fill the window as much as possible: */
+ if (s->lookahead <= 1) {
+
+ Assert(s->strstart < s->w_size+MAX_DIST(s) ||
+ s->block_start >= (long)s->w_size, "slide too late");
+
+ fill_window(s);
+ if (s->lookahead == 0 && flush == Z_NO_FLUSH) return need_more;
+
+ if (s->lookahead == 0) break; /* flush the current block */
+ }
+ Assert(s->block_start >= 0L, "block gone");
+
+ s->strstart += s->lookahead;
+ s->lookahead = 0;
+
+ /* Emit a stored block if pending_buf will be full: */
+ max_start = s->block_start + max_block_size;
+ if (s->strstart == 0 || (ulg)s->strstart >= max_start) {
+ /* strstart == 0 is possible when wraparound on 16-bit machine */
+ s->lookahead = (uInt)(s->strstart - max_start);
+ s->strstart = (uInt)max_start;
+ FLUSH_BLOCK(s, 0);
+ }
+ /* Flush if we may have to slide, otherwise block_start may become
+ * negative and the data will be gone:
+ */
+ if (s->strstart - (uInt)s->block_start >= MAX_DIST(s)) {
+ FLUSH_BLOCK(s, 0);
+ }
+ }
+ FLUSH_BLOCK(s, flush == Z_FINISH);
+ return flush == Z_FINISH ? finish_done : block_done;
+}
+
+/* ===========================================================================
+ * Compress as much as possible from the input stream, return the current
+ * block state.
+ * This function does not perform lazy evaluation of matches and inserts
+ * new strings in the dictionary only for unmatched strings or for short
+ * matches. It is used only for the fast compression options.
+ */
+static block_state deflate_fast(
+ deflate_state *s,
+ int flush
+)
+{
+ IPos hash_head = NIL; /* head of the hash chain */
+ int bflush; /* set if current block must be flushed */
+
+ for (;;) {
+ /* Make sure that we always have enough lookahead, except
+ * at the end of the input file. We need MAX_MATCH bytes
+ * for the next match, plus MIN_MATCH bytes to insert the
+ * string following the next match.
+ */
+ if (s->lookahead < MIN_LOOKAHEAD) {
+ fill_window(s);
+ if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
+ return need_more;
+ }
+ if (s->lookahead == 0) break; /* flush the current block */
+ }
+
+ /* Insert the string window[strstart .. strstart+2] in the
+ * dictionary, and set hash_head to the head of the hash chain:
+ */
+ if (s->lookahead >= MIN_MATCH) {
+ INSERT_STRING(s, s->strstart, hash_head);
+ }
+
+ /* Find the longest match, discarding those <= prev_length.
+ * At this point we have always match_length < MIN_MATCH
+ */
+ if (hash_head != NIL && s->strstart - hash_head <= MAX_DIST(s)) {
+ /* To simplify the code, we prevent matches with the string
+ * of window index 0 (in particular we have to avoid a match
+ * of the string with itself at the start of the input file).
+ */
+ if (s->strategy != Z_HUFFMAN_ONLY) {
+ s->match_length = longest_match (s, hash_head);
+ }
+ /* longest_match() sets match_start */
+ }
+ if (s->match_length >= MIN_MATCH) {
+ check_match(s, s->strstart, s->match_start, s->match_length);
+
+ bflush = zlib_tr_tally(s, s->strstart - s->match_start,
+ s->match_length - MIN_MATCH);
+
+ s->lookahead -= s->match_length;
+
+ /* Insert new strings in the hash table only if the match length
+ * is not too large. This saves time but degrades compression.
+ */
+ if (s->match_length <= s->max_insert_length &&
+ s->lookahead >= MIN_MATCH) {
+ s->match_length--; /* string at strstart already in hash table */
+ do {
+ s->strstart++;
+ INSERT_STRING(s, s->strstart, hash_head);
+ /* strstart never exceeds WSIZE-MAX_MATCH, so there are
+ * always MIN_MATCH bytes ahead.
+ */
+ } while (--s->match_length != 0);
+ s->strstart++;
+ } else {
+ s->strstart += s->match_length;
+ s->match_length = 0;
+ s->ins_h = s->window[s->strstart];
+ UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]);
+#if MIN_MATCH != 3
+ Call UPDATE_HASH() MIN_MATCH-3 more times
+#endif
+ /* If lookahead < MIN_MATCH, ins_h is garbage, but it does not
+ * matter since it will be recomputed at next deflate call.
+ */
+ }
+ } else {
+ /* No match, output a literal byte */
+ Tracevv((stderr,"%c", s->window[s->strstart]));
+ bflush = zlib_tr_tally (s, 0, s->window[s->strstart]);
+ s->lookahead--;
+ s->strstart++;
+ }
+ if (bflush) FLUSH_BLOCK(s, 0);
+ }
+ FLUSH_BLOCK(s, flush == Z_FINISH);
+ return flush == Z_FINISH ? finish_done : block_done;
+}
+
+/* ===========================================================================
+ * Same as above, but achieves better compression. We use a lazy
+ * evaluation for matches: a match is finally adopted only if there is
+ * no better match at the next window position.
+ */
+static block_state deflate_slow(
+ deflate_state *s,
+ int flush
+)
+{
+ IPos hash_head = NIL; /* head of hash chain */
+ int bflush; /* set if current block must be flushed */
+
+ /* Process the input block. */
+ for (;;) {
+ /* Make sure that we always have enough lookahead, except
+ * at the end of the input file. We need MAX_MATCH bytes
+ * for the next match, plus MIN_MATCH bytes to insert the
+ * string following the next match.
+ */
+ if (s->lookahead < MIN_LOOKAHEAD) {
+ fill_window(s);
+ if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
+ return need_more;
+ }
+ if (s->lookahead == 0) break; /* flush the current block */
+ }
+
+ /* Insert the string window[strstart .. strstart+2] in the
+ * dictionary, and set hash_head to the head of the hash chain:
+ */
+ if (s->lookahead >= MIN_MATCH) {
+ INSERT_STRING(s, s->strstart, hash_head);
+ }
+
+ /* Find the longest match, discarding those <= prev_length.
+ */
+ s->prev_length = s->match_length, s->prev_match = s->match_start;
+ s->match_length = MIN_MATCH-1;
+
+ if (hash_head != NIL && s->prev_length < s->max_lazy_match &&
+ s->strstart - hash_head <= MAX_DIST(s)) {
+ /* To simplify the code, we prevent matches with the string
+ * of window index 0 (in particular we have to avoid a match
+ * of the string with itself at the start of the input file).
+ */
+ if (s->strategy != Z_HUFFMAN_ONLY) {
+ s->match_length = longest_match (s, hash_head);
+ }
+ /* longest_match() sets match_start */
+
+ if (s->match_length <= 5 && (s->strategy == Z_FILTERED ||
+ (s->match_length == MIN_MATCH &&
+ s->strstart - s->match_start > TOO_FAR))) {
+
+ /* If prev_match is also MIN_MATCH, match_start is garbage
+ * but we will ignore the current match anyway.
+ */
+ s->match_length = MIN_MATCH-1;
+ }
+ }
+ /* If there was a match at the previous step and the current
+ * match is not better, output the previous match:
+ */
+ if (s->prev_length >= MIN_MATCH && s->match_length <= s->prev_length) {
+ uInt max_insert = s->strstart + s->lookahead - MIN_MATCH;
+ /* Do not insert strings in hash table beyond this. */
+
+ check_match(s, s->strstart-1, s->prev_match, s->prev_length);
+
+ bflush = zlib_tr_tally(s, s->strstart -1 - s->prev_match,
+ s->prev_length - MIN_MATCH);
+
+ /* Insert in hash table all strings up to the end of the match.
+ * strstart-1 and strstart are already inserted. If there is not
+ * enough lookahead, the last two strings are not inserted in
+ * the hash table.
+ */
+ s->lookahead -= s->prev_length-1;
+ s->prev_length -= 2;
+ do {
+ if (++s->strstart <= max_insert) {
+ INSERT_STRING(s, s->strstart, hash_head);
+ }
+ } while (--s->prev_length != 0);
+ s->match_available = 0;
+ s->match_length = MIN_MATCH-1;
+ s->strstart++;
+
+ if (bflush) FLUSH_BLOCK(s, 0);
+
+ } else if (s->match_available) {
+ /* If there was no match at the previous position, output a
+ * single literal. If there was a match but the current match
+ * is longer, truncate the previous match to a single literal.
+ */
+ Tracevv((stderr,"%c", s->window[s->strstart-1]));
+ if (zlib_tr_tally (s, 0, s->window[s->strstart-1])) {
+ FLUSH_BLOCK_ONLY(s, 0);
+ }
+ s->strstart++;
+ s->lookahead--;
+ if (s->strm->avail_out == 0) return need_more;
+ } else {
+ /* There is no previous match to compare with, wait for
+ * the next step to decide.
+ */
+ s->match_available = 1;
+ s->strstart++;
+ s->lookahead--;
+ }
+ }
+ Assert (flush != Z_NO_FLUSH, "no flush?");
+ if (s->match_available) {
+ Tracevv((stderr,"%c", s->window[s->strstart-1]));
+ zlib_tr_tally (s, 0, s->window[s->strstart-1]);
+ s->match_available = 0;
+ }
+ FLUSH_BLOCK(s, flush == Z_FINISH);
+ return flush == Z_FINISH ? finish_done : block_done;
+}
+
+int zlib_deflate_workspacesize(int windowBits, int memLevel)
+{
+ if (windowBits < 0) /* undocumented feature: suppress zlib header */
+ windowBits = -windowBits;
+
+ /* Since the return value is typically passed to vmalloc() unchecked... */
+ BUG_ON(memLevel < 1 || memLevel > MAX_MEM_LEVEL || windowBits < 9 ||
+ windowBits > 15);
+
+ return sizeof(deflate_workspace)
+ + zlib_deflate_window_memsize(windowBits)
+ + zlib_deflate_prev_memsize(windowBits)
+ + zlib_deflate_head_memsize(memLevel)
+ + zlib_deflate_overlay_memsize(memLevel);
+}
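+
+/*
+ * Usage sketch (illustrative only, not part of the original file): the
+ * value returned above sizes a caller-allocated workspace, assuming a
+ * kernel context where vmalloc() is available:
+ *
+ * z_stream strm;
+ *
+ * strm.workspace = vmalloc(zlib_deflate_workspacesize(MAX_WBITS,
+ * MAX_MEM_LEVEL));
+ * if (strm.workspace &&
+ * zlib_deflateInit2(&strm, Z_DEFAULT_COMPRESSION, Z_DEFLATED,
+ * MAX_WBITS, MAX_MEM_LEVEL,
+ * Z_DEFAULT_STRATEGY) == Z_OK) {
+ * ... set next_in/avail_in and next_out/avail_out, call
+ * zlib_deflate(&strm, Z_FINISH), then zlib_deflateEnd(&strm) ...
+ * }
+ */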
+
+int zlib_deflate_dfltcc_enabled(void)
+{
+ return DEFLATE_DFLTCC_ENABLED();
+}
diff --git a/lib/zlib_deflate/deflate_syms.c b/lib/zlib_deflate/deflate_syms.c
new file mode 100644
index 000000000..24b740b99
--- /dev/null
+++ b/lib/zlib_deflate/deflate_syms.c
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * linux/lib/zlib_deflate/deflate_syms.c
+ *
+ * Exported symbols for the deflate functionality.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <linux/zlib.h>
+
+EXPORT_SYMBOL(zlib_deflate_workspacesize);
+EXPORT_SYMBOL(zlib_deflate_dfltcc_enabled);
+EXPORT_SYMBOL(zlib_deflate);
+EXPORT_SYMBOL(zlib_deflateInit2);
+EXPORT_SYMBOL(zlib_deflateEnd);
+EXPORT_SYMBOL(zlib_deflateReset);
+MODULE_LICENSE("GPL");
diff --git a/lib/zlib_deflate/deftree.c b/lib/zlib_deflate/deftree.c
new file mode 100644
index 000000000..a4a34da51
--- /dev/null
+++ b/lib/zlib_deflate/deftree.c
@@ -0,0 +1,1059 @@
+/* +++ trees.c */
+/* trees.c -- output deflated data using Huffman coding
+ * Copyright (C) 1995-1996 Jean-loup Gailly
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/*
+ * ALGORITHM
+ *
+ * The "deflation" process uses several Huffman trees. The more
+ * common source values are represented by shorter bit sequences.
+ *
+ * Each code tree is stored in a compressed form which is itself
+ * a Huffman encoding of the lengths of all the code strings (in
+ * ascending order by source values). The actual code strings are
+ * reconstructed from the lengths in the inflate process, as described
+ * in the deflate specification.
+ *
+ * REFERENCES
+ *
+ * Deutsch, L.P., "'Deflate' Compressed Data Format Specification".
+ * Available in ftp.uu.net:/pub/archiving/zip/doc/deflate-1.1.doc
+ *
+ * Storer, James A.
+ * Data Compression: Methods and Theory, pp. 49-50.
+ * Computer Science Press, 1988. ISBN 0-7167-8156-5.
+ *
+ * Sedgewick, R.
+ * Algorithms, p290.
+ * Addison-Wesley, 1983. ISBN 0-201-06672-6.
+ */
+
+/* From: trees.c,v 1.11 1996/07/24 13:41:06 me Exp $ */
+
+/* #include "deflate.h" */
+
+#include <linux/zutil.h>
+#include <linux/bitrev.h>
+#include "defutil.h"
+
+#ifdef DEBUG_ZLIB
+# include <ctype.h>
+#endif
+
+/* ===========================================================================
+ * Constants
+ */
+
+#define MAX_BL_BITS 7
+/* Bit length codes must not exceed MAX_BL_BITS bits */
+
+#define END_BLOCK 256
+/* end of block literal code */
+
+#define REP_3_6 16
+/* repeat previous bit length 3-6 times (2 bits of repeat count) */
+
+#define REPZ_3_10 17
+/* repeat a zero length 3-10 times (3 bits of repeat count) */
+
+#define REPZ_11_138 18
+/* repeat a zero length 11-138 times (7 bits of repeat count) */
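+
+/* Worked example (illustrative, not part of the original comments): a run
+ * of five code lengths of 5 followed by twelve zero lengths is sent as
+ * the length 5 itself, then REP_3_6 with repeat count 4, then
+ * REPZ_11_138 with repeat count 12 (see scan_tree() and send_tree()
+ * below).
+ */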
+
+static const int extra_lbits[LENGTH_CODES] /* extra bits for each length code */
+ = {0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0};
+
+static const int extra_dbits[D_CODES] /* extra bits for each distance code */
+ = {0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13};
+
+static const int extra_blbits[BL_CODES]/* extra bits for each bit length code */
+ = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7};
+
+static const uch bl_order[BL_CODES]
+ = {16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15};
+/* The lengths of the bit length codes are sent in order of decreasing
+ * probability, to avoid transmitting the lengths for unused bit length codes.
+ */
+
+/* ===========================================================================
+ * Local data. These are initialized only once.
+ */
+
+static ct_data static_ltree[L_CODES+2];
+/* The static literal tree. Since the bit lengths are imposed, there is no
+ * need for the L_CODES extra codes used during heap construction. However,
+ * the codes 286 and 287 are needed to build a canonical tree (see zlib_tr_init
+ * below).
+ */
+
+static ct_data static_dtree[D_CODES];
+/* The static distance tree. (Actually a trivial tree since all codes use
+ * 5 bits.)
+ */
+
+static uch dist_code[512];
+/* distance codes. The first 256 values correspond to the distances
+ * 3 .. 258, the last 256 values correspond to the top 8 bits of
+ * the 15 bit distances.
+ */
+
+static uch length_code[MAX_MATCH-MIN_MATCH+1];
+/* length code for each normalized match length (0 == MIN_MATCH) */
+
+static int base_length[LENGTH_CODES];
+/* First normalized length for each code (0 = MIN_MATCH) */
+
+static int base_dist[D_CODES];
+/* First normalized distance for each code (0 = distance of 1) */
+
+struct static_tree_desc_s {
+ const ct_data *static_tree; /* static tree or NULL */
+ const int *extra_bits; /* extra bits for each code or NULL */
+ int extra_base; /* base index for extra_bits */
+ int elems; /* max number of elements in the tree */
+ int max_length; /* max bit length for the codes */
+};
+
+static static_tree_desc static_l_desc =
+{static_ltree, extra_lbits, LITERALS+1, L_CODES, MAX_BITS};
+
+static static_tree_desc static_d_desc =
+{static_dtree, extra_dbits, 0, D_CODES, MAX_BITS};
+
+static static_tree_desc static_bl_desc =
+{(const ct_data *)0, extra_blbits, 0, BL_CODES, MAX_BL_BITS};
+
+/* ===========================================================================
+ * Local (static) routines in this file.
+ */
+
+static void tr_static_init (void);
+static void init_block (deflate_state *s);
+static void pqdownheap (deflate_state *s, ct_data *tree, int k);
+static void gen_bitlen (deflate_state *s, tree_desc *desc);
+static void gen_codes (ct_data *tree, int max_code, ush *bl_count);
+static void build_tree (deflate_state *s, tree_desc *desc);
+static void scan_tree (deflate_state *s, ct_data *tree, int max_code);
+static void send_tree (deflate_state *s, ct_data *tree, int max_code);
+static int build_bl_tree (deflate_state *s);
+static void send_all_trees (deflate_state *s, int lcodes, int dcodes,
+ int blcodes);
+static void compress_block (deflate_state *s, ct_data *ltree,
+ ct_data *dtree);
+static void set_data_type (deflate_state *s);
+static void bi_flush (deflate_state *s);
+static void copy_block (deflate_state *s, char *buf, unsigned len,
+ int header);
+
+#ifndef DEBUG_ZLIB
+# define send_code(s, c, tree) send_bits(s, tree[c].Code, tree[c].Len)
+ /* Send a code of the given tree. c and tree must not have side effects */
+
+#else /* DEBUG_ZLIB */
+# define send_code(s, c, tree) \
+ { if (z_verbose>2) fprintf(stderr,"\ncd %3d ",(c)); \
+ send_bits(s, tree[c].Code, tree[c].Len); }
+#endif
+
+#define d_code(dist) \
+ ((dist) < 256 ? dist_code[dist] : dist_code[256+((dist)>>7)])
+/* Mapping from a distance to a distance code. dist is the distance - 1 and
+ * must not have side effects. dist_code[256] and dist_code[257] are never
+ * used.
+ */
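+
+/* Worked example (illustrative): for a match distance of 1000, dist is
+ * 999; since 999 >= 256, the lookup is dist_code[256 + (999 >> 7)], i.e.
+ * dist_code[263], so only the top bits of a large distance select its
+ * distance code.
+ */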
+
+/* ===========================================================================
+ * Initialize the various 'constant' tables. In a multi-threaded environment,
+ * this function may be called by two threads concurrently, but this is
+ * harmless since both invocations do exactly the same thing.
+ */
+static void tr_static_init(void)
+{
+ static int static_init_done;
+ int n; /* iterates over tree elements */
+ int bits; /* bit counter */
+ int length; /* length value */
+ int code; /* code value */
+ int dist; /* distance index */
+ ush bl_count[MAX_BITS+1];
+ /* number of codes at each bit length for an optimal tree */
+
+ if (static_init_done) return;
+
+ /* Initialize the mapping length (0..255) -> length code (0..28) */
+ length = 0;
+ for (code = 0; code < LENGTH_CODES-1; code++) {
+ base_length[code] = length;
+ for (n = 0; n < (1<<extra_lbits[code]); n++) {
+ length_code[length++] = (uch)code;
+ }
+ }
+ Assert (length == 256, "tr_static_init: length != 256");
+ /* Note that the length 255 (match length 258) can be represented
+ * in two different ways: code 284 + 5 bits or code 285, so we
+ * overwrite length_code[255] to use the best encoding:
+ */
+ length_code[length-1] = (uch)code;
+
+ /* Initialize the mapping dist (0..32K) -> dist code (0..29) */
+ dist = 0;
+ for (code = 0 ; code < 16; code++) {
+ base_dist[code] = dist;
+ for (n = 0; n < (1<<extra_dbits[code]); n++) {
+ dist_code[dist++] = (uch)code;
+ }
+ }
+ Assert (dist == 256, "tr_static_init: dist != 256");
+ dist >>= 7; /* from now on, all distances are divided by 128 */
+ for ( ; code < D_CODES; code++) {
+ base_dist[code] = dist << 7;
+ for (n = 0; n < (1<<(extra_dbits[code]-7)); n++) {
+ dist_code[256 + dist++] = (uch)code;
+ }
+ }
+ Assert (dist == 256, "tr_static_init: 256+dist != 512");
+
+ /* Construct the codes of the static literal tree */
+ for (bits = 0; bits <= MAX_BITS; bits++) bl_count[bits] = 0;
+ n = 0;
+ while (n <= 143) static_ltree[n++].Len = 8, bl_count[8]++;
+ while (n <= 255) static_ltree[n++].Len = 9, bl_count[9]++;
+ while (n <= 279) static_ltree[n++].Len = 7, bl_count[7]++;
+ while (n <= 287) static_ltree[n++].Len = 8, bl_count[8]++;
+ /* Codes 286 and 287 do not exist, but we must include them in the
+ * tree construction to get a canonical Huffman tree (longest code
+ * all ones)
+ */
+ gen_codes((ct_data *)static_ltree, L_CODES+1, bl_count);
+
+ /* The static distance tree is trivial: */
+ for (n = 0; n < D_CODES; n++) {
+ static_dtree[n].Len = 5;
+ static_dtree[n].Code = bitrev32((u32)n) >> (32 - 5);
+ }
+ static_init_done = 1;
+}
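+
+/* Illustrative note (not part of the original source): after this
+ * initialization, a match length of 18 (normalized length 15) maps to
+ * length_code[15] == 11, with base_length[11] == 14 and
+ * extra_lbits[11] == 1, i.e. literal/length code 268 followed by one
+ * extra bit.
+ */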
+
+/* ===========================================================================
+ * Initialize the tree data structures for a new zlib stream.
+ */
+void zlib_tr_init(
+ deflate_state *s
+)
+{
+ tr_static_init();
+
+ s->compressed_len = 0L;
+
+ s->l_desc.dyn_tree = s->dyn_ltree;
+ s->l_desc.stat_desc = &static_l_desc;
+
+ s->d_desc.dyn_tree = s->dyn_dtree;
+ s->d_desc.stat_desc = &static_d_desc;
+
+ s->bl_desc.dyn_tree = s->bl_tree;
+ s->bl_desc.stat_desc = &static_bl_desc;
+
+ s->bi_buf = 0;
+ s->bi_valid = 0;
+ s->last_eob_len = 8; /* enough lookahead for inflate */
+#ifdef DEBUG_ZLIB
+ s->bits_sent = 0L;
+#endif
+
+ /* Initialize the first block of the first file: */
+ init_block(s);
+}
+
+/* ===========================================================================
+ * Initialize a new block.
+ */
+static void init_block(
+ deflate_state *s
+)
+{
+ int n; /* iterates over tree elements */
+
+ /* Initialize the trees. */
+ for (n = 0; n < L_CODES; n++) s->dyn_ltree[n].Freq = 0;
+ for (n = 0; n < D_CODES; n++) s->dyn_dtree[n].Freq = 0;
+ for (n = 0; n < BL_CODES; n++) s->bl_tree[n].Freq = 0;
+
+ s->dyn_ltree[END_BLOCK].Freq = 1;
+ s->opt_len = s->static_len = 0L;
+ s->last_lit = s->matches = 0;
+}
+
+#define SMALLEST 1
+/* Index within the heap array of least frequent node in the Huffman tree */
+
+
+/* ===========================================================================
+ * Remove the smallest element from the heap and recreate the heap with
+ * one less element. Updates heap and heap_len.
+ */
+#define pqremove(s, tree, top) \
+{\
+ top = s->heap[SMALLEST]; \
+ s->heap[SMALLEST] = s->heap[s->heap_len--]; \
+ pqdownheap(s, tree, SMALLEST); \
+}
+
+/* ===========================================================================
+ * Compares two subtrees, using the tree depth as tie breaker when
+ * the subtrees have equal frequency. This minimizes the worst case length.
+ */
+#define smaller(tree, n, m, depth) \
+ (tree[n].Freq < tree[m].Freq || \
+ (tree[n].Freq == tree[m].Freq && depth[n] <= depth[m]))
+
+/* ===========================================================================
+ * Restore the heap property by moving down the tree starting at node k,
+ * exchanging a node with the smallest of its two sons if necessary, stopping
+ * when the heap property is re-established (each father smaller than its
+ * two sons).
+ */
+static void pqdownheap(
+ deflate_state *s,
+ ct_data *tree, /* the tree to restore */
+ int k /* node to move down */
+)
+{
+ int v = s->heap[k];
+ int j = k << 1; /* left son of k */
+ while (j <= s->heap_len) {
+ /* Set j to the smallest of the two sons: */
+ if (j < s->heap_len &&
+ smaller(tree, s->heap[j+1], s->heap[j], s->depth)) {
+ j++;
+ }
+ /* Exit if v is smaller than both sons */
+ if (smaller(tree, v, s->heap[j], s->depth)) break;
+
+ /* Exchange v with the smallest son */
+ s->heap[k] = s->heap[j]; k = j;
+
+ /* And continue down the tree, setting j to the left son of k */
+ j <<= 1;
+ }
+ s->heap[k] = v;
+}
+
+/* ===========================================================================
+ * Compute the optimal bit lengths for a tree and update the total bit length
+ * for the current block.
+ * IN assertion: the fields freq and dad are set, heap[heap_max] and
+ * above are the tree nodes sorted by increasing frequency.
+ * OUT assertions: the field len is set to the optimal bit length, the
+ * array bl_count contains the frequencies for each bit length.
+ * The length opt_len is updated; static_len is also updated if stree is
+ * not null.
+ */
+static void gen_bitlen(
+ deflate_state *s,
+ tree_desc *desc /* the tree descriptor */
+)
+{
+ ct_data *tree = desc->dyn_tree;
+ int max_code = desc->max_code;
+ const ct_data *stree = desc->stat_desc->static_tree;
+ const int *extra = desc->stat_desc->extra_bits;
+ int base = desc->stat_desc->extra_base;
+ int max_length = desc->stat_desc->max_length;
+ int h; /* heap index */
+ int n, m; /* iterate over the tree elements */
+ int bits; /* bit length */
+ int xbits; /* extra bits */
+ ush f; /* frequency */
+ int overflow = 0; /* number of elements with bit length too large */
+
+ for (bits = 0; bits <= MAX_BITS; bits++) s->bl_count[bits] = 0;
+
+ /* In a first pass, compute the optimal bit lengths (which may
+ * overflow in the case of the bit length tree).
+ */
+ tree[s->heap[s->heap_max]].Len = 0; /* root of the heap */
+
+ for (h = s->heap_max+1; h < HEAP_SIZE; h++) {
+ n = s->heap[h];
+ bits = tree[tree[n].Dad].Len + 1;
+ if (bits > max_length) bits = max_length, overflow++;
+ tree[n].Len = (ush)bits;
+ /* We overwrite tree[n].Dad which is no longer needed */
+
+ if (n > max_code) continue; /* not a leaf node */
+
+ s->bl_count[bits]++;
+ xbits = 0;
+ if (n >= base) xbits = extra[n-base];
+ f = tree[n].Freq;
+ s->opt_len += (ulg)f * (bits + xbits);
+ if (stree) s->static_len += (ulg)f * (stree[n].Len + xbits);
+ }
+ if (overflow == 0) return;
+
+ Trace((stderr,"\nbit length overflow\n"));
+ /* This happens for example on obj2 and pic of the Calgary corpus */
+
+ /* Find the first bit length which could increase: */
+ do {
+ bits = max_length-1;
+ while (s->bl_count[bits] == 0) bits--;
+ s->bl_count[bits]--; /* move one leaf down the tree */
+ s->bl_count[bits+1] += 2; /* move one overflow item as its brother */
+ s->bl_count[max_length]--;
+ /* The brother of the overflow item also moves one step up,
+ * but this does not affect bl_count[max_length]
+ */
+ overflow -= 2;
+ } while (overflow > 0);
+
+ /* Now recompute all bit lengths, scanning in increasing frequency.
+ * h is still equal to HEAP_SIZE. (It is simpler to reconstruct all
+ * lengths instead of fixing only the wrong ones. This idea is taken
+ * from 'ar' written by Haruhiko Okumura.)
+ */
+ for (bits = max_length; bits != 0; bits--) {
+ n = s->bl_count[bits];
+ while (n != 0) {
+ m = s->heap[--h];
+ if (m > max_code) continue;
+ if (tree[m].Len != (unsigned) bits) {
+ Trace((stderr,"code %d bits %d->%d\n", m, tree[m].Len, bits));
+ s->opt_len += ((long)bits - (long)tree[m].Len)
+ *(long)tree[m].Freq;
+ tree[m].Len = (ush)bits;
+ }
+ n--;
+ }
+ }
+}
+
+/* ===========================================================================
+ * Generate the codes for a given tree and bit counts (which need not be
+ * optimal).
+ * IN assertion: the array bl_count contains the bit length statistics for
+ * the given tree and the field len is set for all tree elements.
+ * OUT assertion: the field code is set for all tree elements of non
+ * zero code length.
+ */
+static void gen_codes(
+ ct_data *tree, /* the tree to decorate */
+ int max_code, /* largest code with non zero frequency */
+ ush *bl_count /* number of codes at each bit length */
+)
+{
+ ush next_code[MAX_BITS+1]; /* next code value for each bit length */
+ ush code = 0; /* running code value */
+ int bits; /* bit index */
+ int n; /* code index */
+
+ /* The distribution counts are first used to generate the code values
+ * without bit reversal.
+ */
+ for (bits = 1; bits <= MAX_BITS; bits++) {
+ next_code[bits] = code = (code + bl_count[bits-1]) << 1;
+ }
+ /* Check that the bit counts in bl_count are consistent. The last code
+ * must be all ones.
+ */
+ Assert (code + bl_count[MAX_BITS]-1 == (1<<MAX_BITS)-1,
+ "inconsistent bit counts");
+ Tracev((stderr,"\ngen_codes: max_code %d ", max_code));
+
+ for (n = 0; n <= max_code; n++) {
+ int len = tree[n].Len;
+ if (len == 0) continue;
+ /* Now reverse the bits */
+ tree[n].Code = bitrev32((u32)(next_code[len]++)) >> (32 - len);
+
+ Tracecv(tree != static_ltree, (stderr,"\nn %3d %c l %2d c %4x (%x) ",
+ n, (isgraph(n) ? n : ' '), len, tree[n].Code, next_code[len]-1));
+ }
+}
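+
+/* Worked example (illustrative, truncated to 4-bit codes for brevity):
+ * with bl_count[1..4] == {0, 1, 5, 2}, the loop above yields
+ * next_code[2] == 0, next_code[3] == 2 and next_code[4] == 14, so the
+ * two 4-bit codes are 1110 and 1111 -- the last code is all ones, as
+ * the consistency Assert demands.
+ */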
+
+/* ===========================================================================
+ * Construct one Huffman tree and assign the code bit strings and lengths.
+ * Update the total bit length for the current block.
+ * IN assertion: the field freq is set for all tree elements.
+ * OUT assertions: the fields len and code are set to the optimal bit length
+ * and corresponding code. The length opt_len is updated; static_len is
+ * also updated if stree is not null. The field max_code is set.
+ */
+static void build_tree(
+ deflate_state *s,
+ tree_desc *desc /* the tree descriptor */
+)
+{
+ ct_data *tree = desc->dyn_tree;
+ const ct_data *stree = desc->stat_desc->static_tree;
+ int elems = desc->stat_desc->elems;
+ int n, m; /* iterate over heap elements */
+ int max_code = -1; /* largest code with non zero frequency */
+ int node; /* new node being created */
+
+ /* Construct the initial heap, with least frequent element in
+ * heap[SMALLEST]. The sons of heap[n] are heap[2*n] and heap[2*n+1].
+ * heap[0] is not used.
+ */
+ s->heap_len = 0, s->heap_max = HEAP_SIZE;
+
+ for (n = 0; n < elems; n++) {
+ if (tree[n].Freq != 0) {
+ s->heap[++(s->heap_len)] = max_code = n;
+ s->depth[n] = 0;
+ } else {
+ tree[n].Len = 0;
+ }
+ }
+
+ /* The pkzip format requires that at least one distance code exists,
+ * and that at least one bit should be sent even if there is only one
+ * possible code. So to avoid special checks later on we force at least
+ * two codes of non zero frequency.
+ */
+ while (s->heap_len < 2) {
+ node = s->heap[++(s->heap_len)] = (max_code < 2 ? ++max_code : 0);
+ tree[node].Freq = 1;
+ s->depth[node] = 0;
+ s->opt_len--; if (stree) s->static_len -= stree[node].Len;
+ /* node is 0 or 1 so it does not have extra bits */
+ }
+ desc->max_code = max_code;
+
+ /* The elements heap[heap_len/2+1 .. heap_len] are leaves of the tree,
+ * establish sub-heaps of increasing lengths:
+ */
+ for (n = s->heap_len/2; n >= 1; n--) pqdownheap(s, tree, n);
+
+ /* Construct the Huffman tree by repeatedly combining the least two
+ * frequent nodes.
+ */
+ node = elems; /* next internal node of the tree */
+ do {
+ pqremove(s, tree, n); /* n = node of least frequency */
+ m = s->heap[SMALLEST]; /* m = node of next least frequency */
+
+ s->heap[--(s->heap_max)] = n; /* keep the nodes sorted by frequency */
+ s->heap[--(s->heap_max)] = m;
+
+ /* Create a new node father of n and m */
+ tree[node].Freq = tree[n].Freq + tree[m].Freq;
+ s->depth[node] = (uch) (max(s->depth[n], s->depth[m]) + 1);
+ tree[n].Dad = tree[m].Dad = (ush)node;
+#ifdef DUMP_BL_TREE
+ if (tree == s->bl_tree) {
+ fprintf(stderr,"\nnode %d(%d), sons %d(%d) %d(%d)",
+ node, tree[node].Freq, n, tree[n].Freq, m, tree[m].Freq);
+ }
+#endif
+ /* and insert the new node in the heap */
+ s->heap[SMALLEST] = node++;
+ pqdownheap(s, tree, SMALLEST);
+
+ } while (s->heap_len >= 2);
+
+ s->heap[--(s->heap_max)] = s->heap[SMALLEST];
+
+ /* At this point, the fields freq and dad are set. We can now
+ * generate the bit lengths.
+ */
+ gen_bitlen(s, (tree_desc *)desc);
+
+ /* The field len is now set, we can generate the bit codes */
+ gen_codes ((ct_data *)tree, max_code, s->bl_count);
+}
+
+/* ===========================================================================
+ * Scan a literal or distance tree to determine the frequencies of the codes
+ * in the bit length tree.
+ */
+static void scan_tree(
+ deflate_state *s,
+ ct_data *tree, /* the tree to be scanned */
+ int max_code /* and its largest code of non zero frequency */
+)
+{
+ int n; /* iterates over all tree elements */
+ int prevlen = -1; /* last emitted length */
+ int curlen; /* length of current code */
+ int nextlen = tree[0].Len; /* length of next code */
+ int count = 0; /* repeat count of the current code */
+ int max_count = 7; /* max repeat count */
+ int min_count = 4; /* min repeat count */
+
+ if (nextlen == 0) max_count = 138, min_count = 3;
+ tree[max_code+1].Len = (ush)0xffff; /* guard */
+
+ for (n = 0; n <= max_code; n++) {
+ curlen = nextlen; nextlen = tree[n+1].Len;
+ if (++count < max_count && curlen == nextlen) {
+ continue;
+ } else if (count < min_count) {
+ s->bl_tree[curlen].Freq += count;
+ } else if (curlen != 0) {
+ if (curlen != prevlen) s->bl_tree[curlen].Freq++;
+ s->bl_tree[REP_3_6].Freq++;
+ } else if (count <= 10) {
+ s->bl_tree[REPZ_3_10].Freq++;
+ } else {
+ s->bl_tree[REPZ_11_138].Freq++;
+ }
+ count = 0; prevlen = curlen;
+ if (nextlen == 0) {
+ max_count = 138, min_count = 3;
+ } else if (curlen == nextlen) {
+ max_count = 6, min_count = 3;
+ } else {
+ max_count = 7, min_count = 4;
+ }
+ }
+}
+
+/* ===========================================================================
+ * Send a literal or distance tree in compressed form, using the codes in
+ * bl_tree.
+ */
+static void send_tree(
+ deflate_state *s,
+ ct_data *tree, /* the tree to be sent */
+ int max_code /* and its largest code of non zero frequency */
+)
+{
+ int n; /* iterates over all tree elements */
+ int prevlen = -1; /* last emitted length */
+ int curlen; /* length of current code */
+ int nextlen = tree[0].Len; /* length of next code */
+ int count = 0; /* repeat count of the current code */
+ int max_count = 7; /* max repeat count */
+ int min_count = 4; /* min repeat count */
+
+ /* tree[max_code+1].Len = -1; */ /* guard already set */
+ if (nextlen == 0) max_count = 138, min_count = 3;
+
+ for (n = 0; n <= max_code; n++) {
+ curlen = nextlen; nextlen = tree[n+1].Len;
+ if (++count < max_count && curlen == nextlen) {
+ continue;
+ } else if (count < min_count) {
+ do { send_code(s, curlen, s->bl_tree); } while (--count != 0);
+
+ } else if (curlen != 0) {
+ if (curlen != prevlen) {
+ send_code(s, curlen, s->bl_tree); count--;
+ }
+ Assert(count >= 3 && count <= 6, " 3_6?");
+ send_code(s, REP_3_6, s->bl_tree); send_bits(s, count-3, 2);
+
+ } else if (count <= 10) {
+ send_code(s, REPZ_3_10, s->bl_tree); send_bits(s, count-3, 3);
+
+ } else {
+ send_code(s, REPZ_11_138, s->bl_tree); send_bits(s, count-11, 7);
+ }
+ count = 0; prevlen = curlen;
+ if (nextlen == 0) {
+ max_count = 138, min_count = 3;
+ } else if (curlen == nextlen) {
+ max_count = 6, min_count = 3;
+ } else {
+ max_count = 7, min_count = 4;
+ }
+ }
+}
+
+/* ===========================================================================
+ * Construct the Huffman tree for the bit lengths and return the index in
+ * bl_order of the last bit length code to send.
+ */
+static int build_bl_tree(
+ deflate_state *s
+)
+{
+ int max_blindex; /* index of last bit length code of non zero freq */
+
+ /* Determine the bit length frequencies for literal and distance trees */
+ scan_tree(s, (ct_data *)s->dyn_ltree, s->l_desc.max_code);
+ scan_tree(s, (ct_data *)s->dyn_dtree, s->d_desc.max_code);
+
+ /* Build the bit length tree: */
+ build_tree(s, (tree_desc *)(&(s->bl_desc)));
+ /* opt_len now includes the length of the tree representations, except
+ * the lengths of the bit lengths codes and the 5+5+4 bits for the counts.
+ */
+
+ /* Determine the number of bit length codes to send. The pkzip format
+ * requires that at least 4 bit length codes be sent. (appnote.txt says
+ * 3 but the actual value used is 4.)
+ */
+ for (max_blindex = BL_CODES-1; max_blindex >= 3; max_blindex--) {
+ if (s->bl_tree[bl_order[max_blindex]].Len != 0) break;
+ }
+ /* Update opt_len to include the bit length tree and counts */
+ s->opt_len += 3*(max_blindex+1) + 5+5+4;
+ Tracev((stderr, "\ndyn trees: dyn %ld, stat %ld",
+ s->opt_len, s->static_len));
+
+ return max_blindex;
+}
+
+/* ===========================================================================
+ * Send the header for a block using dynamic Huffman trees: the counts, the
+ * lengths of the bit length codes, the literal tree and the distance tree.
+ * IN assertion: lcodes >= 257, dcodes >= 1, blcodes >= 4.
+ */
+static void send_all_trees(
+ deflate_state *s,
+ int lcodes, /* number of literal/length codes */
+ int dcodes, /* number of distance codes */
+ int blcodes /* number of bit length codes */
+)
+{
+ int rank; /* index in bl_order */
+
+ Assert (lcodes >= 257 && dcodes >= 1 && blcodes >= 4, "not enough codes");
+ Assert (lcodes <= L_CODES && dcodes <= D_CODES && blcodes <= BL_CODES,
+ "too many codes");
+ Tracev((stderr, "\nbl counts: "));
+ send_bits(s, lcodes-257, 5); /* not +255 as stated in appnote.txt */
+ send_bits(s, dcodes-1, 5);
+ send_bits(s, blcodes-4, 4); /* not -3 as stated in appnote.txt */
+ for (rank = 0; rank < blcodes; rank++) {
+ Tracev((stderr, "\nbl code %2d ", bl_order[rank]));
+ send_bits(s, s->bl_tree[bl_order[rank]].Len, 3);
+ }
+ Tracev((stderr, "\nbl tree: sent %ld", s->bits_sent));
+
+ send_tree(s, (ct_data *)s->dyn_ltree, lcodes-1); /* literal tree */
+ Tracev((stderr, "\nlit tree: sent %ld", s->bits_sent));
+
+ send_tree(s, (ct_data *)s->dyn_dtree, dcodes-1); /* distance tree */
+ Tracev((stderr, "\ndist tree: sent %ld", s->bits_sent));
+}
+
+/* ===========================================================================
+ * Send a stored block
+ */
+void zlib_tr_stored_block(
+ deflate_state *s,
+ char *buf, /* input block */
+ ulg stored_len, /* length of input block */
+ int eof /* true if this is the last block for a file */
+)
+{
+ send_bits(s, (STORED_BLOCK<<1)+eof, 3); /* send block type */
+ s->compressed_len = (s->compressed_len + 3 + 7) & (ulg)~7L;
+ s->compressed_len += (stored_len + 4) << 3;
+
+ copy_block(s, buf, (unsigned)stored_len, 1); /* with header */
+}
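+
+/* Arithmetic note (illustrative): the update above first rounds the 3
+ * header bits up to a byte boundary, then counts (stored_len + 4) whole
+ * bytes: LEN and ~LEN (2 bytes each) plus the stored data -- e.g. 104
+ * bytes for a 100-byte stored block.
+ */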
+
+/* Send just the `stored block' type code without any length bytes or data.
+ */
+void zlib_tr_stored_type_only(
+ deflate_state *s
+)
+{
+ send_bits(s, (STORED_BLOCK << 1), 3);
+ bi_windup(s);
+ s->compressed_len = (s->compressed_len + 3) & ~7L;
+}
+
+
+/* ===========================================================================
+ * Send one empty static block to give enough lookahead for inflate.
+ * This takes 10 bits, of which 7 may remain in the bit buffer.
+ * The current inflate code requires 9 bits of lookahead. If the
+ * last two codes for the previous block (real code plus EOB) were coded
+ * on 5 bits or less, inflate may have only 5+3 bits of lookahead to decode
+ * the last real code. In this case we send two empty static blocks instead
+ * of one. (There are no problems if the previous block is stored or fixed.)
+ * To simplify the code, we assume the worst case of last real code encoded
+ * on one bit only.
+ */
+void zlib_tr_align(
+ deflate_state *s
+)
+{
+ send_bits(s, STATIC_TREES<<1, 3);
+ send_code(s, END_BLOCK, static_ltree);
+ s->compressed_len += 10L; /* 3 for block type, 7 for EOB */
+ bi_flush(s);
+ /* Of the 10 bits for the empty block, we have already sent
+ * (10 - bi_valid) bits. The lookahead for the last real code (before
+ * the EOB of the previous block) was thus at least one plus the length
+ * of the EOB plus what we have just sent of the empty static block.
+ */
+ if (1 + s->last_eob_len + 10 - s->bi_valid < 9) {
+ send_bits(s, STATIC_TREES<<1, 3);
+ send_code(s, END_BLOCK, static_ltree);
+ s->compressed_len += 10L;
+ bi_flush(s);
+ }
+ s->last_eob_len = 7;
+}
+
+/* ===========================================================================
+ * Determine the best encoding for the current block: dynamic trees, static
+ * trees or store, and output the encoded block to the zip file. This function
+ * returns the total compressed length for the file so far.
+ */
+ulg zlib_tr_flush_block(
+ deflate_state *s,
+ char *buf, /* input block, or NULL if too old */
+ ulg stored_len, /* length of input block */
+ int eof /* true if this is the last block for a file */
+)
+{
+ ulg opt_lenb, static_lenb; /* opt_len and static_len in bytes */
+ int max_blindex = 0; /* index of last bit length code of non zero freq */
+
+ /* Build the Huffman trees unless a stored block is forced */
+ if (s->level > 0) {
+
+ /* Check if the file is ascii or binary */
+ if (s->data_type == Z_UNKNOWN) set_data_type(s);
+
+ /* Construct the literal and distance trees */
+ build_tree(s, (tree_desc *)(&(s->l_desc)));
+ Tracev((stderr, "\nlit data: dyn %ld, stat %ld", s->opt_len,
+ s->static_len));
+
+ build_tree(s, (tree_desc *)(&(s->d_desc)));
+ Tracev((stderr, "\ndist data: dyn %ld, stat %ld", s->opt_len,
+ s->static_len));
+ /* At this point, opt_len and static_len are the total bit lengths of
+ * the compressed block data, excluding the tree representations.
+ */
+
+ /* Build the bit length tree for the above two trees, and get the index
+ * in bl_order of the last bit length code to send.
+ */
+ max_blindex = build_bl_tree(s);
+
+ /* Determine the best encoding. Compute first the block length in bytes*/
+ opt_lenb = (s->opt_len+3+7)>>3;
+ static_lenb = (s->static_len+3+7)>>3;
+
+ Tracev((stderr, "\nopt %lu(%lu) stat %lu(%lu) stored %lu lit %u ",
+ opt_lenb, s->opt_len, static_lenb, s->static_len, stored_len,
+ s->last_lit));
+
+ if (static_lenb <= opt_lenb) opt_lenb = static_lenb;
+
+ } else {
+ Assert(buf != (char*)0, "lost buf");
+ opt_lenb = static_lenb = stored_len + 5; /* force a stored block */
+ }
+
+ /* If compression failed and this is the first and last block,
+ * and if the .zip file can be seeked (to rewrite the local header),
+ * the whole file is transformed into a stored file:
+ */
+#ifdef STORED_FILE_OK
+# ifdef FORCE_STORED_FILE
+ if (eof && s->compressed_len == 0L) { /* force stored file */
+# else
+ if (stored_len <= opt_lenb && eof && s->compressed_len==0L && seekable()) {
+# endif
+ /* Since LIT_BUFSIZE <= 2*WSIZE, the input data must be there: */
+ if (buf == (char*)0) error ("block vanished");
+
+ copy_block(s, buf, (unsigned)stored_len, 0); /* without header */
+ s->compressed_len = stored_len << 3;
+ s->method = STORED;
+ } else
+#endif /* STORED_FILE_OK */
+
+#ifdef FORCE_STORED
+ if (buf != (char*)0) { /* force stored block */
+#else
+ if (stored_len+4 <= opt_lenb && buf != (char*)0) {
+ /* 4: two words for the lengths */
+#endif
+ /* The test buf != NULL is only necessary if LIT_BUFSIZE > WSIZE.
+ * Otherwise we can't have processed more than WSIZE input bytes since
+ * the last block flush, because compression would have been
+ * successful. If LIT_BUFSIZE <= WSIZE, it is never too late to
+ * transform a block into a stored block.
+ */
+ zlib_tr_stored_block(s, buf, stored_len, eof);
+
+#ifdef FORCE_STATIC
+ } else if (static_lenb >= 0) { /* force static trees */
+#else
+ } else if (static_lenb == opt_lenb) {
+#endif
+ send_bits(s, (STATIC_TREES<<1)+eof, 3);
+ compress_block(s, (ct_data *)static_ltree, (ct_data *)static_dtree);
+ s->compressed_len += 3 + s->static_len;
+ } else {
+ send_bits(s, (DYN_TREES<<1)+eof, 3);
+ send_all_trees(s, s->l_desc.max_code+1, s->d_desc.max_code+1,
+ max_blindex+1);
+ compress_block(s, (ct_data *)s->dyn_ltree, (ct_data *)s->dyn_dtree);
+ s->compressed_len += 3 + s->opt_len;
+ }
+ Assert (s->compressed_len == s->bits_sent, "bad compressed size");
+ init_block(s);
+
+ if (eof) {
+ bi_windup(s);
+ s->compressed_len += 7; /* align on byte boundary */
+ }
+ Tracev((stderr,"\ncomprlen %lu(%lu) ", s->compressed_len>>3,
+ s->compressed_len-7*eof));
+
+ return s->compressed_len >> 3;
+}
+
+/* ===========================================================================
+ * Save the match info and tally the frequency counts. Return true if
+ * the current block must be flushed.
+ */
+int zlib_tr_tally(
+ deflate_state *s,
+ unsigned dist, /* distance of matched string */
+ unsigned lc /* match length-MIN_MATCH or unmatched char (if dist==0) */
+)
+{
+ s->d_buf[s->last_lit] = (ush)dist;
+ s->l_buf[s->last_lit++] = (uch)lc;
+ if (dist == 0) {
+ /* lc is the unmatched char */
+ s->dyn_ltree[lc].Freq++;
+ } else {
+ s->matches++;
+ /* Here, lc is the match length - MIN_MATCH */
+ dist--; /* dist = match distance - 1 */
+ Assert((ush)dist < (ush)MAX_DIST(s) &&
+ (ush)lc <= (ush)(MAX_MATCH-MIN_MATCH) &&
+ (ush)d_code(dist) < (ush)D_CODES, "zlib_tr_tally: bad match");
+
+ s->dyn_ltree[length_code[lc]+LITERALS+1].Freq++;
+ s->dyn_dtree[d_code(dist)].Freq++;
+ }
+
+ /* Try to guess if it is profitable to stop the current block here */
+ if ((s->last_lit & 0xfff) == 0 && s->level > 2) {
+ /* Compute an upper bound for the compressed length */
+ ulg out_length = (ulg)s->last_lit*8L;
+ ulg in_length = (ulg)((long)s->strstart - s->block_start);
+ int dcode;
+ for (dcode = 0; dcode < D_CODES; dcode++) {
+ out_length += (ulg)s->dyn_dtree[dcode].Freq *
+ (5L+extra_dbits[dcode]);
+ }
+ out_length >>= 3;
+ Tracev((stderr,"\nlast_lit %u, in %ld, out ~%ld(%ld%%) ",
+ s->last_lit, in_length, out_length,
+ 100L - out_length*100L/in_length));
+ if (s->matches < s->last_lit/2 && out_length < in_length/2) return 1;
+ }
+ return (s->last_lit == s->lit_bufsize-1);
+ /* We avoid equality with lit_bufsize because of wraparound at 64K
+ * on 16 bit machines and because stored blocks are restricted to
+ * 64K-1 bytes.
+ */
+}
+
+/* ===========================================================================
+ * Send the block data compressed using the given Huffman trees
+ */
+static void compress_block(
+ deflate_state *s,
+ ct_data *ltree, /* literal tree */
+ ct_data *dtree /* distance tree */
+)
+{
+ unsigned dist; /* distance of matched string */
+ int lc; /* match length or unmatched char (if dist == 0) */
+ unsigned lx = 0; /* running index in l_buf */
+ unsigned code; /* the code to send */
+ int extra; /* number of extra bits to send */
+
+ if (s->last_lit != 0) do {
+ dist = s->d_buf[lx];
+ lc = s->l_buf[lx++];
+ if (dist == 0) {
+ send_code(s, lc, ltree); /* send a literal byte */
+ Tracecv(isgraph(lc), (stderr," '%c' ", lc));
+ } else {
+ /* Here, lc is the match length - MIN_MATCH */
+ code = length_code[lc];
+ send_code(s, code+LITERALS+1, ltree); /* send the length code */
+ extra = extra_lbits[code];
+ if (extra != 0) {
+ lc -= base_length[code];
+ send_bits(s, lc, extra); /* send the extra length bits */
+ }
+ dist--; /* dist is now the match distance - 1 */
+ code = d_code(dist);
+ Assert (code < D_CODES, "bad d_code");
+
+ send_code(s, code, dtree); /* send the distance code */
+ extra = extra_dbits[code];
+ if (extra != 0) {
+ dist -= base_dist[code];
+ send_bits(s, dist, extra); /* send the extra distance bits */
+ }
+ } /* literal or match pair ? */
+
+ /* Check that the overlay between pending_buf and d_buf+l_buf is ok: */
+ Assert(s->pending < s->lit_bufsize + 2*lx, "pendingBuf overflow");
+
+ } while (lx < s->last_lit);
+
+ send_code(s, END_BLOCK, ltree);
+ s->last_eob_len = ltree[END_BLOCK].Len;
+}
+
+/* ===========================================================================
+ * Set the data type to ASCII or BINARY, using a crude approximation:
+ * binary if more than 20% of the bytes are <= 6 or >= 128, ascii otherwise.
+ * (The code compares bin_freq > ascii_freq/4; with ascii_freq roughly
+ * total - bin_freq, that is bin_freq > total/5, i.e. the 20% threshold.)
+ * IN assertion: the fields freq of dyn_ltree are set and the total of all
+ * frequencies does not exceed 64K (to fit in an int on 16 bit machines).
+ */
+static void set_data_type(
+ deflate_state *s
+)
+{
+ int n = 0;
+ unsigned ascii_freq = 0;
+ unsigned bin_freq = 0;
+ while (n < 7) bin_freq += s->dyn_ltree[n++].Freq;
+ while (n < 128) ascii_freq += s->dyn_ltree[n++].Freq;
+ while (n < LITERALS) bin_freq += s->dyn_ltree[n++].Freq;
+ s->data_type = (Byte)(bin_freq > (ascii_freq >> 2) ? Z_BINARY : Z_ASCII);
+}
+
+/* ===========================================================================
+ * Copy a stored block, storing first the length and its
+ * one's complement if requested.
+ */
+static void copy_block(
+ deflate_state *s,
+ char *buf, /* the input data */
+ unsigned len, /* its length */
+ int header /* true if block header must be written */
+)
+{
+ bi_windup(s); /* align on byte boundary */
+ s->last_eob_len = 8; /* enough lookahead for inflate */
+
+ if (header) {
+ put_short(s, (ush)len);
+ put_short(s, (ush)~len);
+#ifdef DEBUG_ZLIB
+ s->bits_sent += 2*16;
+#endif
+ }
+#ifdef DEBUG_ZLIB
+ s->bits_sent += (ulg)len<<3;
+#endif
+ /* bundle up the put_byte(s, *buf++) calls */
+ memcpy(&s->pending_buf[s->pending], buf, len);
+ s->pending += len;
+}
+
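
As emitted by copy_block() with header set, a stored block's payload is simply a little-endian LEN, its one's complement NLEN, and the raw bytes. A hedged sketch of the same layout as a standalone helper (hypothetical name):

    #include <stdint.h>
    #include <string.h>

    /* Mirror of copy_block()'s output: LEN, NLEN (both little-endian),
     * then the uncompressed data. The inflater checks LEN == ~NLEN. */
    static size_t emit_stored(uint8_t *dst, const uint8_t *src, uint16_t len)
    {
        dst[0] = len & 0xff;
        dst[1] = len >> 8;
        dst[2] = ~len & 0xff;
        dst[3] = (~len >> 8) & 0xff;
        memcpy(dst + 4, src, len);
        return 4u + len;
    }
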
diff --git a/lib/zlib_deflate/defutil.h b/lib/zlib_deflate/defutil.h
new file mode 100644
index 000000000..4ea40f5a2
--- /dev/null
+++ b/lib/zlib_deflate/defutil.h
@@ -0,0 +1,443 @@
+#ifndef DEFUTIL_H
+#define DEFUTIL_H
+
+#include <linux/zutil.h>
+
+#define Assert(err, str)
+#define Trace(dummy)
+#define Tracev(dummy)
+#define Tracecv(err, dummy)
+#define Tracevv(dummy)
+
+
+
+#define LENGTH_CODES 29
+/* number of length codes, not counting the special END_BLOCK code */
+
+#define LITERALS 256
+/* number of literal bytes 0..255 */
+
+#define L_CODES (LITERALS+1+LENGTH_CODES)
+/* number of Literal or Length codes, including the END_BLOCK code */
+
+#define D_CODES 30
+/* number of distance codes */
+
+#define BL_CODES 19
+/* number of codes used to transfer the bit lengths */
+
+#define HEAP_SIZE (2*L_CODES+1)
+/* maximum heap size */
+
+#define MAX_BITS 15
+/* All codes must not exceed MAX_BITS bits */
+
+#define INIT_STATE 42
+#define BUSY_STATE 113
+#define FINISH_STATE 666
+/* Stream status */
+
+
+/* Data structure describing a single value and its code string. */
+typedef struct ct_data_s {
+ union {
+ ush freq; /* frequency count */
+ ush code; /* bit string */
+ } fc;
+ union {
+ ush dad; /* father node in Huffman tree */
+ ush len; /* length of bit string */
+ } dl;
+} ct_data;
+
+#define Freq fc.freq
+#define Code fc.code
+#define Dad dl.dad
+#define Len dl.len
+
+typedef struct static_tree_desc_s static_tree_desc;
+
+typedef struct tree_desc_s {
+ ct_data *dyn_tree; /* the dynamic tree */
+ int max_code; /* largest code with non zero frequency */
+ static_tree_desc *stat_desc; /* the corresponding static tree */
+} tree_desc;
+
+typedef ush Pos;
+typedef unsigned IPos;
+
+/* A Pos is an index in the character window. We use short instead of int to
+ * save space in the various tables. IPos is used only for parameter passing.
+ */
+
+typedef struct deflate_state {
+ z_streamp strm; /* pointer back to this zlib stream */
+ int status; /* as the name implies */
+ Byte *pending_buf; /* output still pending */
+ ulg pending_buf_size; /* size of pending_buf */
+ Byte *pending_out; /* next pending byte to output to the stream */
+ int pending; /* nb of bytes in the pending buffer */
+ int noheader; /* suppress zlib header and adler32 */
+ Byte data_type; /* UNKNOWN, BINARY or ASCII */
+ Byte method; /* STORED (for zip only) or DEFLATED */
+ int last_flush; /* value of flush param for previous deflate call */
+
+ /* used by deflate.c: */
+
+ uInt w_size; /* LZ77 window size (32K by default) */
+ uInt w_bits; /* log2(w_size) (8..16) */
+ uInt w_mask; /* w_size - 1 */
+
+ Byte *window;
+ /* Sliding window. Input bytes are read into the second half of the window,
+ * and move to the first half later to keep a dictionary of at least wSize
+ * bytes. With this organization, matches are limited to a distance of
+ * wSize-MAX_MATCH bytes, but this ensures that IO is always
+ * performed with a length multiple of the block size. Also, it limits
+ * the window size to 64K, which is quite useful on MSDOS.
+ * To do: use the user input buffer as sliding window.
+ */
+
+ ulg window_size;
+ /* Actual size of window: 2*wSize, except when the user input buffer
+ * is directly used as sliding window.
+ */
+
+ Pos *prev;
+ /* Link to older string with same hash index. To limit the size of this
+ * array to 64K, this link is maintained only for the last 32K strings.
+ * An index in this array is thus a window index modulo 32K.
+ */
+
+ Pos *head; /* Heads of the hash chains or NIL. */
+
+ uInt ins_h; /* hash index of string to be inserted */
+ uInt hash_size; /* number of elements in hash table */
+ uInt hash_bits; /* log2(hash_size) */
+ uInt hash_mask; /* hash_size-1 */
+
+ uInt hash_shift;
+ /* Number of bits by which ins_h must be shifted at each input
+ * step. It must be such that after MIN_MATCH steps, the oldest
+ * byte no longer takes part in the hash key, that is:
+ * hash_shift * MIN_MATCH >= hash_bits
+ */
+
+ long block_start;
+ /* Window position at the beginning of the current output block. Gets
+ * negative when the window is moved backwards.
+ */
+
+ uInt match_length; /* length of best match */
+ IPos prev_match; /* previous match */
+ int match_available; /* set if previous match exists */
+ uInt strstart; /* start of string to insert */
+ uInt match_start; /* start of matching string */
+ uInt lookahead; /* number of valid bytes ahead in window */
+
+ uInt prev_length;
+ /* Length of the best match at previous step. Matches not greater than this
+ * are discarded. This is used in the lazy match evaluation.
+ */
+
+ uInt max_chain_length;
+ /* To speed up deflation, hash chains are never searched beyond this
+ * length. A higher limit improves compression ratio but degrades the
+ * speed.
+ */
+
+ uInt max_lazy_match;
+ /* Attempt to find a better match only when the current match is strictly
+ * smaller than this value. This mechanism is used only for compression
+ * levels >= 4.
+ */
+# define max_insert_length max_lazy_match
+ /* Insert new strings in the hash table only if the match length is not
+ * greater than this length. This saves time but degrades compression.
+ * max_insert_length is used only for compression levels <= 3.
+ */
+
+ int level; /* compression level (1..9) */
+ int strategy; /* favor or force Huffman coding*/
+
+ uInt good_match;
+ /* Use a faster search when the previous match is longer than this */
+
+ int nice_match; /* Stop searching when current match exceeds this */
+
+ /* used by trees.c: */
+ /* Didn't use ct_data typedef below to suppress compiler warning */
+ struct ct_data_s dyn_ltree[HEAP_SIZE]; /* literal and length tree */
+ struct ct_data_s dyn_dtree[2*D_CODES+1]; /* distance tree */
+ struct ct_data_s bl_tree[2*BL_CODES+1]; /* Huffman tree for bit lengths */
+
+ struct tree_desc_s l_desc; /* desc. for literal tree */
+ struct tree_desc_s d_desc; /* desc. for distance tree */
+ struct tree_desc_s bl_desc; /* desc. for bit length tree */
+
+ ush bl_count[MAX_BITS+1];
+ /* number of codes at each bit length for an optimal tree */
+
+ int heap[2*L_CODES+1]; /* heap used to build the Huffman trees */
+ int heap_len; /* number of elements in the heap */
+ int heap_max; /* element of largest frequency */
+ /* The sons of heap[n] are heap[2*n] and heap[2*n+1]. heap[0] is not used.
+ * The same heap array is used to build all trees.
+ */
+
+ uch depth[2*L_CODES+1];
+ /* Depth of each subtree used as tie breaker for trees of equal frequency
+ */
+
+ uch *l_buf; /* buffer for literals or lengths */
+
+ uInt lit_bufsize;
+ /* Size of match buffer for literals/lengths. There are 4 reasons for
+ * limiting lit_bufsize to 64K:
+ * - frequencies can be kept in 16 bit counters
+ * - if compression is not successful for the first block, all input
+ * data is still in the window so we can still emit a stored block even
+ * when input comes from standard input. (This can also be done for
+ * all blocks if lit_bufsize is not greater than 32K.)
+ * - if compression is not successful for a file smaller than 64K, we can
+ * even emit a stored file instead of a stored block (saving 5 bytes).
+ * This is applicable only for zip (not gzip or zlib).
+ * - creating new Huffman trees less frequently may not provide fast
+ * adaptation to changes in the input data statistics. (Take for
+ * example a binary file with poorly compressible code followed by
+ * a highly compressible string table.) Smaller buffer sizes give
+ * fast adaptation but have of course the overhead of transmitting
+ * trees more frequently.
+ * - I can't count above 4
+ */
+
+ uInt last_lit; /* running index in l_buf */
+
+ ush *d_buf;
+ /* Buffer for distances. To simplify the code, d_buf and l_buf have
+ * the same number of elements. To use different lengths, an extra flag
+ * array would be necessary.
+ */
+
+ ulg opt_len; /* bit length of current block with optimal trees */
+ ulg static_len; /* bit length of current block with static trees */
+ ulg compressed_len; /* total bit length of compressed file */
+ uInt matches; /* number of string matches in current block */
+ int last_eob_len; /* bit length of EOB code for last block */
+
+#ifdef DEBUG_ZLIB
+ ulg bits_sent; /* bit length of the compressed data */
+#endif
+
+ ush bi_buf;
+ /* Output buffer. bits are inserted starting at the bottom (least
+ * significant bits).
+ */
+ int bi_valid;
+ /* Number of valid bits in bi_buf. All bits above the last valid bit
+ * are always zero.
+ */
+
+} deflate_state;
+
+#ifdef CONFIG_ZLIB_DFLTCC
+#define zlib_deflate_window_memsize(windowBits) \
+ (2 * (1 << (windowBits)) * sizeof(Byte) + PAGE_SIZE)
+#else
+#define zlib_deflate_window_memsize(windowBits) \
+ (2 * (1 << (windowBits)) * sizeof(Byte))
+#endif
+#define zlib_deflate_prev_memsize(windowBits) \
+ ((1 << (windowBits)) * sizeof(Pos))
+#define zlib_deflate_head_memsize(memLevel) \
+ ((1 << ((memLevel)+7)) * sizeof(Pos))
+#define zlib_deflate_overlay_memsize(memLevel) \
+ ((1 << ((memLevel)+6)) * (sizeof(ush)+2))
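
For the defaults (windowBits = 15, memLevel = 8) these macros all work out to 64 KiB each; a quick compile-time check of the arithmetic, assuming 2-byte Pos/ush and no DFLTCC padding:

    static_assert(zlib_deflate_prev_memsize(15) == 65536, "prev: 64 KiB");
    static_assert(zlib_deflate_head_memsize(8) == 65536, "head: 64 KiB");
    static_assert(zlib_deflate_overlay_memsize(8) == 65536, "overlay: 64 KiB");
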
+
+/* Output a byte on the stream.
+ * IN assertion: there is enough room in pending_buf.
+ */
+#define put_byte(s, c) {s->pending_buf[s->pending++] = (c);}
+
+
+#define MIN_LOOKAHEAD (MAX_MATCH+MIN_MATCH+1)
+/* Minimum amount of lookahead, except at the end of the input file.
+ * See deflate.c for comments about the MIN_MATCH+1.
+ */
+
+#define MAX_DIST(s) ((s)->w_size-MIN_LOOKAHEAD)
+/* In order to simplify the code, particularly on 16 bit machines, match
+ * distances are limited to MAX_DIST instead of WSIZE.
+ */
+
+ /* in trees.c */
+void zlib_tr_init (deflate_state *s);
+int zlib_tr_tally (deflate_state *s, unsigned dist, unsigned lc);
+ulg zlib_tr_flush_block (deflate_state *s, char *buf, ulg stored_len,
+ int eof);
+void zlib_tr_align (deflate_state *s);
+void zlib_tr_stored_block (deflate_state *s, char *buf, ulg stored_len,
+ int eof);
+void zlib_tr_stored_type_only (deflate_state *);
+
+
+/* ===========================================================================
+ * Output a short LSB first on the stream.
+ * IN assertion: there is enough room in pendingBuf.
+ */
+#define put_short(s, w) { \
+ put_byte(s, (uch)((w) & 0xff)); \
+ put_byte(s, (uch)((ush)(w) >> 8)); \
+}
+
+/* ===========================================================================
+ * Reverse the first len bits of a code, using straightforward code (a faster
+ * method would use a table)
+ * IN assertion: 1 <= len <= 15
+ */
+static inline unsigned bi_reverse(
+ unsigned code, /* the value to invert */
+ int len /* its bit length */
+)
+{
+ register unsigned res = 0;
+ do {
+ res |= code & 1;
+ code >>= 1, res <<= 1;
+ } while (--len > 0);
+ return res >> 1;
+}
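
For example, reversing the 5-bit code 10110b gives 01101b:

    unsigned r = bi_reverse(0x16, 5); /* 10110b -> 01101b, r == 0x0d */

Codes are stored pre-reversed because deflate transmits Huffman codes most-significant bit first, while send_bits() packs the output LSB-first.
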
+
+/* ===========================================================================
+ * Flush the bit buffer, keeping at most 7 bits in it.
+ */
+static inline void bi_flush(deflate_state *s)
+{
+ if (s->bi_valid == 16) {
+ put_short(s, s->bi_buf);
+ s->bi_buf = 0;
+ s->bi_valid = 0;
+ } else if (s->bi_valid >= 8) {
+ put_byte(s, (Byte)s->bi_buf);
+ s->bi_buf >>= 8;
+ s->bi_valid -= 8;
+ }
+}
+
+/* ===========================================================================
+ * Flush the bit buffer and align the output on a byte boundary
+ */
+static inline void bi_windup(deflate_state *s)
+{
+ if (s->bi_valid > 8) {
+ put_short(s, s->bi_buf);
+ } else if (s->bi_valid > 0) {
+ put_byte(s, (Byte)s->bi_buf);
+ }
+ s->bi_buf = 0;
+ s->bi_valid = 0;
+#ifdef DEBUG_ZLIB
+ s->bits_sent = (s->bits_sent+7) & ~7;
+#endif
+}
+
+typedef enum {
+ need_more, /* block not completed, need more input or more output */
+ block_done, /* block flush performed */
+ finish_started, /* finish started, need only more output at next deflate */
+ finish_done /* finish done, accept no more input or output */
+} block_state;
+
+#define Buf_size (8 * 2*sizeof(char))
+/* Number of bits used within bi_buf. (bi_buf might be implemented on
+ * more than 16 bits on some systems.)
+ */
+
+/* ===========================================================================
+ * Send a value on a given number of bits.
+ * IN assertion: length <= 16 and value fits in length bits.
+ */
+#ifdef DEBUG_ZLIB
+static void send_bits (deflate_state *s, int value, int length);
+
+static void send_bits(
+ deflate_state *s,
+ int value, /* value to send */
+ int length /* number of bits */
+)
+{
+ Tracevv((stderr," l %2d v %4x ", length, value));
+ Assert(length > 0 && length <= 15, "invalid length");
+ s->bits_sent += (ulg)length;
+
+ /* If not enough room in bi_buf, use (valid) bits from bi_buf and
+ * (16 - bi_valid) bits from value, leaving (width - (16-bi_valid))
+ * unused bits in value.
+ */
+ if (s->bi_valid > (int)Buf_size - length) {
+ s->bi_buf |= (value << s->bi_valid);
+ put_short(s, s->bi_buf);
+ s->bi_buf = (ush)value >> (Buf_size - s->bi_valid);
+ s->bi_valid += length - Buf_size;
+ } else {
+ s->bi_buf |= value << s->bi_valid;
+ s->bi_valid += length;
+ }
+}
+#else /* !DEBUG_ZLIB */
+
+#define send_bits(s, value, length) \
+{ int len = length;\
+ if (s->bi_valid > (int)Buf_size - len) {\
+ int val = value;\
+ s->bi_buf |= (val << s->bi_valid);\
+ put_short(s, s->bi_buf);\
+ s->bi_buf = (ush)val >> (Buf_size - s->bi_valid);\
+ s->bi_valid += len - Buf_size;\
+ } else {\
+ s->bi_buf |= (value) << s->bi_valid;\
+ s->bi_valid += len;\
+ }\
+}
+#endif /* DEBUG_ZLIB */
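
Bits accumulate LSB-first in bi_buf; a short illustrative trace starting from an empty buffer (hypothetical values):

    /* bi_buf = 0, bi_valid = 0 initially:                    */
    send_bits(s, 0x5, 3); /* bi_buf = 0b101,     bi_valid = 3 */
    send_bits(s, 0x9, 4); /* bi_buf = 0b1001101, bi_valid = 7 */
    /* later values land in successively higher bits; whole 16-bit
     * chunks are flushed low byte first via put_short().     */
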
+
+static inline void zlib_tr_send_bits(
+ deflate_state *s,
+ int value,
+ int length
+)
+{
+ send_bits(s, value, length);
+}
+
+/* =========================================================================
+ * Flush as much pending output as possible. All deflate() output goes
+ * through this function so some applications may wish to modify it
+ * to avoid allocating a large strm->next_out buffer and copying into it.
+ * (See also read_buf()).
+ */
+static inline void flush_pending(
+ z_streamp strm
+)
+{
+ unsigned len;
+ deflate_state *s = (deflate_state *) strm->state;
+
+ bi_flush(s);
+ len = s->pending;
+ if (len > strm->avail_out) len = strm->avail_out;
+ if (len == 0) return;
+
+ if (strm->next_out != NULL) {
+ memcpy(strm->next_out, s->pending_out, len);
+ strm->next_out += len;
+ }
+ s->pending_out += len;
+ strm->total_out += len;
+ strm->avail_out -= len;
+ s->pending -= len;
+ if (s->pending == 0) {
+ s->pending_out = s->pending_buf;
+ }
+}
+#endif /* DEFUTIL_H */
diff --git a/lib/zlib_dfltcc/Makefile b/lib/zlib_dfltcc/Makefile
new file mode 100644
index 000000000..66e1c9638
--- /dev/null
+++ b/lib/zlib_dfltcc/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# This is a modified version of zlib, which does all memory
+# allocation ahead of time.
+#
+# This is the code for s390 zlib hardware support.
+#
+
+obj-$(CONFIG_ZLIB_DFLTCC) += zlib_dfltcc.o
+
+zlib_dfltcc-objs := dfltcc.o dfltcc_deflate.o dfltcc_inflate.o
diff --git a/lib/zlib_dfltcc/dfltcc.c b/lib/zlib_dfltcc/dfltcc.c
new file mode 100644
index 000000000..ac6ac9739
--- /dev/null
+++ b/lib/zlib_dfltcc/dfltcc.c
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: Zlib
+/* dfltcc.c - SystemZ DEFLATE CONVERSION CALL support. */
+
+#include <linux/export.h>
+#include <linux/module.h>
+#include "dfltcc_util.h"
+#include "dfltcc.h"
+
+char *oesc_msg(
+ char *buf,
+ int oesc
+)
+{
+ if (oesc == 0x00)
+ return NULL; /* Successful completion */
+ else {
+#ifdef STATIC
+ return NULL; /* Ignore for pre-boot decompressor */
+#else
+ sprintf(buf, "Operation-Ending-Supplemental Code is 0x%.2X", oesc);
+ return buf;
+#endif
+ }
+}
+
+void dfltcc_reset_state(struct dfltcc_state *dfltcc_state) {
+ /* Initialize available functions */
+ if (is_dfltcc_enabled()) {
+ dfltcc(DFLTCC_QAF, &dfltcc_state->param, NULL, NULL, NULL, NULL, NULL);
+ memmove(&dfltcc_state->af, &dfltcc_state->param, sizeof(dfltcc_state->af));
+ } else
+ memset(&dfltcc_state->af, 0, sizeof(dfltcc_state->af));
+
+ /* Initialize parameter block */
+ memset(&dfltcc_state->param, 0, sizeof(dfltcc_state->param));
+ dfltcc_state->param.nt = 1;
+ dfltcc_state->param.ribm = DFLTCC_RIBM;
+}
+
+MODULE_LICENSE("GPL");
diff --git a/lib/zlib_dfltcc/dfltcc.h b/lib/zlib_dfltcc/dfltcc.h
new file mode 100644
index 000000000..b96232bdd
--- /dev/null
+++ b/lib/zlib_dfltcc/dfltcc.h
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: Zlib
+#ifndef DFLTCC_H
+#define DFLTCC_H
+
+#include "../zlib_deflate/defutil.h"
+#include <asm/facility.h>
+#include <asm/setup.h>
+
+/*
+ * Tuning parameters.
+ */
+#define DFLTCC_LEVEL_MASK 0x2 /* DFLTCC compression for level 1 only */
+#define DFLTCC_LEVEL_MASK_DEBUG 0x3fe /* DFLTCC compression for all levels */
+#define DFLTCC_BLOCK_SIZE 1048576
+#define DFLTCC_FIRST_FHT_BLOCK_SIZE 4096
+#define DFLTCC_DHT_MIN_SAMPLE_SIZE 4096
+#define DFLTCC_RIBM 0
+
+#define DFLTCC_FACILITY 151
+
+/*
+ * Parameter Block for Query Available Functions.
+ */
+struct dfltcc_qaf_param {
+ char fns[16];
+ char reserved1[8];
+ char fmts[2];
+ char reserved2[6];
+};
+
+static_assert(sizeof(struct dfltcc_qaf_param) == 32);
+
+#define DFLTCC_FMT0 0
+
+/*
+ * Parameter Block for Generate Dynamic-Huffman Table, Compress and Expand.
+ */
+struct dfltcc_param_v0 {
+ uint16_t pbvn; /* Parameter-Block-Version Number */
+ uint8_t mvn; /* Model-Version Number */
+ uint8_t ribm; /* Reserved for IBM use */
+ unsigned reserved32 : 31;
+ unsigned cf : 1; /* Continuation Flag */
+ uint8_t reserved64[8];
+ unsigned nt : 1; /* New Task */
+ unsigned reserved129 : 1;
+ unsigned cvt : 1; /* Check Value Type */
+ unsigned reserved131 : 1;
+ unsigned htt : 1; /* Huffman-Table Type */
+ unsigned bcf : 1; /* Block-Continuation Flag */
+ unsigned bcc : 1; /* Block Closing Control */
+ unsigned bhf : 1; /* Block Header Final */
+ unsigned reserved136 : 1;
+ unsigned reserved137 : 1;
+ unsigned dhtgc : 1; /* DHT Generation Control */
+ unsigned reserved139 : 5;
+ unsigned reserved144 : 5;
+ unsigned sbb : 3; /* Sub-Byte Boundary */
+ uint8_t oesc; /* Operation-Ending-Supplemental Code */
+ unsigned reserved160 : 12;
+ unsigned ifs : 4; /* Incomplete-Function Status */
+ uint16_t ifl; /* Incomplete-Function Length */
+ uint8_t reserved192[8];
+ uint8_t reserved256[8];
+ uint8_t reserved320[4];
+ uint16_t hl; /* History Length */
+ unsigned reserved368 : 1;
+ uint16_t ho : 15; /* History Offset */
+ uint32_t cv; /* Check Value */
+ unsigned eobs : 15; /* End-of-block Symbol */
+ unsigned reserved431: 1;
+ uint8_t eobl : 4; /* End-of-block Length */
+ unsigned reserved436 : 12;
+ unsigned reserved448 : 4;
+ uint16_t cdhtl : 12; /* Compressed-Dynamic-Huffman Table
+ Length */
+ uint8_t reserved464[6];
+ uint8_t cdht[288];
+ uint8_t reserved[32];
+ uint8_t csb[1152];
+};
+
+static_assert(sizeof(struct dfltcc_param_v0) == 1536);
+
+#define CVT_CRC32 0
+#define CVT_ADLER32 1
+#define HTT_FIXED 0
+#define HTT_DYNAMIC 1
+
+/*
+ * Extension of inflate_state and deflate_state for DFLTCC.
+ */
+struct dfltcc_state {
+ struct dfltcc_param_v0 param; /* Parameter block */
+ struct dfltcc_qaf_param af; /* Available functions */
+ char msg[64]; /* Buffer for strm->msg */
+};
+
+/*
+ * Extension of inflate_state and deflate_state for DFLTCC.
+ */
+struct dfltcc_deflate_state {
+ struct dfltcc_state common; /* Parameter block */
+ uLong level_mask; /* Levels on which to use DFLTCC */
+ uLong block_size; /* New block each X bytes */
+ uLong block_threshold; /* New block after total_in > X */
+ uLong dht_threshold; /* New block only if avail_in >= X */
+};
+
+#define ALIGN_UP(p, size) (__typeof__(p))(((uintptr_t)(p) + ((size) - 1)) & ~((size) - 1))
+/* Resides right after inflate_state or deflate_state */
+#define GET_DFLTCC_STATE(state) ((struct dfltcc_state *)((char *)(state) + ALIGN_UP(sizeof(*state), 8)))
+
+void dfltcc_reset_state(struct dfltcc_state *dfltcc_state);
+
+static inline int is_dfltcc_enabled(void)
+{
+ return (zlib_dfltcc_support != ZLIB_DFLTCC_DISABLED &&
+ test_facility(DFLTCC_FACILITY));
+}
+
+#define DEFLATE_DFLTCC_ENABLED() is_dfltcc_enabled()
+
+#endif /* DFLTCC_H */
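
GET_DFLTCC_STATE() assumes the DFLTCC extension is allocated directly after the zlib state, rounded up to an 8-byte boundary. A hedged sketch of the layout it implies (the real allocation lives in the deflate/inflate workspace structs):

    /* base --> deflate_state (or inflate_state)
     *          ...padding to the next 8-byte boundary...
     *          struct dfltcc_state { param, af, msg }
     *
     * so, for some deflate_state *state:               */
    struct dfltcc_state *d = GET_DFLTCC_STATE(state);
    /* d == (char *)state + ALIGN_UP(sizeof(*state), 8) */
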
diff --git a/lib/zlib_dfltcc/dfltcc_deflate.c b/lib/zlib_dfltcc/dfltcc_deflate.c
new file mode 100644
index 000000000..b732b6d9e
--- /dev/null
+++ b/lib/zlib_dfltcc/dfltcc_deflate.c
@@ -0,0 +1,313 @@
+// SPDX-License-Identifier: Zlib
+
+#include "../zlib_deflate/defutil.h"
+#include "dfltcc_util.h"
+#include "dfltcc_deflate.h"
+#include <asm/setup.h>
+#include <linux/export.h>
+#include <linux/zutil.h>
+
+#define GET_DFLTCC_DEFLATE_STATE(state) ((struct dfltcc_deflate_state *)GET_DFLTCC_STATE(state))
+
+/*
+ * Compress.
+ */
+int dfltcc_can_deflate(
+ z_streamp strm
+)
+{
+ deflate_state *state = (deflate_state *)strm->state;
+ struct dfltcc_deflate_state *dfltcc_state = GET_DFLTCC_DEFLATE_STATE(state);
+
+ /* Check for kernel dfltcc command line parameter */
+ if (zlib_dfltcc_support == ZLIB_DFLTCC_DISABLED ||
+ zlib_dfltcc_support == ZLIB_DFLTCC_INFLATE_ONLY)
+ return 0;
+
+ /* Unsupported compression settings */
+ if (!dfltcc_are_params_ok(state->level, state->w_bits, state->strategy,
+ dfltcc_state->level_mask))
+ return 0;
+
+ /* Unsupported hardware */
+ if (!is_bit_set(dfltcc_state->common.af.fns, DFLTCC_GDHT) ||
+ !is_bit_set(dfltcc_state->common.af.fns, DFLTCC_CMPR) ||
+ !is_bit_set(dfltcc_state->common.af.fmts, DFLTCC_FMT0))
+ return 0;
+
+ return 1;
+}
+EXPORT_SYMBOL(dfltcc_can_deflate);
+
+void dfltcc_reset_deflate_state(z_streamp strm) {
+ deflate_state *state = (deflate_state *)strm->state;
+ struct dfltcc_deflate_state *dfltcc_state = GET_DFLTCC_DEFLATE_STATE(state);
+
+ dfltcc_reset_state(&dfltcc_state->common);
+
+ /* Initialize tuning parameters */
+ if (zlib_dfltcc_support == ZLIB_DFLTCC_FULL_DEBUG)
+ dfltcc_state->level_mask = DFLTCC_LEVEL_MASK_DEBUG;
+ else
+ dfltcc_state->level_mask = DFLTCC_LEVEL_MASK;
+ dfltcc_state->block_size = DFLTCC_BLOCK_SIZE;
+ dfltcc_state->block_threshold = DFLTCC_FIRST_FHT_BLOCK_SIZE;
+ dfltcc_state->dht_threshold = DFLTCC_DHT_MIN_SAMPLE_SIZE;
+}
+EXPORT_SYMBOL(dfltcc_reset_deflate_state);
+
+static void dfltcc_gdht(
+ z_streamp strm
+)
+{
+ deflate_state *state = (deflate_state *)strm->state;
+ struct dfltcc_param_v0 *param = &GET_DFLTCC_STATE(state)->param;
+ size_t avail_in = strm->avail_in;
+
+ dfltcc(DFLTCC_GDHT,
+ param, NULL, NULL,
+ &strm->next_in, &avail_in, NULL);
+}
+
+static dfltcc_cc dfltcc_cmpr(
+ z_streamp strm
+)
+{
+ deflate_state *state = (deflate_state *)strm->state;
+ struct dfltcc_param_v0 *param = &GET_DFLTCC_STATE(state)->param;
+ size_t avail_in = strm->avail_in;
+ size_t avail_out = strm->avail_out;
+ dfltcc_cc cc;
+
+ cc = dfltcc(DFLTCC_CMPR | HBT_CIRCULAR,
+ param, &strm->next_out, &avail_out,
+ &strm->next_in, &avail_in, state->window);
+ strm->total_in += (strm->avail_in - avail_in);
+ strm->total_out += (strm->avail_out - avail_out);
+ strm->avail_in = avail_in;
+ strm->avail_out = avail_out;
+ return cc;
+}
+
+static void send_eobs(
+ z_streamp strm,
+ const struct dfltcc_param_v0 *param
+)
+{
+ deflate_state *state = (deflate_state *)strm->state;
+
+ zlib_tr_send_bits(
+ state,
+ bi_reverse(param->eobs >> (15 - param->eobl), param->eobl),
+ param->eobl);
+ flush_pending(strm);
+ if (state->pending != 0) {
+ /* The remaining data is located in pending_out[0:pending]. If someone
+ * calls put_byte() - this might happen in deflate() - the byte will be
+ * placed into pending_buf[pending], which is incorrect. Move the
+ * remaining data to the beginning of pending_buf so that put_byte() is
+ * usable again.
+ */
+ memmove(state->pending_buf, state->pending_out, state->pending);
+ state->pending_out = state->pending_buf;
+ }
+#ifdef ZLIB_DEBUG
+ state->compressed_len += param->eobl;
+#endif
+}
+
+int dfltcc_deflate(
+ z_streamp strm,
+ int flush,
+ block_state *result
+)
+{
+ deflate_state *state = (deflate_state *)strm->state;
+ struct dfltcc_deflate_state *dfltcc_state = GET_DFLTCC_DEFLATE_STATE(state);
+ struct dfltcc_param_v0 *param = &dfltcc_state->common.param;
+ uInt masked_avail_in;
+ dfltcc_cc cc;
+ int need_empty_block;
+ int soft_bcc;
+ int no_flush;
+
+ if (!dfltcc_can_deflate(strm)) {
+ /* Clear history. */
+ if (flush == Z_FULL_FLUSH)
+ param->hl = 0;
+ return 0;
+ }
+
+again:
+ masked_avail_in = 0;
+ soft_bcc = 0;
+ no_flush = flush == Z_NO_FLUSH;
+
+ /* No input data. Return, except when Continuation Flag is set, which means
+ * that DFLTCC has buffered some output in the parameter block and needs to
+ * be called again in order to flush it.
+ */
+ if (strm->avail_in == 0 && !param->cf) {
+ /* A block is still open, and the hardware does not support closing
+ * blocks without adding data. Thus, close it manually.
+ */
+ if (!no_flush && param->bcf) {
+ send_eobs(strm, param);
+ param->bcf = 0;
+ }
+ /* Let one of deflate_* functions write a trailing empty block. */
+ if (flush == Z_FINISH)
+ return 0;
+ /* Clear history. */
+ if (flush == Z_FULL_FLUSH)
+ param->hl = 0;
+ /* Trigger block post-processing if necessary. */
+ *result = no_flush ? need_more : block_done;
+ return 1;
+ }
+
+ /* There is an open non-BFINAL block, we are not going to close it just
+ * yet, we have compressed more than DFLTCC_BLOCK_SIZE bytes and we see
+ * more than DFLTCC_DHT_MIN_SAMPLE_SIZE bytes. Open a new block with a new
+ * DHT in order to adapt to a possibly changed input data distribution.
+ */
+ if (param->bcf && no_flush &&
+ strm->total_in > dfltcc_state->block_threshold &&
+ strm->avail_in >= dfltcc_state->dht_threshold) {
+ if (param->cf) {
+ /* We need to flush the DFLTCC buffer before writing the
+ * End-of-block Symbol. Mask the input data and proceed as usual.
+ */
+ masked_avail_in += strm->avail_in;
+ strm->avail_in = 0;
+ no_flush = 0;
+ } else {
+ /* DFLTCC buffer is empty, so we can manually write the
+ * End-of-block Symbol right away.
+ */
+ send_eobs(strm, param);
+ param->bcf = 0;
+ dfltcc_state->block_threshold =
+ strm->total_in + dfltcc_state->block_size;
+ }
+ }
+
+ /* No space for compressed data. If we proceed, dfltcc_cmpr() will return
+ * DFLTCC_CC_OP1_TOO_SHORT without buffering header bits, but we will still
+ * set BCF=1, which is wrong. Avoid complications and return early.
+ */
+ if (strm->avail_out == 0) {
+ *result = need_more;
+ return 1;
+ }
+
+ /* The caller gave us too much data. Pass only one block worth of
+ * uncompressed data to DFLTCC and mask the rest, so that on the next
+ * iteration we start a new block.
+ */
+ if (no_flush && strm->avail_in > dfltcc_state->block_size) {
+ masked_avail_in += (strm->avail_in - dfltcc_state->block_size);
+ strm->avail_in = dfltcc_state->block_size;
+ }
+
+ /* When we have an open non-BFINAL deflate block and caller indicates that
+ * the stream is ending, we need to close an open deflate block and open a
+ * BFINAL one.
+ */
+ need_empty_block = flush == Z_FINISH && param->bcf && !param->bhf;
+
+ /* Translate stream to parameter block */
+ param->cvt = CVT_ADLER32;
+ if (!no_flush)
+ /* We need to close a block. Always do this in software - when there is
+ * no input data, the hardware will not honor BCC. */
+ soft_bcc = 1;
+ if (flush == Z_FINISH && !param->bcf)
+ /* We are about to open a BFINAL block, set Block Header Final bit
+ * until the stream ends.
+ */
+ param->bhf = 1;
+ /* DFLTCC-CMPR will write to next_out, so make sure that buffers with
+ * higher precedence are empty.
+ */
+ Assert(state->pending == 0, "There must be no pending bytes");
+ Assert(state->bi_valid < 8, "There must be less than 8 pending bits");
+ param->sbb = (unsigned int)state->bi_valid;
+ if (param->sbb > 0)
+ *strm->next_out = (Byte)state->bi_buf;
+ /* Honor history and check value */
+ param->nt = 0;
+ param->cv = strm->adler;
+
+ /* When opening a block, choose a Huffman-Table Type */
+ if (!param->bcf) {
+ if (strm->total_in == 0 && dfltcc_state->block_threshold > 0) {
+ param->htt = HTT_FIXED;
+ } else {
+ param->htt = HTT_DYNAMIC;
+ dfltcc_gdht(strm);
+ }
+ }
+
+ /* Deflate */
+ do {
+ cc = dfltcc_cmpr(strm);
+ if (strm->avail_in < 4096 && masked_avail_in > 0)
+ /* We are about to call DFLTCC with a small input buffer, which is
+ * inefficient. Since there is masked data, there will be at least
+ * one more DFLTCC call, so skip the current one and make the next
+ * one handle more data.
+ */
+ break;
+ } while (cc == DFLTCC_CC_AGAIN);
+
+ /* Translate parameter block to stream */
+ strm->msg = oesc_msg(dfltcc_state->common.msg, param->oesc);
+ state->bi_valid = param->sbb;
+ if (state->bi_valid == 0)
+ state->bi_buf = 0; /* Avoid accessing next_out */
+ else
+ state->bi_buf = *strm->next_out & ((1 << state->bi_valid) - 1);
+ strm->adler = param->cv;
+
+ /* Unmask the input data */
+ strm->avail_in += masked_avail_in;
+ masked_avail_in = 0;
+
+ /* If we encounter an error, it means there is a bug in DFLTCC call */
+ Assert(cc != DFLTCC_CC_OP2_CORRUPT || param->oesc == 0, "BUG");
+
+ /* Update Block-Continuation Flag. It will be used to check whether to call
+ * GDHT the next time.
+ */
+ if (cc == DFLTCC_CC_OK) {
+ if (soft_bcc) {
+ send_eobs(strm, param);
+ param->bcf = 0;
+ dfltcc_state->block_threshold =
+ strm->total_in + dfltcc_state->block_size;
+ } else
+ param->bcf = 1;
+ if (flush == Z_FINISH) {
+ if (need_empty_block)
+ /* Make the current deflate() call also close the stream */
+ return 0;
+ else {
+ bi_windup(state);
+ *result = finish_done;
+ }
+ } else {
+ if (flush == Z_FULL_FLUSH)
+ param->hl = 0; /* Clear history */
+ *result = flush == Z_NO_FLUSH ? need_more : block_done;
+ }
+ } else {
+ param->bcf = 1;
+ *result = need_more;
+ }
+ if (strm->avail_in != 0 && strm->avail_out != 0)
+ goto again; /* deflate() must use all input or all output */
+ return 1;
+}
+EXPORT_SYMBOL(dfltcc_deflate);
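
The masking idiom used above is worth isolating: hide all input beyond one block before calling the hardware, then give it back afterwards so deflate() sees a consistent stream. A minimal sketch with hypothetical helper names:

    /* Cap the input the hardware sees at one block. */
    static void mask_input(z_streamp strm, uLong block_size, uInt *masked)
    {
        if (strm->avail_in > block_size) {
            *masked += strm->avail_in - block_size;
            strm->avail_in = block_size;
        }
    }

    /* Restore the hidden remainder once the DFLTCC call returns. */
    static void unmask_input(z_streamp strm, uInt *masked)
    {
        strm->avail_in += *masked;
        *masked = 0;
    }
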
diff --git a/lib/zlib_dfltcc/dfltcc_deflate.h b/lib/zlib_dfltcc/dfltcc_deflate.h
new file mode 100644
index 000000000..be44b4383
--- /dev/null
+++ b/lib/zlib_dfltcc/dfltcc_deflate.h
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: Zlib
+#ifndef DFLTCC_DEFLATE_H
+#define DFLTCC_DEFLATE_H
+
+#include "dfltcc.h"
+
+/* External functions */
+int dfltcc_can_deflate(z_streamp strm);
+int dfltcc_deflate(z_streamp strm,
+ int flush,
+ block_state *result);
+void dfltcc_reset_deflate_state(z_streamp strm);
+
+#define DEFLATE_RESET_HOOK(strm) \
+ dfltcc_reset_deflate_state((strm))
+
+#define DEFLATE_HOOK dfltcc_deflate
+
+#define DEFLATE_NEED_CHECKSUM(strm) (!dfltcc_can_deflate((strm)))
+
+#endif /* DFLTCC_DEFLATE_H */
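
These hooks are consumed by lib/zlib_deflate/deflate.c, which is not part of this hunk; a hedged sketch of the call pattern a hook site presumably follows:

    block_state bstate;

    if (DEFLATE_HOOK(strm, flush, &bstate)) {
        /* hardware path handled the data; act on bstate */
    } else {
        /* fall back to the software deflate_* routines */
    }
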
diff --git a/lib/zlib_dfltcc/dfltcc_inflate.c b/lib/zlib_dfltcc/dfltcc_inflate.c
new file mode 100644
index 000000000..437cd34c8
--- /dev/null
+++ b/lib/zlib_dfltcc/dfltcc_inflate.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: Zlib
+
+#include "../zlib_inflate/inflate.h"
+#include "dfltcc_util.h"
+#include "dfltcc_inflate.h"
+#include <asm/setup.h>
+#include <linux/export.h>
+#include <linux/zutil.h>
+
+/*
+ * Expand.
+ */
+int dfltcc_can_inflate(
+ z_streamp strm
+)
+{
+ struct inflate_state *state = (struct inflate_state *)strm->state;
+ struct dfltcc_state *dfltcc_state = GET_DFLTCC_STATE(state);
+
+ /* Check for kernel dfltcc command line parameter */
+ if (zlib_dfltcc_support == ZLIB_DFLTCC_DISABLED ||
+ zlib_dfltcc_support == ZLIB_DFLTCC_DEFLATE_ONLY)
+ return 0;
+
+ /* Unsupported hardware */
+ return is_bit_set(dfltcc_state->af.fns, DFLTCC_XPND) &&
+ is_bit_set(dfltcc_state->af.fmts, DFLTCC_FMT0);
+}
+EXPORT_SYMBOL(dfltcc_can_inflate);
+
+void dfltcc_reset_inflate_state(z_streamp strm) {
+ struct inflate_state *state = (struct inflate_state *)strm->state;
+ struct dfltcc_state *dfltcc_state = GET_DFLTCC_STATE(state);
+
+ dfltcc_reset_state(dfltcc_state);
+}
+EXPORT_SYMBOL(dfltcc_reset_inflate_state);
+
+static int dfltcc_was_inflate_used(
+ z_streamp strm
+)
+{
+ struct inflate_state *state = (struct inflate_state *)strm->state;
+ struct dfltcc_param_v0 *param = &GET_DFLTCC_STATE(state)->param;
+
+ return !param->nt;
+}
+
+static int dfltcc_inflate_disable(
+ z_streamp strm
+)
+{
+ struct inflate_state *state = (struct inflate_state *)strm->state;
+ struct dfltcc_state *dfltcc_state = GET_DFLTCC_STATE(state);
+
+ if (!dfltcc_can_inflate(strm))
+ return 0;
+ if (dfltcc_was_inflate_used(strm))
+ /* DFLTCC has already decompressed some data. Since there is not
+ * enough information to resume decompression in software, the call
+ * must fail.
+ */
+ return 1;
+ /* DFLTCC was not used yet - decompress in software */
+ memset(&dfltcc_state->af, 0, sizeof(dfltcc_state->af));
+ return 0;
+}
+
+static dfltcc_cc dfltcc_xpnd(
+ z_streamp strm
+)
+{
+ struct inflate_state *state = (struct inflate_state *)strm->state;
+ struct dfltcc_param_v0 *param = &GET_DFLTCC_STATE(state)->param;
+ size_t avail_in = strm->avail_in;
+ size_t avail_out = strm->avail_out;
+ dfltcc_cc cc;
+
+ cc = dfltcc(DFLTCC_XPND | HBT_CIRCULAR,
+ param, &strm->next_out, &avail_out,
+ &strm->next_in, &avail_in, state->window);
+ strm->avail_in = avail_in;
+ strm->avail_out = avail_out;
+ return cc;
+}
+
+dfltcc_inflate_action dfltcc_inflate(
+ z_streamp strm,
+ int flush,
+ int *ret
+)
+{
+ struct inflate_state *state = (struct inflate_state *)strm->state;
+ struct dfltcc_state *dfltcc_state = GET_DFLTCC_STATE(state);
+ struct dfltcc_param_v0 *param = &dfltcc_state->param;
+ dfltcc_cc cc;
+
+ if (flush == Z_BLOCK || flush == Z_PACKET_FLUSH) {
+ /* DFLTCC does not support stopping on block boundaries (the Z_BLOCK flush
+ * option), nor the Z_PACKET_FLUSH option (used exclusively by the PPP
+ * driver).
+ */
+ if (dfltcc_inflate_disable(strm)) {
+ *ret = Z_STREAM_ERROR;
+ return DFLTCC_INFLATE_BREAK;
+ } else
+ return DFLTCC_INFLATE_SOFTWARE;
+ }
+
+ if (state->last) {
+ if (state->bits != 0) {
+ strm->next_in++;
+ strm->avail_in--;
+ state->bits = 0;
+ }
+ state->mode = CHECK;
+ return DFLTCC_INFLATE_CONTINUE;
+ }
+
+ if (strm->avail_in == 0 && !param->cf)
+ return DFLTCC_INFLATE_BREAK;
+
+ if (!state->window || state->wsize == 0) {
+ state->mode = MEM;
+ return DFLTCC_INFLATE_CONTINUE;
+ }
+
+ /* Translate stream to parameter block */
+ param->cvt = CVT_ADLER32;
+ param->sbb = state->bits;
+ if (param->hl)
+ param->nt = 0; /* Honor history for the first block */
+ param->cv = state->check;
+
+ /* Inflate */
+ do {
+ cc = dfltcc_xpnd(strm);
+ } while (cc == DFLTCC_CC_AGAIN);
+
+ /* Translate parameter block to stream */
+ strm->msg = oesc_msg(dfltcc_state->msg, param->oesc);
+ state->last = cc == DFLTCC_CC_OK;
+ state->bits = param->sbb;
+ state->check = param->cv;
+ if (cc == DFLTCC_CC_OP2_CORRUPT && param->oesc != 0) {
+ /* Report an error if stream is corrupted */
+ state->mode = BAD;
+ return DFLTCC_INFLATE_CONTINUE;
+ }
+ state->mode = TYPEDO;
+ /* Break if operands are exhausted, otherwise continue looping */
+ return (cc == DFLTCC_CC_OP1_TOO_SHORT || cc == DFLTCC_CC_OP2_TOO_SHORT) ?
+ DFLTCC_INFLATE_BREAK : DFLTCC_INFLATE_CONTINUE;
+}
+EXPORT_SYMBOL(dfltcc_inflate);
diff --git a/lib/zlib_dfltcc/dfltcc_inflate.h b/lib/zlib_dfltcc/dfltcc_inflate.h
new file mode 100644
index 000000000..98d4bc42e
--- /dev/null
+++ b/lib/zlib_dfltcc/dfltcc_inflate.h
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: Zlib
+#ifndef DFLTCC_INFLATE_H
+#define DFLTCC_INFLATE_H
+
+#include "dfltcc.h"
+
+/* External functions */
+void dfltcc_reset_inflate_state(z_streamp strm);
+int dfltcc_can_inflate(z_streamp strm);
+typedef enum {
+ DFLTCC_INFLATE_CONTINUE,
+ DFLTCC_INFLATE_BREAK,
+ DFLTCC_INFLATE_SOFTWARE,
+} dfltcc_inflate_action;
+dfltcc_inflate_action dfltcc_inflate(z_streamp strm,
+ int flush, int *ret);
+#define INFLATE_RESET_HOOK(strm) \
+ dfltcc_reset_inflate_state((strm))
+
+#define INFLATE_TYPEDO_HOOK(strm, flush) \
+ if (dfltcc_can_inflate((strm))) { \
+ dfltcc_inflate_action action; \
+\
+ RESTORE(); \
+ action = dfltcc_inflate((strm), (flush), &ret); \
+ LOAD(); \
+ if (action == DFLTCC_INFLATE_CONTINUE) \
+ break; \
+ else if (action == DFLTCC_INFLATE_BREAK) \
+ goto inf_leave; \
+ }
+
+#define INFLATE_NEED_CHECKSUM(strm) (!dfltcc_can_inflate((strm)))
+
+#define INFLATE_NEED_UPDATEWINDOW(strm) (!dfltcc_can_inflate((strm)))
+
+#endif /* DFLTCC_INFLATE_H */
diff --git a/lib/zlib_dfltcc/dfltcc_util.h b/lib/zlib_dfltcc/dfltcc_util.h
new file mode 100644
index 000000000..4a46b5009
--- /dev/null
+++ b/lib/zlib_dfltcc/dfltcc_util.h
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: Zlib
+#ifndef DFLTCC_UTIL_H
+#define DFLTCC_UTIL_H
+
+#include <linux/zutil.h>
+
+/*
+ * C wrapper for the DEFLATE CONVERSION CALL instruction.
+ */
+typedef enum {
+ DFLTCC_CC_OK = 0,
+ DFLTCC_CC_OP1_TOO_SHORT = 1,
+ DFLTCC_CC_OP2_TOO_SHORT = 2,
+ DFLTCC_CC_OP2_CORRUPT = 2,
+ DFLTCC_CC_AGAIN = 3,
+} dfltcc_cc;
+
+#define DFLTCC_QAF 0
+#define DFLTCC_GDHT 1
+#define DFLTCC_CMPR 2
+#define DFLTCC_XPND 4
+#define HBT_CIRCULAR (1 << 7)
+#define HB_BITS 15
+#define HB_SIZE (1 << HB_BITS)
+
+static inline dfltcc_cc dfltcc(
+ int fn,
+ void *param,
+ Byte **op1,
+ size_t *len1,
+ const Byte **op2,
+ size_t *len2,
+ void *hist
+)
+{
+ Byte *t2 = op1 ? *op1 : NULL;
+ size_t t3 = len1 ? *len1 : 0;
+ const Byte *t4 = op2 ? *op2 : NULL;
+ size_t t5 = len2 ? *len2 : 0;
+ register int r0 __asm__("r0") = fn;
+ register void *r1 __asm__("r1") = param;
+ register Byte *r2 __asm__("r2") = t2;
+ register size_t r3 __asm__("r3") = t3;
+ register const Byte *r4 __asm__("r4") = t4;
+ register size_t r5 __asm__("r5") = t5;
+ int cc;
+
+ __asm__ volatile(
+ ".insn rrf,0xb9390000,%[r2],%[r4],%[hist],0\n"
+ "ipm %[cc]\n"
+ : [r2] "+r" (r2)
+ , [r3] "+r" (r3)
+ , [r4] "+r" (r4)
+ , [r5] "+r" (r5)
+ , [cc] "=r" (cc)
+ : [r0] "r" (r0)
+ , [r1] "r" (r1)
+ , [hist] "r" (hist)
+ : "cc", "memory");
+ t2 = r2; t3 = r3; t4 = r4; t5 = r5;
+
+ if (op1)
+ *op1 = t2;
+ if (len1)
+ *len1 = t3;
+ if (op2)
+ *op2 = t4;
+ if (len2)
+ *len2 = t5;
+ return (cc >> 28) & 3;
+}
+
+static inline int is_bit_set(
+ const char *bits,
+ int n
+)
+{
+ return bits[n / 8] & (1 << (7 - (n % 8)));
+}
+
+static inline void turn_bit_off(
+ char *bits,
+ int n
+)
+{
+ bits[n / 8] &= ~(1 << (7 - (n % 8)));
+}
+
+static inline int dfltcc_are_params_ok(
+ int level,
+ uInt window_bits,
+ int strategy,
+ uLong level_mask
+)
+{
+ return (level_mask & (1 << level)) != 0 &&
+ (window_bits == HB_BITS) &&
+ (strategy == Z_DEFAULT_STRATEGY);
+}
+
+char *oesc_msg(char *buf, int oesc);
+
+#endif /* DFLTCC_UTIL_H */
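
Note the MSB-first bit numbering within each facility byte; for example, with fns[0] == 0x40:

    /* is_bit_set(fns, 0): 0x40 & 0x80 -> 0 (DFLTCC_QAF absent)
     * is_bit_set(fns, 1): 0x40 & 0x40 -> 1 (DFLTCC_GDHT present) */
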
diff --git a/lib/zlib_inflate/Makefile b/lib/zlib_inflate/Makefile
new file mode 100644
index 000000000..27327d3e9
--- /dev/null
+++ b/lib/zlib_inflate/Makefile
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# This is a modified version of zlib, which does all memory
+# allocation ahead of time.
+#
+# This is only the decompression, see zlib_deflate for
+# the compression
+#
+# Decompression needs to be serialized for each memory
+# allocation.
+#
+# (The upside of the simplification is that you can't get into
+# any nasty situations wrt memory management, and that the
+# decompression can be done without blocking on allocation).
+#
+
+obj-$(CONFIG_ZLIB_INFLATE) += zlib_inflate.o
+
+zlib_inflate-objs := inffast.o inflate.o infutil.o \
+ inftrees.o inflate_syms.o
diff --git a/lib/zlib_inflate/inffast.c b/lib/zlib_inflate/inffast.c
new file mode 100644
index 000000000..2843f9bb4
--- /dev/null
+++ b/lib/zlib_inflate/inffast.c
@@ -0,0 +1,341 @@
+/* inffast.c -- fast decoding
+ * Copyright (C) 1995-2004 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+#include <linux/zutil.h>
+#include "inftrees.h"
+#include "inflate.h"
+#include "inffast.h"
+
+#ifndef ASMINF
+
+union uu {
+ unsigned short us;
+ unsigned char b[2];
+};
+
+/* Endian independent version */
+static inline unsigned short
+get_unaligned16(const unsigned short *p)
+{
+ union uu mm;
+ unsigned char *b = (unsigned char *)p;
+
+ mm.b[0] = b[0];
+ mm.b[1] = b[1];
+ return mm.us;
+}
+
+/*
+ Decode literal, length, and distance codes and write out the resulting
+ literal and match bytes until either not enough input or output is
+ available, an end-of-block is encountered, or a data error is encountered.
+ When large enough input and output buffers are supplied to inflate(), for
+ example, a 16K input buffer and a 64K output buffer, more than 95% of the
+ inflate execution time is spent in this routine.
+
+ Entry assumptions:
+
+ state->mode == LEN
+ strm->avail_in >= 6
+ strm->avail_out >= 258
+ start >= strm->avail_out
+ state->bits < 8
+
+ On return, state->mode is one of:
+
+ LEN -- ran out of enough output space or enough available input
+ TYPE -- reached end of block code, inflate() to interpret next block
+ BAD -- error in block data
+
+ Notes:
+
+ - The maximum input bits used by a length/distance pair is 15 bits for the
+ length code, 5 bits for the length extra, 15 bits for the distance code,
+ and 13 bits for the distance extra. This totals 48 bits, or six bytes.
+ Therefore if strm->avail_in >= 6, then there is enough input to avoid
+ checking for available input while decoding.
+
+ - The maximum bytes that a single length/distance pair can output is 258
+ bytes, which is the maximum length that can be coded. inflate_fast()
+ requires strm->avail_out >= 258 for each loop to avoid checking for
+ output space.
+
+ - @start: inflate()'s starting value for strm->avail_out
+ */
+void inflate_fast(z_streamp strm, unsigned start)
+{
+ struct inflate_state *state;
+ const unsigned char *in; /* local strm->next_in */
+ const unsigned char *last; /* while in < last, enough input available */
+ unsigned char *out; /* local strm->next_out */
+ unsigned char *beg; /* inflate()'s initial strm->next_out */
+ unsigned char *end; /* while out < end, enough space available */
+#ifdef INFLATE_STRICT
+ unsigned dmax; /* maximum distance from zlib header */
+#endif
+ unsigned wsize; /* window size or zero if not using window */
+ unsigned whave; /* valid bytes in the window */
+ unsigned write; /* window write index */
+ unsigned char *window; /* allocated sliding window, if wsize != 0 */
+ unsigned long hold; /* local strm->hold */
+ unsigned bits; /* local strm->bits */
+ code const *lcode; /* local strm->lencode */
+ code const *dcode; /* local strm->distcode */
+ unsigned lmask; /* mask for first level of length codes */
+ unsigned dmask; /* mask for first level of distance codes */
+ code this; /* retrieved table entry */
+ unsigned op; /* code bits, operation, extra bits, or */
+ /* window position, window bytes to copy */
+ unsigned len; /* match length, unused bytes */
+ unsigned dist; /* match distance */
+ unsigned char *from; /* where to copy match from */
+
+ /* copy state to local variables */
+ state = (struct inflate_state *)strm->state;
+ in = strm->next_in;
+ last = in + (strm->avail_in - 5);
+ out = strm->next_out;
+ beg = out - (start - strm->avail_out);
+ end = out + (strm->avail_out - 257);
+#ifdef INFLATE_STRICT
+ dmax = state->dmax;
+#endif
+ wsize = state->wsize;
+ whave = state->whave;
+ write = state->write;
+ window = state->window;
+ hold = state->hold;
+ bits = state->bits;
+ lcode = state->lencode;
+ dcode = state->distcode;
+ lmask = (1U << state->lenbits) - 1;
+ dmask = (1U << state->distbits) - 1;
+
+ /* decode literals and length/distances until end-of-block or not enough
+ input data or output space */
+ do {
+ if (bits < 15) {
+ hold += (unsigned long)(*in++) << bits;
+ bits += 8;
+ hold += (unsigned long)(*in++) << bits;
+ bits += 8;
+ }
+ this = lcode[hold & lmask];
+ dolen:
+ op = (unsigned)(this.bits);
+ hold >>= op;
+ bits -= op;
+ op = (unsigned)(this.op);
+ if (op == 0) { /* literal */
+ *out++ = (unsigned char)(this.val);
+ }
+ else if (op & 16) { /* length base */
+ len = (unsigned)(this.val);
+ op &= 15; /* number of extra bits */
+ if (op) {
+ if (bits < op) {
+ hold += (unsigned long)(*in++) << bits;
+ bits += 8;
+ }
+ len += (unsigned)hold & ((1U << op) - 1);
+ hold >>= op;
+ bits -= op;
+ }
+ if (bits < 15) {
+ hold += (unsigned long)(*in++) << bits;
+ bits += 8;
+ hold += (unsigned long)(*in++) << bits;
+ bits += 8;
+ }
+ this = dcode[hold & dmask];
+ dodist:
+ op = (unsigned)(this.bits);
+ hold >>= op;
+ bits -= op;
+ op = (unsigned)(this.op);
+ if (op & 16) { /* distance base */
+ dist = (unsigned)(this.val);
+ op &= 15; /* number of extra bits */
+ if (bits < op) {
+ hold += (unsigned long)(*in++) << bits;
+ bits += 8;
+ if (bits < op) {
+ hold += (unsigned long)(*in++) << bits;
+ bits += 8;
+ }
+ }
+ dist += (unsigned)hold & ((1U << op) - 1);
+#ifdef INFLATE_STRICT
+ if (dist > dmax) {
+ strm->msg = (char *)"invalid distance too far back";
+ state->mode = BAD;
+ break;
+ }
+#endif
+ hold >>= op;
+ bits -= op;
+ op = (unsigned)(out - beg); /* max distance in output */
+ if (dist > op) { /* see if copy from window */
+ op = dist - op; /* distance back in window */
+ if (op > whave) {
+ strm->msg = (char *)"invalid distance too far back";
+ state->mode = BAD;
+ break;
+ }
+ from = window;
+ if (write == 0) { /* very common case */
+ from += wsize - op;
+ if (op < len) { /* some from window */
+ len -= op;
+ do {
+ *out++ = *from++;
+ } while (--op);
+ from = out - dist; /* rest from output */
+ }
+ }
+ else if (write < op) { /* wrap around window */
+ from += wsize + write - op;
+ op -= write;
+ if (op < len) { /* some from end of window */
+ len -= op;
+ do {
+ *out++ = *from++;
+ } while (--op);
+ from = window;
+ if (write < len) { /* some from start of window */
+ op = write;
+ len -= op;
+ do {
+ *out++ = *from++;
+ } while (--op);
+ from = out - dist; /* rest from output */
+ }
+ }
+ }
+ else { /* contiguous in window */
+ from += write - op;
+ if (op < len) { /* some from window */
+ len -= op;
+ do {
+ *out++ = *from++;
+ } while (--op);
+ from = out - dist; /* rest from output */
+ }
+ }
+ while (len > 2) {
+ *out++ = *from++;
+ *out++ = *from++;
+ *out++ = *from++;
+ len -= 3;
+ }
+ if (len) {
+ *out++ = *from++;
+ if (len > 1)
+ *out++ = *from++;
+ }
+ }
+ else {
+ unsigned short *sout;
+ unsigned long loops;
+
+ from = out - dist; /* copy direct from output */
+ /* minimum length is three */
+ /* Align out addr */
+ if (!((long)(out - 1) & 1)) {
+ *out++ = *from++;
+ len--;
+ }
+ sout = (unsigned short *)(out);
+ if (dist > 2) {
+ unsigned short *sfrom;
+
+ sfrom = (unsigned short *)(from);
+ loops = len >> 1;
+ do {
+ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
+ *sout++ = *sfrom++;
+ else
+ *sout++ = get_unaligned16(sfrom++);
+ } while (--loops);
+ out = (unsigned char *)sout;
+ from = (unsigned char *)sfrom;
+ } else { /* dist == 1 or dist == 2 */
+ unsigned short pat16;
+
+ pat16 = *(sout-1);
+ if (dist == 1) {
+ union uu mm;
+ /* copy one char pattern to both bytes */
+ mm.us = pat16;
+ mm.b[0] = mm.b[1];
+ pat16 = mm.us;
+ }
+ loops = len >> 1;
+ do
+ *sout++ = pat16;
+ while (--loops);
+ out = (unsigned char *)sout;
+ }
+ if (len & 1)
+ *out++ = *from++;
+ }
+ }
+ else if ((op & 64) == 0) { /* 2nd level distance code */
+ this = dcode[this.val + (hold & ((1U << op) - 1))];
+ goto dodist;
+ }
+ else {
+ strm->msg = (char *)"invalid distance code";
+ state->mode = BAD;
+ break;
+ }
+ }
+ else if ((op & 64) == 0) { /* 2nd level length code */
+ this = lcode[this.val + (hold & ((1U << op) - 1))];
+ goto dolen;
+ }
+ else if (op & 32) { /* end-of-block */
+ state->mode = TYPE;
+ break;
+ }
+ else {
+ strm->msg = (char *)"invalid literal/length code";
+ state->mode = BAD;
+ break;
+ }
+ } while (in < last && out < end);
+
+ /* return unused bytes (on entry, bits < 8, so in won't go too far back) */
+ len = bits >> 3;
+ in -= len;
+ bits -= len << 3;
+ hold &= (1U << bits) - 1;
+
+ /* update state and return */
+ strm->next_in = in;
+ strm->next_out = out;
+ strm->avail_in = (unsigned)(in < last ? 5 + (last - in) : 5 - (in - last));
+ strm->avail_out = (unsigned)(out < end ?
+ 257 + (end - out) : 257 - (out - end));
+ state->hold = hold;
+ state->bits = bits;
+ return;
+}
+
+/*
+ inflate_fast() speedups that turned out slower (on a PowerPC G3 750CXe):
+ - Using bit fields for code structure
+ - Different op definition to avoid & for extra bits (do & for table bits)
+ - Three separate decoding do-loops for direct, window, and write == 0
+ - Special case for distance > 1 copies to do overlapped load and store copy
+ - Explicit branch predictions (based on measured branch probabilities)
+ - Deferring match copy and interspersed it with decoding subsequent codes
+ - Swapping literal/length else
+ - Swapping window/direct else
+ - Larger unrolled copy loops (three is about right)
+ - Moving len -= 3 statement into middle of loop
+ */
+
+#endif /* !ASMINF */
diff --git a/lib/zlib_inflate/inffast.h b/lib/zlib_inflate/inffast.h
new file mode 100644
index 000000000..40315d9fd
--- /dev/null
+++ b/lib/zlib_inflate/inffast.h
@@ -0,0 +1,11 @@
+/* inffast.h -- header to use inffast.c
+ * Copyright (C) 1995-2003 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+ part of the implementation of the compression library and is
+ subject to change. Applications should only use zlib.h.
+ */
+
+void inflate_fast (z_streamp strm, unsigned start);
diff --git a/lib/zlib_inflate/inffixed.h b/lib/zlib_inflate/inffixed.h
new file mode 100644
index 000000000..75ed4b597
--- /dev/null
+++ b/lib/zlib_inflate/inffixed.h
@@ -0,0 +1,94 @@
+ /* inffixed.h -- table for decoding fixed codes
+ * Generated automatically by makefixed().
+ */
+
+ /* WARNING: this file should *not* be used by applications. It
+ is part of the implementation of the compression library and
+ is subject to change. Applications should only use zlib.h.
+ */
+
+ static const code lenfix[512] = {
+ {96,7,0},{0,8,80},{0,8,16},{20,8,115},{18,7,31},{0,8,112},{0,8,48},
+ {0,9,192},{16,7,10},{0,8,96},{0,8,32},{0,9,160},{0,8,0},{0,8,128},
+ {0,8,64},{0,9,224},{16,7,6},{0,8,88},{0,8,24},{0,9,144},{19,7,59},
+ {0,8,120},{0,8,56},{0,9,208},{17,7,17},{0,8,104},{0,8,40},{0,9,176},
+ {0,8,8},{0,8,136},{0,8,72},{0,9,240},{16,7,4},{0,8,84},{0,8,20},
+ {21,8,227},{19,7,43},{0,8,116},{0,8,52},{0,9,200},{17,7,13},{0,8,100},
+ {0,8,36},{0,9,168},{0,8,4},{0,8,132},{0,8,68},{0,9,232},{16,7,8},
+ {0,8,92},{0,8,28},{0,9,152},{20,7,83},{0,8,124},{0,8,60},{0,9,216},
+ {18,7,23},{0,8,108},{0,8,44},{0,9,184},{0,8,12},{0,8,140},{0,8,76},
+ {0,9,248},{16,7,3},{0,8,82},{0,8,18},{21,8,163},{19,7,35},{0,8,114},
+ {0,8,50},{0,9,196},{17,7,11},{0,8,98},{0,8,34},{0,9,164},{0,8,2},
+ {0,8,130},{0,8,66},{0,9,228},{16,7,7},{0,8,90},{0,8,26},{0,9,148},
+ {20,7,67},{0,8,122},{0,8,58},{0,9,212},{18,7,19},{0,8,106},{0,8,42},
+ {0,9,180},{0,8,10},{0,8,138},{0,8,74},{0,9,244},{16,7,5},{0,8,86},
+ {0,8,22},{64,8,0},{19,7,51},{0,8,118},{0,8,54},{0,9,204},{17,7,15},
+ {0,8,102},{0,8,38},{0,9,172},{0,8,6},{0,8,134},{0,8,70},{0,9,236},
+ {16,7,9},{0,8,94},{0,8,30},{0,9,156},{20,7,99},{0,8,126},{0,8,62},
+ {0,9,220},{18,7,27},{0,8,110},{0,8,46},{0,9,188},{0,8,14},{0,8,142},
+ {0,8,78},{0,9,252},{96,7,0},{0,8,81},{0,8,17},{21,8,131},{18,7,31},
+ {0,8,113},{0,8,49},{0,9,194},{16,7,10},{0,8,97},{0,8,33},{0,9,162},
+ {0,8,1},{0,8,129},{0,8,65},{0,9,226},{16,7,6},{0,8,89},{0,8,25},
+ {0,9,146},{19,7,59},{0,8,121},{0,8,57},{0,9,210},{17,7,17},{0,8,105},
+ {0,8,41},{0,9,178},{0,8,9},{0,8,137},{0,8,73},{0,9,242},{16,7,4},
+ {0,8,85},{0,8,21},{16,8,258},{19,7,43},{0,8,117},{0,8,53},{0,9,202},
+ {17,7,13},{0,8,101},{0,8,37},{0,9,170},{0,8,5},{0,8,133},{0,8,69},
+ {0,9,234},{16,7,8},{0,8,93},{0,8,29},{0,9,154},{20,7,83},{0,8,125},
+ {0,8,61},{0,9,218},{18,7,23},{0,8,109},{0,8,45},{0,9,186},{0,8,13},
+ {0,8,141},{0,8,77},{0,9,250},{16,7,3},{0,8,83},{0,8,19},{21,8,195},
+ {19,7,35},{0,8,115},{0,8,51},{0,9,198},{17,7,11},{0,8,99},{0,8,35},
+ {0,9,166},{0,8,3},{0,8,131},{0,8,67},{0,9,230},{16,7,7},{0,8,91},
+ {0,8,27},{0,9,150},{20,7,67},{0,8,123},{0,8,59},{0,9,214},{18,7,19},
+ {0,8,107},{0,8,43},{0,9,182},{0,8,11},{0,8,139},{0,8,75},{0,9,246},
+ {16,7,5},{0,8,87},{0,8,23},{64,8,0},{19,7,51},{0,8,119},{0,8,55},
+ {0,9,206},{17,7,15},{0,8,103},{0,8,39},{0,9,174},{0,8,7},{0,8,135},
+ {0,8,71},{0,9,238},{16,7,9},{0,8,95},{0,8,31},{0,9,158},{20,7,99},
+ {0,8,127},{0,8,63},{0,9,222},{18,7,27},{0,8,111},{0,8,47},{0,9,190},
+ {0,8,15},{0,8,143},{0,8,79},{0,9,254},{96,7,0},{0,8,80},{0,8,16},
+ {20,8,115},{18,7,31},{0,8,112},{0,8,48},{0,9,193},{16,7,10},{0,8,96},
+ {0,8,32},{0,9,161},{0,8,0},{0,8,128},{0,8,64},{0,9,225},{16,7,6},
+ {0,8,88},{0,8,24},{0,9,145},{19,7,59},{0,8,120},{0,8,56},{0,9,209},
+ {17,7,17},{0,8,104},{0,8,40},{0,9,177},{0,8,8},{0,8,136},{0,8,72},
+ {0,9,241},{16,7,4},{0,8,84},{0,8,20},{21,8,227},{19,7,43},{0,8,116},
+ {0,8,52},{0,9,201},{17,7,13},{0,8,100},{0,8,36},{0,9,169},{0,8,4},
+ {0,8,132},{0,8,68},{0,9,233},{16,7,8},{0,8,92},{0,8,28},{0,9,153},
+ {20,7,83},{0,8,124},{0,8,60},{0,9,217},{18,7,23},{0,8,108},{0,8,44},
+ {0,9,185},{0,8,12},{0,8,140},{0,8,76},{0,9,249},{16,7,3},{0,8,82},
+ {0,8,18},{21,8,163},{19,7,35},{0,8,114},{0,8,50},{0,9,197},{17,7,11},
+ {0,8,98},{0,8,34},{0,9,165},{0,8,2},{0,8,130},{0,8,66},{0,9,229},
+ {16,7,7},{0,8,90},{0,8,26},{0,9,149},{20,7,67},{0,8,122},{0,8,58},
+ {0,9,213},{18,7,19},{0,8,106},{0,8,42},{0,9,181},{0,8,10},{0,8,138},
+ {0,8,74},{0,9,245},{16,7,5},{0,8,86},{0,8,22},{64,8,0},{19,7,51},
+ {0,8,118},{0,8,54},{0,9,205},{17,7,15},{0,8,102},{0,8,38},{0,9,173},
+ {0,8,6},{0,8,134},{0,8,70},{0,9,237},{16,7,9},{0,8,94},{0,8,30},
+ {0,9,157},{20,7,99},{0,8,126},{0,8,62},{0,9,221},{18,7,27},{0,8,110},
+ {0,8,46},{0,9,189},{0,8,14},{0,8,142},{0,8,78},{0,9,253},{96,7,0},
+ {0,8,81},{0,8,17},{21,8,131},{18,7,31},{0,8,113},{0,8,49},{0,9,195},
+ {16,7,10},{0,8,97},{0,8,33},{0,9,163},{0,8,1},{0,8,129},{0,8,65},
+ {0,9,227},{16,7,6},{0,8,89},{0,8,25},{0,9,147},{19,7,59},{0,8,121},
+ {0,8,57},{0,9,211},{17,7,17},{0,8,105},{0,8,41},{0,9,179},{0,8,9},
+ {0,8,137},{0,8,73},{0,9,243},{16,7,4},{0,8,85},{0,8,21},{16,8,258},
+ {19,7,43},{0,8,117},{0,8,53},{0,9,203},{17,7,13},{0,8,101},{0,8,37},
+ {0,9,171},{0,8,5},{0,8,133},{0,8,69},{0,9,235},{16,7,8},{0,8,93},
+ {0,8,29},{0,9,155},{20,7,83},{0,8,125},{0,8,61},{0,9,219},{18,7,23},
+ {0,8,109},{0,8,45},{0,9,187},{0,8,13},{0,8,141},{0,8,77},{0,9,251},
+ {16,7,3},{0,8,83},{0,8,19},{21,8,195},{19,7,35},{0,8,115},{0,8,51},
+ {0,9,199},{17,7,11},{0,8,99},{0,8,35},{0,9,167},{0,8,3},{0,8,131},
+ {0,8,67},{0,9,231},{16,7,7},{0,8,91},{0,8,27},{0,9,151},{20,7,67},
+ {0,8,123},{0,8,59},{0,9,215},{18,7,19},{0,8,107},{0,8,43},{0,9,183},
+ {0,8,11},{0,8,139},{0,8,75},{0,9,247},{16,7,5},{0,8,87},{0,8,23},
+ {64,8,0},{19,7,51},{0,8,119},{0,8,55},{0,9,207},{17,7,15},{0,8,103},
+ {0,8,39},{0,9,175},{0,8,7},{0,8,135},{0,8,71},{0,9,239},{16,7,9},
+ {0,8,95},{0,8,31},{0,9,159},{20,7,99},{0,8,127},{0,8,63},{0,9,223},
+ {18,7,27},{0,8,111},{0,8,47},{0,9,191},{0,8,15},{0,8,143},{0,8,79},
+ {0,9,255}
+ };
+
+ static const code distfix[32] = {
+ {16,5,1},{23,5,257},{19,5,17},{27,5,4097},{17,5,5},{25,5,1025},
+ {21,5,65},{29,5,16385},{16,5,3},{24,5,513},{20,5,33},{28,5,8193},
+ {18,5,9},{26,5,2049},{22,5,129},{64,5,0},{16,5,2},{23,5,385},
+ {19,5,25},{27,5,6145},{17,5,7},{25,5,1537},{21,5,97},{29,5,24577},
+ {16,5,4},{24,5,769},{20,5,49},{28,5,12289},{18,5,13},{26,5,3073},
+ {22,5,193},{64,5,0}
+ };
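
Each table entry is a struct code {op, bits, val}; read against the dolen/dodist dispatch in inffast.c, a few representative entries decode as (a hedged reading):

    /* {0, 8, 80}  op == 0 -> literal 'P' (val 80), 8-bit code
     * {18, 7, 31} op & 16 -> length base 31 plus (op & 15) = 2 extra bits
     * {96, 7, 0}  op & 32 -> end-of-block
     * {64, 8, 0}  op & 64 -> invalid code                              */
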
diff --git a/lib/zlib_inflate/inflate.c b/lib/zlib_inflate/inflate.c
new file mode 100644
index 000000000..d1efad69f
--- /dev/null
+++ b/lib/zlib_inflate/inflate.c
@@ -0,0 +1,814 @@
+/* inflate.c -- zlib decompression
+ * Copyright (C) 1995-2005 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ *
+ * Based on zlib 1.2.3 but modified for the Linux Kernel by
+ * Richard Purdie <richard@openedhand.com>
+ *
+ * Changes mainly for static instead of dynamic memory allocation
+ *
+ */
+
+#include <linux/zutil.h>
+#include "inftrees.h"
+#include "inflate.h"
+#include "inffast.h"
+#include "infutil.h"
+
+/* architecture-specific bits */
+#ifdef CONFIG_ZLIB_DFLTCC
+# include "../zlib_dfltcc/dfltcc_inflate.h"
+#else
+#define INFLATE_RESET_HOOK(strm) do {} while (0)
+#define INFLATE_TYPEDO_HOOK(strm, flush) do {} while (0)
+#define INFLATE_NEED_UPDATEWINDOW(strm) 1
+#define INFLATE_NEED_CHECKSUM(strm) 1
+#endif
+
+int zlib_inflate_workspacesize(void)
+{
+ return sizeof(struct inflate_workspace);
+}
+
+int zlib_inflateReset(z_streamp strm)
+{
+ struct inflate_state *state;
+
+ if (strm == NULL || strm->state == NULL) return Z_STREAM_ERROR;
+ state = (struct inflate_state *)strm->state;
+ strm->total_in = strm->total_out = state->total = 0;
+ strm->msg = NULL;
+ strm->adler = 1; /* to support ill-conceived Java test suite */
+ state->mode = HEAD;
+ state->last = 0;
+ state->havedict = 0;
+ state->dmax = 32768U;
+ state->hold = 0;
+ state->bits = 0;
+ state->lencode = state->distcode = state->next = state->codes;
+
+ /* Initialise Window */
+ state->wsize = 1U << state->wbits;
+ state->write = 0;
+ state->whave = 0;
+
+ INFLATE_RESET_HOOK(strm);
+ return Z_OK;
+}
+
+int zlib_inflateInit2(z_streamp strm, int windowBits)
+{
+ struct inflate_state *state;
+
+ if (strm == NULL) return Z_STREAM_ERROR;
+ strm->msg = NULL; /* in case we return an error */
+
+ state = &WS(strm)->inflate_state;
+ strm->state = (struct internal_state *)state;
+
+ if (windowBits < 0) {
+ state->wrap = 0;
+ windowBits = -windowBits;
+ }
+ else {
+ state->wrap = (windowBits >> 4) + 1;
+ }
+ if (windowBits < 8 || windowBits > 15) {
+ return Z_STREAM_ERROR;
+ }
+ state->wbits = (unsigned)windowBits;
+#ifdef CONFIG_ZLIB_DFLTCC
+ /*
+ * DFLTCC requires the window to be page aligned.
+ * Thus, we overallocate and take the aligned portion of the buffer.
+ */
+ state->window = PTR_ALIGN(&WS(strm)->working_window[0], PAGE_SIZE);
+#else
+ state->window = &WS(strm)->working_window[0];
+#endif
+
+ return zlib_inflateReset(strm);
+}
+
+/*
+ Return state with length and distance decoding tables and index sizes set to
+ fixed code decoding. This returns fixed tables from inffixed.h.
+ */
+static void zlib_fixedtables(struct inflate_state *state)
+{
+# include "inffixed.h"
+ state->lencode = lenfix;
+ state->lenbits = 9;
+ state->distcode = distfix;
+ state->distbits = 5;
+}
+
+
+/*
+ Update the window with the last wsize (normally 32K) bytes written before
+ returning. This is only called when a window is already in use, or when
+ output has been written during this inflate call, but the end of the deflate
+ stream has not been reached yet. It is also called to copy dictionary data
+ into the window when a dictionary is loaded.
+
+ Providing output buffers larger than 32K to inflate() should provide a speed
+ advantage, since only the last 32K of output is copied to the sliding window
+ upon return from inflate(), and since all distances after the first 32K of
+ output will fall in the output data, making match copies simpler and faster.
+ The advantage may be dependent on the size of the processor's data caches.
+ */
+static void zlib_updatewindow(z_streamp strm, unsigned out)
+{
+ struct inflate_state *state;
+ unsigned copy, dist;
+
+ state = (struct inflate_state *)strm->state;
+
+ /* copy state->wsize or less output bytes into the circular window */
+ copy = out - strm->avail_out;
+ if (copy >= state->wsize) {
+ memcpy(state->window, strm->next_out - state->wsize, state->wsize);
+ state->write = 0;
+ state->whave = state->wsize;
+ }
+ else {
+ dist = state->wsize - state->write;
+ if (dist > copy) dist = copy;
+ memcpy(state->window + state->write, strm->next_out - copy, dist);
+ copy -= dist;
+ if (copy) {
+ memcpy(state->window, strm->next_out - copy, copy);
+ state->write = copy;
+ state->whave = state->wsize;
+ }
+ else {
+ state->write += dist;
+ if (state->write == state->wsize) state->write = 0;
+ if (state->whave < state->wsize) state->whave += dist;
+ }
+ }
+}
+
+
+/*
+ * At the end of a Deflate-compressed PPP packet, we expect to have seen
+ * a `stored' block type value but not the (zero) length bytes.
+ */
+/*
+ Returns true if inflate is currently at the end of a block generated by
+ Z_SYNC_FLUSH or Z_FULL_FLUSH. This function is used by one PPP
+ implementation to provide an additional safety check. PPP uses
+ Z_SYNC_FLUSH but removes the length bytes of the resulting empty stored
+ block. When decompressing, PPP checks that at the end of input packet,
+ inflate is waiting for these length bytes.
+ */
+static int zlib_inflateSyncPacket(z_streamp strm)
+{
+ struct inflate_state *state;
+
+ if (strm == NULL || strm->state == NULL) return Z_STREAM_ERROR;
+ state = (struct inflate_state *)strm->state;
+
+ if (state->mode == STORED && state->bits == 0) {
+ state->mode = TYPE;
+ return Z_OK;
+ }
+ return Z_DATA_ERROR;
+}
+
+/* Macros for inflate(): */
+
+/* check function to use: adler32() for zlib (gzip's crc32() is not handled in this build) */
+#define UPDATE(check, buf, len) zlib_adler32(check, buf, len)
+
+/* Load registers with state in inflate() for speed */
+#define LOAD() \
+ do { \
+ put = strm->next_out; \
+ left = strm->avail_out; \
+ next = strm->next_in; \
+ have = strm->avail_in; \
+ hold = state->hold; \
+ bits = state->bits; \
+ } while (0)
+
+/* Restore state from registers in inflate() */
+#define RESTORE() \
+ do { \
+ strm->next_out = put; \
+ strm->avail_out = left; \
+ strm->next_in = next; \
+ strm->avail_in = have; \
+ state->hold = hold; \
+ state->bits = bits; \
+ } while (0)
+
+/* Clear the input bit accumulator */
+#define INITBITS() \
+ do { \
+ hold = 0; \
+ bits = 0; \
+ } while (0)
+
+/* Get a byte of input into the bit accumulator, or return from inflate()
+ if there is no input available. */
+#define PULLBYTE() \
+ do { \
+ if (have == 0) goto inf_leave; \
+ have--; \
+ hold += (unsigned long)(*next++) << bits; \
+ bits += 8; \
+ } while (0)
+
+/* Assure that there are at least n bits in the bit accumulator. If there is
+ not enough available input to do that, then return from inflate(). */
+#define NEEDBITS(n) \
+ do { \
+ while (bits < (unsigned)(n)) \
+ PULLBYTE(); \
+ } while (0)
+
+/* Return the low n bits of the bit accumulator (n < 16) */
+#define BITS(n) \
+ ((unsigned)hold & ((1U << (n)) - 1))
+
+/* Remove n bits from the bit accumulator */
+#define DROPBITS(n) \
+ do { \
+ hold >>= (n); \
+ bits -= (unsigned)(n); \
+ } while (0)
+
+/* Remove zero to seven bits as needed to go to a byte boundary */
+#define BYTEBITS() \
+ do { \
+ hold >>= bits & 7; \
+ bits -= bits & 7; \
+ } while (0)
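+
+/* For example: starting from hold == 0 and bits == 0, PULLBYTE() on an
+ input byte of 0x5d leaves hold == 0x5d and bits == 8. BITS(3) then
+ reads the low bits 101 == 5, and DROPBITS(3) leaves hold == 0x0b,
+ bits == 5. */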
+
+/*
+ inflate() uses a state machine to process as much input data and generate as
+ much output data as possible before returning. The state machine is
+ structured roughly as follows:
+
+ for (;;) switch (state) {
+ ...
+ case STATEn:
+ if (not enough input data or output space to make progress)
+ return;
+ ... make progress ...
+ state = STATEm;
+ break;
+ ...
+ }
+
+ so when inflate() is called again, the same case is attempted again, and
+ if the appropriate resources are provided, the machine proceeds to the
+ next state. The NEEDBITS() macro is usually the way the state evaluates
+ whether it can proceed or should return. NEEDBITS() does the return if
+ the requested bits are not available. The typical use of the BITS macros
+ is:
+
+ NEEDBITS(n);
+ ... do something with BITS(n) ...
+ DROPBITS(n);
+
+ where NEEDBITS(n) either returns from inflate() if there isn't enough
+ input left to load n bits into the accumulator, or it continues. BITS(n)
+ gives the low n bits in the accumulator. When done, DROPBITS(n) drops
+ the low n bits off the accumulator. INITBITS() clears the accumulator
+ and sets the number of available bits to zero. BYTEBITS() discards just
+ enough bits to put the accumulator on a byte boundary. After BYTEBITS()
+ and a NEEDBITS(8), then BITS(8) would return the next byte in the stream.
+
+ NEEDBITS(n) uses PULLBYTE() to get an available byte of input, or to return
+ if there is no input available. The decoding of variable length codes uses
+ PULLBYTE() directly in order to pull just enough bytes to decode the next
+ code, and no more.
+
+ Some states loop until they get enough input, making sure that enough
+ state information is maintained to continue the loop where it left off
+ if NEEDBITS() returns in the loop. For example, want, need, and keep
+ would all have to actually be part of the saved state in case NEEDBITS()
+ returns:
+
+ case STATEw:
+ while (want < need) {
+ NEEDBITS(n);
+ keep[want++] = BITS(n);
+ DROPBITS(n);
+ }
+ state = STATEx;
+ case STATEx:
+
+ As shown above, if the next state is also the next case, then the break
+ is omitted.
+
+ A state may also return if there is not enough output space available to
+ complete that state. Those states are copying stored data, writing a
+ literal byte, and copying a matching string.
+
+ When returning, a "goto inf_leave" is used to update the total counters,
+ update the check value, and determine whether any progress has been made
+ during that inflate() call in order to return the proper return code.
+ Progress is defined as a change in either strm->avail_in or strm->avail_out.
+ When there is a window, goto inf_leave will update the window with the last
+ output written. If a goto inf_leave occurs in the middle of decompression
+ and there is no window currently, goto inf_leave will create one and copy
+ output to the window for the next call of inflate().
+
+ In this implementation, the flush parameter of inflate() only affects the
+ return code (per zlib.h). inflate() always writes as much as possible to
+ strm->next_out, given the space available and the provided input--the effect
+ documented in zlib.h of Z_SYNC_FLUSH. Furthermore, inflate() always defers
+ the allocation of and copying into a sliding window until necessary, which
+ provides the effect documented in zlib.h for Z_FINISH when the entire input
+ stream is available. So the only thing the flush parameter actually does is:
+ when flush is set to Z_FINISH, inflate() cannot return Z_OK. Instead it
+ will return Z_BUF_ERROR if it has not reached the end of the stream.
+ */
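+
+/*
+ A minimal sketch of the calling sequence as seen from a kernel user of
+ this API (buffer names here are illustrative, not part of this file):
+
+ struct z_stream_s strm;
+ int ret;
+
+ strm.workspace = vmalloc(zlib_inflate_workspacesize());
+ if (!strm.workspace)
+ return -ENOMEM;
+ strm.next_in = src_buf; // illustrative compressed input
+ strm.avail_in = src_len;
+ strm.next_out = dst_buf; // illustrative output buffer
+ strm.avail_out = dst_len;
+ ret = zlib_inflateInit2(&strm, MAX_WBITS); // expect a zlib header
+ if (ret == Z_OK)
+ ret = zlib_inflate(&strm, Z_FINISH);
+ zlib_inflateEnd(&strm);
+ vfree(strm.workspace);
+
+ With flush == Z_FINISH and the whole input present, anything other than
+ Z_STREAM_END (typically Z_BUF_ERROR, per the note above) means the
+ stream did not decompress completely.
+ */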
+
+int zlib_inflate(z_streamp strm, int flush)
+{
+ struct inflate_state *state;
+ const unsigned char *next; /* next input */
+ unsigned char *put; /* next output */
+ unsigned have, left; /* available input and output */
+ unsigned long hold; /* bit buffer */
+ unsigned bits; /* bits in bit buffer */
+ unsigned in, out; /* save starting available input and output */
+ unsigned copy; /* number of stored or match bytes to copy */
+ unsigned char *from; /* where to copy match bytes from */
+ code this; /* current decoding table entry */
+ code last; /* parent table entry */
+ unsigned len; /* length to copy for repeats, bits to drop */
+ int ret; /* return code */
+ static const unsigned short order[19] = /* permutation of code lengths */
+ {16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
+
+ /* Do not check for strm->next_out == NULL here as ppc zImage
+ inflates to strm->next_out = 0 */
+
+ if (strm == NULL || strm->state == NULL ||
+ (strm->next_in == NULL && strm->avail_in != 0))
+ return Z_STREAM_ERROR;
+
+ state = (struct inflate_state *)strm->state;
+
+ if (state->mode == TYPE) state->mode = TYPEDO; /* skip check */
+ LOAD();
+ in = have;
+ out = left;
+ ret = Z_OK;
+ for (;;)
+ switch (state->mode) {
+ case HEAD:
+ if (state->wrap == 0) {
+ state->mode = TYPEDO;
+ break;
+ }
+ NEEDBITS(16);
+ if (
+ ((BITS(8) << 8) + (hold >> 8)) % 31) {
+ strm->msg = (char *)"incorrect header check";
+ state->mode = BAD;
+ break;
+ }
+ if (BITS(4) != Z_DEFLATED) {
+ strm->msg = (char *)"unknown compression method";
+ state->mode = BAD;
+ break;
+ }
+ DROPBITS(4);
+ len = BITS(4) + 8;
+ if (len > state->wbits) {
+ strm->msg = (char *)"invalid window size";
+ state->mode = BAD;
+ break;
+ }
+ state->dmax = 1U << len;
+ strm->adler = state->check = zlib_adler32(0L, NULL, 0);
+ state->mode = hold & 0x200 ? DICTID : TYPE;
+ INITBITS();
+ break;
+ case DICTID:
+ NEEDBITS(32);
+ strm->adler = state->check = REVERSE(hold);
+ INITBITS();
+ state->mode = DICT;
+ fallthrough;
+ case DICT:
+ if (state->havedict == 0) {
+ RESTORE();
+ return Z_NEED_DICT;
+ }
+ strm->adler = state->check = zlib_adler32(0L, NULL, 0);
+ state->mode = TYPE;
+ fallthrough;
+ case TYPE:
+ if (flush == Z_BLOCK) goto inf_leave;
+ fallthrough;
+ case TYPEDO:
+ INFLATE_TYPEDO_HOOK(strm, flush);
+ if (state->last) {
+ BYTEBITS();
+ state->mode = CHECK;
+ break;
+ }
+ NEEDBITS(3);
+ state->last = BITS(1);
+ DROPBITS(1);
+ switch (BITS(2)) {
+ case 0: /* stored block */
+ state->mode = STORED;
+ break;
+ case 1: /* fixed block */
+ zlib_fixedtables(state);
+ state->mode = LEN; /* decode codes */
+ break;
+ case 2: /* dynamic block */
+ state->mode = TABLE;
+ break;
+ case 3:
+ strm->msg = (char *)"invalid block type";
+ state->mode = BAD;
+ }
+ DROPBITS(2);
+ break;
+ case STORED:
+ BYTEBITS(); /* go to byte boundary */
+ NEEDBITS(32);
+ if ((hold & 0xffff) != ((hold >> 16) ^ 0xffff)) {
+ strm->msg = (char *)"invalid stored block lengths";
+ state->mode = BAD;
+ break;
+ }
+ state->length = (unsigned)hold & 0xffff;
+ INITBITS();
+ state->mode = COPY;
+ fallthrough;
+ case COPY:
+ copy = state->length;
+ if (copy) {
+ if (copy > have) copy = have;
+ if (copy > left) copy = left;
+ if (copy == 0) goto inf_leave;
+ memcpy(put, next, copy);
+ have -= copy;
+ next += copy;
+ left -= copy;
+ put += copy;
+ state->length -= copy;
+ break;
+ }
+ state->mode = TYPE;
+ break;
+ case TABLE:
+ NEEDBITS(14);
+ state->nlen = BITS(5) + 257;
+ DROPBITS(5);
+ state->ndist = BITS(5) + 1;
+ DROPBITS(5);
+ state->ncode = BITS(4) + 4;
+ DROPBITS(4);
+#ifndef PKZIP_BUG_WORKAROUND
+ if (state->nlen > 286 || state->ndist > 30) {
+ strm->msg = (char *)"too many length or distance symbols";
+ state->mode = BAD;
+ break;
+ }
+#endif
+ state->have = 0;
+ state->mode = LENLENS;
+ fallthrough;
+ case LENLENS:
+ while (state->have < state->ncode) {
+ NEEDBITS(3);
+ state->lens[order[state->have++]] = (unsigned short)BITS(3);
+ DROPBITS(3);
+ }
+ while (state->have < 19)
+ state->lens[order[state->have++]] = 0;
+ state->next = state->codes;
+ state->lencode = (code const *)(state->next);
+ state->lenbits = 7;
+ ret = zlib_inflate_table(CODES, state->lens, 19, &(state->next),
+ &(state->lenbits), state->work);
+ if (ret) {
+ strm->msg = (char *)"invalid code lengths set";
+ state->mode = BAD;
+ break;
+ }
+ state->have = 0;
+ state->mode = CODELENS;
+ fallthrough;
+ case CODELENS:
+ while (state->have < state->nlen + state->ndist) {
+ for (;;) {
+ this = state->lencode[BITS(state->lenbits)];
+ if ((unsigned)(this.bits) <= bits) break;
+ PULLBYTE();
+ }
+ if (this.val < 16) {
+ NEEDBITS(this.bits);
+ DROPBITS(this.bits);
+ state->lens[state->have++] = this.val;
+ }
+ else {
+ if (this.val == 16) {
+ NEEDBITS(this.bits + 2);
+ DROPBITS(this.bits);
+ if (state->have == 0) {
+ strm->msg = (char *)"invalid bit length repeat";
+ state->mode = BAD;
+ break;
+ }
+ len = state->lens[state->have - 1];
+ copy = 3 + BITS(2);
+ DROPBITS(2);
+ }
+ else if (this.val == 17) {
+ NEEDBITS(this.bits + 3);
+ DROPBITS(this.bits);
+ len = 0;
+ copy = 3 + BITS(3);
+ DROPBITS(3);
+ }
+ else {
+ NEEDBITS(this.bits + 7);
+ DROPBITS(this.bits);
+ len = 0;
+ copy = 11 + BITS(7);
+ DROPBITS(7);
+ }
+ if (state->have + copy > state->nlen + state->ndist) {
+ strm->msg = (char *)"invalid bit length repeat";
+ state->mode = BAD;
+ break;
+ }
+ while (copy--)
+ state->lens[state->have++] = (unsigned short)len;
+ }
+ }
+
+ /* handle error breaks in while */
+ if (state->mode == BAD) break;
+
+ /* build code tables */
+ state->next = state->codes;
+ state->lencode = (code const *)(state->next);
+ state->lenbits = 9;
+ ret = zlib_inflate_table(LENS, state->lens, state->nlen, &(state->next),
+ &(state->lenbits), state->work);
+ if (ret) {
+ strm->msg = (char *)"invalid literal/lengths set";
+ state->mode = BAD;
+ break;
+ }
+ state->distcode = (code const *)(state->next);
+ state->distbits = 6;
+ ret = zlib_inflate_table(DISTS, state->lens + state->nlen, state->ndist,
+ &(state->next), &(state->distbits), state->work);
+ if (ret) {
+ strm->msg = (char *)"invalid distances set";
+ state->mode = BAD;
+ break;
+ }
+ state->mode = LEN;
+ fallthrough;
+ case LEN:
+ if (have >= 6 && left >= 258) {
+ RESTORE();
+ inflate_fast(strm, out);
+ LOAD();
+ break;
+ }
+ for (;;) {
+ this = state->lencode[BITS(state->lenbits)];
+ if ((unsigned)(this.bits) <= bits) break;
+ PULLBYTE();
+ }
+ if (this.op && (this.op & 0xf0) == 0) {
+ last = this;
+ for (;;) {
+ this = state->lencode[last.val +
+ (BITS(last.bits + last.op) >> last.bits)];
+ if ((unsigned)(last.bits + this.bits) <= bits) break;
+ PULLBYTE();
+ }
+ DROPBITS(last.bits);
+ }
+ DROPBITS(this.bits);
+ state->length = (unsigned)this.val;
+ if ((int)(this.op) == 0) {
+ state->mode = LIT;
+ break;
+ }
+ if (this.op & 32) {
+ state->mode = TYPE;
+ break;
+ }
+ if (this.op & 64) {
+ strm->msg = (char *)"invalid literal/length code";
+ state->mode = BAD;
+ break;
+ }
+ state->extra = (unsigned)(this.op) & 15;
+ state->mode = LENEXT;
+ fallthrough;
+ case LENEXT:
+ if (state->extra) {
+ NEEDBITS(state->extra);
+ state->length += BITS(state->extra);
+ DROPBITS(state->extra);
+ }
+ state->mode = DIST;
+ fallthrough;
+ case DIST:
+ for (;;) {
+ this = state->distcode[BITS(state->distbits)];
+ if ((unsigned)(this.bits) <= bits) break;
+ PULLBYTE();
+ }
+ if ((this.op & 0xf0) == 0) {
+ last = this;
+ for (;;) {
+ this = state->distcode[last.val +
+ (BITS(last.bits + last.op) >> last.bits)];
+ if ((unsigned)(last.bits + this.bits) <= bits) break;
+ PULLBYTE();
+ }
+ DROPBITS(last.bits);
+ }
+ DROPBITS(this.bits);
+ if (this.op & 64) {
+ strm->msg = (char *)"invalid distance code";
+ state->mode = BAD;
+ break;
+ }
+ state->offset = (unsigned)this.val;
+ state->extra = (unsigned)(this.op) & 15;
+ state->mode = DISTEXT;
+ fallthrough;
+ case DISTEXT:
+ if (state->extra) {
+ NEEDBITS(state->extra);
+ state->offset += BITS(state->extra);
+ DROPBITS(state->extra);
+ }
+#ifdef INFLATE_STRICT
+ if (state->offset > state->dmax) {
+ strm->msg = (char *)"invalid distance too far back";
+ state->mode = BAD;
+ break;
+ }
+#endif
+ if (state->offset > state->whave + out - left) {
+ strm->msg = (char *)"invalid distance too far back";
+ state->mode = BAD;
+ break;
+ }
+ state->mode = MATCH;
+ fallthrough;
+ case MATCH:
+ if (left == 0) goto inf_leave;
+ copy = out - left;
+ if (state->offset > copy) { /* copy from window */
+ copy = state->offset - copy;
+ if (copy > state->write) {
+ copy -= state->write;
+ from = state->window + (state->wsize - copy);
+ }
+ else
+ from = state->window + (state->write - copy);
+ if (copy > state->length) copy = state->length;
+ }
+ else { /* copy from output */
+ from = put - state->offset;
+ copy = state->length;
+ }
+ if (copy > left) copy = left;
+ left -= copy;
+ state->length -= copy;
+ do {
+ *put++ = *from++;
+ } while (--copy);
+ if (state->length == 0) state->mode = LEN;
+ break;
+ case LIT:
+ if (left == 0) goto inf_leave;
+ *put++ = (unsigned char)(state->length);
+ left--;
+ state->mode = LEN;
+ break;
+ case CHECK:
+ if (state->wrap) {
+ NEEDBITS(32);
+ out -= left;
+ strm->total_out += out;
+ state->total += out;
+ if (INFLATE_NEED_CHECKSUM(strm) && out)
+ strm->adler = state->check =
+ UPDATE(state->check, put - out, out);
+ out = left;
+ if ((
+ REVERSE(hold)) != state->check) {
+ strm->msg = (char *)"incorrect data check";
+ state->mode = BAD;
+ break;
+ }
+ INITBITS();
+ }
+ state->mode = DONE;
+ fallthrough;
+ case DONE:
+ ret = Z_STREAM_END;
+ goto inf_leave;
+ case BAD:
+ ret = Z_DATA_ERROR;
+ goto inf_leave;
+ case MEM:
+ return Z_MEM_ERROR;
+ case SYNC:
+ default:
+ return Z_STREAM_ERROR;
+ }
+
+ /*
+ Return from inflate(), updating the total counts and the check value.
+ If there was no progress during the inflate() call, return a buffer
+ error. Call zlib_updatewindow() to create and/or update the window state.
+ */
+ inf_leave:
+ RESTORE();
+ if (INFLATE_NEED_UPDATEWINDOW(strm) &&
+ (state->wsize || (state->mode < CHECK && out != strm->avail_out)))
+ zlib_updatewindow(strm, out);
+
+ in -= strm->avail_in;
+ out -= strm->avail_out;
+ strm->total_in += in;
+ strm->total_out += out;
+ state->total += out;
+ if (INFLATE_NEED_CHECKSUM(strm) && state->wrap && out)
+ strm->adler = state->check =
+ UPDATE(state->check, strm->next_out - out, out);
+
+ strm->data_type = state->bits + (state->last ? 64 : 0) +
+ (state->mode == TYPE ? 128 : 0);
+
+ if (flush == Z_PACKET_FLUSH && ret == Z_OK &&
+ strm->avail_out != 0 && strm->avail_in == 0)
+ return zlib_inflateSyncPacket(strm);
+
+ if (((in == 0 && out == 0) || flush == Z_FINISH) && ret == Z_OK)
+ ret = Z_BUF_ERROR;
+
+ return ret;
+}
+
+int zlib_inflateEnd(z_streamp strm)
+{
+ if (strm == NULL || strm->state == NULL)
+ return Z_STREAM_ERROR;
+ return Z_OK;
+}
+
+/*
+ * This subroutine adds the data at next_in/avail_in to the output history
+ * without performing any output. The output buffer must be "caught up";
+ * i.e. no pending output but this should always be the case. The state must
+ * be waiting on the start of a block (i.e. mode == TYPE or HEAD). On exit,
+ * the output will also be caught up, and the checksum will have been updated
+ * if need be.
+ */
+int zlib_inflateIncomp(z_stream *z)
+{
+ struct inflate_state *state = (struct inflate_state *)z->state;
+ Byte *saved_no = z->next_out;
+ uInt saved_ao = z->avail_out;
+
+ if (state->mode != TYPE && state->mode != HEAD)
+ return Z_DATA_ERROR;
+
+ /* Setup some variables to allow misuse of updateWindow */
+ z->avail_out = 0;
+ z->next_out = (unsigned char*)z->next_in + z->avail_in;
+
+ zlib_updatewindow(z, z->avail_in);
+
+ /* Restore saved variables */
+ z->avail_out = saved_ao;
+ z->next_out = saved_no;
+
+ z->adler = state->check =
+ UPDATE(state->check, z->next_in, z->avail_in);
+
+ z->total_out += z->avail_in;
+ z->total_in += z->avail_in;
+ z->next_in += z->avail_in;
+ state->total += z->avail_in;
+ z->avail_in = 0;
+
+ return Z_OK;
+}
diff --git a/lib/zlib_inflate/inflate.h b/lib/zlib_inflate/inflate.h
new file mode 100644
index 000000000..f79337ddf
--- /dev/null
+++ b/lib/zlib_inflate/inflate.h
@@ -0,0 +1,119 @@
+#ifndef INFLATE_H
+#define INFLATE_H
+
+/* inflate.h -- internal inflate state definition
+ * Copyright (C) 1995-2004 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+ part of the implementation of the compression library and is
+ subject to change. Applications should only use zlib.h.
+ */
+
+#include "inftrees.h"
+
+/* Possible inflate modes between inflate() calls */
+typedef enum {
+ HEAD, /* i: waiting for magic header */
+ FLAGS, /* i: waiting for method and flags (gzip) */
+ TIME, /* i: waiting for modification time (gzip) */
+ OS, /* i: waiting for extra flags and operating system (gzip) */
+ EXLEN, /* i: waiting for extra length (gzip) */
+ EXTRA, /* i: waiting for extra bytes (gzip) */
+ NAME, /* i: waiting for end of file name (gzip) */
+ COMMENT, /* i: waiting for end of comment (gzip) */
+ HCRC, /* i: waiting for header crc (gzip) */
+ DICTID, /* i: waiting for dictionary check value */
+ DICT, /* waiting for inflateSetDictionary() call */
+ TYPE, /* i: waiting for type bits, including last-flag bit */
+ TYPEDO, /* i: same, but skip check to exit inflate on new block */
+ STORED, /* i: waiting for stored size (length and complement) */
+ COPY, /* i/o: waiting for input or output to copy stored block */
+ TABLE, /* i: waiting for dynamic block table lengths */
+ LENLENS, /* i: waiting for code length code lengths */
+ CODELENS, /* i: waiting for length/lit and distance code lengths */
+ LEN, /* i: waiting for length/lit code */
+ LENEXT, /* i: waiting for length extra bits */
+ DIST, /* i: waiting for distance code */
+ DISTEXT, /* i: waiting for distance extra bits */
+ MATCH, /* o: waiting for output space to copy string */
+ LIT, /* o: waiting for output space to write literal */
+ CHECK, /* i: waiting for 32-bit check value */
+ LENGTH, /* i: waiting for 32-bit length (gzip) */
+ DONE, /* finished check, done -- remain here until reset */
+ BAD, /* got a data error -- remain here until reset */
+ MEM, /* got an inflate() memory error -- remain here until reset */
+ SYNC /* looking for synchronization bytes to restart inflate() */
+} inflate_mode;
+
+/*
+ State transitions between above modes -
+
+ (most modes can go to the BAD or MEM mode -- not shown for clarity)
+
+ Process header:
+ HEAD -> (gzip) or (zlib)
+ (gzip) -> FLAGS -> TIME -> OS -> EXLEN -> EXTRA -> NAME
+ NAME -> COMMENT -> HCRC -> TYPE
+ (zlib) -> DICTID or TYPE
+ DICTID -> DICT -> TYPE
+ Read deflate blocks:
+ TYPE -> STORED or TABLE or LEN or CHECK
+ STORED -> COPY -> TYPE
+ TABLE -> LENLENS -> CODELENS -> LEN
+ Read deflate codes:
+ LEN -> LENEXT or LIT or TYPE
+ LENEXT -> DIST -> DISTEXT -> MATCH -> LEN
+ LIT -> LEN
+ Process trailer:
+ CHECK -> LENGTH -> DONE
+ */
+
+/* state maintained between inflate() calls. Approximately 7K bytes. */
+struct inflate_state {
+ inflate_mode mode; /* current inflate mode */
+ int last; /* true if processing last block */
+ int wrap; /* bit 0 true for zlib, bit 1 true for gzip */
+ int havedict; /* true if dictionary provided */
+ int flags; /* gzip header method and flags (0 if zlib) */
+ unsigned dmax; /* zlib header max distance (INFLATE_STRICT) */
+ unsigned long check; /* protected copy of check value */
+ unsigned long total; /* protected copy of output count */
+ /* gz_headerp head; */ /* where to save gzip header information */
+ /* sliding window */
+ unsigned wbits; /* log base 2 of requested window size */
+ unsigned wsize; /* window size or zero if not using window */
+ unsigned whave; /* valid bytes in the window */
+ unsigned write; /* window write index */
+ unsigned char *window; /* allocated sliding window, if needed */
+ /* bit accumulator */
+ unsigned long hold; /* input bit accumulator */
+ unsigned bits; /* number of bits in "in" */
+ /* for string and stored block copying */
+ unsigned length; /* literal or length of data to copy */
+ unsigned offset; /* distance back to copy string from */
+ /* for table and code decoding */
+ unsigned extra; /* extra bits needed */
+ /* fixed and dynamic code tables */
+ code const *lencode; /* starting table for length/literal codes */
+ code const *distcode; /* starting table for distance codes */
+ unsigned lenbits; /* index bits for lencode */
+ unsigned distbits; /* index bits for distcode */
+ /* dynamic table building */
+ unsigned ncode; /* number of code length code lengths */
+ unsigned nlen; /* number of length code lengths */
+ unsigned ndist; /* number of distance code lengths */
+ unsigned have; /* number of code lengths in lens[] */
+ code *next; /* next available space in codes[] */
+ unsigned short lens[320]; /* temporary storage for code lengths */
+ unsigned short work[288]; /* work area for code table building */
+ code codes[ENOUGH]; /* space for code tables */
+};
+
+/* Reverse the bytes in a 32-bit value */
+#define REVERSE(q) \
+ ((((q) >> 24) & 0xff) + (((q) >> 8) & 0xff00) + \
+ (((q) & 0xff00) << 8) + (((q) & 0xff) << 24))
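+/* e.g. REVERSE(0x0a0b0c0d) == 0x0d0c0b0a */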
+
+#endif
diff --git a/lib/zlib_inflate/inflate_syms.c b/lib/zlib_inflate/inflate_syms.c
new file mode 100644
index 000000000..9720114c0
--- /dev/null
+++ b/lib/zlib_inflate/inflate_syms.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * linux/lib/zlib_inflate/inflate_syms.c
+ *
+ * Exported symbols for the inflate functionality.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <linux/zlib.h>
+
+EXPORT_SYMBOL(zlib_inflate_workspacesize);
+EXPORT_SYMBOL(zlib_inflate);
+EXPORT_SYMBOL(zlib_inflateInit2);
+EXPORT_SYMBOL(zlib_inflateEnd);
+EXPORT_SYMBOL(zlib_inflateReset);
+EXPORT_SYMBOL(zlib_inflateIncomp);
+EXPORT_SYMBOL(zlib_inflate_blob);
+MODULE_LICENSE("GPL");
diff --git a/lib/zlib_inflate/inftrees.c b/lib/zlib_inflate/inftrees.c
new file mode 100644
index 000000000..028943052
--- /dev/null
+++ b/lib/zlib_inflate/inftrees.c
@@ -0,0 +1,315 @@
+/* inftrees.c -- generate Huffman trees for efficient decoding
+ * Copyright (C) 1995-2005 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+#include <linux/zutil.h>
+#include "inftrees.h"
+
+#define MAXBITS 15
+
+/*
+ Build a set of tables to decode the provided canonical Huffman code.
+ The code lengths are lens[0..codes-1]. The result starts at *table,
+ whose indices are 0..2^bits-1. work is a writable array of at least
+ lens shorts, which is used as a work area. type is the type of code
+ to be generated, CODES, LENS, or DISTS. On return, zero is success,
+ -1 is an invalid code, and +1 means that ENOUGH isn't enough. table
+ on return points to the next available entry's address. bits is the
+ requested root table index bits, and on return it is the actual root
+ table index bits. It will differ if the request is greater than the
+ longest code or if it is less than the shortest code.
+ */
+int zlib_inflate_table(codetype type, unsigned short *lens, unsigned codes,
+ code **table, unsigned *bits, unsigned short *work)
+{
+ unsigned len; /* a code's length in bits */
+ unsigned sym; /* index of code symbols */
+ unsigned min, max; /* minimum and maximum code lengths */
+ unsigned root; /* number of index bits for root table */
+ unsigned curr; /* number of index bits for current table */
+ unsigned drop; /* code bits to drop for sub-table */
+ int left; /* number of prefix codes available */
+ unsigned used; /* code entries in table used */
+ unsigned huff; /* Huffman code */
+ unsigned incr; /* for incrementing code, index */
+ unsigned fill; /* index for replicating entries */
+ unsigned low; /* low bits for current root entry */
+ unsigned mask; /* mask for low root bits */
+ code this; /* table entry for duplication */
+ code *next; /* next available space in table */
+ const unsigned short *base; /* base value table to use */
+ const unsigned short *extra; /* extra bits table to use */
+ int end; /* use base and extra for symbol > end */
+ unsigned short count[MAXBITS+1]; /* number of codes of each length */
+ unsigned short offs[MAXBITS+1]; /* offsets in table for each length */
+ static const unsigned short lbase[31] = { /* Length codes 257..285 base */
+ 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
+ 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0};
+ static const unsigned short lext[31] = { /* Length codes 257..285 extra */
+ 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 18, 18, 18, 18,
+ 19, 19, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 16, 201, 196};
+ static const unsigned short dbase[32] = { /* Distance codes 0..29 base */
+ 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193,
+ 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145,
+ 8193, 12289, 16385, 24577, 0, 0};
+ static const unsigned short dext[32] = { /* Distance codes 0..29 extra */
+ 16, 16, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22,
+ 23, 23, 24, 24, 25, 25, 26, 26, 27, 27,
+ 28, 28, 29, 29, 64, 64};
+
+ /*
+ Process a set of code lengths to create a canonical Huffman code. The
+ code lengths are lens[0..codes-1]. Each length corresponds to the
+ symbols 0..codes-1. The Huffman code is generated by first sorting the
+ symbols by length from short to long, and retaining the symbol order
+ for codes with equal lengths. Then the code starts with all zero bits
+ for the first code of the shortest length, and the codes are integer
+ increments for the same length, and zeros are appended as the length
+ increases. For the deflate format, these bits are stored backwards
+ from their more natural integer increment ordering, and so when the
+ decoding tables are built in the large loop below, the integer codes
+ are incremented backwards.
+
+ This routine assumes, but does not check, that all of the entries in
+ lens[] are in the range 0..MAXBITS. The caller must assure this.
+ 1..MAXBITS is interpreted as that code length. zero means that that
+ symbol does not occur in this code.
+
+ The codes are sorted by computing a count of codes for each length,
+ creating from that a table of starting indices for each length in the
+ sorted table, and then entering the symbols in order in the sorted
+ table. The sorted table is work[], with that space being provided by
+ the caller.
+
+ The length counts are used for other purposes as well, i.e. finding
+ the minimum and maximum length codes, determining if there are any
+ codes at all, checking for a valid set of lengths, and looking ahead
+ at length counts to determine sub-table sizes when building the
+ decoding tables.
+ */
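+
+ /*
+ As a small worked example: for four symbols with code lengths
+ lens[] = {1, 2, 3, 3}, the counts are count[1] = 1, count[2] = 1,
+ count[3] = 2, and the canonical codes are
+
+ symbol 0 (len 1): 0
+ symbol 1 (len 2): 10
+ symbol 2 (len 3): 110
+ symbol 3 (len 3): 111
+
+ i.e. the first code of each longer length is the previous code plus
+ one, shifted left. In the deflate format these bits appear in the
+ stream least-significant-bit first, which is why the loop below
+ increments the codes backwards.
+ */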
+
+ /* accumulate lengths for codes (assumes lens[] all in 0..MAXBITS) */
+ for (len = 0; len <= MAXBITS; len++)
+ count[len] = 0;
+ for (sym = 0; sym < codes; sym++)
+ count[lens[sym]]++;
+
+ /* bound code lengths, force root to be within code lengths */
+ root = *bits;
+ for (max = MAXBITS; max >= 1; max--)
+ if (count[max] != 0) break;
+ if (root > max) root = max;
+ if (max == 0) { /* no symbols to code at all */
+ this.op = (unsigned char)64; /* invalid code marker */
+ this.bits = (unsigned char)1;
+ this.val = (unsigned short)0;
+ *(*table)++ = this; /* make a table to force an error */
+ *(*table)++ = this;
+ *bits = 1;
+ return 0; /* no symbols, but wait for decoding to report error */
+ }
+ for (min = 1; min < MAXBITS; min++)
+ if (count[min] != 0) break;
+ if (root < min) root = min;
+
+ /* check for an over-subscribed or incomplete set of lengths */
+ left = 1;
+ for (len = 1; len <= MAXBITS; len++) {
+ left <<= 1;
+ left -= count[len];
+ if (left < 0) return -1; /* over-subscribed */
+ }
+ if (left > 0 && (type == CODES || max != 1))
+ return -1; /* incomplete set */
+
+ /* generate offsets into symbol table for each length for sorting */
+ offs[1] = 0;
+ for (len = 1; len < MAXBITS; len++)
+ offs[len + 1] = offs[len] + count[len];
+
+ /* sort symbols by length, by symbol order within each length */
+ for (sym = 0; sym < codes; sym++)
+ if (lens[sym] != 0) work[offs[lens[sym]]++] = (unsigned short)sym;
+
+ /*
+ Create and fill in decoding tables. In this loop, the table being
+ filled is at next and has curr index bits. The code being used is huff
+ with length len. That code is converted to an index by dropping drop
+ bits off of the bottom. For codes where len is less than drop + curr,
+ those top drop + curr - len bits are incremented through all values to
+ fill the table with replicated entries.
+
+ root is the number of index bits for the root table. When len exceeds
+ root, sub-tables are created pointed to by the root entry with an index
+ of the low root bits of huff. This is saved in low to check for when a
+ new sub-table should be started. drop is zero when the root table is
+ being filled, and drop is root when sub-tables are being filled.
+
+ When a new sub-table is needed, it is necessary to look ahead in the
+ code lengths to determine what size sub-table is needed. The length
+ counts are used for this, and so count[] is decremented as codes are
+ entered in the tables.
+
+ used keeps track of how many table entries have been allocated from the
+ provided *table space. It is checked when a LENS table is being made
+ against the space in *table, ENOUGH, minus the maximum space needed by
+ the worst case distance code, MAXD. This should never happen, but the
+ sufficiency of ENOUGH has not been proven exhaustively, hence the check.
+ This assumes that when type == LENS, bits == 9.
+
+ sym increments through all symbols, and the loop terminates when
+ all codes of length max, i.e. all codes, have been processed. This
+ routine permits incomplete codes, so another loop after this one fills
+ in the rest of the decoding tables with invalid code markers.
+ */
+
+ /* set up for code type */
+ switch (type) {
+ case CODES:
+ base = extra = work; /* dummy value--not used */
+ end = 19;
+ break;
+ case LENS:
+ base = lbase;
+ base -= 257;
+ extra = lext;
+ extra -= 257;
+ end = 256;
+ break;
+ default: /* DISTS */
+ base = dbase;
+ extra = dext;
+ end = -1;
+ }
+
+ /* initialize state for loop */
+ huff = 0; /* starting code */
+ sym = 0; /* starting code symbol */
+ len = min; /* starting code length */
+ next = *table; /* current table to fill in */
+ curr = root; /* current table index bits */
+ drop = 0; /* current bits to drop from code for index */
+ low = (unsigned)(-1); /* trigger new sub-table when len > root */
+ used = 1U << root; /* use root table entries */
+ mask = used - 1; /* mask for comparing low */
+
+ /* check available table space */
+ if (type == LENS && used >= ENOUGH - MAXD)
+ return 1;
+
+ /* process all codes and make table entries */
+ for (;;) {
+ /* create table entry */
+ this.bits = (unsigned char)(len - drop);
+ if ((int)(work[sym]) < end) {
+ this.op = (unsigned char)0;
+ this.val = work[sym];
+ }
+ else if ((int)(work[sym]) > end) {
+ this.op = (unsigned char)(extra[work[sym]]);
+ this.val = base[work[sym]];
+ }
+ else {
+ this.op = (unsigned char)(32 + 64); /* end of block */
+ this.val = 0;
+ }
+
+ /* replicate for those indices with low len bits equal to huff */
+ incr = 1U << (len - drop);
+ fill = 1U << curr;
+ min = fill; /* save offset to next table */
+ do {
+ fill -= incr;
+ next[(huff >> drop) + fill] = this;
+ } while (fill != 0);
+
+ /* backwards increment the len-bit code huff */
+ incr = 1U << (len - 1);
+ while (huff & incr)
+ incr >>= 1;
+ if (incr != 0) {
+ huff &= incr - 1;
+ huff += incr;
+ }
+ else
+ huff = 0;
+
+ /* go to next symbol, update count, len */
+ sym++;
+ if (--(count[len]) == 0) {
+ if (len == max) break;
+ len = lens[work[sym]];
+ }
+
+ /* create new sub-table if needed */
+ if (len > root && (huff & mask) != low) {
+ /* if first time, transition to sub-tables */
+ if (drop == 0)
+ drop = root;
+
+ /* increment past last table */
+ next += min; /* here min is 1 << curr */
+
+ /* determine length of next table */
+ curr = len - drop;
+ left = (int)(1 << curr);
+ while (curr + drop < max) {
+ left -= count[curr + drop];
+ if (left <= 0) break;
+ curr++;
+ left <<= 1;
+ }
+
+ /* check for enough space */
+ used += 1U << curr;
+ if (type == LENS && used >= ENOUGH - MAXD)
+ return 1;
+
+ /* point entry in root table to sub-table */
+ low = huff & mask;
+ (*table)[low].op = (unsigned char)curr;
+ (*table)[low].bits = (unsigned char)root;
+ (*table)[low].val = (unsigned short)(next - *table);
+ }
+ }
+
+ /*
+ Fill in rest of table for incomplete codes. This loop is similar to the
+ loop above in incrementing huff for table indices. It is assumed that
+ len is equal to curr + drop, so there is no loop needed to increment
+ through high index bits. When the current sub-table is filled, the loop
+ drops back to the root table to fill in any remaining entries there.
+ */
+ this.op = (unsigned char)64; /* invalid code marker */
+ this.bits = (unsigned char)(len - drop);
+ this.val = (unsigned short)0;
+ while (huff != 0) {
+ /* when done with sub-table, drop back to root table */
+ if (drop != 0 && (huff & mask) != low) {
+ drop = 0;
+ len = root;
+ next = *table;
+ this.bits = (unsigned char)len;
+ }
+
+ /* put invalid code marker in table */
+ next[huff >> drop] = this;
+
+ /* backwards increment the len-bit code huff */
+ incr = 1U << (len - 1);
+ while (huff & incr)
+ incr >>= 1;
+ if (incr != 0) {
+ huff &= incr - 1;
+ huff += incr;
+ }
+ else
+ huff = 0;
+ }
+
+ /* set return parameters */
+ *table += used;
+ *bits = root;
+ return 0;
+}
diff --git a/lib/zlib_inflate/inftrees.h b/lib/zlib_inflate/inftrees.h
new file mode 100644
index 000000000..b70b4731a
--- /dev/null
+++ b/lib/zlib_inflate/inftrees.h
@@ -0,0 +1,59 @@
+#ifndef INFTREES_H
+#define INFTREES_H
+
+/* inftrees.h -- header to use inftrees.c
+ * Copyright (C) 1995-2005 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+ part of the implementation of the compression library and is
+ subject to change. Applications should only use zlib.h.
+ */
+
+/* Structure for decoding tables. Each entry provides either the
+ information needed to do the operation requested by the code that
+ indexed that table entry, or it provides a pointer to another
+ table that indexes more bits of the code. op indicates whether
+ the entry is a pointer to another table, a literal, a length or
+ distance, an end-of-block, or an invalid code. For a table
+ pointer, the low four bits of op are the number of index bits of
+ that table. For a length or distance, the low four bits of op
+ are the number of extra bits to get after the code. bits is
+ the number of bits in this code or part of the code to drop off
+ of the bit buffer. val is the actual byte to output in the case
+ of a literal, the base length or distance, or the offset from
+ the current table to the next table. Each entry is four bytes. */
+typedef struct {
+ unsigned char op; /* operation, extra bits, table bits */
+ unsigned char bits; /* bits in this part of the code */
+ unsigned short val; /* offset in table or code value */
+} code;
+
+/* op values as set by inflate_table():
+ 00000000 - literal
+ 0000tttt - table link, tttt != 0 is the number of table index bits
+ 0001eeee - length or distance, eeee is the number of extra bits
+ 01100000 - end of block
+ 01000000 - invalid code
+ */
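+
+/* For example, the fixed-table entry {21, 8, 131} decodes as:
+ op == 21 == 0001 0101, a length code with 5 extra bits;
+ bits == 8, so 8 code bits are consumed;
+ val == 131, the base match length (the extra bits add 0..31).
+ An entry such as {0, 8, 65} is the literal 'A' after an 8-bit code. */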
+
+/* Maximum size of dynamic tree. The maximum found in a long but non-
+ exhaustive search was 1444 code structures (852 for length/literals
+ and 592 for distances, the latter actually the result of an
+ exhaustive search). The true maximum is not known, but the value
+ below is more than safe. */
+#define ENOUGH 2048
+#define MAXD 592
+
+/* Type of code to build for inftable() */
+typedef enum {
+ CODES,
+ LENS,
+ DISTS
+} codetype;
+
+extern int zlib_inflate_table (codetype type, unsigned short *lens,
+ unsigned codes, code **table,
+ unsigned *bits, unsigned short *work);
+#endif
diff --git a/lib/zlib_inflate/infutil.c b/lib/zlib_inflate/infutil.c
new file mode 100644
index 000000000..4824c2cc7
--- /dev/null
+++ b/lib/zlib_inflate/infutil.c
@@ -0,0 +1,49 @@
+#include <linux/zutil.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+/* Utility function: initialize zlib, unpack binary blob, clean up zlib,
+ * return len or negative error code.
+ */
+int zlib_inflate_blob(void *gunzip_buf, unsigned int sz,
+ const void *buf, unsigned int len)
+{
+ const u8 *zbuf = buf;
+ struct z_stream_s *strm;
+ int rc;
+
+ rc = -ENOMEM;
+ strm = kmalloc(sizeof(*strm), GFP_KERNEL);
+ if (strm == NULL)
+ goto gunzip_nomem1;
+ strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
+ if (strm->workspace == NULL)
+ goto gunzip_nomem2;
+
+ /* gzip header (1f,8b,08... 10 bytes total + possible asciz filename)
+ * expected to be stripped from input
+ */
+ strm->next_in = zbuf;
+ strm->avail_in = len;
+ strm->next_out = gunzip_buf;
+ strm->avail_out = sz;
+
+ rc = zlib_inflateInit2(strm, -MAX_WBITS);
+ if (rc == Z_OK) {
+ rc = zlib_inflate(strm, Z_FINISH);
+ /* after Z_FINISH, only Z_STREAM_END is "we unpacked it all" */
+ if (rc == Z_STREAM_END)
+ rc = sz - strm->avail_out;
+ else
+ rc = -EINVAL;
+ zlib_inflateEnd(strm);
+ } else
+ rc = -EINVAL;
+
+ kfree(strm->workspace);
+gunzip_nomem2:
+ kfree(strm);
+gunzip_nomem1:
+ return rc; /* returns decompressed length if successful, else negative error code */
+}
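+
+/* Typical use (buffer names illustrative): given deflate data whose gzip
+ * header has been stripped as noted above,
+ *
+ * int n = zlib_inflate_blob(out_buf, out_len, deflate_data, deflate_len);
+ *
+ * n is the number of decompressed bytes on success, or a negative errno
+ * (-ENOMEM or -EINVAL) on failure.
+ */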
diff --git a/lib/zlib_inflate/infutil.h b/lib/zlib_inflate/infutil.h
new file mode 100644
index 000000000..784ab33b7
--- /dev/null
+++ b/lib/zlib_inflate/infutil.h
@@ -0,0 +1,39 @@
+/* infutil.h -- types and macros common to blocks and codes
+ * Copyright (C) 1995-1998 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+ part of the implementation of the compression library and is
+ subject to change. Applications should only use zlib.h.
+ */
+
+#ifndef _INFUTIL_H
+#define _INFUTIL_H
+
+#include <linux/zlib.h>
+#ifdef CONFIG_ZLIB_DFLTCC
+#include "../zlib_dfltcc/dfltcc.h"
+#include <asm/page.h>
+#endif
+
+/* memory allocation for inflation */
+
+struct inflate_workspace {
+ struct inflate_state inflate_state;
+#ifdef CONFIG_ZLIB_DFLTCC
+ struct dfltcc_state dfltcc_state;
+ unsigned char working_window[(1 << MAX_WBITS) + PAGE_SIZE];
+#else
+ unsigned char working_window[(1 << MAX_WBITS)];
+#endif
+};
+
+#ifdef CONFIG_ZLIB_DFLTCC
+/* dfltcc_state must be doubleword aligned for DFLTCC call */
+static_assert(offsetof(struct inflate_workspace, dfltcc_state) % 8 == 0);
+#endif
+
+#define WS(strm) ((struct inflate_workspace *)(strm->workspace))
+
+#endif
diff --git a/lib/zstd/Makefile b/lib/zstd/Makefile
new file mode 100644
index 000000000..20f08c644
--- /dev/null
+++ b/lib/zstd/Makefile
@@ -0,0 +1,43 @@
+# SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
+# ################################################################
+# Copyright (c) Facebook, Inc.
+# All rights reserved.
+#
+# This source code is licensed under both the BSD-style license (found in the
+# LICENSE file in the root directory of this source tree) and the GPLv2 (found
+# in the COPYING file in the root directory of this source tree).
+# You may select, at your option, one of the above-listed licenses.
+# ################################################################
+obj-$(CONFIG_ZSTD_COMPRESS) += zstd_compress.o
+obj-$(CONFIG_ZSTD_DECOMPRESS) += zstd_decompress.o
+obj-$(CONFIG_ZSTD_COMMON) += zstd_common.o
+
+zstd_compress-y := \
+ zstd_compress_module.o \
+ compress/fse_compress.o \
+ compress/hist.o \
+ compress/huf_compress.o \
+ compress/zstd_compress.o \
+ compress/zstd_compress_literals.o \
+ compress/zstd_compress_sequences.o \
+ compress/zstd_compress_superblock.o \
+ compress/zstd_double_fast.o \
+ compress/zstd_fast.o \
+ compress/zstd_lazy.o \
+ compress/zstd_ldm.o \
+ compress/zstd_opt.o \
+
+zstd_decompress-y := \
+ zstd_decompress_module.o \
+ decompress/huf_decompress.o \
+ decompress/zstd_ddict.o \
+ decompress/zstd_decompress.o \
+ decompress/zstd_decompress_block.o \
+
+zstd_common-y := \
+ zstd_common_module.o \
+ common/debug.o \
+ common/entropy_common.o \
+ common/error_private.o \
+ common/fse_decompress.o \
+ common/zstd_common.o \
diff --git a/lib/zstd/common/bitstream.h b/lib/zstd/common/bitstream.h
new file mode 100644
index 000000000..feef3a1b1
--- /dev/null
+++ b/lib/zstd/common/bitstream.h
@@ -0,0 +1,446 @@
+/* ******************************************************************
+ * bitstream
+ * Part of FSE library
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ *
+ * You can contact the author at :
+ * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+****************************************************************** */
+#ifndef BITSTREAM_H_MODULE
+#define BITSTREAM_H_MODULE
+
+/*
+* This API consists of small unitary functions, which must be inlined for best performance.
+* Since link-time-optimization is not available for all compilers,
+* these functions are defined in a .h to be included.
+*/
+
+/*-****************************************
+* Dependencies
+******************************************/
+#include "mem.h" /* unaligned access routines */
+#include "compiler.h" /* UNLIKELY() */
+#include "debug.h" /* assert(), DEBUGLOG(), RAWLOG() */
+#include "error_private.h" /* error codes and messages */
+
+
+/*=========================================
+* Target specific
+=========================================*/
+
+#define STREAM_ACCUMULATOR_MIN_32 25
+#define STREAM_ACCUMULATOR_MIN_64 57
+#define STREAM_ACCUMULATOR_MIN ((U32)(MEM_32bits() ? STREAM_ACCUMULATOR_MIN_32 : STREAM_ACCUMULATOR_MIN_64))
+
+
+/*-******************************************
+* bitStream encoding API (write forward)
+********************************************/
+/* bitStream can mix input from multiple sources.
+ * A critical property of these streams is that they encode and decode in **reverse** direction.
+ * So the first bit sequence you add will be the last to be read, like a LIFO stack.
+ */
+typedef struct {
+ size_t bitContainer;
+ unsigned bitPos;
+ char* startPtr;
+ char* ptr;
+ char* endPtr;
+} BIT_CStream_t;
+
+MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC, void* dstBuffer, size_t dstCapacity);
+MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC, size_t value, unsigned nbBits);
+MEM_STATIC void BIT_flushBits(BIT_CStream_t* bitC);
+MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC);
+
+/* Start with initCStream, providing the size of buffer to write into.
+* bitStream will never write outside of this buffer.
+* `dstCapacity` must be > sizeof(bitC->bitContainer), otherwise @return will be an error code.
+*
+* bits are first added to a local register.
+* Local register is size_t, hence 64-bits on 64-bits systems, or 32-bits on 32-bits systems.
+* Writing data into memory is an explicit operation, performed by the flushBits function.
+* Hence keep track of how many bits are potentially stored into the local register to avoid register overflow.
+* After a flushBits, a maximum of 7 bits might still be stored into local register.
+*
+* Avoid storing elements of more than 24 bits if you want compatibility with 32-bits bitstream readers.
+*
+* Last operation is to close the bitStream.
+* The function returns the final size of CStream in bytes.
+* If data couldn't fit into `dstBuffer`, it will return a 0 ( == not storable)
+*/
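+
+/* A minimal encoding sketch under the contract above (locals are
+* illustrative):
+*
+* BIT_CStream_t bitC;
+* char dst[64];
+* size_t streamSize;
+*
+* if (ERR_isError(BIT_initCStream(&bitC, dst, sizeof(dst))))
+* return; // dst too small
+* BIT_addBits(&bitC, 0x5, 3); // value 5 on 3 bits
+* BIT_addBits(&bitC, 0x1ff, 9); // value 511 on 9 bits
+* BIT_flushBits(&bitC); // commit register to dst
+* streamSize = BIT_closeCStream(&bitC); // 0 would mean overflow
+*
+* The decoder reads these fields back in reverse order (LIFO).
+*/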
+
+
+/*-********************************************
+* bitStream decoding API (read backward)
+**********************************************/
+typedef struct {
+ size_t bitContainer;
+ unsigned bitsConsumed;
+ const char* ptr;
+ const char* start;
+ const char* limitPtr;
+} BIT_DStream_t;
+
+typedef enum { BIT_DStream_unfinished = 0,
+ BIT_DStream_endOfBuffer = 1,
+ BIT_DStream_completed = 2,
+ BIT_DStream_overflow = 3 } BIT_DStream_status; /* result of BIT_reloadDStream() */
+ /* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */
+
+MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize);
+MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits);
+MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD);
+MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* bitD);
+
+
+/* Start by invoking BIT_initDStream().
+* A chunk of the bitStream is then stored into a local register.
+* Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (size_t).
+* You can then retrieve bitFields stored into the local register, **in reverse order**.
+* Local register is explicitly reloaded from memory by the BIT_reloadDStream() method.
+* A reload guarantees a minimum of ((8*sizeof(bitD->bitContainer))-7) bits when its result is BIT_DStream_unfinished.
+* Otherwise, it can be less than that, so proceed accordingly.
+* Checking if DStream has reached its end can be performed with BIT_endOfDStream().
+*/
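+
+/* A minimal decoding sketch matching the stream written above:
+*
+* BIT_DStream_t bitD;
+* size_t v9, v3;
+*
+* if (ERR_isError(BIT_initDStream(&bitD, dst, streamSize)))
+* return; // empty or corrupted stream
+* v9 = BIT_readBits(&bitD, 9); // fields come back in reverse order
+* v3 = BIT_readBits(&bitD, 3);
+* BIT_reloadDStream(&bitD); // refill the local register
+* if (!BIT_endOfDStream(&bitD))
+* return; // trailing bits left unread
+*/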
+
+
+/*-****************************************
+* unsafe API
+******************************************/
+MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC, size_t value, unsigned nbBits);
+/* faster, but works only if value is "clean", meaning all high bits above nbBits are 0 */
+
+MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC);
+/* unsafe version; does not check buffer overflow */
+
+MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits);
+/* faster, but works only if nbBits >= 1 */
+
+
+
+/*-**************************************************************
+* Internal functions
+****************************************************************/
+MEM_STATIC unsigned BIT_highbit32 (U32 val)
+{
+ assert(val != 0);
+ {
+# if (__GNUC__ >= 3) /* Use GCC Intrinsic */
+ return __builtin_clz (val) ^ 31;
+# else /* Software version */
+ static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29,
+ 11, 14, 16, 18, 22, 25, 3, 30,
+ 8, 12, 20, 28, 15, 17, 24, 7,
+ 19, 27, 23, 6, 26, 5, 4, 31 };
+ U32 v = val;
+ v |= v >> 1;
+ v |= v >> 2;
+ v |= v >> 4;
+ v |= v >> 8;
+ v |= v >> 16;
+ return DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27];
+# endif
+ }
+}
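+/* BIT_highbit32(v) is the position of the highest set bit, i.e.
+ * (unsigned)floor(log2(v)): BIT_highbit32(1) == 0, BIT_highbit32(32) == 5,
+ * BIT_highbit32(0x80000000) == 31. */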
+
+/*===== Local Constants =====*/
+static const unsigned BIT_mask[] = {
+ 0, 1, 3, 7, 0xF, 0x1F,
+ 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF,
+ 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0x1FFFF,
+ 0x3FFFF, 0x7FFFF, 0xFFFFF, 0x1FFFFF, 0x3FFFFF, 0x7FFFFF,
+ 0xFFFFFF, 0x1FFFFFF, 0x3FFFFFF, 0x7FFFFFF, 0xFFFFFFF, 0x1FFFFFFF,
+ 0x3FFFFFFF, 0x7FFFFFFF}; /* up to 31 bits */
+#define BIT_MASK_SIZE (sizeof(BIT_mask) / sizeof(BIT_mask[0]))
+
+/*-**************************************************************
+* bitStream encoding
+****************************************************************/
+/*! BIT_initCStream() :
+ * `dstCapacity` must be > sizeof(size_t)
+ * @return : 0 if success,
+ * otherwise an error code (can be tested using ERR_isError()) */
+MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC,
+ void* startPtr, size_t dstCapacity)
+{
+ bitC->bitContainer = 0;
+ bitC->bitPos = 0;
+ bitC->startPtr = (char*)startPtr;
+ bitC->ptr = bitC->startPtr;
+ bitC->endPtr = bitC->startPtr + dstCapacity - sizeof(bitC->bitContainer);
+ if (dstCapacity <= sizeof(bitC->bitContainer)) return ERROR(dstSize_tooSmall);
+ return 0;
+}
+
+/*! BIT_addBits() :
+ * can add up to 31 bits into `bitC`.
+ * Note : does not check for register overflow ! */
+MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC,
+ size_t value, unsigned nbBits)
+{
+ DEBUG_STATIC_ASSERT(BIT_MASK_SIZE == 32);
+ assert(nbBits < BIT_MASK_SIZE);
+ assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8);
+ bitC->bitContainer |= (value & BIT_mask[nbBits]) << bitC->bitPos;
+ bitC->bitPos += nbBits;
+}
+
+/*! BIT_addBitsFast() :
+ * works only if `value` is _clean_,
+ * meaning all high bits above nbBits are 0 */
+MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC,
+ size_t value, unsigned nbBits)
+{
+ assert((value>>nbBits) == 0);
+ assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8);
+ bitC->bitContainer |= value << bitC->bitPos;
+ bitC->bitPos += nbBits;
+}
+
+/*! BIT_flushBitsFast() :
+ * assumption : bitContainer has not overflowed
+ * unsafe version; does not check buffer overflow */
+MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC)
+{
+ size_t const nbBytes = bitC->bitPos >> 3;
+ assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8);
+ assert(bitC->ptr <= bitC->endPtr);
+ MEM_writeLEST(bitC->ptr, bitC->bitContainer);
+ bitC->ptr += nbBytes;
+ bitC->bitPos &= 7;
+ bitC->bitContainer >>= nbBytes*8;
+}
+
+/*! BIT_flushBits() :
+ * assumption : bitContainer has not overflowed
+ * safe version; check for buffer overflow, and prevents it.
+ * note : does not signal buffer overflow.
+ * overflow will be revealed later on using BIT_closeCStream() */
+MEM_STATIC void BIT_flushBits(BIT_CStream_t* bitC)
+{
+ size_t const nbBytes = bitC->bitPos >> 3;
+ assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8);
+ assert(bitC->ptr <= bitC->endPtr);
+ MEM_writeLEST(bitC->ptr, bitC->bitContainer);
+ bitC->ptr += nbBytes;
+ if (bitC->ptr > bitC->endPtr) bitC->ptr = bitC->endPtr;
+ bitC->bitPos &= 7;
+ bitC->bitContainer >>= nbBytes*8;
+}
+
+/*! BIT_closeCStream() :
+ * @return : size of CStream, in bytes,
+ * or 0 if it could not fit into dstBuffer */
+MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC)
+{
+ BIT_addBitsFast(bitC, 1, 1); /* endMark */
+ BIT_flushBits(bitC);
+ if (bitC->ptr >= bitC->endPtr) return 0; /* overflow detected */
+ return (bitC->ptr - bitC->startPtr) + (bitC->bitPos > 0);
+}
+
+
+/*-********************************************************
+* bitStream decoding
+**********************************************************/
+/*! BIT_initDStream() :
+ * Initialize a BIT_DStream_t.
+ * `bitD` : a pointer to an already allocated BIT_DStream_t structure.
+ * `srcSize` must be the *exact* size of the bitStream, in bytes.
+ * @return : size of stream (== srcSize), or an errorCode if a problem is detected
+ */
+MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize)
+{
+ if (srcSize < 1) { ZSTD_memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); }
+
+ bitD->start = (const char*)srcBuffer;
+ bitD->limitPtr = bitD->start + sizeof(bitD->bitContainer);
+
+ if (srcSize >= sizeof(bitD->bitContainer)) { /* normal case */
+ bitD->ptr = (const char*)srcBuffer + srcSize - sizeof(bitD->bitContainer);
+ bitD->bitContainer = MEM_readLEST(bitD->ptr);
+ { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
+ bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0; /* ensures bitsConsumed is always set */
+ if (lastByte == 0) return ERROR(GENERIC); /* endMark not present */ }
+ } else {
+ bitD->ptr = bitD->start;
+ bitD->bitContainer = *(const BYTE*)(bitD->start);
+ switch(srcSize)
+ {
+ case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16);
+ ZSTD_FALLTHROUGH;
+
+ case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24);
+ ZSTD_FALLTHROUGH;
+
+ case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32);
+ ZSTD_FALLTHROUGH;
+
+ case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24;
+ ZSTD_FALLTHROUGH;
+
+ case 3: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[2]) << 16;
+ ZSTD_FALLTHROUGH;
+
+ case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) << 8;
+ ZSTD_FALLTHROUGH;
+
+ default: break;
+ }
+ { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
+ bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0;
+ if (lastByte == 0) return ERROR(corruption_detected); /* endMark not present */
+ }
+ bitD->bitsConsumed += (U32)(sizeof(bitD->bitContainer) - srcSize)*8;
+ }
+
+ return srcSize;
+}
+
+MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getUpperBits(size_t bitContainer, U32 const start)
+{
+ return bitContainer >> start;
+}
+
+MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getMiddleBits(size_t bitContainer, U32 const start, U32 const nbBits)
+{
+ U32 const regMask = sizeof(bitContainer)*8 - 1;
+ /* if start > regMask, bitstream is corrupted, and result is undefined */
+ assert(nbBits < BIT_MASK_SIZE);
+ /* On x86, the compiler transforms `& ((1 << nbBits) - 1)` into a bzhi
+ * instruction, which is better than accessing memory. When the bmi2
+ * instruction is not present, we consider such cpus old (pre-Haswell,
+ * 2013), and their performance matters less here.
+ */
+#if defined(__x86_64__) || defined(_M_X86)
+ return (bitContainer >> (start & regMask)) & ((((U64)1) << nbBits) - 1);
+#else
+ return (bitContainer >> (start & regMask)) & BIT_mask[nbBits];
+#endif
+}
+
+MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits)
+{
+ assert(nbBits < BIT_MASK_SIZE);
+ return bitContainer & BIT_mask[nbBits];
+}
+
+/*! BIT_lookBits() :
+ * Provides next n bits from local register.
+ * local register is not modified.
+ * On 32-bits, maxNbBits==24.
+ * On 64-bits, maxNbBits==56.
+ * @return : value extracted */
+MEM_STATIC FORCE_INLINE_ATTR size_t BIT_lookBits(const BIT_DStream_t* bitD, U32 nbBits)
+{
+ /* arbitrate between double-shift and shift+mask */
+#if 1
+ /* if bitD->bitsConsumed + nbBits > sizeof(bitD->bitContainer)*8,
+ * bitstream is likely corrupted, and result is undefined */
+ return BIT_getMiddleBits(bitD->bitContainer, (sizeof(bitD->bitContainer)*8) - bitD->bitsConsumed - nbBits, nbBits);
+#else
+ /* this code path measured slower on an OS X laptop */
+ U32 const regMask = sizeof(bitD->bitContainer)*8 - 1;
+ return ((bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> 1) >> ((regMask-nbBits) & regMask);
+#endif
+}
+
+/*! BIT_lookBitsFast() :
+ * unsafe version; only works if nbBits >= 1 */
+MEM_STATIC size_t BIT_lookBitsFast(const BIT_DStream_t* bitD, U32 nbBits)
+{
+ U32 const regMask = sizeof(bitD->bitContainer)*8 - 1;
+ assert(nbBits >= 1);
+ return (bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> (((regMask+1)-nbBits) & regMask);
+}
+
+MEM_STATIC FORCE_INLINE_ATTR void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits)
+{
+ bitD->bitsConsumed += nbBits;
+}
+
+/*! BIT_readBits() :
+ * Read (consume) the next n bits from the local register, and update it.
+ * Take care not to read more bits than the local register contains.
+ * @return : extracted value. */
+MEM_STATIC FORCE_INLINE_ATTR size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits)
+{
+ size_t const value = BIT_lookBits(bitD, nbBits);
+ BIT_skipBits(bitD, nbBits);
+ return value;
+}
+
+/*! BIT_readBitsFast() :
+ * unsafe version; only works if nbBits >= 1 */
+MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits)
+{
+ size_t const value = BIT_lookBitsFast(bitD, nbBits);
+ assert(nbBits >= 1);
+ BIT_skipBits(bitD, nbBits);
+ return value;
+}
+
+/*! BIT_reloadDStreamFast() :
+ * Similar to BIT_reloadDStream(), but with two differences:
+ * 1. bitsConsumed <= sizeof(bitD->bitContainer)*8 must hold!
+ * 2. Returns BIT_DStream_overflow when bitD->ptr < bitD->limitPtr, at this
+ * point you must use BIT_reloadDStream() to reload.
+ */
+MEM_STATIC BIT_DStream_status BIT_reloadDStreamFast(BIT_DStream_t* bitD)
+{
+ if (UNLIKELY(bitD->ptr < bitD->limitPtr))
+ return BIT_DStream_overflow;
+ assert(bitD->bitsConsumed <= sizeof(bitD->bitContainer)*8);
+ bitD->ptr -= bitD->bitsConsumed >> 3;
+ bitD->bitsConsumed &= 7;
+ bitD->bitContainer = MEM_readLEST(bitD->ptr);
+ return BIT_DStream_unfinished;
+}
+
+/*! BIT_reloadDStream() :
+ * Refill `bitD` from the buffer previously set in BIT_initDStream().
+ * This function is safe : it guarantees it will not read beyond the src buffer.
+ * @return : status of `BIT_DStream_t` internal register.
+ * when status == BIT_DStream_unfinished, internal register is filled with at least 25 or 57 bits */
+MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
+{
+ if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* overflow detected, treated like end of stream */
+ return BIT_DStream_overflow;
+
+ if (bitD->ptr >= bitD->limitPtr) {
+ return BIT_reloadDStreamFast(bitD);
+ }
+ if (bitD->ptr == bitD->start) {
+ if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BIT_DStream_endOfBuffer;
+ return BIT_DStream_completed;
+ }
+ /* start < ptr < limitPtr */
+ { U32 nbBytes = bitD->bitsConsumed >> 3;
+ BIT_DStream_status result = BIT_DStream_unfinished;
+ if (bitD->ptr - nbBytes < bitD->start) {
+ nbBytes = (U32)(bitD->ptr - bitD->start); /* ptr > start */
+ result = BIT_DStream_endOfBuffer;
+ }
+ bitD->ptr -= nbBytes;
+ bitD->bitsConsumed -= nbBytes*8;
+ bitD->bitContainer = MEM_readLEST(bitD->ptr); /* reminder : srcSize > sizeof(bitD->bitContainer), otherwise bitD->ptr == bitD->start */
+ return result;
+ }
+}
+
+/*! BIT_endOfDStream() :
+ * @return : 1 if DStream has _exactly_ reached its end (all bits consumed).
+ */
+MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream)
+{
+ return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8));
+}
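+
+/* Usage sketch (illustrative only) : read back the stream written in the
+ * encoding sketch above, assuming `src`/`srcSize` describe that exact
+ * bitstream. Note that values come back in reverse order of writing :
+ *
+ *   BIT_DStream_t bitD;
+ *   size_t const initResult = BIT_initDStream(&bitD, src, srcSize);
+ *   if (ERR_isError(initResult)) return initResult;
+ *   {   size_t const v1 = BIT_readBits(&bitD, 1);   // last value written
+ *       size_t const v3 = BIT_readBits(&bitD, 3);   // first value written
+ *       BIT_reloadDStream(&bitD);
+ *       (void)v1; (void)v3;
+ *   }
+ *   assert(BIT_endOfDStream(&bitD));   // all bits consumed
+ */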
+
+
+#endif /* BITSTREAM_H_MODULE */
diff --git a/lib/zstd/common/compiler.h b/lib/zstd/common/compiler.h
new file mode 100644
index 000000000..c42d39faf
--- /dev/null
+++ b/lib/zstd/common/compiler.h
@@ -0,0 +1,184 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_COMPILER_H
+#define ZSTD_COMPILER_H
+
+#include "portability_macros.h"
+
+/*-*******************************************************
+* Compiler specifics
+*********************************************************/
+/* force inlining */
+
+#if !defined(ZSTD_NO_INLINE)
+#if (defined(__GNUC__) && !defined(__STRICT_ANSI__)) || defined(__cplusplus) || defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
+# define INLINE_KEYWORD inline
+#else
+# define INLINE_KEYWORD
+#endif
+
+#define FORCE_INLINE_ATTR __attribute__((always_inline))
+
+#else
+
+#define INLINE_KEYWORD
+#define FORCE_INLINE_ATTR
+
+#endif
+
+/*
+ On MSVC, qsort requires that functions passed into it use the __cdecl calling convention (CC).
+ This explicitly marks such functions as __cdecl so that the code will still compile
+ if a CC other than __cdecl has been made the default.
+*/
+#define WIN_CDECL
+
+/*
+ * FORCE_INLINE_TEMPLATE is used to define C "templates", which take constant
+ * parameters. They must be inlined for the compiler to eliminate the constant
+ * branches.
+ */
+#define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR
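+/* For instance (illustrative sketch, assuming the read/swap helpers from
+ * mem.h) : one body, two constant-specialized entry points :
+ *
+ *   FORCE_INLINE_TEMPLATE U32 read32_body(const void* p, int bigEndian)
+ *   {
+ *       U32 const v = MEM_read32(p);
+ *       return bigEndian ? MEM_swap32(v) : v;
+ *   }
+ *   static U32 read32_le(const void* p) { return read32_body(p, 0); }
+ *   static U32 read32_be(const void* p) { return read32_body(p, 1); }
+ *
+ * Because read32_body() is always inlined, the constant `bigEndian` branch
+ * is resolved at compile time in each wrapper. */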
+/*
+ * HINT_INLINE is used to help the compiler generate better code. It is *not*
+ * used for "templates", so it can be tweaked based on the compiler's
+ * performance.
+ *
+ * gcc-4.8 and gcc-4.9 have been shown to benefit from leaving off the
+ * always_inline attribute.
+ *
+ * clang up to 5.0.0 (trunk) benefits tremendously from the always_inline
+ * attribute.
+ */
+#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ >= 4 && __GNUC_MINOR__ >= 8 && __GNUC__ < 5
+# define HINT_INLINE static INLINE_KEYWORD
+#else
+# define HINT_INLINE static INLINE_KEYWORD FORCE_INLINE_ATTR
+#endif
+
+/* UNUSED_ATTR tells the compiler it is okay if the function is unused. */
+#define UNUSED_ATTR __attribute__((unused))
+
+/* force no inlining */
+#define FORCE_NOINLINE static __attribute__((__noinline__))
+
+
+/* target attribute */
+#define TARGET_ATTRIBUTE(target) __attribute__((__target__(target)))
+
+/* Target attribute for BMI2 dynamic dispatch.
+ * Enable lzcnt, bmi, and bmi2.
+ * We test for bmi1 & bmi2. lzcnt is included in bmi1.
+ */
+#define BMI2_TARGET_ATTRIBUTE TARGET_ATTRIBUTE("lzcnt,bmi,bmi2")
+
+/* prefetch
+ * can be disabled, by declaring NO_PREFETCH build macro */
+#if ( (__GNUC__ >= 4) || ( (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) ) )
+# define PREFETCH_L1(ptr) __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */)
+# define PREFETCH_L2(ptr) __builtin_prefetch((ptr), 0 /* rw==read */, 2 /* locality */)
+#elif defined(__aarch64__)
+# define PREFETCH_L1(ptr) __asm__ __volatile__("prfm pldl1keep, %0" ::"Q"(*(ptr)))
+# define PREFETCH_L2(ptr) __asm__ __volatile__("prfm pldl2keep, %0" ::"Q"(*(ptr)))
+#else
+# define PREFETCH_L1(ptr) (void)(ptr) /* disabled */
+# define PREFETCH_L2(ptr) (void)(ptr) /* disabled */
+#endif /* NO_PREFETCH */
+
+#define CACHELINE_SIZE 64
+
+#define PREFETCH_AREA(p, s) { \
+ const char* const _ptr = (const char*)(p); \
+ size_t const _size = (size_t)(s); \
+ size_t _pos; \
+ for (_pos=0; _pos<_size; _pos+=CACHELINE_SIZE) { \
+ PREFETCH_L2(_ptr + _pos); \
+ } \
+}
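+/* e.g. (illustrative, `tablePtr`/`tableSize` hypothetical) :
+ * PREFETCH_AREA(tablePtr, tableSize) issues one L2 prefetch per 64-byte
+ * cache line over the table, ahead of a scan. */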
+
+/* vectorization
+ * older GCC (pre gcc-4.3 picked as the cutoff) uses a different syntax,
+ * and some compilers, like Intel ICC and MCST LCC, do not support it at all. */
+#if !defined(__INTEL_COMPILER) && !defined(__clang__) && defined(__GNUC__) && !defined(__LCC__)
+# if (__GNUC__ == 4 && __GNUC_MINOR__ > 3) || (__GNUC__ >= 5)
+# define DONT_VECTORIZE __attribute__((optimize("no-tree-vectorize")))
+# else
+# define DONT_VECTORIZE _Pragma("GCC optimize(\"no-tree-vectorize\")")
+# endif
+#else
+# define DONT_VECTORIZE
+#endif
+
+/* Tell the compiler that a branch is likely or unlikely.
+ * Only use these macros if it causes the compiler to generate better code.
+ * If you can remove a LIKELY/UNLIKELY annotation without speed changes in gcc
+ * and clang, please do.
+ */
+#define LIKELY(x) (__builtin_expect((x), 1))
+#define UNLIKELY(x) (__builtin_expect((x), 0))
+
+#if __has_builtin(__builtin_unreachable) || (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)))
+# define ZSTD_UNREACHABLE { assert(0), __builtin_unreachable(); }
+#else
+# define ZSTD_UNREACHABLE { assert(0); }
+#endif
+
+/* disable warnings */
+
+/* Like DYNAMIC_BMI2, but for compile-time determination of BMI2 support */
+
+
+/* compile-time determination of SIMD support */
+
+/* C-language Attributes are added in C23. */
+#if defined(__STDC_VERSION__) && (__STDC_VERSION__ > 201710L) && defined(__has_c_attribute)
+# define ZSTD_HAS_C_ATTRIBUTE(x) __has_c_attribute(x)
+#else
+# define ZSTD_HAS_C_ATTRIBUTE(x) 0
+#endif
+
+/* Only use C++ attributes in C++. Some compilers report support for C++
+ * attributes when compiling with C.
+ */
+#define ZSTD_HAS_CPP_ATTRIBUTE(x) 0
+
+/* Define ZSTD_FALLTHROUGH macro for annotating switch case with the 'fallthrough' attribute.
+ * - C23: https://en.cppreference.com/w/c/language/attributes/fallthrough
+ * - C++17: https://en.cppreference.com/w/cpp/language/attributes/fallthrough
+ * - Else: __attribute__((__fallthrough__))
+ */
+#define ZSTD_FALLTHROUGH fallthrough
+
+/*-**************************************************************
+* Alignment check
+*****************************************************************/
+
+/* this test was initially positioned in mem.h,
+ * but this file is removed (or replaced) for linux kernel
+ * so it's now hosted in compiler.h,
+ * which remains valid for both user & kernel spaces.
+ */
+
+#ifndef ZSTD_ALIGNOF
+/* covers gcc, clang & MSVC */
+/* note : this section must come first, before C11,
+ * due to a limitation in the kernel source generator */
+# define ZSTD_ALIGNOF(T) __alignof(T)
+
+#endif /* ZSTD_ALIGNOF */
+
+/*-**************************************************************
+* Sanitizer
+*****************************************************************/
+
+
+
+#endif /* ZSTD_COMPILER_H */
diff --git a/lib/zstd/common/cpu.h b/lib/zstd/common/cpu.h
new file mode 100644
index 000000000..0db7b4240
--- /dev/null
+++ b/lib/zstd/common/cpu.h
@@ -0,0 +1,194 @@
+/*
+ * Copyright (c) Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_COMMON_CPU_H
+#define ZSTD_COMMON_CPU_H
+
+/*
+ * Implementation taken from folly/CpuId.h
+ * https://github.com/facebook/folly/blob/master/folly/CpuId.h
+ */
+
+#include "mem.h"
+
+
+typedef struct {
+ U32 f1c;
+ U32 f1d;
+ U32 f7b;
+ U32 f7c;
+} ZSTD_cpuid_t;
+
+MEM_STATIC ZSTD_cpuid_t ZSTD_cpuid(void) {
+ U32 f1c = 0;
+ U32 f1d = 0;
+ U32 f7b = 0;
+ U32 f7c = 0;
+#if defined(__i386__) && defined(__PIC__) && !defined(__clang__) && defined(__GNUC__)
+ /* The following block is like the normal cpuid branch below, but gcc
+ * reserves ebx for use as its PIC register, so we must specially
+ * handle the save and restore to avoid clobbering the register.
+ */
+ U32 n;
+ __asm__(
+ "pushl %%ebx\n\t"
+ "cpuid\n\t"
+ "popl %%ebx\n\t"
+ : "=a"(n)
+ : "a"(0)
+ : "ecx", "edx");
+ if (n >= 1) {
+ U32 f1a;
+ __asm__(
+ "pushl %%ebx\n\t"
+ "cpuid\n\t"
+ "popl %%ebx\n\t"
+ : "=a"(f1a), "=c"(f1c), "=d"(f1d)
+ : "a"(1));
+ }
+ if (n >= 7) {
+ __asm__(
+ "pushl %%ebx\n\t"
+ "cpuid\n\t"
+ "movl %%ebx, %%eax\n\t"
+ "popl %%ebx"
+ : "=a"(f7b), "=c"(f7c)
+ : "a"(7), "c"(0)
+ : "edx");
+ }
+#elif defined(__x86_64__) || defined(_M_X64) || defined(__i386__)
+ U32 n;
+ __asm__("cpuid" : "=a"(n) : "a"(0) : "ebx", "ecx", "edx");
+ if (n >= 1) {
+ U32 f1a;
+ __asm__("cpuid" : "=a"(f1a), "=c"(f1c), "=d"(f1d) : "a"(1) : "ebx");
+ }
+ if (n >= 7) {
+ U32 f7a;
+ __asm__("cpuid"
+ : "=a"(f7a), "=b"(f7b), "=c"(f7c)
+ : "a"(7), "c"(0)
+ : "edx");
+ }
+#endif
+ {
+ ZSTD_cpuid_t cpuid;
+ cpuid.f1c = f1c;
+ cpuid.f1d = f1d;
+ cpuid.f7b = f7b;
+ cpuid.f7c = f7c;
+ return cpuid;
+ }
+}
+
+#define X(name, r, bit) \
+ MEM_STATIC int ZSTD_cpuid_##name(ZSTD_cpuid_t const cpuid) { \
+ return ((cpuid.r) & (1U << bit)) != 0; \
+ }
+
+/* cpuid(1): Processor Info and Feature Bits. */
+#define C(name, bit) X(name, f1c, bit)
+ C(sse3, 0)
+ C(pclmuldq, 1)
+ C(dtes64, 2)
+ C(monitor, 3)
+ C(dscpl, 4)
+ C(vmx, 5)
+ C(smx, 6)
+ C(eist, 7)
+ C(tm2, 8)
+ C(ssse3, 9)
+ C(cnxtid, 10)
+ C(fma, 12)
+ C(cx16, 13)
+ C(xtpr, 14)
+ C(pdcm, 15)
+ C(pcid, 17)
+ C(dca, 18)
+ C(sse41, 19)
+ C(sse42, 20)
+ C(x2apic, 21)
+ C(movbe, 22)
+ C(popcnt, 23)
+ C(tscdeadline, 24)
+ C(aes, 25)
+ C(xsave, 26)
+ C(osxsave, 27)
+ C(avx, 28)
+ C(f16c, 29)
+ C(rdrand, 30)
+#undef C
+#define D(name, bit) X(name, f1d, bit)
+ D(fpu, 0)
+ D(vme, 1)
+ D(de, 2)
+ D(pse, 3)
+ D(tsc, 4)
+ D(msr, 5)
+ D(pae, 6)
+ D(mce, 7)
+ D(cx8, 8)
+ D(apic, 9)
+ D(sep, 11)
+ D(mtrr, 12)
+ D(pge, 13)
+ D(mca, 14)
+ D(cmov, 15)
+ D(pat, 16)
+ D(pse36, 17)
+ D(psn, 18)
+ D(clfsh, 19)
+ D(ds, 21)
+ D(acpi, 22)
+ D(mmx, 23)
+ D(fxsr, 24)
+ D(sse, 25)
+ D(sse2, 26)
+ D(ss, 27)
+ D(htt, 28)
+ D(tm, 29)
+ D(pbe, 31)
+#undef D
+
+/* cpuid(7): Extended Features. */
+#define B(name, bit) X(name, f7b, bit)
+ B(bmi1, 3)
+ B(hle, 4)
+ B(avx2, 5)
+ B(smep, 7)
+ B(bmi2, 8)
+ B(erms, 9)
+ B(invpcid, 10)
+ B(rtm, 11)
+ B(mpx, 14)
+ B(avx512f, 16)
+ B(avx512dq, 17)
+ B(rdseed, 18)
+ B(adx, 19)
+ B(smap, 20)
+ B(avx512ifma, 21)
+ B(pcommit, 22)
+ B(clflushopt, 23)
+ B(clwb, 24)
+ B(avx512pf, 26)
+ B(avx512er, 27)
+ B(avx512cd, 28)
+ B(sha, 29)
+ B(avx512bw, 30)
+ B(avx512vl, 31)
+#undef B
+#define C(name, bit) X(name, f7c, bit)
+ C(prefetchwt1, 0)
+ C(avx512vbmi, 1)
+#undef C
+
+#undef X
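+
+/* Usage sketch (illustrative only) : query once, then gate specialized paths :
+ *
+ *   ZSTD_cpuid_t const cpuid = ZSTD_cpuid();
+ *   int const hasBMI2 = ZSTD_cpuid_bmi2(cpuid);   // cpuid(7).EBX bit 8
+ *   int const hasAVX2 = ZSTD_cpuid_avx2(cpuid);   // cpuid(7).EBX bit 5
+ */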
+
+#endif /* ZSTD_COMMON_CPU_H */
diff --git a/lib/zstd/common/debug.c b/lib/zstd/common/debug.c
new file mode 100644
index 000000000..bb863c9ea
--- /dev/null
+++ b/lib/zstd/common/debug.c
@@ -0,0 +1,24 @@
+/* ******************************************************************
+ * debug
+ * Part of FSE library
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ *
+ * You can contact the author at :
+ * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+****************************************************************** */
+
+
+/*
+ * This module only hosts one global variable
+ * which can be used to dynamically influence the verbosity of traces,
+ * such as DEBUGLOG and RAWLOG
+ */
+
+#include "debug.h"
+
+int g_debuglevel = DEBUGLEVEL;
diff --git a/lib/zstd/common/debug.h b/lib/zstd/common/debug.h
new file mode 100644
index 000000000..6dd88d1fb
--- /dev/null
+++ b/lib/zstd/common/debug.h
@@ -0,0 +1,101 @@
+/* ******************************************************************
+ * debug
+ * Part of FSE library
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ *
+ * You can contact the author at :
+ * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+****************************************************************** */
+
+
+/*
+ * The purpose of this header is to enable debug functions.
+ * They group assert(), DEBUGLOG() and RAWLOG() for run-time,
+ * and DEBUG_STATIC_ASSERT() for compile-time.
+ *
+ * By default, DEBUGLEVEL==0, which means run-time debug is disabled.
+ *
+ * Level 1 enables assert() only.
+ * Starting with level 2, traces can be generated and pushed to stderr.
+ * The higher the level, the more verbose the traces.
+ *
+ * It's possible to dynamically adjust the level using the variable g_debuglevel,
+ * which is only declared if DEBUGLEVEL>=2. It is a global variable,
+ * not protected against concurrent access (use with care).
+ */
+
+#ifndef DEBUG_H_12987983217
+#define DEBUG_H_12987983217
+
+
+
+/* static assert is triggered at compile time, leaving no runtime artefact.
+ * static assert only works with compile-time constants.
+ * Also, this variant can only be used inside a function. */
+#define DEBUG_STATIC_ASSERT(c) (void)sizeof(char[(c) ? 1 : -1])
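+/* e.g. DEBUG_STATIC_ASSERT(sizeof(size_t) >= 4); the negative array size
+ * makes compilation fail whenever the condition is false. */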
+
+
+/* DEBUGLEVEL is expected to be defined externally,
+ * typically through compiler command line.
+ * Value must be a number. */
+#ifndef DEBUGLEVEL
+# define DEBUGLEVEL 0
+#endif
+
+
+/* recommended values for DEBUGLEVEL :
+ * 0 : release mode, no debug, all run-time checks disabled
+ * 1 : enables assert() only, no display
+ * 2 : reserved, for currently active debug path
+ * 3 : events once per object lifetime (CCtx, CDict, etc.)
+ * 4 : events once per frame
+ * 5 : events once per block
+ * 6 : events once per sequence (verbose)
+ * 7+: events at every position (*very* verbose)
+ *
+ * It's generally inconvenient to output traces > 5.
+ * In that case, it's possible to selectively trigger high verbosity levels
+ * by modifying g_debuglevel.
+ */
+
+#if (DEBUGLEVEL>=1)
+# define ZSTD_DEPS_NEED_ASSERT
+# include "zstd_deps.h"
+#else
+# ifndef assert /* assert may be already defined, due to prior #include <assert.h> */
+# define assert(condition) ((void)0) /* disable assert (default) */
+# endif
+#endif
+
+#if (DEBUGLEVEL>=2)
+# define ZSTD_DEPS_NEED_IO
+# include "zstd_deps.h"
+extern int g_debuglevel; /* the variable is only declared,
+ it actually lives in debug.c,
+ and is shared by the whole process.
+ It's not thread-safe.
+ It's useful when enabling very verbose levels
+ on selective conditions (such as position in src) */
+
+# define RAWLOG(l, ...) { \
+ if (l<=g_debuglevel) { \
+ ZSTD_DEBUG_PRINT(__VA_ARGS__); \
+ } }
+# define DEBUGLOG(l, ...) { \
+ if (l<=g_debuglevel) { \
+ ZSTD_DEBUG_PRINT(__FILE__ ": " __VA_ARGS__); \
+ ZSTD_DEBUG_PRINT(" \n"); \
+ } }
+#else
+# define RAWLOG(l, ...) {} /* disabled */
+# define DEBUGLOG(l, ...) {} /* disabled */
+#endif
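+
+/* Usage sketch (illustrative only; `blockNb` is a hypothetical local) :
+ *
+ *   DEBUGLOG(5, "decoding block %u", blockNb);
+ *
+ * With DEBUGLEVEL < 2 this expands to an empty block; otherwise it prints
+ * "file.c: decoding block N \n" whenever 5 <= g_debuglevel. */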
+
+
+
+#endif /* DEBUG_H_12987983217 */
diff --git a/lib/zstd/common/entropy_common.c b/lib/zstd/common/entropy_common.c
new file mode 100644
index 000000000..fef67056f
--- /dev/null
+++ b/lib/zstd/common/entropy_common.c
@@ -0,0 +1,357 @@
+/* ******************************************************************
+ * Common functions of New Generation Entropy library
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ *
+ * You can contact the author at :
+ * - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ * - Public forum : https://groups.google.com/forum/#!forum/lz4c
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+****************************************************************** */
+
+/* *************************************
+* Dependencies
+***************************************/
+#include "mem.h"
+#include "error_private.h" /* ERR_*, ERROR */
+#define FSE_STATIC_LINKING_ONLY /* FSE_MIN_TABLELOG */
+#include "fse.h"
+#define HUF_STATIC_LINKING_ONLY /* HUF_TABLELOG_ABSOLUTEMAX */
+#include "huf.h"
+
+
+/*=== Version ===*/
+unsigned FSE_versionNumber(void) { return FSE_VERSION_NUMBER; }
+
+
+/*=== Error Management ===*/
+unsigned FSE_isError(size_t code) { return ERR_isError(code); }
+const char* FSE_getErrorName(size_t code) { return ERR_getErrorName(code); }
+
+unsigned HUF_isError(size_t code) { return ERR_isError(code); }
+const char* HUF_getErrorName(size_t code) { return ERR_getErrorName(code); }
+
+
+/*-**************************************************************
+* FSE NCount encoding-decoding
+****************************************************************/
+static U32 FSE_ctz(U32 val)
+{
+ assert(val != 0);
+ {
+# if (__GNUC__ >= 3) /* GCC Intrinsic */
+ return __builtin_ctz(val);
+# else /* Software version */
+ U32 count = 0;
+ while ((val & 1) == 0) {
+ val >>= 1;
+ ++count;
+ }
+ return count;
+# endif
+ }
+}
+
+FORCE_INLINE_TEMPLATE
+size_t FSE_readNCount_body(short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
+ const void* headerBuffer, size_t hbSize)
+{
+ const BYTE* const istart = (const BYTE*) headerBuffer;
+ const BYTE* const iend = istart + hbSize;
+ const BYTE* ip = istart;
+ int nbBits;
+ int remaining;
+ int threshold;
+ U32 bitStream;
+ int bitCount;
+ unsigned charnum = 0;
+ unsigned const maxSV1 = *maxSVPtr + 1;
+ int previous0 = 0;
+
+ if (hbSize < 8) {
+ /* This function only works when hbSize >= 8 */
+ char buffer[8] = {0};
+ ZSTD_memcpy(buffer, headerBuffer, hbSize);
+ { size_t const countSize = FSE_readNCount(normalizedCounter, maxSVPtr, tableLogPtr,
+ buffer, sizeof(buffer));
+ if (FSE_isError(countSize)) return countSize;
+ if (countSize > hbSize) return ERROR(corruption_detected);
+ return countSize;
+ } }
+ assert(hbSize >= 8);
+
+ /* init */
+ ZSTD_memset(normalizedCounter, 0, (*maxSVPtr+1) * sizeof(normalizedCounter[0])); /* all symbols not present in NCount have a frequency of 0 */
+ bitStream = MEM_readLE32(ip);
+ nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG; /* extract tableLog */
+ if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge);
+ bitStream >>= 4;
+ bitCount = 4;
+ *tableLogPtr = nbBits;
+ remaining = (1<<nbBits)+1;
+ threshold = 1<<nbBits;
+ nbBits++;
+
+ for (;;) {
+ if (previous0) {
+ /* Count the number of repeats. Each time the
+ * 2-bit repeat code is 0b11 there is another
+ * repeat.
+ * Avoid UB by setting the high bit to 1.
+ */
+ int repeats = FSE_ctz(~bitStream | 0x80000000) >> 1;
+ while (repeats >= 12) {
+ charnum += 3 * 12;
+ if (LIKELY(ip <= iend-7)) {
+ ip += 3;
+ } else {
+ bitCount -= (int)(8 * (iend - 7 - ip));
+ bitCount &= 31;
+ ip = iend - 4;
+ }
+ bitStream = MEM_readLE32(ip) >> bitCount;
+ repeats = FSE_ctz(~bitStream | 0x80000000) >> 1;
+ }
+ charnum += 3 * repeats;
+ bitStream >>= 2 * repeats;
+ bitCount += 2 * repeats;
+
+ /* Add the final repeat which isn't 0b11. */
+ assert((bitStream & 3) < 3);
+ charnum += bitStream & 3;
+ bitCount += 2;
+
+ /* This is an error, but break and return an error
+ * at the end, because returning out of a loop makes
+ * it harder for the compiler to optimize.
+ */
+ if (charnum >= maxSV1) break;
+
+ /* We don't need to set the normalized count to 0
+ * because we already memset the whole buffer to 0.
+ */
+
+ if (LIKELY(ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
+ assert((bitCount >> 3) <= 3); /* For first condition to work */
+ ip += bitCount>>3;
+ bitCount &= 7;
+ } else {
+ bitCount -= (int)(8 * (iend - 4 - ip));
+ bitCount &= 31;
+ ip = iend - 4;
+ }
+ bitStream = MEM_readLE32(ip) >> bitCount;
+ }
+ {
+ int const max = (2*threshold-1) - remaining;
+ int count;
+
+ if ((bitStream & (threshold-1)) < (U32)max) {
+ count = bitStream & (threshold-1);
+ bitCount += nbBits-1;
+ } else {
+ count = bitStream & (2*threshold-1);
+ if (count >= threshold) count -= max;
+ bitCount += nbBits;
+ }
+
+ count--; /* extra accuracy */
+ /* When it matters (small blocks), this is a
+ * predictable branch, because we don't use -1.
+ */
+ if (count >= 0) {
+ remaining -= count;
+ } else {
+ assert(count == -1);
+ remaining += count;
+ }
+ normalizedCounter[charnum++] = (short)count;
+ previous0 = !count;
+
+ assert(threshold > 1);
+ if (remaining < threshold) {
+ /* This branch can be folded into the
+ * threshold update condition because we
+ * know that threshold > 1.
+ */
+ if (remaining <= 1) break;
+ nbBits = BIT_highbit32(remaining) + 1;
+ threshold = 1 << (nbBits - 1);
+ }
+ if (charnum >= maxSV1) break;
+
+ if (LIKELY(ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
+ ip += bitCount>>3;
+ bitCount &= 7;
+ } else {
+ bitCount -= (int)(8 * (iend - 4 - ip));
+ bitCount &= 31;
+ ip = iend - 4;
+ }
+ bitStream = MEM_readLE32(ip) >> bitCount;
+ } }
+ if (remaining != 1) return ERROR(corruption_detected);
+ /* Only possible when there are too many zeros. */
+ if (charnum > maxSV1) return ERROR(maxSymbolValue_tooSmall);
+ if (bitCount > 32) return ERROR(corruption_detected);
+ *maxSVPtr = charnum-1;
+
+ ip += (bitCount+7)>>3;
+ return ip-istart;
+}
+
+/* Avoids the FORCE_INLINE of the _body() function. */
+static size_t FSE_readNCount_body_default(
+ short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
+ const void* headerBuffer, size_t hbSize)
+{
+ return FSE_readNCount_body(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
+}
+
+#if DYNAMIC_BMI2
+BMI2_TARGET_ATTRIBUTE static size_t FSE_readNCount_body_bmi2(
+ short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
+ const void* headerBuffer, size_t hbSize)
+{
+ return FSE_readNCount_body(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
+}
+#endif
+
+size_t FSE_readNCount_bmi2(
+ short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
+ const void* headerBuffer, size_t hbSize, int bmi2)
+{
+#if DYNAMIC_BMI2
+ if (bmi2) {
+ return FSE_readNCount_body_bmi2(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
+ }
+#endif
+ (void)bmi2;
+ return FSE_readNCount_body_default(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
+}
+
+size_t FSE_readNCount(
+ short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
+ const void* headerBuffer, size_t hbSize)
+{
+ return FSE_readNCount_bmi2(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize, /* bmi2 */ 0);
+}
+
+
+/*! HUF_readStats() :
+ Read compact Huffman tree, saved by HUF_writeCTable().
+ `huffWeight` is destination buffer.
+ `rankStats` is assumed to be a table of at least HUF_TABLELOG_MAX U32.
+ @return : size read from `src`, or an error code.
+ Note : Needed by HUF_readCTable() and HUF_readDTableX?().
+*/
+size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
+ U32* nbSymbolsPtr, U32* tableLogPtr,
+ const void* src, size_t srcSize)
+{
+ U32 wksp[HUF_READ_STATS_WORKSPACE_SIZE_U32];
+ return HUF_readStats_wksp(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, wksp, sizeof(wksp), /* bmi2 */ 0);
+}
+
+FORCE_INLINE_TEMPLATE size_t
+HUF_readStats_body(BYTE* huffWeight, size_t hwSize, U32* rankStats,
+ U32* nbSymbolsPtr, U32* tableLogPtr,
+ const void* src, size_t srcSize,
+ void* workSpace, size_t wkspSize,
+ int bmi2)
+{
+ U32 weightTotal;
+ const BYTE* ip = (const BYTE*) src;
+ size_t iSize;
+ size_t oSize;
+
+ if (!srcSize) return ERROR(srcSize_wrong);
+ iSize = ip[0];
+ /* ZSTD_memset(huffWeight, 0, hwSize); *//* is not necessary, even though some analyzers complain ... */
+
+ if (iSize >= 128) { /* special header */
+ oSize = iSize - 127;
+ iSize = ((oSize+1)/2);
+ if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
+ if (oSize >= hwSize) return ERROR(corruption_detected);
+ ip += 1;
+ { U32 n;
+ for (n=0; n<oSize; n+=2) {
+ huffWeight[n] = ip[n/2] >> 4;
+ huffWeight[n+1] = ip[n/2] & 15;
+ } } }
+ else { /* header compressed with FSE (normal case) */
+ if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
+ /* max (hwSize-1) values decoded, as last one is implied */
+ oSize = FSE_decompress_wksp_bmi2(huffWeight, hwSize-1, ip+1, iSize, 6, workSpace, wkspSize, bmi2);
+ if (FSE_isError(oSize)) return oSize;
+ }
+
+ /* collect weight stats */
+ ZSTD_memset(rankStats, 0, (HUF_TABLELOG_MAX + 1) * sizeof(U32));
+ weightTotal = 0;
+ { U32 n; for (n=0; n<oSize; n++) {
+ if (huffWeight[n] > HUF_TABLELOG_MAX) return ERROR(corruption_detected);
+ rankStats[huffWeight[n]]++;
+ weightTotal += (1 << huffWeight[n]) >> 1;
+ } }
+ if (weightTotal == 0) return ERROR(corruption_detected);
+
+ /* get last non-null symbol weight (implied, total must be 2^n) */
+ { U32 const tableLog = BIT_highbit32(weightTotal) + 1;
+ if (tableLog > HUF_TABLELOG_MAX) return ERROR(corruption_detected);
+ *tableLogPtr = tableLog;
+ /* determine last weight */
+ { U32 const total = 1 << tableLog;
+ U32 const rest = total - weightTotal;
+ U32 const verif = 1 << BIT_highbit32(rest);
+ U32 const lastWeight = BIT_highbit32(rest) + 1;
+ if (verif != rest) return ERROR(corruption_detected); /* last value must be a clean power of 2 */
+ huffWeight[oSize] = (BYTE)lastWeight;
+ rankStats[lastWeight]++;
+ } }
+
+ /* check tree construction validity */
+ if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected); /* by construction : at least 2 elts of rank 1, must be even */
+
+ /* results */
+ *nbSymbolsPtr = (U32)(oSize+1);
+ return iSize+1;
+}
+
+/* Avoids the FORCE_INLINE of the _body() function. */
+static size_t HUF_readStats_body_default(BYTE* huffWeight, size_t hwSize, U32* rankStats,
+ U32* nbSymbolsPtr, U32* tableLogPtr,
+ const void* src, size_t srcSize,
+ void* workSpace, size_t wkspSize)
+{
+ return HUF_readStats_body(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize, 0);
+}
+
+#if DYNAMIC_BMI2
+static BMI2_TARGET_ATTRIBUTE size_t HUF_readStats_body_bmi2(BYTE* huffWeight, size_t hwSize, U32* rankStats,
+ U32* nbSymbolsPtr, U32* tableLogPtr,
+ const void* src, size_t srcSize,
+ void* workSpace, size_t wkspSize)
+{
+ return HUF_readStats_body(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize, 1);
+}
+#endif
+
+size_t HUF_readStats_wksp(BYTE* huffWeight, size_t hwSize, U32* rankStats,
+ U32* nbSymbolsPtr, U32* tableLogPtr,
+ const void* src, size_t srcSize,
+ void* workSpace, size_t wkspSize,
+ int bmi2)
+{
+#if DYNAMIC_BMI2
+ if (bmi2) {
+ return HUF_readStats_body_bmi2(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize);
+ }
+#endif
+ (void)bmi2;
+ return HUF_readStats_body_default(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize);
+}
diff --git a/lib/zstd/common/error_private.c b/lib/zstd/common/error_private.c
new file mode 100644
index 000000000..6d1135f8c
--- /dev/null
+++ b/lib/zstd/common/error_private.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+/* The purpose of this file is to have a single list of error strings embedded in binary */
+
+#include "error_private.h"
+
+const char* ERR_getErrorString(ERR_enum code)
+{
+#ifdef ZSTD_STRIP_ERROR_STRINGS
+ (void)code;
+ return "Error strings stripped";
+#else
+ static const char* const notErrorCode = "Unspecified error code";
+ switch( code )
+ {
+ case PREFIX(no_error): return "No error detected";
+ case PREFIX(GENERIC): return "Error (generic)";
+ case PREFIX(prefix_unknown): return "Unknown frame descriptor";
+ case PREFIX(version_unsupported): return "Version not supported";
+ case PREFIX(frameParameter_unsupported): return "Unsupported frame parameter";
+ case PREFIX(frameParameter_windowTooLarge): return "Frame requires too much memory for decoding";
+ case PREFIX(corruption_detected): return "Corrupted block detected";
+ case PREFIX(checksum_wrong): return "Restored data doesn't match checksum";
+ case PREFIX(parameter_unsupported): return "Unsupported parameter";
+ case PREFIX(parameter_outOfBound): return "Parameter is out of bound";
+ case PREFIX(init_missing): return "Context should be init first";
+ case PREFIX(memory_allocation): return "Allocation error : not enough memory";
+ case PREFIX(workSpace_tooSmall): return "workSpace buffer is not large enough";
+ case PREFIX(stage_wrong): return "Operation not authorized at current processing stage";
+ case PREFIX(tableLog_tooLarge): return "tableLog requires too much memory : unsupported";
+ case PREFIX(maxSymbolValue_tooLarge): return "Unsupported max Symbol Value : too large";
+ case PREFIX(maxSymbolValue_tooSmall): return "Specified maxSymbolValue is too small";
+ case PREFIX(dictionary_corrupted): return "Dictionary is corrupted";
+ case PREFIX(dictionary_wrong): return "Dictionary mismatch";
+ case PREFIX(dictionaryCreation_failed): return "Cannot create Dictionary from provided samples";
+ case PREFIX(dstSize_tooSmall): return "Destination buffer is too small";
+ case PREFIX(srcSize_wrong): return "Src size is incorrect";
+ case PREFIX(dstBuffer_null): return "Operation on NULL destination buffer";
+ /* following error codes are not stable and may be removed or changed in a future version */
+ case PREFIX(frameIndex_tooLarge): return "Frame index is too large";
+ case PREFIX(seekableIO): return "An I/O error occurred when reading/seeking";
+ case PREFIX(dstBuffer_wrong): return "Destination buffer is wrong";
+ case PREFIX(srcBuffer_wrong): return "Source buffer is wrong";
+ case PREFIX(maxCode):
+ default: return notErrorCode;
+ }
+#endif
+}
diff --git a/lib/zstd/common/error_private.h b/lib/zstd/common/error_private.h
new file mode 100644
index 000000000..ca5101e54
--- /dev/null
+++ b/lib/zstd/common/error_private.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+/* Note : this module is expected to remain private, do not expose it */
+
+#ifndef ERROR_H_MODULE
+#define ERROR_H_MODULE
+
+
+
+/* ****************************************
+* Dependencies
+******************************************/
+#include <linux/zstd_errors.h> /* enum list */
+#include "compiler.h"
+#include "debug.h"
+#include "zstd_deps.h" /* size_t */
+
+
+/* ****************************************
+* Compiler-specific
+******************************************/
+#define ERR_STATIC static __attribute__((unused))
+
+
+/*-****************************************
+* Customization (error_public.h)
+******************************************/
+typedef ZSTD_ErrorCode ERR_enum;
+#define PREFIX(name) ZSTD_error_##name
+
+
+/*-****************************************
+* Error codes handling
+******************************************/
+#undef ERROR /* already defined on Visual Studio */
+#define ERROR(name) ZSTD_ERROR(name)
+#define ZSTD_ERROR(name) ((size_t)-PREFIX(name))
+
+ERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); }
+
+ERR_STATIC ERR_enum ERR_getErrorCode(size_t code) { if (!ERR_isError(code)) return (ERR_enum)0; return (ERR_enum) (0-code); }
+
+/* check and forward error code */
+#define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return e
+#define CHECK_F(f) { CHECK_V_F(_var_err__, f); }
+
+
+/*-****************************************
+* Error Strings
+******************************************/
+
+const char* ERR_getErrorString(ERR_enum code); /* error_private.c */
+
+ERR_STATIC const char* ERR_getErrorName(size_t code)
+{
+ return ERR_getErrorString(ERR_getErrorCode(code));
+}
+
+/*
+ * Ignore: this is an internal helper.
+ *
+ * This is a helper function to help force C99-correctness during compilation.
+ * Under strict compilation modes, variadic macro arguments can't be empty.
+ * However, variadic function arguments can be. Using a function therefore lets
+ * us statically check that at least one (string) argument was passed,
+ * independent of the compilation flags.
+ */
+static INLINE_KEYWORD UNUSED_ATTR
+void _force_has_format_string(const char *format, ...) {
+ (void)format;
+}
+
+/*
+ * Ignore: this is an internal helper.
+ *
+ * We want to force this function invocation to be syntactically correct, but
+ * we don't want to force runtime evaluation of its arguments.
+ */
+#define _FORCE_HAS_FORMAT_STRING(...) \
+ if (0) { \
+ _force_has_format_string(__VA_ARGS__); \
+ }
+
+#define ERR_QUOTE(str) #str
+
+/*
+ * Return the specified error if the condition evaluates to true.
+ *
+ * In debug modes, prints additional information.
+ * In order to do that (particularly, printing the conditional that failed),
+ * this can't just wrap RETURN_ERROR().
+ */
+#define RETURN_ERROR_IF(cond, err, ...) \
+ if (cond) { \
+ RAWLOG(3, "%s:%d: ERROR!: check %s failed, returning %s", \
+ __FILE__, __LINE__, ERR_QUOTE(cond), ERR_QUOTE(ERROR(err))); \
+ _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \
+ RAWLOG(3, ": " __VA_ARGS__); \
+ RAWLOG(3, "\n"); \
+ return ERROR(err); \
+ }
+
+/*
+ * Unconditionally return the specified error.
+ *
+ * In debug modes, prints additional information.
+ */
+#define RETURN_ERROR(err, ...) \
+ do { \
+ RAWLOG(3, "%s:%d: ERROR!: unconditional check failed, returning %s", \
+ __FILE__, __LINE__, ERR_QUOTE(ERROR(err))); \
+ _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \
+ RAWLOG(3, ": " __VA_ARGS__); \
+ RAWLOG(3, "\n"); \
+ return ERROR(err); \
+ } while(0)
+
+/*
+ * If the provided expression evaluates to an error code, returns that error code.
+ *
+ * In debug modes, prints additional information.
+ */
+#define FORWARD_IF_ERROR(err, ...) \
+ do { \
+ size_t const err_code = (err); \
+ if (ERR_isError(err_code)) { \
+ RAWLOG(3, "%s:%d: ERROR!: forwarding error in %s: %s", \
+ __FILE__, __LINE__, ERR_QUOTE(err), ERR_getErrorName(err_code)); \
+ _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \
+ RAWLOG(3, ": " __VA_ARGS__); \
+ RAWLOG(3, "\n"); \
+ return err_code; \
+ } \
+ } while(0)
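+
+/* Usage sketch (illustrative only; `decodeStage` and its arguments are
+ * hypothetical) inside a function returning size_t :
+ *
+ *   RETURN_ERROR_IF(dstCapacity < srcSize, dstSize_tooSmall,
+ *                   "need %u bytes", (unsigned)srcSize);
+ *   FORWARD_IF_ERROR(decodeStage(dst, dstCapacity), "decode stage failed");
+ */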
+
+
+#endif /* ERROR_H_MODULE */
diff --git a/lib/zstd/common/fse.h b/lib/zstd/common/fse.h
new file mode 100644
index 000000000..4507043b2
--- /dev/null
+++ b/lib/zstd/common/fse.h
@@ -0,0 +1,711 @@
+/* ******************************************************************
+ * FSE : Finite State Entropy codec
+ * Public Prototypes declaration
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ *
+ * You can contact the author at :
+ * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+****************************************************************** */
+
+
+#ifndef FSE_H
+#define FSE_H
+
+
+/*-*****************************************
+* Dependencies
+******************************************/
+#include "zstd_deps.h" /* size_t, ptrdiff_t */
+
+
+/*-*****************************************
+* FSE_PUBLIC_API : control library symbols visibility
+******************************************/
+#if defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) && defined(__GNUC__) && (__GNUC__ >= 4)
+# define FSE_PUBLIC_API __attribute__ ((visibility ("default")))
+#elif defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) /* Visual expected */
+# define FSE_PUBLIC_API __declspec(dllexport)
+#elif defined(FSE_DLL_IMPORT) && (FSE_DLL_IMPORT==1)
+# define FSE_PUBLIC_API __declspec(dllimport) /* It isn't required, but allows generating better code, saving a function pointer load from the IAT and an indirect jump.*/
+#else
+# define FSE_PUBLIC_API
+#endif
+
+/*------ Version ------*/
+#define FSE_VERSION_MAJOR 0
+#define FSE_VERSION_MINOR 9
+#define FSE_VERSION_RELEASE 0
+
+#define FSE_LIB_VERSION FSE_VERSION_MAJOR.FSE_VERSION_MINOR.FSE_VERSION_RELEASE
+#define FSE_QUOTE(str) #str
+#define FSE_EXPAND_AND_QUOTE(str) FSE_QUOTE(str)
+#define FSE_VERSION_STRING FSE_EXPAND_AND_QUOTE(FSE_LIB_VERSION)
+
+#define FSE_VERSION_NUMBER (FSE_VERSION_MAJOR *100*100 + FSE_VERSION_MINOR *100 + FSE_VERSION_RELEASE)
+FSE_PUBLIC_API unsigned FSE_versionNumber(void); /*< library version number; to be used when checking dll version */
+
+
+/*-****************************************
+* FSE simple functions
+******************************************/
+/*! FSE_compress() :
+ Compress content of buffer 'src', of size 'srcSize', into destination buffer 'dst'.
+ 'dst' buffer must be already allocated. Compression runs faster if dstCapacity >= FSE_compressBound(srcSize).
+ @return : size of compressed data (<= dstCapacity).
+ Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!!
+ if return == 1, srcData is a single byte symbol * srcSize times. Use RLE compression instead.
+ if FSE_isError(return), compression failed (more details using FSE_getErrorName())
+*/
+FSE_PUBLIC_API size_t FSE_compress(void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize);
+
+/*! FSE_decompress():
+ Decompress FSE data from buffer 'cSrc', of size 'cSrcSize',
+ into already allocated destination buffer 'dst', of size 'dstCapacity'.
+ @return : size of regenerated data (<= dstCapacity),
+ or an error code, which can be tested using FSE_isError() .
+
+ ** Important ** : FSE_decompress() does not decompress non-compressible nor RLE data !!!
+ Why ? : making this distinction requires a header.
+ Header management is intentionally delegated to the user layer, which can better manage special cases.
+*/
+FSE_PUBLIC_API size_t FSE_decompress(void* dst, size_t dstCapacity,
+ const void* cSrc, size_t cSrcSize);
+
+
+/*-*****************************************
+* Tool functions
+******************************************/
+FSE_PUBLIC_API size_t FSE_compressBound(size_t size); /* maximum compressed size */
+
+/* Error Management */
+FSE_PUBLIC_API unsigned FSE_isError(size_t code); /* tells if a return value is an error code */
+FSE_PUBLIC_API const char* FSE_getErrorName(size_t code); /* provides error code string (useful for debugging) */
+
+
+/*-*****************************************
+* FSE advanced functions
+******************************************/
+/*! FSE_compress2() :
+ Same as FSE_compress(), but allows the selection of 'maxSymbolValue' and 'tableLog'
+ Both parameters can be defined as '0' to mean : use default value
+ @return : size of compressed data
+ Special values : if return == 0, srcData is not compressible => Nothing is stored within cSrc !!!
+ if return == 1, srcData is a single byte symbol * srcSize times. Use RLE compression.
+ if FSE_isError(return), it's an error code.
+*/
+FSE_PUBLIC_API size_t FSE_compress2 (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog);
+
+
+/*-*****************************************
+* FSE detailed API
+******************************************/
+/*!
+FSE_compress() does the following:
+1. count symbol occurrence from source[] into table count[] (see hist.h)
+2. normalize counters so that sum(count[]) == Power_of_2 (2^tableLog)
+3. save normalized counters to memory buffer using writeNCount()
+4. build encoding table 'CTable' from normalized counters
+5. encode the data stream using encoding table 'CTable'
+
+FSE_decompress() does the following:
+1. read normalized counters with readNCount()
+2. build decoding table 'DTable' from normalized counters
+3. decode the data stream using decoding table 'DTable'
+
+The following API allows targeting specific sub-functions for advanced tasks.
+For example, it's possible to compress several blocks using the same 'CTable',
+or to save and provide normalized distribution using external method.
+*/
+
+/* *** COMPRESSION *** */
+
+/*! FSE_optimalTableLog():
+ dynamically downsize 'tableLog' when conditions are met.
+ It saves CPU time, by using smaller tables, while preserving or even improving compression ratio.
+ @return : recommended tableLog (necessarily <= 'maxTableLog') */
+FSE_PUBLIC_API unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue);
+
+/*! FSE_normalizeCount():
+ normalize counts so that sum(count[]) == Power_of_2 (2^tableLog)
+ 'normalizedCounter' is a table of short, of minimum size (maxSymbolValue+1).
+ useLowProbCount is a boolean parameter which trades off compressed size for
+ faster header decoding. When it is set to 1, the compressed data will be slightly
+ smaller. And when it is set to 0, FSE_readNCount() and FSE_buildDTable() will be
+ faster. If you are compressing a small amount of data (< 2 KB) then useLowProbCount=0
+ is a good default, since header deserialization makes a big speed difference.
+ Otherwise, useLowProbCount=1 is a good default, since the speed difference is small.
+ @return : tableLog,
+ or an errorCode, which can be tested using FSE_isError() */
+FSE_PUBLIC_API size_t FSE_normalizeCount(short* normalizedCounter, unsigned tableLog,
+ const unsigned* count, size_t srcSize, unsigned maxSymbolValue, unsigned useLowProbCount);
+
+/*! FSE_NCountWriteBound():
+ Provides the maximum possible size of an FSE normalized table, given 'maxSymbolValue' and 'tableLog'.
+ Typically useful for allocation purpose. */
+FSE_PUBLIC_API size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog);
+
+/*! FSE_writeNCount():
+ Compactly save 'normalizedCounter' into 'buffer'.
+ @return : size of the compressed table,
+ or an errorCode, which can be tested using FSE_isError(). */
+FSE_PUBLIC_API size_t FSE_writeNCount (void* buffer, size_t bufferSize,
+ const short* normalizedCounter,
+ unsigned maxSymbolValue, unsigned tableLog);
+
+/*! Constructor and Destructor of FSE_CTable.
+ Note that FSE_CTable size depends on 'tableLog' and 'maxSymbolValue' */
+typedef unsigned FSE_CTable; /* don't allocate that. It's only meant to be more restrictive than void* */
+FSE_PUBLIC_API FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog);
+FSE_PUBLIC_API void FSE_freeCTable (FSE_CTable* ct);
+
+/*! FSE_buildCTable():
+ Builds `ct`, which must be already allocated, using FSE_createCTable().
+ @return : 0, or an errorCode, which can be tested using FSE_isError() */
+FSE_PUBLIC_API size_t FSE_buildCTable(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);
+
+/*! FSE_compress_usingCTable():
+ Compress `src` using `ct` into `dst` which must be already allocated.
+ @return : size of compressed data (<= `dstCapacity`),
+ or 0 if compressed data could not fit into `dst`,
+ or an errorCode, which can be tested using FSE_isError() */
+FSE_PUBLIC_API size_t FSE_compress_usingCTable (void* dst, size_t dstCapacity, const void* src, size_t srcSize, const FSE_CTable* ct);
+
+/*!
+Tutorial :
+----------
+The first step is to count all symbols. FSE_count() does this job very fast.
+Result will be saved into 'count', a table of unsigned int, which must be already allocated, and have 'maxSymbolValuePtr[0]+1' cells.
+'src' is a table of bytes of size 'srcSize'. All values within 'src' MUST be <= maxSymbolValuePtr[0]
+maxSymbolValuePtr[0] will be updated, with its real value (necessarily <= original value)
+FSE_count() will return the number of occurrences of the most frequent symbol.
+This can be used to know if there is a single symbol within 'src', and to quickly evaluate its compressibility.
+If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()).
+
+The next step is to normalize the frequencies.
+FSE_normalizeCount() will ensure that sum of frequencies is == 2 ^'tableLog'.
+It also guarantees a minimum of 1 to any Symbol with frequency >= 1.
+You can use 'tableLog'==0 to mean "use default tableLog value".
+If you are unsure of which tableLog value to use, you can ask FSE_optimalTableLog(),
+which will provide the optimal valid tableLog given sourceSize, maxSymbolValue, and a user-defined maximum (0 means "default").
+
+The result of FSE_normalizeCount() will be saved into a table,
+called 'normalizedCounter', which is a table of signed short.
+'normalizedCounter' must be already allocated, and have at least 'maxSymbolValue+1' cells.
+The return value is tableLog if everything proceeded as expected.
+It is 0 if there is a single symbol within distribution.
+If there is an error (ex: invalid tableLog value), the function will return an ErrorCode (which can be tested using FSE_isError()).
+
+'normalizedCounter' can be saved in a compact manner to a memory area using FSE_writeNCount().
+'buffer' must be already allocated.
+For guaranteed success, buffer size must be at least FSE_headerBound().
+The result of the function is the number of bytes written into 'buffer'.
+If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError(); ex : buffer size too small).
+
+'normalizedCounter' can then be used to create the compression table 'CTable'.
+The space required by 'CTable' must be already allocated, using FSE_createCTable().
+You can then use FSE_buildCTable() to fill 'CTable'.
+If there is an error, both functions will return an ErrorCode (which can be tested using FSE_isError()).
+
+'CTable' can then be used to compress 'src', with FSE_compress_usingCTable().
+Similar to FSE_count(), the convention is that 'src' is assumed to be a table of char of size 'srcSize'
+The function returns the size of compressed data (without header), necessarily <= `dstCapacity`.
+If it returns '0', compressed data could not fit into 'dst'.
+If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()).
+*/
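+
+/* Condensed sketch of the flow above (illustrative only; error checks are
+ * omitted, `count` was filled beforehand by FSE_count() from hist.h, and
+ * `norm`, `ct`, `header` are caller-allocated) :
+ *
+ *   unsigned maxSym = 255;   // updated by FSE_count()
+ *   unsigned const tableLog = FSE_optimalTableLog(0, srcSize, maxSym);
+ *   FSE_normalizeCount(norm, tableLog, count, srcSize, maxSym, 1);
+ *   FSE_writeNCount(header, headerCapacity, norm, maxSym, tableLog);
+ *   FSE_buildCTable(ct, norm, maxSym, tableLog);
+ *   FSE_compress_usingCTable(dst, dstCapacity, src, srcSize, ct);
+ */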
+
+
+/* *** DECOMPRESSION *** */
+
+/*! FSE_readNCount():
+ Read compactly saved 'normalizedCounter' from 'rBuffer'.
+ @return : size read from 'rBuffer',
+ or an errorCode, which can be tested using FSE_isError().
+ maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */
+FSE_PUBLIC_API size_t FSE_readNCount (short* normalizedCounter,
+ unsigned* maxSymbolValuePtr, unsigned* tableLogPtr,
+ const void* rBuffer, size_t rBuffSize);
+
+/*! FSE_readNCount_bmi2():
+ * Same as FSE_readNCount() but pass bmi2=1 when your CPU supports BMI2 and 0 otherwise.
+ */
+FSE_PUBLIC_API size_t FSE_readNCount_bmi2(short* normalizedCounter,
+ unsigned* maxSymbolValuePtr, unsigned* tableLogPtr,
+ const void* rBuffer, size_t rBuffSize, int bmi2);
+
+/*! Constructor and Destructor of FSE_DTable.
+ Note that its size depends on 'tableLog' */
+typedef unsigned FSE_DTable; /* don't allocate that. It's just a way to be more restrictive than void* */
+FSE_PUBLIC_API FSE_DTable* FSE_createDTable(unsigned tableLog);
+FSE_PUBLIC_API void FSE_freeDTable(FSE_DTable* dt);
+
+/*! FSE_buildDTable():
+ Builds 'dt', which must be already allocated, using FSE_createDTable().
+ return : 0, or an errorCode, which can be tested using FSE_isError() */
+FSE_PUBLIC_API size_t FSE_buildDTable (FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);
+
+/*! FSE_decompress_usingDTable():
+ Decompress compressed source `cSrc` of size `cSrcSize` using `dt`
+ into `dst` which must be already allocated.
+ @return : size of regenerated data (necessarily <= `dstCapacity`),
+ or an errorCode, which can be tested using FSE_isError() */
+FSE_PUBLIC_API size_t FSE_decompress_usingDTable(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, const FSE_DTable* dt);
+
+/*!
+Tutorial :
+----------
+(Note : these functions only decompress FSE-compressed blocks.
+ If the block is uncompressed, use memcpy() instead.
+ If the block is a single repeated byte, use memset() instead.)
+
+The first step is to obtain the normalized frequencies of symbols.
+This can be performed by FSE_readNCount() if it was saved using FSE_writeNCount().
+'normalizedCounter' must be already allocated, and have at least 'maxSymbolValuePtr[0]+1' cells of signed short.
+In practice, that means it's necessary to know 'maxSymbolValue' beforehand,
+or size the table to handle worst case situations (typically 256).
+FSE_readNCount() will provide 'tableLog' and 'maxSymbolValue'.
+The result of FSE_readNCount() is the number of bytes read from 'rBuffer'.
+Note that 'rBuffSize' must be at least 4 bytes, even if useful information is less than that.
+If there is an error, the function will return an error code, which can be tested using FSE_isError().
+
+The next step is to build the decompression tables 'FSE_DTable' from 'normalizedCounter'.
+This is performed by the function FSE_buildDTable().
+The space required by 'FSE_DTable' must be already allocated using FSE_createDTable().
+If there is an error, the function will return an error code, which can be tested using FSE_isError().
+
+`FSE_DTable` can then be used to decompress `cSrc`, with FSE_decompress_usingDTable().
+`cSrcSize` must be strictly correct, otherwise decompression will fail.
+FSE_decompress_usingDTable() result will tell how many bytes were regenerated (<=`dstCapacity`).
+If there is an error, the function will return an error code, which can be tested using FSE_isError(). (ex: dst buffer too small)
+*/
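+
+/* Illustrative sketch of the 3-step flow above (hedged : `rBuffer`/`rBuffSize`
+   hold the saved header, `cSrc`/`cSrcSize` the compressed payload that follows
+   it, `dst`/`dstCapacity` the output buffer ; error handling is abbreviated) :
+
+       short ncount[256];              // worst case : maxSymbolValue <= 255
+       unsigned maxSymbolValue = 255;
+       unsigned tableLog;
+       size_t const hSize = FSE_readNCount(ncount, &maxSymbolValue, &tableLog,
+                                           rBuffer, rBuffSize);
+       if (FSE_isError(hSize)) return hSize;
+       {   FSE_DTable* const dt = FSE_createDTable(tableLog);
+           size_t const e = FSE_buildDTable(dt, ncount, maxSymbolValue, tableLog);
+           size_t const dSize = FSE_isError(e) ? e
+                   : FSE_decompress_usingDTable(dst, dstCapacity, cSrc, cSrcSize, dt);
+           FSE_freeDTable(dt);
+           return dSize;
+       }
+*/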
+
+#endif /* FSE_H */
+
+#if !defined(FSE_H_FSE_STATIC_LINKING_ONLY)
+#define FSE_H_FSE_STATIC_LINKING_ONLY
+
+/* *** Dependency *** */
+#include "bitstream.h"
+
+
+/* *****************************************
+* Static allocation
+*******************************************/
+/* FSE buffer bounds */
+#define FSE_NCOUNTBOUND 512
+#define FSE_BLOCKBOUND(size) ((size) + ((size)>>7) + 4 /* fse states */ + sizeof(size_t) /* bitContainer */)
+#define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size)) /* Macro version, useful for static allocation */
+
+/* It is possible to statically allocate FSE CTable/DTable as a table of FSE_CTable/FSE_DTable using below macros */
+#define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) (1 + (1<<((maxTableLog)-1)) + (((maxSymbolValue)+1)*2))
+#define FSE_DTABLE_SIZE_U32(maxTableLog) (1 + (1<<(maxTableLog)))
+
+/* or use the size to malloc() space directly. Pay attention to alignment restrictions though */
+#define FSE_CTABLE_SIZE(maxTableLog, maxSymbolValue) (FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) * sizeof(FSE_CTable))
+#define FSE_DTABLE_SIZE(maxTableLog) (FSE_DTABLE_SIZE_U32(maxTableLog) * sizeof(FSE_DTable))
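+
+/* Example (hedged) : a statically allocated decoding table for tableLog==12,
+ * sized with the macro above ; the FSE_CTable case works the same way :
+ *     static FSE_DTable dTable[FSE_DTABLE_SIZE_U32(12)];   // 1 + 2^12 unsigned cells
+ */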
+
+
+/* *****************************************
+ * FSE advanced API
+ ***************************************** */
+
+unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus);
+/*< same as FSE_optimalTableLog(), which uses `minus==2` */
+
+/* FSE_compress_wksp() :
+ * Same as FSE_compress2(), but using an externally allocated scratch buffer (`workSpace`).
+ * FSE_COMPRESS_WKSP_SIZE_U32() provides the minimum size required for `workSpace` as a table of FSE_CTable.
+ */
+#define FSE_COMPRESS_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) ( FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) + ((maxTableLog > 12) ? (1 << (maxTableLog - 2)) : 1024) )
+size_t FSE_compress_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);
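+
+/* Hedged usage sketch : a caller-allocated workspace sized by the macro above,
+ * for default parameters (tableLog==12, 8-bit symbols). Note the resulting
+ * table is ~14KB, so prefer heap allocation in kernel context :
+ *     unsigned wksp[FSE_COMPRESS_WKSP_SIZE_U32(12, 255)];
+ *     size_t const cSize = FSE_compress_wksp(dst, dstSize, src, srcSize,
+ *                                            255, 12, wksp, sizeof(wksp));
+ */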
+
+size_t FSE_buildCTable_raw (FSE_CTable* ct, unsigned nbBits);
+/*< build a fake FSE_CTable, designed for a flat distribution, where each symbol uses nbBits */
+
+size_t FSE_buildCTable_rle (FSE_CTable* ct, unsigned char symbolValue);
+/*< build a fake FSE_CTable, designed to compress always the same symbolValue */
+
+/* FSE_buildCTable_wksp() :
+ * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`).
+ * `wkspSize` must be >= `FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(maxSymbolValue, tableLog)` of `unsigned`.
+ * See the FSE_buildCTable_wksp() implementation for a breakdown of workspace usage.
+ */
+#define FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(maxSymbolValue, tableLog) (((maxSymbolValue + 2) + (1ull << (tableLog)))/2 + sizeof(U64)/sizeof(U32) /* additional 8 bytes for potential table overwrite */)
+#define FSE_BUILD_CTABLE_WORKSPACE_SIZE(maxSymbolValue, tableLog) (sizeof(unsigned) * FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(maxSymbolValue, tableLog))
+size_t FSE_buildCTable_wksp(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);
+
+#define FSE_BUILD_DTABLE_WKSP_SIZE(maxTableLog, maxSymbolValue) (sizeof(short) * (maxSymbolValue + 1) + (1ULL << maxTableLog) + 8)
+#define FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) ((FSE_BUILD_DTABLE_WKSP_SIZE(maxTableLog, maxSymbolValue) + sizeof(unsigned) - 1) / sizeof(unsigned))
+FSE_PUBLIC_API size_t FSE_buildDTable_wksp(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);
+/*< Same as FSE_buildDTable(), using an externally allocated `workSpace` sized with `FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue)` */
+
+size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits);
+/*< build a fake FSE_DTable, designed to read a flat distribution where each symbol uses nbBits */
+
+size_t FSE_buildDTable_rle (FSE_DTable* dt, unsigned char symbolValue);
+/*< build a fake FSE_DTable, designed to always generate the same symbolValue */
+
+#define FSE_DECOMPRESS_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) (FSE_DTABLE_SIZE_U32(maxTableLog) + FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) + (FSE_MAX_SYMBOL_VALUE + 1) / 2 + 1)
+#define FSE_DECOMPRESS_WKSP_SIZE(maxTableLog, maxSymbolValue) (FSE_DECOMPRESS_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) * sizeof(unsigned))
+size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize);
+/*< same as FSE_decompress(), using an externally allocated `workSpace` produced with `FSE_DECOMPRESS_WKSP_SIZE_U32(maxLog, maxSymbolValue)` */
+
+size_t FSE_decompress_wksp_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize, int bmi2);
+/*< Same as FSE_decompress_wksp() but with dynamic BMI2 support. Pass 1 if your CPU supports BMI2 or 0 if it doesn't. */
+
+typedef enum {
+ FSE_repeat_none, /*< Cannot use the previous table */
+ FSE_repeat_check, /*< Can use the previous table but it must be checked */
+ FSE_repeat_valid /*< Can use the previous table and it is assumed to be valid */
+ } FSE_repeat;
+
+/* *****************************************
+* FSE symbol compression API
+*******************************************/
+/*!
+ This API consists of small unitary functions, which highly benefit from being inlined.
+ Hence their bodies are included in the next section.
+*/
+typedef struct {
+ ptrdiff_t value;
+ const void* stateTable;
+ const void* symbolTT;
+ unsigned stateLog;
+} FSE_CState_t;
+
+static void FSE_initCState(FSE_CState_t* CStatePtr, const FSE_CTable* ct);
+
+static void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* CStatePtr, unsigned symbol);
+
+static void FSE_flushCState(BIT_CStream_t* bitC, const FSE_CState_t* CStatePtr);
+
+/*<
+These functions are inner components of FSE_compress_usingCTable().
+They allow the creation of custom streams, mixing multiple tables and bit sources.
+
+A key property to keep in mind is that encoding and decoding are done **in reverse direction**.
+So the first symbol you will encode is the last you will decode, like a LIFO stack.
+
+You will need a few variables to track your CStream. They are :
+
+FSE_CTable ct; // Provided by FSE_buildCTable()
+BIT_CStream_t bitStream; // bitStream tracking structure
+FSE_CState_t state; // State tracking structure (can have several)
+
+
+The first thing to do is to init bitStream and state.
+ size_t errorCode = BIT_initCStream(&bitStream, dstBuffer, maxDstSize);
+ FSE_initCState(&state, ct);
+
+Note that BIT_initCStream() can produce an error code, so its result should be tested using FSE_isError().
+You can then encode your input data, byte after byte.
+FSE_encodeSymbol() outputs a maximum of 'tableLog' bits at a time.
+Remember decoding will be done in reverse direction.
+    FSE_encodeSymbol(&bitStream, &state, symbol);
+
+At any time, you can also add any bit sequence.
+Note : maximum allowed nbBits is 25, for compatibility with 32-bit decoders
+ BIT_addBits(&bitStream, bitField, nbBits);
+
+The above methods don't commit data to memory : they just store it into a local register, for speed.
+The local register size is 64 bits on 64-bit systems, 32 bits on 32-bit systems (size_t).
+Writing data to memory is a manual operation, performed by the flushBits function.
+ BIT_flushBits(&bitStream);
+
+Your last FSE encoding operation shall be to flush your last state value(s).
+ FSE_flushState(&bitStream, &state);
+
+Finally, you must close the bitStream.
+The function returns the size of CStream in bytes.
+If data couldn't fit into dstBuffer, it will return 0 (== not compressible).
+If there is an error, it returns an errorCode (which can be tested using FSE_isError()).
+ size_t size = BIT_closeCStream(&bitStream);
+*/
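+
+/* Minimal sketch of the sequence above (hedged : `src`/`srcSize`, `dstBuffer`/
+   `maxDstSize` and a valid `ct` are assumed provided by the caller ; flushing
+   once per symbol is wasteful but always safe) :
+
+       BIT_CStream_t bitStream;
+       FSE_CState_t state;
+       size_t n = srcSize;
+       size_t const r = BIT_initCStream(&bitStream, dstBuffer, maxDstSize);
+       if (FSE_isError(r)) return r;
+       FSE_initCState(&state, ct);
+       while (n--) {
+           // encode backwards : the last input byte is encoded first,
+           // so the decoder will regenerate the data front-to-back
+           FSE_encodeSymbol(&bitStream, &state, ((const BYTE*)src)[n]);
+           BIT_flushBits(&bitStream);
+       }
+       FSE_flushCState(&bitStream, &state);
+       return BIT_closeCStream(&bitStream);    // 0 == not compressible
+*/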
+
+
+/* *****************************************
+* FSE symbol decompression API
+*******************************************/
+typedef struct {
+ size_t state;
+ const void* table; /* precise table may vary, depending on U16 */
+} FSE_DState_t;
+
+
+static void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt);
+
+static unsigned char FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);
+
+static unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr);
+
+/*<
+Let's now decompose FSE_decompress_usingDTable() into its unitary components.
+You will decode FSE-encoded symbols from the bitStream,
+and also any other bitFields you put in, **in reverse order**.
+
+You will need a few variables to track your bitStream. They are :
+
+BIT_DStream_t DStream; // Stream context
+FSE_DState_t DState; // State context. Multiple ones are possible
+FSE_DTable* DTablePtr; // Decoding table, provided by FSE_buildDTable()
+
+The first thing to do is to init the bitStream.
+ errorCode = BIT_initDStream(&DStream, srcBuffer, srcSize);
+
+You should then retrieve your initial state(s)
+(in reverse flushing order if you have several ones) :
+ errorCode = FSE_initDState(&DState, &DStream, DTablePtr);
+
+You can then decode your data, symbol after symbol.
+For information, the maximum number of bits read by FSE_decodeSymbol() is 'tableLog'.
+Keep in mind that symbols are decoded in reverse order, like a LIFO stack (last in, first out).
+ unsigned char symbol = FSE_decodeSymbol(&DState, &DStream);
+
+You can retrieve any bitfield you may have stored into the bitStream (in reverse order).
+Note : maximum allowed nbBits is 25, for 32-bit compatibility
+ size_t bitField = BIT_readBits(&DStream, nbBits);
+
+All above operations only read from local register (which size depends on size_t).
+Refueling the register from memory is manually performed by the reload method.
+    endSignal = BIT_reloadDStream(&DStream);
+
+BIT_reloadDStream() result tells if there is still some more data to read from DStream.
+BIT_DStream_unfinished : there is still some data left in the DStream.
+BIT_DStream_endOfBuffer : DStream reached the end of its buffer. Its container may no longer be completely filled.
+BIT_DStream_completed : DStream reached its exact end, which in general corresponds to the end of decompression.
+BIT_DStream_tooFar : DStream went too far. Decompression result is corrupted.
+
+When reaching end of buffer (BIT_DStream_endOfBuffer), progress slowly, notably if you decode multiple symbols per loop,
+to properly detect the exact end of stream.
+After each decoded symbol, check if DStream is fully consumed using this simple test :
+ BIT_reloadDStream(&DStream) >= BIT_DStream_completed
+
+When it's done, verify decompression is fully completed, by checking both DStream and the relevant states.
+Checking if DStream has reached its end is performed by :
+ BIT_endOfDStream(&DStream);
+Also check the states : some symbols may remain there if any high-probability symbol (>50%) is possible.
+ FSE_endOfDState(&DState);
+*/
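+
+/* Skeleton of the loop above (hedged : single state, one symbol per iteration,
+   no output bound check ; a complete loop must also drain the final container
+   bits and state(s), as FSE_decompress_usingDTable() does internally) :
+
+       BIT_DStream_t DStream;
+       FSE_DState_t DState;
+       BYTE* op = (BYTE*)dst;
+       size_t const r = BIT_initDStream(&DStream, srcBuffer, srcSize);
+       if (FSE_isError(r)) return r;
+       FSE_initDState(&DState, &DStream, DTablePtr);
+       while (BIT_reloadDStream(&DStream) < BIT_DStream_completed)
+           *op++ = FSE_decodeSymbol(&DState, &DStream);
+       // afterwards, verify BIT_endOfDStream(&DStream) and FSE_endOfDState(&DState)
+*/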
+
+
+/* *****************************************
+* FSE unsafe API
+*******************************************/
+static unsigned char FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);
+/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */
+
+
+/* *****************************************
+* Implementation of inlined functions
+*******************************************/
+typedef struct {
+ int deltaFindState;
+ U32 deltaNbBits;
+} FSE_symbolCompressionTransform; /* total 8 bytes */
+
+MEM_STATIC void FSE_initCState(FSE_CState_t* statePtr, const FSE_CTable* ct)
+{
+ const void* ptr = ct;
+ const U16* u16ptr = (const U16*) ptr;
+ const U32 tableLog = MEM_read16(ptr);
+ statePtr->value = (ptrdiff_t)1<<tableLog;
+ statePtr->stateTable = u16ptr+2;
+ statePtr->symbolTT = ct + 1 + (tableLog ? (1<<(tableLog-1)) : 1);
+ statePtr->stateLog = tableLog;
+}
+
+
+/*! FSE_initCState2() :
+* Same as FSE_initCState(), but the first symbol to include (which will be the last to be read)
+* uses the smallest state value possible, saving the cost of this symbol */
+MEM_STATIC void FSE_initCState2(FSE_CState_t* statePtr, const FSE_CTable* ct, U32 symbol)
+{
+ FSE_initCState(statePtr, ct);
+ { const FSE_symbolCompressionTransform symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol];
+ const U16* stateTable = (const U16*)(statePtr->stateTable);
+ U32 nbBitsOut = (U32)((symbolTT.deltaNbBits + (1<<15)) >> 16);
+ statePtr->value = (nbBitsOut << 16) - symbolTT.deltaNbBits;
+ statePtr->value = stateTable[(statePtr->value >> nbBitsOut) + symbolTT.deltaFindState];
+ }
+}
+
+MEM_STATIC void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* statePtr, unsigned symbol)
+{
+ FSE_symbolCompressionTransform const symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol];
+ const U16* const stateTable = (const U16*)(statePtr->stateTable);
+ U32 const nbBitsOut = (U32)((statePtr->value + symbolTT.deltaNbBits) >> 16);
+ BIT_addBits(bitC, statePtr->value, nbBitsOut);
+ statePtr->value = stateTable[ (statePtr->value >> nbBitsOut) + symbolTT.deltaFindState];
+}
+
+MEM_STATIC void FSE_flushCState(BIT_CStream_t* bitC, const FSE_CState_t* statePtr)
+{
+ BIT_addBits(bitC, statePtr->value, statePtr->stateLog);
+ BIT_flushBits(bitC);
+}
+
+
+/* FSE_getMaxNbBits() :
+ * Approximate maximum cost of a symbol, in bits.
+ * Fractional values get rounded up (i.e. a symbol with a normalized frequency of 3 gives the same result as a frequency of 2)
+ * note 1 : assume symbolValue is valid (<= maxSymbolValue)
+ * note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits */
+MEM_STATIC U32 FSE_getMaxNbBits(const void* symbolTTPtr, U32 symbolValue)
+{
+ const FSE_symbolCompressionTransform* symbolTT = (const FSE_symbolCompressionTransform*) symbolTTPtr;
+ return (symbolTT[symbolValue].deltaNbBits + ((1<<16)-1)) >> 16;
+}
+
+/* FSE_bitCost() :
+ * Approximate symbol cost, as fractional value, using fixed-point format (accuracyLog fractional bits)
+ * note 1 : assume symbolValue is valid (<= maxSymbolValue)
+ * note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits */
+MEM_STATIC U32 FSE_bitCost(const void* symbolTTPtr, U32 tableLog, U32 symbolValue, U32 accuracyLog)
+{
+ const FSE_symbolCompressionTransform* symbolTT = (const FSE_symbolCompressionTransform*) symbolTTPtr;
+ U32 const minNbBits = symbolTT[symbolValue].deltaNbBits >> 16;
+ U32 const threshold = (minNbBits+1) << 16;
+ assert(tableLog < 16);
+ assert(accuracyLog < 31-tableLog); /* ensure enough room for renormalization double shift */
+ { U32 const tableSize = 1 << tableLog;
+ U32 const deltaFromThreshold = threshold - (symbolTT[symbolValue].deltaNbBits + tableSize);
+ U32 const normalizedDeltaFromThreshold = (deltaFromThreshold << accuracyLog) >> tableLog; /* linear interpolation (very approximate) */
+ U32 const bitMultiplier = 1 << accuracyLog;
+ assert(symbolTT[symbolValue].deltaNbBits + tableSize <= threshold);
+ assert(normalizedDeltaFromThreshold <= bitMultiplier);
+ return (minNbBits+1)*bitMultiplier - normalizedDeltaFromThreshold;
+ }
+}
+
+
+/* ====== Decompression ====== */
+
+typedef struct {
+ U16 tableLog;
+ U16 fastMode;
+} FSE_DTableHeader; /* sizeof U32 */
+
+typedef struct
+{
+ unsigned short newState;
+ unsigned char symbol;
+ unsigned char nbBits;
+} FSE_decode_t; /* size == U32 */
+
+MEM_STATIC void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt)
+{
+ const void* ptr = dt;
+ const FSE_DTableHeader* const DTableH = (const FSE_DTableHeader*)ptr;
+ DStatePtr->state = BIT_readBits(bitD, DTableH->tableLog);
+ BIT_reloadDStream(bitD);
+ DStatePtr->table = dt + 1;
+}
+
+MEM_STATIC BYTE FSE_peekSymbol(const FSE_DState_t* DStatePtr)
+{
+ FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
+ return DInfo.symbol;
+}
+
+MEM_STATIC void FSE_updateState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
+{
+ FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
+ U32 const nbBits = DInfo.nbBits;
+ size_t const lowBits = BIT_readBits(bitD, nbBits);
+ DStatePtr->state = DInfo.newState + lowBits;
+}
+
+MEM_STATIC BYTE FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
+{
+ FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
+ U32 const nbBits = DInfo.nbBits;
+ BYTE const symbol = DInfo.symbol;
+ size_t const lowBits = BIT_readBits(bitD, nbBits);
+
+ DStatePtr->state = DInfo.newState + lowBits;
+ return symbol;
+}
+
+/*! FSE_decodeSymbolFast() :
+ unsafe, only works if no symbol has a probability > 50% */
+MEM_STATIC BYTE FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
+{
+ FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
+ U32 const nbBits = DInfo.nbBits;
+ BYTE const symbol = DInfo.symbol;
+ size_t const lowBits = BIT_readBitsFast(bitD, nbBits);
+
+ DStatePtr->state = DInfo.newState + lowBits;
+ return symbol;
+}
+
+MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr)
+{
+ return DStatePtr->state == 0;
+}
+
+
+
+#ifndef FSE_COMMONDEFS_ONLY
+
+/* **************************************************************
+* Tuning parameters
+****************************************************************/
+/*!MEMORY_USAGE :
+* Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
+* Increasing memory usage improves compression ratio.
+* Reducing memory usage can improve speed, thanks to cache effects.
+* Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
+#ifndef FSE_MAX_MEMORY_USAGE
+# define FSE_MAX_MEMORY_USAGE 14
+#endif
+#ifndef FSE_DEFAULT_MEMORY_USAGE
+# define FSE_DEFAULT_MEMORY_USAGE 13
+#endif
+#if (FSE_DEFAULT_MEMORY_USAGE > FSE_MAX_MEMORY_USAGE)
+# error "FSE_DEFAULT_MEMORY_USAGE must be <= FSE_MAX_MEMORY_USAGE"
+#endif
+
+/*!FSE_MAX_SYMBOL_VALUE :
+* Maximum symbol value authorized.
+* Required for proper stack allocation */
+#ifndef FSE_MAX_SYMBOL_VALUE
+# define FSE_MAX_SYMBOL_VALUE 255
+#endif
+
+/* **************************************************************
+* template functions type & suffix
+****************************************************************/
+#define FSE_FUNCTION_TYPE BYTE
+#define FSE_FUNCTION_EXTENSION
+#define FSE_DECODE_TYPE FSE_decode_t
+
+
+#endif /* !FSE_COMMONDEFS_ONLY */
+
+
+/* ***************************************************************
+* Constants
+*****************************************************************/
+#define FSE_MAX_TABLELOG (FSE_MAX_MEMORY_USAGE-2)
+#define FSE_MAX_TABLESIZE (1U<<FSE_MAX_TABLELOG)
+#define FSE_MAXTABLESIZE_MASK (FSE_MAX_TABLESIZE-1)
+#define FSE_DEFAULT_TABLELOG (FSE_DEFAULT_MEMORY_USAGE-2)
+#define FSE_MIN_TABLELOG 5
+
+#define FSE_TABLELOG_ABSOLUTE_MAX 15
+#if FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX
+# error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported"
+#endif
+
+#define FSE_TABLESTEP(tableSize) (((tableSize)>>1) + ((tableSize)>>3) + 3)
+
+
+#endif /* FSE_STATIC_LINKING_ONLY */
+
+
diff --git a/lib/zstd/common/fse_decompress.c b/lib/zstd/common/fse_decompress.c
new file mode 100644
index 000000000..8dcb8ca39
--- /dev/null
+++ b/lib/zstd/common/fse_decompress.c
@@ -0,0 +1,390 @@
+/* ******************************************************************
+ * FSE : Finite State Entropy decoder
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ *
+ * You can contact the author at :
+ * - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ * - Public forum : https://groups.google.com/forum/#!forum/lz4c
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+****************************************************************** */
+
+
+/* **************************************************************
+* Includes
+****************************************************************/
+#include "debug.h" /* assert */
+#include "bitstream.h"
+#include "compiler.h"
+#define FSE_STATIC_LINKING_ONLY
+#include "fse.h"
+#include "error_private.h"
+#define ZSTD_DEPS_NEED_MALLOC
+#include "zstd_deps.h"
+
+
+/* **************************************************************
+* Error Management
+****************************************************************/
+#define FSE_isError ERR_isError
+#define FSE_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c) /* use only *after* variable declarations */
+
+
+/* **************************************************************
+* Templates
+****************************************************************/
+/*
+  Designed to be included,
+  for type-specific functions (template emulation in C).
+  The objective is to write these functions only once, for improved maintenance.
+*/
+
+/* safety checks */
+#ifndef FSE_FUNCTION_EXTENSION
+# error "FSE_FUNCTION_EXTENSION must be defined"
+#endif
+#ifndef FSE_FUNCTION_TYPE
+# error "FSE_FUNCTION_TYPE must be defined"
+#endif
+
+/* Function names */
+#define FSE_CAT(X,Y) X##Y
+#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)
+#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)
+
+
+/* Function templates */
+FSE_DTable* FSE_createDTable (unsigned tableLog)
+{
+ if (tableLog > FSE_TABLELOG_ABSOLUTE_MAX) tableLog = FSE_TABLELOG_ABSOLUTE_MAX;
+ return (FSE_DTable*)ZSTD_malloc( FSE_DTABLE_SIZE_U32(tableLog) * sizeof (U32) );
+}
+
+void FSE_freeDTable (FSE_DTable* dt)
+{
+ ZSTD_free(dt);
+}
+
+static size_t FSE_buildDTable_internal(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize)
+{
+    void* const tdPtr = dt+1;   /* because *dt is unsigned, 32-bit aligned on 32-bit boundaries */
+ FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*) (tdPtr);
+ U16* symbolNext = (U16*)workSpace;
+ BYTE* spread = (BYTE*)(symbolNext + maxSymbolValue + 1);
+
+ U32 const maxSV1 = maxSymbolValue + 1;
+ U32 const tableSize = 1 << tableLog;
+ U32 highThreshold = tableSize-1;
+
+ /* Sanity Checks */
+ if (FSE_BUILD_DTABLE_WKSP_SIZE(tableLog, maxSymbolValue) > wkspSize) return ERROR(maxSymbolValue_tooLarge);
+ if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge);
+ if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
+
+ /* Init, lay down lowprob symbols */
+ { FSE_DTableHeader DTableH;
+ DTableH.tableLog = (U16)tableLog;
+ DTableH.fastMode = 1;
+ { S16 const largeLimit= (S16)(1 << (tableLog-1));
+ U32 s;
+ for (s=0; s<maxSV1; s++) {
+ if (normalizedCounter[s]==-1) {
+ tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s;
+ symbolNext[s] = 1;
+ } else {
+ if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0;
+ symbolNext[s] = normalizedCounter[s];
+ } } }
+ ZSTD_memcpy(dt, &DTableH, sizeof(DTableH));
+ }
+
+ /* Spread symbols */
+ if (highThreshold == tableSize - 1) {
+ size_t const tableMask = tableSize-1;
+ size_t const step = FSE_TABLESTEP(tableSize);
+ /* First lay down the symbols in order.
+ * We use a uint64_t to lay down 8 bytes at a time. This reduces branch
+ * misses since small blocks generally have small table logs, so nearly
+ * all symbols have counts <= 8. We ensure we have 8 bytes at the end of
+ * our buffer to handle the over-write.
+ */
+ {
+ U64 const add = 0x0101010101010101ull;
+ size_t pos = 0;
+ U64 sv = 0;
+ U32 s;
+ for (s=0; s<maxSV1; ++s, sv += add) {
+ int i;
+ int const n = normalizedCounter[s];
+ MEM_write64(spread + pos, sv);
+ for (i = 8; i < n; i += 8) {
+ MEM_write64(spread + pos + i, sv);
+ }
+ pos += n;
+ }
+ }
+        /* Now we spread those positions across the table.
+         * The benefit of doing it in two stages is that we avoid the
+         * variable size inner loop, which caused lots of branch misses.
+         * Now we can run through all the positions without any branch misses.
+         * We unroll the loop twice, since that is what empirically worked best.
+         */
+ {
+ size_t position = 0;
+ size_t s;
+ size_t const unroll = 2;
+ assert(tableSize % unroll == 0); /* FSE_MIN_TABLELOG is 5 */
+ for (s = 0; s < (size_t)tableSize; s += unroll) {
+ size_t u;
+ for (u = 0; u < unroll; ++u) {
+ size_t const uPosition = (position + (u * step)) & tableMask;
+ tableDecode[uPosition].symbol = spread[s + u];
+ }
+ position = (position + (unroll * step)) & tableMask;
+ }
+ assert(position == 0);
+ }
+ } else {
+ U32 const tableMask = tableSize-1;
+ U32 const step = FSE_TABLESTEP(tableSize);
+ U32 s, position = 0;
+ for (s=0; s<maxSV1; s++) {
+ int i;
+ for (i=0; i<normalizedCounter[s]; i++) {
+ tableDecode[position].symbol = (FSE_FUNCTION_TYPE)s;
+ position = (position + step) & tableMask;
+ while (position > highThreshold) position = (position + step) & tableMask; /* lowprob area */
+ } }
+ if (position!=0) return ERROR(GENERIC); /* position must reach all cells once, otherwise normalizedCounter is incorrect */
+ }
+
+ /* Build Decoding table */
+ { U32 u;
+ for (u=0; u<tableSize; u++) {
+ FSE_FUNCTION_TYPE const symbol = (FSE_FUNCTION_TYPE)(tableDecode[u].symbol);
+ U32 const nextState = symbolNext[symbol]++;
+ tableDecode[u].nbBits = (BYTE) (tableLog - BIT_highbit32(nextState) );
+ tableDecode[u].newState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize);
+ } }
+
+ return 0;
+}
+
+size_t FSE_buildDTable_wksp(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize)
+{
+ return FSE_buildDTable_internal(dt, normalizedCounter, maxSymbolValue, tableLog, workSpace, wkspSize);
+}
+
+
+#ifndef FSE_COMMONDEFS_ONLY
+
+/*-*******************************************************
+* Decompression (Byte symbols)
+*********************************************************/
+size_t FSE_buildDTable_rle (FSE_DTable* dt, BYTE symbolValue)
+{
+ void* ptr = dt;
+ FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
+ void* dPtr = dt + 1;
+ FSE_decode_t* const cell = (FSE_decode_t*)dPtr;
+
+ DTableH->tableLog = 0;
+ DTableH->fastMode = 0;
+
+ cell->newState = 0;
+ cell->symbol = symbolValue;
+ cell->nbBits = 0;
+
+ return 0;
+}
+
+
+size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits)
+{
+ void* ptr = dt;
+ FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
+ void* dPtr = dt + 1;
+ FSE_decode_t* const dinfo = (FSE_decode_t*)dPtr;
+ const unsigned tableSize = 1 << nbBits;
+ const unsigned tableMask = tableSize - 1;
+ const unsigned maxSV1 = tableMask+1;
+ unsigned s;
+
+ /* Sanity checks */
+ if (nbBits < 1) return ERROR(GENERIC); /* min size */
+
+ /* Build Decoding Table */
+ DTableH->tableLog = (U16)nbBits;
+ DTableH->fastMode = 1;
+ for (s=0; s<maxSV1; s++) {
+ dinfo[s].newState = 0;
+ dinfo[s].symbol = (BYTE)s;
+ dinfo[s].nbBits = (BYTE)nbBits;
+ }
+
+ return 0;
+}
+
+FORCE_INLINE_TEMPLATE size_t FSE_decompress_usingDTable_generic(
+ void* dst, size_t maxDstSize,
+ const void* cSrc, size_t cSrcSize,
+ const FSE_DTable* dt, const unsigned fast)
+{
+ BYTE* const ostart = (BYTE*) dst;
+ BYTE* op = ostart;
+ BYTE* const omax = op + maxDstSize;
+ BYTE* const olimit = omax-3;
+
+ BIT_DStream_t bitD;
+ FSE_DState_t state1;
+ FSE_DState_t state2;
+
+ /* Init */
+ CHECK_F(BIT_initDStream(&bitD, cSrc, cSrcSize));
+
+ FSE_initDState(&state1, &bitD, dt);
+ FSE_initDState(&state2, &bitD, dt);
+
+#define FSE_GETSYMBOL(statePtr) (fast ? FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD))
+
+ /* 4 symbols per loop */
+ for ( ; (BIT_reloadDStream(&bitD)==BIT_DStream_unfinished) & (op<olimit) ; op+=4) {
+ op[0] = FSE_GETSYMBOL(&state1);
+
+ if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
+ BIT_reloadDStream(&bitD);
+
+ op[1] = FSE_GETSYMBOL(&state2);
+
+ if (FSE_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
+ { if (BIT_reloadDStream(&bitD) > BIT_DStream_unfinished) { op+=2; break; } }
+
+ op[2] = FSE_GETSYMBOL(&state1);
+
+ if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
+ BIT_reloadDStream(&bitD);
+
+ op[3] = FSE_GETSYMBOL(&state2);
+ }
+
+ /* tail */
+    /* note : BIT_reloadDStream(&bitD) >= BIT_DStream_endOfBuffer; ends at exactly BIT_DStream_completed */
+ while (1) {
+ if (op>(omax-2)) return ERROR(dstSize_tooSmall);
+ *op++ = FSE_GETSYMBOL(&state1);
+ if (BIT_reloadDStream(&bitD)==BIT_DStream_overflow) {
+ *op++ = FSE_GETSYMBOL(&state2);
+ break;
+ }
+
+ if (op>(omax-2)) return ERROR(dstSize_tooSmall);
+ *op++ = FSE_GETSYMBOL(&state2);
+ if (BIT_reloadDStream(&bitD)==BIT_DStream_overflow) {
+ *op++ = FSE_GETSYMBOL(&state1);
+ break;
+ } }
+
+ return op-ostart;
+}
+
+
+size_t FSE_decompress_usingDTable(void* dst, size_t originalSize,
+ const void* cSrc, size_t cSrcSize,
+ const FSE_DTable* dt)
+{
+ const void* ptr = dt;
+ const FSE_DTableHeader* DTableH = (const FSE_DTableHeader*)ptr;
+ const U32 fastMode = DTableH->fastMode;
+
+ /* select fast mode (static) */
+ if (fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);
+ return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);
+}
+
+
+size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize)
+{
+ return FSE_decompress_wksp_bmi2(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize, /* bmi2 */ 0);
+}
+
+typedef struct {
+ short ncount[FSE_MAX_SYMBOL_VALUE + 1];
+ FSE_DTable dtable[]; /* Dynamically sized */
+} FSE_DecompressWksp;
+
+
+FORCE_INLINE_TEMPLATE size_t FSE_decompress_wksp_body(
+ void* dst, size_t dstCapacity,
+ const void* cSrc, size_t cSrcSize,
+ unsigned maxLog, void* workSpace, size_t wkspSize,
+ int bmi2)
+{
+ const BYTE* const istart = (const BYTE*)cSrc;
+ const BYTE* ip = istart;
+ unsigned tableLog;
+ unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
+ FSE_DecompressWksp* const wksp = (FSE_DecompressWksp*)workSpace;
+
+ DEBUG_STATIC_ASSERT((FSE_MAX_SYMBOL_VALUE + 1) % 2 == 0);
+ if (wkspSize < sizeof(*wksp)) return ERROR(GENERIC);
+
+ /* normal FSE decoding mode */
+ {
+ size_t const NCountLength = FSE_readNCount_bmi2(wksp->ncount, &maxSymbolValue, &tableLog, istart, cSrcSize, bmi2);
+ if (FSE_isError(NCountLength)) return NCountLength;
+ if (tableLog > maxLog) return ERROR(tableLog_tooLarge);
+ assert(NCountLength <= cSrcSize);
+ ip += NCountLength;
+ cSrcSize -= NCountLength;
+ }
+
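+    /* Carve the DTable from the front of the workspace ; whatever remains
+     * becomes the scratch area handed to FSE_buildDTable_internal(). */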
+ if (FSE_DECOMPRESS_WKSP_SIZE(tableLog, maxSymbolValue) > wkspSize) return ERROR(tableLog_tooLarge);
+ workSpace = wksp->dtable + FSE_DTABLE_SIZE_U32(tableLog);
+ wkspSize -= sizeof(*wksp) + FSE_DTABLE_SIZE(tableLog);
+
+ CHECK_F( FSE_buildDTable_internal(wksp->dtable, wksp->ncount, maxSymbolValue, tableLog, workSpace, wkspSize) );
+
+ {
+ const void* ptr = wksp->dtable;
+ const FSE_DTableHeader* DTableH = (const FSE_DTableHeader*)ptr;
+ const U32 fastMode = DTableH->fastMode;
+
+ /* select fast mode (static) */
+ if (fastMode) return FSE_decompress_usingDTable_generic(dst, dstCapacity, ip, cSrcSize, wksp->dtable, 1);
+ return FSE_decompress_usingDTable_generic(dst, dstCapacity, ip, cSrcSize, wksp->dtable, 0);
+ }
+}
+
+/* Avoids the FORCE_INLINE of the _body() function. */
+static size_t FSE_decompress_wksp_body_default(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize)
+{
+ return FSE_decompress_wksp_body(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize, 0);
+}
+
+#if DYNAMIC_BMI2
+BMI2_TARGET_ATTRIBUTE static size_t FSE_decompress_wksp_body_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize)
+{
+ return FSE_decompress_wksp_body(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize, 1);
+}
+#endif
+
+size_t FSE_decompress_wksp_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize, int bmi2)
+{
+#if DYNAMIC_BMI2
+ if (bmi2) {
+ return FSE_decompress_wksp_body_bmi2(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize);
+ }
+#endif
+ (void)bmi2;
+ return FSE_decompress_wksp_body_default(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize);
+}
+
+
+typedef FSE_DTable DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)];
+
+
+
+#endif /* FSE_COMMONDEFS_ONLY */
diff --git a/lib/zstd/common/huf.h b/lib/zstd/common/huf.h
new file mode 100644
index 000000000..5042ff870
--- /dev/null
+++ b/lib/zstd/common/huf.h
@@ -0,0 +1,358 @@
+/* ******************************************************************
+ * huff0 huffman codec,
+ * part of Finite State Entropy library
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ *
+ * You can contact the author at :
+ * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+****************************************************************** */
+
+
+#ifndef HUF_H_298734234
+#define HUF_H_298734234
+
+/* *** Dependencies *** */
+#include "zstd_deps.h" /* size_t */
+
+
+/* *** library symbols visibility *** */
+/* Note : when linking with -fvisibility=hidden on gcc, or by default on Visual,
+ * HUF symbols remain "private" (internal symbols for library only).
+ * Set macro FSE_DLL_EXPORT to 1 if you want HUF symbols visible on DLL interface */
+#if defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) && defined(__GNUC__) && (__GNUC__ >= 4)
+# define HUF_PUBLIC_API __attribute__ ((visibility ("default")))
+#elif defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) /* Visual expected */
+# define HUF_PUBLIC_API __declspec(dllexport)
+#elif defined(FSE_DLL_IMPORT) && (FSE_DLL_IMPORT==1)
+# define HUF_PUBLIC_API __declspec(dllimport) /* not required, just to generate faster code (saves a function pointer load from IAT and an indirect jump) */
+#else
+# define HUF_PUBLIC_API
+#endif
+
+
+/* ========================== */
+/* *** simple functions *** */
+/* ========================== */
+
+/* HUF_compress() :
+ * Compress content from buffer 'src', of size 'srcSize', into buffer 'dst'.
+ * 'dst' buffer must be already allocated.
+ * Compression runs faster if `dstCapacity` >= HUF_compressBound(srcSize).
+ * `srcSize` must be <= `HUF_BLOCKSIZE_MAX` == 128 KB.
+ * @return : size of compressed data (<= `dstCapacity`).
+ * Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!!
+ * if HUF_isError(return), compression failed (more details using HUF_getErrorName())
+ */
+HUF_PUBLIC_API size_t HUF_compress(void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize);
+
+/* HUF_decompress() :
+ * Decompress HUF data from buffer 'cSrc', of size 'cSrcSize',
+ * into already allocated buffer 'dst', of minimum size 'originalSize'.
+ * `originalSize` : **must** be the ***exact*** size of original (uncompressed) data.
+ * Note : in contrast with FSE, HUF_decompress can regenerate
+ *        RLE (cSrcSize==1) and uncompressed (cSrcSize==originalSize) data,
+ *        because it knows the size to regenerate (originalSize).
+ * @return : size of regenerated data (== originalSize),
+ * or an error code, which can be tested using HUF_isError()
+ */
+HUF_PUBLIC_API size_t HUF_decompress(void* dst, size_t originalSize,
+ const void* cSrc, size_t cSrcSize);
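+
+/* Hedged round-trip sketch (buffers are caller-provided ; a 0 return from
+ * HUF_compress() means "not compressible", in which case the caller should
+ * store the input raw instead) :
+ *
+ *     size_t const cSize = HUF_compress(cBuf, HUF_compressBound(srcSize), src, srcSize);
+ *     if (HUF_isError(cSize)) return cSize;
+ *     if (cSize == 0) { ... }                 // not compressible : store src raw
+ *     else {
+ *         size_t const dSize = HUF_decompress(dst, srcSize, cBuf, cSize);
+ *         if (HUF_isError(dSize)) return dSize;   // dSize == srcSize on success
+ *     }
+ */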
+
+
+/* *** Tool functions *** */
+#define HUF_BLOCKSIZE_MAX (128 * 1024) /*< maximum input size for a single block compressed with HUF_compress */
+HUF_PUBLIC_API size_t HUF_compressBound(size_t size); /*< maximum compressed size (worst case) */
+
+/* Error Management */
+HUF_PUBLIC_API unsigned HUF_isError(size_t code); /*< tells if a return value is an error code */
+HUF_PUBLIC_API const char* HUF_getErrorName(size_t code); /*< provides error code string (useful for debugging) */
+
+
+/* *** Advanced function *** */
+
+/* HUF_compress2() :
+ * Same as HUF_compress(), but offers control over `maxSymbolValue` and `tableLog`.
+ * `maxSymbolValue` must be <= HUF_SYMBOLVALUE_MAX .
+ * `tableLog` must be `<= HUF_TABLELOG_MAX` . */
+HUF_PUBLIC_API size_t HUF_compress2 (void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ unsigned maxSymbolValue, unsigned tableLog);
+
+/* HUF_compress4X_wksp() :
+ * Same as HUF_compress2(), but uses externally allocated `workSpace`.
+ * `workspace` must be at least as large as HUF_WORKSPACE_SIZE */
+#define HUF_WORKSPACE_SIZE ((8 << 10) + 512 /* sorting scratch space */)
+#define HUF_WORKSPACE_SIZE_U64 (HUF_WORKSPACE_SIZE / sizeof(U64))
+HUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ unsigned maxSymbolValue, unsigned tableLog,
+ void* workSpace, size_t wkspSize);
+
+#endif /* HUF_H_298734234 */
+
+/* ******************************************************************
+ * WARNING !!
+ * The following section contains advanced and experimental definitions
+ * which shall never be used in the context of a dynamic library,
+ * because they are not guaranteed to remain stable in the future.
+ * Only consider them in association with static linking.
+ * *****************************************************************/
+#if !defined(HUF_H_HUF_STATIC_LINKING_ONLY)
+#define HUF_H_HUF_STATIC_LINKING_ONLY
+
+/* *** Dependencies *** */
+#include "mem.h" /* U32 */
+#define FSE_STATIC_LINKING_ONLY
+#include "fse.h"
+
+
+/* *** Constants *** */
+#define HUF_TABLELOG_MAX 12 /* max runtime value of tableLog (due to static allocation); can be modified up to HUF_TABLELOG_ABSOLUTEMAX */
+#define HUF_TABLELOG_DEFAULT 11 /* default tableLog value when none specified */
+#define HUF_SYMBOLVALUE_MAX 255
+
+#define HUF_TABLELOG_ABSOLUTEMAX  12  /* absolute limit of HUF_TABLELOG_MAX. Beyond that value, code does not work */
+#if (HUF_TABLELOG_MAX > HUF_TABLELOG_ABSOLUTEMAX)
+# error "HUF_TABLELOG_MAX is too large !"
+#endif
+
+
+/* ****************************************
+* Static allocation
+******************************************/
+/* HUF buffer bounds */
+#define HUF_CTABLEBOUND 129
+#define HUF_BLOCKBOUND(size) (size + (size>>8) + 8) /* only true when incompressible is pre-filtered with fast heuristic */
+#define HUF_COMPRESSBOUND(size) (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size)) /* Macro version, useful for static allocation */
+
+/* static allocation of HUF's Compression Table */
+/* this is a private definition, just exposed for allocation and strict aliasing purpose. never EVER access its members directly */
+typedef size_t HUF_CElt; /* consider it an incomplete type */
+#define HUF_CTABLE_SIZE_ST(maxSymbolValue) ((maxSymbolValue)+2) /* Use tables of size_t, for proper alignment */
+#define HUF_CTABLE_SIZE(maxSymbolValue) (HUF_CTABLE_SIZE_ST(maxSymbolValue) * sizeof(size_t))
+#define HUF_CREATE_STATIC_CTABLE(name, maxSymbolValue) \
+ HUF_CElt name[HUF_CTABLE_SIZE_ST(maxSymbolValue)] /* no final ; */
+
+/* static allocation of HUF's DTable */
+typedef U32 HUF_DTable;
+#define HUF_DTABLE_SIZE(maxTableLog) (1 + (1<<(maxTableLog)))
+#define HUF_CREATE_STATIC_DTABLEX1(DTable, maxTableLog) \
+ HUF_DTable DTable[HUF_DTABLE_SIZE((maxTableLog)-1)] = { ((U32)((maxTableLog)-1) * 0x01000001) }
+#define HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \
+ HUF_DTable DTable[HUF_DTABLE_SIZE(maxTableLog)] = { ((U32)(maxTableLog) * 0x01000001) }
+
+
+/* ****************************************
+* Advanced decompression functions
+******************************************/
+size_t HUF_decompress4X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /*< single-symbol decoder */
+#ifndef HUF_FORCE_DECOMPRESS_X1
+size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /*< double-symbols decoder */
+#endif
+
+size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /*< decodes RLE and uncompressed */
+size_t HUF_decompress4X_hufOnly(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /*< considers RLE and uncompressed as errors */
+size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /*< considers RLE and uncompressed as errors */
+size_t HUF_decompress4X1_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /*< single-symbol decoder */
+size_t HUF_decompress4X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /*< single-symbol decoder */
+#ifndef HUF_FORCE_DECOMPRESS_X1
+size_t HUF_decompress4X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /*< double-symbols decoder */
+size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /*< double-symbols decoder */
+#endif
+
+
+/* ****************************************
+ * HUF detailed API
+ * ****************************************/
+
+/*! HUF_compress() does the following:
+ * 1. count symbol occurrences from source[] into table count[] using FSE_count() (exposed within "fse.h")
+ * 2. (optional) refine tableLog using HUF_optimalTableLog()
+ * 3. build Huffman table from count using HUF_buildCTable()
+ * 4. save Huffman table to memory buffer using HUF_writeCTable()
+ * 5. encode the data stream using HUF_compress4X_usingCTable()
+ *
+ * The following API allows targeting specific sub-functions for advanced tasks.
+ * For example, it's possible to compress several blocks using the same 'CTable',
+ * or to save and regenerate 'CTable' using external methods.
+ */
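+
+/* Hedged sketch of the "same CTable for several blocks" case mentioned above
+ * (the histogram `count[]` / `maxSymbolValue` is assumed produced by step 1 ;
+ * error checks abbreviated) :
+ *
+ *     HUF_CREATE_STATIC_CTABLE(ct, HUF_SYMBOLVALUE_MAX);
+ *     unsigned const huffLog = HUF_optimalTableLog(HUF_TABLELOG_DEFAULT, srcSize, maxSymbolValue);
+ *     size_t const maxBits = HUF_buildCTable(ct, count, maxSymbolValue, huffLog);
+ *     size_t const hSize = HUF_writeCTable(dst, dstCapacity, ct, maxSymbolValue, (unsigned)maxBits);
+ *     size_t const cSize = HUF_compress4X_usingCTable((char*)dst + hSize, dstCapacity - hSize,
+ *                                                     src, srcSize, ct);
+ */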
+unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue);
+size_t HUF_buildCTable (HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue, unsigned maxNbBits);   /* @return : maxNbBits; CTable and count can overlap, in which case CTable will overwrite count's content */
+size_t HUF_writeCTable (void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog);
+size_t HUF_writeCTable_wksp(void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog, void* workspace, size_t workspaceSize);
+size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable);
+size_t HUF_compress4X_usingCTable_bmi2(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int bmi2);
+size_t HUF_estimateCompressedSize(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue);
+int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue);
+
+typedef enum {
+ HUF_repeat_none, /*< Cannot use the previous table */
+ HUF_repeat_check, /*< Can use the previous table but it must be checked. Note : The previous table must have been constructed by HUF_compress{1, 4}X_repeat */
+ HUF_repeat_valid /*< Can use the previous table and it is assumed to be valid */
+ } HUF_repeat;
+/* HUF_compress4X_repeat() :
+ * Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
+ * If it uses hufTable it does not modify hufTable or repeat.
+ * If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
+ * If preferRepeat then the old table will always be used if valid.
+ * If suspectUncompressible then some sampling checks will be run to potentially skip huffman coding */
+size_t HUF_compress4X_repeat(void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ unsigned maxSymbolValue, unsigned tableLog,
+ void* workSpace, size_t wkspSize, /*< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */
+ HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2, unsigned suspectUncompressible);
+
+/* HUF_buildCTable_wksp() :
+ * Same as HUF_buildCTable(), but using externally allocated scratch buffer.
+ * `workSpace` must be aligned on 4-bytes boundaries, and its size must be >= HUF_CTABLE_WORKSPACE_SIZE.
+ */
+#define HUF_CTABLE_WORKSPACE_SIZE_U32 (2*HUF_SYMBOLVALUE_MAX +1 +1)
+#define HUF_CTABLE_WORKSPACE_SIZE (HUF_CTABLE_WORKSPACE_SIZE_U32 * sizeof(unsigned))
+size_t HUF_buildCTable_wksp (HUF_CElt* tree,
+ const unsigned* count, U32 maxSymbolValue, U32 maxNbBits,
+ void* workSpace, size_t wkspSize);
+
+/*! HUF_readStats() :
+ * Read compact Huffman tree, saved by HUF_writeCTable().
+ * `huffWeight` is destination buffer.
+ * @return : size read from `src`, or an error code.
+ * Note : needed by HUF_readCTable() and HUF_readDTableXn(). */
+size_t HUF_readStats(BYTE* huffWeight, size_t hwSize,
+ U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr,
+ const void* src, size_t srcSize);
+
+/*! HUF_readStats_wksp() :
+ * Same as HUF_readStats() but takes an external workspace which must be
+ * 4-byte aligned and its size must be >= HUF_READ_STATS_WORKSPACE_SIZE.
+ * If the CPU has BMI2 support, pass bmi2=1, otherwise pass bmi2=0.
+ */
+#define HUF_READ_STATS_WORKSPACE_SIZE_U32 FSE_DECOMPRESS_WKSP_SIZE_U32(6, HUF_TABLELOG_MAX-1)
+#define HUF_READ_STATS_WORKSPACE_SIZE (HUF_READ_STATS_WORKSPACE_SIZE_U32 * sizeof(unsigned))
+size_t HUF_readStats_wksp(BYTE* huffWeight, size_t hwSize,
+ U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr,
+ const void* src, size_t srcSize,
+ void* workspace, size_t wkspSize,
+ int bmi2);
+
+/* HUF_readCTable() :
+ * Loading a CTable saved with HUF_writeCTable() */
+size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned *hasZeroWeights);
+
+/* HUF_getNbBitsFromCTable() :
+ * Read nbBits from CTable symbolTable, for symbol `symbolValue` presumed <= HUF_SYMBOLVALUE_MAX
+ * Note 1 : is not inlined, as HUF_CElt definition is private */
+U32 HUF_getNbBitsFromCTable(const HUF_CElt* symbolTable, U32 symbolValue);
+
+/*
+ * HUF_decompress() does the following:
+ * 1. select the decompression algorithm (X1, X2) based on pre-computed heuristics
+ * 2. build Huffman table from the saved representation, using HUF_readDTableX?()
+ * 3. decode 1 or 4 segments in parallel using HUF_decompress?X?_usingDTable()
+ */
+
+/* HUF_selectDecoder() :
+ * Tells which decoder is likely to decode faster,
+ * based on a set of pre-computed metrics.
+ * @return : 0==HUF_decompress4X1, 1==HUF_decompress4X2 .
+ * Assumption : 0 < dstSize <= 128 KB */
+U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize);
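+
+/* Hedged sketch of the 3-step decode flow above, sized for the X2 (larger)
+ * case ; header-size and error checks abbreviated, and the X1/X2 readers are
+ * subject to the HUF_FORCE_DECOMPRESS_* guards below :
+ *
+ *     HUF_CREATE_STATIC_DTABLEX2(dt, HUF_TABLELOG_MAX);
+ *     size_t const hSize = HUF_selectDecoder(dstSize, cSrcSize)
+ *                        ? HUF_readDTableX2(dt, cSrc, cSrcSize)
+ *                        : HUF_readDTableX1(dt, cSrc, cSrcSize);
+ *     size_t const dSize = HUF_decompress4X_usingDTable(dst, dstSize,
+ *                              (const char*)cSrc + hSize, cSrcSize - hSize, dt);
+ */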
+
+/*
+ * The minimum workspace size for the `workSpace` used in
+ * HUF_readDTableX1_wksp() and HUF_readDTableX2_wksp().
+ *
+ * The space used depends on HUF_TABLELOG_MAX, ranging from ~1500 bytes when
+ * HUF_TABLELOG_MAX=12 to ~1850 bytes when HUF_TABLELOG_MAX=15.
+ * Buffer overflow errors may potentially occur if code modifications result in
+ * a required workspace size greater than that specified in the following
+ * macro.
+ */
+#define HUF_DECOMPRESS_WORKSPACE_SIZE ((2 << 10) + (1 << 9))
+#define HUF_DECOMPRESS_WORKSPACE_SIZE_U32 (HUF_DECOMPRESS_WORKSPACE_SIZE / sizeof(U32))
+
+#ifndef HUF_FORCE_DECOMPRESS_X2
+size_t HUF_readDTableX1 (HUF_DTable* DTable, const void* src, size_t srcSize);
+size_t HUF_readDTableX1_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize);
+#endif
+#ifndef HUF_FORCE_DECOMPRESS_X1
+size_t HUF_readDTableX2 (HUF_DTable* DTable, const void* src, size_t srcSize);
+size_t HUF_readDTableX2_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize);
+#endif
+
+size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
+#ifndef HUF_FORCE_DECOMPRESS_X2
+size_t HUF_decompress4X1_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
+#endif
+#ifndef HUF_FORCE_DECOMPRESS_X1
+size_t HUF_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
+#endif
+
+
+/* ====================== */
+/* single stream variants */
+/* ====================== */
+
+size_t HUF_compress1X (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog);
+size_t HUF_compress1X_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize); /*< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U64 U64 */
+size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable);
+size_t HUF_compress1X_usingCTable_bmi2(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int bmi2);
+/* HUF_compress1X_repeat() :
+ * Same as HUF_compress1X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
+ * If it uses hufTable it does not modify hufTable or repeat.
+ * If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
+ * If preferRepeat then the old table will always be used if valid.
+ * If suspectUncompressible then some sampling checks will be run to potentially skip huffman coding */
+size_t HUF_compress1X_repeat(void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ unsigned maxSymbolValue, unsigned tableLog,
+ void* workSpace, size_t wkspSize, /*< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */
+ HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2, unsigned suspectUncompressible);
+
+size_t HUF_decompress1X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* single-symbol decoder */
+#ifndef HUF_FORCE_DECOMPRESS_X1
+size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* double-symbol decoder */
+#endif
+
+size_t HUF_decompress1X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
+size_t HUF_decompress1X_DCtx_wksp (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);
+#ifndef HUF_FORCE_DECOMPRESS_X2
+size_t HUF_decompress1X1_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /*< single-symbol decoder */
+size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /*< single-symbol decoder */
+#endif
+#ifndef HUF_FORCE_DECOMPRESS_X1
+size_t HUF_decompress1X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /*< double-symbols decoder */
+size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /*< double-symbols decoder */
+#endif
+
+size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); /*< automatic selection of single or double symbol decoder, based on DTable */
+#ifndef HUF_FORCE_DECOMPRESS_X2
+size_t HUF_decompress1X1_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
+#endif
+#ifndef HUF_FORCE_DECOMPRESS_X1
+size_t HUF_decompress1X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
+#endif
+
+/* BMI2 variants.
+ * If the CPU has BMI2 support, pass bmi2=1, otherwise pass bmi2=0.
+ */
+size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2);
+#ifndef HUF_FORCE_DECOMPRESS_X2
+size_t HUF_decompress1X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2);
+#endif
+size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2);
+size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2);
+#ifndef HUF_FORCE_DECOMPRESS_X2
+size_t HUF_readDTableX1_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int bmi2);
+#endif
+#ifndef HUF_FORCE_DECOMPRESS_X1
+size_t HUF_readDTableX2_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int bmi2);
+#endif
+
+#endif /* HUF_STATIC_LINKING_ONLY */
+
diff --git a/lib/zstd/common/mem.h b/lib/zstd/common/mem.h
new file mode 100644
index 000000000..1d9cc0392
--- /dev/null
+++ b/lib/zstd/common/mem.h
@@ -0,0 +1,261 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef MEM_H_MODULE
+#define MEM_H_MODULE
+
+/*-****************************************
+* Dependencies
+******************************************/
+#include <asm/unaligned.h> /* get_unaligned, put_unaligned* */
+#include <linux/compiler.h> /* inline */
+#include <linux/swab.h> /* swab32, swab64 */
+#include <linux/types.h> /* size_t, ptrdiff_t */
+#include "debug.h" /* DEBUG_STATIC_ASSERT */
+
+/*-****************************************
+* Compiler specifics
+******************************************/
+#define MEM_STATIC static inline
+
+/*-**************************************************************
+* Basic Types
+*****************************************************************/
+typedef uint8_t BYTE;
+typedef uint8_t U8;
+typedef int8_t S8;
+typedef uint16_t U16;
+typedef int16_t S16;
+typedef uint32_t U32;
+typedef int32_t S32;
+typedef uint64_t U64;
+typedef int64_t S64;
+
+/*-**************************************************************
+* Memory I/O API
+*****************************************************************/
+/*=== Static platform detection ===*/
+MEM_STATIC unsigned MEM_32bits(void);
+MEM_STATIC unsigned MEM_64bits(void);
+MEM_STATIC unsigned MEM_isLittleEndian(void);
+
+/*=== Native unaligned read/write ===*/
+MEM_STATIC U16 MEM_read16(const void* memPtr);
+MEM_STATIC U32 MEM_read32(const void* memPtr);
+MEM_STATIC U64 MEM_read64(const void* memPtr);
+MEM_STATIC size_t MEM_readST(const void* memPtr);
+
+MEM_STATIC void MEM_write16(void* memPtr, U16 value);
+MEM_STATIC void MEM_write32(void* memPtr, U32 value);
+MEM_STATIC void MEM_write64(void* memPtr, U64 value);
+
+/*=== Little endian unaligned read/write ===*/
+MEM_STATIC U16 MEM_readLE16(const void* memPtr);
+MEM_STATIC U32 MEM_readLE24(const void* memPtr);
+MEM_STATIC U32 MEM_readLE32(const void* memPtr);
+MEM_STATIC U64 MEM_readLE64(const void* memPtr);
+MEM_STATIC size_t MEM_readLEST(const void* memPtr);
+
+MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val);
+MEM_STATIC void MEM_writeLE24(void* memPtr, U32 val);
+MEM_STATIC void MEM_writeLE32(void* memPtr, U32 val32);
+MEM_STATIC void MEM_writeLE64(void* memPtr, U64 val64);
+MEM_STATIC void MEM_writeLEST(void* memPtr, size_t val);
+
+/*=== Big endian unaligned read/write ===*/
+MEM_STATIC U32 MEM_readBE32(const void* memPtr);
+MEM_STATIC U64 MEM_readBE64(const void* memPtr);
+MEM_STATIC size_t MEM_readBEST(const void* memPtr);
+
+MEM_STATIC void MEM_writeBE32(void* memPtr, U32 val32);
+MEM_STATIC void MEM_writeBE64(void* memPtr, U64 val64);
+MEM_STATIC void MEM_writeBEST(void* memPtr, size_t val);
+
+/*=== Byteswap ===*/
+MEM_STATIC U32 MEM_swap32(U32 in);
+MEM_STATIC U64 MEM_swap64(U64 in);
+MEM_STATIC size_t MEM_swapST(size_t in);
+
+/*-**************************************************************
+* Memory I/O Implementation
+*****************************************************************/
+MEM_STATIC unsigned MEM_32bits(void)
+{
+ return sizeof(size_t) == 4;
+}
+
+MEM_STATIC unsigned MEM_64bits(void)
+{
+ return sizeof(size_t) == 8;
+}
+
+#if defined(__LITTLE_ENDIAN)
+#define MEM_LITTLE_ENDIAN 1
+#else
+#define MEM_LITTLE_ENDIAN 0
+#endif
+
+MEM_STATIC unsigned MEM_isLittleEndian(void)
+{
+ return MEM_LITTLE_ENDIAN;
+}
+
+MEM_STATIC U16 MEM_read16(const void *memPtr)
+{
+ return get_unaligned((const U16 *)memPtr);
+}
+
+MEM_STATIC U32 MEM_read32(const void *memPtr)
+{
+ return get_unaligned((const U32 *)memPtr);
+}
+
+MEM_STATIC U64 MEM_read64(const void *memPtr)
+{
+ return get_unaligned((const U64 *)memPtr);
+}
+
+MEM_STATIC size_t MEM_readST(const void *memPtr)
+{
+ return get_unaligned((const size_t *)memPtr);
+}
+
+MEM_STATIC void MEM_write16(void *memPtr, U16 value)
+{
+ put_unaligned(value, (U16 *)memPtr);
+}
+
+MEM_STATIC void MEM_write32(void *memPtr, U32 value)
+{
+ put_unaligned(value, (U32 *)memPtr);
+}
+
+MEM_STATIC void MEM_write64(void *memPtr, U64 value)
+{
+ put_unaligned(value, (U64 *)memPtr);
+}
+
+/*=== Little endian r/w ===*/
+
+MEM_STATIC U16 MEM_readLE16(const void *memPtr)
+{
+ return get_unaligned_le16(memPtr);
+}
+
+MEM_STATIC void MEM_writeLE16(void *memPtr, U16 val)
+{
+ put_unaligned_le16(val, memPtr);
+}
+
+MEM_STATIC U32 MEM_readLE24(const void *memPtr)
+{
+ return MEM_readLE16(memPtr) + (((const BYTE *)memPtr)[2] << 16);
+}
+
+MEM_STATIC void MEM_writeLE24(void *memPtr, U32 val)
+{
+ MEM_writeLE16(memPtr, (U16)val);
+ ((BYTE *)memPtr)[2] = (BYTE)(val >> 16);
+}
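+
+/* Illustrative sketch, not part of upstream zstd: a LE24 round trip.
+ * 0xABCDEF is laid out in memory as { 0xEF, 0xCD, 0xAB }, so
+ * MEM_readLE24() recovers the original value on any endianness. */
+#if 0
+static void MEM_le24_example(void)
+{
+    BYTE buf[3];
+    MEM_writeLE24(buf, 0xABCDEFU);   /* buf = { 0xEF, 0xCD, 0xAB } */
+    /* MEM_readLE24(buf) == 0xABCDEFU */
+}
+#endif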
+
+MEM_STATIC U32 MEM_readLE32(const void *memPtr)
+{
+ return get_unaligned_le32(memPtr);
+}
+
+MEM_STATIC void MEM_writeLE32(void *memPtr, U32 val32)
+{
+ put_unaligned_le32(val32, memPtr);
+}
+
+MEM_STATIC U64 MEM_readLE64(const void *memPtr)
+{
+ return get_unaligned_le64(memPtr);
+}
+
+MEM_STATIC void MEM_writeLE64(void *memPtr, U64 val64)
+{
+ put_unaligned_le64(val64, memPtr);
+}
+
+MEM_STATIC size_t MEM_readLEST(const void *memPtr)
+{
+ if (MEM_32bits())
+ return (size_t)MEM_readLE32(memPtr);
+ else
+ return (size_t)MEM_readLE64(memPtr);
+}
+
+MEM_STATIC void MEM_writeLEST(void *memPtr, size_t val)
+{
+ if (MEM_32bits())
+ MEM_writeLE32(memPtr, (U32)val);
+ else
+ MEM_writeLE64(memPtr, (U64)val);
+}
+
+/*=== Big endian r/w ===*/
+
+MEM_STATIC U32 MEM_readBE32(const void *memPtr)
+{
+ return get_unaligned_be32(memPtr);
+}
+
+MEM_STATIC void MEM_writeBE32(void *memPtr, U32 val32)
+{
+ put_unaligned_be32(val32, memPtr);
+}
+
+MEM_STATIC U64 MEM_readBE64(const void *memPtr)
+{
+ return get_unaligned_be64(memPtr);
+}
+
+MEM_STATIC void MEM_writeBE64(void *memPtr, U64 val64)
+{
+ put_unaligned_be64(val64, memPtr);
+}
+
+MEM_STATIC size_t MEM_readBEST(const void *memPtr)
+{
+ if (MEM_32bits())
+ return (size_t)MEM_readBE32(memPtr);
+ else
+ return (size_t)MEM_readBE64(memPtr);
+}
+
+MEM_STATIC void MEM_writeBEST(void *memPtr, size_t val)
+{
+ if (MEM_32bits())
+ MEM_writeBE32(memPtr, (U32)val);
+ else
+ MEM_writeBE64(memPtr, (U64)val);
+}
+
+MEM_STATIC U32 MEM_swap32(U32 in)
+{
+ return swab32(in);
+}
+
+MEM_STATIC U64 MEM_swap64(U64 in)
+{
+ return swab64(in);
+}
+
+MEM_STATIC size_t MEM_swapST(size_t in)
+{
+ if (MEM_32bits())
+ return (size_t)MEM_swap32((U32)in);
+ else
+ return (size_t)MEM_swap64((U64)in);
+}
+
+#endif /* MEM_H_MODULE */
diff --git a/lib/zstd/common/portability_macros.h b/lib/zstd/common/portability_macros.h
new file mode 100644
index 000000000..0e3b2c0a5
--- /dev/null
+++ b/lib/zstd/common/portability_macros.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_PORTABILITY_MACROS_H
+#define ZSTD_PORTABILITY_MACROS_H
+
+/*
+ * This header file contains macro definitions to support portability.
+ * This header is shared between C and ASM code, so it MUST only
+ * contain macro definitions. It MUST NOT contain any C code.
+ *
+ * This header ONLY defines macros to detect platforms/feature support.
+ */
+
+
+/* compat. with non-clang compilers */
+#ifndef __has_attribute
+ #define __has_attribute(x) 0
+#endif
+
+/* compat. with non-clang compilers */
+#ifndef __has_builtin
+# define __has_builtin(x) 0
+#endif
+
+/* compat. with non-clang compilers */
+#ifndef __has_feature
+# define __has_feature(x) 0
+#endif
+
+/* The upstream msan/asan/dfsan detection macros are omitted here: the
+ * kernel build does not use those userspace sanitizer interfaces. */
+
+/* Mark the internal assembly functions as hidden */
+#ifdef __ELF__
+# define ZSTD_HIDE_ASM_FUNCTION(func) .hidden func
+#else
+# define ZSTD_HIDE_ASM_FUNCTION(func)
+#endif
+
+/* Enable runtime BMI2 dispatch based on the CPU.
+ * Enabled for clang & gcc >=4.8 on x86 when BMI2 isn't enabled by default.
+ */
+#ifndef DYNAMIC_BMI2
+ #if ((defined(__clang__) && __has_attribute(__target__)) \
+ || (defined(__GNUC__) \
+ && (__GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)))) \
+ && (defined(__x86_64__) || defined(_M_X64)) \
+ && !defined(__BMI2__)
+ # define DYNAMIC_BMI2 1
+ #else
+ # define DYNAMIC_BMI2 0
+ #endif
+#endif
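+
+/*
+ * Illustrative sketch, not part of this header: with DYNAMIC_BMI2 == 1, a hot
+ * function can be compiled twice -- once with the BMI2 target attribute, once
+ * without -- and the variant picked at runtime. Names are hypothetical.
+ */
+#if 0
+__attribute__((__target__("bmi2")))
+static size_t hot_path_bmi2(size_t x) { return x + 1; }
+static size_t hot_path_default(size_t x) { return x + 1; }
+static size_t hot_path(size_t x, int cpuHasBmi2)
+{
+    return cpuHasBmi2 ? hot_path_bmi2(x) : hot_path_default(x);
+}
+#endif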
+
+/*
+ * Only enable assembly for GNUC-compatible compilers,
+ * because other platforms may not support GAS assembly syntax.
+ *
+ * Only enable assembly for Linux / macOS; other platforms may
+ * work, but they haven't been tested. This could likely be
+ * extended to BSD systems.
+ *
+ * Disable assembly when MSAN is enabled, because MSAN requires
+ * 100% of code to be instrumented to work.
+ */
+#define ZSTD_ASM_SUPPORTED 1
+
+/*
+ * Determines whether we should enable assembly for x86-64
+ * with BMI2.
+ *
+ * Upstream enables this when all of the following conditions hold:
+ * - ASM hasn't been explicitly disabled by defining ZSTD_DISABLE_ASM
+ * - Assembly is supported
+ * - We are compiling for x86-64 and either:
+ *   - DYNAMIC_BMI2 is enabled
+ *   - BMI2 is supported at compile time
+ * This port hardcodes the macro to 0, so the BMI2 assembly path is unused.
+ */
+#define ZSTD_ENABLE_ASM_X86_64_BMI2 0
+
+#endif /* ZSTD_PORTABILITY_MACROS_H */
diff --git a/lib/zstd/common/zstd_common.c b/lib/zstd/common/zstd_common.c
new file mode 100644
index 000000000..3d7e35b30
--- /dev/null
+++ b/lib/zstd/common/zstd_common.c
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+
+
+/*-*************************************
+* Dependencies
+***************************************/
+#define ZSTD_DEPS_NEED_MALLOC
+#include "zstd_deps.h" /* ZSTD_malloc, ZSTD_calloc, ZSTD_free, ZSTD_memset */
+#include "error_private.h"
+#include "zstd_internal.h"
+
+
+/*-****************************************
+* Version
+******************************************/
+unsigned ZSTD_versionNumber(void) { return ZSTD_VERSION_NUMBER; }
+
+const char* ZSTD_versionString(void) { return ZSTD_VERSION_STRING; }
+
+
+/*-****************************************
+* ZSTD Error Management
+******************************************/
+#undef ZSTD_isError /* defined within zstd_internal.h */
+/*! ZSTD_isError() :
+ * tells if a return value is an error code
+ * symbol is required for external callers */
+unsigned ZSTD_isError(size_t code) { return ERR_isError(code); }
+
+/*! ZSTD_getErrorName() :
+ * provides error code string from function result (useful for debugging) */
+const char* ZSTD_getErrorName(size_t code) { return ERR_getErrorName(code); }
+
+/*! ZSTD_getError() :
+ * convert a `size_t` function result into a proper ZSTD_errorCode enum */
+ZSTD_ErrorCode ZSTD_getErrorCode(size_t code) { return ERR_getErrorCode(code); }
+
+/*! ZSTD_getErrorString() :
+ * provides error code string from enum */
+const char* ZSTD_getErrorString(ZSTD_ErrorCode code) { return ERR_getErrorString(code); }
+
+
+
+/*=**************************************************************
+* Custom allocator
+****************************************************************/
+void* ZSTD_customMalloc(size_t size, ZSTD_customMem customMem)
+{
+ if (customMem.customAlloc)
+ return customMem.customAlloc(customMem.opaque, size);
+ return ZSTD_malloc(size);
+}
+
+void* ZSTD_customCalloc(size_t size, ZSTD_customMem customMem)
+{
+    if (customMem.customAlloc) {
+        /* calloc implemented as malloc+memset;
+         * not as efficient as calloc, but the next best guess for custom malloc.
+         * Guard the memset: the custom allocator may fail and return NULL. */
+        void* const ptr = customMem.customAlloc(customMem.opaque, size);
+        if (ptr != NULL)
+            ZSTD_memset(ptr, 0, size);
+        return ptr;
+    }
+    return ZSTD_calloc(1, size);
+}
+
+void ZSTD_customFree(void* ptr, ZSTD_customMem customMem)
+{
+ if (ptr!=NULL) {
+ if (customMem.customFree)
+ customMem.customFree(customMem.opaque, ptr);
+ else
+ ZSTD_free(ptr);
+ }
+}
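+
+/* Illustrative sketch, not part of this file: routing allocations through a
+ * caller-provided ZSTD_customMem. The kvmalloc-based callbacks (linux/mm.h)
+ * are a hypothetical example, not the kernel's actual integration. */
+#if 0
+static void* example_alloc(void* opaque, size_t size)
+{
+    (void)opaque;
+    return kvmalloc(size, GFP_KERNEL);
+}
+static void example_free(void* opaque, void* ptr)
+{
+    (void)opaque;
+    kvfree(ptr);
+}
+static const ZSTD_customMem example_mem = { example_alloc, example_free, NULL };
+/* ZSTD_customMalloc(n, example_mem) then calls example_alloc() instead of
+ * the always-failing ZSTD_malloc() defined in zstd_deps.h. */
+#endif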
diff --git a/lib/zstd/common/zstd_deps.h b/lib/zstd/common/zstd_deps.h
new file mode 100644
index 000000000..2c34e8a33
--- /dev/null
+++ b/lib/zstd/common/zstd_deps.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
+/*
+ * Copyright (c) Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+/*
+ * This file provides common libc dependencies that zstd requires.
+ * The purpose is to allow replacing this file with a custom implementation
+ * to compile zstd without libc support.
+ */
+
+/* Need:
+ * NULL
+ * INT_MAX
+ * UINT_MAX
+ * ZSTD_memcpy()
+ * ZSTD_memset()
+ * ZSTD_memmove()
+ */
+#ifndef ZSTD_DEPS_COMMON
+#define ZSTD_DEPS_COMMON
+
+#include <linux/limits.h>
+#include <linux/stddef.h>
+
+#define ZSTD_memcpy(d,s,n) __builtin_memcpy((d),(s),(n))
+#define ZSTD_memmove(d,s,n) __builtin_memmove((d),(s),(n))
+#define ZSTD_memset(d,s,n) __builtin_memset((d),(s),(n))
+
+#endif /* ZSTD_DEPS_COMMON */
+
+/*
+ * Define malloc as always failing. That means the user must
+ * either use ZSTD_customMem or statically allocate memory.
+ * Need:
+ * ZSTD_malloc()
+ * ZSTD_free()
+ * ZSTD_calloc()
+ */
+#ifdef ZSTD_DEPS_NEED_MALLOC
+#ifndef ZSTD_DEPS_MALLOC
+#define ZSTD_DEPS_MALLOC
+
+#define ZSTD_malloc(s) ({ (void)(s); NULL; })
+#define ZSTD_free(p) ((void)(p))
+#define ZSTD_calloc(n,s) ({ (void)(n); (void)(s); NULL; })
+
+#endif /* ZSTD_DEPS_MALLOC */
+#endif /* ZSTD_DEPS_NEED_MALLOC */
+
+/*
+ * Provides 64-bit math support.
+ * Need:
+ * U64 ZSTD_div64(U64 dividend, U32 divisor)
+ */
+#ifdef ZSTD_DEPS_NEED_MATH64
+#ifndef ZSTD_DEPS_MATH64
+#define ZSTD_DEPS_MATH64
+
+#include <linux/math64.h>
+
+static uint64_t ZSTD_div64(uint64_t dividend, uint32_t divisor) {
+ return div_u64(dividend, divisor);
+}
+
+#endif /* ZSTD_DEPS_MATH64 */
+#endif /* ZSTD_DEPS_NEED_MATH64 */
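+
+/* Illustrative: ZSTD_div64(10, 3) == 3. div_u64() performs the 64-by-32-bit
+ * division without pulling in libgcc's __udivdi3 on 32-bit targets. */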
+
+/*
+ * This is only requested when DEBUGLEVEL >= 1, meaning
+ * it is disabled in production.
+ * Need:
+ * assert()
+ */
+#ifdef ZSTD_DEPS_NEED_ASSERT
+#ifndef ZSTD_DEPS_ASSERT
+#define ZSTD_DEPS_ASSERT
+
+#include <linux/kernel.h>
+
+#define assert(x) WARN_ON(!(x))
+
+#endif /* ZSTD_DEPS_ASSERT */
+#endif /* ZSTD_DEPS_NEED_ASSERT */
+
+/*
+ * This is only requested when DEBUGLEVEL >= 2, meaning
+ * it is disabled in production.
+ * Need:
+ * ZSTD_DEBUG_PRINT()
+ */
+#ifdef ZSTD_DEPS_NEED_IO
+#ifndef ZSTD_DEPS_IO
+#define ZSTD_DEPS_IO
+
+#include <linux/printk.h>
+
+#define ZSTD_DEBUG_PRINT(...) pr_debug(__VA_ARGS__)
+
+#endif /* ZSTD_DEPS_IO */
+#endif /* ZSTD_DEPS_NEED_IO */
diff --git a/lib/zstd/common/zstd_internal.h b/lib/zstd/common/zstd_internal.h
new file mode 100644
index 000000000..93305d9b4
--- /dev/null
+++ b/lib/zstd/common/zstd_internal.h
@@ -0,0 +1,443 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_CCOMMON_H_MODULE
+#define ZSTD_CCOMMON_H_MODULE
+
+/* this module contains definitions which must be identical
+ * across compression, decompression and dictBuilder.
+ * It also contains a few functions useful to at least 2 of them
+ * and which benefit from being inlined */
+
+/*-*************************************
+* Dependencies
+***************************************/
+#include "compiler.h"
+#include "cpu.h"
+#include "mem.h"
+#include "debug.h" /* assert, DEBUGLOG, RAWLOG, g_debuglevel */
+#include "error_private.h"
+#define ZSTD_STATIC_LINKING_ONLY
+#include <linux/zstd.h>
+#define FSE_STATIC_LINKING_ONLY
+#include "fse.h"
+#define HUF_STATIC_LINKING_ONLY
+#include "huf.h"
+#include <linux/xxhash.h> /* XXH_reset, update, digest */
+#define ZSTD_TRACE 0
+
+
+/* ---- static assert (debug) --- */
+#define ZSTD_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c)
+#define ZSTD_isError ERR_isError /* for inlining */
+#define FSE_isError ERR_isError
+#define HUF_isError ERR_isError
+
+
+/*-*************************************
+* shared macros
+***************************************/
+#undef MIN
+#undef MAX
+#define MIN(a,b) ((a)<(b) ? (a) : (b))
+#define MAX(a,b) ((a)>(b) ? (a) : (b))
+#define BOUNDED(min,val,max) (MAX(min,MIN(val,max)))
+
+
+/*-*************************************
+* Common constants
+***************************************/
+#define ZSTD_OPT_NUM (1<<12)
+
+#define ZSTD_REP_NUM 3 /* number of repcodes */
+static UNUSED_ATTR const U32 repStartValue[ZSTD_REP_NUM] = { 1, 4, 8 };
+
+#define KB *(1 <<10)
+#define MB *(1 <<20)
+#define GB *(1U<<30)
+
+#define BIT7 128
+#define BIT6 64
+#define BIT5 32
+#define BIT4 16
+#define BIT1 2
+#define BIT0 1
+
+#define ZSTD_WINDOWLOG_ABSOLUTEMIN 10
+static UNUSED_ATTR const size_t ZSTD_fcs_fieldSize[4] = { 0, 2, 4, 8 };
+static UNUSED_ATTR const size_t ZSTD_did_fieldSize[4] = { 0, 1, 2, 4 };
+
+#define ZSTD_FRAMEIDSIZE 4 /* magic number size */
+
+#define ZSTD_BLOCKHEADERSIZE 3   /* the C standard doesn't allow a `static const` variable to be initialized from another `static const` variable */
+static UNUSED_ATTR const size_t ZSTD_blockHeaderSize = ZSTD_BLOCKHEADERSIZE;
+typedef enum { bt_raw, bt_rle, bt_compressed, bt_reserved } blockType_e;
+
+#define ZSTD_FRAMECHECKSUMSIZE 4
+
+#define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */
+#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */) /* for a non-null block */
+
+#define HufLog 12
+typedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingType_e;
+
+#define LONGNBSEQ 0x7F00
+
+#define MINMATCH 3
+
+#define Litbits 8
+#define MaxLit ((1<<Litbits) - 1)
+#define MaxML 52
+#define MaxLL 35
+#define DefaultMaxOff 28
+#define MaxOff 31
+#define MaxSeq MAX(MaxLL, MaxML) /* Assumption : MaxOff < MaxLL,MaxML */
+#define MLFSELog 9
+#define LLFSELog 9
+#define OffFSELog 8
+#define MaxFSELog MAX(MAX(MLFSELog, LLFSELog), OffFSELog)
+
+#define ZSTD_MAX_HUF_HEADER_SIZE 128 /* header + <= 127 byte tree description */
+/* Each table cannot take more than #symbols * FSELog bits */
+#define ZSTD_MAX_FSE_HEADERS_SIZE (((MaxML + 1) * MLFSELog + (MaxLL + 1) * LLFSELog + (MaxOff + 1) * OffFSELog + 7) / 8)
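+/* Worked example: with MaxML=52, MaxLL=35, MaxOff=31 and the log sizes above,
+ * ZSTD_MAX_FSE_HEADERS_SIZE = (53*9 + 36*9 + 32*8 + 7) / 8 = 1064 / 8 = 133 bytes. */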
+
+static UNUSED_ATTR const U8 LL_bits[MaxLL+1] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 2, 2, 3, 3,
+ 4, 6, 7, 8, 9,10,11,12,
+ 13,14,15,16
+};
+static UNUSED_ATTR const S16 LL_defaultNorm[MaxLL+1] = {
+ 4, 3, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 3, 2, 1, 1, 1, 1, 1,
+ -1,-1,-1,-1
+};
+#define LL_DEFAULTNORMLOG 6 /* for static allocation */
+static UNUSED_ATTR const U32 LL_defaultNormLog = LL_DEFAULTNORMLOG;
+
+static UNUSED_ATTR const U8 ML_bits[MaxML+1] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 2, 2, 3, 3,
+ 4, 4, 5, 7, 8, 9,10,11,
+ 12,13,14,15,16
+};
+static UNUSED_ATTR const S16 ML_defaultNorm[MaxML+1] = {
+ 1, 4, 3, 2, 2, 2, 2, 2,
+ 2, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1,-1,-1,
+ -1,-1,-1,-1,-1
+};
+#define ML_DEFAULTNORMLOG 6 /* for static allocation */
+static UNUSED_ATTR const U32 ML_defaultNormLog = ML_DEFAULTNORMLOG;
+
+static UNUSED_ATTR const S16 OF_defaultNorm[DefaultMaxOff+1] = {
+ 1, 1, 1, 1, 1, 1, 2, 2,
+ 2, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ -1,-1,-1,-1,-1
+};
+#define OF_DEFAULTNORMLOG 5 /* for static allocation */
+static UNUSED_ATTR const U32 OF_defaultNormLog = OF_DEFAULTNORMLOG;
+
+
+/*-*******************************************
+* Shared functions to include for inlining
+*********************************************/
+static void ZSTD_copy8(void* dst, const void* src) {
+#if defined(ZSTD_ARCH_ARM_NEON)
+ vst1_u8((uint8_t*)dst, vld1_u8((const uint8_t*)src));
+#else
+ ZSTD_memcpy(dst, src, 8);
+#endif
+}
+#define COPY8(d,s) { ZSTD_copy8(d,s); d+=8; s+=8; }
+
+/* Need to use memmove here since the literal buffer can now be located within
+ the dst buffer. In circumstances where the op "catches up" to where the
+ literal buffer is, there can be partial overlaps in this call on the final
+ copy if the literal is being shifted by less than 16 bytes. */
+static void ZSTD_copy16(void* dst, const void* src) {
+#if defined(ZSTD_ARCH_ARM_NEON)
+ vst1q_u8((uint8_t*)dst, vld1q_u8((const uint8_t*)src));
+#elif defined(ZSTD_ARCH_X86_SSE2)
+ _mm_storeu_si128((__m128i*)dst, _mm_loadu_si128((const __m128i*)src));
+#elif defined(__clang__)
+ ZSTD_memmove(dst, src, 16);
+#else
+ /* ZSTD_memmove is not inlined properly by gcc */
+ BYTE copy16_buf[16];
+ ZSTD_memcpy(copy16_buf, src, 16);
+ ZSTD_memcpy(dst, copy16_buf, 16);
+#endif
+}
+#define COPY16(d,s) { ZSTD_copy16(d,s); d+=16; s+=16; }
+
+#define WILDCOPY_OVERLENGTH 32
+#define WILDCOPY_VECLEN 16
+
+typedef enum {
+ ZSTD_no_overlap,
+ ZSTD_overlap_src_before_dst
+ /* ZSTD_overlap_dst_before_src, */
+} ZSTD_overlap_e;
+
+/*! ZSTD_wildcopy() :
+ * Custom version of ZSTD_memcpy(), which may over-read/write up to WILDCOPY_OVERLENGTH bytes (even when length==0)
+ * @param ovtype controls the overlap detection
+ * - ZSTD_no_overlap: The source and destination are guaranteed to be at least WILDCOPY_VECLEN bytes apart.
+ * - ZSTD_overlap_src_before_dst: The src and dst may overlap, but they MUST be at least 8 bytes apart.
+ *   The src buffer must be before the dst buffer.
+ */
+MEM_STATIC FORCE_INLINE_ATTR
+void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length, ZSTD_overlap_e const ovtype)
+{
+ ptrdiff_t diff = (BYTE*)dst - (const BYTE*)src;
+ const BYTE* ip = (const BYTE*)src;
+ BYTE* op = (BYTE*)dst;
+ BYTE* const oend = op + length;
+
+ if (ovtype == ZSTD_overlap_src_before_dst && diff < WILDCOPY_VECLEN) {
+ /* Handle short offset copies. */
+ do {
+ COPY8(op, ip)
+ } while (op < oend);
+ } else {
+ assert(diff >= WILDCOPY_VECLEN || diff <= -WILDCOPY_VECLEN);
+ /* Separate out the first COPY16() call because the copy length is
+ * almost certain to be short, so the branches have different
+ * probabilities. Since it is almost certain to be short, only do
+ * one COPY16() in the first call. Then, do two calls per loop since
+ * at that point it is more likely to have a high trip count.
+ */
+#ifdef __aarch64__
+ do {
+ COPY16(op, ip);
+ }
+ while (op < oend);
+#else
+ ZSTD_copy16(op, ip);
+ if (16 >= length) return;
+ op += 16;
+ ip += 16;
+ do {
+ COPY16(op, ip);
+ COPY16(op, ip);
+ }
+ while (op < oend);
+#endif
+ }
+}
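+
+/* Illustrative sketch (hypothetical caller): ZSTD_wildcopy() may write up to
+ * WILDCOPY_OVERLENGTH bytes past dst+length, so the destination buffer must
+ * reserve that much slack. */
+#if 0
+static void wildcopy_example(BYTE* op, BYTE* const oend, const BYTE* lit, size_t litLen)
+{
+    assert(op + litLen + WILDCOPY_OVERLENGTH <= oend);   /* slack reserved */
+    ZSTD_wildcopy(op, lit, (ptrdiff_t)litLen, ZSTD_no_overlap);
+}
+#endif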
+
+MEM_STATIC size_t ZSTD_limitCopy(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+ size_t const length = MIN(dstCapacity, srcSize);
+ if (length > 0) {
+ ZSTD_memcpy(dst, src, length);
+ }
+ return length;
+}
+
+/* define "workspace is too large" as this number of times larger than needed */
+#define ZSTD_WORKSPACETOOLARGE_FACTOR 3
+
+/* When the workspace is continuously too large for at least this number
+ * of invocations, the context's memory usage is considered wasteful,
+ * because it is sized for a worst-case scenario that rarely happens.
+ * In that case, resize it down to free some memory. */
+#define ZSTD_WORKSPACETOOLARGE_MAXDURATION 128
+
+/* Controls whether the input/output buffer is buffered or stable. */
+typedef enum {
+ ZSTD_bm_buffered = 0, /* Buffer the input/output */
+ ZSTD_bm_stable = 1 /* ZSTD_inBuffer/ZSTD_outBuffer is stable */
+} ZSTD_bufferMode_e;
+
+
+/*-*******************************************
+* Private declarations
+*********************************************/
+typedef struct seqDef_s {
+ U32 offBase; /* offBase == Offset + ZSTD_REP_NUM, or repcode 1,2,3 */
+ U16 litLength;
+ U16 mlBase; /* mlBase == matchLength - MINMATCH */
+} seqDef;
+
+/* Controls whether seqStore has a single "long" litLength or matchLength. See seqStore_t. */
+typedef enum {
+ ZSTD_llt_none = 0, /* no longLengthType */
+ ZSTD_llt_literalLength = 1, /* represents a long literal */
+ ZSTD_llt_matchLength = 2 /* represents a long match */
+} ZSTD_longLengthType_e;
+
+typedef struct {
+ seqDef* sequencesStart;
+ seqDef* sequences; /* ptr to end of sequences */
+ BYTE* litStart;
+ BYTE* lit; /* ptr to end of literals */
+ BYTE* llCode;
+ BYTE* mlCode;
+ BYTE* ofCode;
+ size_t maxNbSeq;
+ size_t maxNbLit;
+
+ /* longLengthPos and longLengthType to allow us to represent either a single litLength or matchLength
+ * in the seqStore that has a value larger than U16 (if it exists). To do so, we increment
+ * the existing value of the litLength or matchLength by 0x10000.
+ */
+ ZSTD_longLengthType_e longLengthType;
+ U32 longLengthPos; /* Index of the sequence to apply long length modification to */
+} seqStore_t;
+
+typedef struct {
+ U32 litLength;
+ U32 matchLength;
+} ZSTD_sequenceLength;
+
+/*
+ * Returns the ZSTD_sequenceLength for the given sequences. It handles the decoding of long sequences
+ * indicated by longLengthPos and longLengthType, and adds MINMATCH back to matchLength.
+ */
+MEM_STATIC ZSTD_sequenceLength ZSTD_getSequenceLength(seqStore_t const* seqStore, seqDef const* seq)
+{
+    ZSTD_sequenceLength seqLen;
+    seqLen.litLength = seq->litLength;
+    seqLen.matchLength = seq->mlBase + MINMATCH;
+    if (seqStore->longLengthPos == (U32)(seq - seqStore->sequencesStart)) {
+        /* the stored U16 was truncated by 0x10000, per the seqStore_t comment above */
+        if (seqStore->longLengthType == ZSTD_llt_literalLength) {
+            seqLen.litLength += 0x10000;
+        }
+        if (seqStore->longLengthType == ZSTD_llt_matchLength) {
+            seqLen.matchLength += 0x10000;
+        }
+    }
+    return seqLen;
+}
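+
+/* Worked example: a literal length of 0x10003 is stored as (U16)0x0003 with
+ * longLengthType == ZSTD_llt_literalLength at this sequence's position;
+ * ZSTD_getSequenceLength() adds the 0x10000 back, returning 0x10003. */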
+
+/*
+ * Contains the compressed frame size and an upper bound for the decompressed frame size.
+ * Note: before using `compressedSize`, check for errors using ZSTD_isError().
+ *       Similarly, before using `decompressedBound`, check for errors using:
+ *       `decompressedBound != ZSTD_CONTENTSIZE_ERROR`
+ */
+typedef struct {
+ size_t compressedSize;
+ unsigned long long decompressedBound;
+} ZSTD_frameSizeInfo; /* decompress & legacy */
+
+const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx); /* compress & dictBuilder */
+void ZSTD_seqToCodes(const seqStore_t* seqStorePtr); /* compress, dictBuilder, decodeCorpus (shouldn't get its definition from here) */
+
+/* custom memory allocation functions */
+void* ZSTD_customMalloc(size_t size, ZSTD_customMem customMem);
+void* ZSTD_customCalloc(size_t size, ZSTD_customMem customMem);
+void ZSTD_customFree(void* ptr, ZSTD_customMem customMem);
+
+
+MEM_STATIC U32 ZSTD_highbit32(U32 val) /* compress, dictBuilder, decodeCorpus */
+{
+ assert(val != 0);
+ {
+# if (__GNUC__ >= 3) /* GCC Intrinsic */
+ return __builtin_clz (val) ^ 31;
+# else /* Software version */
+ static const U32 DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
+ U32 v = val;
+ v |= v >> 1;
+ v |= v >> 2;
+ v |= v >> 4;
+ v |= v >> 8;
+ v |= v >> 16;
+ return DeBruijnClz[(v * 0x07C4ACDDU) >> 27];
+# endif
+ }
+}
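+
+/* Worked example: ZSTD_highbit32(1) == 0 and ZSTD_highbit32(0x80000000) == 31,
+ * i.e. the position of the highest set bit (31 - clz). */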
+
+/*
+ * Counts the number of trailing zeros of a `size_t`.
+ * Most compilers should support CTZ as a builtin. A backup
+ * implementation is provided if the builtin isn't supported, but
+ * it may not be terribly efficient.
+ */
+MEM_STATIC unsigned ZSTD_countTrailingZeros(size_t val)
+{
+ if (MEM_64bits()) {
+# if (__GNUC__ >= 4)
+ return __builtin_ctzll((U64)val);
+# else
+ static const int DeBruijnBytePos[64] = { 0, 1, 2, 7, 3, 13, 8, 19,
+ 4, 25, 14, 28, 9, 34, 20, 56,
+ 5, 17, 26, 54, 15, 41, 29, 43,
+ 10, 31, 38, 35, 21, 45, 49, 57,
+ 63, 6, 12, 18, 24, 27, 33, 55,
+ 16, 53, 40, 42, 30, 37, 44, 48,
+ 62, 11, 23, 32, 52, 39, 36, 47,
+ 61, 22, 51, 46, 60, 50, 59, 58 };
+ return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
+# endif
+ } else { /* 32 bits */
+# if (__GNUC__ >= 3)
+ return __builtin_ctz((U32)val);
+# else
+ static const int DeBruijnBytePos[32] = { 0, 1, 28, 2, 29, 14, 24, 3,
+ 30, 22, 20, 15, 25, 17, 4, 8,
+ 31, 27, 13, 23, 21, 19, 16, 7,
+ 26, 12, 18, 6, 11, 5, 10, 9 };
+ return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
+# endif
+ }
+}
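+
+/* Worked example: ZSTD_countTrailingZeros(8) == 3, since 8 == 0b1000;
+ * the scan width follows sizeof(size_t) via MEM_64bits(). */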
+
+
+/* ZSTD_invalidateRepCodes() :
+ * ensures next compression will not use repcodes from previous block.
+ * Note : only works with regular variant;
+ * do not use with extDict variant ! */
+void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx); /* zstdmt, adaptive_compression (shouldn't get this definition from here) */
+
+
+typedef struct {
+ blockType_e blockType;
+ U32 lastBlock;
+ U32 origSize;
+} blockProperties_t; /* declared here for decompress and fullbench */
+
+/*! ZSTD_getcBlockSize() :
+ * Provides the size of compressed block from block header `src` */
+/* Used by: decompress, fullbench (does not get its definition from here) */
+size_t ZSTD_getcBlockSize(const void* src, size_t srcSize,
+ blockProperties_t* bpPtr);
+
+/*! ZSTD_decodeSeqHeaders() :
+ * decode sequence header from src */
+/* Used by: decompress, fullbench (does not get its definition from here) */
+size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
+ const void* src, size_t srcSize);
+
+/*
+ * @returns true iff the CPU supports dynamic BMI2 dispatch.
+ */
+MEM_STATIC int ZSTD_cpuSupportsBmi2(void)
+{
+ ZSTD_cpuid_t cpuid = ZSTD_cpuid();
+ return ZSTD_cpuid_bmi1(cpuid) && ZSTD_cpuid_bmi2(cpuid);
+}
+
+
+#endif /* ZSTD_CCOMMON_H_MODULE */
diff --git a/lib/zstd/compress/clevels.h b/lib/zstd/compress/clevels.h
new file mode 100644
index 000000000..d9a76112e
--- /dev/null
+++ b/lib/zstd/compress/clevels.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_CLEVELS_H
+#define ZSTD_CLEVELS_H
+
+#define ZSTD_STATIC_LINKING_ONLY /* ZSTD_compressionParameters */
+#include <linux/zstd.h>
+
+/*-===== Pre-defined compression levels =====-*/
+
+#define ZSTD_MAX_CLEVEL 22
+
+__attribute__((__unused__))
+static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEVEL+1] = {
+{ /* "default" - for any srcSize > 256 KB */
+ /* W, C, H, S, L, TL, strat */
+ { 19, 12, 13, 1, 6, 1, ZSTD_fast }, /* base for negative levels */
+ { 19, 13, 14, 1, 7, 0, ZSTD_fast }, /* level 1 */
+ { 20, 15, 16, 1, 6, 0, ZSTD_fast }, /* level 2 */
+ { 21, 16, 17, 1, 5, 0, ZSTD_dfast }, /* level 3 */
+ { 21, 18, 18, 1, 5, 0, ZSTD_dfast }, /* level 4 */
+ { 21, 18, 19, 3, 5, 2, ZSTD_greedy }, /* level 5 */
+ { 21, 18, 19, 3, 5, 4, ZSTD_lazy }, /* level 6 */
+ { 21, 19, 20, 4, 5, 8, ZSTD_lazy }, /* level 7 */
+ { 21, 19, 20, 4, 5, 16, ZSTD_lazy2 }, /* level 8 */
+ { 22, 20, 21, 4, 5, 16, ZSTD_lazy2 }, /* level 9 */
+ { 22, 21, 22, 5, 5, 16, ZSTD_lazy2 }, /* level 10 */
+ { 22, 21, 22, 6, 5, 16, ZSTD_lazy2 }, /* level 11 */
+ { 22, 22, 23, 6, 5, 32, ZSTD_lazy2 }, /* level 12 */
+ { 22, 22, 22, 4, 5, 32, ZSTD_btlazy2 }, /* level 13 */
+ { 22, 22, 23, 5, 5, 32, ZSTD_btlazy2 }, /* level 14 */
+ { 22, 23, 23, 6, 5, 32, ZSTD_btlazy2 }, /* level 15 */
+ { 22, 22, 22, 5, 5, 48, ZSTD_btopt }, /* level 16 */
+ { 23, 23, 22, 5, 4, 64, ZSTD_btopt }, /* level 17 */
+ { 23, 23, 22, 6, 3, 64, ZSTD_btultra }, /* level 18 */
+ { 23, 24, 22, 7, 3,256, ZSTD_btultra2}, /* level 19 */
+ { 25, 25, 23, 7, 3,256, ZSTD_btultra2}, /* level 20 */
+ { 26, 26, 24, 7, 3,512, ZSTD_btultra2}, /* level 21 */
+ { 27, 27, 25, 9, 3,999, ZSTD_btultra2}, /* level 22 */
+},
+{ /* for srcSize <= 256 KB */
+ /* W, C, H, S, L, T, strat */
+ { 18, 12, 13, 1, 5, 1, ZSTD_fast }, /* base for negative levels */
+ { 18, 13, 14, 1, 6, 0, ZSTD_fast }, /* level 1 */
+ { 18, 14, 14, 1, 5, 0, ZSTD_dfast }, /* level 2 */
+ { 18, 16, 16, 1, 4, 0, ZSTD_dfast }, /* level 3 */
+ { 18, 16, 17, 3, 5, 2, ZSTD_greedy }, /* level 4.*/
+ { 18, 17, 18, 5, 5, 2, ZSTD_greedy }, /* level 5.*/
+ { 18, 18, 19, 3, 5, 4, ZSTD_lazy }, /* level 6.*/
+ { 18, 18, 19, 4, 4, 4, ZSTD_lazy }, /* level 7 */
+ { 18, 18, 19, 4, 4, 8, ZSTD_lazy2 }, /* level 8 */
+ { 18, 18, 19, 5, 4, 8, ZSTD_lazy2 }, /* level 9 */
+ { 18, 18, 19, 6, 4, 8, ZSTD_lazy2 }, /* level 10 */
+ { 18, 18, 19, 5, 4, 12, ZSTD_btlazy2 }, /* level 11.*/
+ { 18, 19, 19, 7, 4, 12, ZSTD_btlazy2 }, /* level 12.*/
+ { 18, 18, 19, 4, 4, 16, ZSTD_btopt }, /* level 13 */
+ { 18, 18, 19, 4, 3, 32, ZSTD_btopt }, /* level 14.*/
+ { 18, 18, 19, 6, 3,128, ZSTD_btopt }, /* level 15.*/
+ { 18, 19, 19, 6, 3,128, ZSTD_btultra }, /* level 16.*/
+ { 18, 19, 19, 8, 3,256, ZSTD_btultra }, /* level 17.*/
+ { 18, 19, 19, 6, 3,128, ZSTD_btultra2}, /* level 18.*/
+ { 18, 19, 19, 8, 3,256, ZSTD_btultra2}, /* level 19.*/
+ { 18, 19, 19, 10, 3,512, ZSTD_btultra2}, /* level 20.*/
+ { 18, 19, 19, 12, 3,512, ZSTD_btultra2}, /* level 21.*/
+ { 18, 19, 19, 13, 3,999, ZSTD_btultra2}, /* level 22.*/
+},
+{ /* for srcSize <= 128 KB */
+ /* W, C, H, S, L, T, strat */
+ { 17, 12, 12, 1, 5, 1, ZSTD_fast }, /* base for negative levels */
+ { 17, 12, 13, 1, 6, 0, ZSTD_fast }, /* level 1 */
+ { 17, 13, 15, 1, 5, 0, ZSTD_fast }, /* level 2 */
+ { 17, 15, 16, 2, 5, 0, ZSTD_dfast }, /* level 3 */
+ { 17, 17, 17, 2, 4, 0, ZSTD_dfast }, /* level 4 */
+ { 17, 16, 17, 3, 4, 2, ZSTD_greedy }, /* level 5 */
+ { 17, 16, 17, 3, 4, 4, ZSTD_lazy }, /* level 6 */
+ { 17, 16, 17, 3, 4, 8, ZSTD_lazy2 }, /* level 7 */
+ { 17, 16, 17, 4, 4, 8, ZSTD_lazy2 }, /* level 8 */
+ { 17, 16, 17, 5, 4, 8, ZSTD_lazy2 }, /* level 9 */
+ { 17, 16, 17, 6, 4, 8, ZSTD_lazy2 }, /* level 10 */
+ { 17, 17, 17, 5, 4, 8, ZSTD_btlazy2 }, /* level 11 */
+ { 17, 18, 17, 7, 4, 12, ZSTD_btlazy2 }, /* level 12 */
+ { 17, 18, 17, 3, 4, 12, ZSTD_btopt }, /* level 13.*/
+ { 17, 18, 17, 4, 3, 32, ZSTD_btopt }, /* level 14.*/
+ { 17, 18, 17, 6, 3,256, ZSTD_btopt }, /* level 15.*/
+ { 17, 18, 17, 6, 3,128, ZSTD_btultra }, /* level 16.*/
+ { 17, 18, 17, 8, 3,256, ZSTD_btultra }, /* level 17.*/
+ { 17, 18, 17, 10, 3,512, ZSTD_btultra }, /* level 18.*/
+ { 17, 18, 17, 5, 3,256, ZSTD_btultra2}, /* level 19.*/
+ { 17, 18, 17, 7, 3,512, ZSTD_btultra2}, /* level 20.*/
+ { 17, 18, 17, 9, 3,512, ZSTD_btultra2}, /* level 21.*/
+ { 17, 18, 17, 11, 3,999, ZSTD_btultra2}, /* level 22.*/
+},
+{ /* for srcSize <= 16 KB */
+ /* W, C, H, S, L, T, strat */
+ { 14, 12, 13, 1, 5, 1, ZSTD_fast }, /* base for negative levels */
+ { 14, 14, 15, 1, 5, 0, ZSTD_fast }, /* level 1 */
+ { 14, 14, 15, 1, 4, 0, ZSTD_fast }, /* level 2 */
+ { 14, 14, 15, 2, 4, 0, ZSTD_dfast }, /* level 3 */
+ { 14, 14, 14, 4, 4, 2, ZSTD_greedy }, /* level 4 */
+ { 14, 14, 14, 3, 4, 4, ZSTD_lazy }, /* level 5.*/
+ { 14, 14, 14, 4, 4, 8, ZSTD_lazy2 }, /* level 6 */
+ { 14, 14, 14, 6, 4, 8, ZSTD_lazy2 }, /* level 7 */
+ { 14, 14, 14, 8, 4, 8, ZSTD_lazy2 }, /* level 8.*/
+ { 14, 15, 14, 5, 4, 8, ZSTD_btlazy2 }, /* level 9.*/
+ { 14, 15, 14, 9, 4, 8, ZSTD_btlazy2 }, /* level 10.*/
+ { 14, 15, 14, 3, 4, 12, ZSTD_btopt }, /* level 11.*/
+ { 14, 15, 14, 4, 3, 24, ZSTD_btopt }, /* level 12.*/
+ { 14, 15, 14, 5, 3, 32, ZSTD_btultra }, /* level 13.*/
+ { 14, 15, 15, 6, 3, 64, ZSTD_btultra }, /* level 14.*/
+ { 14, 15, 15, 7, 3,256, ZSTD_btultra }, /* level 15.*/
+ { 14, 15, 15, 5, 3, 48, ZSTD_btultra2}, /* level 16.*/
+ { 14, 15, 15, 6, 3,128, ZSTD_btultra2}, /* level 17.*/
+ { 14, 15, 15, 7, 3,256, ZSTD_btultra2}, /* level 18.*/
+ { 14, 15, 15, 8, 3,256, ZSTD_btultra2}, /* level 19.*/
+ { 14, 15, 15, 8, 3,512, ZSTD_btultra2}, /* level 20.*/
+ { 14, 15, 15, 9, 3,512, ZSTD_btultra2}, /* level 21.*/
+ { 14, 15, 15, 10, 3,999, ZSTD_btultra2}, /* level 22.*/
+},
+};
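+
+/* Reading the tables (illustrative): the "default" row for level 3,
+ * { 21, 16, 17, 1, 5, 0, ZSTD_dfast }, means windowLog=21 (a 2 MB window),
+ * chainLog=16, hashLog=17, searchLog=1, minMatch=5, targetLength=0,
+ * using the double-fast strategy. */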
+
+
+
+#endif /* ZSTD_CLEVELS_H */
diff --git a/lib/zstd/compress/fse_compress.c b/lib/zstd/compress/fse_compress.c
new file mode 100644
index 000000000..ec5b1ca6d
--- /dev/null
+++ b/lib/zstd/compress/fse_compress.c
@@ -0,0 +1,668 @@
+/* ******************************************************************
+ * FSE : Finite State Entropy encoder
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ *
+ * You can contact the author at :
+ * - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ * - Public forum : https://groups.google.com/forum/#!forum/lz4c
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+****************************************************************** */
+
+/* **************************************************************
+* Includes
+****************************************************************/
+#include "../common/compiler.h"
+#include "../common/mem.h" /* U32, U16, etc. */
+#include "../common/debug.h" /* assert, DEBUGLOG */
+#include "hist.h" /* HIST_count_wksp */
+#include "../common/bitstream.h"
+#define FSE_STATIC_LINKING_ONLY
+#include "../common/fse.h"
+#include "../common/error_private.h"
+#define ZSTD_DEPS_NEED_MALLOC
+#define ZSTD_DEPS_NEED_MATH64
+#include "../common/zstd_deps.h" /* ZSTD_malloc, ZSTD_free, ZSTD_memcpy, ZSTD_memset */
+
+
+/* **************************************************************
+* Error Management
+****************************************************************/
+#define FSE_isError ERR_isError
+
+
+/* **************************************************************
+* Templates
+****************************************************************/
+/*
+  Designed to be included
+  for type-specific functions (template emulation in C).
+  The objective is to write these functions only once, for improved maintainability.
+*/
+
+/* safety checks */
+#ifndef FSE_FUNCTION_EXTENSION
+# error "FSE_FUNCTION_EXTENSION must be defined"
+#endif
+#ifndef FSE_FUNCTION_TYPE
+# error "FSE_FUNCTION_TYPE must be defined"
+#endif
+
+/* Function names */
+#define FSE_CAT(X,Y) X##Y
+#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)
+#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)
+
+
+/* Function templates */
+
+/* FSE_buildCTable_wksp() :
+ * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`).
+ * wkspSize should be sized to handle the worst-case situation, which is `(1<<max_tableLog) * sizeof(FSE_FUNCTION_TYPE)`
+ * workSpace must also be properly aligned with FSE_FUNCTION_TYPE requirements
+ */
+size_t FSE_buildCTable_wksp(FSE_CTable* ct,
+ const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog,
+ void* workSpace, size_t wkspSize)
+{
+ U32 const tableSize = 1 << tableLog;
+ U32 const tableMask = tableSize - 1;
+ void* const ptr = ct;
+ U16* const tableU16 = ( (U16*) ptr) + 2;
+ void* const FSCT = ((U32*)ptr) + 1 /* header */ + (tableLog ? tableSize>>1 : 1) ;
+ FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT);
+ U32 const step = FSE_TABLESTEP(tableSize);
+ U32 const maxSV1 = maxSymbolValue+1;
+
+ U16* cumul = (U16*)workSpace; /* size = maxSV1 */
+ FSE_FUNCTION_TYPE* const tableSymbol = (FSE_FUNCTION_TYPE*)(cumul + (maxSV1+1)); /* size = tableSize */
+
+ U32 highThreshold = tableSize-1;
+
+ assert(((size_t)workSpace & 1) == 0); /* Must be 2 bytes-aligned */
+ if (FSE_BUILD_CTABLE_WORKSPACE_SIZE(maxSymbolValue, tableLog) > wkspSize) return ERROR(tableLog_tooLarge);
+ /* CTable header */
+ tableU16[-2] = (U16) tableLog;
+ tableU16[-1] = (U16) maxSymbolValue;
+ assert(tableLog < 16); /* required for threshold strategy to work */
+
+ /* For explanations on how to distribute symbol values over the table :
+ * http://fastcompression.blogspot.fr/2014/02/fse-distributing-symbol-values.html */
+
+ #ifdef __clang_analyzer__
+ ZSTD_memset(tableSymbol, 0, sizeof(*tableSymbol) * tableSize); /* useless initialization, just to keep scan-build happy */
+ #endif
+
+ /* symbol start positions */
+ { U32 u;
+ cumul[0] = 0;
+ for (u=1; u <= maxSV1; u++) {
+ if (normalizedCounter[u-1]==-1) { /* Low proba symbol */
+ cumul[u] = cumul[u-1] + 1;
+ tableSymbol[highThreshold--] = (FSE_FUNCTION_TYPE)(u-1);
+ } else {
+ assert(normalizedCounter[u-1] >= 0);
+ cumul[u] = cumul[u-1] + (U16)normalizedCounter[u-1];
+ assert(cumul[u] >= cumul[u-1]); /* no overflow */
+ } }
+ cumul[maxSV1] = (U16)(tableSize+1);
+ }
+
+ /* Spread symbols */
+ if (highThreshold == tableSize - 1) {
+ /* Case for no low prob count symbols. Lay down 8 bytes at a time
+ * to reduce branch misses since we are operating on a small block
+ */
+ BYTE* const spread = tableSymbol + tableSize; /* size = tableSize + 8 (may write beyond tableSize) */
+ { U64 const add = 0x0101010101010101ull;
+ size_t pos = 0;
+ U64 sv = 0;
+ U32 s;
+ for (s=0; s<maxSV1; ++s, sv += add) {
+ int i;
+ int const n = normalizedCounter[s];
+ MEM_write64(spread + pos, sv);
+ for (i = 8; i < n; i += 8) {
+ MEM_write64(spread + pos + i, sv);
+ }
+ assert(n>=0);
+ pos += (size_t)n;
+ }
+ }
+        /* Spread symbols across the table. The lack of low-probability symbols
+         * means we don't need a variable-sized inner loop, so we can unroll
+         * the loop and reduce branch misses.
+         */
+ { size_t position = 0;
+ size_t s;
+ size_t const unroll = 2; /* Experimentally determined optimal unroll */
+ assert(tableSize % unroll == 0); /* FSE_MIN_TABLELOG is 5 */
+ for (s = 0; s < (size_t)tableSize; s += unroll) {
+ size_t u;
+ for (u = 0; u < unroll; ++u) {
+ size_t const uPosition = (position + (u * step)) & tableMask;
+ tableSymbol[uPosition] = spread[s + u];
+ }
+ position = (position + (unroll * step)) & tableMask;
+ }
+ assert(position == 0); /* Must have initialized all positions */
+ }
+ } else {
+ U32 position = 0;
+ U32 symbol;
+ for (symbol=0; symbol<maxSV1; symbol++) {
+ int nbOccurrences;
+ int const freq = normalizedCounter[symbol];
+ for (nbOccurrences=0; nbOccurrences<freq; nbOccurrences++) {
+ tableSymbol[position] = (FSE_FUNCTION_TYPE)symbol;
+ position = (position + step) & tableMask;
+ while (position > highThreshold)
+ position = (position + step) & tableMask; /* Low proba area */
+ } }
+ assert(position==0); /* Must have initialized all positions */
+ }
+
+ /* Build table */
+ { U32 u; for (u=0; u<tableSize; u++) {
+ FSE_FUNCTION_TYPE s = tableSymbol[u]; /* note : static analyzer may not understand tableSymbol is properly initialized */
+ tableU16[cumul[s]++] = (U16) (tableSize+u); /* TableU16 : sorted by symbol order; gives next state value */
+ } }
+
+ /* Build Symbol Transformation Table */
+ { unsigned total = 0;
+ unsigned s;
+ for (s=0; s<=maxSymbolValue; s++) {
+ switch (normalizedCounter[s])
+ {
+ case 0:
+ /* filling nonetheless, for compatibility with FSE_getMaxNbBits() */
+ symbolTT[s].deltaNbBits = ((tableLog+1) << 16) - (1<<tableLog);
+ break;
+
+ case -1:
+ case 1:
+ symbolTT[s].deltaNbBits = (tableLog << 16) - (1<<tableLog);
+ assert(total <= INT_MAX);
+ symbolTT[s].deltaFindState = (int)(total - 1);
+ total ++;
+ break;
+ default :
+ assert(normalizedCounter[s] > 1);
+ { U32 const maxBitsOut = tableLog - BIT_highbit32 ((U32)normalizedCounter[s]-1);
+ U32 const minStatePlus = (U32)normalizedCounter[s] << maxBitsOut;
+ symbolTT[s].deltaNbBits = (maxBitsOut << 16) - minStatePlus;
+ symbolTT[s].deltaFindState = (int)(total - (unsigned)normalizedCounter[s]);
+ total += (unsigned)normalizedCounter[s];
+ } } } }
+
+#if 0 /* debug : symbol costs */
+ DEBUGLOG(5, "\n --- table statistics : ");
+ { U32 symbol;
+ for (symbol=0; symbol<=maxSymbolValue; symbol++) {
+ DEBUGLOG(5, "%3u: w=%3i, maxBits=%u, fracBits=%.2f",
+ symbol, normalizedCounter[symbol],
+ FSE_getMaxNbBits(symbolTT, symbol),
+ (double)FSE_bitCost(symbolTT, tableLog, symbol, 8) / 256);
+ } }
+#endif
+
+ return 0;
+}
+
+
+
+#ifndef FSE_COMMONDEFS_ONLY
+
+/*-**************************************************************
+* FSE NCount encoding
+****************************************************************/
+size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog)
+{
+ size_t const maxHeaderSize = (((maxSymbolValue+1) * tableLog
+ + 4 /* bitCount initialized at 4 */
+ + 2 /* first two symbols may use one additional bit each */) / 8)
+ + 1 /* round up to whole nb bytes */
+ + 2 /* additional two bytes for bitstream flush */;
+ return maxSymbolValue ? maxHeaderSize : FSE_NCOUNTBOUND; /* maxSymbolValue==0 ? use default */
+}
+
+static size_t
+FSE_writeNCount_generic (void* header, size_t headerBufferSize,
+ const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog,
+ unsigned writeIsSafe)
+{
+ BYTE* const ostart = (BYTE*) header;
+ BYTE* out = ostart;
+ BYTE* const oend = ostart + headerBufferSize;
+ int nbBits;
+ const int tableSize = 1 << tableLog;
+ int remaining;
+ int threshold;
+ U32 bitStream = 0;
+ int bitCount = 0;
+ unsigned symbol = 0;
+ unsigned const alphabetSize = maxSymbolValue + 1;
+ int previousIs0 = 0;
+
+ /* Table Size */
+ bitStream += (tableLog-FSE_MIN_TABLELOG) << bitCount;
+ bitCount += 4;
+
+ /* Init */
+ remaining = tableSize+1; /* +1 for extra accuracy */
+ threshold = tableSize;
+ nbBits = tableLog+1;
+
+ while ((symbol < alphabetSize) && (remaining>1)) { /* stops at 1 */
+ if (previousIs0) {
+ unsigned start = symbol;
+ while ((symbol < alphabetSize) && !normalizedCounter[symbol]) symbol++;
+ if (symbol == alphabetSize) break; /* incorrect distribution */
+ while (symbol >= start+24) {
+ start+=24;
+ bitStream += 0xFFFFU << bitCount;
+ if ((!writeIsSafe) && (out > oend-2))
+ return ERROR(dstSize_tooSmall); /* Buffer overflow */
+ out[0] = (BYTE) bitStream;
+ out[1] = (BYTE)(bitStream>>8);
+ out+=2;
+ bitStream>>=16;
+ }
+ while (symbol >= start+3) {
+ start+=3;
+ bitStream += 3 << bitCount;
+ bitCount += 2;
+ }
+ bitStream += (symbol-start) << bitCount;
+ bitCount += 2;
+ if (bitCount>16) {
+ if ((!writeIsSafe) && (out > oend - 2))
+ return ERROR(dstSize_tooSmall); /* Buffer overflow */
+ out[0] = (BYTE)bitStream;
+ out[1] = (BYTE)(bitStream>>8);
+ out += 2;
+ bitStream >>= 16;
+ bitCount -= 16;
+ } }
+ { int count = normalizedCounter[symbol++];
+ int const max = (2*threshold-1) - remaining;
+ remaining -= count < 0 ? -count : count;
+ count++; /* +1 for extra accuracy */
+ if (count>=threshold)
+ count += max; /* [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ */
+ bitStream += count << bitCount;
+ bitCount += nbBits;
+ bitCount -= (count<max);
+ previousIs0 = (count==1);
+ if (remaining<1) return ERROR(GENERIC);
+ while (remaining<threshold) { nbBits--; threshold>>=1; }
+ }
+ if (bitCount>16) {
+ if ((!writeIsSafe) && (out > oend - 2))
+ return ERROR(dstSize_tooSmall); /* Buffer overflow */
+ out[0] = (BYTE)bitStream;
+ out[1] = (BYTE)(bitStream>>8);
+ out += 2;
+ bitStream >>= 16;
+ bitCount -= 16;
+ } }
+
+ if (remaining != 1)
+ return ERROR(GENERIC); /* incorrect normalized distribution */
+ assert(symbol <= alphabetSize);
+
+ /* flush remaining bitStream */
+ if ((!writeIsSafe) && (out > oend - 2))
+ return ERROR(dstSize_tooSmall); /* Buffer overflow */
+ out[0] = (BYTE)bitStream;
+ out[1] = (BYTE)(bitStream>>8);
+ out+= (bitCount+7) /8;
+
+ return (out-ostart);
+}
+
+
+size_t FSE_writeNCount (void* buffer, size_t bufferSize,
+ const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
+{
+ if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); /* Unsupported */
+ if (tableLog < FSE_MIN_TABLELOG) return ERROR(GENERIC); /* Unsupported */
+
+ if (bufferSize < FSE_NCountWriteBound(maxSymbolValue, tableLog))
+ return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 0);
+
+ return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 1 /* write in buffer is safe */);
+}
+
+
+/*-**************************************************************
+* FSE Compression Code
+****************************************************************/
+
+FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog)
+{
+ size_t size;
+ if (tableLog > FSE_TABLELOG_ABSOLUTE_MAX) tableLog = FSE_TABLELOG_ABSOLUTE_MAX;
+ size = FSE_CTABLE_SIZE_U32 (tableLog, maxSymbolValue) * sizeof(U32);
+ return (FSE_CTable*)ZSTD_malloc(size);
+}
+
+void FSE_freeCTable (FSE_CTable* ct) { ZSTD_free(ct); }
+
+/* provides the minimum logSize to safely represent a distribution */
+static unsigned FSE_minTableLog(size_t srcSize, unsigned maxSymbolValue)
+{
+ U32 minBitsSrc = BIT_highbit32((U32)(srcSize)) + 1;
+ U32 minBitsSymbols = BIT_highbit32(maxSymbolValue) + 2;
+ U32 minBits = minBitsSrc < minBitsSymbols ? minBitsSrc : minBitsSymbols;
+ assert(srcSize > 1); /* Not supported, RLE should be used instead */
+ return minBits;
+}
+
+unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus)
+{
+ U32 maxBitsSrc = BIT_highbit32((U32)(srcSize - 1)) - minus;
+ U32 tableLog = maxTableLog;
+ U32 minBits = FSE_minTableLog(srcSize, maxSymbolValue);
+ assert(srcSize > 1); /* Not supported, RLE should be used instead */
+ if (tableLog==0) tableLog = FSE_DEFAULT_TABLELOG;
+ if (maxBitsSrc < tableLog) tableLog = maxBitsSrc; /* Accuracy can be reduced */
+ if (minBits > tableLog) tableLog = minBits; /* Need a minimum to safely represent all symbol values */
+ if (tableLog < FSE_MIN_TABLELOG) tableLog = FSE_MIN_TABLELOG;
+ if (tableLog > FSE_MAX_TABLELOG) tableLog = FSE_MAX_TABLELOG;
+ return tableLog;
+}
+
+unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue)
+{
+ return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 2);
+}
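+
+/* Worked example: srcSize=1000, maxSymbolValue=255, maxTableLog=11 gives
+ * maxBitsSrc = BIT_highbit32(999) - 2 = 7 and
+ * minBits = MIN(BIT_highbit32(1000)+1, BIT_highbit32(255)+2) = 9,
+ * so FSE_optimalTableLog() returns 9. */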
+
+/* Secondary normalization method.
+ To be used when primary method fails. */
+
+static size_t FSE_normalizeM2(short* norm, U32 tableLog, const unsigned* count, size_t total, U32 maxSymbolValue, short lowProbCount)
+{
+ short const NOT_YET_ASSIGNED = -2;
+ U32 s;
+ U32 distributed = 0;
+ U32 ToDistribute;
+
+ /* Init */
+ U32 const lowThreshold = (U32)(total >> tableLog);
+ U32 lowOne = (U32)((total * 3) >> (tableLog + 1));
+
+ for (s=0; s<=maxSymbolValue; s++) {
+ if (count[s] == 0) {
+ norm[s]=0;
+ continue;
+ }
+ if (count[s] <= lowThreshold) {
+ norm[s] = lowProbCount;
+ distributed++;
+ total -= count[s];
+ continue;
+ }
+ if (count[s] <= lowOne) {
+ norm[s] = 1;
+ distributed++;
+ total -= count[s];
+ continue;
+ }
+
+ norm[s]=NOT_YET_ASSIGNED;
+ }
+ ToDistribute = (1 << tableLog) - distributed;
+
+ if (ToDistribute == 0)
+ return 0;
+
+ if ((total / ToDistribute) > lowOne) {
+ /* risk of rounding to zero */
+ lowOne = (U32)((total * 3) / (ToDistribute * 2));
+ for (s=0; s<=maxSymbolValue; s++) {
+ if ((norm[s] == NOT_YET_ASSIGNED) && (count[s] <= lowOne)) {
+ norm[s] = 1;
+ distributed++;
+ total -= count[s];
+ continue;
+ } }
+ ToDistribute = (1 << tableLog) - distributed;
+ }
+
+ if (distributed == maxSymbolValue+1) {
+ /* all values are pretty poor;
+ probably incompressible data (should have already been detected);
+ find max, then give all remaining points to max */
+ U32 maxV = 0, maxC = 0;
+ for (s=0; s<=maxSymbolValue; s++)
+ if (count[s] > maxC) { maxV=s; maxC=count[s]; }
+ norm[maxV] += (short)ToDistribute;
+ return 0;
+ }
+
+ if (total == 0) {
+ /* all of the symbols were low enough for the lowOne or lowThreshold */
+ for (s=0; ToDistribute > 0; s = (s+1)%(maxSymbolValue+1))
+ if (norm[s] > 0) { ToDistribute--; norm[s]++; }
+ return 0;
+ }
+
+ { U64 const vStepLog = 62 - tableLog;
+ U64 const mid = (1ULL << (vStepLog-1)) - 1;
+ U64 const rStep = ZSTD_div64((((U64)1<<vStepLog) * ToDistribute) + mid, (U32)total); /* scale on remaining */
+ U64 tmpTotal = mid;
+ for (s=0; s<=maxSymbolValue; s++) {
+ if (norm[s]==NOT_YET_ASSIGNED) {
+ U64 const end = tmpTotal + (count[s] * rStep);
+ U32 const sStart = (U32)(tmpTotal >> vStepLog);
+ U32 const sEnd = (U32)(end >> vStepLog);
+ U32 const weight = sEnd - sStart;
+ if (weight < 1)
+ return ERROR(GENERIC);
+ norm[s] = (short)weight;
+ tmpTotal = end;
+ } } }
+
+ return 0;
+}
+
+size_t FSE_normalizeCount (short* normalizedCounter, unsigned tableLog,
+ const unsigned* count, size_t total,
+ unsigned maxSymbolValue, unsigned useLowProbCount)
+{
+ /* Sanity checks */
+ if (tableLog==0) tableLog = FSE_DEFAULT_TABLELOG;
+ if (tableLog < FSE_MIN_TABLELOG) return ERROR(GENERIC); /* Unsupported size */
+ if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); /* Unsupported size */
+ if (tableLog < FSE_minTableLog(total, maxSymbolValue)) return ERROR(GENERIC); /* Too small tableLog, compression potentially impossible */
+
+ { static U32 const rtbTable[] = { 0, 473195, 504333, 520860, 550000, 700000, 750000, 830000 };
+ short const lowProbCount = useLowProbCount ? -1 : 1;
+ U64 const scale = 62 - tableLog;
+ U64 const step = ZSTD_div64((U64)1<<62, (U32)total); /* <== here, one division ! */
+ U64 const vStep = 1ULL<<(scale-20);
+ int stillToDistribute = 1<<tableLog;
+ unsigned s;
+ unsigned largest=0;
+ short largestP=0;
+ U32 lowThreshold = (U32)(total >> tableLog);
+
+ for (s=0; s<=maxSymbolValue; s++) {
+ if (count[s] == total) return 0; /* rle special case */
+ if (count[s] == 0) { normalizedCounter[s]=0; continue; }
+ if (count[s] <= lowThreshold) {
+ normalizedCounter[s] = lowProbCount;
+ stillToDistribute--;
+ } else {
+ short proba = (short)((count[s]*step) >> scale);
+ if (proba<8) {
+ U64 restToBeat = vStep * rtbTable[proba];
+ proba += (count[s]*step) - ((U64)proba<<scale) > restToBeat;
+ }
+ if (proba > largestP) { largestP=proba; largest=s; }
+ normalizedCounter[s] = proba;
+ stillToDistribute -= proba;
+ } }
+ if (-stillToDistribute >= (normalizedCounter[largest] >> 1)) {
+ /* corner case, need another normalization method */
+ size_t const errorCode = FSE_normalizeM2(normalizedCounter, tableLog, count, total, maxSymbolValue, lowProbCount);
+ if (FSE_isError(errorCode)) return errorCode;
+ }
+ else normalizedCounter[largest] += (short)stillToDistribute;
+ }
+
+#if 0
+ { /* Print Table (debug) */
+ U32 s;
+ U32 nTotal = 0;
+ for (s=0; s<=maxSymbolValue; s++)
+ RAWLOG(2, "%3i: %4i \n", s, normalizedCounter[s]);
+ for (s=0; s<=maxSymbolValue; s++)
+ nTotal += abs(normalizedCounter[s]);
+ if (nTotal != (1U<<tableLog))
+ RAWLOG(2, "Warning !!! Total == %u != %u !!!", nTotal, 1U<<tableLog);
+ getchar();
+ }
+#endif
+
+ return tableLog;
+}
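+
+/* Illustrative usage sketch (hypothetical caller): histogram -> normalized
+ * counts, the first half of the usual FSE encoding pipeline. */
+#if 0
+static size_t fse_normalize_example(const unsigned count[256], size_t total)
+{
+    short norm[256];
+    unsigned const maxSymbolValue = 255;
+    unsigned const tableLog = FSE_optimalTableLog(0 /* default */, total, maxSymbolValue);
+    return FSE_normalizeCount(norm, tableLog, count, total, maxSymbolValue,
+                              0 /* useLowProbCount */);
+}
+#endif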
+
+
+/* fake FSE_CTable, for raw (uncompressed) input */
+size_t FSE_buildCTable_raw (FSE_CTable* ct, unsigned nbBits)
+{
+ const unsigned tableSize = 1 << nbBits;
+ const unsigned tableMask = tableSize - 1;
+ const unsigned maxSymbolValue = tableMask;
+ void* const ptr = ct;
+ U16* const tableU16 = ( (U16*) ptr) + 2;
+ void* const FSCT = ((U32*)ptr) + 1 /* header */ + (tableSize>>1); /* assumption : tableLog >= 1 */
+ FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT);
+ unsigned s;
+
+ /* Sanity checks */
+ if (nbBits < 1) return ERROR(GENERIC); /* min size */
+
+ /* header */
+ tableU16[-2] = (U16) nbBits;
+ tableU16[-1] = (U16) maxSymbolValue;
+
+ /* Build table */
+ for (s=0; s<tableSize; s++)
+ tableU16[s] = (U16)(tableSize + s);
+
+ /* Build Symbol Transformation Table */
+ { const U32 deltaNbBits = (nbBits << 16) - (1 << nbBits);
+ for (s=0; s<=maxSymbolValue; s++) {
+ symbolTT[s].deltaNbBits = deltaNbBits;
+ symbolTT[s].deltaFindState = s-1;
+ } }
+
+ return 0;
+}
+
+/* fake FSE_CTable, for rle input (always same symbol) */
+size_t FSE_buildCTable_rle (FSE_CTable* ct, BYTE symbolValue)
+{
+ void* ptr = ct;
+ U16* tableU16 = ( (U16*) ptr) + 2;
+ void* FSCTptr = (U32*)ptr + 2;
+ FSE_symbolCompressionTransform* symbolTT = (FSE_symbolCompressionTransform*) FSCTptr;
+
+ /* header */
+ tableU16[-2] = (U16) 0;
+ tableU16[-1] = (U16) symbolValue;
+
+ /* Build table */
+ tableU16[0] = 0;
+ tableU16[1] = 0; /* just in case */
+
+ /* Build Symbol Transformation Table */
+ symbolTT[symbolValue].deltaNbBits = 0;
+ symbolTT[symbolValue].deltaFindState = 0;
+
+ return 0;
+}
+
+
+static size_t FSE_compress_usingCTable_generic (void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ const FSE_CTable* ct, const unsigned fast)
+{
+ const BYTE* const istart = (const BYTE*) src;
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* ip=iend;
+
+ BIT_CStream_t bitC;
+ FSE_CState_t CState1, CState2;
+
+ /* init */
+ if (srcSize <= 2) return 0;
+ { size_t const initError = BIT_initCStream(&bitC, dst, dstSize);
+ if (FSE_isError(initError)) return 0; /* not enough space available to write a bitstream */ }
+
+#define FSE_FLUSHBITS(s) (fast ? BIT_flushBitsFast(s) : BIT_flushBits(s))
+
+ if (srcSize & 1) {
+ FSE_initCState2(&CState1, ct, *--ip);
+ FSE_initCState2(&CState2, ct, *--ip);
+ FSE_encodeSymbol(&bitC, &CState1, *--ip);
+ FSE_FLUSHBITS(&bitC);
+ } else {
+ FSE_initCState2(&CState2, ct, *--ip);
+ FSE_initCState2(&CState1, ct, *--ip);
+ }
+
+ /* join to mod 4 */
+ srcSize -= 2;
+ if ((sizeof(bitC.bitContainer)*8 > FSE_MAX_TABLELOG*4+7 ) && (srcSize & 2)) { /* test bit 2 */
+ FSE_encodeSymbol(&bitC, &CState2, *--ip);
+ FSE_encodeSymbol(&bitC, &CState1, *--ip);
+ FSE_FLUSHBITS(&bitC);
+ }
+
+ /* 2 or 4 encoding per loop */
+ while ( ip>istart ) {
+
+ FSE_encodeSymbol(&bitC, &CState2, *--ip);
+
+ if (sizeof(bitC.bitContainer)*8 < FSE_MAX_TABLELOG*2+7 ) /* this test must be static */
+ FSE_FLUSHBITS(&bitC);
+
+ FSE_encodeSymbol(&bitC, &CState1, *--ip);
+
+ if (sizeof(bitC.bitContainer)*8 > FSE_MAX_TABLELOG*4+7 ) { /* this test must be static */
+ FSE_encodeSymbol(&bitC, &CState2, *--ip);
+ FSE_encodeSymbol(&bitC, &CState1, *--ip);
+ }
+
+ FSE_FLUSHBITS(&bitC);
+ }
+
+ FSE_flushCState(&bitC, &CState2);
+ FSE_flushCState(&bitC, &CState1);
+ return BIT_closeCStream(&bitC);
+}
+
+size_t FSE_compress_usingCTable (void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ const FSE_CTable* ct)
+{
+ unsigned const fast = (dstSize >= FSE_BLOCKBOUND(srcSize));
+
+ if (fast)
+ return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 1);
+ else
+ return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 0);
+}
+
+
+size_t FSE_compressBound(size_t size) { return FSE_COMPRESSBOUND(size); }
+
+
+#endif /* FSE_COMMONDEFS_ONLY */
diff --git a/lib/zstd/compress/hist.c b/lib/zstd/compress/hist.c
new file mode 100644
index 000000000..3ddc6dfb6
--- /dev/null
+++ b/lib/zstd/compress/hist.c
@@ -0,0 +1,165 @@
+/* ******************************************************************
+ * hist : Histogram functions
+ * part of Finite State Entropy project
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ *
+ * You can contact the author at :
+ * - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ * - Public forum : https://groups.google.com/forum/#!forum/lz4c
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+****************************************************************** */
+
+/* --- dependencies --- */
+#include "../common/mem.h" /* U32, BYTE, etc. */
+#include "../common/debug.h" /* assert, DEBUGLOG */
+#include "../common/error_private.h" /* ERROR */
+#include "hist.h"
+
+
+/* --- Error management --- */
+unsigned HIST_isError(size_t code) { return ERR_isError(code); }
+
+/*-**************************************************************
+ * Histogram functions
+ ****************************************************************/
+unsigned HIST_count_simple(unsigned* count, unsigned* maxSymbolValuePtr,
+ const void* src, size_t srcSize)
+{
+ const BYTE* ip = (const BYTE*)src;
+ const BYTE* const end = ip + srcSize;
+ unsigned maxSymbolValue = *maxSymbolValuePtr;
+ unsigned largestCount=0;
+
+ ZSTD_memset(count, 0, (maxSymbolValue+1) * sizeof(*count));
+ if (srcSize==0) { *maxSymbolValuePtr = 0; return 0; }
+
+ while (ip<end) {
+ assert(*ip <= maxSymbolValue);
+ count[*ip++]++;
+ }
+
+ while (!count[maxSymbolValue]) maxSymbolValue--;
+ *maxSymbolValuePtr = maxSymbolValue;
+
+ { U32 s;
+ for (s=0; s<=maxSymbolValue; s++)
+ if (count[s] > largestCount) largestCount = count[s];
+ }
+
+ return largestCount;
+}
+
+typedef enum { trustInput, checkMaxSymbolValue } HIST_checkInput_e;
+
+/* HIST_count_parallel_wksp() :
+ * store histogram into 4 intermediate tables, recombined at the end.
+ * this design makes better use of out-of-order (OoO) CPUs,
+ * and is noticeably faster when some values are heavily repeated.
+ * But it needs some additional workspace for intermediate tables.
+ * `workSpace` must be a U32 table of size >= HIST_WKSP_SIZE_U32.
+ * @return : largest histogram frequency,
+ * or an error code (notably when histogram's alphabet is larger than *maxSymbolValuePtr) */
+static size_t HIST_count_parallel_wksp(
+ unsigned* count, unsigned* maxSymbolValuePtr,
+ const void* source, size_t sourceSize,
+ HIST_checkInput_e check,
+ U32* const workSpace)
+{
+ const BYTE* ip = (const BYTE*)source;
+ const BYTE* const iend = ip+sourceSize;
+ size_t const countSize = (*maxSymbolValuePtr + 1) * sizeof(*count);
+ unsigned max=0;
+ U32* const Counting1 = workSpace;
+ U32* const Counting2 = Counting1 + 256;
+ U32* const Counting3 = Counting2 + 256;
+ U32* const Counting4 = Counting3 + 256;
+
+ /* safety checks */
+ assert(*maxSymbolValuePtr <= 255);
+ if (!sourceSize) {
+ ZSTD_memset(count, 0, countSize);
+ *maxSymbolValuePtr = 0;
+ return 0;
+ }
+ ZSTD_memset(workSpace, 0, 4*256*sizeof(unsigned));
+
+ /* by stripes of 16 bytes */
+ { U32 cached = MEM_read32(ip); ip += 4;
+ while (ip < iend-15) {
+ U32 c = cached; cached = MEM_read32(ip); ip += 4;
+ Counting1[(BYTE) c ]++;
+ Counting2[(BYTE)(c>>8) ]++;
+ Counting3[(BYTE)(c>>16)]++;
+ Counting4[ c>>24 ]++;
+ c = cached; cached = MEM_read32(ip); ip += 4;
+ Counting1[(BYTE) c ]++;
+ Counting2[(BYTE)(c>>8) ]++;
+ Counting3[(BYTE)(c>>16)]++;
+ Counting4[ c>>24 ]++;
+ c = cached; cached = MEM_read32(ip); ip += 4;
+ Counting1[(BYTE) c ]++;
+ Counting2[(BYTE)(c>>8) ]++;
+ Counting3[(BYTE)(c>>16)]++;
+ Counting4[ c>>24 ]++;
+ c = cached; cached = MEM_read32(ip); ip += 4;
+ Counting1[(BYTE) c ]++;
+ Counting2[(BYTE)(c>>8) ]++;
+ Counting3[(BYTE)(c>>16)]++;
+ Counting4[ c>>24 ]++;
+ }
+ ip-=4;
+ }
+
+ /* finish last symbols */
+ while (ip<iend) Counting1[*ip++]++;
+
+ { U32 s;
+ for (s=0; s<256; s++) {
+ Counting1[s] += Counting2[s] + Counting3[s] + Counting4[s];
+ if (Counting1[s] > max) max = Counting1[s];
+ } }
+
+ { unsigned maxSymbolValue = 255;
+ while (!Counting1[maxSymbolValue]) maxSymbolValue--;
+ if (check && maxSymbolValue > *maxSymbolValuePtr) return ERROR(maxSymbolValue_tooSmall);
+ *maxSymbolValuePtr = maxSymbolValue;
+ ZSTD_memmove(count, Counting1, countSize); /* in case count & Counting1 are overlapping */
+ }
+ return (size_t)max;
+}
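+
+/* Note : for contrast with the 4-table scheme above, a single-table loop
+ * (sketch only, not used here) serializes on the counter's store-to-load
+ * dependency whenever the same byte value repeats :
+ *
+ *     static void hist_one_table(U32* table, const BYTE* ip, const BYTE* const iend)
+ *     {
+ *         while (ip < iend)
+ *             table[*ip++]++;   // consecutive equal bytes stall here
+ *     }
+ */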
+
+/* HIST_countFast_wksp() :
+ * Same as HIST_countFast(), but using an externally provided scratch buffer.
+ * `workSpace` is a writable buffer which must be 4-byte aligned,
+ * `workSpaceSize` must be >= HIST_WKSP_SIZE
+ */
+size_t HIST_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
+ const void* source, size_t sourceSize,
+ void* workSpace, size_t workSpaceSize)
+{
+ if (sourceSize < 1500) /* heuristic threshold */
+ return HIST_count_simple(count, maxSymbolValuePtr, source, sourceSize);
+ if ((size_t)workSpace & 3) return ERROR(GENERIC); /* must be aligned on 4-byte boundaries */
+ if (workSpaceSize < HIST_WKSP_SIZE) return ERROR(workSpace_tooSmall);
+ return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, trustInput, (U32*)workSpace);
+}
+
+/* HIST_count_wksp() :
+ * Same as HIST_count(), but using an externally provided scratch buffer.
+ * `workSpace` must be a table of >= HIST_WKSP_SIZE_U32 unsigned */
+size_t HIST_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
+ const void* source, size_t sourceSize,
+ void* workSpace, size_t workSpaceSize)
+{
+ if ((size_t)workSpace & 3) return ERROR(GENERIC); /* must be aligned on 4-byte boundaries */
+ if (workSpaceSize < HIST_WKSP_SIZE) return ERROR(workSpace_tooSmall);
+ if (*maxSymbolValuePtr < 255)
+ return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, checkMaxSymbolValue, (U32*)workSpace);
+ *maxSymbolValuePtr = 255;
+ return HIST_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, workSpace, workSpaceSize);
+}
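+
+/* Note : a minimal usage sketch for the _wksp variants (all names are local
+ * to the example) :
+ *
+ *     unsigned count[256];
+ *     unsigned maxSymbolValue = 255;
+ *     U32 wksp[HIST_WKSP_SIZE_U32];   // 4-byte aligned by its type
+ *     size_t const largest = HIST_count_wksp(count, &maxSymbolValue,
+ *                                            src, srcSize, wksp, sizeof(wksp));
+ *     if (HIST_isError(largest)) { }  // e.g. maxSymbolValue_tooSmall
+ */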
+
diff --git a/lib/zstd/compress/hist.h b/lib/zstd/compress/hist.h
new file mode 100644
index 000000000..fc1830abc
--- /dev/null
+++ b/lib/zstd/compress/hist.h
@@ -0,0 +1,75 @@
+/* ******************************************************************
+ * hist : Histogram functions
+ * part of Finite State Entropy project
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ *
+ * You can contact the author at :
+ * - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ * - Public forum : https://groups.google.com/forum/#!forum/lz4c
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+****************************************************************** */
+
+/* --- dependencies --- */
+#include "../common/zstd_deps.h" /* size_t */
+
+
+/* --- simple histogram functions --- */
+
+/*! HIST_count():
+ * Provides the precise count of each byte within a table 'count'.
+ * 'count' is a table of unsigned int, of minimum size (*maxSymbolValuePtr+1).
+ * Updates *maxSymbolValuePtr with actual largest symbol value detected.
+ * @return : count of the most frequent symbol (which isn't identified),
+ * or an error code, which can be tested using HIST_isError().
+ * note : if return == srcSize, there is only one symbol.
+ */
+size_t HIST_count(unsigned* count, unsigned* maxSymbolValuePtr,
+ const void* src, size_t srcSize);
+
+unsigned HIST_isError(size_t code); /*< tells if a return value is an error code */
+
+
+/* --- advanced histogram functions --- */
+
+#define HIST_WKSP_SIZE_U32 1024
+#define HIST_WKSP_SIZE (HIST_WKSP_SIZE_U32 * sizeof(unsigned))
+/* HIST_count_wksp() :
+ * Same as HIST_count(), but using an externally provided scratch buffer.
+ * The benefit is that this function uses very little stack space.
+ * `workSpace` is a writable buffer which must be 4-byte aligned,
+ * `workSpaceSize` must be >= HIST_WKSP_SIZE
+ */
+size_t HIST_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
+ const void* src, size_t srcSize,
+ void* workSpace, size_t workSpaceSize);
+
+/* HIST_countFast() :
+ * same as HIST_count(), but blindly trusts that all byte values within src are <= *maxSymbolValuePtr.
+ * This function is unsafe, and will segfault if any value within `src` is `> *maxSymbolValuePtr`
+ */
+size_t HIST_countFast(unsigned* count, unsigned* maxSymbolValuePtr,
+ const void* src, size_t srcSize);
+
+/* HIST_countFast_wksp() :
+ * Same as HIST_countFast(), but using an externally provided scratch buffer.
+ * `workSpace` is a writable buffer which must be 4-byte aligned,
+ * `workSpaceSize` must be >= HIST_WKSP_SIZE
+ */
+size_t HIST_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
+ const void* src, size_t srcSize,
+ void* workSpace, size_t workSpaceSize);
+
+/*! HIST_count_simple() :
+ * Like HIST_countFast(), this function is unsafe :
+ * it will segfault if any value within `src` is `> *maxSymbolValuePtr`.
+ * It is also a bit slower for large inputs.
+ * However, it does not need any additional memory (not even on stack).
+ * @return : count of the most frequent symbol.
+ * Note this function doesn't produce any error (i.e. it must succeed).
+ */
+unsigned HIST_count_simple(unsigned* count, unsigned* maxSymbolValuePtr,
+ const void* src, size_t srcSize);
diff --git a/lib/zstd/compress/huf_compress.c b/lib/zstd/compress/huf_compress.c
new file mode 100644
index 000000000..74ef0db47
--- /dev/null
+++ b/lib/zstd/compress/huf_compress.c
@@ -0,0 +1,1335 @@
+/* ******************************************************************
+ * Huffman encoder, part of New Generation Entropy library
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ *
+ * You can contact the author at :
+ * - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ * - Public forum : https://groups.google.com/forum/#!forum/lz4c
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+****************************************************************** */
+
+/* **************************************************************
+* Compiler specifics
+****************************************************************/
+
+
+/* **************************************************************
+* Includes
+****************************************************************/
+#include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memset */
+#include "../common/compiler.h"
+#include "../common/bitstream.h"
+#include "hist.h"
+#define FSE_STATIC_LINKING_ONLY /* FSE_optimalTableLog_internal */
+#include "../common/fse.h" /* header compression */
+#define HUF_STATIC_LINKING_ONLY
+#include "../common/huf.h"
+#include "../common/error_private.h"
+
+
+/* **************************************************************
+* Error Management
+****************************************************************/
+#define HUF_isError ERR_isError
+#define HUF_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c) /* use only *after* variable declarations */
+
+
+/* **************************************************************
+* Utils
+****************************************************************/
+unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue)
+{
+ return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 1);
+}
+
+
+/* *******************************************************
+* HUF : Huffman block compression
+*********************************************************/
+#define HUF_WORKSPACE_MAX_ALIGNMENT 8
+
+static void* HUF_alignUpWorkspace(void* workspace, size_t* workspaceSizePtr, size_t align)
+{
+ size_t const mask = align - 1;
+ size_t const rem = (size_t)workspace & mask;
+ size_t const add = (align - rem) & mask;
+ BYTE* const aligned = (BYTE*)workspace + add;
+ assert((align & (align - 1)) == 0); /* pow 2 */
+ assert(align <= HUF_WORKSPACE_MAX_ALIGNMENT);
+ if (*workspaceSizePtr >= add) {
+ assert(add < align);
+ assert(((size_t)aligned & mask) == 0);
+ *workspaceSizePtr -= add;
+ return aligned;
+ } else {
+ *workspaceSizePtr = 0;
+ return NULL;
+ }
+}
+
+
+/* HUF_compressWeights() :
+ * Same as FSE_compress(), but dedicated to huff0's weights compression.
+ * The use case needs much less stack memory.
+ * Note : all elements within weightTable are supposed to be <= HUF_TABLELOG_MAX.
+ */
+#define MAX_FSE_TABLELOG_FOR_HUFF_HEADER 6
+
+typedef struct {
+ FSE_CTable CTable[FSE_CTABLE_SIZE_U32(MAX_FSE_TABLELOG_FOR_HUFF_HEADER, HUF_TABLELOG_MAX)];
+ U32 scratchBuffer[FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(HUF_TABLELOG_MAX, MAX_FSE_TABLELOG_FOR_HUFF_HEADER)];
+ unsigned count[HUF_TABLELOG_MAX+1];
+ S16 norm[HUF_TABLELOG_MAX+1];
+} HUF_CompressWeightsWksp;
+
+static size_t HUF_compressWeights(void* dst, size_t dstSize, const void* weightTable, size_t wtSize, void* workspace, size_t workspaceSize)
+{
+ BYTE* const ostart = (BYTE*) dst;
+ BYTE* op = ostart;
+ BYTE* const oend = ostart + dstSize;
+
+ unsigned maxSymbolValue = HUF_TABLELOG_MAX;
+ U32 tableLog = MAX_FSE_TABLELOG_FOR_HUFF_HEADER;
+ HUF_CompressWeightsWksp* wksp = (HUF_CompressWeightsWksp*)HUF_alignUpWorkspace(workspace, &workspaceSize, ZSTD_ALIGNOF(U32));
+
+ if (workspaceSize < sizeof(HUF_CompressWeightsWksp)) return ERROR(GENERIC);
+
+ /* init conditions */
+ if (wtSize <= 1) return 0; /* Not compressible */
+
+ /* Scan input and build symbol stats */
+ { unsigned const maxCount = HIST_count_simple(wksp->count, &maxSymbolValue, weightTable, wtSize); /* never fails */
+ if (maxCount == wtSize) return 1; /* only a single symbol in src : rle */
+ if (maxCount == 1) return 0; /* each symbol present maximum once => not compressible */
+ }
+
+ tableLog = FSE_optimalTableLog(tableLog, wtSize, maxSymbolValue);
+ CHECK_F( FSE_normalizeCount(wksp->norm, tableLog, wksp->count, wtSize, maxSymbolValue, /* useLowProbCount */ 0) );
+
+ /* Write table description header */
+ { CHECK_V_F(hSize, FSE_writeNCount(op, (size_t)(oend-op), wksp->norm, maxSymbolValue, tableLog) );
+ op += hSize;
+ }
+
+ /* Compress */
+ CHECK_F( FSE_buildCTable_wksp(wksp->CTable, wksp->norm, maxSymbolValue, tableLog, wksp->scratchBuffer, sizeof(wksp->scratchBuffer)) );
+ { CHECK_V_F(cSize, FSE_compress_usingCTable(op, (size_t)(oend - op), weightTable, wtSize, wksp->CTable) );
+ if (cSize == 0) return 0; /* not enough space for compressed data */
+ op += cSize;
+ }
+
+ return (size_t)(op-ostart);
+}
+
+static size_t HUF_getNbBits(HUF_CElt elt)
+{
+ return elt & 0xFF;
+}
+
+static size_t HUF_getNbBitsFast(HUF_CElt elt)
+{
+ return elt;
+}
+
+static size_t HUF_getValue(HUF_CElt elt)
+{
+ return elt & ~0xFF;
+}
+
+static size_t HUF_getValueFast(HUF_CElt elt)
+{
+ return elt;
+}
+
+static void HUF_setNbBits(HUF_CElt* elt, size_t nbBits)
+{
+ assert(nbBits <= HUF_TABLELOG_ABSOLUTEMAX);
+ *elt = nbBits;
+}
+
+static void HUF_setValue(HUF_CElt* elt, size_t value)
+{
+ size_t const nbBits = HUF_getNbBits(*elt);
+ if (nbBits > 0) {
+ assert((value >> nbBits) == 0);
+ *elt |= value << (sizeof(HUF_CElt) * 8 - nbBits);
+ }
+}
+
+typedef struct {
+ HUF_CompressWeightsWksp wksp;
+ BYTE bitsToWeight[HUF_TABLELOG_MAX + 1]; /* precomputed conversion table */
+ BYTE huffWeight[HUF_SYMBOLVALUE_MAX];
+} HUF_WriteCTableWksp;
+
+size_t HUF_writeCTable_wksp(void* dst, size_t maxDstSize,
+ const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog,
+ void* workspace, size_t workspaceSize)
+{
+ HUF_CElt const* const ct = CTable + 1;
+ BYTE* op = (BYTE*)dst;
+ U32 n;
+ HUF_WriteCTableWksp* wksp = (HUF_WriteCTableWksp*)HUF_alignUpWorkspace(workspace, &workspaceSize, ZSTD_ALIGNOF(U32));
+
+ /* check conditions */
+ if (workspaceSize < sizeof(HUF_WriteCTableWksp)) return ERROR(GENERIC);
+ if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge);
+
+ /* convert to weight */
+ wksp->bitsToWeight[0] = 0;
+ for (n=1; n<huffLog+1; n++)
+ wksp->bitsToWeight[n] = (BYTE)(huffLog + 1 - n);
+ for (n=0; n<maxSymbolValue; n++)
+ wksp->huffWeight[n] = wksp->bitsToWeight[HUF_getNbBits(ct[n])];
+
+ /* attempt weights compression by FSE */
+ if (maxDstSize < 1) return ERROR(dstSize_tooSmall);
+ { CHECK_V_F(hSize, HUF_compressWeights(op+1, maxDstSize-1, wksp->huffWeight, maxSymbolValue, &wksp->wksp, sizeof(wksp->wksp)) );
+ if ((hSize>1) & (hSize < maxSymbolValue/2)) { /* FSE compressed */
+ op[0] = (BYTE)hSize;
+ return hSize+1;
+ } }
+
+ /* write raw values as 4-bits (max : 15) */
+ if (maxSymbolValue > (256-128)) return ERROR(GENERIC); /* should not happen : likely means source cannot be compressed */
+ if (((maxSymbolValue+1)/2) + 1 > maxDstSize) return ERROR(dstSize_tooSmall); /* not enough space within dst buffer */
+ op[0] = (BYTE)(128 /*special case*/ + (maxSymbolValue-1));
+ wksp->huffWeight[maxSymbolValue] = 0; /* to be sure it doesn't cause msan issue in final combination */
+ for (n=0; n<maxSymbolValue; n+=2)
+ op[(n/2)+1] = (BYTE)((wksp->huffWeight[n] << 4) + wksp->huffWeight[n+1]);
+ return ((maxSymbolValue+1)/2) + 1;
+}
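+
+/* Note : a worked example of the raw 4-bit fallback above : with
+ * maxSymbolValue == 100, the header byte is 128 + (100-1) == 227, then the
+ * weights of symbols 0..99 are packed two per byte (the last symbol's weight
+ * is implied by the tree), giving ((100+1)/2) + 1 == 51 bytes in total. */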
+
+/*! HUF_writeCTable() :
+ `CTable` : Huffman tree to save, using huf representation.
+ @return : size of saved CTable */
+size_t HUF_writeCTable (void* dst, size_t maxDstSize,
+ const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog)
+{
+ HUF_WriteCTableWksp wksp;
+ return HUF_writeCTable_wksp(dst, maxDstSize, CTable, maxSymbolValue, huffLog, &wksp, sizeof(wksp));
+}
+
+
+size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned* hasZeroWeights)
+{
+ BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1]; /* init not required, even though some static analyzer may complain */
+ U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1]; /* large enough for values from 0 to 16 */
+ U32 tableLog = 0;
+ U32 nbSymbols = 0;
+ HUF_CElt* const ct = CTable + 1;
+
+ /* get symbol weights */
+ CHECK_V_F(readSize, HUF_readStats(huffWeight, HUF_SYMBOLVALUE_MAX+1, rankVal, &nbSymbols, &tableLog, src, srcSize));
+ *hasZeroWeights = (rankVal[0] > 0);
+
+ /* check result */
+ if (tableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
+ if (nbSymbols > *maxSymbolValuePtr+1) return ERROR(maxSymbolValue_tooSmall);
+
+ CTable[0] = tableLog;
+
+ /* Prepare base value per rank */
+ { U32 n, nextRankStart = 0;
+ for (n=1; n<=tableLog; n++) {
+ U32 curr = nextRankStart;
+ nextRankStart += (rankVal[n] << (n-1));
+ rankVal[n] = curr;
+ } }
+
+ /* fill nbBits */
+ { U32 n; for (n=0; n<nbSymbols; n++) {
+ const U32 w = huffWeight[n];
+ HUF_setNbBits(ct + n, (BYTE)(tableLog + 1 - w) & -(w != 0));
+ } }
+
+ /* fill val */
+ { U16 nbPerRank[HUF_TABLELOG_MAX+2] = {0}; /* support w=0=>n=tableLog+1 */
+ U16 valPerRank[HUF_TABLELOG_MAX+2] = {0};
+ { U32 n; for (n=0; n<nbSymbols; n++) nbPerRank[HUF_getNbBits(ct[n])]++; }
+ /* determine starting value per rank */
+ valPerRank[tableLog+1] = 0; /* for w==0 */
+ { U16 min = 0;
+ U32 n; for (n=tableLog; n>0; n--) { /* start at n=tablelog <-> w=1 */
+ valPerRank[n] = min; /* get starting value within each rank */
+ min += nbPerRank[n];
+ min >>= 1;
+ } }
+ /* assign value within rank, symbol order */
+ { U32 n; for (n=0; n<nbSymbols; n++) HUF_setValue(ct + n, valPerRank[HUF_getNbBits(ct[n])]++); }
+ }
+
+ *maxSymbolValuePtr = nbSymbols - 1;
+ return readSize;
+}
+
+U32 HUF_getNbBitsFromCTable(HUF_CElt const* CTable, U32 symbolValue)
+{
+ const HUF_CElt* ct = CTable + 1;
+ assert(symbolValue <= HUF_SYMBOLVALUE_MAX);
+ return (U32)HUF_getNbBits(ct[symbolValue]);
+}
+
+
+typedef struct nodeElt_s {
+ U32 count;
+ U16 parent;
+ BYTE byte;
+ BYTE nbBits;
+} nodeElt;
+
+/*
+ * HUF_setMaxHeight():
+ * Enforces maxNbBits on the Huffman tree described in huffNode.
+ *
+ * It sets all nodes with nbBits > maxNbBits to be maxNbBits. Then it adjusts
+ * the tree so that it is a valid canonical Huffman tree.
+ *
+ * @pre The sum of the ranks of each symbol == 2^largestBits,
+ * where largestBits == huffNode[lastNonNull].nbBits.
+ * @post The sum of the ranks of each symbol == 2^largestBits,
+ * where largestBits is the return value <= maxNbBits.
+ *
+ * @param huffNode The Huffman tree modified in place to enforce maxNbBits.
+ * @param lastNonNull The symbol with the lowest count in the Huffman tree.
+ * @param maxNbBits The maximum allowed number of bits, which the Huffman tree
+ * may not respect. After this function the Huffman tree will
+ * respect maxNbBits.
+ * @return The maximum number of bits of the Huffman tree after adjustment,
+ * necessarily no more than maxNbBits.
+ */
+static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits)
+{
+ const U32 largestBits = huffNode[lastNonNull].nbBits;
+ /* early exit : no elt > maxNbBits, so the tree is already valid. */
+ if (largestBits <= maxNbBits) return largestBits;
+
+ /* there are several too large elements (at least >= 2) */
+ { int totalCost = 0;
+ const U32 baseCost = 1 << (largestBits - maxNbBits);
+ int n = (int)lastNonNull;
+
+ /* Adjust any ranks > maxNbBits to maxNbBits.
+ * Compute totalCost, which is how far the sum of the ranks is
+ * over 2^largestBits after adjusting the offending ranks.
+ */
+ while (huffNode[n].nbBits > maxNbBits) {
+ totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits));
+ huffNode[n].nbBits = (BYTE)maxNbBits;
+ n--;
+ }
+ /* n stops at huffNode[n].nbBits <= maxNbBits */
+ assert(huffNode[n].nbBits <= maxNbBits);
+ /* n ends at the index of the smallest symbol using < maxNbBits */
+ while (huffNode[n].nbBits == maxNbBits) --n;
+
+ /* renorm totalCost from 2^largestBits to 2^maxNbBits
+ * note : totalCost is necessarily a multiple of baseCost */
+ assert((totalCost & (baseCost - 1)) == 0);
+ totalCost >>= (largestBits - maxNbBits);
+ assert(totalCost > 0);
+
+ /* repay normalized cost */
+ { U32 const noSymbol = 0xF0F0F0F0;
+ U32 rankLast[HUF_TABLELOG_MAX+2];
+
+ /* Get pos of last (smallest = lowest cum. count) symbol per rank */
+ ZSTD_memset(rankLast, 0xF0, sizeof(rankLast));
+ { U32 currentNbBits = maxNbBits;
+ int pos;
+ for (pos=n ; pos >= 0; pos--) {
+ if (huffNode[pos].nbBits >= currentNbBits) continue;
+ currentNbBits = huffNode[pos].nbBits; /* < maxNbBits */
+ rankLast[maxNbBits-currentNbBits] = (U32)pos;
+ } }
+
+ while (totalCost > 0) {
+ /* Try to reduce the next power of 2 above totalCost because we
+ * gain back half the rank.
+ */
+ U32 nBitsToDecrease = BIT_highbit32((U32)totalCost) + 1;
+ for ( ; nBitsToDecrease > 1; nBitsToDecrease--) {
+ U32 const highPos = rankLast[nBitsToDecrease];
+ U32 const lowPos = rankLast[nBitsToDecrease-1];
+ if (highPos == noSymbol) continue;
+ /* Decrease highPos if lowPos has no symbols, or if it is
+ * not cheaper to remove 2 lowPos symbols than 1 highPos symbol.
+ */
+ if (lowPos == noSymbol) break;
+ { U32 const highTotal = huffNode[highPos].count;
+ U32 const lowTotal = 2 * huffNode[lowPos].count;
+ if (highTotal <= lowTotal) break;
+ } }
+ /* only triggered when no more rank 1 symbols are left => find the closest one (note : there is necessarily at least one !) */
+ assert(rankLast[nBitsToDecrease] != noSymbol || nBitsToDecrease == 1);
+ /* HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary */
+ while ((nBitsToDecrease<=HUF_TABLELOG_MAX) && (rankLast[nBitsToDecrease] == noSymbol))
+ nBitsToDecrease++;
+ assert(rankLast[nBitsToDecrease] != noSymbol);
+ /* Increase the number of bits to gain back half the rank cost. */
+ totalCost -= 1 << (nBitsToDecrease-1);
+ huffNode[rankLast[nBitsToDecrease]].nbBits++;
+
+ /* Fix up the new rank.
+ * If the new rank was empty, this symbol is now its smallest.
+ * Otherwise, this symbol will be the largest in the new rank so no adjustment.
+ */
+ if (rankLast[nBitsToDecrease-1] == noSymbol)
+ rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease];
+ /* Fix up the old rank.
+ * If the symbol was at position 0, meaning it was the highest weight symbol in the tree,
+ * it must be the only symbol in its rank, so the old rank now has no symbols.
+ * Otherwise, since the Huffman nodes are sorted by count, the previous position is now
+ * the smallest node in the rank. If the previous position belongs to a different rank,
+ * then the rank is now empty.
+ */
+ if (rankLast[nBitsToDecrease] == 0) /* special case, reached largest symbol */
+ rankLast[nBitsToDecrease] = noSymbol;
+ else {
+ rankLast[nBitsToDecrease]--;
+ if (huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease)
+ rankLast[nBitsToDecrease] = noSymbol; /* this rank is now empty */
+ }
+ } /* while (totalCost > 0) */
+
+ /* If we've removed too much weight, then we have to add it back.
+ * To avoid overshooting again, we only adjust the smallest rank.
+ * We take the largest nodes from the lowest rank 0 and move them
+ * to rank 1. There's guaranteed to be enough rank 0 symbols because
+ * TODO.
+ */
+ while (totalCost < 0) { /* Sometimes, cost correction overshoot */
+ /* special case : no rank 1 symbol (using maxNbBits-1);
+ * let's create one from largest rank 0 (using maxNbBits).
+ */
+ if (rankLast[1] == noSymbol) {
+ while (huffNode[n].nbBits == maxNbBits) n--;
+ huffNode[n+1].nbBits--;
+ assert(n >= 0);
+ rankLast[1] = (U32)(n+1);
+ totalCost++;
+ continue;
+ }
+ huffNode[ rankLast[1] + 1 ].nbBits--;
+ rankLast[1]++;
+ totalCost ++;
+ }
+ } /* repay normalized cost */
+ } /* there are several too large elements (at least >= 2) */
+
+ return maxNbBits;
+}
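+
+/* Note : a small worked example of the cost accounting in HUF_setMaxHeight(),
+ * assuming largestBits == 12 and maxNbBits == 11, so baseCost == 2 :
+ * each node trimmed from 12 to 11 bits adds baseCost - (1 << (12-12)) == 1
+ * to totalCost; two trimmed nodes give totalCost == 2, renormalized to
+ * 2 >> (12-11) == 1. Repayment then demotes one 10-bit symbol to 11 bits,
+ * subtracting 1 << (nBitsToDecrease-1) == 1, and the loop terminates. */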
+
+typedef struct {
+ U16 base;
+ U16 curr;
+} rankPos;
+
+typedef nodeElt huffNodeTable[HUF_CTABLE_WORKSPACE_SIZE_U32];
+
+/* Number of buckets available for HUF_sort() */
+#define RANK_POSITION_TABLE_SIZE 192
+
+typedef struct {
+ huffNodeTable huffNodeTbl;
+ rankPos rankPosition[RANK_POSITION_TABLE_SIZE];
+} HUF_buildCTable_wksp_tables;
+
+/* RANK_POSITION_DISTINCT_COUNT_CUTOFF == Cutoff point in HUF_sort() buckets for which we use log2 bucketing.
+ * Strategy is to use as many buckets as possible for representing distinct
+ * counts while using the remainder to represent all "large" counts.
+ *
+ * To satisfy this requirement for 192 buckets, we can do the following:
+ * Let buckets 0 to 165 represent distinct counts of [0, 166)
+ * Let buckets 166 to 191 represent all remaining counts up to RANK_POSITION_MAX_COUNT_LOG using log2 bucketing.
+ */
+#define RANK_POSITION_MAX_COUNT_LOG 32
+#define RANK_POSITION_LOG_BUCKETS_BEGIN (RANK_POSITION_TABLE_SIZE - 1) - RANK_POSITION_MAX_COUNT_LOG - 1 /* == 158 */
+#define RANK_POSITION_DISTINCT_COUNT_CUTOFF RANK_POSITION_LOG_BUCKETS_BEGIN + BIT_highbit32(RANK_POSITION_LOG_BUCKETS_BEGIN) /* == 166 */
+
+/* Return the appropriate bucket index for a given count. See definition of
+ * RANK_POSITION_DISTINCT_COUNT_CUTOFF for explanation of bucketing strategy.
+ */
+static U32 HUF_getIndex(U32 const count) {
+ return (count < RANK_POSITION_DISTINCT_COUNT_CUTOFF)
+ ? count
+ : BIT_highbit32(count) + RANK_POSITION_LOG_BUCKETS_BEGIN;
+}
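+
+/* Note : worked examples of the mapping above, using the macro values as
+ * defined : count == 42 is below the cutoff and maps to bucket 42, while
+ * count == 100000 maps to BIT_highbit32(100000) + 158 == 16 + 158 == 174. */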
+
+/* Helper swap function for HUF_quickSortPartition() */
+static void HUF_swapNodes(nodeElt* a, nodeElt* b) {
+ nodeElt tmp = *a;
+ *a = *b;
+ *b = tmp;
+}
+
+/* Returns 0 if the huffNode array is not sorted by descending count */
+MEM_STATIC int HUF_isSorted(nodeElt huffNode[], U32 const maxSymbolValue1) {
+ U32 i;
+ for (i = 1; i < maxSymbolValue1; ++i) {
+ if (huffNode[i].count > huffNode[i-1].count) {
+ return 0;
+ }
+ }
+ return 1;
+}
+
+/* Insertion sort by descending order */
+HINT_INLINE void HUF_insertionSort(nodeElt huffNode[], int const low, int const high) {
+ int i;
+ int const size = high-low+1;
+ huffNode += low;
+ for (i = 1; i < size; ++i) {
+ nodeElt const key = huffNode[i];
+ int j = i - 1;
+ while (j >= 0 && huffNode[j].count < key.count) {
+ huffNode[j + 1] = huffNode[j];
+ j--;
+ }
+ huffNode[j + 1] = key;
+ }
+}
+
+/* Pivot helper function for quicksort. */
+static int HUF_quickSortPartition(nodeElt arr[], int const low, int const high) {
+ /* Simply select rightmost element as pivot. "Better" selectors like
+ * median-of-three don't experimentally appear to have any benefit.
+ */
+ U32 const pivot = arr[high].count;
+ int i = low - 1;
+ int j = low;
+ for ( ; j < high; j++) {
+ if (arr[j].count > pivot) {
+ i++;
+ HUF_swapNodes(&arr[i], &arr[j]);
+ }
+ }
+ HUF_swapNodes(&arr[i + 1], &arr[high]);
+ return i + 1;
+}
+
+/* Classic quicksort by descending with partially iterative calls
+ * to reduce worst case callstack size.
+ */
+static void HUF_simpleQuickSort(nodeElt arr[], int low, int high) {
+ int const kInsertionSortThreshold = 8;
+ if (high - low < kInsertionSortThreshold) {
+ HUF_insertionSort(arr, low, high);
+ return;
+ }
+ while (low < high) {
+ int const idx = HUF_quickSortPartition(arr, low, high);
+ if (idx - low < high - idx) {
+ HUF_simpleQuickSort(arr, low, idx - 1);
+ low = idx + 1;
+ } else {
+ HUF_simpleQuickSort(arr, idx + 1, high);
+ high = idx - 1;
+ }
+ }
+}
+
+/*
+ * HUF_sort():
+ * Sorts the symbols [0, maxSymbolValue] by count[symbol] in decreasing order.
+ * This is a typical bucket sorting strategy that uses either quicksort or insertion sort to sort each bucket.
+ *
+ * @param[out] huffNode Sorted symbols by decreasing count. Only members `.count` and `.byte` are filled.
+ * Must have (maxSymbolValue + 1) entries.
+ * @param[in] count Histogram of the symbols.
+ * @param[in] maxSymbolValue Maximum symbol value.
+ * @param rankPosition This is a scratch workspace. Must have RANK_POSITION_TABLE_SIZE entries.
+ */
+static void HUF_sort(nodeElt huffNode[], const unsigned count[], U32 const maxSymbolValue, rankPos rankPosition[]) {
+ U32 n;
+ U32 const maxSymbolValue1 = maxSymbolValue+1;
+
+ /* Compute base and set curr to base.
+ * For symbol n let lowerRank = HUF_getIndex(count[n]) and rank = lowerRank + 1.
+ * See HUF_getIndex for the bucketing strategy.
+ * We attribute each symbol to lowerRank's base value, because we want to know where
+ * each rank begins in the output, so for rank R we want to count ranks R+1 and above.
+ */
+ ZSTD_memset(rankPosition, 0, sizeof(*rankPosition) * RANK_POSITION_TABLE_SIZE);
+ for (n = 0; n < maxSymbolValue1; ++n) {
+ U32 lowerRank = HUF_getIndex(count[n]);
+ assert(lowerRank < RANK_POSITION_TABLE_SIZE - 1);
+ rankPosition[lowerRank].base++;
+ }
+
+ assert(rankPosition[RANK_POSITION_TABLE_SIZE - 1].base == 0);
+ /* Set up the rankPosition table */
+ for (n = RANK_POSITION_TABLE_SIZE - 1; n > 0; --n) {
+ rankPosition[n-1].base += rankPosition[n].base;
+ rankPosition[n-1].curr = rankPosition[n-1].base;
+ }
+
+ /* Insert each symbol into its appropriate bucket, setting up the rankPosition table. */
+ for (n = 0; n < maxSymbolValue1; ++n) {
+ U32 const c = count[n];
+ U32 const r = HUF_getIndex(c) + 1;
+ U32 const pos = rankPosition[r].curr++;
+ assert(pos < maxSymbolValue1);
+ huffNode[pos].count = c;
+ huffNode[pos].byte = (BYTE)n;
+ }
+
+ /* Sort each bucket. */
+ for (n = RANK_POSITION_DISTINCT_COUNT_CUTOFF; n < RANK_POSITION_TABLE_SIZE - 1; ++n) {
+ U32 const bucketSize = rankPosition[n].curr-rankPosition[n].base;
+ U32 const bucketStartIdx = rankPosition[n].base;
+ if (bucketSize > 1) {
+ assert(bucketStartIdx < maxSymbolValue1);
+ HUF_simpleQuickSort(huffNode + bucketStartIdx, 0, bucketSize-1);
+ }
+ }
+
+ assert(HUF_isSorted(huffNode, maxSymbolValue1));
+}
+
+/* HUF_buildCTable_wksp() :
+ * Same as HUF_buildCTable(), but using externally allocated scratch buffer.
+ * `workSpace` must be aligned on 4-byte boundaries, and be at least as large as sizeof(HUF_buildCTable_wksp_tables).
+ */
+#define STARTNODE (HUF_SYMBOLVALUE_MAX+1)
+
+/* HUF_buildTree():
+ * Takes the huffNode array sorted by HUF_sort() and builds an unlimited-depth Huffman tree.
+ *
+ * @param huffNode The array sorted by HUF_sort(). Builds the Huffman tree in this array.
+ * @param maxSymbolValue The maximum symbol value.
+ * @return The smallest node in the Huffman tree (by count).
+ */
+static int HUF_buildTree(nodeElt* huffNode, U32 maxSymbolValue)
+{
+ nodeElt* const huffNode0 = huffNode - 1;
+ int nonNullRank;
+ int lowS, lowN;
+ int nodeNb = STARTNODE;
+ int n, nodeRoot;
+ /* init for parents */
+ nonNullRank = (int)maxSymbolValue;
+ while(huffNode[nonNullRank].count == 0) nonNullRank--;
+ lowS = nonNullRank; nodeRoot = nodeNb + lowS - 1; lowN = nodeNb;
+ huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count;
+ huffNode[lowS].parent = huffNode[lowS-1].parent = (U16)nodeNb;
+ nodeNb++; lowS-=2;
+ for (n=nodeNb; n<=nodeRoot; n++) huffNode[n].count = (U32)(1U<<30);
+ huffNode0[0].count = (U32)(1U<<31); /* fake entry, strong barrier */
+
+ /* create parents */
+ while (nodeNb <= nodeRoot) {
+ int const n1 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;
+ int const n2 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;
+ huffNode[nodeNb].count = huffNode[n1].count + huffNode[n2].count;
+ huffNode[n1].parent = huffNode[n2].parent = (U16)nodeNb;
+ nodeNb++;
+ }
+
+ /* distribute weights (unlimited tree height) */
+ huffNode[nodeRoot].nbBits = 0;
+ for (n=nodeRoot-1; n>=STARTNODE; n--)
+ huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1;
+ for (n=0; n<=nonNullRank; n++)
+ huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1;
+
+ return nonNullRank;
+}
+
+/*
+ * HUF_buildCTableFromTree():
+ * Build the CTable given the Huffman tree in huffNode.
+ *
+ * @param[out] CTable The output Huffman CTable.
+ * @param huffNode The Huffman tree.
+ * @param nonNullRank The last and smallest node in the Huffman tree.
+ * @param maxSymbolValue The maximum symbol value.
+ * @param maxNbBits The exact maximum number of bits used in the Huffman tree.
+ */
+static void HUF_buildCTableFromTree(HUF_CElt* CTable, nodeElt const* huffNode, int nonNullRank, U32 maxSymbolValue, U32 maxNbBits)
+{
+ HUF_CElt* const ct = CTable + 1;
+ /* fill result into ctable (val, nbBits) */
+ int n;
+ U16 nbPerRank[HUF_TABLELOG_MAX+1] = {0};
+ U16 valPerRank[HUF_TABLELOG_MAX+1] = {0};
+ int const alphabetSize = (int)(maxSymbolValue + 1);
+ for (n=0; n<=nonNullRank; n++)
+ nbPerRank[huffNode[n].nbBits]++;
+ /* determine starting value per rank */
+ { U16 min = 0;
+ for (n=(int)maxNbBits; n>0; n--) {
+ valPerRank[n] = min; /* get starting value within each rank */
+ min += nbPerRank[n];
+ min >>= 1;
+ } }
+ for (n=0; n<alphabetSize; n++)
+ HUF_setNbBits(ct + huffNode[n].byte, huffNode[n].nbBits); /* push nbBits per symbol, symbol order */
+ for (n=0; n<alphabetSize; n++)
+ HUF_setValue(ct + n, valPerRank[HUF_getNbBits(ct[n])]++); /* assign value within rank, symbol order */
+ CTable[0] = maxNbBits;
+}
+
+size_t HUF_buildCTable_wksp (HUF_CElt* CTable, const unsigned* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize)
+{
+ HUF_buildCTable_wksp_tables* const wksp_tables = (HUF_buildCTable_wksp_tables*)HUF_alignUpWorkspace(workSpace, &wkspSize, ZSTD_ALIGNOF(U32));
+ nodeElt* const huffNode0 = wksp_tables->huffNodeTbl;
+ nodeElt* const huffNode = huffNode0+1;
+ int nonNullRank;
+
+ /* safety checks */
+ if (wkspSize < sizeof(HUF_buildCTable_wksp_tables))
+ return ERROR(workSpace_tooSmall);
+ if (maxNbBits == 0) maxNbBits = HUF_TABLELOG_DEFAULT;
+ if (maxSymbolValue > HUF_SYMBOLVALUE_MAX)
+ return ERROR(maxSymbolValue_tooLarge);
+ ZSTD_memset(huffNode0, 0, sizeof(huffNodeTable));
+
+ /* sort, decreasing order */
+ HUF_sort(huffNode, count, maxSymbolValue, wksp_tables->rankPosition);
+
+ /* build tree */
+ nonNullRank = HUF_buildTree(huffNode, maxSymbolValue);
+
+ /* enforce maxTableLog */
+ maxNbBits = HUF_setMaxHeight(huffNode, (U32)nonNullRank, maxNbBits);
+ if (maxNbBits > HUF_TABLELOG_MAX) return ERROR(GENERIC); /* check fit into table */
+
+ HUF_buildCTableFromTree(CTable, huffNode, nonNullRank, maxSymbolValue, maxNbBits);
+
+ return maxNbBits;
+}
+
+size_t HUF_estimateCompressedSize(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue)
+{
+ HUF_CElt const* ct = CTable + 1;
+ size_t nbBits = 0;
+ int s;
+ for (s = 0; s <= (int)maxSymbolValue; ++s) {
+ nbBits += HUF_getNbBits(ct[s]) * count[s];
+ }
+ return nbBits >> 3;
+}
+
+int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue) {
+ HUF_CElt const* ct = CTable + 1;
+ int bad = 0;
+ int s;
+ for (s = 0; s <= (int)maxSymbolValue; ++s) {
+ bad |= (count[s] != 0) & (HUF_getNbBits(ct[s]) == 0);
+ }
+ return !bad;
+}
+
+size_t HUF_compressBound(size_t size) { return HUF_COMPRESSBOUND(size); }
+
+/* HUF_CStream_t:
+ * Huffman uses its own BIT_CStream_t implementation.
+ * There are three major differences from BIT_CStream_t:
+ * 1. HUF_addBits() takes a HUF_CElt (size_t) which is
+ * the pair (nbBits, value) in the format:
+ * format:
+ * - Bits [0, 4) = nbBits
+ * - Bits [4, 64 - nbBits) = 0
+ * - Bits [64 - nbBits, 64) = value
+ * 2. The bitContainer is built from the upper bits and
+ * right shifted. E.g. to add a new value of N bits
+ * you right shift the bitContainer by N, then or in
+ * the new value into the N upper bits.
+ * 3. The bitstream has two bit containers. You can add
+ * bits to the second container and merge them into
+ * the first container.
+ */
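+
+/* Note : an illustrative trace of rule 2 above, on a 64-bit container :
+ * adding (nbBits == 3, value == 0b101) shifts the container right by 3 and
+ * ORs the value into bits [61, 64); when flushed, the container's top bits
+ * are emitted first, so earlier symbols land in the lower bit positions of
+ * the little-endian output. */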
+
+#define HUF_BITS_IN_CONTAINER (sizeof(size_t) * 8)
+
+typedef struct {
+ size_t bitContainer[2];
+ size_t bitPos[2];
+
+ BYTE* startPtr;
+ BYTE* ptr;
+ BYTE* endPtr;
+} HUF_CStream_t;
+
+/*! HUF_initCStream():
+ * Initializes the bitstream.
+ * @returns 0 or an error code.
+ */
+static size_t HUF_initCStream(HUF_CStream_t* bitC,
+ void* startPtr, size_t dstCapacity)
+{
+ ZSTD_memset(bitC, 0, sizeof(*bitC));
+ bitC->startPtr = (BYTE*)startPtr;
+ bitC->ptr = bitC->startPtr;
+ bitC->endPtr = bitC->startPtr + dstCapacity - sizeof(bitC->bitContainer[0]);
+ if (dstCapacity <= sizeof(bitC->bitContainer[0])) return ERROR(dstSize_tooSmall);
+ return 0;
+}
+
+/*! HUF_addBits():
+ * Adds the symbol stored in HUF_CElt elt to the bitstream.
+ *
+ * @param elt The element we're adding. This is a (nbBits, value) pair.
+ * See the HUF_CStream_t docs for the format.
+ * @param idx Insert into the bitstream at this idx.
+ * @param kFast This is a template parameter. If the bitstream is guaranteed
+ * to have at least 4 unused bits after this call it may be 1,
+ * otherwise it must be 0. HUF_addBits() is faster when fast is set.
+ */
+FORCE_INLINE_TEMPLATE void HUF_addBits(HUF_CStream_t* bitC, HUF_CElt elt, int idx, int kFast)
+{
+ assert(idx <= 1);
+ assert(HUF_getNbBits(elt) <= HUF_TABLELOG_ABSOLUTEMAX);
+ /* This is efficient on x86-64 with BMI2 because shrx
+ * only reads the low 6 bits of the register. The compiler
+ * knows this and elides the mask. When fast is set,
+ * every operation can use the same value loaded from elt.
+ */
+ bitC->bitContainer[idx] >>= HUF_getNbBits(elt);
+ bitC->bitContainer[idx] |= kFast ? HUF_getValueFast(elt) : HUF_getValue(elt);
+ /* We only read the low 8 bits of bitC->bitPos[idx] so it
+ * doesn't matter that the high bits have noise from the value.
+ */
+ bitC->bitPos[idx] += HUF_getNbBitsFast(elt);
+ assert((bitC->bitPos[idx] & 0xFF) <= HUF_BITS_IN_CONTAINER);
+ /* The last 4 bits of elt are dirty if fast is set,
+ * so we must not be overwriting bits that have already been
+ * inserted into the bit container.
+ */
+#if DEBUGLEVEL >= 1
+ {
+ size_t const nbBits = HUF_getNbBits(elt);
+ size_t const dirtyBits = nbBits == 0 ? 0 : BIT_highbit32((U32)nbBits) + 1;
+ (void)dirtyBits;
+ /* Middle bits are 0. */
+ assert(((elt >> dirtyBits) << (dirtyBits + nbBits)) == 0);
+ /* We didn't overwrite any bits in the bit container. */
+ assert(!kFast || (bitC->bitPos[idx] & 0xFF) <= HUF_BITS_IN_CONTAINER);
+ }
+#endif
+}
+
+FORCE_INLINE_TEMPLATE void HUF_zeroIndex1(HUF_CStream_t* bitC)
+{
+ bitC->bitContainer[1] = 0;
+ bitC->bitPos[1] = 0;
+}
+
+/*! HUF_mergeIndex1() :
+ * Merges the bit container @ index 1 into the bit container @ index 0
+ * and zeros the bit container @ index 1.
+ */
+FORCE_INLINE_TEMPLATE void HUF_mergeIndex1(HUF_CStream_t* bitC)
+{
+ assert((bitC->bitPos[1] & 0xFF) < HUF_BITS_IN_CONTAINER);
+ bitC->bitContainer[0] >>= (bitC->bitPos[1] & 0xFF);
+ bitC->bitContainer[0] |= bitC->bitContainer[1];
+ bitC->bitPos[0] += bitC->bitPos[1];
+ assert((bitC->bitPos[0] & 0xFF) <= HUF_BITS_IN_CONTAINER);
+}
+
+/*! HUF_flushBits() :
+* Flushes the bits in the bit container @ index 0.
+*
+* @post bitPos will be < 8.
+* @param kFast If kFast is set then we must know a priori that
+* the bit container will not overflow.
+*/
+FORCE_INLINE_TEMPLATE void HUF_flushBits(HUF_CStream_t* bitC, int kFast)
+{
+ /* The upper bits of bitPos are noisy, so we must mask by 0xFF. */
+ size_t const nbBits = bitC->bitPos[0] & 0xFF;
+ size_t const nbBytes = nbBits >> 3;
+ /* The top nbBits bits of bitContainer are the ones we need. */
+ size_t const bitContainer = bitC->bitContainer[0] >> (HUF_BITS_IN_CONTAINER - nbBits);
+ /* Mask bitPos to account for the bytes we consumed. */
+ bitC->bitPos[0] &= 7;
+ assert(nbBits > 0);
+ assert(nbBits <= sizeof(bitC->bitContainer[0]) * 8);
+ assert(bitC->ptr <= bitC->endPtr);
+ MEM_writeLEST(bitC->ptr, bitContainer);
+ bitC->ptr += nbBytes;
+ assert(!kFast || bitC->ptr <= bitC->endPtr);
+ if (!kFast && bitC->ptr > bitC->endPtr) bitC->ptr = bitC->endPtr;
+ /* bitContainer doesn't need to be modified because the leftover
+ * bits are already the top bitPos bits. And we don't care about
+ * noise in the lower values.
+ */
+}
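+
+/* Note : a worked flush, assuming a 64-bit container with bitPos == 13 :
+ * nbBytes == 13 >> 3 == 1, the top 13 bits are shifted down and stored
+ * little-endian, ptr advances by 1 byte, and bitPos &= 7 leaves the 5
+ * not-yet-written bits at the top of the container for the next flush. */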
+
+/*! HUF_endMark()
+ * @returns The Huffman stream end mark: A 1-bit value = 1.
+ */
+static HUF_CElt HUF_endMark(void)
+{
+ HUF_CElt endMark;
+ HUF_setNbBits(&endMark, 1);
+ HUF_setValue(&endMark, 1);
+ return endMark;
+}
+
+/*! HUF_closeCStream() :
+ * @return Size of CStream, in bytes,
+ * or 0 if it could not fit into dstBuffer */
+static size_t HUF_closeCStream(HUF_CStream_t* bitC)
+{
+ HUF_addBits(bitC, HUF_endMark(), /* idx */ 0, /* kFast */ 0);
+ HUF_flushBits(bitC, /* kFast */ 0);
+ {
+ size_t const nbBits = bitC->bitPos[0] & 0xFF;
+ if (bitC->ptr >= bitC->endPtr) return 0; /* overflow detected */
+ return (bitC->ptr - bitC->startPtr) + (nbBits > 0);
+ }
+}
+
+FORCE_INLINE_TEMPLATE void
+HUF_encodeSymbol(HUF_CStream_t* bitCPtr, U32 symbol, const HUF_CElt* CTable, int idx, int fast)
+{
+ HUF_addBits(bitCPtr, CTable[symbol], idx, fast);
+}
+
+FORCE_INLINE_TEMPLATE void
+HUF_compress1X_usingCTable_internal_body_loop(HUF_CStream_t* bitC,
+ const BYTE* ip, size_t srcSize,
+ const HUF_CElt* ct,
+ int kUnroll, int kFastFlush, int kLastFast)
+{
+ /* Join to kUnroll */
+ int n = (int)srcSize;
+ int rem = n % kUnroll;
+ if (rem > 0) {
+ for (; rem > 0; --rem) {
+ HUF_encodeSymbol(bitC, ip[--n], ct, 0, /* fast */ 0);
+ }
+ HUF_flushBits(bitC, kFastFlush);
+ }
+ assert(n % kUnroll == 0);
+
+ /* Join to 2 * kUnroll */
+ if (n % (2 * kUnroll)) {
+ int u;
+ for (u = 1; u < kUnroll; ++u) {
+ HUF_encodeSymbol(bitC, ip[n - u], ct, 0, 1);
+ }
+ HUF_encodeSymbol(bitC, ip[n - kUnroll], ct, 0, kLastFast);
+ HUF_flushBits(bitC, kFastFlush);
+ n -= kUnroll;
+ }
+ assert(n % (2 * kUnroll) == 0);
+
+ for (; n>0; n-= 2 * kUnroll) {
+ /* Encode kUnroll symbols into the bitstream @ index 0. */
+ int u;
+ for (u = 1; u < kUnroll; ++u) {
+ HUF_encodeSymbol(bitC, ip[n - u], ct, /* idx */ 0, /* fast */ 1);
+ }
+ HUF_encodeSymbol(bitC, ip[n - kUnroll], ct, /* idx */ 0, /* fast */ kLastFast);
+ HUF_flushBits(bitC, kFastFlush);
+ /* Encode kUnroll symbols into the bitstream @ index 1.
+ * This allows us to start filling the bit container
+ * without any data dependencies.
+ */
+ HUF_zeroIndex1(bitC);
+ for (u = 1; u < kUnroll; ++u) {
+ HUF_encodeSymbol(bitC, ip[n - kUnroll - u], ct, /* idx */ 1, /* fast */ 1);
+ }
+ HUF_encodeSymbol(bitC, ip[n - kUnroll - kUnroll], ct, /* idx */ 1, /* fast */ kLastFast);
+ /* Merge bitstream @ index 1 into the bitstream @ index 0 */
+ HUF_mergeIndex1(bitC);
+ HUF_flushBits(bitC, kFastFlush);
+ }
+ assert(n == 0);
+
+}
+
+/*
+ * Returns a tight upper bound on the output space needed by Huffman
+ * with an 8-byte buffer to handle over-writes. If the output is at least
+ * this large we don't need to do bounds checks during Huffman encoding.
+ */
+static size_t HUF_tightCompressBound(size_t srcSize, size_t tableLog)
+{
+ return ((srcSize * tableLog) >> 3) + 8;
+}
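+
+/* Note : e.g. srcSize == 1000 at tableLog == 11 gives
+ * ((1000 * 11) >> 3) + 8 == 1375 + 8 == 1383 bytes. */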
+
+
+FORCE_INLINE_TEMPLATE size_t
+HUF_compress1X_usingCTable_internal_body(void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ const HUF_CElt* CTable)
+{
+ U32 const tableLog = (U32)CTable[0];
+ HUF_CElt const* ct = CTable + 1;
+ const BYTE* ip = (const BYTE*) src;
+ BYTE* const ostart = (BYTE*)dst;
+ BYTE* const oend = ostart + dstSize;
+ BYTE* op = ostart;
+ HUF_CStream_t bitC;
+
+ /* init */
+ if (dstSize < 8) return 0; /* not enough space to compress */
+ { size_t const initErr = HUF_initCStream(&bitC, op, (size_t)(oend-op));
+ if (HUF_isError(initErr)) return 0; }
+
+ if (dstSize < HUF_tightCompressBound(srcSize, (size_t)tableLog) || tableLog > 11)
+ HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ MEM_32bits() ? 2 : 4, /* kFast */ 0, /* kLastFast */ 0);
+ else {
+ if (MEM_32bits()) {
+ switch (tableLog) {
+ case 11:
+ HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 2, /* kFastFlush */ 1, /* kLastFast */ 0);
+ break;
+ case 10: ZSTD_FALLTHROUGH;
+ case 9: ZSTD_FALLTHROUGH;
+ case 8:
+ HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 2, /* kFastFlush */ 1, /* kLastFast */ 1);
+ break;
+ case 7: ZSTD_FALLTHROUGH;
+ default:
+ HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 3, /* kFastFlush */ 1, /* kLastFast */ 1);
+ break;
+ }
+ } else {
+ switch (tableLog) {
+ case 11:
+ HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 5, /* kFastFlush */ 1, /* kLastFast */ 0);
+ break;
+ case 10:
+ HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 5, /* kFastFlush */ 1, /* kLastFast */ 1);
+ break;
+ case 9:
+ HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 6, /* kFastFlush */ 1, /* kLastFast */ 0);
+ break;
+ case 8:
+ HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 7, /* kFastFlush */ 1, /* kLastFast */ 0);
+ break;
+ case 7:
+ HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 8, /* kFastFlush */ 1, /* kLastFast */ 0);
+ break;
+ case 6: ZSTD_FALLTHROUGH;
+ default:
+ HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 9, /* kFastFlush */ 1, /* kLastFast */ 1);
+ break;
+ }
+ }
+ }
+ assert(bitC.ptr <= bitC.endPtr);
+
+ return HUF_closeCStream(&bitC);
+}
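+
+/* Note : the kUnroll values above appear to be chosen so that kUnroll symbols
+ * of at most tableLog bits, plus the <= 7 bits left over from the previous
+ * flush, always fit in the bit container : e.g. on 64-bit targets,
+ * tableLog == 11 allows kUnroll == 5 (5*11 + 7 == 62 <= 64), while
+ * kUnroll == 6 could overflow (6*11 + 7 == 73). */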
+
+#if DYNAMIC_BMI2
+
+static BMI2_TARGET_ATTRIBUTE size_t
+HUF_compress1X_usingCTable_internal_bmi2(void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ const HUF_CElt* CTable)
+{
+ return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);
+}
+
+static size_t
+HUF_compress1X_usingCTable_internal_default(void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ const HUF_CElt* CTable)
+{
+ return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);
+}
+
+static size_t
+HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ const HUF_CElt* CTable, const int bmi2)
+{
+ if (bmi2) {
+ return HUF_compress1X_usingCTable_internal_bmi2(dst, dstSize, src, srcSize, CTable);
+ }
+ return HUF_compress1X_usingCTable_internal_default(dst, dstSize, src, srcSize, CTable);
+}
+
+#else
+
+static size_t
+HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ const HUF_CElt* CTable, const int bmi2)
+{
+ (void)bmi2;
+ return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);
+}
+
+#endif
+
+size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable)
+{
+ return HUF_compress1X_usingCTable_bmi2(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0);
+}
+
+size_t HUF_compress1X_usingCTable_bmi2(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int bmi2)
+{
+ return HUF_compress1X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, bmi2);
+}
+
+static size_t
+HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ const HUF_CElt* CTable, int bmi2)
+{
+ size_t const segmentSize = (srcSize+3)/4; /* first 3 segments */
+ const BYTE* ip = (const BYTE*) src;
+ const BYTE* const iend = ip + srcSize;
+ BYTE* const ostart = (BYTE*) dst;
+ BYTE* const oend = ostart + dstSize;
+ BYTE* op = ostart;
+
+ if (dstSize < 6 + 1 + 1 + 1 + 8) return 0; /* minimum space to compress successfully */
+ if (srcSize < 12) return 0; /* no saving possible : input too small */
+ op += 6; /* jumpTable */
+
+ assert(op <= oend);
+ { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) );
+ if (cSize == 0 || cSize > 65535) return 0;
+ MEM_writeLE16(ostart, (U16)cSize);
+ op += cSize;
+ }
+
+ ip += segmentSize;
+ assert(op <= oend);
+ { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) );
+ if (cSize == 0 || cSize > 65535) return 0;
+ MEM_writeLE16(ostart+2, (U16)cSize);
+ op += cSize;
+ }
+
+ ip += segmentSize;
+ assert(op <= oend);
+ { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) );
+ if (cSize == 0 || cSize > 65535) return 0;
+ MEM_writeLE16(ostart+4, (U16)cSize);
+ op += cSize;
+ }
+
+ ip += segmentSize;
+ assert(op <= oend);
+ assert(ip <= iend);
+ { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, (size_t)(iend-ip), CTable, bmi2) );
+ if (cSize == 0 || cSize > 65535) return 0;
+ op += cSize;
+ }
+
+ return (size_t)(op-ostart);
+}
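+
+/* Note : the 4-stream layout produced above is :
+ * [jumpTable : 3 x U16 little-endian sizes of streams 1-3]
+ * [stream 1][stream 2][stream 3][stream 4]
+ * The size of the 4th stream is implicit : it extends to the end of the
+ * compressed block. */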
+
+size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable)
+{
+ return HUF_compress4X_usingCTable_bmi2(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0);
+}
+
+size_t HUF_compress4X_usingCTable_bmi2(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int bmi2)
+{
+ return HUF_compress4X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, bmi2);
+}
+
+typedef enum { HUF_singleStream, HUF_fourStreams } HUF_nbStreams_e;
+
+static size_t HUF_compressCTable_internal(
+ BYTE* const ostart, BYTE* op, BYTE* const oend,
+ const void* src, size_t srcSize,
+ HUF_nbStreams_e nbStreams, const HUF_CElt* CTable, const int bmi2)
+{
+ size_t const cSize = (nbStreams==HUF_singleStream) ?
+ HUF_compress1X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, bmi2) :
+ HUF_compress4X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, bmi2);
+ if (HUF_isError(cSize)) { return cSize; }
+ if (cSize==0) { return 0; } /* uncompressible */
+ op += cSize;
+ /* check compressibility */
+ assert(op >= ostart);
+ if ((size_t)(op-ostart) >= srcSize-1) { return 0; }
+ return (size_t)(op-ostart);
+}
+
+typedef struct {
+ unsigned count[HUF_SYMBOLVALUE_MAX + 1];
+ HUF_CElt CTable[HUF_CTABLE_SIZE_ST(HUF_SYMBOLVALUE_MAX)];
+ union {
+ HUF_buildCTable_wksp_tables buildCTable_wksp;
+ HUF_WriteCTableWksp writeCTable_wksp;
+ U32 hist_wksp[HIST_WKSP_SIZE_U32];
+ } wksps;
+} HUF_compress_tables_t;
+
+#define SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE 4096
+#define SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO 10 /* Must be >= 2 */
+
+/* HUF_compress_internal() :
+ * `workSpace` must be aligned on 4-byte boundaries,
+ * and occupies the same space as a table of HUF_WORKSPACE_SIZE_U64 unsigned values */
+static size_t
+HUF_compress_internal (void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ unsigned maxSymbolValue, unsigned huffLog,
+ HUF_nbStreams_e nbStreams,
+ void* workSpace, size_t wkspSize,
+ HUF_CElt* oldHufTable, HUF_repeat* repeat, int preferRepeat,
+ const int bmi2, unsigned suspectUncompressible)
+{
+ HUF_compress_tables_t* const table = (HUF_compress_tables_t*)HUF_alignUpWorkspace(workSpace, &wkspSize, ZSTD_ALIGNOF(size_t));
+ BYTE* const ostart = (BYTE*)dst;
+ BYTE* const oend = ostart + dstSize;
+ BYTE* op = ostart;
+
+ HUF_STATIC_ASSERT(sizeof(*table) + HUF_WORKSPACE_MAX_ALIGNMENT <= HUF_WORKSPACE_SIZE);
+
+ /* checks & inits */
+ if (wkspSize < sizeof(*table)) return ERROR(workSpace_tooSmall);
+ if (!srcSize) return 0; /* Uncompressed */
+ if (!dstSize) return 0; /* cannot fit anything within dst budget */
+ if (srcSize > HUF_BLOCKSIZE_MAX) return ERROR(srcSize_wrong); /* current block size limit */
+ if (huffLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
+ if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge);
+ if (!maxSymbolValue) maxSymbolValue = HUF_SYMBOLVALUE_MAX;
+ if (!huffLog) huffLog = HUF_TABLELOG_DEFAULT;
+
+ /* Heuristic : If old table is valid, use it for small inputs */
+ if (preferRepeat && repeat && *repeat == HUF_repeat_valid) {
+ return HUF_compressCTable_internal(ostart, op, oend,
+ src, srcSize,
+ nbStreams, oldHufTable, bmi2);
+ }
+
+ /* If uncompressible data is suspected, do a smaller sampling first */
+ DEBUG_STATIC_ASSERT(SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO >= 2);
+ if (suspectUncompressible && srcSize >= (SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE * SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO)) {
+ size_t largestTotal = 0;
+ { unsigned maxSymbolValueBegin = maxSymbolValue;
+ CHECK_V_F(largestBegin, HIST_count_simple (table->count, &maxSymbolValueBegin, (const BYTE*)src, SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE) );
+ largestTotal += largestBegin;
+ }
+ { unsigned maxSymbolValueEnd = maxSymbolValue;
+ CHECK_V_F(largestEnd, HIST_count_simple (table->count, &maxSymbolValueEnd, (const BYTE*)src + srcSize - SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE, SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE) );
+ largestTotal += largestEnd;
+ }
+ if (largestTotal <= ((2 * SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE) >> 7)+4) return 0; /* heuristic : probably not compressible enough */
+ }
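+    /* e.g. with the constants above, the two 4096-byte samples must together
+     * yield a most-frequent-symbol total above ((2*4096) >> 7) + 4 == 68,
+     * i.e. the top symbol must cover ~0.8% of the sampled bytes, before the
+     * full histogram pass is attempted. */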
+
+ /* Scan input and build symbol stats */
+ { CHECK_V_F(largest, HIST_count_wksp (table->count, &maxSymbolValue, (const BYTE*)src, srcSize, table->wksps.hist_wksp, sizeof(table->wksps.hist_wksp)) );
+ if (largest == srcSize) { *ostart = ((const BYTE*)src)[0]; return 1; } /* single symbol, rle */
+ if (largest <= (srcSize >> 7)+4) return 0; /* heuristic : probably not compressible enough */
+ }
+
+ /* Check validity of previous table */
+ if ( repeat
+ && *repeat == HUF_repeat_check
+ && !HUF_validateCTable(oldHufTable, table->count, maxSymbolValue)) {
+ *repeat = HUF_repeat_none;
+ }
+ /* Heuristic : use existing table for small inputs */
+ if (preferRepeat && repeat && *repeat != HUF_repeat_none) {
+ return HUF_compressCTable_internal(ostart, op, oend,
+ src, srcSize,
+ nbStreams, oldHufTable, bmi2);
+ }
+
+ /* Build Huffman Tree */
+ huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);
+ { size_t const maxBits = HUF_buildCTable_wksp(table->CTable, table->count,
+ maxSymbolValue, huffLog,
+ &table->wksps.buildCTable_wksp, sizeof(table->wksps.buildCTable_wksp));
+ CHECK_F(maxBits);
+ huffLog = (U32)maxBits;
+ }
+ /* Zero unused symbols in CTable, so we can check it for validity */
+ {
+ size_t const ctableSize = HUF_CTABLE_SIZE_ST(maxSymbolValue);
+ size_t const unusedSize = sizeof(table->CTable) - ctableSize * sizeof(HUF_CElt);
+ ZSTD_memset(table->CTable + ctableSize, 0, unusedSize);
+ }
+
+ /* Write table description header */
+ { CHECK_V_F(hSize, HUF_writeCTable_wksp(op, dstSize, table->CTable, maxSymbolValue, huffLog,
+ &table->wksps.writeCTable_wksp, sizeof(table->wksps.writeCTable_wksp)) );
+ /* Check if using previous huffman table is beneficial */
+ if (repeat && *repeat != HUF_repeat_none) {
+ size_t const oldSize = HUF_estimateCompressedSize(oldHufTable, table->count, maxSymbolValue);
+ size_t const newSize = HUF_estimateCompressedSize(table->CTable, table->count, maxSymbolValue);
+ if (oldSize <= hSize + newSize || hSize + 12 >= srcSize) {
+ return HUF_compressCTable_internal(ostart, op, oend,
+ src, srcSize,
+ nbStreams, oldHufTable, bmi2);
+ } }
+
+ /* Use the new huffman table */
+ if (hSize + 12ul >= srcSize) { return 0; }
+ op += hSize;
+ if (repeat) { *repeat = HUF_repeat_none; }
+ if (oldHufTable)
+ ZSTD_memcpy(oldHufTable, table->CTable, sizeof(table->CTable)); /* Save new table */
+ }
+ return HUF_compressCTable_internal(ostart, op, oend,
+ src, srcSize,
+ nbStreams, table->CTable, bmi2);
+}
+
+
+size_t HUF_compress1X_wksp (void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ unsigned maxSymbolValue, unsigned huffLog,
+ void* workSpace, size_t wkspSize)
+{
+ return HUF_compress_internal(dst, dstSize, src, srcSize,
+ maxSymbolValue, huffLog, HUF_singleStream,
+ workSpace, wkspSize,
+ NULL, NULL, 0, 0 /*bmi2*/, 0);
+}
+
+size_t HUF_compress1X_repeat (void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ unsigned maxSymbolValue, unsigned huffLog,
+ void* workSpace, size_t wkspSize,
+ HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat,
+ int bmi2, unsigned suspectUncompressible)
+{
+ return HUF_compress_internal(dst, dstSize, src, srcSize,
+ maxSymbolValue, huffLog, HUF_singleStream,
+ workSpace, wkspSize, hufTable,
+ repeat, preferRepeat, bmi2, suspectUncompressible);
+}
+
+/* HUF_compress4X_wksp():
+ * compress input using 4 streams.
+ * provide workspace to generate compression tables */
+size_t HUF_compress4X_wksp (void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ unsigned maxSymbolValue, unsigned huffLog,
+ void* workSpace, size_t wkspSize)
+{
+ return HUF_compress_internal(dst, dstSize, src, srcSize,
+ maxSymbolValue, huffLog, HUF_fourStreams,
+ workSpace, wkspSize,
+ NULL, NULL, 0, 0 /*bmi2*/, 0);
+}
+
+/* HUF_compress4X_repeat():
+ * compress input using 4 streams.
+ * can sample the input to quickly skip incompressible data,
+ * and re-use an existing Huffman compression table */
+size_t HUF_compress4X_repeat (void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ unsigned maxSymbolValue, unsigned huffLog,
+ void* workSpace, size_t wkspSize,
+ HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2, unsigned suspectUncompressible)
+{
+ return HUF_compress_internal(dst, dstSize, src, srcSize,
+ maxSymbolValue, huffLog, HUF_fourStreams,
+ workSpace, wkspSize,
+ hufTable, repeat, preferRepeat, bmi2, suspectUncompressible);
+}
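+
+/* Editor's note: the following is an illustrative sketch, not part of the
+ * upstream zstd sources. It shows one plausible way a caller could drive
+ * HUF_compress1X_wksp() directly, assuming the declarations from huf.h are
+ * in scope (HUF_WORKSPACE_SIZE_U64 and HUF_TABLELOG_DEFAULT are taken from
+ * huf.h; the function name and parameters here are hypothetical). A real
+ * caller would place the workspace in a pre-allocated arena rather than on
+ * the stack. */
+static size_t example_huf_compress(void* dst, size_t dstCapacity,
+                                   const void* src, size_t srcSize)
+{
+    /* scratch space for building the Huffman table */
+    U64 wksp[HUF_WORKSPACE_SIZE_U64];
+    size_t const cSize = HUF_compress1X_wksp(dst, dstCapacity, src, srcSize,
+                                             255 /* maxSymbolValue */,
+                                             HUF_TABLELOG_DEFAULT,
+                                             wksp, sizeof(wksp));
+    if (HUF_isError(cSize)) return cSize; /* propagate error code */
+    /* cSize == 0 means the input was judged not compressible */
+    return cSize;
+}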
+
diff --git a/lib/zstd/compress/zstd_compress.c b/lib/zstd/compress/zstd_compress.c
new file mode 100644
index 000000000..f620cafca
--- /dev/null
+++ b/lib/zstd/compress/zstd_compress.c
@@ -0,0 +1,6127 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+/*-*************************************
+* Dependencies
+***************************************/
+#include "../common/zstd_deps.h" /* INT_MAX, ZSTD_memset, ZSTD_memcpy */
+#include "../common/mem.h"
+#include "hist.h" /* HIST_countFast_wksp */
+#define FSE_STATIC_LINKING_ONLY /* FSE_encodeSymbol */
+#include "../common/fse.h"
+#define HUF_STATIC_LINKING_ONLY
+#include "../common/huf.h"
+#include "zstd_compress_internal.h"
+#include "zstd_compress_sequences.h"
+#include "zstd_compress_literals.h"
+#include "zstd_fast.h"
+#include "zstd_double_fast.h"
+#include "zstd_lazy.h"
+#include "zstd_opt.h"
+#include "zstd_ldm.h"
+#include "zstd_compress_superblock.h"
+
+/* ***************************************************************
+* Tuning parameters
+*****************************************************************/
+/*!
+ * COMPRESS_HEAPMODE :
+ * Select how the default compression function ZSTD_compress() allocates its context,
+ * on stack (0, default), or on the heap (1).
+ * Note that functions with an explicit context, such as ZSTD_compressCCtx(), are unaffected.
+ */
+
+/*!
+ * ZSTD_HASHLOG3_MAX :
+ * Maximum size of the hash table dedicated to finding 3-byte matches,
+ * in log format, i.e. 17 => 1 << 17 == 128Ki positions.
+ * This structure is only used in zstd_opt.
+ * Since allocation is centralized for all strategies, it has to be known here.
+ * The actual (selected) size of the hash table is then stored in ZSTD_matchState_t.hashLog3,
+ * so that zstd_opt.c doesn't need to know about this constant.
+ */
+#ifndef ZSTD_HASHLOG3_MAX
+# define ZSTD_HASHLOG3_MAX 17
+#endif
+
+/*-*************************************
+* Helper functions
+***************************************/
+/* ZSTD_compressBound()
+ * Note that the result from this function is only compatible with the "normal"
+ * full-block strategy.
+ * When there are many small blocks due to frequent flushes in streaming mode,
+ * the overhead of block headers can make the compressed data larger than the
+ * return value of ZSTD_compressBound().
+ */
+size_t ZSTD_compressBound(size_t srcSize) {
+ return ZSTD_COMPRESSBOUND(srcSize);
+}
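+
+/* Editor's note: illustrative sketch, not part of the upstream sources.
+ * Sizing the destination with ZSTD_compressBound() guarantees that a
+ * one-shot, full-block compression cannot fail for lack of output room.
+ * The function name is hypothetical; ZSTD_compressCCtx() is the public
+ * one-shot entry point declared in zstd.h. */
+static size_t example_bounded_compress(ZSTD_CCtx* cctx,
+                                       void* dst, size_t dstCapacity,
+                                       const void* src, size_t srcSize)
+{
+    /* caller is expected to have allocated dstCapacity >= ZSTD_compressBound(srcSize) */
+    assert(dstCapacity >= ZSTD_compressBound(srcSize));
+    return ZSTD_compressCCtx(cctx, dst, dstCapacity, src, srcSize, 3 /* level */);
+}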
+
+
+/*-*************************************
+* Context memory management
+***************************************/
+struct ZSTD_CDict_s {
+ const void* dictContent;
+ size_t dictContentSize;
+ ZSTD_dictContentType_e dictContentType; /* The dictContentType the CDict was created with */
+ U32* entropyWorkspace; /* entropy workspace of HUF_WORKSPACE_SIZE bytes */
+ ZSTD_cwksp workspace;
+ ZSTD_matchState_t matchState;
+ ZSTD_compressedBlockState_t cBlockState;
+ ZSTD_customMem customMem;
+ U32 dictID;
+ int compressionLevel; /* 0 indicates that advanced API was used to select CDict params */
+ ZSTD_paramSwitch_e useRowMatchFinder; /* Indicates whether the CDict was created with params that would use
+ * row-based matchfinder. Unless the cdict is reloaded, we will use
+ * the same greedy/lazy matchfinder at compression time.
+ */
+}; /* typedef'd to ZSTD_CDict within "zstd.h" */
+
+ZSTD_CCtx* ZSTD_createCCtx(void)
+{
+ return ZSTD_createCCtx_advanced(ZSTD_defaultCMem);
+}
+
+static void ZSTD_initCCtx(ZSTD_CCtx* cctx, ZSTD_customMem memManager)
+{
+ assert(cctx != NULL);
+ ZSTD_memset(cctx, 0, sizeof(*cctx));
+ cctx->customMem = memManager;
+ cctx->bmi2 = ZSTD_cpuSupportsBmi2();
+ { size_t const err = ZSTD_CCtx_reset(cctx, ZSTD_reset_parameters);
+ assert(!ZSTD_isError(err));
+ (void)err;
+ }
+}
+
+ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem)
+{
+ ZSTD_STATIC_ASSERT(zcss_init==0);
+ ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN==(0ULL - 1));
+ if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
+ { ZSTD_CCtx* const cctx = (ZSTD_CCtx*)ZSTD_customMalloc(sizeof(ZSTD_CCtx), customMem);
+ if (!cctx) return NULL;
+ ZSTD_initCCtx(cctx, customMem);
+ return cctx;
+ }
+}
+
+ZSTD_CCtx* ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize)
+{
+ ZSTD_cwksp ws;
+ ZSTD_CCtx* cctx;
+ if (workspaceSize <= sizeof(ZSTD_CCtx)) return NULL; /* minimum size */
+ if ((size_t)workspace & 7) return NULL; /* must be 8-aligned */
+ ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_static_alloc);
+
+ cctx = (ZSTD_CCtx*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CCtx));
+ if (cctx == NULL) return NULL;
+
+ ZSTD_memset(cctx, 0, sizeof(ZSTD_CCtx));
+ ZSTD_cwksp_move(&cctx->workspace, &ws);
+ cctx->staticSize = workspaceSize;
+
+ /* statically sized space. entropyWorkspace never moves (but prev/next block swap places) */
+ if (!ZSTD_cwksp_check_available(&cctx->workspace, ENTROPY_WORKSPACE_SIZE + 2 * sizeof(ZSTD_compressedBlockState_t))) return NULL;
+ cctx->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t));
+ cctx->blockState.nextCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t));
+ cctx->entropyWorkspace = (U32*)ZSTD_cwksp_reserve_object(&cctx->workspace, ENTROPY_WORKSPACE_SIZE);
+ cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
+ return cctx;
+}
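+
+/* Editor's note: illustrative sketch, not part of the upstream sources.
+ * Typical usage of ZSTD_initStaticCCtx(): size an arena with
+ * ZSTD_estimateCCtxSize() (defined later in this file), hand the arena
+ * over, and never free the returned context. `arena` and the function
+ * name are hypothetical. */
+static ZSTD_CCtx* example_cctx_in_arena(void* arena, size_t arenaSize, int level)
+{
+    if (arenaSize < ZSTD_estimateCCtxSize(level))
+        return NULL; /* arena too small for this compression level */
+    /* returns NULL if arena is not 8-byte aligned or otherwise unusable */
+    return ZSTD_initStaticCCtx(arena, arenaSize);
+}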
+
+/*
+ * Clears and frees all of the dictionaries in the CCtx.
+ */
+static void ZSTD_clearAllDicts(ZSTD_CCtx* cctx)
+{
+ ZSTD_customFree(cctx->localDict.dictBuffer, cctx->customMem);
+ ZSTD_freeCDict(cctx->localDict.cdict);
+ ZSTD_memset(&cctx->localDict, 0, sizeof(cctx->localDict));
+ ZSTD_memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict));
+ cctx->cdict = NULL;
+}
+
+static size_t ZSTD_sizeof_localDict(ZSTD_localDict dict)
+{
+ size_t const bufferSize = dict.dictBuffer != NULL ? dict.dictSize : 0;
+ size_t const cdictSize = ZSTD_sizeof_CDict(dict.cdict);
+ return bufferSize + cdictSize;
+}
+
+static void ZSTD_freeCCtxContent(ZSTD_CCtx* cctx)
+{
+ assert(cctx != NULL);
+ assert(cctx->staticSize == 0);
+ ZSTD_clearAllDicts(cctx);
+ ZSTD_cwksp_free(&cctx->workspace, cctx->customMem);
+}
+
+size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx)
+{
+ if (cctx==NULL) return 0; /* support free on NULL */
+ RETURN_ERROR_IF(cctx->staticSize, memory_allocation,
+ "not compatible with static CCtx");
+ {
+ int cctxInWorkspace = ZSTD_cwksp_owns_buffer(&cctx->workspace, cctx);
+ ZSTD_freeCCtxContent(cctx);
+ if (!cctxInWorkspace) {
+ ZSTD_customFree(cctx, cctx->customMem);
+ }
+ }
+ return 0;
+}
+
+
+static size_t ZSTD_sizeof_mtctx(const ZSTD_CCtx* cctx)
+{
+ (void)cctx;
+ return 0;
+}
+
+
+size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx)
+{
+ if (cctx==NULL) return 0; /* support sizeof on NULL */
+ /* cctx may be in the workspace */
+ return (cctx->workspace.workspace == cctx ? 0 : sizeof(*cctx))
+ + ZSTD_cwksp_sizeof(&cctx->workspace)
+ + ZSTD_sizeof_localDict(cctx->localDict)
+ + ZSTD_sizeof_mtctx(cctx);
+}
+
+size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs)
+{
+ return ZSTD_sizeof_CCtx(zcs); /* same object */
+}
+
+/* private API call, for dictBuilder only */
+const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx) { return &(ctx->seqStore); }
+
+/* Returns true if the strategy supports using a row based matchfinder */
+static int ZSTD_rowMatchFinderSupported(const ZSTD_strategy strategy) {
+ return (strategy >= ZSTD_greedy && strategy <= ZSTD_lazy2);
+}
+
+/* Returns true if the strategy and useRowMatchFinder mode indicate that we will use the row based matchfinder
+ * for this compression.
+ */
+static int ZSTD_rowMatchFinderUsed(const ZSTD_strategy strategy, const ZSTD_paramSwitch_e mode) {
+ assert(mode != ZSTD_ps_auto);
+ return ZSTD_rowMatchFinderSupported(strategy) && (mode == ZSTD_ps_enable);
+}
+
+/* Returns row matchfinder usage given an initial mode and cParams */
+static ZSTD_paramSwitch_e ZSTD_resolveRowMatchFinderMode(ZSTD_paramSwitch_e mode,
+ const ZSTD_compressionParameters* const cParams) {
+#if defined(ZSTD_ARCH_X86_SSE2) || defined(ZSTD_ARCH_ARM_NEON)
+ int const kHasSIMD128 = 1;
+#else
+ int const kHasSIMD128 = 0;
+#endif
+ if (mode != ZSTD_ps_auto) return mode; /* if explicitly requested, honor it even without SIMD support */
+ mode = ZSTD_ps_disable;
+ if (!ZSTD_rowMatchFinderSupported(cParams->strategy)) return mode;
+ if (kHasSIMD128) {
+ if (cParams->windowLog > 14) mode = ZSTD_ps_enable;
+ } else {
+ if (cParams->windowLog > 17) mode = ZSTD_ps_enable;
+ }
+ return mode;
+}
+
+/* Resolves an auto block-splitter mode: enabled for slower/stronger compression modes (btopt and above with a large enough window) */
+static ZSTD_paramSwitch_e ZSTD_resolveBlockSplitterMode(ZSTD_paramSwitch_e mode,
+ const ZSTD_compressionParameters* const cParams) {
+ if (mode != ZSTD_ps_auto) return mode;
+ return (cParams->strategy >= ZSTD_btopt && cParams->windowLog >= 17) ? ZSTD_ps_enable : ZSTD_ps_disable;
+}
+
+/* Returns 1 if the arguments indicate that we should allocate a chainTable, 0 otherwise */
+static int ZSTD_allocateChainTable(const ZSTD_strategy strategy,
+ const ZSTD_paramSwitch_e useRowMatchFinder,
+ const U32 forDDSDict) {
+ assert(useRowMatchFinder != ZSTD_ps_auto);
+ /* We should always allocate a chain table when allocating a matchstate for a DDS dictionary.
+ * We do not allocate a chain table when using ZSTD_fast or the row-based matchfinder.
+ */
+ return forDDSDict || ((strategy != ZSTD_fast) && !ZSTD_rowMatchFinderUsed(strategy, useRowMatchFinder));
+}
+
+/* Resolves an auto LDM mode: returns ZSTD_ps_enable if compression parameters
+ * are such that long distance matching should be enabled (wlog >= 27, strategy >= btopt),
+ * ZSTD_ps_disable otherwise. Explicit (non-auto) settings are returned unchanged.
+ */
+static ZSTD_paramSwitch_e ZSTD_resolveEnableLdm(ZSTD_paramSwitch_e mode,
+ const ZSTD_compressionParameters* const cParams) {
+ if (mode != ZSTD_ps_auto) return mode;
+ return (cParams->strategy >= ZSTD_btopt && cParams->windowLog >= 27) ? ZSTD_ps_enable : ZSTD_ps_disable;
+}
+
+static ZSTD_CCtx_params ZSTD_makeCCtxParamsFromCParams(
+ ZSTD_compressionParameters cParams)
+{
+ ZSTD_CCtx_params cctxParams;
+ /* should not matter, as all cParams are presumed properly defined */
+ ZSTD_CCtxParams_init(&cctxParams, ZSTD_CLEVEL_DEFAULT);
+ cctxParams.cParams = cParams;
+
+ /* Adjust advanced params according to cParams */
+ cctxParams.ldmParams.enableLdm = ZSTD_resolveEnableLdm(cctxParams.ldmParams.enableLdm, &cParams);
+ if (cctxParams.ldmParams.enableLdm == ZSTD_ps_enable) {
+ ZSTD_ldm_adjustParameters(&cctxParams.ldmParams, &cParams);
+ assert(cctxParams.ldmParams.hashLog >= cctxParams.ldmParams.bucketSizeLog);
+ assert(cctxParams.ldmParams.hashRateLog < 32);
+ }
+ cctxParams.useBlockSplitter = ZSTD_resolveBlockSplitterMode(cctxParams.useBlockSplitter, &cParams);
+ cctxParams.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams.useRowMatchFinder, &cParams);
+ assert(!ZSTD_checkCParams(cParams));
+ return cctxParams;
+}
+
+static ZSTD_CCtx_params* ZSTD_createCCtxParams_advanced(
+ ZSTD_customMem customMem)
+{
+ ZSTD_CCtx_params* params;
+ if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
+ params = (ZSTD_CCtx_params*)ZSTD_customCalloc(
+ sizeof(ZSTD_CCtx_params), customMem);
+ if (!params) { return NULL; }
+ ZSTD_CCtxParams_init(params, ZSTD_CLEVEL_DEFAULT);
+ params->customMem = customMem;
+ return params;
+}
+
+ZSTD_CCtx_params* ZSTD_createCCtxParams(void)
+{
+ return ZSTD_createCCtxParams_advanced(ZSTD_defaultCMem);
+}
+
+size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params)
+{
+ if (params == NULL) { return 0; }
+ ZSTD_customFree(params, params->customMem);
+ return 0;
+}
+
+size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params)
+{
+ return ZSTD_CCtxParams_init(params, ZSTD_CLEVEL_DEFAULT);
+}
+
+size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel) {
+ RETURN_ERROR_IF(!cctxParams, GENERIC, "NULL pointer!");
+ ZSTD_memset(cctxParams, 0, sizeof(*cctxParams));
+ cctxParams->compressionLevel = compressionLevel;
+ cctxParams->fParams.contentSizeFlag = 1;
+ return 0;
+}
+
+#define ZSTD_NO_CLEVEL 0
+
+/*
+ * Initializes the cctxParams from params and compressionLevel.
+ * @param compressionLevel If params are derived from a compression level then that compression level, otherwise ZSTD_NO_CLEVEL.
+ */
+static void ZSTD_CCtxParams_init_internal(ZSTD_CCtx_params* cctxParams, ZSTD_parameters const* params, int compressionLevel)
+{
+ assert(!ZSTD_checkCParams(params->cParams));
+ ZSTD_memset(cctxParams, 0, sizeof(*cctxParams));
+ cctxParams->cParams = params->cParams;
+ cctxParams->fParams = params->fParams;
+ /* Should not matter, as all cParams are presumed properly defined.
+ * But, set it for tracing anyway.
+ */
+ cctxParams->compressionLevel = compressionLevel;
+ cctxParams->useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams->useRowMatchFinder, &params->cParams);
+ cctxParams->useBlockSplitter = ZSTD_resolveBlockSplitterMode(cctxParams->useBlockSplitter, &params->cParams);
+ cctxParams->ldmParams.enableLdm = ZSTD_resolveEnableLdm(cctxParams->ldmParams.enableLdm, &params->cParams);
+ DEBUGLOG(4, "ZSTD_CCtxParams_init_internal: useRowMatchFinder=%d, useBlockSplitter=%d ldm=%d",
+ cctxParams->useRowMatchFinder, cctxParams->useBlockSplitter, cctxParams->ldmParams.enableLdm);
+}
+
+size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params)
+{
+ RETURN_ERROR_IF(!cctxParams, GENERIC, "NULL pointer!");
+ FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) , "");
+ ZSTD_CCtxParams_init_internal(cctxParams, &params, ZSTD_NO_CLEVEL);
+ return 0;
+}
+
+/*
+ * Sets cctxParams' cParams and fParams from params, but otherwise leaves them alone.
+ * @param param Validated zstd parameters.
+ */
+static void ZSTD_CCtxParams_setZstdParams(
+ ZSTD_CCtx_params* cctxParams, const ZSTD_parameters* params)
+{
+ assert(!ZSTD_checkCParams(params->cParams));
+ cctxParams->cParams = params->cParams;
+ cctxParams->fParams = params->fParams;
+ /* Should not matter, as all cParams are presumed properly defined.
+ * But, set it for tracing anyway.
+ */
+ cctxParams->compressionLevel = ZSTD_NO_CLEVEL;
+}
+
+ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param)
+{
+ ZSTD_bounds bounds = { 0, 0, 0 };
+
+ switch(param)
+ {
+ case ZSTD_c_compressionLevel:
+ bounds.lowerBound = ZSTD_minCLevel();
+ bounds.upperBound = ZSTD_maxCLevel();
+ return bounds;
+
+ case ZSTD_c_windowLog:
+ bounds.lowerBound = ZSTD_WINDOWLOG_MIN;
+ bounds.upperBound = ZSTD_WINDOWLOG_MAX;
+ return bounds;
+
+ case ZSTD_c_hashLog:
+ bounds.lowerBound = ZSTD_HASHLOG_MIN;
+ bounds.upperBound = ZSTD_HASHLOG_MAX;
+ return bounds;
+
+ case ZSTD_c_chainLog:
+ bounds.lowerBound = ZSTD_CHAINLOG_MIN;
+ bounds.upperBound = ZSTD_CHAINLOG_MAX;
+ return bounds;
+
+ case ZSTD_c_searchLog:
+ bounds.lowerBound = ZSTD_SEARCHLOG_MIN;
+ bounds.upperBound = ZSTD_SEARCHLOG_MAX;
+ return bounds;
+
+ case ZSTD_c_minMatch:
+ bounds.lowerBound = ZSTD_MINMATCH_MIN;
+ bounds.upperBound = ZSTD_MINMATCH_MAX;
+ return bounds;
+
+ case ZSTD_c_targetLength:
+ bounds.lowerBound = ZSTD_TARGETLENGTH_MIN;
+ bounds.upperBound = ZSTD_TARGETLENGTH_MAX;
+ return bounds;
+
+ case ZSTD_c_strategy:
+ bounds.lowerBound = ZSTD_STRATEGY_MIN;
+ bounds.upperBound = ZSTD_STRATEGY_MAX;
+ return bounds;
+
+ case ZSTD_c_contentSizeFlag:
+ bounds.lowerBound = 0;
+ bounds.upperBound = 1;
+ return bounds;
+
+ case ZSTD_c_checksumFlag:
+ bounds.lowerBound = 0;
+ bounds.upperBound = 1;
+ return bounds;
+
+ case ZSTD_c_dictIDFlag:
+ bounds.lowerBound = 0;
+ bounds.upperBound = 1;
+ return bounds;
+
+ case ZSTD_c_nbWorkers:
+ bounds.lowerBound = 0;
+ bounds.upperBound = 0;
+ return bounds;
+
+ case ZSTD_c_jobSize:
+ bounds.lowerBound = 0;
+ bounds.upperBound = 0;
+ return bounds;
+
+ case ZSTD_c_overlapLog:
+ bounds.lowerBound = 0;
+ bounds.upperBound = 0;
+ return bounds;
+
+ case ZSTD_c_enableDedicatedDictSearch:
+ bounds.lowerBound = 0;
+ bounds.upperBound = 1;
+ return bounds;
+
+ case ZSTD_c_enableLongDistanceMatching:
+ bounds.lowerBound = 0;
+ bounds.upperBound = 1;
+ return bounds;
+
+ case ZSTD_c_ldmHashLog:
+ bounds.lowerBound = ZSTD_LDM_HASHLOG_MIN;
+ bounds.upperBound = ZSTD_LDM_HASHLOG_MAX;
+ return bounds;
+
+ case ZSTD_c_ldmMinMatch:
+ bounds.lowerBound = ZSTD_LDM_MINMATCH_MIN;
+ bounds.upperBound = ZSTD_LDM_MINMATCH_MAX;
+ return bounds;
+
+ case ZSTD_c_ldmBucketSizeLog:
+ bounds.lowerBound = ZSTD_LDM_BUCKETSIZELOG_MIN;
+ bounds.upperBound = ZSTD_LDM_BUCKETSIZELOG_MAX;
+ return bounds;
+
+ case ZSTD_c_ldmHashRateLog:
+ bounds.lowerBound = ZSTD_LDM_HASHRATELOG_MIN;
+ bounds.upperBound = ZSTD_LDM_HASHRATELOG_MAX;
+ return bounds;
+
+ /* experimental parameters */
+ case ZSTD_c_rsyncable:
+ bounds.lowerBound = 0;
+ bounds.upperBound = 1;
+ return bounds;
+
+ case ZSTD_c_forceMaxWindow :
+ bounds.lowerBound = 0;
+ bounds.upperBound = 1;
+ return bounds;
+
+ case ZSTD_c_format:
+ ZSTD_STATIC_ASSERT(ZSTD_f_zstd1 < ZSTD_f_zstd1_magicless);
+ bounds.lowerBound = ZSTD_f_zstd1;
+ bounds.upperBound = ZSTD_f_zstd1_magicless; /* note : how can we ensure at compile time that this is the highest enum value ? */
+ return bounds;
+
+ case ZSTD_c_forceAttachDict:
+ ZSTD_STATIC_ASSERT(ZSTD_dictDefaultAttach < ZSTD_dictForceLoad);
+ bounds.lowerBound = ZSTD_dictDefaultAttach;
+ bounds.upperBound = ZSTD_dictForceLoad; /* note : how can we ensure at compile time that this is the highest enum value ? */
+ return bounds;
+
+ case ZSTD_c_literalCompressionMode:
+ ZSTD_STATIC_ASSERT(ZSTD_ps_auto < ZSTD_ps_enable && ZSTD_ps_enable < ZSTD_ps_disable);
+ bounds.lowerBound = (int)ZSTD_ps_auto;
+ bounds.upperBound = (int)ZSTD_ps_disable;
+ return bounds;
+
+ case ZSTD_c_targetCBlockSize:
+ bounds.lowerBound = ZSTD_TARGETCBLOCKSIZE_MIN;
+ bounds.upperBound = ZSTD_TARGETCBLOCKSIZE_MAX;
+ return bounds;
+
+ case ZSTD_c_srcSizeHint:
+ bounds.lowerBound = ZSTD_SRCSIZEHINT_MIN;
+ bounds.upperBound = ZSTD_SRCSIZEHINT_MAX;
+ return bounds;
+
+ case ZSTD_c_stableInBuffer:
+ case ZSTD_c_stableOutBuffer:
+ bounds.lowerBound = (int)ZSTD_bm_buffered;
+ bounds.upperBound = (int)ZSTD_bm_stable;
+ return bounds;
+
+ case ZSTD_c_blockDelimiters:
+ bounds.lowerBound = (int)ZSTD_sf_noBlockDelimiters;
+ bounds.upperBound = (int)ZSTD_sf_explicitBlockDelimiters;
+ return bounds;
+
+ case ZSTD_c_validateSequences:
+ bounds.lowerBound = 0;
+ bounds.upperBound = 1;
+ return bounds;
+
+ case ZSTD_c_useBlockSplitter:
+ bounds.lowerBound = (int)ZSTD_ps_auto;
+ bounds.upperBound = (int)ZSTD_ps_disable;
+ return bounds;
+
+ case ZSTD_c_useRowMatchFinder:
+ bounds.lowerBound = (int)ZSTD_ps_auto;
+ bounds.upperBound = (int)ZSTD_ps_disable;
+ return bounds;
+
+ case ZSTD_c_deterministicRefPrefix:
+ bounds.lowerBound = 0;
+ bounds.upperBound = 1;
+ return bounds;
+
+ default:
+ bounds.error = ERROR(parameter_unsupported);
+ return bounds;
+ }
+}
+
+/* ZSTD_cParam_clampBounds:
+ * Clamps the value into the bounded range.
+ */
+static size_t ZSTD_cParam_clampBounds(ZSTD_cParameter cParam, int* value)
+{
+ ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam);
+ if (ZSTD_isError(bounds.error)) return bounds.error;
+ if (*value < bounds.lowerBound) *value = bounds.lowerBound;
+ if (*value > bounds.upperBound) *value = bounds.upperBound;
+ return 0;
+}
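+
+/* Editor's note: illustrative sketch, not part of the upstream sources.
+ * ZSTD_cParam_getBounds() is the public way for callers to perform the
+ * same clamping; the function name here is hypothetical. */
+static int example_clamped_level(int requested)
+{
+    ZSTD_bounds const b = ZSTD_cParam_getBounds(ZSTD_c_compressionLevel);
+    if (ZSTD_isError(b.error)) return ZSTD_CLEVEL_DEFAULT;
+    if (requested < b.lowerBound) return b.lowerBound;
+    if (requested > b.upperBound) return b.upperBound;
+    return requested;
+}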
+
+#define BOUNDCHECK(cParam, val) { \
+ RETURN_ERROR_IF(!ZSTD_cParam_withinBounds(cParam,val), \
+ parameter_outOfBound, "Param out of bounds"); \
+}
+
+
+static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param)
+{
+ switch(param)
+ {
+ case ZSTD_c_compressionLevel:
+ case ZSTD_c_hashLog:
+ case ZSTD_c_chainLog:
+ case ZSTD_c_searchLog:
+ case ZSTD_c_minMatch:
+ case ZSTD_c_targetLength:
+ case ZSTD_c_strategy:
+ return 1;
+
+ case ZSTD_c_format:
+ case ZSTD_c_windowLog:
+ case ZSTD_c_contentSizeFlag:
+ case ZSTD_c_checksumFlag:
+ case ZSTD_c_dictIDFlag:
+ case ZSTD_c_forceMaxWindow :
+ case ZSTD_c_nbWorkers:
+ case ZSTD_c_jobSize:
+ case ZSTD_c_overlapLog:
+ case ZSTD_c_rsyncable:
+ case ZSTD_c_enableDedicatedDictSearch:
+ case ZSTD_c_enableLongDistanceMatching:
+ case ZSTD_c_ldmHashLog:
+ case ZSTD_c_ldmMinMatch:
+ case ZSTD_c_ldmBucketSizeLog:
+ case ZSTD_c_ldmHashRateLog:
+ case ZSTD_c_forceAttachDict:
+ case ZSTD_c_literalCompressionMode:
+ case ZSTD_c_targetCBlockSize:
+ case ZSTD_c_srcSizeHint:
+ case ZSTD_c_stableInBuffer:
+ case ZSTD_c_stableOutBuffer:
+ case ZSTD_c_blockDelimiters:
+ case ZSTD_c_validateSequences:
+ case ZSTD_c_useBlockSplitter:
+ case ZSTD_c_useRowMatchFinder:
+ case ZSTD_c_deterministicRefPrefix:
+ default:
+ return 0;
+ }
+}
+
+size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value)
+{
+ DEBUGLOG(4, "ZSTD_CCtx_setParameter (%i, %i)", (int)param, value);
+ if (cctx->streamStage != zcss_init) {
+ if (ZSTD_isUpdateAuthorized(param)) {
+ cctx->cParamsChanged = 1;
+ } else {
+ RETURN_ERROR(stage_wrong, "can only set params in ctx init stage");
+ } }
+
+ switch(param)
+ {
+ case ZSTD_c_nbWorkers:
+ RETURN_ERROR_IF((value!=0) && cctx->staticSize, parameter_unsupported,
+ "MT not compatible with static alloc");
+ break;
+
+ case ZSTD_c_compressionLevel:
+ case ZSTD_c_windowLog:
+ case ZSTD_c_hashLog:
+ case ZSTD_c_chainLog:
+ case ZSTD_c_searchLog:
+ case ZSTD_c_minMatch:
+ case ZSTD_c_targetLength:
+ case ZSTD_c_strategy:
+ case ZSTD_c_ldmHashRateLog:
+ case ZSTD_c_format:
+ case ZSTD_c_contentSizeFlag:
+ case ZSTD_c_checksumFlag:
+ case ZSTD_c_dictIDFlag:
+ case ZSTD_c_forceMaxWindow:
+ case ZSTD_c_forceAttachDict:
+ case ZSTD_c_literalCompressionMode:
+ case ZSTD_c_jobSize:
+ case ZSTD_c_overlapLog:
+ case ZSTD_c_rsyncable:
+ case ZSTD_c_enableDedicatedDictSearch:
+ case ZSTD_c_enableLongDistanceMatching:
+ case ZSTD_c_ldmHashLog:
+ case ZSTD_c_ldmMinMatch:
+ case ZSTD_c_ldmBucketSizeLog:
+ case ZSTD_c_targetCBlockSize:
+ case ZSTD_c_srcSizeHint:
+ case ZSTD_c_stableInBuffer:
+ case ZSTD_c_stableOutBuffer:
+ case ZSTD_c_blockDelimiters:
+ case ZSTD_c_validateSequences:
+ case ZSTD_c_useBlockSplitter:
+ case ZSTD_c_useRowMatchFinder:
+ case ZSTD_c_deterministicRefPrefix:
+ break;
+
+ default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
+ }
+ return ZSTD_CCtxParams_setParameter(&cctx->requestedParams, param, value);
+}
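+
+/* Editor's note: illustrative sketch, not part of the upstream sources.
+ * Parameters are set one at a time before the first compression call;
+ * every setter returns a value checkable with ZSTD_isError(). The
+ * function name and chosen values are hypothetical. */
+static size_t example_configure_cctx(ZSTD_CCtx* cctx)
+{
+    size_t err;
+    err = ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 19);
+    if (ZSTD_isError(err)) return err;
+    err = ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 1);
+    if (ZSTD_isError(err)) return err;
+    /* a value of 0 would mean "use the default window size" */
+    return ZSTD_CCtx_setParameter(cctx, ZSTD_c_windowLog, 23);
+}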
+
+size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,
+ ZSTD_cParameter param, int value)
+{
+ DEBUGLOG(4, "ZSTD_CCtxParams_setParameter (%i, %i)", (int)param, value);
+ switch(param)
+ {
+ case ZSTD_c_format :
+ BOUNDCHECK(ZSTD_c_format, value);
+ CCtxParams->format = (ZSTD_format_e)value;
+ return (size_t)CCtxParams->format;
+
+ case ZSTD_c_compressionLevel : {
+ FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value), "");
+ if (value == 0)
+ CCtxParams->compressionLevel = ZSTD_CLEVEL_DEFAULT; /* 0 == default */
+ else
+ CCtxParams->compressionLevel = value;
+ if (CCtxParams->compressionLevel >= 0) return (size_t)CCtxParams->compressionLevel;
+ return 0; /* return type (size_t) cannot represent negative values */
+ }
+
+ case ZSTD_c_windowLog :
+ if (value!=0) /* 0 => use default */
+ BOUNDCHECK(ZSTD_c_windowLog, value);
+ CCtxParams->cParams.windowLog = (U32)value;
+ return CCtxParams->cParams.windowLog;
+
+ case ZSTD_c_hashLog :
+ if (value!=0) /* 0 => use default */
+ BOUNDCHECK(ZSTD_c_hashLog, value);
+ CCtxParams->cParams.hashLog = (U32)value;
+ return CCtxParams->cParams.hashLog;
+
+ case ZSTD_c_chainLog :
+ if (value!=0) /* 0 => use default */
+ BOUNDCHECK(ZSTD_c_chainLog, value);
+ CCtxParams->cParams.chainLog = (U32)value;
+ return CCtxParams->cParams.chainLog;
+
+ case ZSTD_c_searchLog :
+ if (value!=0) /* 0 => use default */
+ BOUNDCHECK(ZSTD_c_searchLog, value);
+ CCtxParams->cParams.searchLog = (U32)value;
+ return (size_t)value;
+
+ case ZSTD_c_minMatch :
+ if (value!=0) /* 0 => use default */
+ BOUNDCHECK(ZSTD_c_minMatch, value);
+ CCtxParams->cParams.minMatch = value;
+ return CCtxParams->cParams.minMatch;
+
+ case ZSTD_c_targetLength :
+ BOUNDCHECK(ZSTD_c_targetLength, value);
+ CCtxParams->cParams.targetLength = value;
+ return CCtxParams->cParams.targetLength;
+
+ case ZSTD_c_strategy :
+ if (value!=0) /* 0 => use default */
+ BOUNDCHECK(ZSTD_c_strategy, value);
+ CCtxParams->cParams.strategy = (ZSTD_strategy)value;
+ return (size_t)CCtxParams->cParams.strategy;
+
+ case ZSTD_c_contentSizeFlag :
+ /* Content size written in frame header _when known_ (default:1) */
+ DEBUGLOG(4, "set content size flag = %u", (value!=0));
+ CCtxParams->fParams.contentSizeFlag = value != 0;
+ return CCtxParams->fParams.contentSizeFlag;
+
+ case ZSTD_c_checksumFlag :
+ /* A 32-bit content checksum will be calculated and written at the end of the frame (default:0) */
+ CCtxParams->fParams.checksumFlag = value != 0;
+ return CCtxParams->fParams.checksumFlag;
+
+ case ZSTD_c_dictIDFlag : /* When applicable, dictionary's dictID is provided in frame header (default:1) */
+ DEBUGLOG(4, "set dictIDFlag = %u", (value!=0));
+ CCtxParams->fParams.noDictIDFlag = !value;
+ return !CCtxParams->fParams.noDictIDFlag;
+
+ case ZSTD_c_forceMaxWindow :
+ CCtxParams->forceWindow = (value != 0);
+ return CCtxParams->forceWindow;
+
+ case ZSTD_c_forceAttachDict : {
+ const ZSTD_dictAttachPref_e pref = (ZSTD_dictAttachPref_e)value;
+ BOUNDCHECK(ZSTD_c_forceAttachDict, pref);
+ CCtxParams->attachDictPref = pref;
+ return CCtxParams->attachDictPref;
+ }
+
+ case ZSTD_c_literalCompressionMode : {
+ const ZSTD_paramSwitch_e lcm = (ZSTD_paramSwitch_e)value;
+ BOUNDCHECK(ZSTD_c_literalCompressionMode, lcm);
+ CCtxParams->literalCompressionMode = lcm;
+ return CCtxParams->literalCompressionMode;
+ }
+
+ case ZSTD_c_nbWorkers :
+ RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
+ return 0;
+
+ case ZSTD_c_jobSize :
+ RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
+ return 0;
+
+ case ZSTD_c_overlapLog :
+ RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
+ return 0;
+
+ case ZSTD_c_rsyncable :
+ RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
+ return 0;
+
+ case ZSTD_c_enableDedicatedDictSearch :
+ CCtxParams->enableDedicatedDictSearch = (value!=0);
+ return CCtxParams->enableDedicatedDictSearch;
+
+ case ZSTD_c_enableLongDistanceMatching :
+ CCtxParams->ldmParams.enableLdm = (ZSTD_paramSwitch_e)value;
+ return CCtxParams->ldmParams.enableLdm;
+
+ case ZSTD_c_ldmHashLog :
+ if (value!=0) /* 0 ==> auto */
+ BOUNDCHECK(ZSTD_c_ldmHashLog, value);
+ CCtxParams->ldmParams.hashLog = value;
+ return CCtxParams->ldmParams.hashLog;
+
+ case ZSTD_c_ldmMinMatch :
+ if (value!=0) /* 0 ==> default */
+ BOUNDCHECK(ZSTD_c_ldmMinMatch, value);
+ CCtxParams->ldmParams.minMatchLength = value;
+ return CCtxParams->ldmParams.minMatchLength;
+
+ case ZSTD_c_ldmBucketSizeLog :
+ if (value!=0) /* 0 ==> default */
+ BOUNDCHECK(ZSTD_c_ldmBucketSizeLog, value);
+ CCtxParams->ldmParams.bucketSizeLog = value;
+ return CCtxParams->ldmParams.bucketSizeLog;
+
+ case ZSTD_c_ldmHashRateLog :
+ if (value!=0) /* 0 ==> default */
+ BOUNDCHECK(ZSTD_c_ldmHashRateLog, value);
+ CCtxParams->ldmParams.hashRateLog = value;
+ return CCtxParams->ldmParams.hashRateLog;
+
+ case ZSTD_c_targetCBlockSize :
+ if (value!=0) /* 0 ==> default */
+ BOUNDCHECK(ZSTD_c_targetCBlockSize, value);
+ CCtxParams->targetCBlockSize = value;
+ return CCtxParams->targetCBlockSize;
+
+ case ZSTD_c_srcSizeHint :
+ if (value!=0) /* 0 ==> default */
+ BOUNDCHECK(ZSTD_c_srcSizeHint, value);
+ CCtxParams->srcSizeHint = value;
+ return CCtxParams->srcSizeHint;
+
+ case ZSTD_c_stableInBuffer:
+ BOUNDCHECK(ZSTD_c_stableInBuffer, value);
+ CCtxParams->inBufferMode = (ZSTD_bufferMode_e)value;
+ return CCtxParams->inBufferMode;
+
+ case ZSTD_c_stableOutBuffer:
+ BOUNDCHECK(ZSTD_c_stableOutBuffer, value);
+ CCtxParams->outBufferMode = (ZSTD_bufferMode_e)value;
+ return CCtxParams->outBufferMode;
+
+ case ZSTD_c_blockDelimiters:
+ BOUNDCHECK(ZSTD_c_blockDelimiters, value);
+ CCtxParams->blockDelimiters = (ZSTD_sequenceFormat_e)value;
+ return CCtxParams->blockDelimiters;
+
+ case ZSTD_c_validateSequences:
+ BOUNDCHECK(ZSTD_c_validateSequences, value);
+ CCtxParams->validateSequences = value;
+ return CCtxParams->validateSequences;
+
+ case ZSTD_c_useBlockSplitter:
+ BOUNDCHECK(ZSTD_c_useBlockSplitter, value);
+ CCtxParams->useBlockSplitter = (ZSTD_paramSwitch_e)value;
+ return CCtxParams->useBlockSplitter;
+
+ case ZSTD_c_useRowMatchFinder:
+ BOUNDCHECK(ZSTD_c_useRowMatchFinder, value);
+ CCtxParams->useRowMatchFinder = (ZSTD_paramSwitch_e)value;
+ return CCtxParams->useRowMatchFinder;
+
+ case ZSTD_c_deterministicRefPrefix:
+ BOUNDCHECK(ZSTD_c_deterministicRefPrefix, value);
+ CCtxParams->deterministicRefPrefix = !!value;
+ return CCtxParams->deterministicRefPrefix;
+
+ default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
+ }
+}
+
+size_t ZSTD_CCtx_getParameter(ZSTD_CCtx const* cctx, ZSTD_cParameter param, int* value)
+{
+ return ZSTD_CCtxParams_getParameter(&cctx->requestedParams, param, value);
+}
+
+size_t ZSTD_CCtxParams_getParameter(
+ ZSTD_CCtx_params const* CCtxParams, ZSTD_cParameter param, int* value)
+{
+ switch(param)
+ {
+ case ZSTD_c_format :
+ *value = CCtxParams->format;
+ break;
+ case ZSTD_c_compressionLevel :
+ *value = CCtxParams->compressionLevel;
+ break;
+ case ZSTD_c_windowLog :
+ *value = (int)CCtxParams->cParams.windowLog;
+ break;
+ case ZSTD_c_hashLog :
+ *value = (int)CCtxParams->cParams.hashLog;
+ break;
+ case ZSTD_c_chainLog :
+ *value = (int)CCtxParams->cParams.chainLog;
+ break;
+ case ZSTD_c_searchLog :
+ *value = CCtxParams->cParams.searchLog;
+ break;
+ case ZSTD_c_minMatch :
+ *value = CCtxParams->cParams.minMatch;
+ break;
+ case ZSTD_c_targetLength :
+ *value = CCtxParams->cParams.targetLength;
+ break;
+ case ZSTD_c_strategy :
+ *value = (unsigned)CCtxParams->cParams.strategy;
+ break;
+ case ZSTD_c_contentSizeFlag :
+ *value = CCtxParams->fParams.contentSizeFlag;
+ break;
+ case ZSTD_c_checksumFlag :
+ *value = CCtxParams->fParams.checksumFlag;
+ break;
+ case ZSTD_c_dictIDFlag :
+ *value = !CCtxParams->fParams.noDictIDFlag;
+ break;
+ case ZSTD_c_forceMaxWindow :
+ *value = CCtxParams->forceWindow;
+ break;
+ case ZSTD_c_forceAttachDict :
+ *value = CCtxParams->attachDictPref;
+ break;
+ case ZSTD_c_literalCompressionMode :
+ *value = CCtxParams->literalCompressionMode;
+ break;
+ case ZSTD_c_nbWorkers :
+ assert(CCtxParams->nbWorkers == 0);
+ *value = CCtxParams->nbWorkers;
+ break;
+ case ZSTD_c_jobSize :
+ RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
+ case ZSTD_c_overlapLog :
+ RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
+ case ZSTD_c_rsyncable :
+ RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
+ case ZSTD_c_enableDedicatedDictSearch :
+ *value = CCtxParams->enableDedicatedDictSearch;
+ break;
+ case ZSTD_c_enableLongDistanceMatching :
+ *value = CCtxParams->ldmParams.enableLdm;
+ break;
+ case ZSTD_c_ldmHashLog :
+ *value = CCtxParams->ldmParams.hashLog;
+ break;
+ case ZSTD_c_ldmMinMatch :
+ *value = CCtxParams->ldmParams.minMatchLength;
+ break;
+ case ZSTD_c_ldmBucketSizeLog :
+ *value = CCtxParams->ldmParams.bucketSizeLog;
+ break;
+ case ZSTD_c_ldmHashRateLog :
+ *value = CCtxParams->ldmParams.hashRateLog;
+ break;
+ case ZSTD_c_targetCBlockSize :
+ *value = (int)CCtxParams->targetCBlockSize;
+ break;
+ case ZSTD_c_srcSizeHint :
+ *value = (int)CCtxParams->srcSizeHint;
+ break;
+ case ZSTD_c_stableInBuffer :
+ *value = (int)CCtxParams->inBufferMode;
+ break;
+ case ZSTD_c_stableOutBuffer :
+ *value = (int)CCtxParams->outBufferMode;
+ break;
+ case ZSTD_c_blockDelimiters :
+ *value = (int)CCtxParams->blockDelimiters;
+ break;
+ case ZSTD_c_validateSequences :
+ *value = (int)CCtxParams->validateSequences;
+ break;
+ case ZSTD_c_useBlockSplitter :
+ *value = (int)CCtxParams->useBlockSplitter;
+ break;
+ case ZSTD_c_useRowMatchFinder :
+ *value = (int)CCtxParams->useRowMatchFinder;
+ break;
+ case ZSTD_c_deterministicRefPrefix:
+ *value = (int)CCtxParams->deterministicRefPrefix;
+ break;
+ default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
+ }
+ return 0;
+}
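+
+/* Editor's note: illustrative sketch, not part of the upstream sources.
+ * Reading a parameter back after setting it; the function name is
+ * hypothetical. */
+static int example_read_level(const ZSTD_CCtx* cctx)
+{
+    int level = 0;
+    size_t const err = ZSTD_CCtx_getParameter(cctx, ZSTD_c_compressionLevel, &level);
+    return ZSTD_isError(err) ? ZSTD_CLEVEL_DEFAULT : level;
+}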
+
+/* ZSTD_CCtx_setParametersUsingCCtxParams() :
+ * just applies `params` into `cctx`;
+ * no action is performed, parameters are merely stored.
+ * If ZSTDMT is enabled, parameters are pushed to cctx->mtctx.
+ * This is possible even if a compression is ongoing,
+ * in which case the new parameters will be applied on the fly, starting with the next compression job.
+ */
+size_t ZSTD_CCtx_setParametersUsingCCtxParams(
+ ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params)
+{
+ DEBUGLOG(4, "ZSTD_CCtx_setParametersUsingCCtxParams");
+ RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
+ "The context is in the wrong stage!");
+ RETURN_ERROR_IF(cctx->cdict, stage_wrong,
+ "Can't override parameters with cdict attached (some must "
+ "be inherited from the cdict).");
+
+ cctx->requestedParams = *params;
+ return 0;
+}
+
+size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize)
+{
+ DEBUGLOG(4, "ZSTD_CCtx_setPledgedSrcSize to %u bytes", (U32)pledgedSrcSize);
+ RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
+ "Can't set pledgedSrcSize when not in init stage.");
+ cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1;
+ return 0;
+}
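+
+/* Editor's note: illustrative sketch, not part of the upstream sources.
+ * Pledging the source size lets the frame header record it and lets
+ * parameter adjustment pick smaller tables. This assumes the whole input
+ * sits in a single ZSTD_inBuffer; the function name is hypothetical. */
+static size_t example_compress_pledged(ZSTD_CCtx* cctx,
+                                       ZSTD_outBuffer* out, ZSTD_inBuffer* in)
+{
+    size_t const err = ZSTD_CCtx_setPledgedSrcSize(cctx, in->size);
+    if (ZSTD_isError(err)) return err;
+    /* ZSTD_e_end compresses, flushes, and writes the frame epilogue in one call */
+    return ZSTD_compressStream2(cctx, out, in, ZSTD_e_end);
+}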
+
+static ZSTD_compressionParameters ZSTD_dedicatedDictSearch_getCParams(
+ int const compressionLevel,
+ size_t const dictSize);
+static int ZSTD_dedicatedDictSearch_isSupported(
+ const ZSTD_compressionParameters* cParams);
+static void ZSTD_dedicatedDictSearch_revertCParams(
+ ZSTD_compressionParameters* cParams);
+
+/*
+ * Initializes the local dict using the requested parameters.
+ * NOTE: This does not use the pledged src size, because it may be used for more
+ * than one compression.
+ */
+static size_t ZSTD_initLocalDict(ZSTD_CCtx* cctx)
+{
+ ZSTD_localDict* const dl = &cctx->localDict;
+ if (dl->dict == NULL) {
+ /* No local dictionary. */
+ assert(dl->dictBuffer == NULL);
+ assert(dl->cdict == NULL);
+ assert(dl->dictSize == 0);
+ return 0;
+ }
+ if (dl->cdict != NULL) {
+ assert(cctx->cdict == dl->cdict);
+ /* Local dictionary already initialized. */
+ return 0;
+ }
+ assert(dl->dictSize > 0);
+ assert(cctx->cdict == NULL);
+ assert(cctx->prefixDict.dict == NULL);
+
+ dl->cdict = ZSTD_createCDict_advanced2(
+ dl->dict,
+ dl->dictSize,
+ ZSTD_dlm_byRef,
+ dl->dictContentType,
+ &cctx->requestedParams,
+ cctx->customMem);
+ RETURN_ERROR_IF(!dl->cdict, memory_allocation, "ZSTD_createCDict_advanced failed");
+ cctx->cdict = dl->cdict;
+ return 0;
+}
+
+size_t ZSTD_CCtx_loadDictionary_advanced(
+ ZSTD_CCtx* cctx, const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType)
+{
+ RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
+ "Can't load a dictionary when ctx is not in init stage.");
+ DEBUGLOG(4, "ZSTD_CCtx_loadDictionary_advanced (size: %u)", (U32)dictSize);
+ ZSTD_clearAllDicts(cctx); /* in case one already exists */
+ if (dict == NULL || dictSize == 0) /* no dictionary mode */
+ return 0;
+ if (dictLoadMethod == ZSTD_dlm_byRef) {
+ cctx->localDict.dict = dict;
+ } else {
+ void* dictBuffer;
+ RETURN_ERROR_IF(cctx->staticSize, memory_allocation,
+ "no malloc for static CCtx");
+ dictBuffer = ZSTD_customMalloc(dictSize, cctx->customMem);
+ RETURN_ERROR_IF(!dictBuffer, memory_allocation, "NULL pointer!");
+ ZSTD_memcpy(dictBuffer, dict, dictSize);
+ cctx->localDict.dictBuffer = dictBuffer;
+ cctx->localDict.dict = dictBuffer;
+ }
+ cctx->localDict.dictSize = dictSize;
+ cctx->localDict.dictContentType = dictContentType;
+ return 0;
+}
+
+size_t ZSTD_CCtx_loadDictionary_byReference(
+ ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
+{
+ return ZSTD_CCtx_loadDictionary_advanced(
+ cctx, dict, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto);
+}
+
+size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
+{
+ return ZSTD_CCtx_loadDictionary_advanced(
+ cctx, dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto);
+}
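+
+/* Editor's note: illustrative sketch, not part of the upstream sources.
+ * A dictionary loaded with ZSTD_CCtx_loadDictionary() is copied into the
+ * cctx and stays loaded across frames until parameters are reset; the
+ * function name is hypothetical. */
+static size_t example_compress_with_dict(ZSTD_CCtx* cctx,
+                                         const void* dict, size_t dictSize,
+                                         void* dst, size_t dstCapacity,
+                                         const void* src, size_t srcSize)
+{
+    size_t const err = ZSTD_CCtx_loadDictionary(cctx, dict, dictSize);
+    if (ZSTD_isError(err)) return err;
+    return ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
+}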
+
+
+size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
+{
+ RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
+ "Can't ref a dict when ctx not in init stage.");
+ /* Free the existing local cdict (if any) to save memory. */
+ ZSTD_clearAllDicts(cctx);
+ cctx->cdict = cdict;
+ return 0;
+}
+
+size_t ZSTD_CCtx_refThreadPool(ZSTD_CCtx* cctx, ZSTD_threadPool* pool)
+{
+ RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
+ "Can't ref a pool when ctx not in init stage.");
+ cctx->pool = pool;
+ return 0;
+}
+
+size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize)
+{
+ return ZSTD_CCtx_refPrefix_advanced(cctx, prefix, prefixSize, ZSTD_dct_rawContent);
+}
+
+size_t ZSTD_CCtx_refPrefix_advanced(
+ ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType)
+{
+ RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
+ "Can't ref a prefix when ctx not in init stage.");
+ ZSTD_clearAllDicts(cctx);
+ if (prefix != NULL && prefixSize > 0) {
+ cctx->prefixDict.dict = prefix;
+ cctx->prefixDict.dictSize = prefixSize;
+ cctx->prefixDict.dictContentType = dictContentType;
+ }
+ return 0;
+}
+
+/*! ZSTD_CCtx_reset() :
+ * Also discards any loaded dictionary when parameters are reset */
+size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset)
+{
+ if ( (reset == ZSTD_reset_session_only)
+ || (reset == ZSTD_reset_session_and_parameters) ) {
+ cctx->streamStage = zcss_init;
+ cctx->pledgedSrcSizePlusOne = 0;
+ }
+ if ( (reset == ZSTD_reset_parameters)
+ || (reset == ZSTD_reset_session_and_parameters) ) {
+ RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
+ "Can't reset parameters only when not in init stage.");
+ ZSTD_clearAllDicts(cctx);
+ return ZSTD_CCtxParams_reset(&cctx->requestedParams);
+ }
+ return 0;
+}
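+
+/* Editor's note: illustrative sketch, not part of the upstream sources.
+ * Resetting only the session keeps parameters and any loaded dictionary,
+ * so a context can be reused cheaply across frames; the function name is
+ * hypothetical. */
+static size_t example_next_frame(ZSTD_CCtx* cctx,
+                                 void* dst, size_t dstCapacity,
+                                 const void* src, size_t srcSize)
+{
+    size_t const err = ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
+    if (ZSTD_isError(err)) return err;
+    return ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
+}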
+
+
+/* ZSTD_checkCParams() :
+ * Checks that CParam values remain within the authorized range.
+ * @return : 0, or an error code if one value is beyond the authorized range */
+size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams)
+{
+ BOUNDCHECK(ZSTD_c_windowLog, (int)cParams.windowLog);
+ BOUNDCHECK(ZSTD_c_chainLog, (int)cParams.chainLog);
+ BOUNDCHECK(ZSTD_c_hashLog, (int)cParams.hashLog);
+ BOUNDCHECK(ZSTD_c_searchLog, (int)cParams.searchLog);
+ BOUNDCHECK(ZSTD_c_minMatch, (int)cParams.minMatch);
+ BOUNDCHECK(ZSTD_c_targetLength,(int)cParams.targetLength);
+ BOUNDCHECK(ZSTD_c_strategy, cParams.strategy);
+ return 0;
+}
+
+/* ZSTD_clampCParams() :
+ * Clamps CParam values into the valid range.
+ * @return : valid CParams */
+static ZSTD_compressionParameters
+ZSTD_clampCParams(ZSTD_compressionParameters cParams)
+{
+# define CLAMP_TYPE(cParam, val, type) { \
+ ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam); \
+ if ((int)val<bounds.lowerBound) val=(type)bounds.lowerBound; \
+ else if ((int)val>bounds.upperBound) val=(type)bounds.upperBound; \
+ }
+# define CLAMP(cParam, val) CLAMP_TYPE(cParam, val, unsigned)
+ CLAMP(ZSTD_c_windowLog, cParams.windowLog);
+ CLAMP(ZSTD_c_chainLog, cParams.chainLog);
+ CLAMP(ZSTD_c_hashLog, cParams.hashLog);
+ CLAMP(ZSTD_c_searchLog, cParams.searchLog);
+ CLAMP(ZSTD_c_minMatch, cParams.minMatch);
+ CLAMP(ZSTD_c_targetLength,cParams.targetLength);
+ CLAMP_TYPE(ZSTD_c_strategy,cParams.strategy, ZSTD_strategy);
+ return cParams;
+}
+
+/* ZSTD_cycleLog() :
+ * condition for correct operation : hashLog > 1 */
+U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat)
+{
+ U32 const btScale = ((U32)strat >= (U32)ZSTD_btlazy2);
+ return hashLog - btScale;
+}
+
+/* ZSTD_dictAndWindowLog() :
+ * Returns an adjusted window log that is large enough to fit the source and the dictionary.
+ * The zstd format says that the entire dictionary is valid if one byte of the dictionary
+ * is within the window. So the hashLog and chainLog should be large enough to reference both
+ * the dictionary and the window. So we must use this adjusted dictAndWindowLog when downsizing
+ * the hashLog and windowLog.
+ * NOTE: srcSize must not be ZSTD_CONTENTSIZE_UNKNOWN.
+ */
+static U32 ZSTD_dictAndWindowLog(U32 windowLog, U64 srcSize, U64 dictSize)
+{
+ const U64 maxWindowSize = 1ULL << ZSTD_WINDOWLOG_MAX;
+ /* No dictionary ==> No change */
+ if (dictSize == 0) {
+ return windowLog;
+ }
+ assert(windowLog <= ZSTD_WINDOWLOG_MAX);
+ assert(srcSize != ZSTD_CONTENTSIZE_UNKNOWN); /* Handled in ZSTD_adjustCParams_internal() */
+ {
+ U64 const windowSize = 1ULL << windowLog;
+ U64 const dictAndWindowSize = dictSize + windowSize;
+ /* If the window size is already large enough to fit both the source and the dictionary
+ * then just use the window size. Otherwise adjust so that it fits the dictionary and
+ * the window.
+ */
+ if (windowSize >= dictSize + srcSize) {
+ return windowLog; /* Window size large enough already */
+ } else if (dictAndWindowSize >= maxWindowSize) {
+ return ZSTD_WINDOWLOG_MAX; /* Larger than max window log */
+ } else {
+ return ZSTD_highbit32((U32)dictAndWindowSize - 1) + 1;
+ }
+ }
+}
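+
+/* Editor's note: illustrative worked example, not part of the upstream
+ * sources. With windowLog = 20 (1 MiB window), dictSize = 512 KiB and
+ * srcSize = 1 MiB: windowSize (1 MiB) < dictSize + srcSize (1.5 MiB),
+ * and dictAndWindowSize (1.5 MiB) is below the max window size, so the
+ * function returns ZSTD_highbit32(1.5 MiB - 1) + 1 = 21, i.e. a 2 MiB
+ * window large enough to reference both dictionary and source. */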
+
+/* ZSTD_adjustCParams_internal() :
+ * optimize `cPar` for a specified input (`srcSize` and `dictSize`).
+ * mostly downsizes parameters, to reduce memory consumption and initialization latency.
+ * `srcSize` can be ZSTD_CONTENTSIZE_UNKNOWN when not known.
+ * `mode` is the mode for parameter adjustment. See docs for `ZSTD_cParamMode_e`.
+ * note : `srcSize==0` really means zero bytes here (unlike ZSTD_adjustCParams(), which maps 0 to unknown).
+ * condition : cPar is presumed validated (can be checked using ZSTD_checkCParams()). */
+static ZSTD_compressionParameters
+ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar,
+ unsigned long long srcSize,
+ size_t dictSize,
+ ZSTD_cParamMode_e mode)
+{
+ const U64 minSrcSize = 513; /* (1<<9) + 1 */
+ const U64 maxWindowResize = 1ULL << (ZSTD_WINDOWLOG_MAX-1);
+ assert(ZSTD_checkCParams(cPar)==0);
+
+ switch (mode) {
+ case ZSTD_cpm_unknown:
+ case ZSTD_cpm_noAttachDict:
+ /* If we don't know the source size, don't make any
+ * assumptions about it. We will already have selected
+ * smaller parameters if a dictionary is in use.
+ */
+ break;
+ case ZSTD_cpm_createCDict:
+ /* Assume a small source size when creating a dictionary
+ * with an unknown source size.
+ */
+ if (dictSize && srcSize == ZSTD_CONTENTSIZE_UNKNOWN)
+ srcSize = minSrcSize;
+ break;
+ case ZSTD_cpm_attachDict:
+ /* Dictionary has its own dedicated parameters which have
+ * already been selected. We are selecting parameters
+ * for only the source.
+ */
+ dictSize = 0;
+ break;
+ default:
+ assert(0);
+ break;
+ }
+
+ /* resize windowLog if input is small enough, to use less memory */
+ if ( (srcSize < maxWindowResize)
+ && (dictSize < maxWindowResize) ) {
+ U32 const tSize = (U32)(srcSize + dictSize);
+ static U32 const hashSizeMin = 1 << ZSTD_HASHLOG_MIN;
+ U32 const srcLog = (tSize < hashSizeMin) ? ZSTD_HASHLOG_MIN :
+ ZSTD_highbit32(tSize-1) + 1;
+ if (cPar.windowLog > srcLog) cPar.windowLog = srcLog;
+ }
+ if (srcSize != ZSTD_CONTENTSIZE_UNKNOWN) {
+ U32 const dictAndWindowLog = ZSTD_dictAndWindowLog(cPar.windowLog, (U64)srcSize, (U64)dictSize);
+ U32 const cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy);
+ if (cPar.hashLog > dictAndWindowLog+1) cPar.hashLog = dictAndWindowLog+1;
+ if (cycleLog > dictAndWindowLog)
+ cPar.chainLog -= (cycleLog - dictAndWindowLog);
+ }
+
+ if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN)
+ cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN; /* minimum wlog required for valid frame header */
+
+ return cPar;
+}
+
+ZSTD_compressionParameters
+ZSTD_adjustCParams(ZSTD_compressionParameters cPar,
+ unsigned long long srcSize,
+ size_t dictSize)
+{
+ cPar = ZSTD_clampCParams(cPar); /* resulting cPar is necessarily valid (all parameters within range) */
+ if (srcSize == 0) srcSize = ZSTD_CONTENTSIZE_UNKNOWN;
+ return ZSTD_adjustCParams_internal(cPar, srcSize, dictSize, ZSTD_cpm_unknown);
+}
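+
+/* Editor's note: illustrative sketch, not part of the upstream sources.
+ * Shrinking level-3 defaults for a small 4 KiB input with no dictionary;
+ * ZSTD_getCParams() is defined later in this file, and the function name
+ * here is hypothetical. */
+static ZSTD_compressionParameters example_small_input_params(void)
+{
+    ZSTD_compressionParameters const cp = ZSTD_getCParams(3, 4096, 0);
+    /* windowLog (and the dependent table logs) get downsized to fit 4 KiB */
+    return ZSTD_adjustCParams(cp, 4096, 0);
+}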
+
+static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode);
+static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode);
+
+static void ZSTD_overrideCParams(
+ ZSTD_compressionParameters* cParams,
+ const ZSTD_compressionParameters* overrides)
+{
+ if (overrides->windowLog) cParams->windowLog = overrides->windowLog;
+ if (overrides->hashLog) cParams->hashLog = overrides->hashLog;
+ if (overrides->chainLog) cParams->chainLog = overrides->chainLog;
+ if (overrides->searchLog) cParams->searchLog = overrides->searchLog;
+ if (overrides->minMatch) cParams->minMatch = overrides->minMatch;
+ if (overrides->targetLength) cParams->targetLength = overrides->targetLength;
+ if (overrides->strategy) cParams->strategy = overrides->strategy;
+}
+
+ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
+ const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode)
+{
+ ZSTD_compressionParameters cParams;
+ if (srcSizeHint == ZSTD_CONTENTSIZE_UNKNOWN && CCtxParams->srcSizeHint > 0) {
+ srcSizeHint = CCtxParams->srcSizeHint;
+ }
+ cParams = ZSTD_getCParams_internal(CCtxParams->compressionLevel, srcSizeHint, dictSize, mode);
+ if (CCtxParams->ldmParams.enableLdm == ZSTD_ps_enable) cParams.windowLog = ZSTD_LDM_DEFAULT_WINDOW_LOG;
+ ZSTD_overrideCParams(&cParams, &CCtxParams->cParams);
+ assert(!ZSTD_checkCParams(cParams));
+ /* srcSizeHint == 0 means 0 */
+ return ZSTD_adjustCParams_internal(cParams, srcSizeHint, dictSize, mode);
+}
+
+static size_t
+ZSTD_sizeof_matchState(const ZSTD_compressionParameters* const cParams,
+ const ZSTD_paramSwitch_e useRowMatchFinder,
+ const U32 enableDedicatedDictSearch,
+ const U32 forCCtx)
+{
+ /* chain table size should be 0 for fast or row-hash strategies */
+ size_t const chainSize = ZSTD_allocateChainTable(cParams->strategy, useRowMatchFinder, enableDedicatedDictSearch && !forCCtx)
+ ? ((size_t)1 << cParams->chainLog)
+ : 0;
+ size_t const hSize = ((size_t)1) << cParams->hashLog;
+ U32 const hashLog3 = (forCCtx && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
+ size_t const h3Size = hashLog3 ? ((size_t)1) << hashLog3 : 0;
+ /* We don't use ZSTD_cwksp_alloc_size() here because the tables aren't
+ * surrounded by redzones in ASAN. */
+ size_t const tableSpace = chainSize * sizeof(U32)
+ + hSize * sizeof(U32)
+ + h3Size * sizeof(U32);
+ size_t const optPotentialSpace =
+ ZSTD_cwksp_aligned_alloc_size((MaxML+1) * sizeof(U32))
+ + ZSTD_cwksp_aligned_alloc_size((MaxLL+1) * sizeof(U32))
+ + ZSTD_cwksp_aligned_alloc_size((MaxOff+1) * sizeof(U32))
+ + ZSTD_cwksp_aligned_alloc_size((1<<Litbits) * sizeof(U32))
+ + ZSTD_cwksp_aligned_alloc_size((ZSTD_OPT_NUM+1) * sizeof(ZSTD_match_t))
+ + ZSTD_cwksp_aligned_alloc_size((ZSTD_OPT_NUM+1) * sizeof(ZSTD_optimal_t));
+ size_t const lazyAdditionalSpace = ZSTD_rowMatchFinderUsed(cParams->strategy, useRowMatchFinder)
+ ? ZSTD_cwksp_aligned_alloc_size(hSize*sizeof(U16))
+ : 0;
+ size_t const optSpace = (forCCtx && (cParams->strategy >= ZSTD_btopt))
+ ? optPotentialSpace
+ : 0;
+ size_t const slackSpace = ZSTD_cwksp_slack_space_required();
+
+ /* tables are guaranteed to be sized in multiples of 64 bytes (or 16 uint32_t) */
+ ZSTD_STATIC_ASSERT(ZSTD_HASHLOG_MIN >= 4 && ZSTD_WINDOWLOG_MIN >= 4 && ZSTD_CHAINLOG_MIN >= 4);
+ assert(useRowMatchFinder != ZSTD_ps_auto);
+
+ DEBUGLOG(4, "chainSize: %u - hSize: %u - h3Size: %u",
+ (U32)chainSize, (U32)hSize, (U32)h3Size);
+ return tableSpace + optSpace + slackSpace + lazyAdditionalSpace;
+}
+
+static size_t ZSTD_estimateCCtxSize_usingCCtxParams_internal(
+ const ZSTD_compressionParameters* cParams,
+ const ldmParams_t* ldmParams,
+ const int isStatic,
+ const ZSTD_paramSwitch_e useRowMatchFinder,
+ const size_t buffInSize,
+ const size_t buffOutSize,
+ const U64 pledgedSrcSize)
+{
+ size_t const windowSize = (size_t) BOUNDED(1ULL, 1ULL << cParams->windowLog, pledgedSrcSize);
+ size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize);
+ U32 const divider = (cParams->minMatch==3) ? 3 : 4;
+ size_t const maxNbSeq = blockSize / divider;
+ size_t const tokenSpace = ZSTD_cwksp_alloc_size(WILDCOPY_OVERLENGTH + blockSize)
+ + ZSTD_cwksp_aligned_alloc_size(maxNbSeq * sizeof(seqDef))
+ + 3 * ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(BYTE));
+ size_t const entropySpace = ZSTD_cwksp_alloc_size(ENTROPY_WORKSPACE_SIZE);
+ size_t const blockStateSpace = 2 * ZSTD_cwksp_alloc_size(sizeof(ZSTD_compressedBlockState_t));
+ size_t const matchStateSize = ZSTD_sizeof_matchState(cParams, useRowMatchFinder, /* enableDedicatedDictSearch */ 0, /* forCCtx */ 1);
+
+ size_t const ldmSpace = ZSTD_ldm_getTableSize(*ldmParams);
+ size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(*ldmParams, blockSize);
+ size_t const ldmSeqSpace = ldmParams->enableLdm == ZSTD_ps_enable ?
+ ZSTD_cwksp_aligned_alloc_size(maxNbLdmSeq * sizeof(rawSeq)) : 0;
+
+
+ size_t const bufferSpace = ZSTD_cwksp_alloc_size(buffInSize)
+ + ZSTD_cwksp_alloc_size(buffOutSize);
+
+ size_t const cctxSpace = isStatic ? ZSTD_cwksp_alloc_size(sizeof(ZSTD_CCtx)) : 0;
+
+ size_t const neededSpace =
+ cctxSpace +
+ entropySpace +
+ blockStateSpace +
+ ldmSpace +
+ ldmSeqSpace +
+ matchStateSize +
+ tokenSpace +
+ bufferSpace;
+
+ DEBUGLOG(5, "estimate workspace : %u", (U32)neededSpace);
+ return neededSpace;
+}
+
+size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params)
+{
+ ZSTD_compressionParameters const cParams =
+ ZSTD_getCParamsFromCCtxParams(params, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict);
+ ZSTD_paramSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params->useRowMatchFinder,
+ &cParams);
+
+ RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only.");
+ /* estimateCCtxSize is for one-shot compression. So no buffers should
+ * be needed. However, we still allocate two 0-sized buffers, which can
+ * take space under ASAN. */
+ return ZSTD_estimateCCtxSize_usingCCtxParams_internal(
+ &cParams, &params->ldmParams, 1, useRowMatchFinder, 0, 0, ZSTD_CONTENTSIZE_UNKNOWN);
+}
+
+size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams)
+{
+ ZSTD_CCtx_params initialParams = ZSTD_makeCCtxParamsFromCParams(cParams);
+ if (ZSTD_rowMatchFinderSupported(cParams.strategy)) {
+ /* Pick bigger of not using and using row-based matchfinder for greedy and lazy strategies */
+ size_t noRowCCtxSize;
+ size_t rowCCtxSize;
+ initialParams.useRowMatchFinder = ZSTD_ps_disable;
+ noRowCCtxSize = ZSTD_estimateCCtxSize_usingCCtxParams(&initialParams);
+ initialParams.useRowMatchFinder = ZSTD_ps_enable;
+ rowCCtxSize = ZSTD_estimateCCtxSize_usingCCtxParams(&initialParams);
+ return MAX(noRowCCtxSize, rowCCtxSize);
+ } else {
+ return ZSTD_estimateCCtxSize_usingCCtxParams(&initialParams);
+ }
+}
+
+static size_t ZSTD_estimateCCtxSize_internal(int compressionLevel)
+{
+ int tier = 0;
+ size_t largestSize = 0;
+ static const unsigned long long srcSizeTiers[4] = {16 KB, 128 KB, 256 KB, ZSTD_CONTENTSIZE_UNKNOWN};
+ for (; tier < 4; ++tier) {
+ /* Choose the set of cParams for a given level across all srcSizes that give the largest cctxSize */
+ ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, srcSizeTiers[tier], 0, ZSTD_cpm_noAttachDict);
+ largestSize = MAX(ZSTD_estimateCCtxSize_usingCParams(cParams), largestSize);
+ }
+ return largestSize;
+}
+
+size_t ZSTD_estimateCCtxSize(int compressionLevel)
+{
+ int level;
+ size_t memBudget = 0;
+ for (level=MIN(compressionLevel, 1); level<=compressionLevel; level++) {
+ /* Ensure monotonically increasing memory usage as compression level increases */
+ size_t const newMB = ZSTD_estimateCCtxSize_internal(level);
+ if (newMB > memBudget) memBudget = newMB;
+ }
+ return memBudget;
+}
+
+size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params)
+{
+ RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only.");
+ { ZSTD_compressionParameters const cParams =
+ ZSTD_getCParamsFromCCtxParams(params, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict);
+ size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << cParams.windowLog);
+ size_t const inBuffSize = (params->inBufferMode == ZSTD_bm_buffered)
+ ? ((size_t)1 << cParams.windowLog) + blockSize
+ : 0;
+ size_t const outBuffSize = (params->outBufferMode == ZSTD_bm_buffered)
+ ? ZSTD_compressBound(blockSize) + 1
+ : 0;
+ ZSTD_paramSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params->useRowMatchFinder, &params->cParams);
+
+ return ZSTD_estimateCCtxSize_usingCCtxParams_internal(
+ &cParams, &params->ldmParams, 1, useRowMatchFinder, inBuffSize, outBuffSize,
+ ZSTD_CONTENTSIZE_UNKNOWN);
+ }
+}
+
+size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams)
+{
+ ZSTD_CCtx_params initialParams = ZSTD_makeCCtxParamsFromCParams(cParams);
+ if (ZSTD_rowMatchFinderSupported(cParams.strategy)) {
+ /* Pick bigger of not using and using row-based matchfinder for greedy and lazy strategies */
+ size_t noRowCCtxSize;
+ size_t rowCCtxSize;
+ initialParams.useRowMatchFinder = ZSTD_ps_disable;
+ noRowCCtxSize = ZSTD_estimateCStreamSize_usingCCtxParams(&initialParams);
+ initialParams.useRowMatchFinder = ZSTD_ps_enable;
+ rowCCtxSize = ZSTD_estimateCStreamSize_usingCCtxParams(&initialParams);
+ return MAX(noRowCCtxSize, rowCCtxSize);
+ } else {
+ return ZSTD_estimateCStreamSize_usingCCtxParams(&initialParams);
+ }
+}
+
+static size_t ZSTD_estimateCStreamSize_internal(int compressionLevel)
+{
+ ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict);
+ return ZSTD_estimateCStreamSize_usingCParams(cParams);
+}
+
+size_t ZSTD_estimateCStreamSize(int compressionLevel)
+{
+ int level;
+ size_t memBudget = 0;
+ for (level=MIN(compressionLevel, 1); level<=compressionLevel; level++) {
+ size_t const newMB = ZSTD_estimateCStreamSize_internal(level);
+ if (newMB > memBudget) memBudget = newMB;
+ }
+ return memBudget;
+}
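+
+/* Editor's note: illustrative sketch, not part of the upstream sources.
+ * A CStream needs the extra in/out buffer space accounted for by
+ * ZSTD_estimateCStreamSize(); ZSTD_initStaticCStream() (declared in
+ * zstd.h) then carves everything out of a caller-provided, 8-byte-aligned
+ * arena. `arena` and the function name are hypothetical. */
+static ZSTD_CStream* example_static_cstream(void* arena, size_t arenaSize, int level)
+{
+    if (arenaSize < ZSTD_estimateCStreamSize(level))
+        return NULL; /* arena too small for streaming at this level */
+    return ZSTD_initStaticCStream(arena, arenaSize);
+}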
+
+/* ZSTD_getFrameProgression():
+ * tells how much data has been consumed (input) and produced (output) for the current frame.
+ * It can also track progression inside worker threads (non-blocking mode).
+ */
+ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx)
+{
+ { ZSTD_frameProgression fp;
+ size_t const buffered = (cctx->inBuff == NULL) ? 0 :
+ cctx->inBuffPos - cctx->inToCompress;
+ if (buffered) assert(cctx->inBuffPos >= cctx->inToCompress);
+ assert(buffered <= ZSTD_BLOCKSIZE_MAX);
+ fp.ingested = cctx->consumedSrcSize + buffered;
+ fp.consumed = cctx->consumedSrcSize;
+ fp.produced = cctx->producedCSize;
+ fp.flushed = cctx->producedCSize; /* simplified; some data might still be left within streaming output buffer */
+ fp.currentJobID = 0;
+ fp.nbActiveWorkers = 0;
+ return fp;
+} }
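+
+/* Editor's note: illustrative sketch, not part of the upstream sources.
+ * Turning the progression counters into a percentage for a progress
+ * display; the function name is hypothetical. */
+static unsigned example_percent_ingested(const ZSTD_CCtx* cctx,
+                                         unsigned long long totalSrcSize)
+{
+    ZSTD_frameProgression const fp = ZSTD_getFrameProgression(cctx);
+    return totalSrcSize ? (unsigned)((fp.ingested * 100) / totalSrcSize) : 0;
+}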
+
+/*! ZSTD_toFlushNow()
+ * Only useful for multithreading scenarios currently (nbWorkers >= 1).
+ */
+size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx)
+{
+ (void)cctx;
+ return 0; /* over-simplification; could also check if context is currently running in streaming mode, and in which case, report how many bytes are left to be flushed within output buffer */
+}
+
+static void ZSTD_assertEqualCParams(ZSTD_compressionParameters cParams1,
+ ZSTD_compressionParameters cParams2)
+{
+ (void)cParams1;
+ (void)cParams2;
+ assert(cParams1.windowLog == cParams2.windowLog);
+ assert(cParams1.chainLog == cParams2.chainLog);
+ assert(cParams1.hashLog == cParams2.hashLog);
+ assert(cParams1.searchLog == cParams2.searchLog);
+ assert(cParams1.minMatch == cParams2.minMatch);
+ assert(cParams1.targetLength == cParams2.targetLength);
+ assert(cParams1.strategy == cParams2.strategy);
+}
+
+void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs)
+{
+ int i;
+ for (i = 0; i < ZSTD_REP_NUM; ++i)
+ bs->rep[i] = repStartValue[i];
+ bs->entropy.huf.repeatMode = HUF_repeat_none;
+ bs->entropy.fse.offcode_repeatMode = FSE_repeat_none;
+ bs->entropy.fse.matchlength_repeatMode = FSE_repeat_none;
+ bs->entropy.fse.litlength_repeatMode = FSE_repeat_none;
+}
+
+/*! ZSTD_invalidateMatchState()
+ * Invalidate all the matches in the match finder tables.
+ * Requires nextSrc and base to be set (can be NULL).
+ */
+static void ZSTD_invalidateMatchState(ZSTD_matchState_t* ms)
+{
+ ZSTD_window_clear(&ms->window);
+
+ ms->nextToUpdate = ms->window.dictLimit;
+ ms->loadedDictEnd = 0;
+ ms->opt.litLengthSum = 0; /* force reset of btopt stats */
+ ms->dictMatchState = NULL;
+}
+
+/*
+ * Controls, for this matchState reset, whether the tables need to be cleared /
+ * prepared for the coming compression (ZSTDcrp_makeClean), or whether the
+ * tables can be left unclean (ZSTDcrp_leaveDirty), because we know that a
+ * subsequent operation will overwrite the table space anyways (e.g., copying
+ * the matchState contents in from a CDict).
+ */
+typedef enum {
+ ZSTDcrp_makeClean,
+ ZSTDcrp_leaveDirty
+} ZSTD_compResetPolicy_e;
+
+/*
+ * Controls, for this matchState reset, whether indexing can continue where it
+ * left off (ZSTDirp_continue), or whether it needs to be restarted from zero
+ * (ZSTDirp_reset).
+ */
+typedef enum {
+ ZSTDirp_continue,
+ ZSTDirp_reset
+} ZSTD_indexResetPolicy_e;
+
+typedef enum {
+ ZSTD_resetTarget_CDict,
+ ZSTD_resetTarget_CCtx
+} ZSTD_resetTarget_e;
+
+
+static size_t
+ZSTD_reset_matchState(ZSTD_matchState_t* ms,
+ ZSTD_cwksp* ws,
+ const ZSTD_compressionParameters* cParams,
+ const ZSTD_paramSwitch_e useRowMatchFinder,
+ const ZSTD_compResetPolicy_e crp,
+ const ZSTD_indexResetPolicy_e forceResetIndex,
+ const ZSTD_resetTarget_e forWho)
+{
+ /* disable chain table allocation for fast or row-based strategies */
+ size_t const chainSize = ZSTD_allocateChainTable(cParams->strategy, useRowMatchFinder,
+ ms->dedicatedDictSearch && (forWho == ZSTD_resetTarget_CDict))
+ ? ((size_t)1 << cParams->chainLog)
+ : 0;
+ size_t const hSize = ((size_t)1) << cParams->hashLog;
+ U32 const hashLog3 = ((forWho == ZSTD_resetTarget_CCtx) && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
+ size_t const h3Size = hashLog3 ? ((size_t)1) << hashLog3 : 0;
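+ /* the 3-byte hash table is only allocated for a CCtx (never a CDict), and only when minMatch==3 */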
+
+ DEBUGLOG(4, "reset indices : %u", forceResetIndex == ZSTDirp_reset);
+ assert(useRowMatchFinder != ZSTD_ps_auto);
+ if (forceResetIndex == ZSTDirp_reset) {
+ ZSTD_window_init(&ms->window);
+ ZSTD_cwksp_mark_tables_dirty(ws);
+ }
+
+ ms->hashLog3 = hashLog3;
+
+ ZSTD_invalidateMatchState(ms);
+
+ assert(!ZSTD_cwksp_reserve_failed(ws)); /* check that allocation hasn't already failed */
+
+ ZSTD_cwksp_clear_tables(ws);
+
+ DEBUGLOG(5, "reserving table space");
+ /* table Space */
+ ms->hashTable = (U32*)ZSTD_cwksp_reserve_table(ws, hSize * sizeof(U32));
+ ms->chainTable = (U32*)ZSTD_cwksp_reserve_table(ws, chainSize * sizeof(U32));
+ ms->hashTable3 = (U32*)ZSTD_cwksp_reserve_table(ws, h3Size * sizeof(U32));
+ RETURN_ERROR_IF(ZSTD_cwksp_reserve_failed(ws), memory_allocation,
+ "failed a workspace allocation in ZSTD_reset_matchState");
+
+ DEBUGLOG(4, "reset table : %u", crp!=ZSTDcrp_leaveDirty);
+ if (crp!=ZSTDcrp_leaveDirty) {
+ /* reset tables only */
+ ZSTD_cwksp_clean_tables(ws);
+ }
+
+ /* opt parser space */
+ if ((forWho == ZSTD_resetTarget_CCtx) && (cParams->strategy >= ZSTD_btopt)) {
+ DEBUGLOG(4, "reserving optimal parser space");
+ ms->opt.litFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (1<<Litbits) * sizeof(unsigned));
+ ms->opt.litLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxLL+1) * sizeof(unsigned));
+ ms->opt.matchLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxML+1) * sizeof(unsigned));
+ ms->opt.offCodeFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxOff+1) * sizeof(unsigned));
+ ms->opt.matchTable = (ZSTD_match_t*)ZSTD_cwksp_reserve_aligned(ws, (ZSTD_OPT_NUM+1) * sizeof(ZSTD_match_t));
+ ms->opt.priceTable = (ZSTD_optimal_t*)ZSTD_cwksp_reserve_aligned(ws, (ZSTD_OPT_NUM+1) * sizeof(ZSTD_optimal_t));
+ }
+
+ if (ZSTD_rowMatchFinderUsed(cParams->strategy, useRowMatchFinder)) {
+ { /* Row match finder needs an additional table of hashes ("tags") */
+ size_t const tagTableSize = hSize*sizeof(U16);
+ ms->tagTable = (U16*)ZSTD_cwksp_reserve_aligned(ws, tagTableSize);
+ if (ms->tagTable) ZSTD_memset(ms->tagTable, 0, tagTableSize);
+ }
+ { /* Row size scales with searchLog : 2^rowLog entries per row, rowLog bounded to [4,6] (16-64 entries) */
+ U32 const rowLog = BOUNDED(4, cParams->searchLog, 6);
+ assert(cParams->hashLog >= rowLog);
+ ms->rowHashLog = cParams->hashLog - rowLog;
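+ /* the tag table is thus organized as 2^rowHashLog rows of 2^rowLog entries each */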
+ }
+ }
+
+ ms->cParams = *cParams;
+
+ RETURN_ERROR_IF(ZSTD_cwksp_reserve_failed(ws), memory_allocation,
+ "failed a workspace allocation in ZSTD_reset_matchState");
+ return 0;
+}
+
+/* ZSTD_indexTooCloseToMax() :
+ * minor optimization : prefer memset() over reduceIndex(),
+ * which is measurably slow in some circumstances (reported for Visual Studio).
+ * Works when reusing a context for many smallish inputs :
+ * if all inputs are smaller than ZSTD_INDEXOVERFLOW_MARGIN,
+ * memset() will be triggered before reduceIndex().
+ */
+#define ZSTD_INDEXOVERFLOW_MARGIN (16 MB)
+static int ZSTD_indexTooCloseToMax(ZSTD_window_t w)
+{
+ return (size_t)(w.nextSrc - w.base) > (ZSTD_CURRENT_MAX - ZSTD_INDEXOVERFLOW_MARGIN);
+}
+
+/* ZSTD_dictTooBig():
+ * Dictionaries larger than ZSTD_CHUNKSIZE_MAX can't be loaded in a single pass
+ * by the generic loader. In that case we make sure the tables are reset to zero,
+ * so that as much of the dictionary as possible can be loaded.
+ */
+static int ZSTD_dictTooBig(size_t const loadedDictSize)
+{
+ return loadedDictSize > ZSTD_CHUNKSIZE_MAX;
+}
+
+/*! ZSTD_resetCCtx_internal() :
+ * @param loadedDictSize The size of the dictionary to be loaded
+ * into the context, if any. If no dictionary is used, or the
+ * dictionary is being attached / copied, then pass 0.
+ * note : `params` are assumed fully validated at this stage.
+ */
+static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
+ ZSTD_CCtx_params const* params,
+ U64 const pledgedSrcSize,
+ size_t const loadedDictSize,
+ ZSTD_compResetPolicy_e const crp,
+ ZSTD_buffered_policy_e const zbuff)
+{
+ ZSTD_cwksp* const ws = &zc->workspace;
+ DEBUGLOG(4, "ZSTD_resetCCtx_internal: pledgedSrcSize=%u, wlog=%u, useRowMatchFinder=%d useBlockSplitter=%d",
+ (U32)pledgedSrcSize, params->cParams.windowLog, (int)params->useRowMatchFinder, (int)params->useBlockSplitter);
+ assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams)));
+
+ zc->isFirstBlock = 1;
+
+ /* Set applied params early so we can modify them for LDM,
+ * and point params at the applied params.
+ */
+ zc->appliedParams = *params;
+ params = &zc->appliedParams;
+
+ assert(params->useRowMatchFinder != ZSTD_ps_auto);
+ assert(params->useBlockSplitter != ZSTD_ps_auto);
+ assert(params->ldmParams.enableLdm != ZSTD_ps_auto);
+ if (params->ldmParams.enableLdm == ZSTD_ps_enable) {
+ /* Adjust long distance matching parameters */
+ ZSTD_ldm_adjustParameters(&zc->appliedParams.ldmParams, &params->cParams);
+ assert(params->ldmParams.hashLog >= params->ldmParams.bucketSizeLog);
+ assert(params->ldmParams.hashRateLog < 32);
+ }
+
+ { size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << params->cParams.windowLog), pledgedSrcSize));
+ size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize);
+ U32 const divider = (params->cParams.minMatch==3) ? 3 : 4;
+ size_t const maxNbSeq = blockSize / divider;
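+ /* every sequence consumes at least minMatch (3 or 4) bytes of input, which bounds the number of sequences per block */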
+ size_t const buffOutSize = (zbuff == ZSTDb_buffered && params->outBufferMode == ZSTD_bm_buffered)
+ ? ZSTD_compressBound(blockSize) + 1
+ : 0;
+ size_t const buffInSize = (zbuff == ZSTDb_buffered && params->inBufferMode == ZSTD_bm_buffered)
+ ? windowSize + blockSize
+ : 0;
+ size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(params->ldmParams, blockSize);
+
+ int const indexTooClose = ZSTD_indexTooCloseToMax(zc->blockState.matchState.window);
+ int const dictTooBig = ZSTD_dictTooBig(loadedDictSize);
+ ZSTD_indexResetPolicy_e needsIndexReset =
+ (indexTooClose || dictTooBig || !zc->initialized) ? ZSTDirp_reset : ZSTDirp_continue;
+
+ size_t const neededSpace =
+ ZSTD_estimateCCtxSize_usingCCtxParams_internal(
+ &params->cParams, &params->ldmParams, zc->staticSize != 0, params->useRowMatchFinder,
+ buffInSize, buffOutSize, pledgedSrcSize);
+ int resizeWorkspace;
+
+ FORWARD_IF_ERROR(neededSpace, "cctx size estimate failed!");
+
+ if (!zc->staticSize) ZSTD_cwksp_bump_oversized_duration(ws, 0);
+
+ { /* Check if workspace is large enough, alloc a new one if needed */
+ int const workspaceTooSmall = ZSTD_cwksp_sizeof(ws) < neededSpace;
+ int const workspaceWasteful = ZSTD_cwksp_check_wasteful(ws, neededSpace);
+ resizeWorkspace = workspaceTooSmall || workspaceWasteful;
+ DEBUGLOG(4, "Need %zu B workspace", neededSpace);
+ DEBUGLOG(4, "windowSize: %zu - blockSize: %zu", windowSize, blockSize);
+
+ if (resizeWorkspace) {
+ DEBUGLOG(4, "Resize workspaceSize from %zuKB to %zuKB",
+ ZSTD_cwksp_sizeof(ws) >> 10,
+ neededSpace >> 10);
+
+ RETURN_ERROR_IF(zc->staticSize, memory_allocation, "static cctx : no resize");
+
+ needsIndexReset = ZSTDirp_reset;
+
+ ZSTD_cwksp_free(ws, zc->customMem);
+ FORWARD_IF_ERROR(ZSTD_cwksp_create(ws, neededSpace, zc->customMem), "");
+
+ DEBUGLOG(5, "reserving object space");
+ /* Statically sized space.
+ * entropyWorkspace never moves,
+ * though prev/next block swap places */
+ assert(ZSTD_cwksp_check_available(ws, 2 * sizeof(ZSTD_compressedBlockState_t)));
+ zc->blockState.prevCBlock = (ZSTD_compressedBlockState_t*) ZSTD_cwksp_reserve_object(ws, sizeof(ZSTD_compressedBlockState_t));
+ RETURN_ERROR_IF(zc->blockState.prevCBlock == NULL, memory_allocation, "couldn't allocate prevCBlock");
+ zc->blockState.nextCBlock = (ZSTD_compressedBlockState_t*) ZSTD_cwksp_reserve_object(ws, sizeof(ZSTD_compressedBlockState_t));
+ RETURN_ERROR_IF(zc->blockState.nextCBlock == NULL, memory_allocation, "couldn't allocate nextCBlock");
+ zc->entropyWorkspace = (U32*) ZSTD_cwksp_reserve_object(ws, ENTROPY_WORKSPACE_SIZE);
+ RETURN_ERROR_IF(zc->entropyWorkspace == NULL, memory_allocation, "couldn't allocate entropyWorkspace");
+ } }
+
+ ZSTD_cwksp_clear(ws);
+
+ /* init params */
+ zc->blockState.matchState.cParams = params->cParams;
+ zc->pledgedSrcSizePlusOne = pledgedSrcSize+1;
+ zc->consumedSrcSize = 0;
+ zc->producedCSize = 0;
+ if (pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN)
+ zc->appliedParams.fParams.contentSizeFlag = 0;
+ DEBUGLOG(4, "pledged content size : %u ; flag : %u",
+ (unsigned)pledgedSrcSize, zc->appliedParams.fParams.contentSizeFlag);
+ zc->blockSize = blockSize;
+
+ xxh64_reset(&zc->xxhState, 0);
+ zc->stage = ZSTDcs_init;
+ zc->dictID = 0;
+ zc->dictContentSize = 0;
+
+ ZSTD_reset_compressedBlockState(zc->blockState.prevCBlock);
+
+ /* ZSTD_wildcopy() is used to copy into the literals buffer,
+ * so we have to oversize the buffer by WILDCOPY_OVERLENGTH bytes.
+ */
+ zc->seqStore.litStart = ZSTD_cwksp_reserve_buffer(ws, blockSize + WILDCOPY_OVERLENGTH);
+ zc->seqStore.maxNbLit = blockSize;
+
+ /* buffers */
+ zc->bufferedPolicy = zbuff;
+ zc->inBuffSize = buffInSize;
+ zc->inBuff = (char*)ZSTD_cwksp_reserve_buffer(ws, buffInSize);
+ zc->outBuffSize = buffOutSize;
+ zc->outBuff = (char*)ZSTD_cwksp_reserve_buffer(ws, buffOutSize);
+
+ /* ldm bucketOffsets table */
+ if (params->ldmParams.enableLdm == ZSTD_ps_enable) {
+ /* TODO: avoid memset? */
+ size_t const numBuckets =
+ ((size_t)1) << (params->ldmParams.hashLog -
+ params->ldmParams.bucketSizeLog);
+ zc->ldmState.bucketOffsets = ZSTD_cwksp_reserve_buffer(ws, numBuckets);
+ ZSTD_memset(zc->ldmState.bucketOffsets, 0, numBuckets);
+ }
+
+ /* sequences storage */
+ ZSTD_referenceExternalSequences(zc, NULL, 0);
+ zc->seqStore.maxNbSeq = maxNbSeq;
+ zc->seqStore.llCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
+ zc->seqStore.mlCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
+ zc->seqStore.ofCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
+ zc->seqStore.sequencesStart = (seqDef*)ZSTD_cwksp_reserve_aligned(ws, maxNbSeq * sizeof(seqDef));
+
+ FORWARD_IF_ERROR(ZSTD_reset_matchState(
+ &zc->blockState.matchState,
+ ws,
+ &params->cParams,
+ params->useRowMatchFinder,
+ crp,
+ needsIndexReset,
+ ZSTD_resetTarget_CCtx), "");
+
+ /* ldm hash table */
+ if (params->ldmParams.enableLdm == ZSTD_ps_enable) {
+ /* TODO: avoid memset? */
+ size_t const ldmHSize = ((size_t)1) << params->ldmParams.hashLog;
+ zc->ldmState.hashTable = (ldmEntry_t*)ZSTD_cwksp_reserve_aligned(ws, ldmHSize * sizeof(ldmEntry_t));
+ ZSTD_memset(zc->ldmState.hashTable, 0, ldmHSize * sizeof(ldmEntry_t));
+ zc->ldmSequences = (rawSeq*)ZSTD_cwksp_reserve_aligned(ws, maxNbLdmSeq * sizeof(rawSeq));
+ zc->maxNbLdmSequences = maxNbLdmSeq;
+
+ ZSTD_window_init(&zc->ldmState.window);
+ zc->ldmState.loadedDictEnd = 0;
+ }
+
+ DEBUGLOG(3, "wksp: finished allocating, %zd bytes remain available", ZSTD_cwksp_available_space(ws));
+ assert(ZSTD_cwksp_estimated_space_within_bounds(ws, neededSpace, resizeWorkspace));
+
+ zc->initialized = 1;
+
+ return 0;
+ }
+}
+
+/* ZSTD_invalidateRepCodes() :
+ * ensures next compression will not use repcodes from previous block.
+ * Note : only works with regular variant;
+ * do not use with extDict variant ! */
+void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx) {
+ int i;
+ for (i=0; i<ZSTD_REP_NUM; i++) cctx->blockState.prevCBlock->rep[i] = 0;
+ assert(!ZSTD_window_hasExtDict(cctx->blockState.matchState.window));
+}
+
+/* These are the approximate sizes for each strategy past which copying the
+ * dictionary tables into the working context is faster than using them
+ * in-place.
+ */
+static const size_t attachDictSizeCutoffs[ZSTD_STRATEGY_MAX+1] = {
+ 8 KB, /* unused */
+ 8 KB, /* ZSTD_fast */
+ 16 KB, /* ZSTD_dfast */
+ 32 KB, /* ZSTD_greedy */
+ 32 KB, /* ZSTD_lazy */
+ 32 KB, /* ZSTD_lazy2 */
+ 32 KB, /* ZSTD_btlazy2 */
+ 32 KB, /* ZSTD_btopt */
+ 8 KB, /* ZSTD_btultra */
+ 8 KB /* ZSTD_btultra2 */
+};
+
+static int ZSTD_shouldAttachDict(const ZSTD_CDict* cdict,
+ const ZSTD_CCtx_params* params,
+ U64 pledgedSrcSize)
+{
+ size_t cutoff = attachDictSizeCutoffs[cdict->matchState.cParams.strategy];
+ int const dedicatedDictSearch = cdict->matchState.dedicatedDictSearch;
+ return dedicatedDictSearch
+ || ( ( pledgedSrcSize <= cutoff
+ || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
+ || params->attachDictPref == ZSTD_dictForceAttach )
+ && params->attachDictPref != ZSTD_dictForceCopy
+ && !params->forceWindow ); /* dictMatchState isn't correctly
+ * handled in _enforceMaxDist */
+}
+
+static size_t
+ZSTD_resetCCtx_byAttachingCDict(ZSTD_CCtx* cctx,
+ const ZSTD_CDict* cdict,
+ ZSTD_CCtx_params params,
+ U64 pledgedSrcSize,
+ ZSTD_buffered_policy_e zbuff)
+{
+ DEBUGLOG(4, "ZSTD_resetCCtx_byAttachingCDict() pledgedSrcSize=%llu",
+ (unsigned long long)pledgedSrcSize);
+ {
+ ZSTD_compressionParameters adjusted_cdict_cParams = cdict->matchState.cParams;
+ unsigned const windowLog = params.cParams.windowLog;
+ assert(windowLog != 0);
+ /* Resize working context table params for input only, since the dict
+ * has its own tables. */
+ /* pledgedSrcSize == 0 means 0! */
+
+ if (cdict->matchState.dedicatedDictSearch) {
+ ZSTD_dedicatedDictSearch_revertCParams(&adjusted_cdict_cParams);
+ }
+
+ params.cParams = ZSTD_adjustCParams_internal(adjusted_cdict_cParams, pledgedSrcSize,
+ cdict->dictContentSize, ZSTD_cpm_attachDict);
+ params.cParams.windowLog = windowLog;
+ params.useRowMatchFinder = cdict->useRowMatchFinder; /* cdict overrides */
+ FORWARD_IF_ERROR(ZSTD_resetCCtx_internal(cctx, &params, pledgedSrcSize,
+ /* loadedDictSize */ 0,
+ ZSTDcrp_makeClean, zbuff), "");
+ assert(cctx->appliedParams.cParams.strategy == adjusted_cdict_cParams.strategy);
+ }
+
+ { const U32 cdictEnd = (U32)( cdict->matchState.window.nextSrc
+ - cdict->matchState.window.base);
+ const U32 cdictLen = cdictEnd - cdict->matchState.window.dictLimit;
+ if (cdictLen == 0) {
+ /* don't even attach dictionaries with no contents */
+ DEBUGLOG(4, "skipping attaching empty dictionary");
+ } else {
+ DEBUGLOG(4, "attaching dictionary into context");
+ cctx->blockState.matchState.dictMatchState = &cdict->matchState;
+
+ /* prep working match state so dict matches never have negative indices
+ * when they are translated to the working context's index space. */
+ if (cctx->blockState.matchState.window.dictLimit < cdictEnd) {
+ cctx->blockState.matchState.window.nextSrc =
+ cctx->blockState.matchState.window.base + cdictEnd;
+ ZSTD_window_clear(&cctx->blockState.matchState.window);
+ }
+ /* loadedDictEnd is expressed within the referential of the active context */
+ cctx->blockState.matchState.loadedDictEnd = cctx->blockState.matchState.window.dictLimit;
+ } }
+
+ cctx->dictID = cdict->dictID;
+ cctx->dictContentSize = cdict->dictContentSize;
+
+ /* copy block state */
+ ZSTD_memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState));
+
+ return 0;
+}
+
+static size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* cctx,
+ const ZSTD_CDict* cdict,
+ ZSTD_CCtx_params params,
+ U64 pledgedSrcSize,
+ ZSTD_buffered_policy_e zbuff)
+{
+ const ZSTD_compressionParameters *cdict_cParams = &cdict->matchState.cParams;
+
+ assert(!cdict->matchState.dedicatedDictSearch);
+ DEBUGLOG(4, "ZSTD_resetCCtx_byCopyingCDict() pledgedSrcSize=%llu",
+ (unsigned long long)pledgedSrcSize);
+
+ { unsigned const windowLog = params.cParams.windowLog;
+ assert(windowLog != 0);
+ /* Copy only compression parameters related to tables. */
+ params.cParams = *cdict_cParams;
+ params.cParams.windowLog = windowLog;
+ params.useRowMatchFinder = cdict->useRowMatchFinder;
+ FORWARD_IF_ERROR(ZSTD_resetCCtx_internal(cctx, &params, pledgedSrcSize,
+ /* loadedDictSize */ 0,
+ ZSTDcrp_leaveDirty, zbuff), "");
+ assert(cctx->appliedParams.cParams.strategy == cdict_cParams->strategy);
+ assert(cctx->appliedParams.cParams.hashLog == cdict_cParams->hashLog);
+ assert(cctx->appliedParams.cParams.chainLog == cdict_cParams->chainLog);
+ }
+
+ ZSTD_cwksp_mark_tables_dirty(&cctx->workspace);
+ assert(params.useRowMatchFinder != ZSTD_ps_auto);
+
+ /* copy tables */
+ { size_t const chainSize = ZSTD_allocateChainTable(cdict_cParams->strategy, cdict->useRowMatchFinder, 0 /* DDS guaranteed disabled */)
+ ? ((size_t)1 << cdict_cParams->chainLog)
+ : 0;
+ size_t const hSize = (size_t)1 << cdict_cParams->hashLog;
+
+ ZSTD_memcpy(cctx->blockState.matchState.hashTable,
+ cdict->matchState.hashTable,
+ hSize * sizeof(U32));
+ /* Do not copy cdict's chainTable if cctx has parameters such that it would not use chainTable */
+ if (ZSTD_allocateChainTable(cctx->appliedParams.cParams.strategy, cctx->appliedParams.useRowMatchFinder, 0 /* forDDSDict */)) {
+ ZSTD_memcpy(cctx->blockState.matchState.chainTable,
+ cdict->matchState.chainTable,
+ chainSize * sizeof(U32));
+ }
+ /* copy tag table */
+ if (ZSTD_rowMatchFinderUsed(cdict_cParams->strategy, cdict->useRowMatchFinder)) {
+ size_t const tagTableSize = hSize*sizeof(U16);
+ ZSTD_memcpy(cctx->blockState.matchState.tagTable,
+ cdict->matchState.tagTable,
+ tagTableSize);
+ }
+ }
+
+ /* Zero the hashTable3, since the cdict never fills it */
+ { int const h3log = cctx->blockState.matchState.hashLog3;
+ size_t const h3Size = h3log ? ((size_t)1 << h3log) : 0;
+ assert(cdict->matchState.hashLog3 == 0);
+ ZSTD_memset(cctx->blockState.matchState.hashTable3, 0, h3Size * sizeof(U32));
+ }
+
+ ZSTD_cwksp_mark_tables_clean(&cctx->workspace);
+
+ /* copy dictionary offsets */
+ { ZSTD_matchState_t const* srcMatchState = &cdict->matchState;
+ ZSTD_matchState_t* dstMatchState = &cctx->blockState.matchState;
+ dstMatchState->window = srcMatchState->window;
+ dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
+ dstMatchState->loadedDictEnd = srcMatchState->loadedDictEnd;
+ }
+
+ cctx->dictID = cdict->dictID;
+ cctx->dictContentSize = cdict->dictContentSize;
+
+ /* copy block state */
+ ZSTD_memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState));
+
+ return 0;
+}
+
+/* We have a choice between copying the dictionary context into the working
+ * context, or referencing the dictionary context from the working context
+ * in-place. We decide here which strategy to use. */
+static size_t ZSTD_resetCCtx_usingCDict(ZSTD_CCtx* cctx,
+ const ZSTD_CDict* cdict,
+ const ZSTD_CCtx_params* params,
+ U64 pledgedSrcSize,
+ ZSTD_buffered_policy_e zbuff)
+{
+
+ DEBUGLOG(4, "ZSTD_resetCCtx_usingCDict (pledgedSrcSize=%u)",
+ (unsigned)pledgedSrcSize);
+
+ if (ZSTD_shouldAttachDict(cdict, params, pledgedSrcSize)) {
+ return ZSTD_resetCCtx_byAttachingCDict(
+ cctx, cdict, *params, pledgedSrcSize, zbuff);
+ } else {
+ return ZSTD_resetCCtx_byCopyingCDict(
+ cctx, cdict, *params, pledgedSrcSize, zbuff);
+ }
+}
+
+/*! ZSTD_copyCCtx_internal() :
+ * Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
+ * Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
+ * The "context", in this case, refers to the hash and chain tables,
+ * entropy tables, and dictionary references.
+ * `windowLog` value is enforced if != 0, otherwise value is copied from srcCCtx.
+ * @return : 0, or an error code */
+static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx,
+ const ZSTD_CCtx* srcCCtx,
+ ZSTD_frameParameters fParams,
+ U64 pledgedSrcSize,
+ ZSTD_buffered_policy_e zbuff)
+{
+ RETURN_ERROR_IF(srcCCtx->stage!=ZSTDcs_init, stage_wrong,
+ "Can't copy a ctx that's not in init stage.");
+ DEBUGLOG(5, "ZSTD_copyCCtx_internal");
+ ZSTD_memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem));
+ { ZSTD_CCtx_params params = dstCCtx->requestedParams;
+ /* Copy only compression parameters related to tables. */
+ params.cParams = srcCCtx->appliedParams.cParams;
+ assert(srcCCtx->appliedParams.useRowMatchFinder != ZSTD_ps_auto);
+ assert(srcCCtx->appliedParams.useBlockSplitter != ZSTD_ps_auto);
+ assert(srcCCtx->appliedParams.ldmParams.enableLdm != ZSTD_ps_auto);
+ params.useRowMatchFinder = srcCCtx->appliedParams.useRowMatchFinder;
+ params.useBlockSplitter = srcCCtx->appliedParams.useBlockSplitter;
+ params.ldmParams = srcCCtx->appliedParams.ldmParams;
+ params.fParams = fParams;
+ ZSTD_resetCCtx_internal(dstCCtx, &params, pledgedSrcSize,
+ /* loadedDictSize */ 0,
+ ZSTDcrp_leaveDirty, zbuff);
+ assert(dstCCtx->appliedParams.cParams.windowLog == srcCCtx->appliedParams.cParams.windowLog);
+ assert(dstCCtx->appliedParams.cParams.strategy == srcCCtx->appliedParams.cParams.strategy);
+ assert(dstCCtx->appliedParams.cParams.hashLog == srcCCtx->appliedParams.cParams.hashLog);
+ assert(dstCCtx->appliedParams.cParams.chainLog == srcCCtx->appliedParams.cParams.chainLog);
+ assert(dstCCtx->blockState.matchState.hashLog3 == srcCCtx->blockState.matchState.hashLog3);
+ }
+
+ ZSTD_cwksp_mark_tables_dirty(&dstCCtx->workspace);
+
+ /* copy tables */
+ { size_t const chainSize = ZSTD_allocateChainTable(srcCCtx->appliedParams.cParams.strategy,
+ srcCCtx->appliedParams.useRowMatchFinder,
+ 0 /* forDDSDict */)
+ ? ((size_t)1 << srcCCtx->appliedParams.cParams.chainLog)
+ : 0;
+ size_t const hSize = (size_t)1 << srcCCtx->appliedParams.cParams.hashLog;
+ int const h3log = srcCCtx->blockState.matchState.hashLog3;
+ size_t const h3Size = h3log ? ((size_t)1 << h3log) : 0;
+
+ ZSTD_memcpy(dstCCtx->blockState.matchState.hashTable,
+ srcCCtx->blockState.matchState.hashTable,
+ hSize * sizeof(U32));
+ ZSTD_memcpy(dstCCtx->blockState.matchState.chainTable,
+ srcCCtx->blockState.matchState.chainTable,
+ chainSize * sizeof(U32));
+ ZSTD_memcpy(dstCCtx->blockState.matchState.hashTable3,
+ srcCCtx->blockState.matchState.hashTable3,
+ h3Size * sizeof(U32));
+ }
+
+ ZSTD_cwksp_mark_tables_clean(&dstCCtx->workspace);
+
+ /* copy dictionary offsets */
+ {
+ const ZSTD_matchState_t* srcMatchState = &srcCCtx->blockState.matchState;
+ ZSTD_matchState_t* dstMatchState = &dstCCtx->blockState.matchState;
+ dstMatchState->window = srcMatchState->window;
+ dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
+ dstMatchState->loadedDictEnd = srcMatchState->loadedDictEnd;
+ }
+ dstCCtx->dictID = srcCCtx->dictID;
+ dstCCtx->dictContentSize = srcCCtx->dictContentSize;
+
+ /* copy block state */
+ ZSTD_memcpy(dstCCtx->blockState.prevCBlock, srcCCtx->blockState.prevCBlock, sizeof(*srcCCtx->blockState.prevCBlock));
+
+ return 0;
+}
+
+/*! ZSTD_copyCCtx() :
+ * Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
+ * Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
+ * pledgedSrcSize==0 means "unknown".
+ * @return : 0, or an error code */
+size_t ZSTD_copyCCtx(ZSTD_CCtx* dstCCtx, const ZSTD_CCtx* srcCCtx, unsigned long long pledgedSrcSize)
+{
+ ZSTD_frameParameters fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
+ ZSTD_buffered_policy_e const zbuff = srcCCtx->bufferedPolicy;
+ ZSTD_STATIC_ASSERT((U32)ZSTDb_buffered==1);
+ if (pledgedSrcSize==0) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;
+ fParams.contentSizeFlag = (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN);
+
+ return ZSTD_copyCCtx_internal(dstCCtx, srcCCtx,
+ fParams, pledgedSrcSize,
+ zbuff);
+}
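+
+/* Usage sketch (illustrative only, assuming the allocating API) :
+ * ZSTD_CCtx* const ref = ZSTD_createCCtx();
+ * ZSTD_compressBegin(ref, 5); <- prepare the reference once; it stays in init stage
+ * ZSTD_CCtx* const work = ZSTD_createCCtx();
+ * ZSTD_copyCCtx(work, ref, 0); <- duplicate before each new frame; 0 = unknown srcSize
+ */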
+
+
+#define ZSTD_ROWSIZE 16
+/*! ZSTD_reduceTable() :
+ * reduce table indexes by `reducerValue`, or squash to zero.
+ * PreserveMark preserves "unsorted mark" for btlazy2 strategy.
+ * It must be set to a constant 0/1 value, so the branch is removed during inlining.
+ * Assumes table size is a multiple of ZSTD_ROWSIZE
+ * to help auto-vectorization */
+FORCE_INLINE_TEMPLATE void
+ZSTD_reduceTable_internal (U32* const table, U32 const size, U32 const reducerValue, int const preserveMark)
+{
+ int const nbRows = (int)size / ZSTD_ROWSIZE;
+ int cellNb = 0;
+ int rowNb;
+ /* Protect special index values < ZSTD_WINDOW_START_INDEX. */
+ U32 const reducerThreshold = reducerValue + ZSTD_WINDOW_START_INDEX;
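+ /* cells below the threshold would underflow past the window start : they are squashed to zero instead */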
+ assert((size & (ZSTD_ROWSIZE-1)) == 0); /* multiple of ZSTD_ROWSIZE */
+ assert(size < (1U<<31)); /* can be cast to int */
+
+
+ for (rowNb=0 ; rowNb < nbRows ; rowNb++) {
+ int column;
+ for (column=0; column<ZSTD_ROWSIZE; column++) {
+ U32 newVal;
+ if (preserveMark && table[cellNb] == ZSTD_DUBT_UNSORTED_MARK) {
+ /* This write is pointless, but appears necessary for the compiler
+ * to auto-vectorize the loop. */
+ newVal = ZSTD_DUBT_UNSORTED_MARK;
+ } else if (table[cellNb] < reducerThreshold) {
+ newVal = 0;
+ } else {
+ newVal = table[cellNb] - reducerValue;
+ }
+ table[cellNb] = newVal;
+ cellNb++;
+ } }
+}
+
+static void ZSTD_reduceTable(U32* const table, U32 const size, U32 const reducerValue)
+{
+ ZSTD_reduceTable_internal(table, size, reducerValue, 0);
+}
+
+static void ZSTD_reduceTable_btlazy2(U32* const table, U32 const size, U32 const reducerValue)
+{
+ ZSTD_reduceTable_internal(table, size, reducerValue, 1);
+}
+
+/*! ZSTD_reduceIndex() :
+ * rescale all indexes to avoid future overflow (indexes are U32) */
+static void ZSTD_reduceIndex (ZSTD_matchState_t* ms, ZSTD_CCtx_params const* params, const U32 reducerValue)
+{
+ { U32 const hSize = (U32)1 << params->cParams.hashLog;
+ ZSTD_reduceTable(ms->hashTable, hSize, reducerValue);
+ }
+
+ if (ZSTD_allocateChainTable(params->cParams.strategy, params->useRowMatchFinder, (U32)ms->dedicatedDictSearch)) {
+ U32 const chainSize = (U32)1 << params->cParams.chainLog;
+ if (params->cParams.strategy == ZSTD_btlazy2)
+ ZSTD_reduceTable_btlazy2(ms->chainTable, chainSize, reducerValue);
+ else
+ ZSTD_reduceTable(ms->chainTable, chainSize, reducerValue);
+ }
+
+ if (ms->hashLog3) {
+ U32 const h3Size = (U32)1 << ms->hashLog3;
+ ZSTD_reduceTable(ms->hashTable3, h3Size, reducerValue);
+ }
+}
+
+
+/*-*******************************************************
+* Block entropic compression
+*********************************************************/
+
+/* See doc/zstd_compression_format.md for detailed format description */
+
+void ZSTD_seqToCodes(const seqStore_t* seqStorePtr)
+{
+ const seqDef* const sequences = seqStorePtr->sequencesStart;
+ BYTE* const llCodeTable = seqStorePtr->llCode;
+ BYTE* const ofCodeTable = seqStorePtr->ofCode;
+ BYTE* const mlCodeTable = seqStorePtr->mlCode;
+ U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
+ U32 u;
+ assert(nbSeq <= seqStorePtr->maxNbSeq);
+ for (u=0; u<nbSeq; u++) {
+ U32 const llv = sequences[u].litLength;
+ U32 const mlv = sequences[u].mlBase;
+ llCodeTable[u] = (BYTE)ZSTD_LLcode(llv);
+ ofCodeTable[u] = (BYTE)ZSTD_highbit32(sequences[u].offBase);
+ mlCodeTable[u] = (BYTE)ZSTD_MLcode(mlv);
+ }
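+ /* a sequence flagged "long" stores a truncated length (true length = stored + 0x10000) :
+ * forcing the maximum code routes it through the widest extra-bits encoding,
+ * whose baseline restores the 0x10000 */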
+ if (seqStorePtr->longLengthType==ZSTD_llt_literalLength)
+ llCodeTable[seqStorePtr->longLengthPos] = MaxLL;
+ if (seqStorePtr->longLengthType==ZSTD_llt_matchLength)
+ mlCodeTable[seqStorePtr->longLengthPos] = MaxML;
+}
+
+/* ZSTD_useTargetCBlockSize():
+ * Returns whether the target compressed block size parameter is in use.
+ * If so, compression makes a best effort to produce compressed blocks of around targetCBlockSize bytes.
+ * Returns 1 if true, 0 otherwise. */
+static int ZSTD_useTargetCBlockSize(const ZSTD_CCtx_params* cctxParams)
+{
+ DEBUGLOG(5, "ZSTD_useTargetCBlockSize (targetCBlockSize=%zu)", cctxParams->targetCBlockSize);
+ return (cctxParams->targetCBlockSize != 0);
+}
+
+/* ZSTD_blockSplitterEnabled():
+ * Returns whether the block splitting parameter is in use.
+ * If so, compression makes a best effort to split blocks in order to improve the compression ratio.
+ * At the time this function is called, the parameter must be finalized.
+ * Returns 1 if true, 0 otherwise. */
+static int ZSTD_blockSplitterEnabled(ZSTD_CCtx_params* cctxParams)
+{
+ DEBUGLOG(5, "ZSTD_blockSplitterEnabled (useBlockSplitter=%d)", cctxParams->useBlockSplitter);
+ assert(cctxParams->useBlockSplitter != ZSTD_ps_auto);
+ return (cctxParams->useBlockSplitter == ZSTD_ps_enable);
+}
+
+/* Type returned by ZSTD_buildSequencesStatistics containing finalized symbol encoding types
+ * and size of the sequences statistics
+ */
+typedef struct {
+ U32 LLtype;
+ U32 Offtype;
+ U32 MLtype;
+ size_t size;
+ size_t lastCountSize; /* Accounts for bug in 1.3.4. More detail in ZSTD_entropyCompressSeqStore_internal() */
+} ZSTD_symbolEncodingTypeStats_t;
+
+/* ZSTD_buildSequencesStatistics():
+ * Returns a ZSTD_symbolEncodingTypeStats_t, or a zstd error code in the `size` field.
+ * Modifies `nextEntropy` to have the appropriate values as a side effect.
+ * nbSeq must be greater than 0.
+ *
+ * entropyWkspSize must be at least ENTROPY_WORKSPACE_SIZE - (MaxSeq + 1)*sizeof(U32)
+ */
+static ZSTD_symbolEncodingTypeStats_t
+ZSTD_buildSequencesStatistics(seqStore_t* seqStorePtr, size_t nbSeq,
+ const ZSTD_fseCTables_t* prevEntropy, ZSTD_fseCTables_t* nextEntropy,
+ BYTE* dst, const BYTE* const dstEnd,
+ ZSTD_strategy strategy, unsigned* countWorkspace,
+ void* entropyWorkspace, size_t entropyWkspSize) {
+ BYTE* const ostart = dst;
+ const BYTE* const oend = dstEnd;
+ BYTE* op = ostart;
+ FSE_CTable* CTable_LitLength = nextEntropy->litlengthCTable;
+ FSE_CTable* CTable_OffsetBits = nextEntropy->offcodeCTable;
+ FSE_CTable* CTable_MatchLength = nextEntropy->matchlengthCTable;
+ const BYTE* const ofCodeTable = seqStorePtr->ofCode;
+ const BYTE* const llCodeTable = seqStorePtr->llCode;
+ const BYTE* const mlCodeTable = seqStorePtr->mlCode;
+ ZSTD_symbolEncodingTypeStats_t stats;
+
+ stats.lastCountSize = 0;
+ /* convert length/distances into codes */
+ ZSTD_seqToCodes(seqStorePtr);
+ assert(op <= oend);
+ assert(nbSeq != 0); /* ZSTD_selectEncodingType() divides by nbSeq */
+ /* build CTable for Literal Lengths */
+ { unsigned max = MaxLL;
+ size_t const mostFrequent = HIST_countFast_wksp(countWorkspace, &max, llCodeTable, nbSeq, entropyWorkspace, entropyWkspSize); /* can't fail */
+ DEBUGLOG(5, "Building LL table");
+ nextEntropy->litlength_repeatMode = prevEntropy->litlength_repeatMode;
+ stats.LLtype = ZSTD_selectEncodingType(&nextEntropy->litlength_repeatMode,
+ countWorkspace, max, mostFrequent, nbSeq,
+ LLFSELog, prevEntropy->litlengthCTable,
+ LL_defaultNorm, LL_defaultNormLog,
+ ZSTD_defaultAllowed, strategy);
+ assert(set_basic < set_compressed && set_rle < set_compressed);
+ assert(!(stats.LLtype < set_compressed && nextEntropy->litlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
+ { size_t const countSize = ZSTD_buildCTable(
+ op, (size_t)(oend - op),
+ CTable_LitLength, LLFSELog, (symbolEncodingType_e)stats.LLtype,
+ countWorkspace, max, llCodeTable, nbSeq,
+ LL_defaultNorm, LL_defaultNormLog, MaxLL,
+ prevEntropy->litlengthCTable,
+ sizeof(prevEntropy->litlengthCTable),
+ entropyWorkspace, entropyWkspSize);
+ if (ZSTD_isError(countSize)) {
+ DEBUGLOG(3, "ZSTD_buildCTable for LitLens failed");
+ stats.size = countSize;
+ return stats;
+ }
+ if (stats.LLtype == set_compressed)
+ stats.lastCountSize = countSize;
+ op += countSize;
+ assert(op <= oend);
+ } }
+ /* build CTable for Offsets */
+ { unsigned max = MaxOff;
+ size_t const mostFrequent = HIST_countFast_wksp(
+ countWorkspace, &max, ofCodeTable, nbSeq, entropyWorkspace, entropyWkspSize); /* can't fail */
+ /* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */
+ ZSTD_defaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? ZSTD_defaultAllowed : ZSTD_defaultDisallowed;
+ DEBUGLOG(5, "Building OF table");
+ nextEntropy->offcode_repeatMode = prevEntropy->offcode_repeatMode;
+ stats.Offtype = ZSTD_selectEncodingType(&nextEntropy->offcode_repeatMode,
+ countWorkspace, max, mostFrequent, nbSeq,
+ OffFSELog, prevEntropy->offcodeCTable,
+ OF_defaultNorm, OF_defaultNormLog,
+ defaultPolicy, strategy);
+ assert(!(stats.Offtype < set_compressed && nextEntropy->offcode_repeatMode != FSE_repeat_none)); /* We don't copy tables */
+ { size_t const countSize = ZSTD_buildCTable(
+ op, (size_t)(oend - op),
+ CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)stats.Offtype,
+ countWorkspace, max, ofCodeTable, nbSeq,
+ OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
+ prevEntropy->offcodeCTable,
+ sizeof(prevEntropy->offcodeCTable),
+ entropyWorkspace, entropyWkspSize);
+ if (ZSTD_isError(countSize)) {
+ DEBUGLOG(3, "ZSTD_buildCTable for Offsets failed");
+ stats.size = countSize;
+ return stats;
+ }
+ if (stats.Offtype == set_compressed)
+ stats.lastCountSize = countSize;
+ op += countSize;
+ assert(op <= oend);
+ } }
+ /* build CTable for MatchLengths */
+ { unsigned max = MaxML;
+ size_t const mostFrequent = HIST_countFast_wksp(
+ countWorkspace, &max, mlCodeTable, nbSeq, entropyWorkspace, entropyWkspSize); /* can't fail */
+ DEBUGLOG(5, "Building ML table (remaining space : %i)", (int)(oend-op));
+ nextEntropy->matchlength_repeatMode = prevEntropy->matchlength_repeatMode;
+ stats.MLtype = ZSTD_selectEncodingType(&nextEntropy->matchlength_repeatMode,
+ countWorkspace, max, mostFrequent, nbSeq,
+ MLFSELog, prevEntropy->matchlengthCTable,
+ ML_defaultNorm, ML_defaultNormLog,
+ ZSTD_defaultAllowed, strategy);
+ assert(!(stats.MLtype < set_compressed && nextEntropy->matchlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
+ { size_t const countSize = ZSTD_buildCTable(
+ op, (size_t)(oend - op),
+ CTable_MatchLength, MLFSELog, (symbolEncodingType_e)stats.MLtype,
+ countWorkspace, max, mlCodeTable, nbSeq,
+ ML_defaultNorm, ML_defaultNormLog, MaxML,
+ prevEntropy->matchlengthCTable,
+ sizeof(prevEntropy->matchlengthCTable),
+ entropyWorkspace, entropyWkspSize);
+ if (ZSTD_isError(countSize)) {
+ DEBUGLOG(3, "ZSTD_buildCTable for MatchLengths failed");
+ stats.size = countSize;
+ return stats;
+ }
+ if (stats.MLtype == set_compressed)
+ stats.lastCountSize = countSize;
+ op += countSize;
+ assert(op <= oend);
+ } }
+ stats.size = (size_t)(op-ostart);
+ return stats;
+}
+
+/* ZSTD_entropyCompressSeqStore_internal():
+ * compresses both literals and sequences
+ * Returns compressed size of block, or a zstd error.
+ */
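+/* literals are suspected uncompressible when a block averages at least this many literals per sequence */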
+#define SUSPECT_UNCOMPRESSIBLE_LITERAL_RATIO 20
+MEM_STATIC size_t
+ZSTD_entropyCompressSeqStore_internal(seqStore_t* seqStorePtr,
+ const ZSTD_entropyCTables_t* prevEntropy,
+ ZSTD_entropyCTables_t* nextEntropy,
+ const ZSTD_CCtx_params* cctxParams,
+ void* dst, size_t dstCapacity,
+ void* entropyWorkspace, size_t entropyWkspSize,
+ const int bmi2)
+{
+ const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN;
+ ZSTD_strategy const strategy = cctxParams->cParams.strategy;
+ unsigned* count = (unsigned*)entropyWorkspace;
+ FSE_CTable* CTable_LitLength = nextEntropy->fse.litlengthCTable;
+ FSE_CTable* CTable_OffsetBits = nextEntropy->fse.offcodeCTable;
+ FSE_CTable* CTable_MatchLength = nextEntropy->fse.matchlengthCTable;
+ const seqDef* const sequences = seqStorePtr->sequencesStart;
+ const size_t nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart;
+ const BYTE* const ofCodeTable = seqStorePtr->ofCode;
+ const BYTE* const llCodeTable = seqStorePtr->llCode;
+ const BYTE* const mlCodeTable = seqStorePtr->mlCode;
+ BYTE* const ostart = (BYTE*)dst;
+ BYTE* const oend = ostart + dstCapacity;
+ BYTE* op = ostart;
+ size_t lastCountSize;
+
+ entropyWorkspace = count + (MaxSeq + 1);
+ entropyWkspSize -= (MaxSeq + 1) * sizeof(*count);
+
+ DEBUGLOG(4, "ZSTD_entropyCompressSeqStore_internal (nbSeq=%zu)", nbSeq);
+ ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));
+ assert(entropyWkspSize >= HUF_WORKSPACE_SIZE);
+
+ /* Compress literals */
+ { const BYTE* const literals = seqStorePtr->litStart;
+ size_t const numSequences = seqStorePtr->sequences - seqStorePtr->sequencesStart;
+ size_t const numLiterals = seqStorePtr->lit - seqStorePtr->litStart;
+ /* Base suspicion of uncompressibility on ratio of literals to sequences */
+ unsigned const suspectUncompressible = (numSequences == 0) || (numLiterals / numSequences >= SUSPECT_UNCOMPRESSIBLE_LITERAL_RATIO);
+ size_t const litSize = (size_t)(seqStorePtr->lit - literals);
+ size_t const cSize = ZSTD_compressLiterals(
+ &prevEntropy->huf, &nextEntropy->huf,
+ cctxParams->cParams.strategy,
+ ZSTD_literalsCompressionIsDisabled(cctxParams),
+ op, dstCapacity,
+ literals, litSize,
+ entropyWorkspace, entropyWkspSize,
+ bmi2, suspectUncompressible);
+ FORWARD_IF_ERROR(cSize, "ZSTD_compressLiterals failed");
+ assert(cSize <= dstCapacity);
+ op += cSize;
+ }
+
+ /* Sequences Header */
+ RETURN_ERROR_IF((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/,
+ dstSize_tooSmall, "Can't fit seq hdr in output buf!");
+ if (nbSeq < 128) {
+ *op++ = (BYTE)nbSeq;
+ } else if (nbSeq < LONGNBSEQ) {
+ op[0] = (BYTE)((nbSeq>>8) + 0x80);
+ op[1] = (BYTE)nbSeq;
+ op+=2;
+ } else {
+ op[0]=0xFF;
+ MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ));
+ op+=3;
+ }
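+ /* example : nbSeq==300 -> 300 < LONGNBSEQ (0x7F00), so the 2-byte form applies :
+ * op[0] = 0x80 + (300>>8) = 0x81, op[1] = 300 & 0xFF = 0x2C */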
+ assert(op <= oend);
+ if (nbSeq==0) {
+ /* Copy the old tables over as if we repeated them */
+ ZSTD_memcpy(&nextEntropy->fse, &prevEntropy->fse, sizeof(prevEntropy->fse));
+ return (size_t)(op - ostart);
+ }
+ {
+ ZSTD_symbolEncodingTypeStats_t stats;
+ BYTE* seqHead = op++;
+ /* build stats for sequences */
+ stats = ZSTD_buildSequencesStatistics(seqStorePtr, nbSeq,
+ &prevEntropy->fse, &nextEntropy->fse,
+ op, oend,
+ strategy, count,
+ entropyWorkspace, entropyWkspSize);
+ FORWARD_IF_ERROR(stats.size, "ZSTD_buildSequencesStatistics failed!");
+ *seqHead = (BYTE)((stats.LLtype<<6) + (stats.Offtype<<4) + (stats.MLtype<<2));
+ lastCountSize = stats.lastCountSize;
+ op += stats.size;
+ }
+
+ { size_t const bitstreamSize = ZSTD_encodeSequences(
+ op, (size_t)(oend - op),
+ CTable_MatchLength, mlCodeTable,
+ CTable_OffsetBits, ofCodeTable,
+ CTable_LitLength, llCodeTable,
+ sequences, nbSeq,
+ longOffsets, bmi2);
+ FORWARD_IF_ERROR(bitstreamSize, "ZSTD_encodeSequences failed");
+ op += bitstreamSize;
+ assert(op <= oend);
+ /* zstd versions <= 1.3.4 mistakenly report corruption when
+ * FSE_readNCount() receives a buffer < 4 bytes.
+ * Fixed by https://github.com/facebook/zstd/pull/1146.
+ * This can happen when the last set_compressed table present is 2
+ * bytes and the bitstream is only one byte.
+ * In this exceedingly rare case, we will simply emit an uncompressed
+ * block, since it isn't worth optimizing.
+ */
+ if (lastCountSize && (lastCountSize + bitstreamSize) < 4) {
+ /* lastCountSize >= 2 && bitstreamSize > 0 ==> lastCountSize == 3 */
+ assert(lastCountSize + bitstreamSize == 3);
+ DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.3.4 by "
+ "emitting an uncompressed block.");
+ return 0;
+ }
+ }
+
+ DEBUGLOG(5, "compressed block size : %u", (unsigned)(op - ostart));
+ return (size_t)(op - ostart);
+}
+
+MEM_STATIC size_t
+ZSTD_entropyCompressSeqStore(seqStore_t* seqStorePtr,
+ const ZSTD_entropyCTables_t* prevEntropy,
+ ZSTD_entropyCTables_t* nextEntropy,
+ const ZSTD_CCtx_params* cctxParams,
+ void* dst, size_t dstCapacity,
+ size_t srcSize,
+ void* entropyWorkspace, size_t entropyWkspSize,
+ int bmi2)
+{
+ size_t const cSize = ZSTD_entropyCompressSeqStore_internal(
+ seqStorePtr, prevEntropy, nextEntropy, cctxParams,
+ dst, dstCapacity,
+ entropyWorkspace, entropyWkspSize, bmi2);
+ if (cSize == 0) return 0;
+ /* When srcSize <= dstCapacity, there is enough space to write a raw uncompressed block.
+ * Since we ran out of space, the block must not be compressible, so fall back to a raw uncompressed block.
+ */
+ if ((cSize == ERROR(dstSize_tooSmall)) & (srcSize <= dstCapacity))
+ return 0; /* block not compressed */
+ FORWARD_IF_ERROR(cSize, "ZSTD_entropyCompressSeqStore_internal failed");
+
+ /* Check compressibility */
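+ /* the compressed block must undercut the raw size by more than ZSTD_minGain() bytes, otherwise emit it uncompressed */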
+ { size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, cctxParams->cParams.strategy);
+ if (cSize >= maxCSize) return 0; /* block not compressed */
+ }
+ DEBUGLOG(4, "ZSTD_entropyCompressSeqStore() cSize: %zu", cSize);
+ return cSize;
+}
+
+/* ZSTD_selectBlockCompressor() :
+ * Not static, but internal use only (used by long distance matcher)
+ * assumption : strat is a valid strategy */
+ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_paramSwitch_e useRowMatchFinder, ZSTD_dictMode_e dictMode)
+{
+ static const ZSTD_blockCompressor blockCompressor[4][ZSTD_STRATEGY_MAX+1] = {
+ { ZSTD_compressBlock_fast /* default for 0 */,
+ ZSTD_compressBlock_fast,
+ ZSTD_compressBlock_doubleFast,
+ ZSTD_compressBlock_greedy,
+ ZSTD_compressBlock_lazy,
+ ZSTD_compressBlock_lazy2,
+ ZSTD_compressBlock_btlazy2,
+ ZSTD_compressBlock_btopt,
+ ZSTD_compressBlock_btultra,
+ ZSTD_compressBlock_btultra2 },
+ { ZSTD_compressBlock_fast_extDict /* default for 0 */,
+ ZSTD_compressBlock_fast_extDict,
+ ZSTD_compressBlock_doubleFast_extDict,
+ ZSTD_compressBlock_greedy_extDict,
+ ZSTD_compressBlock_lazy_extDict,
+ ZSTD_compressBlock_lazy2_extDict,
+ ZSTD_compressBlock_btlazy2_extDict,
+ ZSTD_compressBlock_btopt_extDict,
+ ZSTD_compressBlock_btultra_extDict,
+ ZSTD_compressBlock_btultra_extDict },
+ { ZSTD_compressBlock_fast_dictMatchState /* default for 0 */,
+ ZSTD_compressBlock_fast_dictMatchState,
+ ZSTD_compressBlock_doubleFast_dictMatchState,
+ ZSTD_compressBlock_greedy_dictMatchState,
+ ZSTD_compressBlock_lazy_dictMatchState,
+ ZSTD_compressBlock_lazy2_dictMatchState,
+ ZSTD_compressBlock_btlazy2_dictMatchState,
+ ZSTD_compressBlock_btopt_dictMatchState,
+ ZSTD_compressBlock_btultra_dictMatchState,
+ ZSTD_compressBlock_btultra_dictMatchState },
+ { NULL /* default for 0 */,
+ NULL,
+ NULL,
+ ZSTD_compressBlock_greedy_dedicatedDictSearch,
+ ZSTD_compressBlock_lazy_dedicatedDictSearch,
+ ZSTD_compressBlock_lazy2_dedicatedDictSearch,
+ NULL,
+ NULL,
+ NULL,
+ NULL }
+ };
+ ZSTD_blockCompressor selectedCompressor;
+ ZSTD_STATIC_ASSERT((unsigned)ZSTD_fast == 1);
+
+ assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, strat));
+ DEBUGLOG(4, "Selected block compressor: dictMode=%d strat=%d rowMatchfinder=%d", (int)dictMode, (int)strat, (int)useRowMatchFinder);
+ if (ZSTD_rowMatchFinderUsed(strat, useRowMatchFinder)) {
+ static const ZSTD_blockCompressor rowBasedBlockCompressors[4][3] = {
+ { ZSTD_compressBlock_greedy_row,
+ ZSTD_compressBlock_lazy_row,
+ ZSTD_compressBlock_lazy2_row },
+ { ZSTD_compressBlock_greedy_extDict_row,
+ ZSTD_compressBlock_lazy_extDict_row,
+ ZSTD_compressBlock_lazy2_extDict_row },
+ { ZSTD_compressBlock_greedy_dictMatchState_row,
+ ZSTD_compressBlock_lazy_dictMatchState_row,
+ ZSTD_compressBlock_lazy2_dictMatchState_row },
+ { ZSTD_compressBlock_greedy_dedicatedDictSearch_row,
+ ZSTD_compressBlock_lazy_dedicatedDictSearch_row,
+ ZSTD_compressBlock_lazy2_dedicatedDictSearch_row }
+ };
+ DEBUGLOG(4, "Selecting a row-based matchfinder");
+ assert(useRowMatchFinder != ZSTD_ps_auto);
+ selectedCompressor = rowBasedBlockCompressors[(int)dictMode][(int)strat - (int)ZSTD_greedy];
+ } else {
+ selectedCompressor = blockCompressor[(int)dictMode][(int)strat];
+ }
+ assert(selectedCompressor != NULL);
+ return selectedCompressor;
+}
+
+static void ZSTD_storeLastLiterals(seqStore_t* seqStorePtr,
+ const BYTE* anchor, size_t lastLLSize)
+{
+ ZSTD_memcpy(seqStorePtr->lit, anchor, lastLLSize);
+ seqStorePtr->lit += lastLLSize;
+}
+
+void ZSTD_resetSeqStore(seqStore_t* ssPtr)
+{
+ ssPtr->lit = ssPtr->litStart;
+ ssPtr->sequences = ssPtr->sequencesStart;
+ ssPtr->longLengthType = ZSTD_llt_none;
+}
+
+typedef enum { ZSTDbss_compress, ZSTDbss_noCompress } ZSTD_buildSeqStore_e;
+
+static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize)
+{
+ ZSTD_matchState_t* const ms = &zc->blockState.matchState;
+ DEBUGLOG(5, "ZSTD_buildSeqStore (srcSize=%zu)", srcSize);
+ assert(srcSize <= ZSTD_BLOCKSIZE_MAX);
+ /* Assert that we have correctly flushed the ctx params into the ms's copy */
+ ZSTD_assertEqualCParams(zc->appliedParams.cParams, ms->cParams);
+ if (srcSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) {
+ if (zc->appliedParams.cParams.strategy >= ZSTD_btopt) {
+ ZSTD_ldm_skipRawSeqStoreBytes(&zc->externSeqStore, srcSize);
+ } else {
+ ZSTD_ldm_skipSequences(&zc->externSeqStore, srcSize, zc->appliedParams.cParams.minMatch);
+ }
+ return ZSTDbss_noCompress; /* don't even attempt compression below a certain srcSize */
+ }
+ ZSTD_resetSeqStore(&(zc->seqStore));
+ /* required for optimal parser to read stats from dictionary */
+ ms->opt.symbolCosts = &zc->blockState.prevCBlock->entropy;
+ /* tell the optimal parser how we expect to compress literals */
+ ms->opt.literalCompressionMode = zc->appliedParams.literalCompressionMode;
+ /* a gap between an attached dict and the current window is not safe,
+ * they must remain adjacent,
+ * and when that stops being the case, the dict must be unset */
+ assert(ms->dictMatchState == NULL || ms->loadedDictEnd == ms->window.dictLimit);
+
+ /* limited update after a very long match */
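+ /* if indexing has fallen more than 384 bytes behind (e.g. after a long match),
+ * jump nextToUpdate forward so that at most 192 trailing bytes get indexed,
+ * bounding the cost of catching up */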
+ { const BYTE* const base = ms->window.base;
+ const BYTE* const istart = (const BYTE*)src;
+ const U32 curr = (U32)(istart-base);
+ if (sizeof(ptrdiff_t)==8) assert(istart - base < (ptrdiff_t)(U32)(-1)); /* ensure no overflow */
+ if (curr > ms->nextToUpdate + 384)
+ ms->nextToUpdate = curr - MIN(192, (U32)(curr - ms->nextToUpdate - 384));
+ }
+
+ /* select and store sequences */
+ { ZSTD_dictMode_e const dictMode = ZSTD_matchState_dictMode(ms);
+ size_t lastLLSize;
+ { int i;
+ for (i = 0; i < ZSTD_REP_NUM; ++i)
+ zc->blockState.nextCBlock->rep[i] = zc->blockState.prevCBlock->rep[i];
+ }
+ if (zc->externSeqStore.pos < zc->externSeqStore.size) {
+ assert(zc->appliedParams.ldmParams.enableLdm == ZSTD_ps_disable);
+ /* Updates ldmSeqStore.pos */
+ lastLLSize =
+ ZSTD_ldm_blockCompress(&zc->externSeqStore,
+ ms, &zc->seqStore,
+ zc->blockState.nextCBlock->rep,
+ zc->appliedParams.useRowMatchFinder,
+ src, srcSize);
+ assert(zc->externSeqStore.pos <= zc->externSeqStore.size);
+ } else if (zc->appliedParams.ldmParams.enableLdm == ZSTD_ps_enable) {
+ rawSeqStore_t ldmSeqStore = kNullRawSeqStore;
+
+ ldmSeqStore.seq = zc->ldmSequences;
+ ldmSeqStore.capacity = zc->maxNbLdmSequences;
+ /* Updates ldmSeqStore.size */
+ FORWARD_IF_ERROR(ZSTD_ldm_generateSequences(&zc->ldmState, &ldmSeqStore,
+ &zc->appliedParams.ldmParams,
+ src, srcSize), "");
+ /* Updates ldmSeqStore.pos */
+ lastLLSize =
+ ZSTD_ldm_blockCompress(&ldmSeqStore,
+ ms, &zc->seqStore,
+ zc->blockState.nextCBlock->rep,
+ zc->appliedParams.useRowMatchFinder,
+ src, srcSize);
+ assert(ldmSeqStore.pos == ldmSeqStore.size);
+ } else { /* not long range mode */
+ ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->appliedParams.cParams.strategy,
+ zc->appliedParams.useRowMatchFinder,
+ dictMode);
+ ms->ldmSeqStore = NULL;
+ lastLLSize = blockCompressor(ms, &zc->seqStore, zc->blockState.nextCBlock->rep, src, srcSize);
+ }
+ { const BYTE* const lastLiterals = (const BYTE*)src + srcSize - lastLLSize;
+ ZSTD_storeLastLiterals(&zc->seqStore, lastLiterals, lastLLSize);
+ } }
+ return ZSTDbss_compress;
+}
+
+static void ZSTD_copyBlockSequences(ZSTD_CCtx* zc)
+{
+ const seqStore_t* seqStore = ZSTD_getSeqStore(zc);
+ const seqDef* seqStoreSeqs = seqStore->sequencesStart;
+ size_t seqStoreSeqSize = seqStore->sequences - seqStoreSeqs;
+ size_t seqStoreLiteralsSize = (size_t)(seqStore->lit - seqStore->litStart);
+ size_t literalsRead = 0;
+ size_t lastLLSize;
+
+ ZSTD_Sequence* outSeqs = &zc->seqCollector.seqStart[zc->seqCollector.seqIndex];
+ size_t i;
+ repcodes_t updatedRepcodes;
+
+ assert(zc->seqCollector.seqIndex + 1 < zc->seqCollector.maxSequences);
+ /* Ensure we have enough space for last literals "sequence" */
+ assert(zc->seqCollector.maxSequences >= seqStoreSeqSize + 1);
+ ZSTD_memcpy(updatedRepcodes.rep, zc->blockState.prevCBlock->rep, sizeof(repcodes_t));
+ for (i = 0; i < seqStoreSeqSize; ++i) {
+ U32 rawOffset = seqStoreSeqs[i].offBase - ZSTD_REP_NUM;
+ outSeqs[i].litLength = seqStoreSeqs[i].litLength;
+ outSeqs[i].matchLength = seqStoreSeqs[i].mlBase + MINMATCH;
+ outSeqs[i].rep = 0;
+
+ if (i == seqStore->longLengthPos) {
+ if (seqStore->longLengthType == ZSTD_llt_literalLength) {
+ outSeqs[i].litLength += 0x10000;
+ } else if (seqStore->longLengthType == ZSTD_llt_matchLength) {
+ outSeqs[i].matchLength += 0x10000;
+ }
+ }
+
+ if (seqStoreSeqs[i].offBase <= ZSTD_REP_NUM) {
+ /* Derive the correct offset corresponding to a repcode */
+ outSeqs[i].rep = seqStoreSeqs[i].offBase;
+ if (outSeqs[i].litLength != 0) {
+ rawOffset = updatedRepcodes.rep[outSeqs[i].rep - 1];
+ } else {
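+ /* with litLength == 0, repcode meaning shifts : codes 1-2 select the 2nd and 3rd
+ * most recent offsets, and code 3 means the most recent offset minus 1 */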
+ if (outSeqs[i].rep == 3) {
+ rawOffset = updatedRepcodes.rep[0] - 1;
+ } else {
+ rawOffset = updatedRepcodes.rep[outSeqs[i].rep];
+ }
+ }
+ }
+ outSeqs[i].offset = rawOffset;
+ /* seqStoreSeqs[i].offBase == offCode+1, and ZSTD_updateRep() expects offCode,
+ so we provide seqStoreSeqs[i].offBase - 1 */
+ ZSTD_updateRep(updatedRepcodes.rep,
+ seqStoreSeqs[i].offBase - 1,
+ seqStoreSeqs[i].litLength == 0);
+ literalsRead += outSeqs[i].litLength;
+ }
+ /* Insert last literals (if any exist) in the block as a sequence with ml == off == 0.
+ * If there are no last literals, then we'll emit (of: 0, ml: 0, ll: 0), which is a marker
+ * for the block boundary, according to the API.
+ */
+ assert(seqStoreLiteralsSize >= literalsRead);
+ lastLLSize = seqStoreLiteralsSize - literalsRead;
+ outSeqs[i].litLength = (U32)lastLLSize;
+ outSeqs[i].matchLength = outSeqs[i].offset = outSeqs[i].rep = 0;
+ seqStoreSeqSize++;
+ zc->seqCollector.seqIndex += seqStoreSeqSize;
+}
+
+size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs,
+ size_t outSeqsSize, const void* src, size_t srcSize)
+{
+ const size_t dstCapacity = ZSTD_compressBound(srcSize);
+ void* dst = ZSTD_customMalloc(dstCapacity, ZSTD_defaultCMem);
+ SeqCollector seqCollector;
+
+ RETURN_ERROR_IF(dst == NULL, memory_allocation, "NULL pointer!");
+
+ seqCollector.collectSequences = 1;
+ seqCollector.seqStart = outSeqs;
+ seqCollector.seqIndex = 0;
+ seqCollector.maxSequences = outSeqsSize;
+ zc->seqCollector = seqCollector;
+
+ ZSTD_compress2(zc, dst, dstCapacity, src, srcSize);
+ ZSTD_customFree(dst, ZSTD_defaultCMem);
+ return zc->seqCollector.seqIndex;
+}
+
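+/* ZSTD_mergeBlockDelimiters() :
+ * removes block-delimiter sequences (matchLength == offset == 0),
+ * merging their literal length into the following sequence.
+ * Returns the resulting number of sequences. */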
+size_t ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, size_t seqsSize) {
+ size_t in = 0;
+ size_t out = 0;
+ for (; in < seqsSize; ++in) {
+ if (sequences[in].offset == 0 && sequences[in].matchLength == 0) {
+ if (in != seqsSize - 1) {
+ sequences[in+1].litLength += sequences[in].litLength;
+ }
+ } else {
+ sequences[out] = sequences[in];
+ ++out;
+ }
+ }
+ return out;
+}
+
+/* Returns 1 if the input consists of a single repeated byte (RLE), 0 if not.
+ * Uses an unrolled loop reading four size_t words of input at a time. */
+static int ZSTD_isRLE(const BYTE* src, size_t length) {
+ const BYTE* ip = src;
+ const BYTE value = ip[0];
+ const size_t valueST = (size_t)((U64)value * 0x0101010101010101ULL);
+ const size_t unrollSize = sizeof(size_t) * 4;
+ const size_t unrollMask = unrollSize - 1;
+ const size_t prefixLength = length & unrollMask;
+ size_t i;
+ size_t u;
+ if (length == 1) return 1;
+ /* Check if prefix is RLE first before using unrolled loop */
+ if (prefixLength && ZSTD_count(ip+1, ip, ip+prefixLength) != prefixLength-1) {
+ return 0;
+ }
+ for (i = prefixLength; i != length; i += unrollSize) {
+ for (u = 0; u < unrollSize; u += sizeof(size_t)) {
+ if (MEM_readST(ip + i + u) != valueST) {
+ return 0;
+ }
+ }
+ }
+ return 1;
+}
+
+/* Returns true if the given block may be RLE.
+ * This is just a heuristic based on the compressibility.
+ * It may return both false positives and false negatives.
+ */
+static int ZSTD_maybeRLE(seqStore_t const* seqStore)
+{
+ size_t const nbSeqs = (size_t)(seqStore->sequences - seqStore->sequencesStart);
+ size_t const nbLits = (size_t)(seqStore->lit - seqStore->litStart);
+
+ return nbSeqs < 4 && nbLits < 10;
+}
+
+static void ZSTD_blockState_confirmRepcodesAndEntropyTables(ZSTD_blockState_t* const bs)
+{
+ ZSTD_compressedBlockState_t* const tmp = bs->prevCBlock;
+ bs->prevCBlock = bs->nextCBlock;
+ bs->nextCBlock = tmp;
+}
+
+/* Writes the block header */
+static void writeBlockHeader(void* op, size_t cSize, size_t blockSize, U32 lastBlock) {
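+ /* 3-byte little-endian header : bit 0 = lastBlock, bits 1-2 = block type,
+ * bits 3-23 = size (regenerated size for RLE blocks, compressed size otherwise) */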
+ U32 const cBlockHeader = cSize == 1 ?
+ lastBlock + (((U32)bt_rle)<<1) + (U32)(blockSize << 3) :
+ lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
+ MEM_writeLE24(op, cBlockHeader);
+ DEBUGLOG(3, "writeBlockHeader: cSize: %zu blockSize: %zu lastBlock: %u", cSize, blockSize, lastBlock);
+}
+
+/* ZSTD_buildBlockEntropyStats_literals() :
+ * Builds entropy statistics for the literals.
+ * Stores the literals block type (raw, rle, compressed, repeat) and the
+ * Huffman description table in hufMetadata.
+ * Requires ENTROPY_WORKSPACE_SIZE bytes of workspace.
+ * @return : size of the Huffman description table, or an error code */
+static size_t ZSTD_buildBlockEntropyStats_literals(void* const src, size_t srcSize,
+ const ZSTD_hufCTables_t* prevHuf,
+ ZSTD_hufCTables_t* nextHuf,
+ ZSTD_hufCTablesMetadata_t* hufMetadata,
+ const int literalsCompressionIsDisabled,
+ void* workspace, size_t wkspSize)
+{
+ BYTE* const wkspStart = (BYTE*)workspace;
+ BYTE* const wkspEnd = wkspStart + wkspSize;
+ BYTE* const countWkspStart = wkspStart;
+ unsigned* const countWksp = (unsigned*)workspace;
+ const size_t countWkspSize = (HUF_SYMBOLVALUE_MAX + 1) * sizeof(unsigned);
+ BYTE* const nodeWksp = countWkspStart + countWkspSize;
+ const size_t nodeWkspSize = wkspEnd-nodeWksp;
+ unsigned maxSymbolValue = HUF_SYMBOLVALUE_MAX;
+ unsigned huffLog = HUF_TABLELOG_DEFAULT;
+ HUF_repeat repeat = prevHuf->repeatMode;
+ DEBUGLOG(5, "ZSTD_buildBlockEntropyStats_literals (srcSize=%zu)", srcSize);
+
+ /* Prepare nextEntropy assuming reusing the existing table */
+ ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
+
+ if (literalsCompressionIsDisabled) {
+ DEBUGLOG(5, "set_basic - disabled");
+ hufMetadata->hType = set_basic;
+ return 0;
+ }
+
+ /* small ? don't even attempt compression (speed opt) */
+#ifndef COMPRESS_LITERALS_SIZE_MIN
+#define COMPRESS_LITERALS_SIZE_MIN 63
+#endif
+ { size_t const minLitSize = (prevHuf->repeatMode == HUF_repeat_valid) ? 6 : COMPRESS_LITERALS_SIZE_MIN;
+ if (srcSize <= minLitSize) {
+ DEBUGLOG(5, "set_basic - too small");
+ hufMetadata->hType = set_basic;
+ return 0;
+ }
+ }
+
+ /* Scan input and build symbol stats */
+ { size_t const largest = HIST_count_wksp (countWksp, &maxSymbolValue, (const BYTE*)src, srcSize, workspace, wkspSize);
+ FORWARD_IF_ERROR(largest, "HIST_count_wksp failed");
+ if (largest == srcSize) {
+ DEBUGLOG(5, "set_rle");
+ hufMetadata->hType = set_rle;
+ return 0;
+ }
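+ /* near-flat distribution (most frequent symbol under ~srcSize/128) : entropy coding is unlikely to gain */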
+ if (largest <= (srcSize >> 7)+4) {
+ DEBUGLOG(5, "set_basic - no gain");
+ hufMetadata->hType = set_basic;
+ return 0;
+ }
+ }
+
+ /* Validate the previous Huffman table */
+ if (repeat == HUF_repeat_check && !HUF_validateCTable((HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue)) {
+ repeat = HUF_repeat_none;
+ }
+
+ /* Build Huffman Tree */
+ ZSTD_memset(nextHuf->CTable, 0, sizeof(nextHuf->CTable));
+ huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);
+ { size_t const maxBits = HUF_buildCTable_wksp((HUF_CElt*)nextHuf->CTable, countWksp,
+ maxSymbolValue, huffLog,
+ nodeWksp, nodeWkspSize);
+ FORWARD_IF_ERROR(maxBits, "HUF_buildCTable_wksp");
+ huffLog = (U32)maxBits;
+ { /* Build and write the CTable */
+ size_t const newCSize = HUF_estimateCompressedSize(
+ (HUF_CElt*)nextHuf->CTable, countWksp, maxSymbolValue);
+ size_t const hSize = HUF_writeCTable_wksp(
+ hufMetadata->hufDesBuffer, sizeof(hufMetadata->hufDesBuffer),
+ (HUF_CElt*)nextHuf->CTable, maxSymbolValue, huffLog,
+ nodeWksp, nodeWkspSize);
+ /* Check against repeating the previous CTable */
+ if (repeat != HUF_repeat_none) {
+ size_t const oldCSize = HUF_estimateCompressedSize(
+ (HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue);
+ if (oldCSize < srcSize && (oldCSize <= hSize + newCSize || hSize + 12 >= srcSize)) {
+ DEBUGLOG(5, "set_repeat - smaller");
+ ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
+ hufMetadata->hType = set_repeat;
+ return 0;
+ }
+ }
+ if (newCSize + hSize >= srcSize) {
+ DEBUGLOG(5, "set_basic - no gains");
+ ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
+ hufMetadata->hType = set_basic;
+ return 0;
+ }
+ DEBUGLOG(5, "set_compressed (hSize=%u)", (U32)hSize);
+ hufMetadata->hType = set_compressed;
+ nextHuf->repeatMode = HUF_repeat_check;
+ return hSize;
+ }
+ }
+}
+
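+/* Editor's note: a condensed sketch (not part of zstd) of the decision ladder
+ * implemented by ZSTD_buildBlockEntropyStats_literals() above, where `largest`
+ * is the count of the most frequent literal symbol: */
+#if 0   /* example only, never compiled */
+static symbolEncodingType_e exampleLiteralsBlockType(size_t srcSize, size_t largest,
+                                                     int disabled, size_t minLitSize)
+{
+    if (disabled || srcSize <= minLitSize) return set_basic;  /* store literals raw */
+    if (largest == srcSize)                return set_rle;    /* one repeated symbol */
+    if (largest <= (srcSize >> 7) + 4)     return set_basic;  /* near-flat : no gain */
+    return set_compressed;  /* or set_repeat, when reusing the previous table is cheaper */
+}
+#endif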
+
+/* ZSTD_buildDummySequencesStatistics():
+ * Returns a ZSTD_symbolEncodingTypeStats_t with all encoding types as set_basic,
+ * and updates nextEntropy to the appropriate repeatMode.
+ */
+static ZSTD_symbolEncodingTypeStats_t
+ZSTD_buildDummySequencesStatistics(ZSTD_fseCTables_t* nextEntropy) {
+ ZSTD_symbolEncodingTypeStats_t stats = {set_basic, set_basic, set_basic, 0, 0};
+ nextEntropy->litlength_repeatMode = FSE_repeat_none;
+ nextEntropy->offcode_repeatMode = FSE_repeat_none;
+ nextEntropy->matchlength_repeatMode = FSE_repeat_none;
+ return stats;
+}
+
+/* ZSTD_buildBlockEntropyStats_sequences() :
+ * Builds entropy statistics for the sequences.
+ * Stores symbol compression modes and FSE tables in fseMetadata.
+ * Requires a workspace of size ENTROPY_WORKSPACE_SIZE.
+ * @return : size of FSE tables, or an error code */
+static size_t ZSTD_buildBlockEntropyStats_sequences(seqStore_t* seqStorePtr,
+ const ZSTD_fseCTables_t* prevEntropy,
+ ZSTD_fseCTables_t* nextEntropy,
+ const ZSTD_CCtx_params* cctxParams,
+ ZSTD_fseCTablesMetadata_t* fseMetadata,
+ void* workspace, size_t wkspSize)
+{
+ ZSTD_strategy const strategy = cctxParams->cParams.strategy;
+ size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart;
+ BYTE* const ostart = fseMetadata->fseTablesBuffer;
+ BYTE* const oend = ostart + sizeof(fseMetadata->fseTablesBuffer);
+ BYTE* op = ostart;
+ unsigned* countWorkspace = (unsigned*)workspace;
+ unsigned* entropyWorkspace = countWorkspace + (MaxSeq + 1);
+ size_t entropyWorkspaceSize = wkspSize - (MaxSeq + 1) * sizeof(*countWorkspace);
+ ZSTD_symbolEncodingTypeStats_t stats;
+
+ DEBUGLOG(5, "ZSTD_buildBlockEntropyStats_sequences (nbSeq=%zu)", nbSeq);
+ stats = nbSeq != 0 ? ZSTD_buildSequencesStatistics(seqStorePtr, nbSeq,
+ prevEntropy, nextEntropy, op, oend,
+ strategy, countWorkspace,
+ entropyWorkspace, entropyWorkspaceSize)
+ : ZSTD_buildDummySequencesStatistics(nextEntropy);
+ FORWARD_IF_ERROR(stats.size, "ZSTD_buildSequencesStatistics failed!");
+ fseMetadata->llType = (symbolEncodingType_e) stats.LLtype;
+ fseMetadata->ofType = (symbolEncodingType_e) stats.Offtype;
+ fseMetadata->mlType = (symbolEncodingType_e) stats.MLtype;
+ fseMetadata->lastCountSize = stats.lastCountSize;
+ return stats.size;
+}
+
+
+/* ZSTD_buildBlockEntropyStats() :
+ * Builds entropy statistics for the block.
+ * Requires a workspace of size ENTROPY_WORKSPACE_SIZE.
+ *
+ * @return : 0 on success or error code
+ */
+size_t ZSTD_buildBlockEntropyStats(seqStore_t* seqStorePtr,
+ const ZSTD_entropyCTables_t* prevEntropy,
+ ZSTD_entropyCTables_t* nextEntropy,
+ const ZSTD_CCtx_params* cctxParams,
+ ZSTD_entropyCTablesMetadata_t* entropyMetadata,
+ void* workspace, size_t wkspSize)
+{
+ size_t const litSize = seqStorePtr->lit - seqStorePtr->litStart;
+ entropyMetadata->hufMetadata.hufDesSize =
+ ZSTD_buildBlockEntropyStats_literals(seqStorePtr->litStart, litSize,
+ &prevEntropy->huf, &nextEntropy->huf,
+ &entropyMetadata->hufMetadata,
+ ZSTD_literalsCompressionIsDisabled(cctxParams),
+ workspace, wkspSize);
+ FORWARD_IF_ERROR(entropyMetadata->hufMetadata.hufDesSize, "ZSTD_buildBlockEntropyStats_literals failed");
+ entropyMetadata->fseMetadata.fseTablesSize =
+ ZSTD_buildBlockEntropyStats_sequences(seqStorePtr,
+ &prevEntropy->fse, &nextEntropy->fse,
+ cctxParams,
+ &entropyMetadata->fseMetadata,
+ workspace, wkspSize);
+ FORWARD_IF_ERROR(entropyMetadata->fseMetadata.fseTablesSize, "ZSTD_buildBlockEntropyStats_sequences failed");
+ return 0;
+}
+
+/* Returns the size estimate for the literals section (header + content) of a block */
+static size_t ZSTD_estimateBlockSize_literal(const BYTE* literals, size_t litSize,
+ const ZSTD_hufCTables_t* huf,
+ const ZSTD_hufCTablesMetadata_t* hufMetadata,
+ void* workspace, size_t wkspSize,
+ int writeEntropy)
+{
+ unsigned* const countWksp = (unsigned*)workspace;
+ unsigned maxSymbolValue = HUF_SYMBOLVALUE_MAX;
+ size_t literalSectionHeaderSize = 3 + (litSize >= 1 KB) + (litSize >= 16 KB);
+ U32 singleStream = litSize < 256;
+
+ if (hufMetadata->hType == set_basic) return litSize;
+ else if (hufMetadata->hType == set_rle) return 1;
+ else if (hufMetadata->hType == set_compressed || hufMetadata->hType == set_repeat) {
+ size_t const largest = HIST_count_wksp (countWksp, &maxSymbolValue, (const BYTE*)literals, litSize, workspace, wkspSize);
+ if (ZSTD_isError(largest)) return litSize;
+ { size_t cLitSizeEstimate = HUF_estimateCompressedSize((const HUF_CElt*)huf->CTable, countWksp, maxSymbolValue);
+ if (writeEntropy) cLitSizeEstimate += hufMetadata->hufDesSize;
+ if (!singleStream) cLitSizeEstimate += 6; /* multi-stream huffman uses 6-byte jump table */
+ return cLitSizeEstimate + literalSectionHeaderSize;
+ } }
+ assert(0); /* impossible */
+ return 0;
+}
+
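+/* Editor's note: a worked example (not part of zstd) of the estimate above.
+ * For litSize = 20000: the literals section header costs 3 + 1 (>= 1 KB)
+ * + 1 (>= 16 KB) = 5 bytes, and since litSize >= 256 the 4-stream mode adds
+ * a 6-byte jump table, plus the Huffman description when entropy is written: */
+#if 0   /* example only, never compiled */
+static size_t exampleLiteralsEstimate(size_t cLitEstimate, size_t hufDesSize, int writeEntropy)
+{
+    size_t const headerSize = 5;   /* 3 base bytes + 2 size-threshold bytes */
+    size_t const jumpTable  = 6;   /* multi-stream Huffman jump table */
+    return cLitEstimate + jumpTable + headerSize + (writeEntropy ? hufDesSize : 0);
+}
+#endif
+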
+/* Returns the size estimate for the FSE-compressed symbols (of, ml, ll) of a block */
+static size_t ZSTD_estimateBlockSize_symbolType(symbolEncodingType_e type,
+ const BYTE* codeTable, size_t nbSeq, unsigned maxCode,
+ const FSE_CTable* fseCTable,
+ const U8* additionalBits,
+ short const* defaultNorm, U32 defaultNormLog, U32 defaultMax,
+ void* workspace, size_t wkspSize)
+{
+ unsigned* const countWksp = (unsigned*)workspace;
+ const BYTE* ctp = codeTable;
+ const BYTE* const ctStart = ctp;
+ const BYTE* const ctEnd = ctStart + nbSeq;
+ size_t cSymbolTypeSizeEstimateInBits = 0;
+ unsigned max = maxCode;
+
+ HIST_countFast_wksp(countWksp, &max, codeTable, nbSeq, workspace, wkspSize); /* can't fail */
+ if (type == set_basic) {
+ /* We selected this encoding type, so it must be valid. */
+ assert(max <= defaultMax);
+ (void)defaultMax;
+ cSymbolTypeSizeEstimateInBits = ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, countWksp, max);
+ } else if (type == set_rle) {
+ cSymbolTypeSizeEstimateInBits = 0;
+ } else if (type == set_compressed || type == set_repeat) {
+ cSymbolTypeSizeEstimateInBits = ZSTD_fseBitCost(fseCTable, countWksp, max);
+ }
+ if (ZSTD_isError(cSymbolTypeSizeEstimateInBits)) {
+ return nbSeq * 10;
+ }
+ while (ctp < ctEnd) {
+ if (additionalBits) cSymbolTypeSizeEstimateInBits += additionalBits[*ctp];
+ else cSymbolTypeSizeEstimateInBits += *ctp; /* for offset, offset code is also the number of additional bits */
+ ctp++;
+ }
+ return cSymbolTypeSizeEstimateInBits >> 3;
+}
+
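+/* Editor's note: a worked example (not part of zstd) of the cost model used
+ * below. For offsets, the code value is itself the number of extra bits, so
+ * offset codes {5, 7, 10} contribute 5 + 7 + 10 = 22 extra bits on top of
+ * their FSE-coded symbols; a final right shift by 3 converts bits to bytes: */
+#if 0   /* example only, never compiled */
+static size_t exampleBitsToBytes(size_t fseBits, size_t extraBits)
+{
+    return (fseBits + extraBits) >> 3;   /* truncating bits-to-bytes conversion */
+}
+#endif
+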
+/* Returns the size estimate for the sequences section (header + content) of a block */
+static size_t ZSTD_estimateBlockSize_sequences(const BYTE* ofCodeTable,
+ const BYTE* llCodeTable,
+ const BYTE* mlCodeTable,
+ size_t nbSeq,
+ const ZSTD_fseCTables_t* fseTables,
+ const ZSTD_fseCTablesMetadata_t* fseMetadata,
+ void* workspace, size_t wkspSize,
+ int writeEntropy)
+{
+    size_t sequencesSectionHeaderSize = 1 /* seqHead */ + 1 /* min size of the nbSeq field */ + (nbSeq >= 128) + (nbSeq >= LONGNBSEQ);
+ size_t cSeqSizeEstimate = 0;
+ cSeqSizeEstimate += ZSTD_estimateBlockSize_symbolType(fseMetadata->ofType, ofCodeTable, nbSeq, MaxOff,
+ fseTables->offcodeCTable, NULL,
+ OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
+ workspace, wkspSize);
+ cSeqSizeEstimate += ZSTD_estimateBlockSize_symbolType(fseMetadata->llType, llCodeTable, nbSeq, MaxLL,
+ fseTables->litlengthCTable, LL_bits,
+ LL_defaultNorm, LL_defaultNormLog, MaxLL,
+ workspace, wkspSize);
+ cSeqSizeEstimate += ZSTD_estimateBlockSize_symbolType(fseMetadata->mlType, mlCodeTable, nbSeq, MaxML,
+ fseTables->matchlengthCTable, ML_bits,
+ ML_defaultNorm, ML_defaultNormLog, MaxML,
+ workspace, wkspSize);
+ if (writeEntropy) cSeqSizeEstimate += fseMetadata->fseTablesSize;
+ return cSeqSizeEstimate + sequencesSectionHeaderSize;
+}
+
+/* Returns the size estimate for a block, given its stream of literals and its of/ll/ml code tables */
+static size_t ZSTD_estimateBlockSize(const BYTE* literals, size_t litSize,
+ const BYTE* ofCodeTable,
+ const BYTE* llCodeTable,
+ const BYTE* mlCodeTable,
+ size_t nbSeq,
+ const ZSTD_entropyCTables_t* entropy,
+ const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
+ void* workspace, size_t wkspSize,
+ int writeLitEntropy, int writeSeqEntropy) {
+ size_t const literalsSize = ZSTD_estimateBlockSize_literal(literals, litSize,
+ &entropy->huf, &entropyMetadata->hufMetadata,
+ workspace, wkspSize, writeLitEntropy);
+ size_t const seqSize = ZSTD_estimateBlockSize_sequences(ofCodeTable, llCodeTable, mlCodeTable,
+ nbSeq, &entropy->fse, &entropyMetadata->fseMetadata,
+ workspace, wkspSize, writeSeqEntropy);
+ return seqSize + literalsSize + ZSTD_blockHeaderSize;
+}
+
+/* Builds entropy statistics and uses them for blocksize estimation.
+ *
+ * Returns the estimated compressed size of the seqStore, or a zstd error.
+ */
+static size_t ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(seqStore_t* seqStore, ZSTD_CCtx* zc) {
+ ZSTD_entropyCTablesMetadata_t* entropyMetadata = &zc->blockSplitCtx.entropyMetadata;
+ DEBUGLOG(6, "ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize()");
+ FORWARD_IF_ERROR(ZSTD_buildBlockEntropyStats(seqStore,
+ &zc->blockState.prevCBlock->entropy,
+ &zc->blockState.nextCBlock->entropy,
+ &zc->appliedParams,
+ entropyMetadata,
+ zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */), "");
+ return ZSTD_estimateBlockSize(seqStore->litStart, (size_t)(seqStore->lit - seqStore->litStart),
+ seqStore->ofCode, seqStore->llCode, seqStore->mlCode,
+ (size_t)(seqStore->sequences - seqStore->sequencesStart),
+ &zc->blockState.nextCBlock->entropy, entropyMetadata, zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE,
+ (int)(entropyMetadata->hufMetadata.hType == set_compressed), 1);
+}
+
+/* Returns the number of literals bytes represented in a seqStore */
+static size_t ZSTD_countSeqStoreLiteralsBytes(const seqStore_t* const seqStore) {
+ size_t literalsBytes = 0;
+ size_t const nbSeqs = seqStore->sequences - seqStore->sequencesStart;
+ size_t i;
+ for (i = 0; i < nbSeqs; ++i) {
+ seqDef seq = seqStore->sequencesStart[i];
+ literalsBytes += seq.litLength;
+ if (i == seqStore->longLengthPos && seqStore->longLengthType == ZSTD_llt_literalLength) {
+ literalsBytes += 0x10000;
+ }
+ }
+ return literalsBytes;
+}
+
+/* Returns the number of match bytes represented in a seqStore */
+static size_t ZSTD_countSeqStoreMatchBytes(const seqStore_t* const seqStore) {
+ size_t matchBytes = 0;
+ size_t const nbSeqs = seqStore->sequences - seqStore->sequencesStart;
+ size_t i;
+ for (i = 0; i < nbSeqs; ++i) {
+ seqDef seq = seqStore->sequencesStart[i];
+ matchBytes += seq.mlBase + MINMATCH;
+ if (i == seqStore->longLengthPos && seqStore->longLengthType == ZSTD_llt_matchLength) {
+ matchBytes += 0x10000;
+ }
+ }
+ return matchBytes;
+}
+
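+/* Editor's note: seqDef stores litLength and mlBase in 16-bit fields, so one
+ * sequence per seqStore may overflow them; that sequence is flagged through
+ * longLengthPos/longLengthType, and the two counters above add back the
+ * missing 0x10000. A sketch (not part of zstd) for a single literal run: */
+#if 0   /* example only, never compiled */
+static size_t exampleTrueLitLength(size_t storedLitLength, int isFlaggedLongLiteral)
+{
+    /* e.g. a 70000-byte run is stored as 70000 - 0x10000 = 4464, plus the flag */
+    return storedLitLength + (isFlaggedLongLiteral ? 0x10000 : 0);
+}
+#endif
+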
+/* Derives the seqStore that is a chunk of the originalSeqStore from [startIdx, endIdx).
+ * Stores the result in resultSeqStore.
+ */
+static void ZSTD_deriveSeqStoreChunk(seqStore_t* resultSeqStore,
+ const seqStore_t* originalSeqStore,
+ size_t startIdx, size_t endIdx) {
+ BYTE* const litEnd = originalSeqStore->lit;
+ size_t literalsBytes;
+ size_t literalsBytesPreceding = 0;
+
+ *resultSeqStore = *originalSeqStore;
+ if (startIdx > 0) {
+ resultSeqStore->sequences = originalSeqStore->sequencesStart + startIdx;
+ literalsBytesPreceding = ZSTD_countSeqStoreLiteralsBytes(resultSeqStore);
+ }
+
+ /* Move longLengthPos into the correct position if necessary */
+ if (originalSeqStore->longLengthType != ZSTD_llt_none) {
+ if (originalSeqStore->longLengthPos < startIdx || originalSeqStore->longLengthPos > endIdx) {
+ resultSeqStore->longLengthType = ZSTD_llt_none;
+ } else {
+ resultSeqStore->longLengthPos -= (U32)startIdx;
+ }
+ }
+ resultSeqStore->sequencesStart = originalSeqStore->sequencesStart + startIdx;
+ resultSeqStore->sequences = originalSeqStore->sequencesStart + endIdx;
+ literalsBytes = ZSTD_countSeqStoreLiteralsBytes(resultSeqStore);
+ resultSeqStore->litStart += literalsBytesPreceding;
+ if (endIdx == (size_t)(originalSeqStore->sequences - originalSeqStore->sequencesStart)) {
+ /* This accounts for possible last literals if the derived chunk reaches the end of the block */
+ resultSeqStore->lit = litEnd;
+ } else {
+ resultSeqStore->lit = resultSeqStore->litStart+literalsBytes;
+ }
+ resultSeqStore->llCode += startIdx;
+ resultSeqStore->mlCode += startIdx;
+ resultSeqStore->ofCode += startIdx;
+}
+
+/*
+ * Returns the raw offset represented by the combination of offCode, ll0, and repcode history.
+ * offCode must represent a repcode in the numeric representation of ZSTD_storeSeq().
+ */
+static U32
+ZSTD_resolveRepcodeToRawOffset(const U32 rep[ZSTD_REP_NUM], const U32 offCode, const U32 ll0)
+{
+ U32 const adjustedOffCode = STORED_REPCODE(offCode) - 1 + ll0; /* [ 0 - 3 ] */
+ assert(STORED_IS_REPCODE(offCode));
+ if (adjustedOffCode == ZSTD_REP_NUM) {
+        /* litLength == 0 and repcode 3 together select "first repcode - 1", i.e. rep[0] - 1 */
+ assert(rep[0] > 0);
+ return rep[0] - 1;
+ }
+ return rep[adjustedOffCode];
+}
+
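+/* Editor's note: the resulting mapping, written out (derived from the code
+ * above; stored repcodes are numbered 1-3):
+ *
+ *   stored repcode | ll0 == 0 | ll0 == 1
+ *   ---------------+----------+-------------
+ *         1        |  rep[0]  |  rep[1]
+ *         2        |  rep[1]  |  rep[2]
+ *         3        |  rep[2]  |  rep[0] - 1
+ */
+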
+/*
+ * ZSTD_seqStore_resolveOffCodes() reconciles any possible divergences in offset history that may arise
+ * due to emission of RLE/raw blocks that disturb the offset history,
+ * and replaces any repcodes within the seqStore that may be invalid.
+ *
+ * dRepcodes are updated as would be on the decompression side.
+ * cRepcodes are updated exactly in accordance with the seqStore.
+ *
+ * Note : this function assumes seq->offBase respects the following numbering scheme :
+ * 0 : invalid
+ * 1-3 : repcode 1-3
+ * 4+ : real_offset+3
+ */
+static void ZSTD_seqStore_resolveOffCodes(repcodes_t* const dRepcodes, repcodes_t* const cRepcodes,
+ seqStore_t* const seqStore, U32 const nbSeq) {
+ U32 idx = 0;
+ for (; idx < nbSeq; ++idx) {
+ seqDef* const seq = seqStore->sequencesStart + idx;
+ U32 const ll0 = (seq->litLength == 0);
+ U32 const offCode = OFFBASE_TO_STORED(seq->offBase);
+ assert(seq->offBase > 0);
+ if (STORED_IS_REPCODE(offCode)) {
+ U32 const dRawOffset = ZSTD_resolveRepcodeToRawOffset(dRepcodes->rep, offCode, ll0);
+ U32 const cRawOffset = ZSTD_resolveRepcodeToRawOffset(cRepcodes->rep, offCode, ll0);
+ /* Adjust simulated decompression repcode history if we come across a mismatch. Replace
+ * the repcode with the offset it actually references, determined by the compression
+ * repcode history.
+ */
+ if (dRawOffset != cRawOffset) {
+ seq->offBase = cRawOffset + ZSTD_REP_NUM;
+ }
+ }
+        /* Compression repcode history is always updated with values taken directly from the unmodified seqStore.
+         * Decompression repcode history may instead use the modified seq->offBase value, taken from the
+         * compression repcode history.
+         */
+ ZSTD_updateRep(dRepcodes->rep, OFFBASE_TO_STORED(seq->offBase), ll0);
+ ZSTD_updateRep(cRepcodes->rep, offCode, ll0);
+ }
+}
+
+/* ZSTD_compressSeqStore_singleBlock():
+ * Compresses a seqStore into a block with a block header, into the buffer dst.
+ *
+ * Returns the total size of that block (including header) or a ZSTD error code.
+ */
+static size_t
+ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx* zc, seqStore_t* const seqStore,
+ repcodes_t* const dRep, repcodes_t* const cRep,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ U32 lastBlock, U32 isPartition)
+{
+ const U32 rleMaxLength = 25;
+ BYTE* op = (BYTE*)dst;
+ const BYTE* ip = (const BYTE*)src;
+ size_t cSize;
+ size_t cSeqsSize;
+
+ /* In case of an RLE or raw block, the simulated decompression repcode history must be reset */
+ repcodes_t const dRepOriginal = *dRep;
+ DEBUGLOG(5, "ZSTD_compressSeqStore_singleBlock");
+ if (isPartition)
+ ZSTD_seqStore_resolveOffCodes(dRep, cRep, seqStore, (U32)(seqStore->sequences - seqStore->sequencesStart));
+
+ RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize, dstSize_tooSmall, "Block header doesn't fit");
+ cSeqsSize = ZSTD_entropyCompressSeqStore(seqStore,
+ &zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy,
+ &zc->appliedParams,
+ op + ZSTD_blockHeaderSize, dstCapacity - ZSTD_blockHeaderSize,
+ srcSize,
+ zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */,
+ zc->bmi2);
+ FORWARD_IF_ERROR(cSeqsSize, "ZSTD_entropyCompressSeqStore failed!");
+
+ if (!zc->isFirstBlock &&
+ cSeqsSize < rleMaxLength &&
+ ZSTD_isRLE((BYTE const*)src, srcSize)) {
+        /* We don't want to emit our first block as an RLE block, even if it qualifies, because
+         * doing so would cause the decoder (cli only) to throw a "should consume all input" error.
+ * This is only an issue for zstd <= v1.4.3
+ */
+ cSeqsSize = 1;
+ }
+
+ if (zc->seqCollector.collectSequences) {
+ ZSTD_copyBlockSequences(zc);
+ ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState);
+ return 0;
+ }
+
+ if (cSeqsSize == 0) {
+ cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, srcSize, lastBlock);
+ FORWARD_IF_ERROR(cSize, "Nocompress block failed");
+ DEBUGLOG(4, "Writing out nocompress block, size: %zu", cSize);
+ *dRep = dRepOriginal; /* reset simulated decompression repcode history */
+ } else if (cSeqsSize == 1) {
+ cSize = ZSTD_rleCompressBlock(op, dstCapacity, *ip, srcSize, lastBlock);
+ FORWARD_IF_ERROR(cSize, "RLE compress block failed");
+ DEBUGLOG(4, "Writing out RLE block, size: %zu", cSize);
+ *dRep = dRepOriginal; /* reset simulated decompression repcode history */
+ } else {
+ ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState);
+ writeBlockHeader(op, cSeqsSize, srcSize, lastBlock);
+ cSize = ZSTD_blockHeaderSize + cSeqsSize;
+ DEBUGLOG(4, "Writing out compressed block, size: %zu", cSize);
+ }
+
+ if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
+ zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;
+
+ return cSize;
+}
+
+/* Struct to keep track of where we are in our recursive calls. */
+typedef struct {
+ U32* splitLocations; /* Array of split indices */
+ size_t idx; /* The current index within splitLocations being worked on */
+} seqStoreSplits;
+
+#define MIN_SEQUENCES_BLOCK_SPLITTING 300
+
+/* Helper function to perform the recursive search for block splits.
+ * Estimates the cost of seqStore prior to split, and estimates the cost of splitting the sequences in half.
+ * If advantageous to split, then we recurse down the two sub-blocks. If not, or if an error occurred in estimation, then
+ * we do not recurse.
+ *
+ * Note: The recursion depth is capped by a heuristic minimum number of sequences, defined by MIN_SEQUENCES_BLOCK_SPLITTING.
+ * In theory, this means the absolute largest recursion depth is 10 == log2(maxNbSeqInBlock/MIN_SEQUENCES_BLOCK_SPLITTING).
+ * In practice, recursion depth usually doesn't go beyond 4.
+ *
+ * Furthermore, the number of splits is capped by ZSTD_MAX_NB_BLOCK_SPLITS. At ZSTD_MAX_NB_BLOCK_SPLITS == 196,
+ * with the current blockSize maximum of 128 KB, this cap is impossible to reach in practice.
+ */
+ */
+static void
+ZSTD_deriveBlockSplitsHelper(seqStoreSplits* splits, size_t startIdx, size_t endIdx,
+ ZSTD_CCtx* zc, const seqStore_t* origSeqStore)
+{
+ seqStore_t* fullSeqStoreChunk = &zc->blockSplitCtx.fullSeqStoreChunk;
+ seqStore_t* firstHalfSeqStore = &zc->blockSplitCtx.firstHalfSeqStore;
+ seqStore_t* secondHalfSeqStore = &zc->blockSplitCtx.secondHalfSeqStore;
+ size_t estimatedOriginalSize;
+ size_t estimatedFirstHalfSize;
+ size_t estimatedSecondHalfSize;
+ size_t midIdx = (startIdx + endIdx)/2;
+
+ if (endIdx - startIdx < MIN_SEQUENCES_BLOCK_SPLITTING || splits->idx >= ZSTD_MAX_NB_BLOCK_SPLITS) {
+ DEBUGLOG(6, "ZSTD_deriveBlockSplitsHelper: Too few sequences");
+ return;
+ }
+ DEBUGLOG(4, "ZSTD_deriveBlockSplitsHelper: startIdx=%zu endIdx=%zu", startIdx, endIdx);
+ ZSTD_deriveSeqStoreChunk(fullSeqStoreChunk, origSeqStore, startIdx, endIdx);
+ ZSTD_deriveSeqStoreChunk(firstHalfSeqStore, origSeqStore, startIdx, midIdx);
+ ZSTD_deriveSeqStoreChunk(secondHalfSeqStore, origSeqStore, midIdx, endIdx);
+ estimatedOriginalSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(fullSeqStoreChunk, zc);
+ estimatedFirstHalfSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(firstHalfSeqStore, zc);
+ estimatedSecondHalfSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(secondHalfSeqStore, zc);
+ DEBUGLOG(4, "Estimated original block size: %zu -- First half split: %zu -- Second half split: %zu",
+ estimatedOriginalSize, estimatedFirstHalfSize, estimatedSecondHalfSize);
+ if (ZSTD_isError(estimatedOriginalSize) || ZSTD_isError(estimatedFirstHalfSize) || ZSTD_isError(estimatedSecondHalfSize)) {
+ return;
+ }
+ if (estimatedFirstHalfSize + estimatedSecondHalfSize < estimatedOriginalSize) {
+ ZSTD_deriveBlockSplitsHelper(splits, startIdx, midIdx, zc, origSeqStore);
+ splits->splitLocations[splits->idx] = (U32)midIdx;
+ splits->idx++;
+ ZSTD_deriveBlockSplitsHelper(splits, midIdx, endIdx, zc, origSeqStore);
+ }
+}
+
+/* Entry point for the recursive split search. Populates a table with intra-block partition indices that can improve compression ratio.
+ *
+ * Returns the number of splits made (which equals the size of the partition table - 1).
+ */
+static size_t ZSTD_deriveBlockSplits(ZSTD_CCtx* zc, U32 partitions[], U32 nbSeq) {
+ seqStoreSplits splits = {partitions, 0};
+ if (nbSeq <= 4) {
+ DEBUGLOG(4, "ZSTD_deriveBlockSplits: Too few sequences to split");
+        /* Refuse to try to split anything with 4 or fewer sequences */
+ return 0;
+ }
+ ZSTD_deriveBlockSplitsHelper(&splits, 0, nbSeq, zc, &zc->seqStore);
+ splits.splitLocations[splits.idx] = nbSeq;
+ DEBUGLOG(5, "ZSTD_deriveBlockSplits: final nb partitions: %zu", splits.idx+1);
+ return splits.idx;
+}
+
+/* ZSTD_compressBlock_splitBlock_internal():
+ * Attempts to split a given block into multiple blocks to improve compression ratio.
+ *
+ * Returns combined size of all blocks (which includes headers), or a ZSTD error code.
+ */
+static size_t
+ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx* zc, void* dst, size_t dstCapacity,
+ const void* src, size_t blockSize, U32 lastBlock, U32 nbSeq)
+{
+ size_t cSize = 0;
+ const BYTE* ip = (const BYTE*)src;
+ BYTE* op = (BYTE*)dst;
+ size_t i = 0;
+ size_t srcBytesTotal = 0;
+ U32* partitions = zc->blockSplitCtx.partitions; /* size == ZSTD_MAX_NB_BLOCK_SPLITS */
+ seqStore_t* nextSeqStore = &zc->blockSplitCtx.nextSeqStore;
+ seqStore_t* currSeqStore = &zc->blockSplitCtx.currSeqStore;
+ size_t numSplits = ZSTD_deriveBlockSplits(zc, partitions, nbSeq);
+
+ /* If a block is split and some partitions are emitted as RLE/uncompressed, then repcode history
+ * may become invalid. In order to reconcile potentially invalid repcodes, we keep track of two
+ * separate repcode histories that simulate repcode history on compression and decompression side,
+ * and use the histories to determine whether we must replace a particular repcode with its raw offset.
+ *
+ * 1) cRep gets updated for each partition, regardless of whether the block was emitted as uncompressed
+ * or RLE. This allows us to retrieve the offset value that an invalid repcode references within
+ * a nocompress/RLE block.
+ * 2) dRep gets updated only for compressed partitions, and when a repcode gets replaced, will use
+ * the replacement offset value rather than the original repcode to update the repcode history.
+ * dRep also will be the final repcode history sent to the next block.
+ *
+ * See ZSTD_seqStore_resolveOffCodes() for more details.
+ */
+ repcodes_t dRep;
+ repcodes_t cRep;
+ ZSTD_memcpy(dRep.rep, zc->blockState.prevCBlock->rep, sizeof(repcodes_t));
+ ZSTD_memcpy(cRep.rep, zc->blockState.prevCBlock->rep, sizeof(repcodes_t));
+ ZSTD_memset(nextSeqStore, 0, sizeof(seqStore_t));
+
+ DEBUGLOG(4, "ZSTD_compressBlock_splitBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)",
+ (unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit,
+ (unsigned)zc->blockState.matchState.nextToUpdate);
+
+ if (numSplits == 0) {
+ size_t cSizeSingleBlock = ZSTD_compressSeqStore_singleBlock(zc, &zc->seqStore,
+ &dRep, &cRep,
+ op, dstCapacity,
+ ip, blockSize,
+ lastBlock, 0 /* isPartition */);
+ FORWARD_IF_ERROR(cSizeSingleBlock, "Compressing single block from splitBlock_internal() failed!");
+ DEBUGLOG(5, "ZSTD_compressBlock_splitBlock_internal: No splits");
+ assert(cSizeSingleBlock <= ZSTD_BLOCKSIZE_MAX + ZSTD_blockHeaderSize);
+ return cSizeSingleBlock;
+ }
+
+ ZSTD_deriveSeqStoreChunk(currSeqStore, &zc->seqStore, 0, partitions[0]);
+ for (i = 0; i <= numSplits; ++i) {
+ size_t srcBytes;
+ size_t cSizeChunk;
+ U32 const lastPartition = (i == numSplits);
+ U32 lastBlockEntireSrc = 0;
+
+ srcBytes = ZSTD_countSeqStoreLiteralsBytes(currSeqStore) + ZSTD_countSeqStoreMatchBytes(currSeqStore);
+ srcBytesTotal += srcBytes;
+ if (lastPartition) {
+ /* This is the final partition, need to account for possible last literals */
+ srcBytes += blockSize - srcBytesTotal;
+ lastBlockEntireSrc = lastBlock;
+ } else {
+ ZSTD_deriveSeqStoreChunk(nextSeqStore, &zc->seqStore, partitions[i], partitions[i+1]);
+ }
+
+ cSizeChunk = ZSTD_compressSeqStore_singleBlock(zc, currSeqStore,
+ &dRep, &cRep,
+ op, dstCapacity,
+ ip, srcBytes,
+ lastBlockEntireSrc, 1 /* isPartition */);
+ DEBUGLOG(5, "Estimated size: %zu actual size: %zu", ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(currSeqStore, zc), cSizeChunk);
+ FORWARD_IF_ERROR(cSizeChunk, "Compressing chunk failed!");
+
+ ip += srcBytes;
+ op += cSizeChunk;
+ dstCapacity -= cSizeChunk;
+ cSize += cSizeChunk;
+ *currSeqStore = *nextSeqStore;
+ assert(cSizeChunk <= ZSTD_BLOCKSIZE_MAX + ZSTD_blockHeaderSize);
+ }
+ /* cRep and dRep may have diverged during the compression. If so, we use the dRep repcodes
+ * for the next block.
+ */
+ ZSTD_memcpy(zc->blockState.prevCBlock->rep, dRep.rep, sizeof(repcodes_t));
+ return cSize;
+}
+
+static size_t
+ZSTD_compressBlock_splitBlock(ZSTD_CCtx* zc,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize, U32 lastBlock)
+{
+ const BYTE* ip = (const BYTE*)src;
+ BYTE* op = (BYTE*)dst;
+ U32 nbSeq;
+ size_t cSize;
+ DEBUGLOG(4, "ZSTD_compressBlock_splitBlock");
+ assert(zc->appliedParams.useBlockSplitter == ZSTD_ps_enable);
+
+ { const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize);
+ FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed");
+ if (bss == ZSTDbss_noCompress) {
+ if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
+ zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;
+ cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, srcSize, lastBlock);
+ FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed");
+ DEBUGLOG(4, "ZSTD_compressBlock_splitBlock: Nocompress block");
+ return cSize;
+ }
+ nbSeq = (U32)(zc->seqStore.sequences - zc->seqStore.sequencesStart);
+ }
+
+ cSize = ZSTD_compressBlock_splitBlock_internal(zc, dst, dstCapacity, src, srcSize, lastBlock, nbSeq);
+ FORWARD_IF_ERROR(cSize, "Splitting blocks failed!");
+ return cSize;
+}
+
+static size_t
+ZSTD_compressBlock_internal(ZSTD_CCtx* zc,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize, U32 frame)
+{
+    /* This is the upper bound used for the length of an RLE block.
+     * It isn't the actual upper bound; finding the real threshold
+     * needs further investigation.
+     */
+ const U32 rleMaxLength = 25;
+ size_t cSize;
+ const BYTE* ip = (const BYTE*)src;
+ BYTE* op = (BYTE*)dst;
+ DEBUGLOG(5, "ZSTD_compressBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)",
+ (unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit,
+ (unsigned)zc->blockState.matchState.nextToUpdate);
+
+ { const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize);
+ FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed");
+ if (bss == ZSTDbss_noCompress) { cSize = 0; goto out; }
+ }
+
+ if (zc->seqCollector.collectSequences) {
+ ZSTD_copyBlockSequences(zc);
+ ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState);
+ return 0;
+ }
+
+ /* encode sequences and literals */
+ cSize = ZSTD_entropyCompressSeqStore(&zc->seqStore,
+ &zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy,
+ &zc->appliedParams,
+ dst, dstCapacity,
+ srcSize,
+ zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */,
+ zc->bmi2);
+
+ if (frame &&
+        /* We don't want to emit our first block as an RLE block, even if it qualifies, because
+         * doing so would cause the decoder (cli only) to throw a "should consume all input" error.
+ * This is only an issue for zstd <= v1.4.3
+ */
+ !zc->isFirstBlock &&
+ cSize < rleMaxLength &&
+ ZSTD_isRLE(ip, srcSize))
+ {
+ cSize = 1;
+ op[0] = ip[0];
+ }
+
+out:
+ if (!ZSTD_isError(cSize) && cSize > 1) {
+ ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState);
+ }
+ /* We check that dictionaries have offset codes available for the first
+ * block. After the first block, the offcode table might not have large
+ * enough codes to represent the offsets in the data.
+ */
+ if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
+ zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;
+
+ return cSize;
+}
+
+static size_t ZSTD_compressBlock_targetCBlockSize_body(ZSTD_CCtx* zc,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const size_t bss, U32 lastBlock)
+{
+ DEBUGLOG(6, "Attempting ZSTD_compressSuperBlock()");
+ if (bss == ZSTDbss_compress) {
+        if (/* We don't want to emit our first block as an RLE block, even if it qualifies, because
+             * doing so would cause the decoder (cli only) to throw a "should consume all input" error.
+ * This is only an issue for zstd <= v1.4.3
+ */
+ !zc->isFirstBlock &&
+ ZSTD_maybeRLE(&zc->seqStore) &&
+ ZSTD_isRLE((BYTE const*)src, srcSize))
+ {
+ return ZSTD_rleCompressBlock(dst, dstCapacity, *(BYTE const*)src, srcSize, lastBlock);
+ }
+ /* Attempt superblock compression.
+ *
+ * Note that compressed size of ZSTD_compressSuperBlock() is not bound by the
+ * standard ZSTD_compressBound(). This is a problem, because even if we have
+ * space now, taking an extra byte now could cause us to run out of space later
+ * and violate ZSTD_compressBound().
+ *
+ * Define blockBound(blockSize) = blockSize + ZSTD_blockHeaderSize.
+ *
+ * In order to respect ZSTD_compressBound() we must attempt to emit a raw
+ * uncompressed block in these cases:
+ * * cSize == 0: Return code for an uncompressed block.
+ * * cSize == dstSize_tooSmall: We may have expanded beyond blockBound(srcSize).
+ * ZSTD_noCompressBlock() will return dstSize_tooSmall if we are really out of
+ * output space.
+ * * cSize >= blockBound(srcSize): We have expanded the block too much so
+ * emit an uncompressed block.
+ */
+ {
+ size_t const cSize = ZSTD_compressSuperBlock(zc, dst, dstCapacity, src, srcSize, lastBlock);
+ if (cSize != ERROR(dstSize_tooSmall)) {
+ size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, zc->appliedParams.cParams.strategy);
+ FORWARD_IF_ERROR(cSize, "ZSTD_compressSuperBlock failed");
+ if (cSize != 0 && cSize < maxCSize + ZSTD_blockHeaderSize) {
+ ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState);
+ return cSize;
+ }
+ }
+ }
+ }
+
+ DEBUGLOG(6, "Resorting to ZSTD_noCompressBlock()");
+ /* Superblock compression failed, attempt to emit a single no compress block.
+ * The decoder will be able to stream this block since it is uncompressed.
+ */
+ return ZSTD_noCompressBlock(dst, dstCapacity, src, srcSize, lastBlock);
+}
+
+static size_t ZSTD_compressBlock_targetCBlockSize(ZSTD_CCtx* zc,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ U32 lastBlock)
+{
+ size_t cSize = 0;
+ const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize);
+ DEBUGLOG(5, "ZSTD_compressBlock_targetCBlockSize (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u, srcSize=%zu)",
+ (unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit, (unsigned)zc->blockState.matchState.nextToUpdate, srcSize);
+ FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed");
+
+ cSize = ZSTD_compressBlock_targetCBlockSize_body(zc, dst, dstCapacity, src, srcSize, bss, lastBlock);
+ FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_targetCBlockSize_body failed");
+
+ if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
+ zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;
+
+ return cSize;
+}
+
+static void ZSTD_overflowCorrectIfNeeded(ZSTD_matchState_t* ms,
+ ZSTD_cwksp* ws,
+ ZSTD_CCtx_params const* params,
+ void const* ip,
+ void const* iend)
+{
+ U32 const cycleLog = ZSTD_cycleLog(params->cParams.chainLog, params->cParams.strategy);
+ U32 const maxDist = (U32)1 << params->cParams.windowLog;
+ if (ZSTD_window_needOverflowCorrection(ms->window, cycleLog, maxDist, ms->loadedDictEnd, ip, iend)) {
+ U32 const correction = ZSTD_window_correctOverflow(&ms->window, cycleLog, maxDist, ip);
+ ZSTD_STATIC_ASSERT(ZSTD_CHAINLOG_MAX <= 30);
+ ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_32 <= 30);
+ ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX <= 31);
+ ZSTD_cwksp_mark_tables_dirty(ws);
+ ZSTD_reduceIndex(ms, params, correction);
+ ZSTD_cwksp_mark_tables_clean(ws);
+ if (ms->nextToUpdate < correction) ms->nextToUpdate = 0;
+ else ms->nextToUpdate -= correction;
+ /* invalidate dictionaries on overflow correction */
+ ms->loadedDictEnd = 0;
+ ms->dictMatchState = NULL;
+ }
+}
+
+/*! ZSTD_compress_frameChunk() :
+* Compress a chunk of data into one or multiple blocks.
+* All blocks will be terminated, all input will be consumed.
+* Function will issue an error if there is not enough `dstCapacity` to hold the compressed content.
+* The frame is assumed to have already started (header already produced).
+* @return : compressed size, or an error code
+*/
+static size_t ZSTD_compress_frameChunk(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ U32 lastFrameChunk)
+{
+ size_t blockSize = cctx->blockSize;
+ size_t remaining = srcSize;
+ const BYTE* ip = (const BYTE*)src;
+ BYTE* const ostart = (BYTE*)dst;
+ BYTE* op = ostart;
+ U32 const maxDist = (U32)1 << cctx->appliedParams.cParams.windowLog;
+
+ assert(cctx->appliedParams.cParams.windowLog <= ZSTD_WINDOWLOG_MAX);
+
+ DEBUGLOG(4, "ZSTD_compress_frameChunk (blockSize=%u)", (unsigned)blockSize);
+ if (cctx->appliedParams.fParams.checksumFlag && srcSize)
+ xxh64_update(&cctx->xxhState, src, srcSize);
+
+ while (remaining) {
+ ZSTD_matchState_t* const ms = &cctx->blockState.matchState;
+ U32 const lastBlock = lastFrameChunk & (blockSize >= remaining);
+
+ RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE,
+ dstSize_tooSmall,
+ "not enough space to store compressed block");
+ if (remaining < blockSize) blockSize = remaining;
+
+ ZSTD_overflowCorrectIfNeeded(
+ ms, &cctx->workspace, &cctx->appliedParams, ip, ip + blockSize);
+ ZSTD_checkDictValidity(&ms->window, ip + blockSize, maxDist, &ms->loadedDictEnd, &ms->dictMatchState);
+ ZSTD_window_enforceMaxDist(&ms->window, ip, maxDist, &ms->loadedDictEnd, &ms->dictMatchState);
+
+ /* Ensure hash/chain table insertion resumes no sooner than lowlimit */
+ if (ms->nextToUpdate < ms->window.lowLimit) ms->nextToUpdate = ms->window.lowLimit;
+
+ { size_t cSize;
+ if (ZSTD_useTargetCBlockSize(&cctx->appliedParams)) {
+ cSize = ZSTD_compressBlock_targetCBlockSize(cctx, op, dstCapacity, ip, blockSize, lastBlock);
+ FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_targetCBlockSize failed");
+ assert(cSize > 0);
+ assert(cSize <= blockSize + ZSTD_blockHeaderSize);
+ } else if (ZSTD_blockSplitterEnabled(&cctx->appliedParams)) {
+ cSize = ZSTD_compressBlock_splitBlock(cctx, op, dstCapacity, ip, blockSize, lastBlock);
+ FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_splitBlock failed");
+ assert(cSize > 0 || cctx->seqCollector.collectSequences == 1);
+ } else {
+ cSize = ZSTD_compressBlock_internal(cctx,
+ op+ZSTD_blockHeaderSize, dstCapacity-ZSTD_blockHeaderSize,
+ ip, blockSize, 1 /* frame */);
+ FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_internal failed");
+
+ if (cSize == 0) { /* block is not compressible */
+ cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);
+ FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed");
+ } else {
+ U32 const cBlockHeader = cSize == 1 ?
+ lastBlock + (((U32)bt_rle)<<1) + (U32)(blockSize << 3) :
+ lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
+ MEM_writeLE24(op, cBlockHeader);
+ cSize += ZSTD_blockHeaderSize;
+ }
+ }
+
+
+ ip += blockSize;
+ assert(remaining >= blockSize);
+ remaining -= blockSize;
+ op += cSize;
+ assert(dstCapacity >= cSize);
+ dstCapacity -= cSize;
+ cctx->isFirstBlock = 0;
+ DEBUGLOG(5, "ZSTD_compress_frameChunk: adding a block of size %u",
+ (unsigned)cSize);
+ } }
+
+ if (lastFrameChunk && (op>ostart)) cctx->stage = ZSTDcs_ending;
+ return (size_t)(op-ostart);
+}
+
+
+static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity,
+ const ZSTD_CCtx_params* params, U64 pledgedSrcSize, U32 dictID)
+{ BYTE* const op = (BYTE*)dst;
+ U32 const dictIDSizeCodeLength = (dictID>0) + (dictID>=256) + (dictID>=65536); /* 0-3 */
+ U32 const dictIDSizeCode = params->fParams.noDictIDFlag ? 0 : dictIDSizeCodeLength; /* 0-3 */
+ U32 const checksumFlag = params->fParams.checksumFlag>0;
+ U32 const windowSize = (U32)1 << params->cParams.windowLog;
+ U32 const singleSegment = params->fParams.contentSizeFlag && (windowSize >= pledgedSrcSize);
+ BYTE const windowLogByte = (BYTE)((params->cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3);
+ U32 const fcsCode = params->fParams.contentSizeFlag ?
+ (pledgedSrcSize>=256) + (pledgedSrcSize>=65536+256) + (pledgedSrcSize>=0xFFFFFFFFU) : 0; /* 0-3 */
+ BYTE const frameHeaderDescriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag<<2) + (singleSegment<<5) + (fcsCode<<6) );
+ size_t pos=0;
+
+ assert(!(params->fParams.contentSizeFlag && pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN));
+ RETURN_ERROR_IF(dstCapacity < ZSTD_FRAMEHEADERSIZE_MAX, dstSize_tooSmall,
+ "dst buf is too small to fit worst-case frame header size.");
+ DEBUGLOG(4, "ZSTD_writeFrameHeader : dictIDFlag : %u ; dictID : %u ; dictIDSizeCode : %u",
+ !params->fParams.noDictIDFlag, (unsigned)dictID, (unsigned)dictIDSizeCode);
+ if (params->format == ZSTD_f_zstd1) {
+ MEM_writeLE32(dst, ZSTD_MAGICNUMBER);
+ pos = 4;
+ }
+ op[pos++] = frameHeaderDescriptionByte;
+ if (!singleSegment) op[pos++] = windowLogByte;
+ switch(dictIDSizeCode)
+ {
+ default:
+ assert(0); /* impossible */
+ ZSTD_FALLTHROUGH;
+ case 0 : break;
+ case 1 : op[pos] = (BYTE)(dictID); pos++; break;
+ case 2 : MEM_writeLE16(op+pos, (U16)dictID); pos+=2; break;
+ case 3 : MEM_writeLE32(op+pos, dictID); pos+=4; break;
+ }
+ switch(fcsCode)
+ {
+ default:
+ assert(0); /* impossible */
+ ZSTD_FALLTHROUGH;
+ case 0 : if (singleSegment) op[pos++] = (BYTE)(pledgedSrcSize); break;
+ case 1 : MEM_writeLE16(op+pos, (U16)(pledgedSrcSize-256)); pos+=2; break;
+ case 2 : MEM_writeLE32(op+pos, (U32)(pledgedSrcSize)); pos+=4; break;
+ case 3 : MEM_writeLE64(op+pos, (U64)(pledgedSrcSize)); pos+=8; break;
+ }
+ return pos;
+}
+
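+/* Editor's note: an illustrative decode sketch (not part of zstd) of the frame
+ * header descriptor byte assembled above: bits 0-1 hold dictIDSizeCode, bit 2
+ * the checksum flag, bit 5 the single-segment flag, and bits 6-7 the frame
+ * content size code (bits 3-4 are reserved): */
+#if 0   /* example only, never compiled */
+static void exampleParseFrameHeaderDescriptor(BYTE fhd)
+{
+    unsigned const dictIDSizeCode = fhd & 3;         /* 0-3 : 0/1/2/4-byte dictID */
+    unsigned const checksumFlag   = (fhd >> 2) & 1;
+    unsigned const singleSegment  = (fhd >> 5) & 1;  /* windowLog byte omitted if set */
+    unsigned const fcsCode        = (fhd >> 6) & 3;  /* 0-3 : 0or1/2/4/8-byte FCS */
+    (void)dictIDSizeCode; (void)checksumFlag; (void)singleSegment; (void)fcsCode;
+}
+#endif
+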
+/* ZSTD_writeSkippableFrame() :
+ * Writes out a skippable frame with the specified magic number variant (16 are supported),
+ * from ZSTD_MAGIC_SKIPPABLE_START to ZSTD_MAGIC_SKIPPABLE_START+15, and the desired source data.
+ *
+ * Returns the total number of bytes written, or a ZSTD error code.
+ */
+size_t ZSTD_writeSkippableFrame(void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize, unsigned magicVariant) {
+ BYTE* op = (BYTE*)dst;
+ RETURN_ERROR_IF(dstCapacity < srcSize + ZSTD_SKIPPABLEHEADERSIZE /* Skippable frame overhead */,
+ dstSize_tooSmall, "Not enough room for skippable frame");
+ RETURN_ERROR_IF(srcSize > (unsigned)0xFFFFFFFF, srcSize_wrong, "Src size too large for skippable frame");
+ RETURN_ERROR_IF(magicVariant > 15, parameter_outOfBound, "Skippable frame magic number variant not supported");
+
+ MEM_writeLE32(op, (U32)(ZSTD_MAGIC_SKIPPABLE_START + magicVariant));
+ MEM_writeLE32(op+4, (U32)srcSize);
+ ZSTD_memcpy(op+8, src, srcSize);
+ return srcSize + ZSTD_SKIPPABLEHEADERSIZE;
+}
+
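+/* Editor's note: the resulting skippable frame layout, as written by
+ * ZSTD_writeSkippableFrame() above:
+ *
+ *   offset 0 : 4 bytes LE : ZSTD_MAGIC_SKIPPABLE_START + magicVariant (0-15)
+ *   offset 4 : 4 bytes LE : srcSize (payload length)
+ *   offset 8 : srcSize bytes : user payload, opaque to the decoder
+ */
+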
+/* ZSTD_writeLastEmptyBlock() :
+ * output an empty Block with end-of-frame mark to complete a frame
+ * @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h))
+ * or an error code if `dstCapacity` is too small (<ZSTD_blockHeaderSize)
+ */
+size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity)
+{
+ RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize, dstSize_tooSmall,
+ "dst buf is too small to write frame trailer empty block.");
+ { U32 const cBlockHeader24 = 1 /*lastBlock*/ + (((U32)bt_raw)<<1); /* 0 size */
+ MEM_writeLE24(dst, cBlockHeader24);
+ return ZSTD_blockHeaderSize;
+ }
+}
+
+size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq)
+{
+ RETURN_ERROR_IF(cctx->stage != ZSTDcs_init, stage_wrong,
+ "wrong cctx stage");
+ RETURN_ERROR_IF(cctx->appliedParams.ldmParams.enableLdm == ZSTD_ps_enable,
+ parameter_unsupported,
+ "incompatible with ldm");
+ cctx->externSeqStore.seq = seq;
+ cctx->externSeqStore.size = nbSeq;
+ cctx->externSeqStore.capacity = nbSeq;
+ cctx->externSeqStore.pos = 0;
+ cctx->externSeqStore.posInSequence = 0;
+ return 0;
+}
+
+
+static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ U32 frame, U32 lastFrameChunk)
+{
+ ZSTD_matchState_t* const ms = &cctx->blockState.matchState;
+ size_t fhSize = 0;
+
+ DEBUGLOG(5, "ZSTD_compressContinue_internal, stage: %u, srcSize: %u",
+ cctx->stage, (unsigned)srcSize);
+ RETURN_ERROR_IF(cctx->stage==ZSTDcs_created, stage_wrong,
+ "missing init (ZSTD_compressBegin)");
+
+ if (frame && (cctx->stage==ZSTDcs_init)) {
+ fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams,
+ cctx->pledgedSrcSizePlusOne-1, cctx->dictID);
+ FORWARD_IF_ERROR(fhSize, "ZSTD_writeFrameHeader failed");
+ assert(fhSize <= dstCapacity);
+ dstCapacity -= fhSize;
+ dst = (char*)dst + fhSize;
+ cctx->stage = ZSTDcs_ongoing;
+ }
+
+ if (!srcSize) return fhSize; /* do not generate an empty block if no input */
+
+ if (!ZSTD_window_update(&ms->window, src, srcSize, ms->forceNonContiguous)) {
+ ms->forceNonContiguous = 0;
+ ms->nextToUpdate = ms->window.dictLimit;
+ }
+ if (cctx->appliedParams.ldmParams.enableLdm == ZSTD_ps_enable) {
+ ZSTD_window_update(&cctx->ldmState.window, src, srcSize, /* forceNonContiguous */ 0);
+ }
+
+ if (!frame) {
+ /* overflow check and correction for block mode */
+ ZSTD_overflowCorrectIfNeeded(
+ ms, &cctx->workspace, &cctx->appliedParams,
+ src, (BYTE const*)src + srcSize);
+ }
+
+ DEBUGLOG(5, "ZSTD_compressContinue_internal (blockSize=%u)", (unsigned)cctx->blockSize);
+ { size_t const cSize = frame ?
+ ZSTD_compress_frameChunk (cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) :
+ ZSTD_compressBlock_internal (cctx, dst, dstCapacity, src, srcSize, 0 /* frame */);
+ FORWARD_IF_ERROR(cSize, "%s", frame ? "ZSTD_compress_frameChunk failed" : "ZSTD_compressBlock_internal failed");
+ cctx->consumedSrcSize += srcSize;
+ cctx->producedCSize += (cSize + fhSize);
+ assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0));
+ if (cctx->pledgedSrcSizePlusOne != 0) { /* control src size */
+ ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1);
+ RETURN_ERROR_IF(
+ cctx->consumedSrcSize+1 > cctx->pledgedSrcSizePlusOne,
+ srcSize_wrong,
+ "error : pledgedSrcSize = %u, while realSrcSize >= %u",
+ (unsigned)cctx->pledgedSrcSizePlusOne-1,
+ (unsigned)cctx->consumedSrcSize);
+ }
+ return cSize + fhSize;
+ }
+}
+
+size_t ZSTD_compressContinue (ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize)
+{
+ DEBUGLOG(5, "ZSTD_compressContinue (srcSize=%u)", (unsigned)srcSize);
+ return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1 /* frame mode */, 0 /* last chunk */);
+}
+
+
+size_t ZSTD_getBlockSize(const ZSTD_CCtx* cctx)
+{
+ ZSTD_compressionParameters const cParams = cctx->appliedParams.cParams;
+ assert(!ZSTD_checkCParams(cParams));
+ return MIN (ZSTD_BLOCKSIZE_MAX, (U32)1 << cParams.windowLog);
+}
+
+size_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+ DEBUGLOG(5, "ZSTD_compressBlock: srcSize = %u", (unsigned)srcSize);
+ { size_t const blockSizeMax = ZSTD_getBlockSize(cctx);
+ RETURN_ERROR_IF(srcSize > blockSizeMax, srcSize_wrong, "input is larger than a block"); }
+
+ return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0 /* frame mode */, 0 /* last chunk */);
+}
+
+/*! ZSTD_loadDictionaryContent() :
+ * @return : 0, or an error code
+ */
+static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms,
+ ldmState_t* ls,
+ ZSTD_cwksp* ws,
+ ZSTD_CCtx_params const* params,
+ const void* src, size_t srcSize,
+ ZSTD_dictTableLoadMethod_e dtlm)
+{
+ const BYTE* ip = (const BYTE*) src;
+ const BYTE* const iend = ip + srcSize;
+ int const loadLdmDict = params->ldmParams.enableLdm == ZSTD_ps_enable && ls != NULL;
+
+    /* Assert that the ms params match the params we're being given */
+ ZSTD_assertEqualCParams(params->cParams, ms->cParams);
+
+ if (srcSize > ZSTD_CHUNKSIZE_MAX) {
+ /* Allow the dictionary to set indices up to exactly ZSTD_CURRENT_MAX.
+ * Dictionaries right at the edge will immediately trigger overflow
+ * correction, but I don't want to insert extra constraints here.
+ */
+ U32 const maxDictSize = ZSTD_CURRENT_MAX - 1;
+ /* We must have cleared our windows when our source is this large. */
+ assert(ZSTD_window_isEmpty(ms->window));
+ if (loadLdmDict)
+ assert(ZSTD_window_isEmpty(ls->window));
+ /* If the dictionary is too large, only load the suffix of the dictionary. */
+ if (srcSize > maxDictSize) {
+ ip = iend - maxDictSize;
+ src = ip;
+ srcSize = maxDictSize;
+ }
+ }
+
+ DEBUGLOG(4, "ZSTD_loadDictionaryContent(): useRowMatchFinder=%d", (int)params->useRowMatchFinder);
+ ZSTD_window_update(&ms->window, src, srcSize, /* forceNonContiguous */ 0);
+ ms->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ms->window.base);
+ ms->forceNonContiguous = params->deterministicRefPrefix;
+
+ if (loadLdmDict) {
+ ZSTD_window_update(&ls->window, src, srcSize, /* forceNonContiguous */ 0);
+ ls->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ls->window.base);
+ }
+
+ if (srcSize <= HASH_READ_SIZE) return 0;
+
+ ZSTD_overflowCorrectIfNeeded(ms, ws, params, ip, iend);
+
+ if (loadLdmDict)
+ ZSTD_ldm_fillHashTable(ls, ip, iend, &params->ldmParams);
+
+ switch(params->cParams.strategy)
+ {
+ case ZSTD_fast:
+ ZSTD_fillHashTable(ms, iend, dtlm);
+ break;
+ case ZSTD_dfast:
+ ZSTD_fillDoubleHashTable(ms, iend, dtlm);
+ break;
+
+ case ZSTD_greedy:
+ case ZSTD_lazy:
+ case ZSTD_lazy2:
+ assert(srcSize >= HASH_READ_SIZE);
+ if (ms->dedicatedDictSearch) {
+ assert(ms->chainTable != NULL);
+ ZSTD_dedicatedDictSearch_lazy_loadDictionary(ms, iend-HASH_READ_SIZE);
+ } else {
+ assert(params->useRowMatchFinder != ZSTD_ps_auto);
+ if (params->useRowMatchFinder == ZSTD_ps_enable) {
+ size_t const tagTableSize = ((size_t)1 << params->cParams.hashLog) * sizeof(U16);
+ ZSTD_memset(ms->tagTable, 0, tagTableSize);
+ ZSTD_row_update(ms, iend-HASH_READ_SIZE);
+ DEBUGLOG(4, "Using row-based hash table for lazy dict");
+ } else {
+ ZSTD_insertAndFindFirstIndex(ms, iend-HASH_READ_SIZE);
+ DEBUGLOG(4, "Using chain-based hash table for lazy dict");
+ }
+ }
+ break;
+
+ case ZSTD_btlazy2: /* we want the dictionary table fully sorted */
+ case ZSTD_btopt:
+ case ZSTD_btultra:
+ case ZSTD_btultra2:
+ assert(srcSize >= HASH_READ_SIZE);
+ ZSTD_updateTree(ms, iend-HASH_READ_SIZE, iend);
+ break;
+
+ default:
+ assert(0); /* not possible : not a valid strategy id */
+ }
+
+ ms->nextToUpdate = (U32)(iend - ms->window.base);
+ return 0;
+}
+
+
+/* Dictionaries that assign zero probability to symbols that do show up
+ * cause problems during FSE encoding. Mark dictionaries with zero-probability
+ * symbols as FSE_repeat_check; only dictionaries whose symbols all have
+ * non-zero probability can be assumed FSE_repeat_valid.
+ */
+static FSE_repeat ZSTD_dictNCountRepeat(short* normalizedCounter, unsigned dictMaxSymbolValue, unsigned maxSymbolValue)
+{
+ U32 s;
+ if (dictMaxSymbolValue < maxSymbolValue) {
+ return FSE_repeat_check;
+ }
+ for (s = 0; s <= maxSymbolValue; ++s) {
+ if (normalizedCounter[s] == 0) {
+ return FSE_repeat_check;
+ }
+ }
+ return FSE_repeat_valid;
+}
+
+size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace,
+ const void* const dict, size_t dictSize)
+{
+ short offcodeNCount[MaxOff+1];
+ unsigned offcodeMaxValue = MaxOff;
+    const BYTE* dictPtr = (const BYTE*)dict;
+    const BYTE* const dictEnd = dictPtr + dictSize;
+    dictPtr += 8;   /* skip magic number and dictionary ID */
+ bs->entropy.huf.repeatMode = HUF_repeat_check;
+
+ { unsigned maxSymbolValue = 255;
+ unsigned hasZeroWeights = 1;
+ size_t const hufHeaderSize = HUF_readCTable((HUF_CElt*)bs->entropy.huf.CTable, &maxSymbolValue, dictPtr,
+ dictEnd-dictPtr, &hasZeroWeights);
+
+ /* We only set the loaded table as valid if it contains all non-zero
+ * weights. Otherwise, we set it to check */
+ if (!hasZeroWeights)
+ bs->entropy.huf.repeatMode = HUF_repeat_valid;
+
+ RETURN_ERROR_IF(HUF_isError(hufHeaderSize), dictionary_corrupted, "");
+ RETURN_ERROR_IF(maxSymbolValue < 255, dictionary_corrupted, "");
+ dictPtr += hufHeaderSize;
+ }
+
+ { unsigned offcodeLog;
+ size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr);
+ RETURN_ERROR_IF(FSE_isError(offcodeHeaderSize), dictionary_corrupted, "");
+ RETURN_ERROR_IF(offcodeLog > OffFSELog, dictionary_corrupted, "");
+ /* fill all offset symbols to avoid garbage at end of table */
+ RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
+ bs->entropy.fse.offcodeCTable,
+ offcodeNCount, MaxOff, offcodeLog,
+ workspace, HUF_WORKSPACE_SIZE)),
+ dictionary_corrupted, "");
+ /* Defer checking offcodeMaxValue because we need to know the size of the dictionary content */
+ dictPtr += offcodeHeaderSize;
+ }
+
+ { short matchlengthNCount[MaxML+1];
+ unsigned matchlengthMaxValue = MaxML, matchlengthLog;
+ size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr);
+ RETURN_ERROR_IF(FSE_isError(matchlengthHeaderSize), dictionary_corrupted, "");
+ RETURN_ERROR_IF(matchlengthLog > MLFSELog, dictionary_corrupted, "");
+ RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
+ bs->entropy.fse.matchlengthCTable,
+ matchlengthNCount, matchlengthMaxValue, matchlengthLog,
+ workspace, HUF_WORKSPACE_SIZE)),
+ dictionary_corrupted, "");
+ bs->entropy.fse.matchlength_repeatMode = ZSTD_dictNCountRepeat(matchlengthNCount, matchlengthMaxValue, MaxML);
+ dictPtr += matchlengthHeaderSize;
+ }
+
+ { short litlengthNCount[MaxLL+1];
+ unsigned litlengthMaxValue = MaxLL, litlengthLog;
+ size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr);
+ RETURN_ERROR_IF(FSE_isError(litlengthHeaderSize), dictionary_corrupted, "");
+ RETURN_ERROR_IF(litlengthLog > LLFSELog, dictionary_corrupted, "");
+ RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
+ bs->entropy.fse.litlengthCTable,
+ litlengthNCount, litlengthMaxValue, litlengthLog,
+ workspace, HUF_WORKSPACE_SIZE)),
+ dictionary_corrupted, "");
+ bs->entropy.fse.litlength_repeatMode = ZSTD_dictNCountRepeat(litlengthNCount, litlengthMaxValue, MaxLL);
+ dictPtr += litlengthHeaderSize;
+ }
+
+ RETURN_ERROR_IF(dictPtr+12 > dictEnd, dictionary_corrupted, "");
+ bs->rep[0] = MEM_readLE32(dictPtr+0);
+ bs->rep[1] = MEM_readLE32(dictPtr+4);
+ bs->rep[2] = MEM_readLE32(dictPtr+8);
+ dictPtr += 12;
+
+ { size_t const dictContentSize = (size_t)(dictEnd - dictPtr);
+ U32 offcodeMax = MaxOff;
+ if (dictContentSize <= ((U32)-1) - 128 KB) {
+ U32 const maxOffset = (U32)dictContentSize + 128 KB; /* The maximum offset that must be supported */
+ offcodeMax = ZSTD_highbit32(maxOffset); /* Calculate minimum offset code required to represent maxOffset */
+ }
+ /* All offset values <= dictContentSize + 128 KB must be representable for a valid table */
+ bs->entropy.fse.offcode_repeatMode = ZSTD_dictNCountRepeat(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff));
+
+ /* All repCodes must be <= dictContentSize and != 0 */
+ { U32 u;
+ for (u=0; u<3; u++) {
+ RETURN_ERROR_IF(bs->rep[u] == 0, dictionary_corrupted, "");
+ RETURN_ERROR_IF(bs->rep[u] > dictContentSize, dictionary_corrupted, "");
+ } } }
+
+ return dictPtr - (const BYTE*)dict;
+}
+
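+/* Editor's note: the parse order implemented by ZSTD_loadCEntropy() above,
+ * summarized (offsets relative to the start of the dictionary):
+ *
+ *   4 bytes  : ZSTD_MAGIC_DICTIONARY
+ *   4 bytes  : dictionary ID
+ *   variable : Huffman table for literals (HUF_readCTable)
+ *   variable : FSE NCount tables for offsets, match lengths, literal lengths
+ *   12 bytes : three 4-byte LE starting repcodes
+ *   rest     : raw dictionary content
+ */
+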
+/* Dictionary format :
+ * See :
+ * https://github.com/facebook/zstd/blob/release/doc/zstd_compression_format.md#dictionary-format
+ */
+/*! ZSTD_loadZstdDictionary() :
+ * @return : dictID, or an error code
+ * assumptions : the magic number is assumed to have been checked already,
+ *               and dictSize is assumed >= 8
+ */
+static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs,
+ ZSTD_matchState_t* ms,
+ ZSTD_cwksp* ws,
+ ZSTD_CCtx_params const* params,
+ const void* dict, size_t dictSize,
+ ZSTD_dictTableLoadMethod_e dtlm,
+ void* workspace)
+{
+ const BYTE* dictPtr = (const BYTE*)dict;
+ const BYTE* const dictEnd = dictPtr + dictSize;
+ size_t dictID;
+ size_t eSize;
+ ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));
+ assert(dictSize >= 8);
+ assert(MEM_readLE32(dictPtr) == ZSTD_MAGIC_DICTIONARY);
+
+ dictID = params->fParams.noDictIDFlag ? 0 : MEM_readLE32(dictPtr + 4 /* skip magic number */ );
+ eSize = ZSTD_loadCEntropy(bs, workspace, dict, dictSize);
+ FORWARD_IF_ERROR(eSize, "ZSTD_loadCEntropy failed");
+ dictPtr += eSize;
+
+ {
+ size_t const dictContentSize = (size_t)(dictEnd - dictPtr);
+ FORWARD_IF_ERROR(ZSTD_loadDictionaryContent(
+ ms, NULL, ws, params, dictPtr, dictContentSize, dtlm), "");
+ }
+ return dictID;
+}
+
+/* ZSTD_compress_insertDictionary() :
+* @return : dictID, or an error code */
+static size_t
+ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs,
+ ZSTD_matchState_t* ms,
+ ldmState_t* ls,
+ ZSTD_cwksp* ws,
+ const ZSTD_CCtx_params* params,
+ const void* dict, size_t dictSize,
+ ZSTD_dictContentType_e dictContentType,
+ ZSTD_dictTableLoadMethod_e dtlm,
+ void* workspace)
+{
+ DEBUGLOG(4, "ZSTD_compress_insertDictionary (dictSize=%u)", (U32)dictSize);
+ if ((dict==NULL) || (dictSize<8)) {
+ RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong, "");
+ return 0;
+ }
+
+ ZSTD_reset_compressedBlockState(bs);
+
+ /* dict restricted modes */
+ if (dictContentType == ZSTD_dct_rawContent)
+ return ZSTD_loadDictionaryContent(ms, ls, ws, params, dict, dictSize, dtlm);
+
+ if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) {
+ if (dictContentType == ZSTD_dct_auto) {
+ DEBUGLOG(4, "raw content dictionary detected");
+ return ZSTD_loadDictionaryContent(
+ ms, ls, ws, params, dict, dictSize, dtlm);
+ }
+ RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong, "");
+ assert(0); /* impossible */
+ }
+
+ /* dict as full zstd dictionary */
+ return ZSTD_loadZstdDictionary(
+ bs, ms, ws, params, dict, dictSize, dtlm, workspace);
+}
+
+#define ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF (128 KB)
+#define ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER (6ULL)
+
+/*! ZSTD_compressBegin_internal() :
+ * @return : 0, or an error code */
+static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
+ const void* dict, size_t dictSize,
+ ZSTD_dictContentType_e dictContentType,
+ ZSTD_dictTableLoadMethod_e dtlm,
+ const ZSTD_CDict* cdict,
+ const ZSTD_CCtx_params* params, U64 pledgedSrcSize,
+ ZSTD_buffered_policy_e zbuff)
+{
+ size_t const dictContentSize = cdict ? cdict->dictContentSize : dictSize;
+ DEBUGLOG(4, "ZSTD_compressBegin_internal: wlog=%u", params->cParams.windowLog);
+ /* params are supposed to be fully validated at this point */
+ assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams)));
+ assert(!((dict) && (cdict))); /* either dict or cdict, not both */
+ if ( (cdict)
+ && (cdict->dictContentSize > 0)
+ && ( pledgedSrcSize < ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF
+ || pledgedSrcSize < cdict->dictContentSize * ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER
+ || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
+ || cdict->compressionLevel == 0)
+ && (params->attachDictPref != ZSTD_dictForceLoad) ) {
+ return ZSTD_resetCCtx_usingCDict(cctx, cdict, params, pledgedSrcSize, zbuff);
+ }
+
+ FORWARD_IF_ERROR( ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
+ dictContentSize,
+ ZSTDcrp_makeClean, zbuff) , "");
+ { size_t const dictID = cdict ?
+ ZSTD_compress_insertDictionary(
+ cctx->blockState.prevCBlock, &cctx->blockState.matchState,
+ &cctx->ldmState, &cctx->workspace, &cctx->appliedParams, cdict->dictContent,
+ cdict->dictContentSize, cdict->dictContentType, dtlm,
+ cctx->entropyWorkspace)
+ : ZSTD_compress_insertDictionary(
+ cctx->blockState.prevCBlock, &cctx->blockState.matchState,
+ &cctx->ldmState, &cctx->workspace, &cctx->appliedParams, dict, dictSize,
+ dictContentType, dtlm, cctx->entropyWorkspace);
+ FORWARD_IF_ERROR(dictID, "ZSTD_compress_insertDictionary failed");
+ assert(dictID <= UINT_MAX);
+ cctx->dictID = (U32)dictID;
+ cctx->dictContentSize = dictContentSize;
+ }
+ return 0;
+}
+
+size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx,
+ const void* dict, size_t dictSize,
+ ZSTD_dictContentType_e dictContentType,
+ ZSTD_dictTableLoadMethod_e dtlm,
+ const ZSTD_CDict* cdict,
+ const ZSTD_CCtx_params* params,
+ unsigned long long pledgedSrcSize)
+{
+ DEBUGLOG(4, "ZSTD_compressBegin_advanced_internal: wlog=%u", params->cParams.windowLog);
+ /* compression parameters verification and optimization */
+ FORWARD_IF_ERROR( ZSTD_checkCParams(params->cParams) , "");
+ return ZSTD_compressBegin_internal(cctx,
+ dict, dictSize, dictContentType, dtlm,
+ cdict,
+ params, pledgedSrcSize,
+ ZSTDb_not_buffered);
+}
+
+/*! ZSTD_compressBegin_advanced() :
+* @return : 0, or an error code */
+size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx,
+ const void* dict, size_t dictSize,
+ ZSTD_parameters params, unsigned long long pledgedSrcSize)
+{
+ ZSTD_CCtx_params cctxParams;
+ ZSTD_CCtxParams_init_internal(&cctxParams, &params, ZSTD_NO_CLEVEL);
+ return ZSTD_compressBegin_advanced_internal(cctx,
+ dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast,
+ NULL /*cdict*/,
+ &cctxParams, pledgedSrcSize);
+}
+
+size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel)
+{
+ ZSTD_CCtx_params cctxParams;
+ {
+ ZSTD_parameters const params = ZSTD_getParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_noAttachDict);
+ ZSTD_CCtxParams_init_internal(&cctxParams, &params, (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : compressionLevel);
+ }
+ DEBUGLOG(4, "ZSTD_compressBegin_usingDict (dictSize=%u)", (unsigned)dictSize);
+ return ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,
+ &cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, ZSTDb_not_buffered);
+}
+
+size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel)
+{
+ return ZSTD_compressBegin_usingDict(cctx, NULL, 0, compressionLevel);
+}
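+
+/* Minimal usage sketch of the buffer-less begin/continue/end API defined
+ * above and below (hypothetical buffers, error handling elided; each chunk
+ * must respect the block-size constraints of ZSTD_compressContinue()) :
+ *
+ *   ZSTD_CCtx* const cctx = ZSTD_createCCtx();
+ *   size_t cSize;
+ *   ZSTD_compressBegin(cctx, 3);                        // level 3, no dict
+ *   cSize  = ZSTD_compressContinue(cctx, dst, dstCap, chunk0, chunk0Size);
+ *   cSize += ZSTD_compressEnd(cctx, dst + cSize, dstCap - cSize,
+ *                             chunk1, chunk1Size);      // writes epilogue
+ *   ZSTD_freeCCtx(cctx);
+ */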
+
+
+/*! ZSTD_writeEpilogue() :
+* Ends a frame.
+* @return : nb of bytes written into dst (or an error code) */
+static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity)
+{
+ BYTE* const ostart = (BYTE*)dst;
+ BYTE* op = ostart;
+ size_t fhSize = 0;
+
+ DEBUGLOG(4, "ZSTD_writeEpilogue");
+ RETURN_ERROR_IF(cctx->stage == ZSTDcs_created, stage_wrong, "init missing");
+
+ /* special case : empty frame */
+ if (cctx->stage == ZSTDcs_init) {
+ fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams, 0, 0);
+ FORWARD_IF_ERROR(fhSize, "ZSTD_writeFrameHeader failed");
+ dstCapacity -= fhSize;
+ op += fhSize;
+ cctx->stage = ZSTDcs_ongoing;
+ }
+
+ if (cctx->stage != ZSTDcs_ending) {
+ /* write one last empty block, make it the "last" block */
+ U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1) + 0;
+ RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for epilogue");
+ MEM_writeLE32(op, cBlockHeader24);
+ op += ZSTD_blockHeaderSize;
+ dstCapacity -= ZSTD_blockHeaderSize;
+ }
+
+ if (cctx->appliedParams.fParams.checksumFlag) {
+ U32 const checksum = (U32) xxh64_digest(&cctx->xxhState);
+ RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for checksum");
+ DEBUGLOG(4, "ZSTD_writeEpilogue: write checksum : %08X", (unsigned)checksum);
+ MEM_writeLE32(op, checksum);
+ op += 4;
+ }
+
+ cctx->stage = ZSTDcs_created; /* return to "created but no init" status */
+ return op-ostart;
+}
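+
+/* Epilogue layout produced above : a 3-byte raw-block header with
+ * lastBlock == 1 and Block_Size == 0 (bytes 01 00 00), optionally followed
+ * by the frame checksum, i.e. the low 32 bits of the XXH64 digest written
+ * little-endian :
+ *
+ *   ... data blocks ... | 01 00 00 | [ checksum (4 B) ]
+ */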
+
+void ZSTD_CCtx_trace(ZSTD_CCtx* cctx, size_t extraCSize)
+{
+ (void)cctx;
+ (void)extraCSize;
+}
+
+size_t ZSTD_compressEnd (ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize)
+{
+ size_t endResult;
+ size_t const cSize = ZSTD_compressContinue_internal(cctx,
+ dst, dstCapacity, src, srcSize,
+ 1 /* frame mode */, 1 /* last chunk */);
+ FORWARD_IF_ERROR(cSize, "ZSTD_compressContinue_internal failed");
+ endResult = ZSTD_writeEpilogue(cctx, (char*)dst + cSize, dstCapacity-cSize);
+ FORWARD_IF_ERROR(endResult, "ZSTD_writeEpilogue failed");
+ assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0));
+ if (cctx->pledgedSrcSizePlusOne != 0) { /* control src size */
+ ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1);
+ DEBUGLOG(4, "end of frame : controlling src size");
+ RETURN_ERROR_IF(
+ cctx->pledgedSrcSizePlusOne != cctx->consumedSrcSize+1,
+ srcSize_wrong,
+ "error : pledgedSrcSize = %u, while realSrcSize = %u",
+ (unsigned)cctx->pledgedSrcSizePlusOne-1,
+ (unsigned)cctx->consumedSrcSize);
+ }
+ ZSTD_CCtx_trace(cctx, endResult);
+ return cSize + endResult;
+}
+
+size_t ZSTD_compress_advanced (ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const void* dict, size_t dictSize,
+ ZSTD_parameters params)
+{
+ DEBUGLOG(4, "ZSTD_compress_advanced");
+ FORWARD_IF_ERROR(ZSTD_checkCParams(params.cParams), "");
+ ZSTD_CCtxParams_init_internal(&cctx->simpleApiParams, &params, ZSTD_NO_CLEVEL);
+ return ZSTD_compress_advanced_internal(cctx,
+ dst, dstCapacity,
+ src, srcSize,
+ dict, dictSize,
+ &cctx->simpleApiParams);
+}
+
+/* Internal */
+size_t ZSTD_compress_advanced_internal(
+ ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const void* dict, size_t dictSize,
+ const ZSTD_CCtx_params* params)
+{
+ DEBUGLOG(4, "ZSTD_compress_advanced_internal (srcSize:%u)", (unsigned)srcSize);
+ FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx,
+ dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,
+ params, srcSize, ZSTDb_not_buffered) , "");
+ return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
+}
+
+size_t ZSTD_compress_usingDict(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const void* dict, size_t dictSize,
+ int compressionLevel)
+{
+ {
+ ZSTD_parameters const params = ZSTD_getParams_internal(compressionLevel, srcSize, dict ? dictSize : 0, ZSTD_cpm_noAttachDict);
+ assert(params.fParams.contentSizeFlag == 1);
+ ZSTD_CCtxParams_init_internal(&cctx->simpleApiParams, &params, (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : compressionLevel);
+ }
+ DEBUGLOG(4, "ZSTD_compress_usingDict (srcSize=%u)", (unsigned)srcSize);
+ return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, dict, dictSize, &cctx->simpleApiParams);
+}
+
+size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ int compressionLevel)
+{
+ DEBUGLOG(4, "ZSTD_compressCCtx (srcSize=%u)", (unsigned)srcSize);
+ assert(cctx != NULL);
+ return ZSTD_compress_usingDict(cctx, dst, dstCapacity, src, srcSize, NULL, 0, compressionLevel);
+}
+
+size_t ZSTD_compress(void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ int compressionLevel)
+{
+ size_t result;
+ ZSTD_CCtx* cctx = ZSTD_createCCtx();
+ RETURN_ERROR_IF(!cctx, memory_allocation, "ZSTD_createCCtx failed");
+ result = ZSTD_compressCCtx(cctx, dst, dstCapacity, src, srcSize, compressionLevel);
+ ZSTD_freeCCtx(cctx);
+ return result;
+}
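+
+/* Minimal one-shot usage sketch (hypothetical buffers; a kernel caller would
+ * allocate dst from its own pool) :
+ *
+ *   size_t const bound = ZSTD_compressBound(srcSize);
+ *   size_t const cSize = ZSTD_compress(dst, bound, src, srcSize, 3);
+ *   if (ZSTD_isError(cSize)) { ... }
+ */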
+
+
+/* ===== Dictionary API ===== */
+
+/*! ZSTD_estimateCDictSize_advanced() :
+ * Estimate amount of memory that will be needed to create a dictionary with following arguments */
+size_t ZSTD_estimateCDictSize_advanced(
+ size_t dictSize, ZSTD_compressionParameters cParams,
+ ZSTD_dictLoadMethod_e dictLoadMethod)
+{
+ DEBUGLOG(5, "sizeof(ZSTD_CDict) : %u", (unsigned)sizeof(ZSTD_CDict));
+ return ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict))
+ + ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE)
+ /* enableDedicatedDictSearch == 1 ensures that CDict estimation will not be too small
+ * in case we are using DDS with row-hash. */
+ + ZSTD_sizeof_matchState(&cParams, ZSTD_resolveRowMatchFinderMode(ZSTD_ps_auto, &cParams),
+ /* enableDedicatedDictSearch */ 1, /* forCCtx */ 0)
+ + (dictLoadMethod == ZSTD_dlm_byRef ? 0
+ : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void *))));
+}
+
+size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel)
+{
+ ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
+ return ZSTD_estimateCDictSize_advanced(dictSize, cParams, ZSTD_dlm_byCopy);
+}
+
+size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict)
+{
+ if (cdict==NULL) return 0; /* support sizeof on NULL */
+ DEBUGLOG(5, "sizeof(*cdict) : %u", (unsigned)sizeof(*cdict));
+ /* cdict may be in the workspace */
+ return (cdict->workspace.workspace == cdict ? 0 : sizeof(*cdict))
+ + ZSTD_cwksp_sizeof(&cdict->workspace);
+}
+
+static size_t ZSTD_initCDict_internal(
+ ZSTD_CDict* cdict,
+ const void* dictBuffer, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType,
+ ZSTD_CCtx_params params)
+{
+ DEBUGLOG(3, "ZSTD_initCDict_internal (dictContentType:%u)", (unsigned)dictContentType);
+ assert(!ZSTD_checkCParams(params.cParams));
+ cdict->matchState.cParams = params.cParams;
+ cdict->matchState.dedicatedDictSearch = params.enableDedicatedDictSearch;
+ if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dictBuffer) || (!dictSize)) {
+ cdict->dictContent = dictBuffer;
+ } else {
+ void *internalBuffer = ZSTD_cwksp_reserve_object(&cdict->workspace, ZSTD_cwksp_align(dictSize, sizeof(void*)));
+ RETURN_ERROR_IF(!internalBuffer, memory_allocation, "NULL pointer!");
+ cdict->dictContent = internalBuffer;
+ ZSTD_memcpy(internalBuffer, dictBuffer, dictSize);
+ }
+ cdict->dictContentSize = dictSize;
+ cdict->dictContentType = dictContentType;
+
+ cdict->entropyWorkspace = (U32*)ZSTD_cwksp_reserve_object(&cdict->workspace, HUF_WORKSPACE_SIZE);
+
+ /* Reset the state to no dictionary */
+ ZSTD_reset_compressedBlockState(&cdict->cBlockState);
+ FORWARD_IF_ERROR(ZSTD_reset_matchState(
+ &cdict->matchState,
+ &cdict->workspace,
+ &params.cParams,
+ params.useRowMatchFinder,
+ ZSTDcrp_makeClean,
+ ZSTDirp_reset,
+ ZSTD_resetTarget_CDict), "");
+ /* (Maybe) load the dictionary
+ * Skips loading the dictionary if it is < 8 bytes.
+ */
+ { params.compressionLevel = ZSTD_CLEVEL_DEFAULT;
+ params.fParams.contentSizeFlag = 1;
+ { size_t const dictID = ZSTD_compress_insertDictionary(
+ &cdict->cBlockState, &cdict->matchState, NULL, &cdict->workspace,
+ &params, cdict->dictContent, cdict->dictContentSize,
+ dictContentType, ZSTD_dtlm_full, cdict->entropyWorkspace);
+ FORWARD_IF_ERROR(dictID, "ZSTD_compress_insertDictionary failed");
+ assert(dictID <= (size_t)(U32)-1);
+ cdict->dictID = (U32)dictID;
+ }
+ }
+
+ return 0;
+}
+
+static ZSTD_CDict* ZSTD_createCDict_advanced_internal(size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_compressionParameters cParams,
+ ZSTD_paramSwitch_e useRowMatchFinder,
+ U32 enableDedicatedDictSearch,
+ ZSTD_customMem customMem)
+{
+ if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
+
+ { size_t const workspaceSize =
+ ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict)) +
+ ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE) +
+ ZSTD_sizeof_matchState(&cParams, useRowMatchFinder, enableDedicatedDictSearch, /* forCCtx */ 0) +
+ (dictLoadMethod == ZSTD_dlm_byRef ? 0
+ : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void*))));
+ void* const workspace = ZSTD_customMalloc(workspaceSize, customMem);
+ ZSTD_cwksp ws;
+ ZSTD_CDict* cdict;
+
+ if (!workspace) return NULL; /* allocation failed : nothing to free */
+
+ ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_dynamic_alloc);
+
+ cdict = (ZSTD_CDict*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CDict));
+ assert(cdict != NULL);
+ ZSTD_cwksp_move(&cdict->workspace, &ws);
+ cdict->customMem = customMem;
+ cdict->compressionLevel = ZSTD_NO_CLEVEL; /* signals advanced API usage */
+ cdict->useRowMatchFinder = useRowMatchFinder;
+ return cdict;
+ }
+}
+
+ZSTD_CDict* ZSTD_createCDict_advanced(const void* dictBuffer, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType,
+ ZSTD_compressionParameters cParams,
+ ZSTD_customMem customMem)
+{
+ ZSTD_CCtx_params cctxParams;
+ ZSTD_memset(&cctxParams, 0, sizeof(cctxParams));
+ ZSTD_CCtxParams_init(&cctxParams, 0);
+ cctxParams.cParams = cParams;
+ cctxParams.customMem = customMem;
+ return ZSTD_createCDict_advanced2(
+ dictBuffer, dictSize,
+ dictLoadMethod, dictContentType,
+ &cctxParams, customMem);
+}
+
+ZSTD_CDict* ZSTD_createCDict_advanced2(
+ const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType,
+ const ZSTD_CCtx_params* originalCctxParams,
+ ZSTD_customMem customMem)
+{
+ ZSTD_CCtx_params cctxParams = *originalCctxParams;
+ ZSTD_compressionParameters cParams;
+ ZSTD_CDict* cdict;
+
+ DEBUGLOG(3, "ZSTD_createCDict_advanced2, mode %u", (unsigned)dictContentType);
+ if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
+
+ if (cctxParams.enableDedicatedDictSearch) {
+ cParams = ZSTD_dedicatedDictSearch_getCParams(
+ cctxParams.compressionLevel, dictSize);
+ ZSTD_overrideCParams(&cParams, &cctxParams.cParams);
+ } else {
+ cParams = ZSTD_getCParamsFromCCtxParams(
+ &cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
+ }
+
+ if (!ZSTD_dedicatedDictSearch_isSupported(&cParams)) {
+ /* Fall back to non-DDSS params */
+ cctxParams.enableDedicatedDictSearch = 0;
+ cParams = ZSTD_getCParamsFromCCtxParams(
+ &cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
+ }
+
+ DEBUGLOG(3, "ZSTD_createCDict_advanced2: DDS: %u", cctxParams.enableDedicatedDictSearch);
+ cctxParams.cParams = cParams;
+ cctxParams.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams.useRowMatchFinder, &cParams);
+
+ cdict = ZSTD_createCDict_advanced_internal(dictSize,
+ dictLoadMethod, cctxParams.cParams,
+ cctxParams.useRowMatchFinder, cctxParams.enableDedicatedDictSearch,
+ customMem);
+
+ if (cdict == NULL) return NULL; /* allocation failed */
+
+ if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
+ dict, dictSize,
+ dictLoadMethod, dictContentType,
+ cctxParams) )) {
+ ZSTD_freeCDict(cdict);
+ return NULL;
+ }
+
+ return cdict;
+}
+
+ZSTD_CDict* ZSTD_createCDict(const void* dict, size_t dictSize, int compressionLevel)
+{
+ ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
+ ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dict, dictSize,
+ ZSTD_dlm_byCopy, ZSTD_dct_auto,
+ cParams, ZSTD_defaultCMem);
+ if (cdict)
+ cdict->compressionLevel = (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : compressionLevel;
+ return cdict;
+}
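+
+/* Intended CDict workflow (hypothetical buffers) : digest the dictionary
+ * once, then reuse it across many frames to amortize table construction :
+ *
+ *   ZSTD_CDict* const cdict = ZSTD_createCDict(dictBuf, dictSize, 3);
+ *   // for each independent input :
+ *   ZSTD_compress_usingCDict(cctx, dst, dstCap, src, srcSize, cdict);
+ *   ZSTD_freeCDict(cdict);
+ */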
+
+ZSTD_CDict* ZSTD_createCDict_byReference(const void* dict, size_t dictSize, int compressionLevel)
+{
+ ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
+ ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dict, dictSize,
+ ZSTD_dlm_byRef, ZSTD_dct_auto,
+ cParams, ZSTD_defaultCMem);
+ if (cdict)
+ cdict->compressionLevel = (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : compressionLevel;
+ return cdict;
+}
+
+size_t ZSTD_freeCDict(ZSTD_CDict* cdict)
+{
+ if (cdict==NULL) return 0; /* support free on NULL */
+ { ZSTD_customMem const cMem = cdict->customMem;
+ int cdictInWorkspace = ZSTD_cwksp_owns_buffer(&cdict->workspace, cdict);
+ ZSTD_cwksp_free(&cdict->workspace, cMem);
+ if (!cdictInWorkspace) {
+ ZSTD_customFree(cdict, cMem);
+ }
+ return 0;
+ }
+}
+
+/*! ZSTD_initStaticCDict() :
+ * Generate a digested dictionary in the provided memory area.
+ * workspace: The memory area to emplace the dictionary into.
+ * The provided pointer must be 8-byte aligned.
+ * It must outlive dictionary usage.
+ * workspaceSize: Use ZSTD_estimateCDictSize()
+ * to determine how large workspace must be.
+ * cParams : use ZSTD_getCParams() to transform a compression level
+ * into its relevant cParams.
+ * @return : pointer to ZSTD_CDict, or NULL on error (size too small)
+ * Note : there is no corresponding "free" function.
+ * Since workspace was allocated externally, it must be freed externally.
+ */
+const ZSTD_CDict* ZSTD_initStaticCDict(
+ void* workspace, size_t workspaceSize,
+ const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType,
+ ZSTD_compressionParameters cParams)
+{
+ ZSTD_paramSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(ZSTD_ps_auto, &cParams);
+ /* enableDedicatedDictSearch == 1 ensures matchstate is not too small in case this CDict will be used for DDS + row hash */
+ size_t const matchStateSize = ZSTD_sizeof_matchState(&cParams, useRowMatchFinder, /* enableDedicatedDictSearch */ 1, /* forCCtx */ 0);
+ size_t const neededSize = ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict))
+ + (dictLoadMethod == ZSTD_dlm_byRef ? 0
+ : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void*))))
+ + ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE)
+ + matchStateSize;
+ ZSTD_CDict* cdict;
+ ZSTD_CCtx_params params;
+
+ if ((size_t)workspace & 7) return NULL; /* workspace must be 8-byte aligned */
+
+ {
+ ZSTD_cwksp ws;
+ ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_static_alloc);
+ cdict = (ZSTD_CDict*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CDict));
+ if (cdict == NULL) return NULL;
+ ZSTD_cwksp_move(&cdict->workspace, &ws);
+ }
+
+ DEBUGLOG(4, "(workspaceSize < neededSize) : (%u < %u) => %u",
+ (unsigned)workspaceSize, (unsigned)neededSize, (unsigned)(workspaceSize < neededSize));
+ if (workspaceSize < neededSize) return NULL;
+
+ ZSTD_CCtxParams_init(&params, 0);
+ params.cParams = cParams;
+ params.useRowMatchFinder = useRowMatchFinder;
+ cdict->useRowMatchFinder = useRowMatchFinder;
+
+ if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
+ dict, dictSize,
+ dictLoadMethod, dictContentType,
+ params) ))
+ return NULL;
+
+ return cdict;
+}
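+
+/* Static initialization sketch (hypothetical buffers) : the caller owns the
+ * 8-byte-aligned workspace, so no allocator is involved and no free function
+ * exists :
+ *
+ *   size_t const wkspSize = ZSTD_estimateCDictSize(dictSize, 3);
+ *   ZSTD_compressionParameters const cParams =
+ *       ZSTD_getCParams(3, ZSTD_CONTENTSIZE_UNKNOWN, dictSize);
+ *   const ZSTD_CDict* const cdict = ZSTD_initStaticCDict(wksp, wkspSize,
+ *           dictBuf, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto, cParams);
+ *   if (cdict == NULL) { ... }    // workspace too small or misaligned
+ */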
+
+ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict)
+{
+ assert(cdict != NULL);
+ return cdict->matchState.cParams;
+}
+
+/*! ZSTD_getDictID_fromCDict() :
+ * Provides the dictID of the dictionary loaded into `cdict`.
+ * If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
+ * Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
+unsigned ZSTD_getDictID_fromCDict(const ZSTD_CDict* cdict)
+{
+ if (cdict==NULL) return 0;
+ return cdict->dictID;
+}
+
+/* ZSTD_compressBegin_usingCDict_internal() :
+ * Implementation of various ZSTD_compressBegin_usingCDict* functions.
+ */
+static size_t ZSTD_compressBegin_usingCDict_internal(
+ ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict,
+ ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize)
+{
+ ZSTD_CCtx_params cctxParams;
+ DEBUGLOG(4, "ZSTD_compressBegin_usingCDict_internal");
+ RETURN_ERROR_IF(cdict==NULL, dictionary_wrong, "NULL pointer!");
+ /* Initialize the cctxParams from the cdict */
+ {
+ ZSTD_parameters params;
+ params.fParams = fParams;
+ params.cParams = ( pledgedSrcSize < ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF
+ || pledgedSrcSize < cdict->dictContentSize * ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER
+ || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
+ || cdict->compressionLevel == 0 ) ?
+ ZSTD_getCParamsFromCDict(cdict)
+ : ZSTD_getCParams(cdict->compressionLevel,
+ pledgedSrcSize,
+ cdict->dictContentSize);
+ ZSTD_CCtxParams_init_internal(&cctxParams, &params, cdict->compressionLevel);
+ }
+ /* Increase window log to fit the entire dictionary and source if the
+ * source size is known. Limit the increase to 19, which is the
+ * window log for compression level 1 with the largest source size.
+ */
+ if (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN) {
+ U32 const limitedSrcSize = (U32)MIN(pledgedSrcSize, 1U << 19);
+ U32 const limitedSrcLog = limitedSrcSize > 1 ? ZSTD_highbit32(limitedSrcSize - 1) + 1 : 1;
+ cctxParams.cParams.windowLog = MAX(cctxParams.cParams.windowLog, limitedSrcLog);
+ }
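+ /* Worked example of the clamp above : pledgedSrcSize = 100 KB gives
+ * limitedSrcSize = 102400, hence limitedSrcLog = ZSTD_highbit32(102399) + 1
+ * = 17, so windowLog is raised to at least 17 (capped at 19 for any size). */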
+ return ZSTD_compressBegin_internal(cctx,
+ NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast,
+ cdict,
+ &cctxParams, pledgedSrcSize,
+ ZSTDb_not_buffered);
+}
+
+
+/* ZSTD_compressBegin_usingCDict_advanced() :
+ * This function is DEPRECATED.
+ * cdict must be != NULL */
+size_t ZSTD_compressBegin_usingCDict_advanced(
+ ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict,
+ ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize)
+{
+ return ZSTD_compressBegin_usingCDict_internal(cctx, cdict, fParams, pledgedSrcSize);
+}
+
+/* ZSTD_compressBegin_usingCDict() :
+ * cdict must be != NULL */
+size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
+{
+ ZSTD_frameParameters const fParams = { 0 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
+ return ZSTD_compressBegin_usingCDict_internal(cctx, cdict, fParams, ZSTD_CONTENTSIZE_UNKNOWN);
+}
+
+/*! ZSTD_compress_usingCDict_internal():
+ * Implementation of various ZSTD_compress_usingCDict* functions.
+ */
+static size_t ZSTD_compress_usingCDict_internal(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const ZSTD_CDict* cdict, ZSTD_frameParameters fParams)
+{
+ FORWARD_IF_ERROR(ZSTD_compressBegin_usingCDict_internal(cctx, cdict, fParams, srcSize), ""); /* will check if cdict != NULL */
+ return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
+}
+
+/*! ZSTD_compress_usingCDict_advanced():
+ * This function is DEPRECATED.
+ */
+size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const ZSTD_CDict* cdict, ZSTD_frameParameters fParams)
+{
+ return ZSTD_compress_usingCDict_internal(cctx, dst, dstCapacity, src, srcSize, cdict, fParams);
+}
+
+/*! ZSTD_compress_usingCDict() :
+ * Compression using a digested Dictionary.
+ * Faster startup than ZSTD_compress_usingDict(), recommended when the same dictionary is used multiple times.
+ * Note that compression parameters are decided at CDict creation time
+ * while frame parameters are hardcoded */
+size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const ZSTD_CDict* cdict)
+{
+ ZSTD_frameParameters const fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
+ return ZSTD_compress_usingCDict_internal(cctx, dst, dstCapacity, src, srcSize, cdict, fParams);
+}
+
+
+
+/* ******************************************************************
+* Streaming
+********************************************************************/
+
+ZSTD_CStream* ZSTD_createCStream(void)
+{
+ DEBUGLOG(3, "ZSTD_createCStream");
+ return ZSTD_createCStream_advanced(ZSTD_defaultCMem);
+}
+
+ZSTD_CStream* ZSTD_initStaticCStream(void *workspace, size_t workspaceSize)
+{
+ return ZSTD_initStaticCCtx(workspace, workspaceSize);
+}
+
+ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem)
+{ /* CStream and CCtx are now same object */
+ return ZSTD_createCCtx_advanced(customMem);
+}
+
+size_t ZSTD_freeCStream(ZSTD_CStream* zcs)
+{
+ return ZSTD_freeCCtx(zcs); /* same object */
+}
+
+
+
+/*====== Initialization ======*/
+
+size_t ZSTD_CStreamInSize(void) { return ZSTD_BLOCKSIZE_MAX; }
+
+size_t ZSTD_CStreamOutSize(void)
+{
+ return ZSTD_compressBound(ZSTD_BLOCKSIZE_MAX) + ZSTD_blockHeaderSize + 4 /* 32-bits hash */ ;
+}
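+
+/* Rationale : the recommended output size is the worst-case compressed size
+ * of one full block (compressBound of a 128 KB block) plus a trailing block
+ * header and the optional 4-byte checksum, so any single block can always be
+ * flushed in one call. */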
+
+static ZSTD_cParamMode_e ZSTD_getCParamMode(ZSTD_CDict const* cdict, ZSTD_CCtx_params const* params, U64 pledgedSrcSize)
+{
+ if (cdict != NULL && ZSTD_shouldAttachDict(cdict, params, pledgedSrcSize))
+ return ZSTD_cpm_attachDict;
+ else
+ return ZSTD_cpm_noAttachDict;
+}
+
+/* ZSTD_resetCStream():
+ * pledgedSrcSize == 0 means "unknown" */
+size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pss)
+{
+ /* temporary : 0 interpreted as "unknown" during transition period.
+ * Users willing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN.
+ * 0 will be interpreted as "empty" in the future.
+ */
+ U64 const pledgedSrcSize = (pss==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;
+ DEBUGLOG(4, "ZSTD_resetCStream: pledgedSrcSize = %u", (unsigned)pledgedSrcSize);
+ FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
+ FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
+ return 0;
+}
+
+/*! ZSTD_initCStream_internal() :
+ * Note : for lib/compress only.
+ * Assumption 1 : params are valid
+ * Assumption 2 : either dict, or cdict, is defined, not both */
+size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
+ const void* dict, size_t dictSize, const ZSTD_CDict* cdict,
+ const ZSTD_CCtx_params* params,
+ unsigned long long pledgedSrcSize)
+{
+ DEBUGLOG(4, "ZSTD_initCStream_internal");
+ FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
+ FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
+ assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams)));
+ zcs->requestedParams = *params;
+ assert(!((dict) && (cdict))); /* either dict or cdict, not both */
+ if (dict) {
+ FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) , "");
+ } else {
+ /* Dictionary is cleared if !cdict */
+ FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) , "");
+ }
+ return 0;
+}
+
+/* ZSTD_initCStream_usingCDict_advanced() :
+ * same as ZSTD_initCStream_usingCDict(), with control over frame parameters */
+size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,
+ const ZSTD_CDict* cdict,
+ ZSTD_frameParameters fParams,
+ unsigned long long pledgedSrcSize)
+{
+ DEBUGLOG(4, "ZSTD_initCStream_usingCDict_advanced");
+ FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
+ FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
+ zcs->requestedParams.fParams = fParams;
+ FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) , "");
+ return 0;
+}
+
+/* note : cdict must outlive compression session */
+size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict)
+{
+ DEBUGLOG(4, "ZSTD_initCStream_usingCDict");
+ FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
+ FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) , "");
+ return 0;
+}
+
+
+/* ZSTD_initCStream_advanced() :
+ * pledgedSrcSize must be exact.
+ * if srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN.
+ * dict is loaded with default parameters ZSTD_dct_auto and ZSTD_dlm_byCopy. */
+size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
+ const void* dict, size_t dictSize,
+ ZSTD_parameters params, unsigned long long pss)
+{
+ /* for compatibility with older programs relying on this behavior.
+ * Users should now specify ZSTD_CONTENTSIZE_UNKNOWN.
+ * This line will be removed in the future.
+ */
+ U64 const pledgedSrcSize = (pss==0 && params.fParams.contentSizeFlag==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;
+ DEBUGLOG(4, "ZSTD_initCStream_advanced");
+ FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
+ FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
+ FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) , "");
+ ZSTD_CCtxParams_setZstdParams(&zcs->requestedParams, &params);
+ FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) , "");
+ return 0;
+}
+
+size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel)
+{
+ DEBUGLOG(4, "ZSTD_initCStream_usingDict");
+ FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
+ FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) , "");
+ FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) , "");
+ return 0;
+}
+
+size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pss)
+{
+ /* temporary : 0 interpreted as "unknown" during transition period.
+ * Users willing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN.
+ * 0 will be interpreted as "empty" in the future.
+ */
+ U64 const pledgedSrcSize = (pss==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;
+ DEBUGLOG(4, "ZSTD_initCStream_srcSize");
+ FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
+ FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, NULL) , "");
+ FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) , "");
+ FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
+ return 0;
+}
+
+size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel)
+{
+ DEBUGLOG(4, "ZSTD_initCStream");
+ FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
+ FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, NULL) , "");
+ FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) , "");
+ return 0;
+}
+
+/*====== Compression ======*/
+
+static size_t ZSTD_nextInputSizeHint(const ZSTD_CCtx* cctx)
+{
+ size_t hintInSize = cctx->inBuffTarget - cctx->inBuffPos;
+ if (hintInSize==0) hintInSize = cctx->blockSize;
+ return hintInSize;
+}
+
+/* ZSTD_compressStream_generic():
+ * internal function for all *compressStream*() variants
+ * @return : hint size for next input */
+static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
+ ZSTD_outBuffer* output,
+ ZSTD_inBuffer* input,
+ ZSTD_EndDirective const flushMode)
+{
+ const char* const istart = (const char*)input->src;
+ const char* const iend = input->size != 0 ? istart + input->size : istart;
+ const char* ip = input->pos != 0 ? istart + input->pos : istart;
+ char* const ostart = (char*)output->dst;
+ char* const oend = output->size != 0 ? ostart + output->size : ostart;
+ char* op = output->pos != 0 ? ostart + output->pos : ostart;
+ U32 someMoreWork = 1;
+
+ /* check expectations */
+ DEBUGLOG(5, "ZSTD_compressStream_generic, flush=%u", (unsigned)flushMode);
+ if (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered) {
+ assert(zcs->inBuff != NULL);
+ assert(zcs->inBuffSize > 0);
+ }
+ if (zcs->appliedParams.outBufferMode == ZSTD_bm_buffered) {
+ assert(zcs->outBuff != NULL);
+ assert(zcs->outBuffSize > 0);
+ }
+ assert(output->pos <= output->size);
+ assert(input->pos <= input->size);
+ assert((U32)flushMode <= (U32)ZSTD_e_end);
+
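+ /* Two-stage state machine, looping until no further progress is possible :
+ * - zcss_load : gather input into inBuff (buffered mode) and compress a
+ * block, writing directly into dst when dst is large enough;
+ * - zcss_flush : drain a block parked in outBuff into dst.
+ * Stages hand over via zcs->streamStage; ending a frame resets the session. */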
+ while (someMoreWork) {
+ switch(zcs->streamStage)
+ {
+ case zcss_init:
+ RETURN_ERROR(init_missing, "call ZSTD_initCStream() first!");
+
+ case zcss_load:
+ if ( (flushMode == ZSTD_e_end)
+ && ( (size_t)(oend-op) >= ZSTD_compressBound(iend-ip) /* Enough output space */
+ || zcs->appliedParams.outBufferMode == ZSTD_bm_stable) /* OR we are allowed to return dstSizeTooSmall */
+ && (zcs->inBuffPos == 0) ) {
+ /* shortcut to compression pass directly into output buffer */
+ size_t const cSize = ZSTD_compressEnd(zcs,
+ op, oend-op, ip, iend-ip);
+ DEBUGLOG(4, "ZSTD_compressEnd : cSize=%u", (unsigned)cSize);
+ FORWARD_IF_ERROR(cSize, "ZSTD_compressEnd failed");
+ ip = iend;
+ op += cSize;
+ zcs->frameEnded = 1;
+ ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ someMoreWork = 0; break;
+ }
+ /* complete loading into inBuffer in buffered mode */
+ if (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered) {
+ size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos;
+ size_t const loaded = ZSTD_limitCopy(
+ zcs->inBuff + zcs->inBuffPos, toLoad,
+ ip, iend-ip);
+ zcs->inBuffPos += loaded;
+ if (loaded != 0)
+ ip += loaded;
+ if ( (flushMode == ZSTD_e_continue)
+ && (zcs->inBuffPos < zcs->inBuffTarget) ) {
+ /* not enough input to fill full block : stop here */
+ someMoreWork = 0; break;
+ }
+ if ( (flushMode == ZSTD_e_flush)
+ && (zcs->inBuffPos == zcs->inToCompress) ) {
+ /* empty */
+ someMoreWork = 0; break;
+ }
+ }
+ /* compress current block (note : this stage cannot be stopped in the middle) */
+ DEBUGLOG(5, "stream compression stage (flushMode==%u)", (unsigned)flushMode);
+ { int const inputBuffered = (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered);
+ void* cDst;
+ size_t cSize;
+ size_t oSize = oend-op;
+ size_t const iSize = inputBuffered
+ ? zcs->inBuffPos - zcs->inToCompress
+ : MIN((size_t)(iend - ip), zcs->blockSize);
+ if (oSize >= ZSTD_compressBound(iSize) || zcs->appliedParams.outBufferMode == ZSTD_bm_stable)
+ cDst = op; /* compress into output buffer, to skip flush stage */
+ else
+ cDst = zcs->outBuff, oSize = zcs->outBuffSize;
+ if (inputBuffered) {
+ unsigned const lastBlock = (flushMode == ZSTD_e_end) && (ip==iend);
+ cSize = lastBlock ?
+ ZSTD_compressEnd(zcs, cDst, oSize,
+ zcs->inBuff + zcs->inToCompress, iSize) :
+ ZSTD_compressContinue(zcs, cDst, oSize,
+ zcs->inBuff + zcs->inToCompress, iSize);
+ FORWARD_IF_ERROR(cSize, "%s", lastBlock ? "ZSTD_compressEnd failed" : "ZSTD_compressContinue failed");
+ zcs->frameEnded = lastBlock;
+ /* prepare next block */
+ zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSize;
+ if (zcs->inBuffTarget > zcs->inBuffSize)
+ zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSize;
+ DEBUGLOG(5, "inBuffTarget:%u / inBuffSize:%u",
+ (unsigned)zcs->inBuffTarget, (unsigned)zcs->inBuffSize);
+ if (!lastBlock)
+ assert(zcs->inBuffTarget <= zcs->inBuffSize);
+ zcs->inToCompress = zcs->inBuffPos;
+ } else {
+ unsigned const lastBlock = (ip + iSize == iend);
+ assert(flushMode == ZSTD_e_end /* Already validated */);
+ cSize = lastBlock ?
+ ZSTD_compressEnd(zcs, cDst, oSize, ip, iSize) :
+ ZSTD_compressContinue(zcs, cDst, oSize, ip, iSize);
+ /* Consume the input prior to error checking to mirror buffered mode. */
+ if (iSize > 0)
+ ip += iSize;
+ FORWARD_IF_ERROR(cSize, "%s", lastBlock ? "ZSTD_compressEnd failed" : "ZSTD_compressContinue failed");
+ zcs->frameEnded = lastBlock;
+ if (lastBlock)
+ assert(ip == iend);
+ }
+ if (cDst == op) { /* no need to flush */
+ op += cSize;
+ if (zcs->frameEnded) {
+ DEBUGLOG(5, "Frame completed directly in outBuffer");
+ someMoreWork = 0;
+ ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ }
+ break;
+ }
+ zcs->outBuffContentSize = cSize;
+ zcs->outBuffFlushedSize = 0;
+ zcs->streamStage = zcss_flush; /* pass-through to flush stage */
+ }
+ ZSTD_FALLTHROUGH;
+ case zcss_flush:
+ DEBUGLOG(5, "flush stage");
+ assert(zcs->appliedParams.outBufferMode == ZSTD_bm_buffered);
+ { size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
+ size_t const flushed = ZSTD_limitCopy(op, (size_t)(oend-op),
+ zcs->outBuff + zcs->outBuffFlushedSize, toFlush);
+ DEBUGLOG(5, "toFlush: %u into %u ==> flushed: %u",
+ (unsigned)toFlush, (unsigned)(oend-op), (unsigned)flushed);
+ if (flushed)
+ op += flushed;
+ zcs->outBuffFlushedSize += flushed;
+ if (toFlush!=flushed) {
+ /* flush not fully completed, presumably because dst is too small */
+ assert(op==oend);
+ someMoreWork = 0;
+ break;
+ }
+ zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0;
+ if (zcs->frameEnded) {
+ DEBUGLOG(5, "Frame completed on flush");
+ someMoreWork = 0;
+ ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
+ break;
+ }
+ zcs->streamStage = zcss_load;
+ break;
+ }
+
+ default: /* impossible */
+ assert(0);
+ }
+ }
+
+ input->pos = ip - istart;
+ output->pos = op - ostart;
+ if (zcs->frameEnded) return 0;
+ return ZSTD_nextInputSizeHint(zcs);
+}
+
+static size_t ZSTD_nextInputSizeHint_MTorST(const ZSTD_CCtx* cctx)
+{
+ return ZSTD_nextInputSizeHint(cctx);
+}
+
+size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
+{
+ FORWARD_IF_ERROR( ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue) , "");
+ return ZSTD_nextInputSizeHint_MTorST(zcs);
+}
+
+/* After a compression call set the expected input/output buffer.
+ * This is validated at the start of the next compression call.
+ */
+static void ZSTD_setBufferExpectations(ZSTD_CCtx* cctx, ZSTD_outBuffer const* output, ZSTD_inBuffer const* input)
+{
+ if (cctx->appliedParams.inBufferMode == ZSTD_bm_stable) {
+ cctx->expectedInBuffer = *input;
+ }
+ if (cctx->appliedParams.outBufferMode == ZSTD_bm_stable) {
+ cctx->expectedOutBufferSize = output->size - output->pos;
+ }
+}
+
+/* Validate that the input/output buffers match the expectations set by
+ * ZSTD_setBufferExpectations.
+ */
+static size_t ZSTD_checkBufferStability(ZSTD_CCtx const* cctx,
+ ZSTD_outBuffer const* output,
+ ZSTD_inBuffer const* input,
+ ZSTD_EndDirective endOp)
+{
+ if (cctx->appliedParams.inBufferMode == ZSTD_bm_stable) {
+ ZSTD_inBuffer const expect = cctx->expectedInBuffer;
+ if (expect.src != input->src || expect.pos != input->pos || expect.size != input->size)
+ RETURN_ERROR(srcBuffer_wrong, "ZSTD_c_stableInBuffer enabled but input differs!");
+ if (endOp != ZSTD_e_end)
+ RETURN_ERROR(srcBuffer_wrong, "ZSTD_c_stableInBuffer can only be used with ZSTD_e_end!");
+ }
+ if (cctx->appliedParams.outBufferMode == ZSTD_bm_stable) {
+ size_t const outBufferSize = output->size - output->pos;
+ if (cctx->expectedOutBufferSize != outBufferSize)
+ RETURN_ERROR(dstBuffer_wrong, "ZSTD_c_stableOutBuffer enabled but output size differs!");
+ }
+ return 0;
+}
+
+static size_t ZSTD_CCtx_init_compressStream2(ZSTD_CCtx* cctx,
+ ZSTD_EndDirective endOp,
+ size_t inSize)
+{
+ ZSTD_CCtx_params params = cctx->requestedParams;
+ ZSTD_prefixDict const prefixDict = cctx->prefixDict;
+ FORWARD_IF_ERROR( ZSTD_initLocalDict(cctx) , ""); /* Init the local dict if present. */
+ ZSTD_memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict)); /* single usage */
+ assert(prefixDict.dict==NULL || cctx->cdict==NULL); /* only one can be set */
+ if (cctx->cdict && !cctx->localDict.cdict) {
+ /* Let the cdict's compression level take priority over the requested params.
+ * But do not take the cdict's compression level if the "cdict" is actually a localDict
+ * generated from ZSTD_initLocalDict().
+ */
+ params.compressionLevel = cctx->cdict->compressionLevel;
+ }
+ DEBUGLOG(4, "ZSTD_compressStream2 : transparent init stage");
+ if (endOp == ZSTD_e_end) cctx->pledgedSrcSizePlusOne = inSize + 1; /* auto-fix pledgedSrcSize */
+ {
+ size_t const dictSize = prefixDict.dict
+ ? prefixDict.dictSize
+ : (cctx->cdict ? cctx->cdict->dictContentSize : 0);
+ ZSTD_cParamMode_e const mode = ZSTD_getCParamMode(cctx->cdict, &params, cctx->pledgedSrcSizePlusOne - 1);
+ params.cParams = ZSTD_getCParamsFromCCtxParams(
+ &params, cctx->pledgedSrcSizePlusOne-1,
+ dictSize, mode);
+ }
+
+ params.useBlockSplitter = ZSTD_resolveBlockSplitterMode(params.useBlockSplitter, &params.cParams);
+ params.ldmParams.enableLdm = ZSTD_resolveEnableLdm(params.ldmParams.enableLdm, &params.cParams);
+ params.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params.useRowMatchFinder, &params.cParams);
+
+ { U64 const pledgedSrcSize = cctx->pledgedSrcSizePlusOne - 1;
+ assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
+ FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx,
+ prefixDict.dict, prefixDict.dictSize, prefixDict.dictContentType, ZSTD_dtlm_fast,
+ cctx->cdict,
+ &params, pledgedSrcSize,
+ ZSTDb_buffered) , "");
+ assert(cctx->appliedParams.nbWorkers == 0);
+ cctx->inToCompress = 0;
+ cctx->inBuffPos = 0;
+ if (cctx->appliedParams.inBufferMode == ZSTD_bm_buffered) {
+ /* for small input: avoid automatic flush on reaching end of block, since
+ * it would require adding a 3-byte null block to end the frame
+ */
+ cctx->inBuffTarget = cctx->blockSize + (cctx->blockSize == pledgedSrcSize);
+ } else {
+ cctx->inBuffTarget = 0;
+ }
+ cctx->outBuffContentSize = cctx->outBuffFlushedSize = 0;
+ cctx->streamStage = zcss_load;
+ cctx->frameEnded = 0;
+ }
+ return 0;
+}
+
+size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,
+ ZSTD_outBuffer* output,
+ ZSTD_inBuffer* input,
+ ZSTD_EndDirective endOp)
+{
+ DEBUGLOG(5, "ZSTD_compressStream2, endOp=%u ", (unsigned)endOp);
+ /* check conditions */
+ RETURN_ERROR_IF(output->pos > output->size, dstSize_tooSmall, "invalid output buffer");
+ RETURN_ERROR_IF(input->pos > input->size, srcSize_wrong, "invalid input buffer");
+ RETURN_ERROR_IF((U32)endOp > (U32)ZSTD_e_end, parameter_outOfBound, "invalid endDirective");
+ assert(cctx != NULL);
+
+ /* transparent initialization stage */
+ if (cctx->streamStage == zcss_init) {
+ FORWARD_IF_ERROR(ZSTD_CCtx_init_compressStream2(cctx, endOp, input->size), "CompressStream2 initialization failed");
+ ZSTD_setBufferExpectations(cctx, output, input); /* Set initial buffer expectations now that we've initialized */
+ }
+ /* end of transparent initialization stage */
+
+ FORWARD_IF_ERROR(ZSTD_checkBufferStability(cctx, output, input, endOp), "invalid buffers");
+ /* compression stage */
+ FORWARD_IF_ERROR( ZSTD_compressStream_generic(cctx, output, input, endOp) , "");
+ DEBUGLOG(5, "completed ZSTD_compressStream2");
+ ZSTD_setBufferExpectations(cctx, output, input);
+ return cctx->outBuffContentSize - cctx->outBuffFlushedSize; /* remaining to flush */
+}
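+
+/* Minimal streaming sketch (hypothetical read_more()/write_out() helpers,
+ * error handling elided; buffers sized via ZSTD_CStreamInSize() and
+ * ZSTD_CStreamOutSize()) :
+ *
+ *   ZSTD_inBuffer  in  = { inBuf,  0, 0 };
+ *   ZSTD_outBuffer out = { outBuf, outCap, 0 };
+ *   size_t remaining;
+ *   while ((in.size = read_more(inBuf)) != 0) {
+ *       in.pos = 0;
+ *       while (in.pos < in.size) {
+ *           out.pos = 0;
+ *           ZSTD_compressStream2(cctx, &out, &in, ZSTD_e_continue);
+ *           write_out(outBuf, out.pos);
+ *       }
+ *   }
+ *   do {   // finish the frame, flushing until nothing remains
+ *       out.pos = 0;
+ *       remaining = ZSTD_compressStream2(cctx, &out, &in, ZSTD_e_end);
+ *       write_out(outBuf, out.pos);
+ *   } while (remaining != 0);
+ */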
+
+size_t ZSTD_compressStream2_simpleArgs (
+ ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity, size_t* dstPos,
+ const void* src, size_t srcSize, size_t* srcPos,
+ ZSTD_EndDirective endOp)
+{
+ ZSTD_outBuffer output = { dst, dstCapacity, *dstPos };
+ ZSTD_inBuffer input = { src, srcSize, *srcPos };
+ /* ZSTD_compressStream2() will check validity of dstPos and srcPos */
+ size_t const cErr = ZSTD_compressStream2(cctx, &output, &input, endOp);
+ *dstPos = output.pos;
+ *srcPos = input.pos;
+ return cErr;
+}
+
+size_t ZSTD_compress2(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize)
+{
+ ZSTD_bufferMode_e const originalInBufferMode = cctx->requestedParams.inBufferMode;
+ ZSTD_bufferMode_e const originalOutBufferMode = cctx->requestedParams.outBufferMode;
+ DEBUGLOG(4, "ZSTD_compress2 (srcSize=%u)", (unsigned)srcSize);
+ ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
+ /* Enable stable input/output buffers. */
+ cctx->requestedParams.inBufferMode = ZSTD_bm_stable;
+ cctx->requestedParams.outBufferMode = ZSTD_bm_stable;
+ { size_t oPos = 0;
+ size_t iPos = 0;
+ size_t const result = ZSTD_compressStream2_simpleArgs(cctx,
+ dst, dstCapacity, &oPos,
+ src, srcSize, &iPos,
+ ZSTD_e_end);
+ /* Reset to the original values. */
+ cctx->requestedParams.inBufferMode = originalInBufferMode;
+ cctx->requestedParams.outBufferMode = originalOutBufferMode;
+ FORWARD_IF_ERROR(result, "ZSTD_compressStream2_simpleArgs failed");
+ if (result != 0) { /* compression not completed, due to lack of output space */
+ assert(oPos == dstCapacity);
+ RETURN_ERROR(dstSize_tooSmall, "");
+ }
+ assert(iPos == srcSize); /* all input is expected to be consumed */
+ return oPos;
+ }
+}
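+
+/* ZSTD_compress2() honors parameters previously set on the context; a short
+ * sketch (hypothetical buffers) :
+ *
+ *   ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 19);
+ *   ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 1);
+ *   size_t const cSize = ZSTD_compress2(cctx, dst, dstCap, src, srcSize);
+ */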
+
+typedef struct {
+ U32 idx; /* Index in array of ZSTD_Sequence */
+ U32 posInSequence; /* Position within sequence at idx */
+ size_t posInSrc; /* Number of bytes given by sequences provided so far */
+} ZSTD_sequencePosition;
+
+/* ZSTD_validateSequence() :
+ * @offCode : is presumed to follow format required by ZSTD_storeSeq()
+ * @returns a ZSTD error code if sequence is not valid
+ */
+static size_t
+ZSTD_validateSequence(U32 offCode, U32 matchLength,
+ size_t posInSrc, U32 windowLog, size_t dictSize)
+{
+ U32 const windowSize = 1u << windowLog; /* unsigned shift : windowLog may be 31 */
+ /* posInSrc represents the amount of data the decoder would decode up to this point.
+ * As long as the amount of data decoded is less than or equal to window size, offsets may be
+ * larger than the total length of output decoded in order to reference the dict, even larger than
+ * window size. After output surpasses windowSize, we're limited to windowSize offsets again.
+ */
+ size_t const offsetBound = posInSrc > windowSize ? (size_t)windowSize : posInSrc + (size_t)dictSize;
+ RETURN_ERROR_IF(offCode > STORE_OFFSET(offsetBound), corruption_detected, "Offset too large!");
+ RETURN_ERROR_IF(matchLength < MINMATCH, corruption_detected, "Matchlength too small");
+ return 0;
+}
+
+/* Returns an offset code, given a sequence's raw offset, the ongoing repcode array, and whether litLength == 0 */
+static U32 ZSTD_finalizeOffCode(U32 rawOffset, const U32 rep[ZSTD_REP_NUM], U32 ll0)
+{
+ U32 offCode = STORE_OFFSET(rawOffset);
+
+ if (!ll0 && rawOffset == rep[0]) {
+ offCode = STORE_REPCODE_1;
+ } else if (rawOffset == rep[1]) {
+ offCode = STORE_REPCODE(2 - ll0);
+ } else if (rawOffset == rep[2]) {
+ offCode = STORE_REPCODE(3 - ll0);
+ } else if (ll0 && rawOffset == rep[0] - 1) {
+ offCode = STORE_REPCODE_3;
+ }
+ return offCode;
+}
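+
+/* Worked example for the mapping above : with rep = {8, 4, 12} and
+ * litLength > 0 (ll0 == 0), rawOffset 8 -> STORE_REPCODE_1,
+ * 4 -> STORE_REPCODE_2, 12 -> STORE_REPCODE_3. With litLength == 0
+ * (ll0 == 1), rep[0] itself is not addressable, so 4 -> STORE_REPCODE_1,
+ * 12 -> STORE_REPCODE_2, and 7 (== rep[0] - 1) -> STORE_REPCODE_3.
+ * Any other offset stays a literal offset via STORE_OFFSET(). */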
+
+/* Returns 0 on success, and a ZSTD_error otherwise. This function scans through an array of
+ * ZSTD_Sequence, storing the sequences it finds, until it reaches a block delimiter.
+ */
+static size_t
+ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx,
+ ZSTD_sequencePosition* seqPos,
+ const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
+ const void* src, size_t blockSize)
+{
+ U32 idx = seqPos->idx;
+ BYTE const* ip = (BYTE const*)(src);
+ const BYTE* const iend = ip + blockSize;
+ repcodes_t updatedRepcodes;
+ U32 dictSize;
+
+ if (cctx->cdict) {
+ dictSize = (U32)cctx->cdict->dictContentSize;
+ } else if (cctx->prefixDict.dict) {
+ dictSize = (U32)cctx->prefixDict.dictSize;
+ } else {
+ dictSize = 0;
+ }
+ ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(repcodes_t));
+ /* check idx before dereferencing inSeqs[idx] */
+ for (; idx < inSeqsSize && (inSeqs[idx].matchLength != 0 || inSeqs[idx].offset != 0); ++idx) {
+ U32 const litLength = inSeqs[idx].litLength;
+ U32 const ll0 = (litLength == 0);
+ U32 const matchLength = inSeqs[idx].matchLength;
+ U32 const offCode = ZSTD_finalizeOffCode(inSeqs[idx].offset, updatedRepcodes.rep, ll0);
+ ZSTD_updateRep(updatedRepcodes.rep, offCode, ll0);
+
+ DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offCode, matchLength, litLength);
+ if (cctx->appliedParams.validateSequences) {
+ seqPos->posInSrc += litLength + matchLength;
+ FORWARD_IF_ERROR(ZSTD_validateSequence(offCode, matchLength, seqPos->posInSrc,
+ cctx->appliedParams.cParams.windowLog, dictSize),
+ "Sequence validation failed");
+ }
+ RETURN_ERROR_IF(idx - seqPos->idx > cctx->seqStore.maxNbSeq, memory_allocation,
+ "Not enough memory allocated. Try adjusting ZSTD_c_minMatch.");
+ ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offCode, matchLength);
+ ip += matchLength + litLength;
+ }
+ ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(repcodes_t));
+
+ RETURN_ERROR_IF(idx == inSeqsSize, corruption_detected,
+ "Block delimiter not found!");
+ if (inSeqs[idx].litLength) {
+ DEBUGLOG(6, "Storing last literals of size: %u", inSeqs[idx].litLength);
+ ZSTD_storeLastLiterals(&cctx->seqStore, ip, inSeqs[idx].litLength);
+ ip += inSeqs[idx].litLength;
+ seqPos->posInSrc += inSeqs[idx].litLength;
+ }
+ RETURN_ERROR_IF(ip != iend, corruption_detected, "Blocksize doesn't agree with block delimiter!");
+ seqPos->idx = idx+1;
+ return 0;
+}
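+
+/* Example input for the explicit-delimiter format above : the 11-byte block
+ * "abcabcabcxy" can be described as
+ *
+ *   { {.offset = 3, .litLength = 3, .matchLength = 6},    // "abc", then match
+ *     {.offset = 0, .litLength = 2, .matchLength = 0} }   // delimiter + "xy"
+ *
+ * where the final entry (offset == 0 and matchLength == 0) closes the block
+ * and carries the last literals in its litLength field. */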
+
+/* Returns the number of bytes to move the current read position back by. Only non-zero
+ * if we ended up splitting a sequence. Otherwise, it may return a ZSTD error if something
+ * went wrong.
+ *
+ * This function will attempt to scan through blockSize bytes represented by the sequences
+ * in inSeqs, storing any (partial) sequences.
+ *
+ * Occasionally, we may want to change the actual number of bytes we consumed from inSeqs to
+ * avoid splitting a match, or to avoid splitting a match such that it would produce a match
+ * smaller than MINMATCH. In this case, we return the number of bytes that we didn't read from this block.
+ */
+static size_t
+ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos,
+ const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
+ const void* src, size_t blockSize)
+{
+ U32 idx = seqPos->idx;
+ U32 startPosInSequence = seqPos->posInSequence;
+ U32 endPosInSequence = seqPos->posInSequence + (U32)blockSize;
+ size_t dictSize;
+ BYTE const* ip = (BYTE const*)(src);
+ BYTE const* iend = ip + blockSize; /* May be adjusted if we decide to process fewer than blockSize bytes */
+ repcodes_t updatedRepcodes;
+ U32 bytesAdjustment = 0;
+ U32 finalMatchSplit = 0;
+
+ if (cctx->cdict) {
+ dictSize = cctx->cdict->dictContentSize;
+ } else if (cctx->prefixDict.dict) {
+ dictSize = cctx->prefixDict.dictSize;
+ } else {
+ dictSize = 0;
+ }
+ DEBUGLOG(5, "ZSTD_copySequencesToSeqStoreNoBlockDelim: idx: %u PIS: %u blockSize: %zu", idx, startPosInSequence, blockSize);
+ DEBUGLOG(5, "Start seq: idx: %u (of: %u ml: %u ll: %u)", idx, inSeqs[idx].offset, inSeqs[idx].matchLength, inSeqs[idx].litLength);
+ ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(repcodes_t));
+ while (endPosInSequence && idx < inSeqsSize && !finalMatchSplit) {
+ const ZSTD_Sequence currSeq = inSeqs[idx];
+ U32 litLength = currSeq.litLength;
+ U32 matchLength = currSeq.matchLength;
+ U32 const rawOffset = currSeq.offset;
+ U32 offCode;
+
+ /* Modify the sequence depending on where endPosInSequence lies */
+ if (endPosInSequence >= currSeq.litLength + currSeq.matchLength) {
+ if (startPosInSequence >= litLength) {
+ startPosInSequence -= litLength;
+ litLength = 0;
+ matchLength -= startPosInSequence;
+ } else {
+ litLength -= startPosInSequence;
+ }
+ /* Move to the next sequence */
+ endPosInSequence -= currSeq.litLength + currSeq.matchLength;
+ startPosInSequence = 0;
+ idx++;
+ } else {
+ /* This is the final (partial) sequence we're adding from inSeqs, and endPosInSequence
+ does not reach the end of the match. So, we have to split the sequence */
+ DEBUGLOG(6, "Require a split: diff: %u, idx: %u PIS: %u",
+ currSeq.litLength + currSeq.matchLength - endPosInSequence, idx, endPosInSequence);
+ if (endPosInSequence > litLength) {
+ U32 firstHalfMatchLength;
+ litLength = startPosInSequence >= litLength ? 0 : litLength - startPosInSequence;
+ firstHalfMatchLength = endPosInSequence - startPosInSequence - litLength;
+ if (matchLength > blockSize && firstHalfMatchLength >= cctx->appliedParams.cParams.minMatch) {
+ /* Only ever split the match if it is larger than the block size */
+ U32 secondHalfMatchLength = currSeq.matchLength + currSeq.litLength - endPosInSequence;
+ if (secondHalfMatchLength < cctx->appliedParams.cParams.minMatch) {
+ /* Move the endPosInSequence backward so that it creates match of minMatch length */
+ endPosInSequence -= cctx->appliedParams.cParams.minMatch - secondHalfMatchLength;
+ bytesAdjustment = cctx->appliedParams.cParams.minMatch - secondHalfMatchLength;
+ firstHalfMatchLength -= bytesAdjustment;
+ }
+ matchLength = firstHalfMatchLength;
+ /* Flag that we split the last match - after storing the sequence, exit the loop,
+ but keep the value of endPosInSequence */
+ finalMatchSplit = 1;
+ } else {
+ /* Move the position in sequence backwards so that we don't split match, and break to store
+ * the last literals. We use the original currSeq.litLength as a marker for where endPosInSequence
+ * should go. We prefer to do this whenever it is not necessary to split the match, or if doing so
+ * would cause the first half of the match to be too small
+ */
+ bytesAdjustment = endPosInSequence - currSeq.litLength;
+ endPosInSequence = currSeq.litLength;
+ break;
+ }
+ } else {
+ /* This sequence ends inside the literals, break to store the last literals */
+ break;
+ }
+ }
+ /* Check if this offset can be represented with a repcode */
+ { U32 const ll0 = (litLength == 0);
+ offCode = ZSTD_finalizeOffCode(rawOffset, updatedRepcodes.rep, ll0);
+ ZSTD_updateRep(updatedRepcodes.rep, offCode, ll0);
+ }
+
+ if (cctx->appliedParams.validateSequences) {
+ seqPos->posInSrc += litLength + matchLength;
+ FORWARD_IF_ERROR(ZSTD_validateSequence(offCode, matchLength, seqPos->posInSrc,
+ cctx->appliedParams.cParams.windowLog, dictSize),
+ "Sequence validation failed");
+ }
+ DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offCode, matchLength, litLength);
+ RETURN_ERROR_IF(idx - seqPos->idx > cctx->seqStore.maxNbSeq, memory_allocation,
+ "Not enough memory allocated. Try adjusting ZSTD_c_minMatch.");
+ ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offCode, matchLength);
+ ip += matchLength + litLength;
+ }
+ DEBUGLOG(5, "Ending seq: idx: %u (of: %u ml: %u ll: %u)", idx, inSeqs[idx].offset, inSeqs[idx].matchLength, inSeqs[idx].litLength);
+ assert(idx == inSeqsSize || endPosInSequence <= inSeqs[idx].litLength + inSeqs[idx].matchLength);
+ seqPos->idx = idx;
+ seqPos->posInSequence = endPosInSequence;
+ ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(repcodes_t));
+
+ iend -= bytesAdjustment;
+ if (ip != iend) {
+ /* Store any last literals */
+ U32 lastLLSize = (U32)(iend - ip);
+ assert(ip <= iend);
+ DEBUGLOG(6, "Storing last literals of size: %u", lastLLSize);
+ ZSTD_storeLastLiterals(&cctx->seqStore, ip, lastLLSize);
+ seqPos->posInSrc += lastLLSize;
+ }
+
+ return bytesAdjustment;
+}
+
+typedef size_t (*ZSTD_sequenceCopier) (ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos,
+ const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
+ const void* src, size_t blockSize);
+static ZSTD_sequenceCopier ZSTD_selectSequenceCopier(ZSTD_sequenceFormat_e mode)
+{
+ ZSTD_sequenceCopier sequenceCopier = NULL;
+ assert(ZSTD_cParam_withinBounds(ZSTD_c_blockDelimiters, mode));
+ if (mode == ZSTD_sf_explicitBlockDelimiters) {
+ return ZSTD_copySequencesToSeqStoreExplicitBlockDelim;
+ } else if (mode == ZSTD_sf_noBlockDelimiters) {
+ return ZSTD_copySequencesToSeqStoreNoBlockDelim;
+ }
+ assert(sequenceCopier != NULL);
+ return sequenceCopier;
+}
+
+/* Compress, block-by-block, all of the sequences given.
+ *
+ * Returns the cumulative size of all compressed blocks (including their headers),
+ * otherwise a ZSTD error.
+ */
+static size_t
+ZSTD_compressSequences_internal(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
+ const void* src, size_t srcSize)
+{
+ size_t cSize = 0;
+ U32 lastBlock;
+ size_t blockSize;
+ size_t compressedSeqsSize;
+ size_t remaining = srcSize;
+ ZSTD_sequencePosition seqPos = {0, 0, 0};
+
+ BYTE const* ip = (BYTE const*)src;
+ BYTE* op = (BYTE*)dst;
+ ZSTD_sequenceCopier const sequenceCopier = ZSTD_selectSequenceCopier(cctx->appliedParams.blockDelimiters);
+
+ DEBUGLOG(4, "ZSTD_compressSequences_internal srcSize: %zu, inSeqsSize: %zu", srcSize, inSeqsSize);
+ /* Special case: empty frame */
+ if (remaining == 0) {
+ U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1);
+ RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "No room for empty frame block header");
+ MEM_writeLE32(op, cBlockHeader24);
+ op += ZSTD_blockHeaderSize;
+ dstCapacity -= ZSTD_blockHeaderSize;
+ cSize += ZSTD_blockHeaderSize;
+ }
+
+ while (remaining) {
+ size_t cBlockSize;
+ size_t additionalByteAdjustment;
+ lastBlock = remaining <= cctx->blockSize;
+ blockSize = lastBlock ? (U32)remaining : (U32)cctx->blockSize;
+ ZSTD_resetSeqStore(&cctx->seqStore);
+ DEBUGLOG(4, "Working on new block. Blocksize: %zu", blockSize);
+
+ additionalByteAdjustment = sequenceCopier(cctx, &seqPos, inSeqs, inSeqsSize, ip, blockSize);
+ FORWARD_IF_ERROR(additionalByteAdjustment, "Bad sequence copy");
+ blockSize -= additionalByteAdjustment;
+
+ /* If blocks are too small, emit as a nocompress block */
+ if (blockSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) {
+ cBlockSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);
+ FORWARD_IF_ERROR(cBlockSize, "Nocompress block failed");
+ DEBUGLOG(4, "Block too small, writing out nocompress block: cSize: %zu", cBlockSize);
+ cSize += cBlockSize;
+ ip += blockSize;
+ op += cBlockSize;
+ remaining -= blockSize;
+ dstCapacity -= cBlockSize;
+ continue;
+ }
+
+ compressedSeqsSize = ZSTD_entropyCompressSeqStore(&cctx->seqStore,
+ &cctx->blockState.prevCBlock->entropy, &cctx->blockState.nextCBlock->entropy,
+ &cctx->appliedParams,
+ op + ZSTD_blockHeaderSize /* Leave space for block header */, dstCapacity - ZSTD_blockHeaderSize,
+ blockSize,
+ cctx->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */,
+ cctx->bmi2);
+ FORWARD_IF_ERROR(compressedSeqsSize, "Compressing sequences of block failed");
+ DEBUGLOG(4, "Compressed sequences size: %zu", compressedSeqsSize);
+
+ if (!cctx->isFirstBlock &&
+ ZSTD_maybeRLE(&cctx->seqStore) &&
+ ZSTD_isRLE(ip, blockSize)) { /* test the current block, not the whole source */
+ /* We don't want to emit our first block as an RLE block even if it
+ * qualifies, because doing so would cause the decoder (cli only) to
+ * throw a "should consume all input" error. This is only an issue for
+ * zstd <= v1.4.3.
+ */
+ compressedSeqsSize = 1;
+ }
+
+ if (compressedSeqsSize == 0) {
+ /* ZSTD_noCompressBlock writes the block header as well */
+ cBlockSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);
+ FORWARD_IF_ERROR(cBlockSize, "Nocompress block failed");
+ DEBUGLOG(4, "Writing out nocompress block, size: %zu", cBlockSize);
+ } else if (compressedSeqsSize == 1) {
+ cBlockSize = ZSTD_rleCompressBlock(op, dstCapacity, *ip, blockSize, lastBlock);
+ FORWARD_IF_ERROR(cBlockSize, "RLE compress block failed");
+ DEBUGLOG(4, "Writing out RLE block, size: %zu", cBlockSize);
+ } else {
+ U32 cBlockHeader;
+ /* Error checking and repcodes update */
+ ZSTD_blockState_confirmRepcodesAndEntropyTables(&cctx->blockState);
+ if (cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
+ cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;
+
+ /* Write the block header at the beginning of the block */
+ cBlockHeader = lastBlock + (((U32)bt_compressed)<<1) + (U32)(compressedSeqsSize << 3);
+ MEM_writeLE24(op, cBlockHeader);
+ cBlockSize = ZSTD_blockHeaderSize + compressedSeqsSize;
+ DEBUGLOG(4, "Writing out compressed block, size: %zu", cBlockSize);
+ }
+
+ cSize += cBlockSize;
+ DEBUGLOG(4, "cSize running total: %zu", cSize);
+
+ if (lastBlock) {
+ break;
+ } else {
+ ip += blockSize;
+ op += cBlockSize;
+ remaining -= blockSize;
+ dstCapacity -= cBlockSize;
+ cctx->isFirstBlock = 0;
+ }
+ }
+
+ return cSize;
+}
+
+size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* dst, size_t dstCapacity,
+ const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
+ const void* src, size_t srcSize)
+{
+ BYTE* op = (BYTE*)dst;
+ size_t cSize = 0;
+ size_t compressedBlocksSize = 0;
+ size_t frameHeaderSize = 0;
+
+ /* Transparent initialization stage, same as compressStream2() */
+ DEBUGLOG(3, "ZSTD_compressSequences()");
+ assert(cctx != NULL);
+ FORWARD_IF_ERROR(ZSTD_CCtx_init_compressStream2(cctx, ZSTD_e_end, srcSize), "CCtx initialization failed");
+ /* Begin writing output, starting with frame header */
+ frameHeaderSize = ZSTD_writeFrameHeader(op, dstCapacity, &cctx->appliedParams, srcSize, cctx->dictID);
+ op += frameHeaderSize;
+ dstCapacity -= frameHeaderSize;
+ cSize += frameHeaderSize;
+ if (cctx->appliedParams.fParams.checksumFlag && srcSize) {
+ xxh64_update(&cctx->xxhState, src, srcSize);
+ }
+ /* cSize includes block header size and compressed sequences size */
+ compressedBlocksSize = ZSTD_compressSequences_internal(cctx,
+ op, dstCapacity,
+ inSeqs, inSeqsSize,
+ src, srcSize);
+ FORWARD_IF_ERROR(compressedBlocksSize, "Compressing blocks failed!");
+ cSize += compressedBlocksSize;
+ dstCapacity -= compressedBlocksSize;
+
+ if (cctx->appliedParams.fParams.checksumFlag) {
+ U32 const checksum = (U32) xxh64_digest(&cctx->xxhState);
+ RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for checksum");
+ DEBUGLOG(4, "Write checksum : %08X", (unsigned)checksum);
+ MEM_writeLE32((char*)dst + cSize, checksum);
+ cSize += 4;
+ }
+
+ DEBUGLOG(3, "Final compressed size: %zu", cSize);
+ return cSize;
+}
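+
+/* Editorial usage sketch (not part of the upstream source; `cctx`, `seqs`,
+ * `nbSeqs`, `dst`, `dstCap`, `src` and `srcSize` are hypothetical caller-side
+ * variables, and error handling is elided):
+ *
+ *   size_t const cSize = ZSTD_compressSequences(cctx, dst, dstCap,
+ *                                               seqs, nbSeqs, src, srcSize);
+ *   if (ZSTD_isError(cSize)) { ... handle error ... }
+ *
+ * The sequences must exactly cover `src`; with ZSTD_sf_explicitBlockDelimiters,
+ * a delimiter entry with matchLength == 0 (and offset == 0) closes each block.
+ */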
+
+/*====== Finalize ======*/
+
+/*! ZSTD_flushStream() :
+ * @return : amount of data remaining to flush */
+size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
+{
+ ZSTD_inBuffer input = { NULL, 0, 0 };
+ return ZSTD_compressStream2(zcs, output, &input, ZSTD_e_flush);
+}
+
+
+size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
+{
+ ZSTD_inBuffer input = { NULL, 0, 0 };
+ size_t const remainingToFlush = ZSTD_compressStream2(zcs, output, &input, ZSTD_e_end);
+ FORWARD_IF_ERROR( remainingToFlush , "ZSTD_compressStream2 failed");
+ if (zcs->appliedParams.nbWorkers > 0) return remainingToFlush; /* minimal estimation */
+ /* single thread mode : attempt to calculate remaining to flush more precisely */
+ { size_t const lastBlockSize = zcs->frameEnded ? 0 : ZSTD_BLOCKHEADERSIZE;
+ size_t const checksumSize = (size_t)(zcs->frameEnded ? 0 : zcs->appliedParams.fParams.checksumFlag * 4);
+ size_t const toFlush = remainingToFlush + lastBlockSize + checksumSize;
+ DEBUGLOG(4, "ZSTD_endStream : remaining to flush : %u", (unsigned)toFlush);
+ return toFlush;
+ }
+}
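+
+/* Editorial usage sketch (not upstream code; `zcs` and `out` are hypothetical
+ * caller-side variables, error handling elided): finish a frame by calling
+ * ZSTD_endStream() until it reports nothing left to flush:
+ *
+ *   size_t remaining;
+ *   do {
+ *       remaining = ZSTD_endStream(zcs, &out);   // out is a ZSTD_outBuffer
+ *       if (ZSTD_isError(remaining)) break;      // handle error
+ *       // drain out.dst[0 .. out.pos) to the consumer, then reset out.pos
+ *   } while (remaining != 0);
+ */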
+
+
+/*-===== Pre-defined compression levels =====-*/
+#include "clevels.h"
+
+int ZSTD_maxCLevel(void) { return ZSTD_MAX_CLEVEL; }
+int ZSTD_minCLevel(void) { return (int)-ZSTD_TARGETLENGTH_MAX; }
+int ZSTD_defaultCLevel(void) { return ZSTD_CLEVEL_DEFAULT; }
+
+static ZSTD_compressionParameters ZSTD_dedicatedDictSearch_getCParams(int const compressionLevel, size_t const dictSize)
+{
+ ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, 0, dictSize, ZSTD_cpm_createCDict);
+ switch (cParams.strategy) {
+ case ZSTD_fast:
+ case ZSTD_dfast:
+ break;
+ case ZSTD_greedy:
+ case ZSTD_lazy:
+ case ZSTD_lazy2:
+ cParams.hashLog += ZSTD_LAZY_DDSS_BUCKET_LOG;
+ break;
+ case ZSTD_btlazy2:
+ case ZSTD_btopt:
+ case ZSTD_btultra:
+ case ZSTD_btultra2:
+ break;
+ }
+ return cParams;
+}
+
+static int ZSTD_dedicatedDictSearch_isSupported(
+ ZSTD_compressionParameters const* cParams)
+{
+ return (cParams->strategy >= ZSTD_greedy)
+ && (cParams->strategy <= ZSTD_lazy2)
+ && (cParams->hashLog > cParams->chainLog)
+ && (cParams->chainLog <= 24);
+}
+
+/*
+ * Reverses the adjustment applied to cparams when enabling dedicated dict
+ * search. This is used to recover the set of params meant to be used in the
+ * working context. (Otherwise, those tables would also grow.)
+ */
+static void ZSTD_dedicatedDictSearch_revertCParams(
+ ZSTD_compressionParameters* cParams) {
+ switch (cParams->strategy) {
+ case ZSTD_fast:
+ case ZSTD_dfast:
+ break;
+ case ZSTD_greedy:
+ case ZSTD_lazy:
+ case ZSTD_lazy2:
+ cParams->hashLog -= ZSTD_LAZY_DDSS_BUCKET_LOG;
+ if (cParams->hashLog < ZSTD_HASHLOG_MIN) {
+ cParams->hashLog = ZSTD_HASHLOG_MIN;
+ }
+ break;
+ case ZSTD_btlazy2:
+ case ZSTD_btopt:
+ case ZSTD_btultra:
+ case ZSTD_btultra2:
+ break;
+ }
+}
+
+static U64 ZSTD_getCParamRowSize(U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode)
+{
+ switch (mode) {
+ case ZSTD_cpm_unknown:
+ case ZSTD_cpm_noAttachDict:
+ case ZSTD_cpm_createCDict:
+ break;
+ case ZSTD_cpm_attachDict:
+ dictSize = 0;
+ break;
+ default:
+ assert(0);
+ break;
+ }
+ { int const unknown = srcSizeHint == ZSTD_CONTENTSIZE_UNKNOWN;
+ size_t const addedSize = unknown && dictSize > 0 ? 500 : 0;
+ return unknown && dictSize == 0 ? ZSTD_CONTENTSIZE_UNKNOWN : srcSizeHint+dictSize+addedSize;
+ }
+}
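+
+/* Editorial worked example: ZSTD_CONTENTSIZE_UNKNOWN is (0ULL - 1), so when
+ * the source size is unknown but a dictionary is present, the addition above
+ * wraps deliberately: the function returns dictSize + 499, i.e. the dictionary
+ * size plus roughly 500 bytes of presumed content. With neither source size
+ * nor dictionary, the row size stays ZSTD_CONTENTSIZE_UNKNOWN. */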
+
+/*! ZSTD_getCParams_internal() :
+ * @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize.
+ * Note: srcSizeHint 0 means 0, use ZSTD_CONTENTSIZE_UNKNOWN for unknown.
+ * Use dictSize == 0 for unknown or unused.
+ * Note: `mode` controls how we treat the `dictSize`. See docs for `ZSTD_cParamMode_e`. */
+static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode)
+{
+ U64 const rSize = ZSTD_getCParamRowSize(srcSizeHint, dictSize, mode);
+ U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB);
+ int row;
+ DEBUGLOG(5, "ZSTD_getCParams_internal (cLevel=%i)", compressionLevel);
+
+ /* row */
+ if (compressionLevel == 0) row = ZSTD_CLEVEL_DEFAULT; /* 0 == default */
+ else if (compressionLevel < 0) row = 0; /* entry 0 is baseline for fast mode */
+ else if (compressionLevel > ZSTD_MAX_CLEVEL) row = ZSTD_MAX_CLEVEL;
+ else row = compressionLevel;
+
+ { ZSTD_compressionParameters cp = ZSTD_defaultCParameters[tableID][row];
+ DEBUGLOG(5, "ZSTD_getCParams_internal selected tableID: %u row: %u strat: %u", tableID, row, (U32)cp.strategy);
+ /* acceleration factor */
+ if (compressionLevel < 0) {
+ int const clampedCompressionLevel = MAX(ZSTD_minCLevel(), compressionLevel);
+ cp.targetLength = (unsigned)(-clampedCompressionLevel);
+ }
+ /* refine parameters based on srcSize & dictSize */
+ return ZSTD_adjustCParams_internal(cp, srcSizeHint, dictSize, mode);
+ }
+}
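+
+/* Editorial worked example: for rSize == 50 KB,
+ * tableID = (50 KB <= 256 KB) + (50 KB <= 128 KB) + (50 KB <= 16 KB) = 1+1+0 = 2,
+ * selecting the parameter table tuned for sources up to 128 KB. A negative
+ * level such as -5 keeps row 0 and sets cp.targetLength = 5 as the
+ * acceleration factor. */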
+
+/*! ZSTD_getCParams() :
+ * @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize.
+ * Size values are optional, provide 0 if not known or unused */
+ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize)
+{
+ if (srcSizeHint == 0) srcSizeHint = ZSTD_CONTENTSIZE_UNKNOWN;
+ return ZSTD_getCParams_internal(compressionLevel, srcSizeHint, dictSize, ZSTD_cpm_unknown);
+}
+
+/*! ZSTD_getParams_internal() :
+ * same idea as ZSTD_getCParams()
+ * @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`).
+ * Fields of `ZSTD_frameParameters` are set to default values */
+static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode) {
+ ZSTD_parameters params;
+ ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, srcSizeHint, dictSize, mode);
+ DEBUGLOG(5, "ZSTD_getParams (cLevel=%i)", compressionLevel);
+ ZSTD_memset(&params, 0, sizeof(params));
+ params.cParams = cParams;
+ params.fParams.contentSizeFlag = 1;
+ return params;
+}
+
+/*! ZSTD_getParams() :
+ * same idea as ZSTD_getCParams()
+ * @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`).
+ * Fields of `ZSTD_frameParameters` are set to default values */
+ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize) {
+ if (srcSizeHint == 0) srcSizeHint = ZSTD_CONTENTSIZE_UNKNOWN;
+ return ZSTD_getParams_internal(compressionLevel, srcSizeHint, dictSize, ZSTD_cpm_unknown);
+}
diff --git a/lib/zstd/compress/zstd_compress_internal.h b/lib/zstd/compress/zstd_compress_internal.h
new file mode 100644
index 000000000..71697a11a
--- /dev/null
+++ b/lib/zstd/compress/zstd_compress_internal.h
@@ -0,0 +1,1399 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+/* This header contains definitions
+ * that shall **only** be used by modules within lib/compress.
+ */
+
+#ifndef ZSTD_COMPRESS_H
+#define ZSTD_COMPRESS_H
+
+/*-*************************************
+* Dependencies
+***************************************/
+#include "../common/zstd_internal.h"
+#include "zstd_cwksp.h"
+
+
+/*-*************************************
+* Constants
+***************************************/
+#define kSearchStrength 8
+#define HASH_READ_SIZE 8
+#define ZSTD_DUBT_UNSORTED_MARK 1 /* For btlazy2 strategy, index ZSTD_DUBT_UNSORTED_MARK==1 means "unsorted".
+ It could be confused for a real successor at index "1", if sorted as larger than its predecessor.
+ It's not a big deal though : candidate will just be sorted again.
+ Additionally, candidate position 1 will be lost.
+ But candidate 1 cannot hide a large tree of candidates, so it's a minimal loss.
+ The benefit is that ZSTD_DUBT_UNSORTED_MARK cannot be mishandled after table re-use with a different strategy.
+ This constant is required by ZSTD_compressBlock_btlazy2() and ZSTD_reduceTable_internal() */
+
+
+/*-*************************************
+* Context memory management
+***************************************/
+typedef enum { ZSTDcs_created=0, ZSTDcs_init, ZSTDcs_ongoing, ZSTDcs_ending } ZSTD_compressionStage_e;
+typedef enum { zcss_init=0, zcss_load, zcss_flush } ZSTD_cStreamStage;
+
+typedef struct ZSTD_prefixDict_s {
+ const void* dict;
+ size_t dictSize;
+ ZSTD_dictContentType_e dictContentType;
+} ZSTD_prefixDict;
+
+typedef struct {
+ void* dictBuffer;
+ void const* dict;
+ size_t dictSize;
+ ZSTD_dictContentType_e dictContentType;
+ ZSTD_CDict* cdict;
+} ZSTD_localDict;
+
+typedef struct {
+ HUF_CElt CTable[HUF_CTABLE_SIZE_ST(255)];
+ HUF_repeat repeatMode;
+} ZSTD_hufCTables_t;
+
+typedef struct {
+ FSE_CTable offcodeCTable[FSE_CTABLE_SIZE_U32(OffFSELog, MaxOff)];
+ FSE_CTable matchlengthCTable[FSE_CTABLE_SIZE_U32(MLFSELog, MaxML)];
+ FSE_CTable litlengthCTable[FSE_CTABLE_SIZE_U32(LLFSELog, MaxLL)];
+ FSE_repeat offcode_repeatMode;
+ FSE_repeat matchlength_repeatMode;
+ FSE_repeat litlength_repeatMode;
+} ZSTD_fseCTables_t;
+
+typedef struct {
+ ZSTD_hufCTables_t huf;
+ ZSTD_fseCTables_t fse;
+} ZSTD_entropyCTables_t;
+
+/* *********************************************
+* Entropy buffer statistics structs and funcs *
+***********************************************/
+/* ZSTD_hufCTablesMetadata_t :
+ * Stores Literals Block Type for a super-block in hType, and
+ * huffman tree description in hufDesBuffer.
+ * hufDesSize refers to the size of huffman tree description in bytes.
+ * This metadata is populated in ZSTD_buildBlockEntropyStats_literals() */
+typedef struct {
+ symbolEncodingType_e hType;
+ BYTE hufDesBuffer[ZSTD_MAX_HUF_HEADER_SIZE];
+ size_t hufDesSize;
+} ZSTD_hufCTablesMetadata_t;
+
+/* ZSTD_fseCTablesMetadata_t :
+ * Stores symbol compression modes for a super-block in {ll, ol, ml}Type, and
+ * fse tables in fseTablesBuffer.
+ * fseTablesSize refers to the size of fse tables in bytes.
+ * This metadata is populated in ZSTD_buildBlockEntropyStats_sequences() */
+typedef struct {
+ symbolEncodingType_e llType;
+ symbolEncodingType_e ofType;
+ symbolEncodingType_e mlType;
+ BYTE fseTablesBuffer[ZSTD_MAX_FSE_HEADERS_SIZE];
+ size_t fseTablesSize;
+ size_t lastCountSize; /* This is to account for a bug in 1.3.4. More detail in ZSTD_entropyCompressSeqStore_internal() */
+} ZSTD_fseCTablesMetadata_t;
+
+typedef struct {
+ ZSTD_hufCTablesMetadata_t hufMetadata;
+ ZSTD_fseCTablesMetadata_t fseMetadata;
+} ZSTD_entropyCTablesMetadata_t;
+
+/* ZSTD_buildBlockEntropyStats() :
+ * Builds entropy for the block.
+ * @return : 0 on success or error code */
+size_t ZSTD_buildBlockEntropyStats(seqStore_t* seqStorePtr,
+ const ZSTD_entropyCTables_t* prevEntropy,
+ ZSTD_entropyCTables_t* nextEntropy,
+ const ZSTD_CCtx_params* cctxParams,
+ ZSTD_entropyCTablesMetadata_t* entropyMetadata,
+ void* workspace, size_t wkspSize);
+
+/* *******************************
+* Compression internals structs *
+*********************************/
+
+typedef struct {
+ U32 off; /* Offset sumtype code for the match, using ZSTD_storeSeq() format */
+ U32 len; /* Raw length of match */
+} ZSTD_match_t;
+
+typedef struct {
+ U32 offset; /* Offset of sequence */
+ U32 litLength; /* Length of literals prior to match */
+ U32 matchLength; /* Raw length of match */
+} rawSeq;
+
+typedef struct {
+ rawSeq* seq; /* The start of the sequences */
+ size_t pos; /* The index in seq where reading stopped. pos <= size. */
+ size_t posInSequence; /* The position within the sequence at seq[pos] where reading
+ stopped. posInSequence <= seq[pos].litLength + seq[pos].matchLength */
+ size_t size; /* The number of sequences. <= capacity. */
+ size_t capacity; /* The capacity starting from `seq` pointer */
+} rawSeqStore_t;
+
+UNUSED_ATTR static const rawSeqStore_t kNullRawSeqStore = {NULL, 0, 0, 0, 0};
+
+typedef struct {
+ int price;
+ U32 off;
+ U32 mlen;
+ U32 litlen;
+ U32 rep[ZSTD_REP_NUM];
+} ZSTD_optimal_t;
+
+typedef enum { zop_dynamic=0, zop_predef } ZSTD_OptPrice_e;
+
+typedef struct {
+ /* All tables are allocated inside cctx->workspace by ZSTD_resetCCtx_internal() */
+ unsigned* litFreq; /* table of literals statistics, of size 256 */
+ unsigned* litLengthFreq; /* table of litLength statistics, of size (MaxLL+1) */
+ unsigned* matchLengthFreq; /* table of matchLength statistics, of size (MaxML+1) */
+ unsigned* offCodeFreq; /* table of offCode statistics, of size (MaxOff+1) */
+ ZSTD_match_t* matchTable; /* list of found matches, of size ZSTD_OPT_NUM+1 */
+ ZSTD_optimal_t* priceTable; /* All positions tracked by optimal parser, of size ZSTD_OPT_NUM+1 */
+
+ U32 litSum; /* nb of literals */
+ U32 litLengthSum; /* nb of litLength codes */
+ U32 matchLengthSum; /* nb of matchLength codes */
+ U32 offCodeSum; /* nb of offset codes */
+ U32 litSumBasePrice; /* to compare to log2(litfreq) */
+ U32 litLengthSumBasePrice; /* to compare to log2(llfreq) */
+ U32 matchLengthSumBasePrice;/* to compare to log2(mlfreq) */
+ U32 offCodeSumBasePrice; /* to compare to log2(offreq) */
+ ZSTD_OptPrice_e priceType; /* prices can be determined dynamically, or follow a pre-defined cost structure */
+ const ZSTD_entropyCTables_t* symbolCosts; /* pre-calculated dictionary statistics */
+ ZSTD_paramSwitch_e literalCompressionMode;
+} optState_t;
+
+typedef struct {
+ ZSTD_entropyCTables_t entropy;
+ U32 rep[ZSTD_REP_NUM];
+} ZSTD_compressedBlockState_t;
+
+typedef struct {
+ BYTE const* nextSrc; /* next block here to continue on current prefix */
+ BYTE const* base; /* All regular indexes relative to this position */
+ BYTE const* dictBase; /* extDict indexes relative to this position */
+ U32 dictLimit; /* below that point, need extDict */
+ U32 lowLimit; /* below that point, no more valid data */
+ U32 nbOverflowCorrections; /* Number of times overflow correction has run since
+ * ZSTD_window_init(). Useful for debugging coredumps
+ * and for ZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY.
+ */
+} ZSTD_window_t;
+
+#define ZSTD_WINDOW_START_INDEX 2
+
+typedef struct ZSTD_matchState_t ZSTD_matchState_t;
+
+#define ZSTD_ROW_HASH_CACHE_SIZE 8 /* Size of prefetching hash cache for row-based matchfinder */
+
+struct ZSTD_matchState_t {
+ ZSTD_window_t window; /* State for window round buffer management */
+ U32 loadedDictEnd; /* index of end of dictionary, within context's referential.
+ * When loadedDictEnd != 0, a dictionary is in use, and still valid.
+ * This relies on a mechanism to set loadedDictEnd=0 when dictionary is no longer within distance.
+ * Such mechanism is provided within ZSTD_window_enforceMaxDist() and ZSTD_checkDictValidity().
+ * When dict referential is copied into active context (i.e. not attached),
+ * loadedDictEnd == dictSize, since referential starts from zero.
+ */
+ U32 nextToUpdate; /* index from which to continue table update */
+ U32 hashLog3; /* dispatch table for matches of len==3 : larger == faster, more memory */
+
+ U32 rowHashLog; /* For row-based matchfinder: Hashlog based on nb of rows in the hashTable.*/
+ U16* tagTable; /* For row-based matchFinder: A row-based table containing the hashes and head index. */
+ U32 hashCache[ZSTD_ROW_HASH_CACHE_SIZE]; /* For row-based matchFinder: a cache of hashes to improve speed */
+
+ U32* hashTable;
+ U32* hashTable3;
+ U32* chainTable;
+
+ U32 forceNonContiguous; /* Non-zero if we should force non-contiguous load for the next window update. */
+
+ int dedicatedDictSearch; /* Indicates whether this matchState is using the
+ * dedicated dictionary search structure.
+ */
+ optState_t opt; /* optimal parser state */
+ const ZSTD_matchState_t* dictMatchState;
+ ZSTD_compressionParameters cParams;
+ const rawSeqStore_t* ldmSeqStore;
+};
+
+typedef struct {
+ ZSTD_compressedBlockState_t* prevCBlock;
+ ZSTD_compressedBlockState_t* nextCBlock;
+ ZSTD_matchState_t matchState;
+} ZSTD_blockState_t;
+
+typedef struct {
+ U32 offset;
+ U32 checksum;
+} ldmEntry_t;
+
+typedef struct {
+ BYTE const* split;
+ U32 hash;
+ U32 checksum;
+ ldmEntry_t* bucket;
+} ldmMatchCandidate_t;
+
+#define LDM_BATCH_SIZE 64
+
+typedef struct {
+ ZSTD_window_t window; /* State for the window round buffer management */
+ ldmEntry_t* hashTable;
+ U32 loadedDictEnd;
+ BYTE* bucketOffsets; /* Next position in bucket to insert entry */
+ size_t splitIndices[LDM_BATCH_SIZE];
+ ldmMatchCandidate_t matchCandidates[LDM_BATCH_SIZE];
+} ldmState_t;
+
+typedef struct {
+ ZSTD_paramSwitch_e enableLdm; /* ZSTD_ps_enable to enable LDM. ZSTD_ps_auto by default */
+ U32 hashLog; /* Log size of hashTable */
+ U32 bucketSizeLog; /* Log bucket size for collision resolution, at most 8 */
+ U32 minMatchLength; /* Minimum match length */
+ U32 hashRateLog; /* Log number of entries to skip */
+ U32 windowLog; /* Window log for the LDM */
+} ldmParams_t;
+
+typedef struct {
+ int collectSequences;
+ ZSTD_Sequence* seqStart;
+ size_t seqIndex;
+ size_t maxSequences;
+} SeqCollector;
+
+struct ZSTD_CCtx_params_s {
+ ZSTD_format_e format;
+ ZSTD_compressionParameters cParams;
+ ZSTD_frameParameters fParams;
+
+ int compressionLevel;
+ int forceWindow; /* force back-references to respect limit of
+ * 1<<wLog, even for dictionary */
+ size_t targetCBlockSize; /* Tries to fit compressed block size to be around targetCBlockSize.
+ * No target when targetCBlockSize == 0.
+ * There is no guarantee on compressed block size */
+ int srcSizeHint; /* User's best guess of source size.
+ * Hint is not valid when srcSizeHint == 0.
+ * There is no guarantee that hint is close to actual source size */
+
+ ZSTD_dictAttachPref_e attachDictPref;
+ ZSTD_paramSwitch_e literalCompressionMode;
+
+ /* Multithreading: used to pass parameters to mtctx */
+ int nbWorkers;
+ size_t jobSize;
+ int overlapLog;
+ int rsyncable;
+
+ /* Long distance matching parameters */
+ ldmParams_t ldmParams;
+
+ /* Dedicated dict search algorithm trigger */
+ int enableDedicatedDictSearch;
+
+ /* Input/output buffer modes */
+ ZSTD_bufferMode_e inBufferMode;
+ ZSTD_bufferMode_e outBufferMode;
+
+ /* Sequence compression API */
+ ZSTD_sequenceFormat_e blockDelimiters;
+ int validateSequences;
+
+ /* Block splitting */
+ ZSTD_paramSwitch_e useBlockSplitter;
+
+ /* Param for deciding whether to use row-based matchfinder */
+ ZSTD_paramSwitch_e useRowMatchFinder;
+
+ /* Always load a dictionary in ext-dict mode (not prefix mode)? */
+ int deterministicRefPrefix;
+
+ /* Internal use, for createCCtxParams() and freeCCtxParams() only */
+ ZSTD_customMem customMem;
+}; /* typedef'd to ZSTD_CCtx_params within "zstd.h" */
+
+#define COMPRESS_SEQUENCES_WORKSPACE_SIZE (sizeof(unsigned) * (MaxSeq + 2))
+#define ENTROPY_WORKSPACE_SIZE (HUF_WORKSPACE_SIZE + COMPRESS_SEQUENCES_WORKSPACE_SIZE)
+
+/*
+ * Indicates whether this compression proceeds directly from user-provided
+ * source buffer to user-provided destination buffer (ZSTDb_not_buffered), or
+ * whether the context needs to buffer the input/output (ZSTDb_buffered).
+ */
+typedef enum {
+ ZSTDb_not_buffered,
+ ZSTDb_buffered
+} ZSTD_buffered_policy_e;
+
+/*
+ * Struct that contains all elements of block splitter that should be allocated
+ * in a wksp.
+ */
+#define ZSTD_MAX_NB_BLOCK_SPLITS 196
+typedef struct {
+ seqStore_t fullSeqStoreChunk;
+ seqStore_t firstHalfSeqStore;
+ seqStore_t secondHalfSeqStore;
+ seqStore_t currSeqStore;
+ seqStore_t nextSeqStore;
+
+ U32 partitions[ZSTD_MAX_NB_BLOCK_SPLITS];
+ ZSTD_entropyCTablesMetadata_t entropyMetadata;
+} ZSTD_blockSplitCtx;
+
+struct ZSTD_CCtx_s {
+ ZSTD_compressionStage_e stage;
+ int cParamsChanged; /* == 1 if cParams (except wlog) or compression level are changed in requestedParams. Triggers transmission of new params to ZSTDMT (if available), then reset to 0. */
+ int bmi2; /* == 1 if the CPU supports BMI2 and 0 otherwise. CPU support is determined dynamically once per context lifetime. */
+ ZSTD_CCtx_params requestedParams;
+ ZSTD_CCtx_params appliedParams;
+ ZSTD_CCtx_params simpleApiParams; /* Param storage used by the simple API - not sticky. Must only be used in top-level simple API functions for storage. */
+ U32 dictID;
+ size_t dictContentSize;
+
+ ZSTD_cwksp workspace; /* manages buffer for dynamic allocations */
+ size_t blockSize;
+ unsigned long long pledgedSrcSizePlusOne; /* this way, 0 (default) == unknown */
+ unsigned long long consumedSrcSize;
+ unsigned long long producedCSize;
+ struct xxh64_state xxhState;
+ ZSTD_customMem customMem;
+ ZSTD_threadPool* pool;
+ size_t staticSize;
+ SeqCollector seqCollector;
+ int isFirstBlock;
+ int initialized;
+
+ seqStore_t seqStore; /* sequences storage ptrs */
+ ldmState_t ldmState; /* long distance matching state */
+ rawSeq* ldmSequences; /* Storage for the ldm output sequences */
+ size_t maxNbLdmSequences;
+ rawSeqStore_t externSeqStore; /* Mutable reference to external sequences */
+ ZSTD_blockState_t blockState;
+ U32* entropyWorkspace; /* entropy workspace of ENTROPY_WORKSPACE_SIZE bytes */
+
+ /* Whether we are streaming or not */
+ ZSTD_buffered_policy_e bufferedPolicy;
+
+ /* streaming */
+ char* inBuff;
+ size_t inBuffSize;
+ size_t inToCompress;
+ size_t inBuffPos;
+ size_t inBuffTarget;
+ char* outBuff;
+ size_t outBuffSize;
+ size_t outBuffContentSize;
+ size_t outBuffFlushedSize;
+ ZSTD_cStreamStage streamStage;
+ U32 frameEnded;
+
+ /* Stable in/out buffer verification */
+ ZSTD_inBuffer expectedInBuffer;
+ size_t expectedOutBufferSize;
+
+ /* Dictionary */
+ ZSTD_localDict localDict;
+ const ZSTD_CDict* cdict;
+ ZSTD_prefixDict prefixDict; /* single-usage dictionary */
+
+ /* Multi-threading */
+
+ /* Tracing */
+
+ /* Workspace for block splitter */
+ ZSTD_blockSplitCtx blockSplitCtx;
+};
+
+typedef enum { ZSTD_dtlm_fast, ZSTD_dtlm_full } ZSTD_dictTableLoadMethod_e;
+
+typedef enum {
+ ZSTD_noDict = 0,
+ ZSTD_extDict = 1,
+ ZSTD_dictMatchState = 2,
+ ZSTD_dedicatedDictSearch = 3
+} ZSTD_dictMode_e;
+
+typedef enum {
+ ZSTD_cpm_noAttachDict = 0, /* Compression with ZSTD_noDict or ZSTD_extDict.
+ * In this mode we use both the srcSize and the dictSize
+ * when selecting and adjusting parameters.
+ */
+ ZSTD_cpm_attachDict = 1, /* Compression with ZSTD_dictMatchState or ZSTD_dedicatedDictSearch.
+ * In this mode we only take the srcSize into account when selecting
+ * and adjusting parameters.
+ */
+ ZSTD_cpm_createCDict = 2, /* Creating a CDict.
+ * In this mode we take both the source size and the dictionary size
+ * into account when selecting and adjusting the parameters.
+ */
+ ZSTD_cpm_unknown = 3, /* ZSTD_getCParams, ZSTD_getParams, ZSTD_adjustParams.
+ * We don't know what these parameters are for. We default to the legacy
+ * behavior of taking both the source size and the dict size into account
+ * when selecting and adjusting parameters.
+ */
+} ZSTD_cParamMode_e;
+
+typedef size_t (*ZSTD_blockCompressor) (
+ ZSTD_matchState_t* bs, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_paramSwitch_e rowMatchfinderMode, ZSTD_dictMode_e dictMode);
+
+
+MEM_STATIC U32 ZSTD_LLcode(U32 litLength)
+{
+ static const BYTE LL_Code[64] = { 0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 16, 17, 17, 18, 18, 19, 19,
+ 20, 20, 20, 20, 21, 21, 21, 21,
+ 22, 22, 22, 22, 22, 22, 22, 22,
+ 23, 23, 23, 23, 23, 23, 23, 23,
+ 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24 };
+ static const U32 LL_deltaCode = 19;
+ return (litLength > 63) ? ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength];
+}
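+
+/* Editorial worked example for ZSTD_LLcode(): litLength == 100 exceeds 63, so
+ * the code is ZSTD_highbit32(100) + 19 == 6 + 19 == 25; short lengths use the
+ * table directly, e.g. LL_Code[40] == 23. */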
+
+/* ZSTD_MLcode() :
+ * note : mlBase = matchLength - MINMATCH;
+ * because that's the format in which it's stored in seqStore->sequences */
+MEM_STATIC U32 ZSTD_MLcode(U32 mlBase)
+{
+ static const BYTE ML_Code[128] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37,
+ 38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39,
+ 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
+ 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41,
+ 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42,
+ 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42 };
+ static const U32 ML_deltaCode = 36;
+ return (mlBase > 127) ? ZSTD_highbit32(mlBase) + ML_deltaCode : ML_Code[mlBase];
+}
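+
+/* Editorial worked example for ZSTD_MLcode(): a 200-byte match gives
+ * mlBase == 197, which exceeds 127, so the code is
+ * ZSTD_highbit32(197) + 36 == 7 + 36 == 43. */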
+
+/* ZSTD_cParam_withinBounds:
+ * @return 1 if value is within cParam bounds,
+ * 0 otherwise */
+MEM_STATIC int ZSTD_cParam_withinBounds(ZSTD_cParameter cParam, int value)
+{
+ ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam);
+ if (ZSTD_isError(bounds.error)) return 0;
+ if (value < bounds.lowerBound) return 0;
+ if (value > bounds.upperBound) return 0;
+ return 1;
+}
+
+/* ZSTD_noCompressBlock() :
+ * Writes uncompressed block to dst buffer from given src.
+ * Returns the size of the block */
+MEM_STATIC size_t ZSTD_noCompressBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize, U32 lastBlock)
+{
+ U32 const cBlockHeader24 = lastBlock + (((U32)bt_raw)<<1) + (U32)(srcSize << 3);
+ RETURN_ERROR_IF(srcSize + ZSTD_blockHeaderSize > dstCapacity,
+ dstSize_tooSmall, "dst buf too small for uncompressed block");
+ MEM_writeLE24(dst, cBlockHeader24);
+ ZSTD_memcpy((BYTE*)dst + ZSTD_blockHeaderSize, src, srcSize);
+ return ZSTD_blockHeaderSize + srcSize;
+}
+
+MEM_STATIC size_t ZSTD_rleCompressBlock (void* dst, size_t dstCapacity, BYTE src, size_t srcSize, U32 lastBlock)
+{
+ BYTE* const op = (BYTE*)dst;
+ U32 const cBlockHeader = lastBlock + (((U32)bt_rle)<<1) + (U32)(srcSize << 3);
+ RETURN_ERROR_IF(dstCapacity < 4, dstSize_tooSmall, "");
+ MEM_writeLE24(op, cBlockHeader);
+ op[3] = src;
+ return 4;
+}
+
+
+/* ZSTD_minGain() :
+ * minimum compression gain required
+ * to generate a compressed block or a compressed literals section.
+ * note : use the same formula for both situations */
+MEM_STATIC size_t ZSTD_minGain(size_t srcSize, ZSTD_strategy strat)
+{
+ U32 const minlog = (strat>=ZSTD_btultra) ? (U32)(strat) - 1 : 6;
+ ZSTD_STATIC_ASSERT(ZSTD_btultra == 8);
+ assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, strat));
+ return (srcSize >> minlog) + 2;
+}
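+
+/* Editorial worked example for ZSTD_minGain(): for a 4 KB input with a
+ * strategy below ZSTD_btultra, minlog == 6, so compression must save at least
+ * (4096 >> 6) + 2 == 66 bytes before a compressed representation is preferred
+ * over the raw one. */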
+
+MEM_STATIC int ZSTD_literalsCompressionIsDisabled(const ZSTD_CCtx_params* cctxParams)
+{
+ switch (cctxParams->literalCompressionMode) {
+ case ZSTD_ps_enable:
+ return 0;
+ case ZSTD_ps_disable:
+ return 1;
+ default:
+ assert(0 /* impossible: pre-validated */);
+ ZSTD_FALLTHROUGH;
+ case ZSTD_ps_auto:
+ return (cctxParams->cParams.strategy == ZSTD_fast) && (cctxParams->cParams.targetLength > 0);
+ }
+}
+
+/*! ZSTD_safecopyLiterals() :
+ * memcpy() variant that won't read more than WILDCOPY_OVERLENGTH bytes past ilimit_w.
+ * Only called when the sequence ends past ilimit_w, so it only needs to be optimized for single
+ * large copies.
+ */
+static void
+ZSTD_safecopyLiterals(BYTE* op, BYTE const* ip, BYTE const* const iend, BYTE const* ilimit_w)
+{
+ assert(iend > ilimit_w);
+ if (ip <= ilimit_w) {
+ ZSTD_wildcopy(op, ip, ilimit_w - ip, ZSTD_no_overlap);
+ op += ilimit_w - ip;
+ ip = ilimit_w;
+ }
+ while (ip < iend) *op++ = *ip++;
+}
+
+#define ZSTD_REP_MOVE (ZSTD_REP_NUM-1)
+#define STORE_REPCODE_1 STORE_REPCODE(1)
+#define STORE_REPCODE_2 STORE_REPCODE(2)
+#define STORE_REPCODE_3 STORE_REPCODE(3)
+#define STORE_REPCODE(r) (assert((r)>=1), assert((r)<=3), (r)-1)
+#define STORE_OFFSET(o) (assert((o)>0), o + ZSTD_REP_MOVE)
+#define STORED_IS_OFFSET(o) ((o) > ZSTD_REP_MOVE)
+#define STORED_IS_REPCODE(o) ((o) <= ZSTD_REP_MOVE)
+#define STORED_OFFSET(o) (assert(STORED_IS_OFFSET(o)), (o)-ZSTD_REP_MOVE)
+#define STORED_REPCODE(o) (assert(STORED_IS_REPCODE(o)), (o)+1) /* returns ID 1,2,3 */
+#define STORED_TO_OFFBASE(o) ((o)+1)
+#define OFFBASE_TO_STORED(o) ((o)-1)
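+
+/* Editorial worked example for the STORE_*/STORED_* macros above: the "stored"
+ * sum-type packs repcodes and real offsets into a single U32. With
+ * ZSTD_REP_NUM == 3, ZSTD_REP_MOVE == 2, so STORE_REPCODE(1) == 0,
+ * STORE_REPCODE(3) == 2, and STORE_OFFSET(100) == 102. Stored values <= 2
+ * therefore decode as repcodes, values > 2 as real offsets, and
+ * STORED_TO_OFFBASE() shifts everything up by one for the seqStore. */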
+
+/*! ZSTD_storeSeq() :
+ * Store a sequence (litlen, litPtr, offCode and matchLength) into seqStore_t.
+ * @offBase_minus1 : Users should employ the macros STORE_REPCODE_X and STORE_OFFSET().
+ * @matchLength : must be >= MINMATCH
+ * Allowed to overread literals up to litLimit.
+*/
+HINT_INLINE UNUSED_ATTR void
+ZSTD_storeSeq(seqStore_t* seqStorePtr,
+ size_t litLength, const BYTE* literals, const BYTE* litLimit,
+ U32 offBase_minus1,
+ size_t matchLength)
+{
+ BYTE const* const litLimit_w = litLimit - WILDCOPY_OVERLENGTH;
+ BYTE const* const litEnd = literals + litLength;
+#if defined(DEBUGLEVEL) && (DEBUGLEVEL >= 6)
+ static const BYTE* g_start = NULL;
+ if (g_start==NULL) g_start = (const BYTE*)literals; /* note : index only works for compression within a single segment */
+ { U32 const pos = (U32)((const BYTE*)literals - g_start);
+ DEBUGLOG(6, "Cpos%7u :%3u literals, match%4u bytes at offCode%7u",
+ pos, (U32)litLength, (U32)matchLength, (U32)offBase_minus1);
+ }
+#endif
+ assert((size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart) < seqStorePtr->maxNbSeq);
+ /* copy Literals */
+ assert(seqStorePtr->maxNbLit <= 128 KB);
+ assert(seqStorePtr->lit + litLength <= seqStorePtr->litStart + seqStorePtr->maxNbLit);
+ assert(literals + litLength <= litLimit);
+ if (litEnd <= litLimit_w) {
+ /* Common case: we can use wildcopy.
+ * First copy 16 bytes, because literals are likely short.
+ */
+ assert(WILDCOPY_OVERLENGTH >= 16);
+ ZSTD_copy16(seqStorePtr->lit, literals);
+ if (litLength > 16) {
+ ZSTD_wildcopy(seqStorePtr->lit+16, literals+16, (ptrdiff_t)litLength-16, ZSTD_no_overlap);
+ }
+ } else {
+ ZSTD_safecopyLiterals(seqStorePtr->lit, literals, litEnd, litLimit_w);
+ }
+ seqStorePtr->lit += litLength;
+
+ /* literal Length */
+ if (litLength>0xFFFF) {
+ assert(seqStorePtr->longLengthType == ZSTD_llt_none); /* there can only be a single long length */
+ seqStorePtr->longLengthType = ZSTD_llt_literalLength;
+ seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
+ }
+ seqStorePtr->sequences[0].litLength = (U16)litLength;
+
+ /* match offset */
+ seqStorePtr->sequences[0].offBase = STORED_TO_OFFBASE(offBase_minus1);
+
+ /* match Length */
+ assert(matchLength >= MINMATCH);
+ { size_t const mlBase = matchLength - MINMATCH;
+ if (mlBase>0xFFFF) {
+ assert(seqStorePtr->longLengthType == ZSTD_llt_none); /* there can only be a single long length */
+ seqStorePtr->longLengthType = ZSTD_llt_matchLength;
+ seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
+ }
+ seqStorePtr->sequences[0].mlBase = (U16)mlBase;
+ }
+
+ seqStorePtr->sequences++;
+}
+
+/* ZSTD_updateRep() :
+ * updates in-place @rep (array of repeat offsets)
+ * @offBase_minus1 : sum-type, with same numeric representation as ZSTD_storeSeq()
+ */
+MEM_STATIC void
+ZSTD_updateRep(U32 rep[ZSTD_REP_NUM], U32 const offBase_minus1, U32 const ll0)
+{
+ if (STORED_IS_OFFSET(offBase_minus1)) { /* full offset */
+ rep[2] = rep[1];
+ rep[1] = rep[0];
+ rep[0] = STORED_OFFSET(offBase_minus1);
+ } else { /* repcode */
+ U32 const repCode = STORED_REPCODE(offBase_minus1) - 1 + ll0;
+ if (repCode > 0) { /* note : if repCode==0, no change */
+ U32 const currentOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
+ rep[2] = (repCode >= 2) ? rep[1] : rep[2];
+ rep[1] = rep[0];
+ rep[0] = currentOffset;
+ } else { /* repCode == 0 */
+ /* nothing to do */
+ }
+ }
+}
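+
+/* Editorial worked example for ZSTD_updateRep(): starting from rep == {8, 4, 2},
+ * storing a new full offset of 16 shifts the history to {16, 8, 4}. Storing
+ * repcode 2 (offBase_minus1 == 1) with ll0 == 0 instead moves that offset to
+ * the front: rep becomes {4, 8, 2}. */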
+
+typedef struct repcodes_s {
+ U32 rep[3];
+} repcodes_t;
+
+MEM_STATIC repcodes_t
+ZSTD_newRep(U32 const rep[ZSTD_REP_NUM], U32 const offBase_minus1, U32 const ll0)
+{
+ repcodes_t newReps;
+ ZSTD_memcpy(&newReps, rep, sizeof(newReps));
+ ZSTD_updateRep(newReps.rep, offBase_minus1, ll0);
+ return newReps;
+}
+
+
+/*-*************************************
+* Match length counter
+***************************************/
+static unsigned ZSTD_NbCommonBytes (size_t val)
+{
+ if (MEM_isLittleEndian()) {
+ if (MEM_64bits()) {
+# if (__GNUC__ >= 4)
+ return (__builtin_ctzll((U64)val) >> 3);
+# else
+ static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2,
+ 0, 3, 1, 3, 1, 4, 2, 7,
+ 0, 2, 3, 6, 1, 5, 3, 5,
+ 1, 3, 4, 4, 2, 5, 6, 7,
+ 7, 0, 1, 2, 3, 3, 4, 6,
+ 2, 6, 5, 5, 3, 4, 5, 6,
+ 7, 1, 2, 4, 6, 4, 4, 5,
+ 7, 2, 6, 5, 7, 6, 7, 7 };
+ return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
+# endif
+ } else { /* 32 bits */
+# if (__GNUC__ >= 3)
+ return (__builtin_ctz((U32)val) >> 3);
+# else
+ static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0,
+ 3, 2, 2, 1, 3, 2, 0, 1,
+ 3, 3, 1, 2, 2, 2, 2, 0,
+ 3, 1, 2, 0, 1, 0, 1, 1 };
+ return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
+# endif
+ }
+ } else { /* Big Endian CPU */
+ if (MEM_64bits()) {
+# if (__GNUC__ >= 4)
+ return (__builtin_clzll(val) >> 3);
+# else
+ unsigned r;
+ const unsigned n32 = sizeof(size_t)*4; /* calculate this way due to compiler complaining in 32-bits mode */
+ if (!(val>>n32)) { r=4; } else { r=0; val>>=n32; }
+ if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
+ r += (!val);
+ return r;
+# endif
+ } else { /* 32 bits */
+# if (__GNUC__ >= 3)
+ return (__builtin_clz((U32)val) >> 3);
+# else
+ unsigned r;
+ if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
+ r += (!val);
+ return r;
+# endif
+ } }
+}
+
+
+MEM_STATIC size_t ZSTD_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* const pInLimit)
+{
+ const BYTE* const pStart = pIn;
+ const BYTE* const pInLoopLimit = pInLimit - (sizeof(size_t)-1);
+
+ if (pIn < pInLoopLimit) {
+ { size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn);
+ if (diff) return ZSTD_NbCommonBytes(diff); }
+ pIn+=sizeof(size_t); pMatch+=sizeof(size_t);
+ while (pIn < pInLoopLimit) {
+ size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn);
+ if (!diff) { pIn+=sizeof(size_t); pMatch+=sizeof(size_t); continue; }
+ pIn += ZSTD_NbCommonBytes(diff);
+ return (size_t)(pIn - pStart);
+ } }
+ if (MEM_64bits() && (pIn<(pInLimit-3)) && (MEM_read32(pMatch) == MEM_read32(pIn))) { pIn+=4; pMatch+=4; }
+ if ((pIn<(pInLimit-1)) && (MEM_read16(pMatch) == MEM_read16(pIn))) { pIn+=2; pMatch+=2; }
+ if ((pIn<pInLimit) && (*pMatch == *pIn)) pIn++;
+ return (size_t)(pIn - pStart);
+}
+
+/* ZSTD_count_2segments() :
+ * can count match length with `ip` & `match` in 2 different segments.
+ * convention : on reaching mEnd, the match count continues, starting from iStart
+ */
+MEM_STATIC size_t
+ZSTD_count_2segments(const BYTE* ip, const BYTE* match,
+ const BYTE* iEnd, const BYTE* mEnd, const BYTE* iStart)
+{
+ const BYTE* const vEnd = MIN( ip + (mEnd - match), iEnd);
+ size_t const matchLength = ZSTD_count(ip, match, vEnd);
+ if (match + matchLength != mEnd) return matchLength;
+ DEBUGLOG(7, "ZSTD_count_2segments: found a 2-parts match (current length==%zu)", matchLength);
+ DEBUGLOG(7, "distance from match beginning to end dictionary = %zi", mEnd - match);
+ DEBUGLOG(7, "distance from current pos to end buffer = %zi", iEnd - ip);
+ DEBUGLOG(7, "next byte : ip==%02X, istart==%02X", ip[matchLength], *iStart);
+ DEBUGLOG(7, "final match length = %zu", matchLength + ZSTD_count(ip+matchLength, iStart, iEnd));
+ return matchLength + ZSTD_count(ip+matchLength, iStart, iEnd);
+}
+
+
+/*-*************************************
+ * Hashes
+ ***************************************/
+static const U32 prime3bytes = 506832829U;
+static U32 ZSTD_hash3(U32 u, U32 h) { return ((u << (32-24)) * prime3bytes) >> (32-h) ; }
+MEM_STATIC size_t ZSTD_hash3Ptr(const void* ptr, U32 h) { return ZSTD_hash3(MEM_readLE32(ptr), h); } /* only in zstd_opt.h */
+
+static const U32 prime4bytes = 2654435761U;
+static U32 ZSTD_hash4(U32 u, U32 h) { return (u * prime4bytes) >> (32-h) ; }
+static size_t ZSTD_hash4Ptr(const void* ptr, U32 h) { return ZSTD_hash4(MEM_read32(ptr), h); }
+
+static const U64 prime5bytes = 889523592379ULL;
+static size_t ZSTD_hash5(U64 u, U32 h) { return (size_t)(((u << (64-40)) * prime5bytes) >> (64-h)) ; }
+static size_t ZSTD_hash5Ptr(const void* p, U32 h) { return ZSTD_hash5(MEM_readLE64(p), h); }
+
+static const U64 prime6bytes = 227718039650203ULL;
+static size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)(((u << (64-48)) * prime6bytes) >> (64-h)) ; }
+static size_t ZSTD_hash6Ptr(const void* p, U32 h) { return ZSTD_hash6(MEM_readLE64(p), h); }
+
+static const U64 prime7bytes = 58295818150454627ULL;
+static size_t ZSTD_hash7(U64 u, U32 h) { return (size_t)(((u << (64-56)) * prime7bytes) >> (64-h)) ; }
+static size_t ZSTD_hash7Ptr(const void* p, U32 h) { return ZSTD_hash7(MEM_readLE64(p), h); }
+
+static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL;
+static size_t ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u) * prime8bytes) >> (64-h)) ; }
+static size_t ZSTD_hash8Ptr(const void* p, U32 h) { return ZSTD_hash8(MEM_readLE64(p), h); }
+
+MEM_STATIC FORCE_INLINE_ATTR
+size_t ZSTD_hashPtr(const void* p, U32 hBits, U32 mls)
+{
+ switch(mls)
+ {
+ default:
+ case 4: return ZSTD_hash4Ptr(p, hBits);
+ case 5: return ZSTD_hash5Ptr(p, hBits);
+ case 6: return ZSTD_hash6Ptr(p, hBits);
+ case 7: return ZSTD_hash7Ptr(p, hBits);
+ case 8: return ZSTD_hash8Ptr(p, hBits);
+ }
+}
+
+/* ZSTD_ipow() :
+ * Return base^exponent.
+ */
+static U64 ZSTD_ipow(U64 base, U64 exponent)
+{
+ U64 power = 1;
+ while (exponent) {
+ if (exponent & 1) power *= base;
+ exponent >>= 1;
+ base *= base;
+ }
+ return power;
+}
+
+#define ZSTD_ROLL_HASH_CHAR_OFFSET 10
+
+/* ZSTD_rollingHash_append() :
+ * Add the buffer to the hash value.
+ */
+static U64 ZSTD_rollingHash_append(U64 hash, void const* buf, size_t size)
+{
+ BYTE const* istart = (BYTE const*)buf;
+ size_t pos;
+ for (pos = 0; pos < size; ++pos) {
+ hash *= prime8bytes;
+ hash += istart[pos] + ZSTD_ROLL_HASH_CHAR_OFFSET;
+ }
+ return hash;
+}
+
+/* ZSTD_rollingHash_compute() :
+ * Compute the rolling hash value of the buffer.
+ */
+MEM_STATIC U64 ZSTD_rollingHash_compute(void const* buf, size_t size)
+{
+ return ZSTD_rollingHash_append(0, buf, size);
+}
+
+/* ZSTD_rollingHash_primePower() :
+ * Compute the primePower to be passed to ZSTD_rollingHash_rotate() for a hash
+ * over a window of `length` bytes.
+ */
+MEM_STATIC U64 ZSTD_rollingHash_primePower(U32 length)
+{
+ return ZSTD_ipow(prime8bytes, length - 1);
+}
+
+/* ZSTD_rollingHash_rotate() :
+ * Rotate the rolling hash by one byte.
+ */
+MEM_STATIC U64 ZSTD_rollingHash_rotate(U64 hash, BYTE toRemove, BYTE toAdd, U64 primePower)
+{
+ hash -= (toRemove + ZSTD_ROLL_HASH_CHAR_OFFSET) * primePower;
+ hash *= prime8bytes;
+ hash += toAdd + ZSTD_ROLL_HASH_CHAR_OFFSET;
+ return hash;
+}
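+
+/* Editorial note: the rolling hash is the polynomial
+ *   H(b_0 .. b_{n-1}) = sum_i (b_i + 10) * prime8bytes^(n-1-i)   (mod 2^64),
+ * so rotating slides the window by one byte: subtract the outgoing byte's
+ * term, (toRemove + 10) * primePower with primePower == prime8bytes^(n-1),
+ * multiply by prime8bytes, then add the incoming byte's lowest-order term. */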
+
+/*-*************************************
+* Round buffer management
+***************************************/
+#if (ZSTD_WINDOWLOG_MAX_64 > 31)
+# error "ZSTD_WINDOWLOG_MAX is too large : would overflow ZSTD_CURRENT_MAX"
+#endif
+/* Max current allowed */
+#define ZSTD_CURRENT_MAX ((3U << 29) + (1U << ZSTD_WINDOWLOG_MAX))
+/* Maximum chunk size before overflow correction needs to be called again */
+#define ZSTD_CHUNKSIZE_MAX \
+ ( ((U32)-1) /* Maximum ending current index */ \
+ - ZSTD_CURRENT_MAX) /* Maximum beginning lowLimit */
+
+/*
+ * ZSTD_window_clear():
+ * Clears the window containing the history by simply setting it to empty.
+ */
+MEM_STATIC void ZSTD_window_clear(ZSTD_window_t* window)
+{
+ size_t const endT = (size_t)(window->nextSrc - window->base);
+ U32 const end = (U32)endT;
+
+ window->lowLimit = end;
+ window->dictLimit = end;
+}
+
+MEM_STATIC U32 ZSTD_window_isEmpty(ZSTD_window_t const window)
+{
+ return window.dictLimit == ZSTD_WINDOW_START_INDEX &&
+ window.lowLimit == ZSTD_WINDOW_START_INDEX &&
+ (window.nextSrc - window.base) == ZSTD_WINDOW_START_INDEX;
+}
+
+/*
+ * ZSTD_window_hasExtDict():
+ * Returns non-zero if the window has a non-empty extDict.
+ */
+MEM_STATIC U32 ZSTD_window_hasExtDict(ZSTD_window_t const window)
+{
+ return window.lowLimit < window.dictLimit;
+}
+
+/*
+ * ZSTD_matchState_dictMode():
+ * Inspects the provided matchState and figures out what dictMode should be
+ * passed to the compressor.
+ */
+MEM_STATIC ZSTD_dictMode_e ZSTD_matchState_dictMode(const ZSTD_matchState_t *ms)
+{
+ return ZSTD_window_hasExtDict(ms->window) ?
+ ZSTD_extDict :
+ ms->dictMatchState != NULL ?
+ (ms->dictMatchState->dedicatedDictSearch ? ZSTD_dedicatedDictSearch : ZSTD_dictMatchState) :
+ ZSTD_noDict;
+}
+
+/* Defining this macro to non-zero tells zstd to run the overflow correction
+ * code much more frequently. This is very inefficient, and should only be
+ * used for tests and fuzzers.
+ */
+#ifndef ZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY
+# ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+# define ZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY 1
+# else
+# define ZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY 0
+# endif
+#endif
+
+/*
+ * ZSTD_window_canOverflowCorrect():
+ * Returns non-zero if the indices are large enough for overflow correction
+ * to work correctly without impacting compression ratio.
+ */
+MEM_STATIC U32 ZSTD_window_canOverflowCorrect(ZSTD_window_t const window,
+ U32 cycleLog,
+ U32 maxDist,
+ U32 loadedDictEnd,
+ void const* src)
+{
+ U32 const cycleSize = 1u << cycleLog;
+ U32 const curr = (U32)((BYTE const*)src - window.base);
+ U32 const minIndexToOverflowCorrect = cycleSize
+ + MAX(maxDist, cycleSize)
+ + ZSTD_WINDOW_START_INDEX;
+
+ /* Adjust the min index to backoff the overflow correction frequency,
+ * so we don't waste too much CPU in overflow correction. If this
+ * computation overflows we don't really care, we just need to make
+ * sure it is at least minIndexToOverflowCorrect.
+ */
+ U32 const adjustment = window.nbOverflowCorrections + 1;
+ U32 const adjustedIndex = MAX(minIndexToOverflowCorrect * adjustment,
+ minIndexToOverflowCorrect);
+ U32 const indexLargeEnough = curr > adjustedIndex;
+
+ /* Only overflow correct early if the dictionary is invalidated already,
+ * so we don't hurt compression ratio.
+ */
+ U32 const dictionaryInvalidated = curr > maxDist + loadedDictEnd;
+
+ return indexLargeEnough && dictionaryInvalidated;
+}
+
+/*
+ * ZSTD_window_needOverflowCorrection():
+ * Returns non-zero if the indices are getting too large and need overflow
+ * protection.
+ */
+MEM_STATIC U32 ZSTD_window_needOverflowCorrection(ZSTD_window_t const window,
+ U32 cycleLog,
+ U32 maxDist,
+ U32 loadedDictEnd,
+ void const* src,
+ void const* srcEnd)
+{
+ U32 const curr = (U32)((BYTE const*)srcEnd - window.base);
+ if (ZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY) {
+ if (ZSTD_window_canOverflowCorrect(window, cycleLog, maxDist, loadedDictEnd, src)) {
+ return 1;
+ }
+ }
+ return curr > ZSTD_CURRENT_MAX;
+}
+
+/*
+ * ZSTD_window_correctOverflow():
+ * Reduces the indices to protect from index overflow.
+ * Returns the correction made to the indices, which must be applied to every
+ * stored index.
+ *
+ * The least significant cycleLog bits of the indices must remain the same
+ * (cycleLog may be 0). Every index up to maxDist in the past must be valid.
+ */
+MEM_STATIC U32 ZSTD_window_correctOverflow(ZSTD_window_t* window, U32 cycleLog,
+ U32 maxDist, void const* src)
+{
+ /* preemptive overflow correction:
+ * 1. correction is large enough:
+ * lowLimit > (3<<29) ==> current > 3<<29 + 1<<windowLog
+ * 1<<windowLog <= newCurrent < 1<<chainLog + 1<<windowLog
+ *
+ * current - newCurrent
+ * > (3<<29 + 1<<windowLog) - (1<<windowLog + 1<<chainLog)
+ * > (3<<29) - (1<<chainLog)
+ * > (3<<29) - (1<<30) (NOTE: chainLog <= 30)
+ * > 1<<29
+ *
+ * 2. (ip+ZSTD_CHUNKSIZE_MAX - cctx->base) doesn't overflow:
+ * After correction, current is less than (1<<chainLog + 1<<windowLog).
+ * In 64-bit mode we are safe, because we have 64-bit ptrdiff_t.
+ * In 32-bit mode we are safe, because (chainLog <= 29), so
+ * ip+ZSTD_CHUNKSIZE_MAX - cctx->base < 1<<32.
+ * 3. (cctx->lowLimit + 1<<windowLog) < 1<<32:
+ * windowLog <= 31 ==> 3<<29 + 1<<windowLog < 7<<29 < 1<<32.
+ */
+ U32 const cycleSize = 1u << cycleLog;
+ U32 const cycleMask = cycleSize - 1;
+ U32 const curr = (U32)((BYTE const*)src - window->base);
+ U32 const currentCycle = curr & cycleMask;
+ /* Ensure newCurrent - maxDist >= ZSTD_WINDOW_START_INDEX. */
+ U32 const currentCycleCorrection = currentCycle < ZSTD_WINDOW_START_INDEX
+ ? MAX(cycleSize, ZSTD_WINDOW_START_INDEX)
+ : 0;
+ U32 const newCurrent = currentCycle
+ + currentCycleCorrection
+ + MAX(maxDist, cycleSize);
+ U32 const correction = curr - newCurrent;
+ /* maxDist must be a power of two so that:
+ * (newCurrent & cycleMask) == (curr & cycleMask)
+ * This is required to not corrupt the chains / binary tree.
+ */
+ assert((maxDist & (maxDist - 1)) == 0);
+ assert((curr & cycleMask) == (newCurrent & cycleMask));
+ assert(curr > newCurrent);
+ if (!ZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY) {
+ /* Loose bound, should be around 1<<29 (see above) */
+ assert(correction > 1<<28);
+ }
+
+ window->base += correction;
+ window->dictBase += correction;
+ if (window->lowLimit < correction + ZSTD_WINDOW_START_INDEX) {
+ window->lowLimit = ZSTD_WINDOW_START_INDEX;
+ } else {
+ window->lowLimit -= correction;
+ }
+ if (window->dictLimit < correction + ZSTD_WINDOW_START_INDEX) {
+ window->dictLimit = ZSTD_WINDOW_START_INDEX;
+ } else {
+ window->dictLimit -= correction;
+ }
+
+ /* Ensure we can still reference the full window. */
+ assert(newCurrent >= maxDist);
+ assert(newCurrent - maxDist >= ZSTD_WINDOW_START_INDEX);
+ /* Ensure that lowLimit and dictLimit didn't underflow. */
+ assert(window->lowLimit <= newCurrent);
+ assert(window->dictLimit <= newCurrent);
+
+ ++window->nbOverflowCorrections;
+
+ DEBUGLOG(4, "Correction of 0x%x bytes to lowLimit=0x%x", correction,
+ window->lowLimit);
+ return correction;
+}
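+
+/* Editorial worked example for ZSTD_window_correctOverflow(): with
+ * cycleLog == 20 (cycleSize == 1 MB), maxDist == 1 MB and curr == 0xC0000123:
+ * currentCycle == 0x123, no start-index correction is needed, so
+ * newCurrent == 0x123 + 0x100000 == 0x100123 and correction == 0xBFF00000.
+ * The low 20 bits of curr and newCurrent agree, so chain/tree links reduced
+ * modulo the cycle stay valid. */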
+
+/*
+ * ZSTD_window_enforceMaxDist():
+ * Updates lowLimit so that:
+ * (srcEnd - base) - lowLimit == maxDist + loadedDictEnd
+ *
+ * It ensures index is valid as long as index >= lowLimit.
+ * This must be called before a block compression call.
+ *
+ * loadedDictEnd is only defined if a dictionary is in use for current compression.
+ * As the name implies, loadedDictEnd represents the index at end of dictionary.
+ * The value lies within context's referential, it can be directly compared to blockEndIdx.
+ *
+ * If loadedDictEndPtr is NULL, no dictionary is in use, and we use loadedDictEnd == 0.
+ * If loadedDictEndPtr is not NULL, we set it to zero after updating lowLimit.
+ * This is because dictionaries are allowed to be referenced fully
+ * as long as the last byte of the dictionary is in the window.
+ * Once input has progressed beyond window size, the dictionary cannot be referenced anymore.
+ *
+ * In normal dict mode, the dictionary lies between lowLimit and dictLimit.
+ * In dictMatchState mode, lowLimit and dictLimit are the same,
+ * and the dictionary is below them.
+ * forceWindow and dictMatchState are therefore incompatible.
+ */
+MEM_STATIC void
+ZSTD_window_enforceMaxDist(ZSTD_window_t* window,
+ const void* blockEnd,
+ U32 maxDist,
+ U32* loadedDictEndPtr,
+ const ZSTD_matchState_t** dictMatchStatePtr)
+{
+ U32 const blockEndIdx = (U32)((BYTE const*)blockEnd - window->base);
+ U32 const loadedDictEnd = (loadedDictEndPtr != NULL) ? *loadedDictEndPtr : 0;
+ DEBUGLOG(5, "ZSTD_window_enforceMaxDist: blockEndIdx=%u, maxDist=%u, loadedDictEnd=%u",
+ (unsigned)blockEndIdx, (unsigned)maxDist, (unsigned)loadedDictEnd);
+
+ /* - When there is no dictionary : loadedDictEnd == 0.
+ In which case, the test (blockEndIdx > maxDist) is merely to avoid
+ overflowing next operation `newLowLimit = blockEndIdx - maxDist`.
+ - When there is a standard dictionary :
+ Index referential is copied from the dictionary,
+ which means it starts from 0.
+ In which case, loadedDictEnd == dictSize,
+ and it makes sense to compare `blockEndIdx > maxDist + dictSize`
+ since `blockEndIdx` also starts from zero.
+ - When there is an attached dictionary :
+ loadedDictEnd is expressed within the referential of the context,
+ so it can be directly compared against blockEndIdx.
+ */
+ if (blockEndIdx > maxDist + loadedDictEnd) {
+ U32 const newLowLimit = blockEndIdx - maxDist;
+ if (window->lowLimit < newLowLimit) window->lowLimit = newLowLimit;
+ if (window->dictLimit < window->lowLimit) {
+ DEBUGLOG(5, "Update dictLimit to match lowLimit, from %u to %u",
+ (unsigned)window->dictLimit, (unsigned)window->lowLimit);
+ window->dictLimit = window->lowLimit;
+ }
+ /* On reaching window size, dictionaries are invalidated */
+ if (loadedDictEndPtr) *loadedDictEndPtr = 0;
+ if (dictMatchStatePtr) *dictMatchStatePtr = NULL;
+ }
+}
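+
+/* Editorial worked example for ZSTD_window_enforceMaxDist(): with
+ * maxDist == 1 MB, no dictionary (loadedDictEnd == 0) and blockEndIdx == 5 MB,
+ * lowLimit is raised to 5 MB - 1 MB == 4 MB, and any match index below that
+ * is now out of window. */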
+
+/* Similar to ZSTD_window_enforceMaxDist(),
+ * but only invalidates dictionary
+ * when input progresses beyond window size.
+ * assumption : loadedDictEndPtr and dictMatchStatePtr are valid (non NULL)
+ * loadedDictEnd uses same referential as window->base
+ * maxDist is the window size */
+MEM_STATIC void
+ZSTD_checkDictValidity(const ZSTD_window_t* window,
+ const void* blockEnd,
+ U32 maxDist,
+ U32* loadedDictEndPtr,
+ const ZSTD_matchState_t** dictMatchStatePtr)
+{
+ assert(loadedDictEndPtr != NULL);
+ assert(dictMatchStatePtr != NULL);
+ { U32 const blockEndIdx = (U32)((BYTE const*)blockEnd - window->base);
+ U32 const loadedDictEnd = *loadedDictEndPtr;
+ DEBUGLOG(5, "ZSTD_checkDictValidity: blockEndIdx=%u, maxDist=%u, loadedDictEnd=%u",
+ (unsigned)blockEndIdx, (unsigned)maxDist, (unsigned)loadedDictEnd);
+ assert(blockEndIdx >= loadedDictEnd);
+
+ if (blockEndIdx > loadedDictEnd + maxDist) {
+ /* On reaching window size, dictionaries are invalidated.
+ * For simplification, if window size is reached anywhere within next block,
+ * the dictionary is invalidated for the full block.
+ */
+ DEBUGLOG(6, "invalidating dictionary for current block (distance > windowSize)");
+ *loadedDictEndPtr = 0;
+ *dictMatchStatePtr = NULL;
+ } else {
+ if (*loadedDictEndPtr != 0) {
+ DEBUGLOG(6, "dictionary considered valid for current block");
+ } } }
+}
+
+MEM_STATIC void ZSTD_window_init(ZSTD_window_t* window) {
+ ZSTD_memset(window, 0, sizeof(*window));
+ window->base = (BYTE const*)" ";
+ window->dictBase = (BYTE const*)" ";
+ ZSTD_STATIC_ASSERT(ZSTD_DUBT_UNSORTED_MARK < ZSTD_WINDOW_START_INDEX); /* Start above ZSTD_DUBT_UNSORTED_MARK */
+ window->dictLimit = ZSTD_WINDOW_START_INDEX; /* start from >0, so that 1st position is valid */
+ window->lowLimit = ZSTD_WINDOW_START_INDEX; /* it ensures first and later CCtx usages compress the same */
+ window->nextSrc = window->base + ZSTD_WINDOW_START_INDEX; /* see issue #1241 */
+ window->nbOverflowCorrections = 0;
+}
+
+/*
+ * ZSTD_window_update():
+ * Updates the window by appending [src, src + srcSize) to the window.
+ * If it is not contiguous, the current prefix becomes the extDict, and we
+ * forget about the previous extDict. Handles overlap of the prefix and extDict.
+ * Returns non-zero if the segment is contiguous.
+ */
+MEM_STATIC U32 ZSTD_window_update(ZSTD_window_t* window,
+ void const* src, size_t srcSize,
+ int forceNonContiguous)
+{
+ BYTE const* const ip = (BYTE const*)src;
+ U32 contiguous = 1;
+ DEBUGLOG(5, "ZSTD_window_update");
+ if (srcSize == 0)
+ return contiguous;
+ assert(window->base != NULL);
+ assert(window->dictBase != NULL);
+ /* Check if blocks follow each other */
+ if (src != window->nextSrc || forceNonContiguous) {
+ /* not contiguous */
+ size_t const distanceFromBase = (size_t)(window->nextSrc - window->base);
+ DEBUGLOG(5, "Non contiguous blocks, new segment starts at %u", window->dictLimit);
+ window->lowLimit = window->dictLimit;
+ assert(distanceFromBase == (size_t)(U32)distanceFromBase); /* should never overflow */
+ window->dictLimit = (U32)distanceFromBase;
+ window->dictBase = window->base;
+ window->base = ip - distanceFromBase;
+ /* ms->nextToUpdate = window->dictLimit; */
+ if (window->dictLimit - window->lowLimit < HASH_READ_SIZE) window->lowLimit = window->dictLimit; /* too small extDict */
+ contiguous = 0;
+ }
+ window->nextSrc = ip + srcSize;
+ /* if input and dictionary overlap : reduce dictionary (area presumed modified by input) */
+ if ( (ip+srcSize > window->dictBase + window->lowLimit)
+ & (ip < window->dictBase + window->dictLimit)) {
+ ptrdiff_t const highInputIdx = (ip + srcSize) - window->dictBase;
+ U32 const lowLimitMax = (highInputIdx > (ptrdiff_t)window->dictLimit) ? window->dictLimit : (U32)highInputIdx;
+ window->lowLimit = lowLimitMax;
+ DEBUGLOG(5, "Overlapping extDict and input : new lowLimit = %u", window->lowLimit);
+ }
+ return contiguous;
+}
+
+/*
+ * Returns the lowest allowed match index. It may either be in the ext-dict or the prefix.
+ */
+MEM_STATIC U32 ZSTD_getLowestMatchIndex(const ZSTD_matchState_t* ms, U32 curr, unsigned windowLog)
+{
+ U32 const maxDistance = 1U << windowLog;
+ U32 const lowestValid = ms->window.lowLimit;
+ U32 const withinWindow = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid;
+ U32 const isDictionary = (ms->loadedDictEnd != 0);
+ /* When using a dictionary the entire dictionary is valid if a single byte of the dictionary
+ * is within the window. We invalidate the dictionary (and set loadedDictEnd to 0) when it isn't
+ * valid for the entire block. So this check is sufficient to find the lowest valid match index.
+ */
+ U32 const matchLowest = isDictionary ? lowestValid : withinWindow;
+ return matchLowest;
+}
+
+/*
+ * Returns the lowest allowed match index in the prefix.
+ */
+MEM_STATIC U32 ZSTD_getLowestPrefixIndex(const ZSTD_matchState_t* ms, U32 curr, unsigned windowLog)
+{
+ U32 const maxDistance = 1U << windowLog;
+ U32 const lowestValid = ms->window.dictLimit;
+ U32 const withinWindow = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid;
+ U32 const isDictionary = (ms->loadedDictEnd != 0);
+ /* When computing the lowest prefix index we need to take the dictionary into account to handle
+ * the edge case where the dictionary and the source are contiguous in memory.
+ */
+ U32 const matchLowest = isDictionary ? lowestValid : withinWindow;
+ return matchLowest;
+}
+
+
+
+/* debug functions */
+#if (DEBUGLEVEL>=2)
+
+MEM_STATIC double ZSTD_fWeight(U32 rawStat)
+{
+ U32 const fp_accuracy = 8;
+ U32 const fp_multiplier = (1 << fp_accuracy);
+ U32 const newStat = rawStat + 1;
+ U32 const hb = ZSTD_highbit32(newStat);
+ U32 const BWeight = hb * fp_multiplier;
+ U32 const FWeight = (newStat << fp_accuracy) >> hb;
+ U32 const weight = BWeight + FWeight;
+ assert(hb + fp_accuracy < 31);
+ return (double)weight / fp_multiplier;
+}
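+
+/* Worked example (illustrative): ZSTD_fWeight() is a fixed-point
+ * approximation of log2(rawStat + 1) + 1. For rawStat = 255:
+ * newStat = 256, hb = 8, BWeight = 8 * 256 = 2048,
+ * FWeight = (256 << 8) >> 8 = 256, so weight = 2304 and
+ * 2304 / 256 = 9.0 == log2(256) + 1. ZSTD_debugTable() below thus prints
+ * fWeight(sum) - fWeight(count) ~= log2(sum/count), a per-symbol bit cost.
+ */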
+
+/* display a table's content,
+ * listing each element, its frequency, and its predicted bit cost */
+MEM_STATIC void ZSTD_debugTable(const U32* table, U32 max)
+{
+ unsigned u, sum;
+ for (u=0, sum=0; u<=max; u++) sum += table[u];
+ DEBUGLOG(2, "total nb elts: %u", sum);
+ for (u=0; u<=max; u++) {
+ DEBUGLOG(2, "%2u: %5u (%.2f)",
+ u, table[u], ZSTD_fWeight(sum) - ZSTD_fWeight(table[u]) );
+ }
+}
+
+#endif
+
+
+
+/* ===============================================================
+ * Shared internal declarations
+ * These prototypes may be called from sources not in lib/compress
+ * =============================================================== */
+
+/* ZSTD_loadCEntropy() :
+ * dict : must point at beginning of a valid zstd dictionary.
+ * return : size of dictionary header (size of magic number + dict ID + entropy tables)
+ * assumptions : the magic number is assumed to have been checked already,
+ * and dictSize >= 8 */
+size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace,
+ const void* const dict, size_t dictSize);
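+
+/* Layout sketch (per the zstd dictionary format; entropy table sizes vary
+ * with content):
+ *
+ *   offset 0 : 4-byte magic number (0xEC30A437, little-endian)
+ *   offset 4 : 4-byte dictionary ID
+ *   offset 8 : entropy tables (Huffman literals table, then FSE tables
+ *              for offset codes, match lengths and literal lengths,
+ *              then the 3 initial repcodes)
+ *
+ * ZSTD_loadCEntropy() returns the total size of this header; the bytes
+ * after it are the dictionary content proper.
+ */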
+
+void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs);
+
+/* ==============================================================
+ * Private declarations
+ * These prototypes shall only be called from within lib/compress
+ * ============================================================== */
+
+/* ZSTD_getCParamsFromCCtxParams() :
+ * cParams are built depending on compressionLevel, src size hints,
+ * LDM and manually set compression parameters.
+ * Note: srcSizeHint == 0 means a source size of 0, not "unknown".
+ */
+ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
+ const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode);
+
+/*! ZSTD_initCStream_internal() :
+ * Private use only. Init streaming operation.
+ * expects params to be valid.
+ * must receive dict, or cdict, or neither, but not both at once.
+ * @return : 0, or an error code */
+size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
+ const void* dict, size_t dictSize,
+ const ZSTD_CDict* cdict,
+ const ZSTD_CCtx_params* params, unsigned long long pledgedSrcSize);
+
+void ZSTD_resetSeqStore(seqStore_t* ssPtr);
+
+/*! ZSTD_getCParamsFromCDict() :
+ * as the name implies */
+ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict);
+
+/* ZSTD_compressBegin_advanced_internal() :
+ * Private use only. To be called from zstdmt_compress.c. */
+size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx,
+ const void* dict, size_t dictSize,
+ ZSTD_dictContentType_e dictContentType,
+ ZSTD_dictTableLoadMethod_e dtlm,
+ const ZSTD_CDict* cdict,
+ const ZSTD_CCtx_params* params,
+ unsigned long long pledgedSrcSize);
+
+/* ZSTD_compress_advanced_internal() :
+ * Private use only. To be called from zstdmt_compress.c. */
+size_t ZSTD_compress_advanced_internal(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const void* dict,size_t dictSize,
+ const ZSTD_CCtx_params* params);
+
+
+/* ZSTD_writeLastEmptyBlock() :
+ * output an empty Block with end-of-frame mark to complete a frame
+ * @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h))
+ * or an error code if `dstCapacity` is too small (<ZSTD_blockHeaderSize)
+ */
+size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity);
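+
+/* Worked example (illustrative): a zstd block header is 3 bytes,
+ * little-endian, with bit 0 = lastBlock, bits 1-2 = blockType and
+ * bits 3-23 = blockSize. An empty last raw block is therefore
+ * 1 | (bt_raw << 1) | (0 << 3) == 0x000001, i.e. bytes 01 00 00,
+ * and the function returns ZSTD_blockHeaderSize == 3.
+ */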
+
+
+/* ZSTD_referenceExternalSequences() :
+ * Must be called before starting a compression operation.
+ * The sequences must exactly describe a prefix of the source.
+ * This cannot be used when long range matching is enabled.
+ * Zstd will use these sequences, and pass the literals to a secondary block
+ * compressor.
+ * @return : An error code on failure.
+ * NOTE: seqs are not verified! Invalid sequences can cause out-of-bounds memory
+ * access and data corruption.
+ */
+size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq);
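+
+/* Usage sketch (illustrative only; all values are hypothetical): each
+ * rawSeq covers litLength literal bytes followed by a matchLength match,
+ * and together the sequences must cover a prefix of the source exactly:
+ *
+ *   rawSeq seqs[2] = {
+ *       { 8, 10, 4 },     <- offset 8, 10 literals, then a 4-byte match
+ *       { 4,  0, 6 },     <- offset 4,  0 literals, then a 6-byte match
+ *   };
+ *   ZSTD_referenceExternalSequences(cctx, seqs, 2);    covers 20 bytes
+ *
+ * As noted above, the sequences are trusted as-is; the caller must
+ * validate them.
+ */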
+
+/* ZSTD_cycleLog() :
+ * condition for correct operation : hashLog > 1 */
+U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat);
+
+/* ZSTD_CCtx_trace() :
+ * Trace the end of a compression call.
+ */
+void ZSTD_CCtx_trace(ZSTD_CCtx* cctx, size_t extraCSize);
+
+#endif /* ZSTD_COMPRESS_H */
diff --git a/lib/zstd/compress/zstd_compress_literals.c b/lib/zstd/compress/zstd_compress_literals.c
new file mode 100644
index 000000000..52b0a8059
--- /dev/null
+++ b/lib/zstd/compress/zstd_compress_literals.c
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+ /*-*************************************
+ * Dependencies
+ ***************************************/
+#include "zstd_compress_literals.h"
+
+size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+ BYTE* const ostart = (BYTE*)dst;
+ U32 const flSize = 1 + (srcSize>31) + (srcSize>4095);
+
+ RETURN_ERROR_IF(srcSize + flSize > dstCapacity, dstSize_tooSmall, "");
+
+ switch(flSize)
+ {
+ case 1: /* 2 - 1 - 5 */
+ ostart[0] = (BYTE)((U32)set_basic + (srcSize<<3));
+ break;
+ case 2: /* 2 - 2 - 12 */
+ MEM_writeLE16(ostart, (U16)((U32)set_basic + (1<<2) + (srcSize<<4)));
+ break;
+ case 3: /* 2 - 2 - 20 */
+ MEM_writeLE32(ostart, (U32)((U32)set_basic + (3<<2) + (srcSize<<4)));
+ break;
+ default: /* not possible : flSize is {1,2,3} */
+ assert(0);
+ }
+
+ ZSTD_memcpy(ostart + flSize, src, srcSize);
+ DEBUGLOG(5, "Raw literals: %u -> %u", (U32)srcSize, (U32)(srcSize + flSize));
+ return srcSize + flSize;
+}
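+
+/* Worked example (illustrative): for srcSize = 100,
+ * flSize = 1 + (100 > 31) + (100 > 4095) = 2, so the "2 - 2 - 12" format
+ * applies: header = set_basic + (1 << 2) + (100 << 4) = 0x0644, written
+ * little-endian as bytes 44 06. A decoder reads back literals-block type
+ * raw (bits 0-1 == 0), size format 1 (bits 2-3) and regenerated size
+ * 0x0644 >> 4 == 100.
+ */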
+
+size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+ BYTE* const ostart = (BYTE*)dst;
+ U32 const flSize = 1 + (srcSize>31) + (srcSize>4095);
+
+ (void)dstCapacity; /* dstCapacity already guaranteed to be >=4, hence large enough */
+
+ switch(flSize)
+ {
+ case 1: /* 2 - 1 - 5 */
+ ostart[0] = (BYTE)((U32)set_rle + (srcSize<<3));
+ break;
+ case 2: /* 2 - 2 - 12 */
+ MEM_writeLE16(ostart, (U16)((U32)set_rle + (1<<2) + (srcSize<<4)));
+ break;
+ case 3: /* 2 - 2 - 20 */
+ MEM_writeLE32(ostart, (U32)((U32)set_rle + (3<<2) + (srcSize<<4)));
+ break;
+ default: /* not possible : flSize is {1,2,3} */
+ assert(0);
+ }
+
+ ostart[flSize] = *(const BYTE*)src;
+ DEBUGLOG(5, "RLE literals: %u -> %u", (U32)srcSize, (U32)flSize + 1);
+ return flSize+1;
+}
+
+size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
+ ZSTD_hufCTables_t* nextHuf,
+ ZSTD_strategy strategy, int disableLiteralCompression,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ void* entropyWorkspace, size_t entropyWorkspaceSize,
+ const int bmi2,
+ unsigned suspectUncompressible)
+{
+ size_t const minGain = ZSTD_minGain(srcSize, strategy);
+ size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB);
+ BYTE* const ostart = (BYTE*)dst;
+ U32 singleStream = srcSize < 256;
+ symbolEncodingType_e hType = set_compressed;
+ size_t cLitSize;
+
+ DEBUGLOG(5,"ZSTD_compressLiterals (disableLiteralCompression=%i srcSize=%u)",
+ disableLiteralCompression, (U32)srcSize);
+
+ /* Prepare nextEntropy assuming the existing table will be reused */
+ ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
+
+ if (disableLiteralCompression)
+ return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
+
+ /* too small? don't even attempt compression (speed optimization) */
+# define COMPRESS_LITERALS_SIZE_MIN 63
+ { size_t const minLitSize = (prevHuf->repeatMode == HUF_repeat_valid) ? 6 : COMPRESS_LITERALS_SIZE_MIN;
+ if (srcSize <= minLitSize) return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
+ }
+
+ RETURN_ERROR_IF(dstCapacity < lhSize+1, dstSize_tooSmall, "not enough space for compression");
+ { HUF_repeat repeat = prevHuf->repeatMode;
+ int const preferRepeat = strategy < ZSTD_lazy ? srcSize <= 1024 : 0;
+ if (repeat == HUF_repeat_valid && lhSize == 3) singleStream = 1;
+ cLitSize = singleStream ?
+ HUF_compress1X_repeat(
+ ostart+lhSize, dstCapacity-lhSize, src, srcSize,
+ HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT, entropyWorkspace, entropyWorkspaceSize,
+ (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2, suspectUncompressible) :
+ HUF_compress4X_repeat(
+ ostart+lhSize, dstCapacity-lhSize, src, srcSize,
+ HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT, entropyWorkspace, entropyWorkspaceSize,
+ (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2, suspectUncompressible);
+ if (repeat != HUF_repeat_none) {
+ /* reused the existing table */
+ DEBUGLOG(5, "Reusing previous huffman table");
+ hType = set_repeat;
+ }
+ }
+
+ if ((cLitSize==0) || (cLitSize >= srcSize - minGain) || ERR_isError(cLitSize)) {
+ ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
+ return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
+ }
+ if (cLitSize==1) {
+ ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
+ return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize);
+ }
+
+ if (hType == set_compressed) {
+ /* using a newly constructed table */
+ nextHuf->repeatMode = HUF_repeat_check;
+ }
+
+ /* Build header */
+ switch(lhSize)
+ {
+ case 3: /* 2 - 2 - 10 - 10 */
+ { U32 const lhc = hType + ((!singleStream) << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<14);
+ MEM_writeLE24(ostart, lhc);
+ break;
+ }
+ case 4: /* 2 - 2 - 14 - 14 */
+ { U32 const lhc = hType + (2 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<18);
+ MEM_writeLE32(ostart, lhc);
+ break;
+ }
+ case 5: /* 2 - 2 - 18 - 18 */
+ { U32 const lhc = hType + (3 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<22);
+ MEM_writeLE32(ostart, lhc);
+ ostart[4] = (BYTE)(cLitSize >> 10);
+ break;
+ }
+ default: /* not possible : lhSize is {3,4,5} */
+ assert(0);
+ }
+ DEBUGLOG(5, "Compressed literals: %u -> %u", (U32)srcSize, (U32)(lhSize+cLitSize));
+ return lhSize+cLitSize;
+}
diff --git a/lib/zstd/compress/zstd_compress_literals.h b/lib/zstd/compress/zstd_compress_literals.h
new file mode 100644
index 000000000..9775fb97c
--- /dev/null
+++ b/lib/zstd/compress/zstd_compress_literals.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_COMPRESS_LITERALS_H
+#define ZSTD_COMPRESS_LITERALS_H
+
+#include "zstd_compress_internal.h" /* ZSTD_hufCTables_t, ZSTD_minGain() */
+
+
+size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+
+size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+
+/* If suspectUncompressible is set, some sampling checks will be run to potentially skip Huffman coding */
+size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
+ ZSTD_hufCTables_t* nextHuf,
+ ZSTD_strategy strategy, int disableLiteralCompression,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ void* entropyWorkspace, size_t entropyWorkspaceSize,
+ const int bmi2,
+ unsigned suspectUncompressible);
+
+#endif /* ZSTD_COMPRESS_LITERALS_H */
diff --git a/lib/zstd/compress/zstd_compress_sequences.c b/lib/zstd/compress/zstd_compress_sequences.c
new file mode 100644
index 000000000..21ddc1b37
--- /dev/null
+++ b/lib/zstd/compress/zstd_compress_sequences.c
@@ -0,0 +1,442 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+ /*-*************************************
+ * Dependencies
+ ***************************************/
+#include "zstd_compress_sequences.h"
+
+/*
+ * -log2(x / 256) lookup table for x in [0, 256).
+ * If x == 0: Return 0
+ * Else: Return floor(-log2(x / 256) * 256)
+ */
+static unsigned const kInverseProbabilityLog256[256] = {
+ 0, 2048, 1792, 1642, 1536, 1453, 1386, 1329, 1280, 1236, 1197, 1162,
+ 1130, 1100, 1073, 1047, 1024, 1001, 980, 960, 941, 923, 906, 889,
+ 874, 859, 844, 830, 817, 804, 791, 779, 768, 756, 745, 734,
+ 724, 714, 704, 694, 685, 676, 667, 658, 650, 642, 633, 626,
+ 618, 610, 603, 595, 588, 581, 574, 567, 561, 554, 548, 542,
+ 535, 529, 523, 517, 512, 506, 500, 495, 489, 484, 478, 473,
+ 468, 463, 458, 453, 448, 443, 438, 434, 429, 424, 420, 415,
+ 411, 407, 402, 398, 394, 390, 386, 382, 377, 373, 370, 366,
+ 362, 358, 354, 350, 347, 343, 339, 336, 332, 329, 325, 322,
+ 318, 315, 311, 308, 305, 302, 298, 295, 292, 289, 286, 282,
+ 279, 276, 273, 270, 267, 264, 261, 258, 256, 253, 250, 247,
+ 244, 241, 239, 236, 233, 230, 228, 225, 222, 220, 217, 215,
+ 212, 209, 207, 204, 202, 199, 197, 194, 192, 190, 187, 185,
+ 182, 180, 178, 175, 173, 171, 168, 166, 164, 162, 159, 157,
+ 155, 153, 151, 149, 146, 144, 142, 140, 138, 136, 134, 132,
+ 130, 128, 126, 123, 121, 119, 117, 115, 114, 112, 110, 108,
+ 106, 104, 102, 100, 98, 96, 94, 93, 91, 89, 87, 85,
+ 83, 82, 80, 78, 76, 74, 73, 71, 69, 67, 66, 64,
+ 62, 61, 59, 57, 55, 54, 52, 50, 49, 47, 46, 44,
+ 42, 41, 39, 37, 36, 34, 33, 31, 30, 28, 26, 25,
+ 23, 22, 20, 19, 17, 16, 14, 13, 11, 10, 8, 7,
+ 5, 4, 2, 1,
+};
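+
+/* Spot-check (illustrative): kInverseProbabilityLog256[128]
+ * == floor(-log2(128/256) * 256) == floor(1.0 * 256) == 256, and
+ * kInverseProbabilityLog256[64] == floor(-log2(64/256) * 256) == 512,
+ * matching the table entries above.
+ */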
+
+static unsigned ZSTD_getFSEMaxSymbolValue(FSE_CTable const* ctable) {
+ void const* ptr = ctable;
+ U16 const* u16ptr = (U16 const*)ptr;
+ U32 const maxSymbolValue = MEM_read16(u16ptr + 1);
+ return maxSymbolValue;
+}
+
+/*
+ * Returns true if low-probability symbols should be encoded with
+ * ncount=-1; otherwise they are encoded with ncount=1.
+ */
+static unsigned ZSTD_useLowProbCount(size_t const nbSeq)
+{
+ /* Heuristic: This should cover most blocks <= 16K and
+ * start to fade out after 16K to about 32K depending on
+ * compressibility.
+ */
+ return nbSeq >= 2048;
+}
+
+/*
+ * Returns the cost in bytes of encoding the normalized count header.
+ * Returns an error if any of the helper functions return an error.
+ */
+static size_t ZSTD_NCountCost(unsigned const* count, unsigned const max,
+ size_t const nbSeq, unsigned const FSELog)
+{
+ BYTE wksp[FSE_NCOUNTBOUND];
+ S16 norm[MaxSeq + 1];
+ const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max);
+ FORWARD_IF_ERROR(FSE_normalizeCount(norm, tableLog, count, nbSeq, max, ZSTD_useLowProbCount(nbSeq)), "");
+ return FSE_writeNCount(wksp, sizeof(wksp), norm, max, tableLog);
+}
+
+/*
+ * Returns the cost in bits of encoding the distribution described by count
+ * using the entropy bound.
+ */
+static size_t ZSTD_entropyCost(unsigned const* count, unsigned const max, size_t const total)
+{
+ unsigned cost = 0;
+ unsigned s;
+
+ assert(total > 0);
+ for (s = 0; s <= max; ++s) {
+ unsigned norm = (unsigned)((256 * count[s]) / total);
+ if (count[s] != 0 && norm == 0)
+ norm = 1;
+ assert(count[s] < total);
+ cost += count[s] * kInverseProbabilityLog256[norm];
+ }
+ return cost >> 8;
+}
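+
+/* Worked example (illustrative): count = {3, 1}, max = 1, total = 4.
+ * norm[0] = (256 * 3) / 4 = 192 and norm[1] = (256 * 1) / 4 = 64, so
+ * cost = (3 * kInverseProbabilityLog256[192]
+ *         + 1 * kInverseProbabilityLog256[64]) >> 8
+ *      = (3 * 106 + 512) >> 8 = 830 >> 8 = 3 bits,
+ * close to the true entropy 3 * log2(4/3) + log2(4) ~= 3.25 bits
+ * (the >> 8 floors the result).
+ */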
+
+/*
+ * Returns the cost in bits of encoding the distribution in count using ctable.
+ * Returns an error if ctable cannot represent all the symbols in count.
+ */
+size_t ZSTD_fseBitCost(
+ FSE_CTable const* ctable,
+ unsigned const* count,
+ unsigned const max)
+{
+ unsigned const kAccuracyLog = 8;
+ size_t cost = 0;
+ unsigned s;
+ FSE_CState_t cstate;
+ FSE_initCState(&cstate, ctable);
+ if (ZSTD_getFSEMaxSymbolValue(ctable) < max) {
+ DEBUGLOG(5, "Repeat FSE_CTable has maxSymbolValue %u < %u",
+ ZSTD_getFSEMaxSymbolValue(ctable), max);
+ return ERROR(GENERIC);
+ }
+ for (s = 0; s <= max; ++s) {
+ unsigned const tableLog = cstate.stateLog;
+ unsigned const badCost = (tableLog + 1) << kAccuracyLog;
+ unsigned const bitCost = FSE_bitCost(cstate.symbolTT, tableLog, s, kAccuracyLog);
+ if (count[s] == 0)
+ continue;
+ if (bitCost >= badCost) {
+ DEBUGLOG(5, "Repeat FSE_CTable has Prob[%u] == 0", s);
+ return ERROR(GENERIC);
+ }
+ cost += (size_t)count[s] * bitCost;
+ }
+ return cost >> kAccuracyLog;
+}
+
+/*
+ * Returns the cost in bits of encoding the distribution in count using the
+ * table described by norm. The max symbol supported by norm is assumed to be >= max.
+ * norm must be valid for every symbol with non-zero probability in count.
+ */
+size_t ZSTD_crossEntropyCost(short const* norm, unsigned accuracyLog,
+ unsigned const* count, unsigned const max)
+{
+ unsigned const shift = 8 - accuracyLog;
+ size_t cost = 0;
+ unsigned s;
+ assert(accuracyLog <= 8);
+ for (s = 0; s <= max; ++s) {
+ unsigned const normAcc = (norm[s] != -1) ? (unsigned)norm[s] : 1;
+ unsigned const norm256 = normAcc << shift;
+ assert(norm256 > 0);
+ assert(norm256 < 256);
+ cost += count[s] * kInverseProbabilityLog256[norm256];
+ }
+ return cost >> 8;
+}
+
+symbolEncodingType_e
+ZSTD_selectEncodingType(
+ FSE_repeat* repeatMode, unsigned const* count, unsigned const max,
+ size_t const mostFrequent, size_t nbSeq, unsigned const FSELog,
+ FSE_CTable const* prevCTable,
+ short const* defaultNorm, U32 defaultNormLog,
+ ZSTD_defaultPolicy_e const isDefaultAllowed,
+ ZSTD_strategy const strategy)
+{
+ ZSTD_STATIC_ASSERT(ZSTD_defaultDisallowed == 0 && ZSTD_defaultAllowed != 0);
+ if (mostFrequent == nbSeq) {
+ *repeatMode = FSE_repeat_none;
+ if (isDefaultAllowed && nbSeq <= 2) {
+ /* Prefer set_basic over set_rle when there are 2 or fewer symbols,
+ * since RLE uses 1 byte, but set_basic uses 5-6 bits per symbol.
+ * If basic encoding isn't possible, always choose RLE.
+ */
+ DEBUGLOG(5, "Selected set_basic");
+ return set_basic;
+ }
+ DEBUGLOG(5, "Selected set_rle");
+ return set_rle;
+ }
+ if (strategy < ZSTD_lazy) {
+ if (isDefaultAllowed) {
+ size_t const staticFse_nbSeq_max = 1000;
+ size_t const mult = 10 - strategy;
+ size_t const baseLog = 3;
+ size_t const dynamicFse_nbSeq_min = (((size_t)1 << defaultNormLog) * mult) >> baseLog; /* 28-36 for offset, 56-72 for lengths */
+ assert(defaultNormLog >= 5 && defaultNormLog <= 6); /* xx_DEFAULTNORMLOG */
+ assert(mult <= 9 && mult >= 7);
+ if ( (*repeatMode == FSE_repeat_valid)
+ && (nbSeq < staticFse_nbSeq_max) ) {
+ DEBUGLOG(5, "Selected set_repeat");
+ return set_repeat;
+ }
+ if ( (nbSeq < dynamicFse_nbSeq_min)
+ || (mostFrequent < (nbSeq >> (defaultNormLog-1))) ) {
+ DEBUGLOG(5, "Selected set_basic");
+ /* The format allows default tables to be repeated, but it isn't useful.
+ * When using simple heuristics to select encoding type, we don't want
+ * to confuse these tables with dictionaries. When running more careful
+ * analysis, we don't need to waste time checking both repeating tables
+ * and default tables.
+ */
+ *repeatMode = FSE_repeat_none;
+ return set_basic;
+ }
+ }
+ } else {
+ size_t const basicCost = isDefaultAllowed ? ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, count, max) : ERROR(GENERIC);
+ size_t const repeatCost = *repeatMode != FSE_repeat_none ? ZSTD_fseBitCost(prevCTable, count, max) : ERROR(GENERIC);
+ size_t const NCountCost = ZSTD_NCountCost(count, max, nbSeq, FSELog);
+ size_t const compressedCost = (NCountCost << 3) + ZSTD_entropyCost(count, max, nbSeq);
+
+ if (isDefaultAllowed) {
+ assert(!ZSTD_isError(basicCost));
+ assert(!(*repeatMode == FSE_repeat_valid && ZSTD_isError(repeatCost)));
+ }
+ assert(!ZSTD_isError(NCountCost));
+ assert(compressedCost < ERROR(maxCode));
+ DEBUGLOG(5, "Estimated bit costs: basic=%u\trepeat=%u\tcompressed=%u",
+ (unsigned)basicCost, (unsigned)repeatCost, (unsigned)compressedCost);
+ if (basicCost <= repeatCost && basicCost <= compressedCost) {
+ DEBUGLOG(5, "Selected set_basic");
+ assert(isDefaultAllowed);
+ *repeatMode = FSE_repeat_none;
+ return set_basic;
+ }
+ if (repeatCost <= compressedCost) {
+ DEBUGLOG(5, "Selected set_repeat");
+ assert(!ZSTD_isError(repeatCost));
+ return set_repeat;
+ }
+ assert(compressedCost < basicCost && compressedCost < repeatCost);
+ }
+ DEBUGLOG(5, "Selected set_compressed");
+ *repeatMode = FSE_repeat_check;
+ return set_compressed;
+}
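+
+/* Summary (per the zstd format): the four possible outcomes are
+ *   set_basic      - use the predefined default distribution; no table
+ *                    is written to the frame,
+ *   set_rle        - every symbol is identical; a single byte is written,
+ *   set_compressed - build a new FSE table and write its normalized
+ *                    counts to the frame,
+ *   set_repeat     - reuse the previous (or dictionary) table; nothing
+ *                    is written.
+ */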
+
+typedef struct {
+ S16 norm[MaxSeq + 1];
+ U32 wksp[FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(MaxSeq, MaxFSELog)];
+} ZSTD_BuildCTableWksp;
+
+size_t
+ZSTD_buildCTable(void* dst, size_t dstCapacity,
+ FSE_CTable* nextCTable, U32 FSELog, symbolEncodingType_e type,
+ unsigned* count, U32 max,
+ const BYTE* codeTable, size_t nbSeq,
+ const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax,
+ const FSE_CTable* prevCTable, size_t prevCTableSize,
+ void* entropyWorkspace, size_t entropyWorkspaceSize)
+{
+ BYTE* op = (BYTE*)dst;
+ const BYTE* const oend = op + dstCapacity;
+ DEBUGLOG(6, "ZSTD_buildCTable (dstCapacity=%u)", (unsigned)dstCapacity);
+
+ switch (type) {
+ case set_rle:
+ FORWARD_IF_ERROR(FSE_buildCTable_rle(nextCTable, (BYTE)max), "");
+ RETURN_ERROR_IF(dstCapacity==0, dstSize_tooSmall, "not enough space");
+ *op = codeTable[0];
+ return 1;
+ case set_repeat:
+ ZSTD_memcpy(nextCTable, prevCTable, prevCTableSize);
+ return 0;
+ case set_basic:
+ FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, defaultNorm, defaultMax, defaultNormLog, entropyWorkspace, entropyWorkspaceSize), ""); /* note : could be pre-calculated */
+ return 0;
+ case set_compressed: {
+ ZSTD_BuildCTableWksp* wksp = (ZSTD_BuildCTableWksp*)entropyWorkspace;
+ size_t nbSeq_1 = nbSeq;
+ const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max);
+ if (count[codeTable[nbSeq-1]] > 1) {
+ count[codeTable[nbSeq-1]]--;
+ nbSeq_1--;
+ }
+ assert(nbSeq_1 > 1);
+ assert(entropyWorkspaceSize >= sizeof(ZSTD_BuildCTableWksp));
+ (void)entropyWorkspaceSize;
+ FORWARD_IF_ERROR(FSE_normalizeCount(wksp->norm, tableLog, count, nbSeq_1, max, ZSTD_useLowProbCount(nbSeq_1)), "FSE_normalizeCount failed");
+ assert(oend >= op);
+ { size_t const NCountSize = FSE_writeNCount(op, (size_t)(oend - op), wksp->norm, max, tableLog); /* overflow protected */
+ FORWARD_IF_ERROR(NCountSize, "FSE_writeNCount failed");
+ FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, wksp->norm, max, tableLog, wksp->wksp, sizeof(wksp->wksp)), "FSE_buildCTable_wksp failed");
+ return NCountSize;
+ }
+ }
+ default: assert(0); RETURN_ERROR(GENERIC, "impossible to reach");
+ }
+}
+
+FORCE_INLINE_TEMPLATE size_t
+ZSTD_encodeSequences_body(
+ void* dst, size_t dstCapacity,
+ FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
+ FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
+ FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
+ seqDef const* sequences, size_t nbSeq, int longOffsets)
+{
+ BIT_CStream_t blockStream;
+ FSE_CState_t stateMatchLength;
+ FSE_CState_t stateOffsetBits;
+ FSE_CState_t stateLitLength;
+
+ RETURN_ERROR_IF(
+ ERR_isError(BIT_initCStream(&blockStream, dst, dstCapacity)),
+ dstSize_tooSmall, "not enough space remaining");
+ DEBUGLOG(6, "available space for bitstream : %i (dstCapacity=%u)",
+ (int)(blockStream.endPtr - blockStream.startPtr),
+ (unsigned)dstCapacity);
+
+ /* first symbols */
+ FSE_initCState2(&stateMatchLength, CTable_MatchLength, mlCodeTable[nbSeq-1]);
+ FSE_initCState2(&stateOffsetBits, CTable_OffsetBits, ofCodeTable[nbSeq-1]);
+ FSE_initCState2(&stateLitLength, CTable_LitLength, llCodeTable[nbSeq-1]);
+ BIT_addBits(&blockStream, sequences[nbSeq-1].litLength, LL_bits[llCodeTable[nbSeq-1]]);
+ if (MEM_32bits()) BIT_flushBits(&blockStream);
+ BIT_addBits(&blockStream, sequences[nbSeq-1].mlBase, ML_bits[mlCodeTable[nbSeq-1]]);
+ if (MEM_32bits()) BIT_flushBits(&blockStream);
+ if (longOffsets) {
+ U32 const ofBits = ofCodeTable[nbSeq-1];
+ unsigned const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
+ if (extraBits) {
+ BIT_addBits(&blockStream, sequences[nbSeq-1].offBase, extraBits);
+ BIT_flushBits(&blockStream);
+ }
+ BIT_addBits(&blockStream, sequences[nbSeq-1].offBase >> extraBits,
+ ofBits - extraBits);
+ } else {
+ BIT_addBits(&blockStream, sequences[nbSeq-1].offBase, ofCodeTable[nbSeq-1]);
+ }
+ BIT_flushBits(&blockStream);
+
+ { size_t n;
+ for (n=nbSeq-2 ; n<nbSeq ; n--) { /* intentional underflow */
+ BYTE const llCode = llCodeTable[n];
+ BYTE const ofCode = ofCodeTable[n];
+ BYTE const mlCode = mlCodeTable[n];
+ U32 const llBits = LL_bits[llCode];
+ U32 const ofBits = ofCode;
+ U32 const mlBits = ML_bits[mlCode];
+ DEBUGLOG(6, "encoding: litlen:%2u - matchlen:%2u - offCode:%7u",
+ (unsigned)sequences[n].litLength,
+ (unsigned)sequences[n].mlBase + MINMATCH,
+ (unsigned)sequences[n].offBase);
+ /* 32b*/ /* 64b*/
+ /* (7)*/ /* (7)*/
+ FSE_encodeSymbol(&blockStream, &stateOffsetBits, ofCode); /* 15 */ /* 15 */
+ FSE_encodeSymbol(&blockStream, &stateMatchLength, mlCode); /* 24 */ /* 24 */
+ if (MEM_32bits()) BIT_flushBits(&blockStream); /* (7)*/
+ FSE_encodeSymbol(&blockStream, &stateLitLength, llCode); /* 16 */ /* 33 */
+ if (MEM_32bits() || (ofBits+mlBits+llBits >= 64-7-(LLFSELog+MLFSELog+OffFSELog)))
+ BIT_flushBits(&blockStream); /* (7)*/
+ BIT_addBits(&blockStream, sequences[n].litLength, llBits);
+ if (MEM_32bits() && ((llBits+mlBits)>24)) BIT_flushBits(&blockStream);
+ BIT_addBits(&blockStream, sequences[n].mlBase, mlBits);
+ if (MEM_32bits() || (ofBits+mlBits+llBits > 56)) BIT_flushBits(&blockStream);
+ if (longOffsets) {
+ unsigned const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
+ if (extraBits) {
+ BIT_addBits(&blockStream, sequences[n].offBase, extraBits);
+ BIT_flushBits(&blockStream); /* (7)*/
+ }
+ BIT_addBits(&blockStream, sequences[n].offBase >> extraBits,
+ ofBits - extraBits); /* 31 */
+ } else {
+ BIT_addBits(&blockStream, sequences[n].offBase, ofBits); /* 31 */
+ }
+ BIT_flushBits(&blockStream); /* (7)*/
+ DEBUGLOG(7, "remaining space : %i", (int)(blockStream.endPtr - blockStream.ptr));
+ } }
+
+ DEBUGLOG(6, "ZSTD_encodeSequences: flushing ML state with %u bits", stateMatchLength.stateLog);
+ FSE_flushCState(&blockStream, &stateMatchLength);
+ DEBUGLOG(6, "ZSTD_encodeSequences: flushing Off state with %u bits", stateOffsetBits.stateLog);
+ FSE_flushCState(&blockStream, &stateOffsetBits);
+ DEBUGLOG(6, "ZSTD_encodeSequences: flushing LL state with %u bits", stateLitLength.stateLog);
+ FSE_flushCState(&blockStream, &stateLitLength);
+
+ { size_t const streamSize = BIT_closeCStream(&blockStream);
+ RETURN_ERROR_IF(streamSize==0, dstSize_tooSmall, "not enough space");
+ return streamSize;
+ }
+}
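+
+/* Note (illustrative): the loop above encodes sequences last-to-first
+ * (n counts down from nbSeq-2) because FSE decoding consumes the
+ * bitstream in the reverse order of encoding; writing in reverse lets
+ * the decoder regenerate sequences in natural first-to-last order.
+ */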
+
+static size_t
+ZSTD_encodeSequences_default(
+ void* dst, size_t dstCapacity,
+ FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
+ FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
+ FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
+ seqDef const* sequences, size_t nbSeq, int longOffsets)
+{
+ return ZSTD_encodeSequences_body(dst, dstCapacity,
+ CTable_MatchLength, mlCodeTable,
+ CTable_OffsetBits, ofCodeTable,
+ CTable_LitLength, llCodeTable,
+ sequences, nbSeq, longOffsets);
+}
+
+
+#if DYNAMIC_BMI2
+
+static BMI2_TARGET_ATTRIBUTE size_t
+ZSTD_encodeSequences_bmi2(
+ void* dst, size_t dstCapacity,
+ FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
+ FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
+ FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
+ seqDef const* sequences, size_t nbSeq, int longOffsets)
+{
+ return ZSTD_encodeSequences_body(dst, dstCapacity,
+ CTable_MatchLength, mlCodeTable,
+ CTable_OffsetBits, ofCodeTable,
+ CTable_LitLength, llCodeTable,
+ sequences, nbSeq, longOffsets);
+}
+
+#endif
+
+size_t ZSTD_encodeSequences(
+ void* dst, size_t dstCapacity,
+ FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
+ FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
+ FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
+ seqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2)
+{
+ DEBUGLOG(5, "ZSTD_encodeSequences: dstCapacity = %u", (unsigned)dstCapacity);
+#if DYNAMIC_BMI2
+ if (bmi2) {
+ return ZSTD_encodeSequences_bmi2(dst, dstCapacity,
+ CTable_MatchLength, mlCodeTable,
+ CTable_OffsetBits, ofCodeTable,
+ CTable_LitLength, llCodeTable,
+ sequences, nbSeq, longOffsets);
+ }
+#endif
+ (void)bmi2;
+ return ZSTD_encodeSequences_default(dst, dstCapacity,
+ CTable_MatchLength, mlCodeTable,
+ CTable_OffsetBits, ofCodeTable,
+ CTable_LitLength, llCodeTable,
+ sequences, nbSeq, longOffsets);
+}
diff --git a/lib/zstd/compress/zstd_compress_sequences.h b/lib/zstd/compress/zstd_compress_sequences.h
new file mode 100644
index 000000000..7991364c2
--- /dev/null
+++ b/lib/zstd/compress/zstd_compress_sequences.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_COMPRESS_SEQUENCES_H
+#define ZSTD_COMPRESS_SEQUENCES_H
+
+#include "../common/fse.h" /* FSE_repeat, FSE_CTable */
+#include "../common/zstd_internal.h" /* symbolEncodingType_e, ZSTD_strategy */
+
+typedef enum {
+ ZSTD_defaultDisallowed = 0,
+ ZSTD_defaultAllowed = 1
+} ZSTD_defaultPolicy_e;
+
+symbolEncodingType_e
+ZSTD_selectEncodingType(
+ FSE_repeat* repeatMode, unsigned const* count, unsigned const max,
+ size_t const mostFrequent, size_t nbSeq, unsigned const FSELog,
+ FSE_CTable const* prevCTable,
+ short const* defaultNorm, U32 defaultNormLog,
+ ZSTD_defaultPolicy_e const isDefaultAllowed,
+ ZSTD_strategy const strategy);
+
+size_t
+ZSTD_buildCTable(void* dst, size_t dstCapacity,
+ FSE_CTable* nextCTable, U32 FSELog, symbolEncodingType_e type,
+ unsigned* count, U32 max,
+ const BYTE* codeTable, size_t nbSeq,
+ const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax,
+ const FSE_CTable* prevCTable, size_t prevCTableSize,
+ void* entropyWorkspace, size_t entropyWorkspaceSize);
+
+size_t ZSTD_encodeSequences(
+ void* dst, size_t dstCapacity,
+ FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
+ FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
+ FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
+ seqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2);
+
+size_t ZSTD_fseBitCost(
+ FSE_CTable const* ctable,
+ unsigned const* count,
+ unsigned const max);
+
+size_t ZSTD_crossEntropyCost(short const* norm, unsigned accuracyLog,
+ unsigned const* count, unsigned const max);
+#endif /* ZSTD_COMPRESS_SEQUENCES_H */
diff --git a/lib/zstd/compress/zstd_compress_superblock.c b/lib/zstd/compress/zstd_compress_superblock.c
new file mode 100644
index 000000000..17d836cc8
--- /dev/null
+++ b/lib/zstd/compress/zstd_compress_superblock.c
@@ -0,0 +1,573 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+ /*-*************************************
+ * Dependencies
+ ***************************************/
+#include "zstd_compress_superblock.h"
+
+#include "../common/zstd_internal.h" /* ZSTD_getSequenceLength */
+#include "hist.h" /* HIST_countFast_wksp */
+#include "zstd_compress_internal.h" /* ZSTD_[huf|fse|entropy]CTablesMetadata_t */
+#include "zstd_compress_sequences.h"
+#include "zstd_compress_literals.h"
+
+/* ZSTD_compressSubBlock_literal() :
+ * Compresses literals section for a sub-block.
+ * When we have to write the Huffman table we will sometimes choose a header
+ * size larger than necessary. This is because we have to pick the header size
+ * before we know the table size + compressed size, so we have a bound on the
+ * table size. If we guessed incorrectly, we fall back to uncompressed literals.
+ *
+ * We write the header when writeEntropy=1; entropyWritten is set to 1 if we
+ * succeed in writing the header, and to 0 otherwise.
+ *
+ * hufMetadata->hType has literals block type info.
+ * If it is set_basic, all sub-blocks literals section will be Raw_Literals_Block.
+ * If it is set_rle, all sub-blocks literals section will be RLE_Literals_Block.
+ * If it is set_compressed, the first sub-block's literals section will be Compressed_Literals_Block
+ * and the following sub-blocks' literals sections will be Treeless_Literals_Block.
+ * If it is set_repeat, all sub-blocks' literals sections will be Treeless_Literals_Block.
+ * @return : compressed size of literals section of a sub-block
+ * Or 0 if it is unable to compress.
+ * Or error code */
+static size_t ZSTD_compressSubBlock_literal(const HUF_CElt* hufTable,
+ const ZSTD_hufCTablesMetadata_t* hufMetadata,
+ const BYTE* literals, size_t litSize,
+ void* dst, size_t dstSize,
+ const int bmi2, int writeEntropy, int* entropyWritten)
+{
+ size_t const header = writeEntropy ? 200 : 0;
+ size_t const lhSize = 3 + (litSize >= (1 KB - header)) + (litSize >= (16 KB - header));
+ BYTE* const ostart = (BYTE*)dst;
+ BYTE* const oend = ostart + dstSize;
+ BYTE* op = ostart + lhSize;
+ U32 const singleStream = lhSize == 3;
+ symbolEncodingType_e hType = writeEntropy ? hufMetadata->hType : set_repeat;
+ size_t cLitSize = 0;
+
+ (void)bmi2; /* TODO bmi2... */
+
+ DEBUGLOG(5, "ZSTD_compressSubBlock_literal (litSize=%zu, lhSize=%zu, writeEntropy=%d)", litSize, lhSize, writeEntropy);
+
+ *entropyWritten = 0;
+ if (litSize == 0 || hufMetadata->hType == set_basic) {
+ DEBUGLOG(5, "ZSTD_compressSubBlock_literal using raw literal");
+ return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize);
+ } else if (hufMetadata->hType == set_rle) {
+ DEBUGLOG(5, "ZSTD_compressSubBlock_literal using rle literal");
+ return ZSTD_compressRleLiteralsBlock(dst, dstSize, literals, litSize);
+ }
+
+ assert(litSize > 0);
+ assert(hufMetadata->hType == set_compressed || hufMetadata->hType == set_repeat);
+
+ if (writeEntropy && hufMetadata->hType == set_compressed) {
+ ZSTD_memcpy(op, hufMetadata->hufDesBuffer, hufMetadata->hufDesSize);
+ op += hufMetadata->hufDesSize;
+ cLitSize += hufMetadata->hufDesSize;
+ DEBUGLOG(5, "ZSTD_compressSubBlock_literal (hSize=%zu)", hufMetadata->hufDesSize);
+ }
+
+ /* TODO bmi2 */
+ { const size_t cSize = singleStream ? HUF_compress1X_usingCTable(op, oend-op, literals, litSize, hufTable)
+ : HUF_compress4X_usingCTable(op, oend-op, literals, litSize, hufTable);
+ op += cSize;
+ cLitSize += cSize;
+ if (cSize == 0 || ERR_isError(cSize)) {
+ DEBUGLOG(5, "Failed to write entropy tables %s", ZSTD_getErrorName(cSize));
+ return 0;
+ }
+ /* If we expand and we aren't writing a header then emit uncompressed */
+ if (!writeEntropy && cLitSize >= litSize) {
+ DEBUGLOG(5, "ZSTD_compressSubBlock_literal using raw literal because uncompressible");
+ return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize);
+ }
+ /* If we are writing headers then allow expansion that doesn't change our header size. */
+ if (lhSize < (size_t)(3 + (cLitSize >= 1 KB) + (cLitSize >= 16 KB))) {
+ assert(cLitSize > litSize);
+ DEBUGLOG(5, "Literals expanded beyond allowed header size");
+ return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize);
+ }
+ DEBUGLOG(5, "ZSTD_compressSubBlock_literal (cSize=%zu)", cSize);
+ }
+
+ /* Build header */
+ switch(lhSize)
+ {
+ case 3: /* 2 - 2 - 10 - 10 */
+ { U32 const lhc = hType + ((!singleStream) << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<14);
+ MEM_writeLE24(ostart, lhc);
+ break;
+ }
+ case 4: /* 2 - 2 - 14 - 14 */
+ { U32 const lhc = hType + (2 << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<18);
+ MEM_writeLE32(ostart, lhc);
+ break;
+ }
+ case 5: /* 2 - 2 - 18 - 18 */
+ { U32 const lhc = hType + (3 << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<22);
+ MEM_writeLE32(ostart, lhc);
+ ostart[4] = (BYTE)(cLitSize >> 10);
+ break;
+ }
+ default: /* not possible : lhSize is {3,4,5} */
+ assert(0);
+ }
+ *entropyWritten = 1;
+ DEBUGLOG(5, "Compressed literals: %u -> %u", (U32)litSize, (U32)(op-ostart));
+ return op-ostart;
+}
+
+static size_t ZSTD_seqDecompressedSize(seqStore_t const* seqStore, const seqDef* sequences, size_t nbSeq, size_t litSize, int lastSequence) {
+ const seqDef* const sstart = sequences;
+ const seqDef* const send = sequences + nbSeq;
+ const seqDef* sp = sstart;
+ size_t matchLengthSum = 0;
+ size_t litLengthSum = 0;
+ (void)(litLengthSum); /* suppress unused variable warning on some environments */
+ while (send-sp > 0) {
+ ZSTD_sequenceLength const seqLen = ZSTD_getSequenceLength(seqStore, sp);
+ litLengthSum += seqLen.litLength;
+ matchLengthSum += seqLen.matchLength;
+ sp++;
+ }
+ assert(litLengthSum <= litSize);
+ if (!lastSequence) {
+ assert(litLengthSum == litSize);
+ }
+ return matchLengthSum + litSize;
+}
+
+/* ZSTD_compressSubBlock_sequences() :
+ * Compresses sequences section for a sub-block.
+ * fseMetadata->llType, fseMetadata->ofType, and fseMetadata->mlType have
+ * symbol compression modes for the super-block.
+ * The first successfully compressed block will have these in its header.
+ * We set entropyWritten=1 when we succeed in compressing the sequences.
+ * The following sub-blocks will always have repeat mode.
+ * @return : compressed size of sequences section of a sub-block
+ * Or 0 if it is unable to compress
+ * Or error code. */
+static size_t ZSTD_compressSubBlock_sequences(const ZSTD_fseCTables_t* fseTables,
+ const ZSTD_fseCTablesMetadata_t* fseMetadata,
+ const seqDef* sequences, size_t nbSeq,
+ const BYTE* llCode, const BYTE* mlCode, const BYTE* ofCode,
+ const ZSTD_CCtx_params* cctxParams,
+ void* dst, size_t dstCapacity,
+ const int bmi2, int writeEntropy, int* entropyWritten)
+{
+ const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN;
+ BYTE* const ostart = (BYTE*)dst;
+ BYTE* const oend = ostart + dstCapacity;
+ BYTE* op = ostart;
+ BYTE* seqHead;
+
+ DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (nbSeq=%zu, writeEntropy=%d, longOffsets=%d)", nbSeq, writeEntropy, longOffsets);
+
+ *entropyWritten = 0;
+ /* Sequences Header */
+ RETURN_ERROR_IF((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/,
+ dstSize_tooSmall, "");
+ if (nbSeq < 0x7F)
+ *op++ = (BYTE)nbSeq;
+ else if (nbSeq < LONGNBSEQ)
+ op[0] = (BYTE)((nbSeq>>8) + 0x80), op[1] = (BYTE)nbSeq, op+=2;
+ else
+ op[0]=0xFF, MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ)), op+=3;
+ if (nbSeq==0) {
+ return op - ostart;
+ }
+
+ /* seqHead : flags for FSE encoding type */
+ seqHead = op++;
+
+ DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (seqHeadSize=%u)", (unsigned)(op-ostart));
+
+ if (writeEntropy) {
+ const U32 LLtype = fseMetadata->llType;
+ const U32 Offtype = fseMetadata->ofType;
+ const U32 MLtype = fseMetadata->mlType;
+ DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (fseTablesSize=%zu)", fseMetadata->fseTablesSize);
+ *seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2));
+ ZSTD_memcpy(op, fseMetadata->fseTablesBuffer, fseMetadata->fseTablesSize);
+ op += fseMetadata->fseTablesSize;
+ } else {
+ const U32 repeat = set_repeat;
+ *seqHead = (BYTE)((repeat<<6) + (repeat<<4) + (repeat<<2));
+ }
+
+ { size_t const bitstreamSize = ZSTD_encodeSequences(
+ op, oend - op,
+ fseTables->matchlengthCTable, mlCode,
+ fseTables->offcodeCTable, ofCode,
+ fseTables->litlengthCTable, llCode,
+ sequences, nbSeq,
+ longOffsets, bmi2);
+ FORWARD_IF_ERROR(bitstreamSize, "ZSTD_encodeSequences failed");
+ op += bitstreamSize;
+ /* zstd versions <= 1.3.4 mistakenly report corruption when
+ * FSE_readNCount() receives a buffer < 4 bytes.
+ * Fixed by https://github.com/facebook/zstd/pull/1146.
+ * This can happen when the last set_compressed table present is 2
+ * bytes and the bitstream is only one byte.
+ * In this exceedingly rare case, we will simply emit an uncompressed
+ * block, since it isn't worth optimizing.
+ */
+#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+ if (writeEntropy && fseMetadata->lastCountSize && fseMetadata->lastCountSize + bitstreamSize < 4) {
+ /* NCountSize >= 2 && bitstreamSize > 0 ==> lastCountSize == 3 */
+ assert(fseMetadata->lastCountSize + bitstreamSize == 3);
+ DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.3.4 by "
+ "emitting an uncompressed block.");
+ return 0;
+ }
+#endif
+ DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (bitstreamSize=%zu)", bitstreamSize);
+ }
+
+ /* zstd versions <= 1.4.0 mistakenly report error when
+ * sequences section body size is less than 3 bytes.
+ * Fixed by https://github.com/facebook/zstd/pull/1664.
+ * This can happen when the previous sequences section block is compressed
+ * with rle mode and the current block's sequences section is compressed
+ * with repeat mode where sequences section body size can be 1 byte.
+ */
+#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+ if (op-seqHead < 4) {
+ DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.4.0 by emitting "
+ "an uncompressed block when sequences are < 4 bytes");
+ return 0;
+ }
+#endif
+
+ *entropyWritten = 1;
+ return op - ostart;
+}
+
+/* ZSTD_compressSubBlock() :
+ * Compresses a single sub-block.
+ * @return : compressed size of the sub-block
+ * Or 0 if it failed to compress. */
+static size_t ZSTD_compressSubBlock(const ZSTD_entropyCTables_t* entropy,
+ const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
+ const seqDef* sequences, size_t nbSeq,
+ const BYTE* literals, size_t litSize,
+ const BYTE* llCode, const BYTE* mlCode, const BYTE* ofCode,
+ const ZSTD_CCtx_params* cctxParams,
+ void* dst, size_t dstCapacity,
+ const int bmi2,
+ int writeLitEntropy, int writeSeqEntropy,
+ int* litEntropyWritten, int* seqEntropyWritten,
+ U32 lastBlock)
+{
+ BYTE* const ostart = (BYTE*)dst;
+ BYTE* const oend = ostart + dstCapacity;
+ BYTE* op = ostart + ZSTD_blockHeaderSize;
+ DEBUGLOG(5, "ZSTD_compressSubBlock (litSize=%zu, nbSeq=%zu, writeLitEntropy=%d, writeSeqEntropy=%d, lastBlock=%d)",
+ litSize, nbSeq, writeLitEntropy, writeSeqEntropy, lastBlock);
+ { size_t cLitSize = ZSTD_compressSubBlock_literal((const HUF_CElt*)entropy->huf.CTable,
+ &entropyMetadata->hufMetadata, literals, litSize,
+ op, oend-op, bmi2, writeLitEntropy, litEntropyWritten);
+ FORWARD_IF_ERROR(cLitSize, "ZSTD_compressSubBlock_literal failed");
+ if (cLitSize == 0) return 0;
+ op += cLitSize;
+ }
+ { size_t cSeqSize = ZSTD_compressSubBlock_sequences(&entropy->fse,
+ &entropyMetadata->fseMetadata,
+ sequences, nbSeq,
+ llCode, mlCode, ofCode,
+ cctxParams,
+ op, oend-op,
+ bmi2, writeSeqEntropy, seqEntropyWritten);
+ FORWARD_IF_ERROR(cSeqSize, "ZSTD_compressSubBlock_sequences failed");
+ if (cSeqSize == 0) return 0;
+ op += cSeqSize;
+ }
+ /* Write block header */
+ { size_t cSize = (op-ostart)-ZSTD_blockHeaderSize;
+ U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
+ MEM_writeLE24(ostart, cBlockHeader24);
+ }
+ return op-ostart;
+}
+
+static size_t ZSTD_estimateSubBlockSize_literal(const BYTE* literals, size_t litSize,
+ const ZSTD_hufCTables_t* huf,
+ const ZSTD_hufCTablesMetadata_t* hufMetadata,
+ void* workspace, size_t wkspSize,
+ int writeEntropy)
+{
+ unsigned* const countWksp = (unsigned*)workspace;
+ unsigned maxSymbolValue = 255;
+ size_t literalSectionHeaderSize = 3; /* Use hard-coded size of 3 bytes */
+
+ if (hufMetadata->hType == set_basic) return litSize;
+ else if (hufMetadata->hType == set_rle) return 1;
+ else if (hufMetadata->hType == set_compressed || hufMetadata->hType == set_repeat) {
+ size_t const largest = HIST_count_wksp (countWksp, &maxSymbolValue, (const BYTE*)literals, litSize, workspace, wkspSize);
+ if (ZSTD_isError(largest)) return litSize;
+ { size_t cLitSizeEstimate = HUF_estimateCompressedSize((const HUF_CElt*)huf->CTable, countWksp, maxSymbolValue);
+ if (writeEntropy) cLitSizeEstimate += hufMetadata->hufDesSize;
+ return cLitSizeEstimate + literalSectionHeaderSize;
+ } }
+ assert(0); /* impossible */
+ return 0;
+}
+
+static size_t ZSTD_estimateSubBlockSize_symbolType(symbolEncodingType_e type,
+ const BYTE* codeTable, unsigned maxCode,
+ size_t nbSeq, const FSE_CTable* fseCTable,
+ const U8* additionalBits,
+ short const* defaultNorm, U32 defaultNormLog, U32 defaultMax,
+ void* workspace, size_t wkspSize)
+{
+ unsigned* const countWksp = (unsigned*)workspace;
+ const BYTE* ctp = codeTable;
+ const BYTE* const ctStart = ctp;
+ const BYTE* const ctEnd = ctStart + nbSeq;
+ size_t cSymbolTypeSizeEstimateInBits = 0;
+ unsigned max = maxCode;
+
+ HIST_countFast_wksp(countWksp, &max, codeTable, nbSeq, workspace, wkspSize); /* can't fail */
+ if (type == set_basic) {
+ /* We selected this encoding type, so it must be valid. */
+ assert(max <= defaultMax);
+ cSymbolTypeSizeEstimateInBits = max <= defaultMax
+ ? ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, countWksp, max)
+ : ERROR(GENERIC);
+ } else if (type == set_rle) {
+ cSymbolTypeSizeEstimateInBits = 0;
+ } else if (type == set_compressed || type == set_repeat) {
+ cSymbolTypeSizeEstimateInBits = ZSTD_fseBitCost(fseCTable, countWksp, max);
+ }
+ if (ZSTD_isError(cSymbolTypeSizeEstimateInBits)) return nbSeq * 10;
+ while (ctp < ctEnd) {
+ if (additionalBits) cSymbolTypeSizeEstimateInBits += additionalBits[*ctp];
+ else cSymbolTypeSizeEstimateInBits += *ctp; /* for offset, offset code is also the number of additional bits */
+ ctp++;
+ }
+ return cSymbolTypeSizeEstimateInBits / 8;
+}
+
+static size_t ZSTD_estimateSubBlockSize_sequences(const BYTE* ofCodeTable,
+ const BYTE* llCodeTable,
+ const BYTE* mlCodeTable,
+ size_t nbSeq,
+ const ZSTD_fseCTables_t* fseTables,
+ const ZSTD_fseCTablesMetadata_t* fseMetadata,
+ void* workspace, size_t wkspSize,
+ int writeEntropy)
+{
+ size_t const sequencesSectionHeaderSize = 3; /* Use hard-coded size of 3 bytes */
+ size_t cSeqSizeEstimate = 0;
+ if (nbSeq == 0) return sequencesSectionHeaderSize;
+ cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->ofType, ofCodeTable, MaxOff,
+ nbSeq, fseTables->offcodeCTable, NULL,
+ OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
+ workspace, wkspSize);
+ cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->llType, llCodeTable, MaxLL,
+ nbSeq, fseTables->litlengthCTable, LL_bits,
+ LL_defaultNorm, LL_defaultNormLog, MaxLL,
+ workspace, wkspSize);
+ cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->mlType, mlCodeTable, MaxML,
+ nbSeq, fseTables->matchlengthCTable, ML_bits,
+ ML_defaultNorm, ML_defaultNormLog, MaxML,
+ workspace, wkspSize);
+ if (writeEntropy) cSeqSizeEstimate += fseMetadata->fseTablesSize;
+ return cSeqSizeEstimate + sequencesSectionHeaderSize;
+}
+
+static size_t ZSTD_estimateSubBlockSize(const BYTE* literals, size_t litSize,
+ const BYTE* ofCodeTable,
+ const BYTE* llCodeTable,
+ const BYTE* mlCodeTable,
+ size_t nbSeq,
+ const ZSTD_entropyCTables_t* entropy,
+ const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
+ void* workspace, size_t wkspSize,
+ int writeLitEntropy, int writeSeqEntropy) {
+ size_t cSizeEstimate = 0;
+ cSizeEstimate += ZSTD_estimateSubBlockSize_literal(literals, litSize,
+ &entropy->huf, &entropyMetadata->hufMetadata,
+ workspace, wkspSize, writeLitEntropy);
+ cSizeEstimate += ZSTD_estimateSubBlockSize_sequences(ofCodeTable, llCodeTable, mlCodeTable,
+ nbSeq, &entropy->fse, &entropyMetadata->fseMetadata,
+ workspace, wkspSize, writeSeqEntropy);
+ return cSizeEstimate + ZSTD_blockHeaderSize;
+}
+
+static int ZSTD_needSequenceEntropyTables(ZSTD_fseCTablesMetadata_t const* fseMetadata)
+{
+ if (fseMetadata->llType == set_compressed || fseMetadata->llType == set_rle)
+ return 1;
+ if (fseMetadata->mlType == set_compressed || fseMetadata->mlType == set_rle)
+ return 1;
+ if (fseMetadata->ofType == set_compressed || fseMetadata->ofType == set_rle)
+ return 1;
+ return 0;
+}
+
+/* ZSTD_compressSubBlock_multi() :
+ * Breaks super-block into multiple sub-blocks and compresses them.
+ * Entropy will be written to the first block.
+ * The following blocks will use repeat mode to compress.
+ * All sub-blocks are compressed blocks (no raw or rle blocks).
+ * @return : compressed size of the super block (which is multiple ZSTD blocks)
+ * Or 0 if it failed to compress. */
+static size_t ZSTD_compressSubBlock_multi(const seqStore_t* seqStorePtr,
+ const ZSTD_compressedBlockState_t* prevCBlock,
+ ZSTD_compressedBlockState_t* nextCBlock,
+ const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
+ const ZSTD_CCtx_params* cctxParams,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const int bmi2, U32 lastBlock,
+ void* workspace, size_t wkspSize)
+{
+ const seqDef* const sstart = seqStorePtr->sequencesStart;
+ const seqDef* const send = seqStorePtr->sequences;
+ const seqDef* sp = sstart;
+ const BYTE* const lstart = seqStorePtr->litStart;
+ const BYTE* const lend = seqStorePtr->lit;
+ const BYTE* lp = lstart;
+ BYTE const* ip = (BYTE const*)src;
+ BYTE const* const iend = ip + srcSize;
+ BYTE* const ostart = (BYTE*)dst;
+ BYTE* const oend = ostart + dstCapacity;
+ BYTE* op = ostart;
+ const BYTE* llCodePtr = seqStorePtr->llCode;
+ const BYTE* mlCodePtr = seqStorePtr->mlCode;
+ const BYTE* ofCodePtr = seqStorePtr->ofCode;
+ size_t targetCBlockSize = cctxParams->targetCBlockSize;
+ size_t litSize, seqCount;
+ int writeLitEntropy = entropyMetadata->hufMetadata.hType == set_compressed;
+ int writeSeqEntropy = 1;
+ int lastSequence = 0;
+
+ DEBUGLOG(5, "ZSTD_compressSubBlock_multi (litSize=%u, nbSeq=%u)",
+ (unsigned)(lend-lp), (unsigned)(send-sstart));
+
+ litSize = 0;
+ seqCount = 0;
+ do {
+ size_t cBlockSizeEstimate = 0;
+ if (sstart == send) {
+ lastSequence = 1;
+ } else {
+ const seqDef* const sequence = sp + seqCount;
+ lastSequence = sequence == send - 1;
+ litSize += ZSTD_getSequenceLength(seqStorePtr, sequence).litLength;
+ seqCount++;
+ }
+ if (lastSequence) {
+ assert(lp <= lend);
+ assert(litSize <= (size_t)(lend - lp));
+ litSize = (size_t)(lend - lp);
+ }
+ /* There is an optimization opportunity here.
+ * Calling ZSTD_estimateSubBlockSize for every sequence can be wasteful
+ * since it recalculates the estimate from scratch.
+ * For example, it recounts the literal distribution and symbol codes
+ * every time.
+ */
+ cBlockSizeEstimate = ZSTD_estimateSubBlockSize(lp, litSize, ofCodePtr, llCodePtr, mlCodePtr, seqCount,
+ &nextCBlock->entropy, entropyMetadata,
+ workspace, wkspSize, writeLitEntropy, writeSeqEntropy);
+ if (cBlockSizeEstimate > targetCBlockSize || lastSequence) {
+ int litEntropyWritten = 0;
+ int seqEntropyWritten = 0;
+ const size_t decompressedSize = ZSTD_seqDecompressedSize(seqStorePtr, sp, seqCount, litSize, lastSequence);
+ const size_t cSize = ZSTD_compressSubBlock(&nextCBlock->entropy, entropyMetadata,
+ sp, seqCount,
+ lp, litSize,
+ llCodePtr, mlCodePtr, ofCodePtr,
+ cctxParams,
+ op, oend-op,
+ bmi2, writeLitEntropy, writeSeqEntropy,
+ &litEntropyWritten, &seqEntropyWritten,
+ lastBlock && lastSequence);
+ FORWARD_IF_ERROR(cSize, "ZSTD_compressSubBlock failed");
+ if (cSize > 0 && cSize < decompressedSize) {
+ DEBUGLOG(5, "Committed the sub-block");
+ assert(ip + decompressedSize <= iend);
+ ip += decompressedSize;
+ sp += seqCount;
+ lp += litSize;
+ op += cSize;
+ llCodePtr += seqCount;
+ mlCodePtr += seqCount;
+ ofCodePtr += seqCount;
+ litSize = 0;
+ seqCount = 0;
+ /* Entropy only needs to be written once */
+ if (litEntropyWritten) {
+ writeLitEntropy = 0;
+ }
+ if (seqEntropyWritten) {
+ writeSeqEntropy = 0;
+ }
+ }
+ }
+ } while (!lastSequence);
+ if (writeLitEntropy) {
+ DEBUGLOG(5, "ZSTD_compressSubBlock_multi has literal entropy tables unwritten");
+ ZSTD_memcpy(&nextCBlock->entropy.huf, &prevCBlock->entropy.huf, sizeof(prevCBlock->entropy.huf));
+ }
+ if (writeSeqEntropy && ZSTD_needSequenceEntropyTables(&entropyMetadata->fseMetadata)) {
+ /* If we haven't written our entropy tables, then we've violated our contract and
+ * must emit an uncompressed block.
+ */
+ DEBUGLOG(5, "ZSTD_compressSubBlock_multi has sequence entropy tables unwritten");
+ return 0;
+ }
+ if (ip < iend) {
+ size_t const cSize = ZSTD_noCompressBlock(op, oend - op, ip, iend - ip, lastBlock);
+ DEBUGLOG(5, "ZSTD_compressSubBlock_multi last sub-block uncompressed, %zu bytes", (size_t)(iend - ip));
+ FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed");
+ assert(cSize != 0);
+ op += cSize;
+ /* We have to regenerate the repcodes because we've skipped some sequences */
+ if (sp < send) {
+ seqDef const* seq;
+ repcodes_t rep;
+ ZSTD_memcpy(&rep, prevCBlock->rep, sizeof(rep));
+ for (seq = sstart; seq < sp; ++seq) {
+ ZSTD_updateRep(rep.rep, seq->offBase - 1, ZSTD_getSequenceLength(seqStorePtr, seq).litLength == 0);
+ }
+ ZSTD_memcpy(nextCBlock->rep, &rep, sizeof(rep));
+ }
+ }
+ DEBUGLOG(5, "ZSTD_compressSubBlock_multi compressed");
+ return op-ostart;
+}
+
+size_t ZSTD_compressSuperBlock(ZSTD_CCtx* zc,
+ void* dst, size_t dstCapacity,
+ void const* src, size_t srcSize,
+ unsigned lastBlock) {
+ ZSTD_entropyCTablesMetadata_t entropyMetadata;
+
+ FORWARD_IF_ERROR(ZSTD_buildBlockEntropyStats(&zc->seqStore,
+ &zc->blockState.prevCBlock->entropy,
+ &zc->blockState.nextCBlock->entropy,
+ &zc->appliedParams,
+ &entropyMetadata,
+ zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */), "");
+
+ return ZSTD_compressSubBlock_multi(&zc->seqStore,
+ zc->blockState.prevCBlock,
+ zc->blockState.nextCBlock,
+ &entropyMetadata,
+ &zc->appliedParams,
+ dst, dstCapacity,
+ src, srcSize,
+ zc->bmi2, lastBlock,
+ zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */);
+}
diff --git a/lib/zstd/compress/zstd_compress_superblock.h b/lib/zstd/compress/zstd_compress_superblock.h
new file mode 100644
index 000000000..224ece795
--- /dev/null
+++ b/lib/zstd/compress/zstd_compress_superblock.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_COMPRESS_ADVANCED_H
+#define ZSTD_COMPRESS_ADVANCED_H
+
+/*-*************************************
+* Dependencies
+***************************************/
+
+#include <linux/zstd.h> /* ZSTD_CCtx */
+
+/*-*************************************
+* Target Compressed Block Size
+***************************************/
+
+/* ZSTD_compressSuperBlock() :
+ * Used to compress a super block when targetCBlockSize is being used.
+ * The given block will be compressed into multiple sub-blocks of around targetCBlockSize each. */
+size_t ZSTD_compressSuperBlock(ZSTD_CCtx* zc,
+ void* dst, size_t dstCapacity,
+ void const* src, size_t srcSize,
+ unsigned lastBlock);
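+
+/* Usage note (illustrative): this path is only taken when the caller has
+ * set a non-zero targetCBlockSize in the compression parameters, e.g. via
+ * ZSTD_CCtxParams_setParameter(..., ZSTD_c_targetCBlockSize, 1024) in
+ * upstream zstd; each emitted sub-block is then sized near that target.
+ */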
+
+#endif /* ZSTD_COMPRESS_ADVANCED_H */
diff --git a/lib/zstd/compress/zstd_cwksp.h b/lib/zstd/compress/zstd_cwksp.h
new file mode 100644
index 000000000..349fc923c
--- /dev/null
+++ b/lib/zstd/compress/zstd_cwksp.h
@@ -0,0 +1,595 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_CWKSP_H
+#define ZSTD_CWKSP_H
+
+/*-*************************************
+* Dependencies
+***************************************/
+#include "../common/zstd_internal.h"
+
+
+/*-*************************************
+* Constants
+***************************************/
+
+/* Since the workspace is effectively its own little malloc implementation /
+ * arena, when we run under ASAN, we should similarly insert redzones between
+ * each internal element of the workspace, so ASAN will catch overruns that
+ * reach outside an object but that stay inside the workspace.
+ *
+ * This defines the size of that redzone.
+ */
+#ifndef ZSTD_CWKSP_ASAN_REDZONE_SIZE
+#define ZSTD_CWKSP_ASAN_REDZONE_SIZE 128
+#endif
+
+
+/* Set our tables and aligned buffers to align to 64 bytes */
+#define ZSTD_CWKSP_ALIGNMENT_BYTES 64
+
+/*-*************************************
+* Structures
+***************************************/
+typedef enum {
+ ZSTD_cwksp_alloc_objects,
+ ZSTD_cwksp_alloc_buffers,
+ ZSTD_cwksp_alloc_aligned
+} ZSTD_cwksp_alloc_phase_e;
+
+/*
+ * Used to describe whether the workspace is statically allocated (and will not
+ * necessarily ever be freed), or dynamically allocated, in which case a
+ * well-formed caller is expected to free it.
+ */
+typedef enum {
+ ZSTD_cwksp_dynamic_alloc,
+ ZSTD_cwksp_static_alloc
+} ZSTD_cwksp_static_alloc_e;
+
+/*
+ * Zstd fits all its internal datastructures into a single contiguous buffer,
+ * so that it only needs to perform a single OS allocation (or so that a buffer
+ * can be provided to it and it can perform no allocations at all). This buffer
+ * is called the workspace.
+ *
+ * Several optimizations complicate that process of allocating memory ranges
+ * from this workspace for each internal datastructure:
+ *
+ * - These different internal datastructures have different setup requirements:
+ *
+ * - The static objects need to be cleared once and can then be trivially
+ * reused for each compression.
+ *
+ * - Various buffers don't need to be initialized at all--they are always
+ * written into before they're read.
+ *
+ * - The matchstate tables have a unique requirement: they don't need
+ *   their memory to be totally cleared, but they do need the memory to be
+ *   bounded, i.e., a guarantee that all values in the memory they've been
+ *   allocated are less than some maximum value (which is the starting value
+ *   for the indices they will then use for compression). When this
+ *   guarantee is provided, they can use the memory without any setup
+ *   work. When it can't be, they have to clear the area.
+ *
+ * - These buffers also have different alignment requirements.
+ *
+ * - We would like to reuse the objects in the workspace for multiple
+ * compressions without having to perform any expensive reallocation or
+ * reinitialization work.
+ *
+ * - We would like to be able to efficiently reuse the workspace across
+ * multiple compressions **even when the compression parameters change** and
+ * we need to resize some of the objects (where possible).
+ *
+ * To attempt to manage this buffer, given these constraints, the ZSTD_cwksp
+ * abstraction was created. It works as follows:
+ *
+ * Workspace Layout:
+ *
+ * [ ... workspace ... ]
+ * [objects][tables ... ->] free space [<- ... aligned][<- ... buffers]
+ *
+ * The various objects that live in the workspace are divided into the
+ * following categories, and are allocated separately:
+ *
+ * - Static objects: this is optionally the enclosing ZSTD_CCtx or ZSTD_CDict,
+ * so that literally everything fits in a single buffer. Note: if present,
+ * this must be the first object in the workspace, since ZSTD_customFree{CCtx,
+ * CDict}() rely on a pointer comparison to see whether one or two frees are
+ * required.
+ *
+ * - Fixed size objects: these are fixed-size, fixed-count objects that are
+ * nonetheless "dynamically" allocated in the workspace so that we can
+ * control how they're initialized separately from the broader ZSTD_CCtx.
+ * Examples:
+ * - Entropy Workspace
+ * - 2 x ZSTD_compressedBlockState_t
+ * - CDict dictionary contents
+ *
+ * - Tables: these are any of several different datastructures (hash tables,
+ * chain tables, binary trees) that all respect a common format: they are
+ * uint32_t arrays, all of whose values are between 0 and (nextSrc - base).
+ * Their sizes depend on the cparams. These tables are 64-byte aligned.
+ *
+ * - Aligned: these buffers are used for various purposes that require 4-byte
+ *   alignment, but don't require any initialization before they're used. These
+ *   buffers are each aligned to 64 bytes.
+ *
+ * - Buffers: these buffers are used for various purposes that don't require
+ * any alignment or initialization before they're used. This means they can
+ * be moved around at no cost for a new compression.
+ *
+ * Allocating Memory:
+ *
+ * The various types of objects must be allocated in order, so they can be
+ * correctly packed into the workspace buffer. That order is:
+ *
+ * 1. Objects
+ * 2. Buffers
+ * 3. Aligned/Tables
+ *
+ * Attempts to reserve objects of different types out of order will fail.
+ */
+typedef struct {
+ void* workspace;
+ void* workspaceEnd;
+
+ void* objectEnd;
+ void* tableEnd;
+ void* tableValidEnd;
+ void* allocStart;
+
+ BYTE allocFailed;
+ int workspaceOversizedDuration;
+ ZSTD_cwksp_alloc_phase_e phase;
+ ZSTD_cwksp_static_alloc_e isStatic;
+} ZSTD_cwksp;
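+
+/*
+ * Worked layout example (illustrative): `objectEnd` marks the end of the
+ * objects segment, tables occupy [objectEnd, tableEnd), and buffers/aligneds
+ * grow downward from `workspaceEnd` to `allocStart`; the free region is
+ * [tableEnd, allocStart), which is exactly what
+ * ZSTD_cwksp_available_space() reports.
+ */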
+
+/*-*************************************
+* Functions
+***************************************/
+
+MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws);
+
+MEM_STATIC void ZSTD_cwksp_assert_internal_consistency(ZSTD_cwksp* ws) {
+ (void)ws;
+ assert(ws->workspace <= ws->objectEnd);
+ assert(ws->objectEnd <= ws->tableEnd);
+ assert(ws->objectEnd <= ws->tableValidEnd);
+ assert(ws->tableEnd <= ws->allocStart);
+ assert(ws->tableValidEnd <= ws->allocStart);
+ assert(ws->allocStart <= ws->workspaceEnd);
+}
+
+/*
+ * Align must be a power of 2.
+ */
+MEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t const align) {
+ size_t const mask = align - 1;
+ assert((align & mask) == 0);
+ return (size + mask) & ~mask;
+}
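+
+/* Worked example (illustrative): ZSTD_cwksp_align(13, 8) == 16, and
+ * ZSTD_cwksp_align(16, 8) == 16, i.e. sizes already at the alignment are
+ * returned unchanged. */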
+
+/*
+ * Use this to determine how much space in the workspace we will consume to
+ * allocate this object. (Normally it should be exactly the size of the object,
+ * but under special conditions, like ASAN, where we pad each object, it might
+ * be larger.)
+ *
+ * Since tables aren't currently redzoned, you don't need to call through this
+ * to figure out how much space you need for the matchState tables. Everything
+ * else is, though.
+ *
+ * Do not use for sizing aligned buffers. Instead, use ZSTD_cwksp_aligned_alloc_size().
+ */
+MEM_STATIC size_t ZSTD_cwksp_alloc_size(size_t size) {
+ if (size == 0)
+ return 0;
+ return size;
+}
+
+/*
+ * Returns an adjusted alloc size, rounded up to the nearest multiple of 64 bytes.
+ * Used to determine the number of bytes required for a given "aligned" allocation.
+ */
+MEM_STATIC size_t ZSTD_cwksp_aligned_alloc_size(size_t size) {
+ return ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(size, ZSTD_CWKSP_ALIGNMENT_BYTES));
+}
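+
+/* Worked example (illustrative): ZSTD_cwksp_aligned_alloc_size(100) == 128,
+ * since 100 rounds up to the next multiple of ZSTD_CWKSP_ALIGNMENT_BYTES. */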
+
+/*
+ * Returns the amount of additional space the cwksp must allocate
+ * for internal purposes (currently only alignment).
+ */
+MEM_STATIC size_t ZSTD_cwksp_slack_space_required(void) {
+    /* For alignment, the wksp will always allocate an additional n_1=[1, 64] bytes
+     * to align the beginning of the aligned section, as well as another n_2=[0, 63]
+     * bytes to align the beginning of the tables section.
+ *
+ * n_1 + n_2 == 64 bytes if the cwksp is freshly allocated, due to tables and
+ * aligneds being sized in multiples of 64 bytes.
+ */
+ size_t const slackSpace = ZSTD_CWKSP_ALIGNMENT_BYTES;
+ return slackSpace;
+}
+
+
+/*
+ * Return the number of additional bytes required to align a pointer to the
+ * given alignment. alignBytes must be a power of two.
+ */
+MEM_STATIC size_t ZSTD_cwksp_bytes_to_align_ptr(void* ptr, const size_t alignBytes) {
+ size_t const alignBytesMask = alignBytes - 1;
+ size_t const bytes = (alignBytes - ((size_t)ptr & (alignBytesMask))) & alignBytesMask;
+ assert((alignBytes & alignBytesMask) == 0);
+ assert(bytes != ZSTD_CWKSP_ALIGNMENT_BYTES);
+ return bytes;
+}
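+
+/* Worked example (illustrative): for a pointer whose address is 8 mod 64,
+ * ZSTD_cwksp_bytes_to_align_ptr(ptr, 64) == 56; for an already 64-byte-aligned
+ * pointer it returns 0 rather than 64, hence the assert above. */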
+
+/*
+ * Internal function. Do not use directly.
+ * Reserves the given number of bytes within the aligned/buffer segment of the
+ * wksp, which grows downward from the end of the wksp (as opposed to the
+ * object/table segment, which grows upward from the beginning).
+ *
+ * Returns a pointer to the beginning of that space.
+ */
+MEM_STATIC void*
+ZSTD_cwksp_reserve_internal_buffer_space(ZSTD_cwksp* ws, size_t const bytes)
+{
+ void* const alloc = (BYTE*)ws->allocStart - bytes;
+ void* const bottom = ws->tableEnd;
+ DEBUGLOG(5, "cwksp: reserving %p %zd bytes, %zd bytes remaining",
+ alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
+ ZSTD_cwksp_assert_internal_consistency(ws);
+ assert(alloc >= bottom);
+ if (alloc < bottom) {
+ DEBUGLOG(4, "cwksp: alloc failed!");
+ ws->allocFailed = 1;
+ return NULL;
+ }
+ /* the area is reserved from the end of wksp.
+ * If it overlaps with tableValidEnd, it voids guarantees on values' range */
+ if (alloc < ws->tableValidEnd) {
+ ws->tableValidEnd = alloc;
+ }
+ ws->allocStart = alloc;
+ return alloc;
+}
+
+/*
+ * Moves the cwksp to the next phase, and does any necessary allocations.
+ * cwksp initialization must necessarily go through each phase in order.
+ * Returns 0 on success, or a zstd error code.
+ */
+MEM_STATIC size_t
+ZSTD_cwksp_internal_advance_phase(ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase)
+{
+ assert(phase >= ws->phase);
+ if (phase > ws->phase) {
+ /* Going from allocating objects to allocating buffers */
+ if (ws->phase < ZSTD_cwksp_alloc_buffers &&
+ phase >= ZSTD_cwksp_alloc_buffers) {
+ ws->tableValidEnd = ws->objectEnd;
+ }
+
+ /* Going from allocating buffers to allocating aligneds/tables */
+ if (ws->phase < ZSTD_cwksp_alloc_aligned &&
+ phase >= ZSTD_cwksp_alloc_aligned) {
+ { /* Align the start of the "aligned" to 64 bytes. Use [1, 64] bytes. */
+ size_t const bytesToAlign =
+ ZSTD_CWKSP_ALIGNMENT_BYTES - ZSTD_cwksp_bytes_to_align_ptr(ws->allocStart, ZSTD_CWKSP_ALIGNMENT_BYTES);
+ DEBUGLOG(5, "reserving aligned alignment addtl space: %zu", bytesToAlign);
+ ZSTD_STATIC_ASSERT((ZSTD_CWKSP_ALIGNMENT_BYTES & (ZSTD_CWKSP_ALIGNMENT_BYTES - 1)) == 0); /* power of 2 */
+ RETURN_ERROR_IF(!ZSTD_cwksp_reserve_internal_buffer_space(ws, bytesToAlign),
+ memory_allocation, "aligned phase - alignment initial allocation failed!");
+ }
+ { /* Align the start of the tables to 64 bytes. Use [0, 63] bytes */
+ void* const alloc = ws->objectEnd;
+ size_t const bytesToAlign = ZSTD_cwksp_bytes_to_align_ptr(alloc, ZSTD_CWKSP_ALIGNMENT_BYTES);
+ void* const objectEnd = (BYTE*)alloc + bytesToAlign;
+ DEBUGLOG(5, "reserving table alignment addtl space: %zu", bytesToAlign);
+ RETURN_ERROR_IF(objectEnd > ws->workspaceEnd, memory_allocation,
+ "table phase - alignment initial allocation failed!");
+ ws->objectEnd = objectEnd;
+ ws->tableEnd = objectEnd; /* table area starts being empty */
+ if (ws->tableValidEnd < ws->tableEnd) {
+ ws->tableValidEnd = ws->tableEnd;
+ } } }
+ ws->phase = phase;
+ ZSTD_cwksp_assert_internal_consistency(ws);
+ }
+ return 0;
+}
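+
+/* Example (illustrative): a first call to ZSTD_cwksp_reserve_buffer() advances
+ * the wksp from ZSTD_cwksp_alloc_objects to ZSTD_cwksp_alloc_buffers; a later
+ * ZSTD_cwksp_reserve_table() or ZSTD_cwksp_reserve_aligned() advances it to
+ * ZSTD_cwksp_alloc_aligned and performs the two alignment reservations above.
+ * Phases never move backwards, except by resetting via ZSTD_cwksp_clear(). */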
+
+/*
+ * Returns whether this object/buffer/etc. was allocated in this workspace.
+ */
+MEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr)
+{
+ return (ptr != NULL) && (ws->workspace <= ptr) && (ptr <= ws->workspaceEnd);
+}
+
+/*
+ * Internal function. Do not use directly.
+ */
+MEM_STATIC void*
+ZSTD_cwksp_reserve_internal(ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase)
+{
+ void* alloc;
+ if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase)) || bytes == 0) {
+ return NULL;
+ }
+
+    alloc = ZSTD_cwksp_reserve_internal_buffer_space(ws, bytes);
+
+ return alloc;
+}
+
+/*
+ * Reserves and returns unaligned memory.
+ */
+MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes)
+{
+ return (BYTE*)ZSTD_cwksp_reserve_internal(ws, bytes, ZSTD_cwksp_alloc_buffers);
+}
+
+/*
+ * Reserves and returns memory whose size is a multiple of, and which is
+ * aligned on, ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes).
+ */
+MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes)
+{
+ void* ptr = ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES),
+ ZSTD_cwksp_alloc_aligned);
+    assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
+ return ptr;
+}
+
+/*
+ * Aligned on 64 bytes. These buffers have the special property that
+ * their values remain constrained, allowing us to re-use them without
+ * memset()-ing them.
+ */
+MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes)
+{
+ const ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_aligned;
+ void* alloc;
+ void* end;
+ void* top;
+
+ if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase))) {
+ return NULL;
+ }
+ alloc = ws->tableEnd;
+ end = (BYTE *)alloc + bytes;
+ top = ws->allocStart;
+
+ DEBUGLOG(5, "cwksp: reserving %p table %zd bytes, %zd bytes remaining",
+ alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
+ assert((bytes & (sizeof(U32)-1)) == 0);
+ ZSTD_cwksp_assert_internal_consistency(ws);
+ assert(end <= top);
+ if (end > top) {
+ DEBUGLOG(4, "cwksp: table alloc failed!");
+ ws->allocFailed = 1;
+ return NULL;
+ }
+ ws->tableEnd = end;
+
+ assert((bytes & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
+    assert(((size_t)alloc & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
+ return alloc;
+}
+
+/*
+ * Aligned on sizeof(void*).
+ * Note: should happen only once, at the workspace's first initialization.
+ */
+MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes)
+{
+ size_t const roundedBytes = ZSTD_cwksp_align(bytes, sizeof(void*));
+ void* alloc = ws->objectEnd;
+ void* end = (BYTE*)alloc + roundedBytes;
+
+ DEBUGLOG(4,
+ "cwksp: reserving %p object %zd bytes (rounded to %zd), %zd bytes remaining",
+ alloc, bytes, roundedBytes, ZSTD_cwksp_available_space(ws) - roundedBytes);
+ assert((size_t)alloc % ZSTD_ALIGNOF(void*) == 0);
+ assert(bytes % ZSTD_ALIGNOF(void*) == 0);
+ ZSTD_cwksp_assert_internal_consistency(ws);
+ /* we must be in the first phase, no advance is possible */
+ if (ws->phase != ZSTD_cwksp_alloc_objects || end > ws->workspaceEnd) {
+ DEBUGLOG(3, "cwksp: object alloc failed!");
+ ws->allocFailed = 1;
+ return NULL;
+ }
+ ws->objectEnd = end;
+ ws->tableEnd = end;
+ ws->tableValidEnd = end;
+
+ return alloc;
+}
+
+MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws)
+{
+ DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_dirty");
+
+ assert(ws->tableValidEnd >= ws->objectEnd);
+ assert(ws->tableValidEnd <= ws->allocStart);
+ ws->tableValidEnd = ws->objectEnd;
+ ZSTD_cwksp_assert_internal_consistency(ws);
+}
+
+MEM_STATIC void ZSTD_cwksp_mark_tables_clean(ZSTD_cwksp* ws) {
+ DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_clean");
+ assert(ws->tableValidEnd >= ws->objectEnd);
+ assert(ws->tableValidEnd <= ws->allocStart);
+ if (ws->tableValidEnd < ws->tableEnd) {
+ ws->tableValidEnd = ws->tableEnd;
+ }
+ ZSTD_cwksp_assert_internal_consistency(ws);
+}
+
+/*
+ * Zero the part of the allocated tables not already marked clean.
+ */
+MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) {
+ DEBUGLOG(4, "cwksp: ZSTD_cwksp_clean_tables");
+ assert(ws->tableValidEnd >= ws->objectEnd);
+ assert(ws->tableValidEnd <= ws->allocStart);
+ if (ws->tableValidEnd < ws->tableEnd) {
+ ZSTD_memset(ws->tableValidEnd, 0, (BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd);
+ }
+ ZSTD_cwksp_mark_tables_clean(ws);
+}
+
+/*
+ * Invalidates table allocations.
+ * All other allocations remain valid.
+ */
+MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) {
+ DEBUGLOG(4, "cwksp: clearing tables!");
+
+ ws->tableEnd = ws->objectEnd;
+ ZSTD_cwksp_assert_internal_consistency(ws);
+}
+
+/*
+ * Invalidates all buffer, aligned, and table allocations.
+ * Object allocations remain valid.
+ */
+MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
+ DEBUGLOG(4, "cwksp: clearing!");
+
+ ws->tableEnd = ws->objectEnd;
+ ws->allocStart = ws->workspaceEnd;
+ ws->allocFailed = 0;
+ if (ws->phase > ZSTD_cwksp_alloc_buffers) {
+ ws->phase = ZSTD_cwksp_alloc_buffers;
+ }
+ ZSTD_cwksp_assert_internal_consistency(ws);
+}
+
+/*
+ * The provided workspace takes ownership of the buffer [start, start+size).
+ * Any existing values in the workspace are ignored (the previously managed
+ * buffer, if present, must be separately freed).
+ */
+MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size, ZSTD_cwksp_static_alloc_e isStatic) {
+ DEBUGLOG(4, "cwksp: init'ing workspace with %zd bytes", size);
+ assert(((size_t)start & (sizeof(void*)-1)) == 0); /* ensure correct alignment */
+ ws->workspace = start;
+ ws->workspaceEnd = (BYTE*)start + size;
+ ws->objectEnd = ws->workspace;
+ ws->tableValidEnd = ws->objectEnd;
+ ws->phase = ZSTD_cwksp_alloc_objects;
+ ws->isStatic = isStatic;
+ ZSTD_cwksp_clear(ws);
+ ws->workspaceOversizedDuration = 0;
+ ZSTD_cwksp_assert_internal_consistency(ws);
+}
+
+MEM_STATIC size_t ZSTD_cwksp_create(ZSTD_cwksp* ws, size_t size, ZSTD_customMem customMem) {
+ void* workspace = ZSTD_customMalloc(size, customMem);
+ DEBUGLOG(4, "cwksp: creating new workspace with %zd bytes", size);
+ RETURN_ERROR_IF(workspace == NULL, memory_allocation, "NULL pointer!");
+ ZSTD_cwksp_init(ws, workspace, size, ZSTD_cwksp_dynamic_alloc);
+ return 0;
+}
+
+MEM_STATIC void ZSTD_cwksp_free(ZSTD_cwksp* ws, ZSTD_customMem customMem) {
+ void *ptr = ws->workspace;
+ DEBUGLOG(4, "cwksp: freeing workspace");
+ ZSTD_memset(ws, 0, sizeof(ZSTD_cwksp));
+ ZSTD_customFree(ptr, customMem);
+}
+
+/*
+ * Moves the management of a workspace from one cwksp to another. The src cwksp
+ * is left in an invalid state (src must be re-init()'ed before it's used again).
+ */
+MEM_STATIC void ZSTD_cwksp_move(ZSTD_cwksp* dst, ZSTD_cwksp* src) {
+ *dst = *src;
+ ZSTD_memset(src, 0, sizeof(ZSTD_cwksp));
+}
+
+MEM_STATIC size_t ZSTD_cwksp_sizeof(const ZSTD_cwksp* ws) {
+ return (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->workspace);
+}
+
+MEM_STATIC size_t ZSTD_cwksp_used(const ZSTD_cwksp* ws) {
+ return (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->workspace)
+ + (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->allocStart);
+}
+
+MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) {
+ return ws->allocFailed;
+}
+
+/*-*************************************
+* Functions Checking Free Space
+***************************************/
+
+/* ZSTD_cwksp_estimated_space_within_bounds() :
+ * Returns whether the estimated space needed for a wksp is within an
+ * acceptable limit of the actual amount of space used.
+ */
+MEM_STATIC int ZSTD_cwksp_estimated_space_within_bounds(const ZSTD_cwksp* const ws,
+ size_t const estimatedSpace, int resizedWorkspace) {
+ if (resizedWorkspace) {
+ /* Resized/newly allocated wksp should have exact bounds */
+ return ZSTD_cwksp_used(ws) == estimatedSpace;
+ } else {
+ /* Due to alignment, when reusing a workspace, we can actually consume 63 fewer or more bytes
+ * than estimatedSpace. See the comments in zstd_cwksp.h for details.
+ */
+ return (ZSTD_cwksp_used(ws) >= estimatedSpace - 63) && (ZSTD_cwksp_used(ws) <= estimatedSpace + 63);
+ }
+}
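+
+/* Example (illustrative): with estimatedSpace == 1000 on a reused,
+ * non-resized workspace, any used size in [937, 1063] is within bounds. */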
+
+
+MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws) {
+ return (size_t)((BYTE*)ws->allocStart - (BYTE*)ws->tableEnd);
+}
+
+MEM_STATIC int ZSTD_cwksp_check_available(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
+ return ZSTD_cwksp_available_space(ws) >= additionalNeededSpace;
+}
+
+MEM_STATIC int ZSTD_cwksp_check_too_large(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
+ return ZSTD_cwksp_check_available(
+ ws, additionalNeededSpace * ZSTD_WORKSPACETOOLARGE_FACTOR);
+}
+
+MEM_STATIC int ZSTD_cwksp_check_wasteful(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
+ return ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)
+ && ws->workspaceOversizedDuration > ZSTD_WORKSPACETOOLARGE_MAXDURATION;
+}
+
+MEM_STATIC void ZSTD_cwksp_bump_oversized_duration(
+ ZSTD_cwksp* ws, size_t additionalNeededSpace) {
+ if (ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)) {
+ ws->workspaceOversizedDuration++;
+ } else {
+ ws->workspaceOversizedDuration = 0;
+ }
+}
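+
+/*
+ * Illustrative usage sketch; not part of upstream zstd. It demonstrates the
+ * required phase ordering (objects, then buffers, then aligned/tables)
+ * against a caller-provided buffer. The sizes are arbitrary example values,
+ * and `buffer` is assumed to be pointer-aligned, as ZSTD_cwksp_init()
+ * requires. Returns non-zero if any reservation failed.
+ */
+MEM_STATIC int ZSTD_cwksp_usage_sketch(void* buffer, size_t bufferSize)
+{
+    ZSTD_cwksp ws;
+    ZSTD_cwksp_init(&ws, buffer, bufferSize, ZSTD_cwksp_static_alloc);
+    {   /* phase 1: objects (pointer-aligned, survive ZSTD_cwksp_clear()) */
+        void* const obj = ZSTD_cwksp_reserve_object(&ws, 64);
+        /* phase 2: buffers (unaligned, reserved from the end of the wksp) */
+        BYTE* const buf = ZSTD_cwksp_reserve_buffer(&ws, 1024);
+        /* phase 3: aligned/tables (64-byte aligned) */
+        void* const table = ZSTD_cwksp_reserve_table(&ws, 256 * sizeof(U32));
+        (void)obj; (void)buf; (void)table;
+    }
+    return ZSTD_cwksp_reserve_failed(&ws);
+}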
+
+
+#endif /* ZSTD_CWKSP_H */
diff --git a/lib/zstd/compress/zstd_double_fast.c b/lib/zstd/compress/zstd_double_fast.c
new file mode 100644
index 000000000..76933dea2
--- /dev/null
+++ b/lib/zstd/compress/zstd_double_fast.c
@@ -0,0 +1,696 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#include "zstd_compress_internal.h"
+#include "zstd_double_fast.h"
+
+
+void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
+ void const* end, ZSTD_dictTableLoadMethod_e dtlm)
+{
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ U32* const hashLarge = ms->hashTable;
+ U32 const hBitsL = cParams->hashLog;
+ U32 const mls = cParams->minMatch;
+ U32* const hashSmall = ms->chainTable;
+ U32 const hBitsS = cParams->chainLog;
+ const BYTE* const base = ms->window.base;
+ const BYTE* ip = base + ms->nextToUpdate;
+ const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
+ const U32 fastHashFillStep = 3;
+
+ /* Always insert every fastHashFillStep position into the hash tables.
+ * Insert the other positions into the large hash table if their entry
+ * is empty.
+ */
+ for (; ip + fastHashFillStep - 1 <= iend; ip += fastHashFillStep) {
+ U32 const curr = (U32)(ip - base);
+ U32 i;
+ for (i = 0; i < fastHashFillStep; ++i) {
+ size_t const smHash = ZSTD_hashPtr(ip + i, hBitsS, mls);
+ size_t const lgHash = ZSTD_hashPtr(ip + i, hBitsL, 8);
+ if (i == 0)
+ hashSmall[smHash] = curr + i;
+ if (i == 0 || hashLarge[lgHash] == 0)
+ hashLarge[lgHash] = curr + i;
+ /* Only load extra positions for ZSTD_dtlm_full */
+ if (dtlm == ZSTD_dtlm_fast)
+ break;
+ } }
+}
+
+
+FORCE_INLINE_TEMPLATE
+size_t ZSTD_compressBlock_doubleFast_noDict_generic(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize, U32 const mls /* template */)
+{
+ ZSTD_compressionParameters const* cParams = &ms->cParams;
+ U32* const hashLong = ms->hashTable;
+ const U32 hBitsL = cParams->hashLog;
+ U32* const hashSmall = ms->chainTable;
+ const U32 hBitsS = cParams->chainLog;
+ const BYTE* const base = ms->window.base;
+ const BYTE* const istart = (const BYTE*)src;
+ const BYTE* anchor = istart;
+ const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
+ /* presumes that, if there is a dictionary, it must be using Attach mode */
+ const U32 prefixLowestIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog);
+ const BYTE* const prefixLowest = base + prefixLowestIndex;
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* const ilimit = iend - HASH_READ_SIZE;
+ U32 offset_1=rep[0], offset_2=rep[1];
+ U32 offsetSaved = 0;
+
+ size_t mLength;
+ U32 offset;
+ U32 curr;
+
+ /* how many positions to search before increasing step size */
+ const size_t kStepIncr = 1 << kSearchStrength;
+ /* the position at which to increment the step size if no match is found */
+ const BYTE* nextStep;
+ size_t step; /* the current step size */
+
+ size_t hl0; /* the long hash at ip */
+ size_t hl1; /* the long hash at ip1 */
+
+ U32 idxl0; /* the long match index for ip */
+ U32 idxl1; /* the long match index for ip1 */
+
+ const BYTE* matchl0; /* the long match for ip */
+ const BYTE* matchs0; /* the short match for ip */
+ const BYTE* matchl1; /* the long match for ip1 */
+
+ const BYTE* ip = istart; /* the current position */
+ const BYTE* ip1; /* the next position */
+
+ DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_noDict_generic");
+
+ /* init */
+ ip += ((ip - prefixLowest) == 0);
+ {
+ U32 const current = (U32)(ip - base);
+ U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, current, cParams->windowLog);
+ U32 const maxRep = current - windowLow;
+ if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
+ if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
+ }
+
+ /* Outer Loop: one iteration per match found and stored */
+ while (1) {
+ step = 1;
+ nextStep = ip + kStepIncr;
+ ip1 = ip + step;
+
+ if (ip1 > ilimit) {
+ goto _cleanup;
+ }
+
+ hl0 = ZSTD_hashPtr(ip, hBitsL, 8);
+ idxl0 = hashLong[hl0];
+ matchl0 = base + idxl0;
+
+ /* Inner Loop: one iteration per search / position */
+ do {
+ const size_t hs0 = ZSTD_hashPtr(ip, hBitsS, mls);
+ const U32 idxs0 = hashSmall[hs0];
+ curr = (U32)(ip-base);
+ matchs0 = base + idxs0;
+
+ hashLong[hl0] = hashSmall[hs0] = curr; /* update hash tables */
+
+ /* check noDict repcode */
+ if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) {
+ mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
+ ip++;
+ ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_REPCODE_1, mLength);
+ goto _match_stored;
+ }
+
+ hl1 = ZSTD_hashPtr(ip1, hBitsL, 8);
+
+ if (idxl0 > prefixLowestIndex) {
+ /* check prefix long match */
+ if (MEM_read64(matchl0) == MEM_read64(ip)) {
+ mLength = ZSTD_count(ip+8, matchl0+8, iend) + 8;
+ offset = (U32)(ip-matchl0);
+ while (((ip>anchor) & (matchl0>prefixLowest)) && (ip[-1] == matchl0[-1])) { ip--; matchl0--; mLength++; } /* catch up */
+ goto _match_found;
+ }
+ }
+
+ idxl1 = hashLong[hl1];
+ matchl1 = base + idxl1;
+
+ if (idxs0 > prefixLowestIndex) {
+ /* check prefix short match */
+ if (MEM_read32(matchs0) == MEM_read32(ip)) {
+ goto _search_next_long;
+ }
+ }
+
+ if (ip1 >= nextStep) {
+ PREFETCH_L1(ip1 + 64);
+ PREFETCH_L1(ip1 + 128);
+ step++;
+ nextStep += kStepIncr;
+ }
+ ip = ip1;
+ ip1 += step;
+
+ hl0 = hl1;
+ idxl0 = idxl1;
+ matchl0 = matchl1;
+ #if defined(__aarch64__)
+ PREFETCH_L1(ip+256);
+ #endif
+ } while (ip1 <= ilimit);
+
+_cleanup:
+ /* save reps for next block */
+ rep[0] = offset_1 ? offset_1 : offsetSaved;
+ rep[1] = offset_2 ? offset_2 : offsetSaved;
+
+ /* Return the last literals size */
+ return (size_t)(iend - anchor);
+
+_search_next_long:
+
+ /* check prefix long +1 match */
+ if (idxl1 > prefixLowestIndex) {
+ if (MEM_read64(matchl1) == MEM_read64(ip1)) {
+ ip = ip1;
+ mLength = ZSTD_count(ip+8, matchl1+8, iend) + 8;
+ offset = (U32)(ip-matchl1);
+ while (((ip>anchor) & (matchl1>prefixLowest)) && (ip[-1] == matchl1[-1])) { ip--; matchl1--; mLength++; } /* catch up */
+ goto _match_found;
+ }
+ }
+
+ /* if no long +1 match, explore the short match we found */
+ mLength = ZSTD_count(ip+4, matchs0+4, iend) + 4;
+ offset = (U32)(ip - matchs0);
+ while (((ip>anchor) & (matchs0>prefixLowest)) && (ip[-1] == matchs0[-1])) { ip--; matchs0--; mLength++; } /* catch up */
+
+ /* fall-through */
+
+_match_found: /* requires ip, offset, mLength */
+ offset_2 = offset_1;
+ offset_1 = offset;
+
+ if (step < 4) {
+        /* It is unsafe to write this value back to the hashtable when ip1 is
+         * greater than or equal to the new ip we will have after we're done
+         * processing this match. Rather than perform that test directly
+         * (ip1 >= ip + mLength), which costs speed in practice, we do a
+         * simpler, more predictable test. The minimum match, even if we take
+         * a short match, is 4 bytes, so as long as step, the initial distance
+         * between ip and ip1, is less than 4, we know ip1 < the new ip. */
+ hashLong[hl1] = (U32)(ip1 - base);
+ }
+
+ ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength);
+
+_match_stored:
+ /* match found */
+ ip += mLength;
+ anchor = ip;
+
+ if (ip <= ilimit) {
+ /* Complementary insertion */
+ /* done after iLimit test, as candidates could be > iend-8 */
+ { U32 const indexToInsert = curr+2;
+ hashLong[ZSTD_hashPtr(base+indexToInsert, hBitsL, 8)] = indexToInsert;
+ hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base);
+ hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert;
+ hashSmall[ZSTD_hashPtr(ip-1, hBitsS, mls)] = (U32)(ip-1-base);
+ }
+
+ /* check immediate repcode */
+ while ( (ip <= ilimit)
+ && ( (offset_2>0)
+ & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) {
+ /* store sequence */
+ size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
+ U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; /* swap offset_2 <=> offset_1 */
+ hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip-base);
+ hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip-base);
+ ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, rLength);
+ ip += rLength;
+ anchor = ip;
+ continue; /* faster when present ... (?) */
+ }
+ }
+ }
+}
+
+
+FORCE_INLINE_TEMPLATE
+size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize,
+ U32 const mls /* template */)
+{
+ ZSTD_compressionParameters const* cParams = &ms->cParams;
+ U32* const hashLong = ms->hashTable;
+ const U32 hBitsL = cParams->hashLog;
+ U32* const hashSmall = ms->chainTable;
+ const U32 hBitsS = cParams->chainLog;
+ const BYTE* const base = ms->window.base;
+ const BYTE* const istart = (const BYTE*)src;
+ const BYTE* ip = istart;
+ const BYTE* anchor = istart;
+ const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
+ /* presumes that, if there is a dictionary, it must be using Attach mode */
+ const U32 prefixLowestIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog);
+ const BYTE* const prefixLowest = base + prefixLowestIndex;
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* const ilimit = iend - HASH_READ_SIZE;
+ U32 offset_1=rep[0], offset_2=rep[1];
+ U32 offsetSaved = 0;
+
+ const ZSTD_matchState_t* const dms = ms->dictMatchState;
+ const ZSTD_compressionParameters* const dictCParams = &dms->cParams;
+ const U32* const dictHashLong = dms->hashTable;
+ const U32* const dictHashSmall = dms->chainTable;
+ const U32 dictStartIndex = dms->window.dictLimit;
+ const BYTE* const dictBase = dms->window.base;
+ const BYTE* const dictStart = dictBase + dictStartIndex;
+ const BYTE* const dictEnd = dms->window.nextSrc;
+ const U32 dictIndexDelta = prefixLowestIndex - (U32)(dictEnd - dictBase);
+ const U32 dictHBitsL = dictCParams->hashLog;
+ const U32 dictHBitsS = dictCParams->chainLog;
+ const U32 dictAndPrefixLength = (U32)((ip - prefixLowest) + (dictEnd - dictStart));
+
+ DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_dictMatchState_generic");
+
+ /* if a dictionary is attached, it must be within window range */
+ assert(ms->window.dictLimit + (1U << cParams->windowLog) >= endIndex);
+
+ /* init */
+ ip += (dictAndPrefixLength == 0);
+
+ /* dictMatchState repCode checks don't currently handle repCode == 0
+ * disabling. */
+ assert(offset_1 <= dictAndPrefixLength);
+ assert(offset_2 <= dictAndPrefixLength);
+
+ /* Main Search Loop */
+ while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */
+ size_t mLength;
+ U32 offset;
+ size_t const h2 = ZSTD_hashPtr(ip, hBitsL, 8);
+ size_t const h = ZSTD_hashPtr(ip, hBitsS, mls);
+ size_t const dictHL = ZSTD_hashPtr(ip, dictHBitsL, 8);
+ size_t const dictHS = ZSTD_hashPtr(ip, dictHBitsS, mls);
+ U32 const curr = (U32)(ip-base);
+ U32 const matchIndexL = hashLong[h2];
+ U32 matchIndexS = hashSmall[h];
+ const BYTE* matchLong = base + matchIndexL;
+ const BYTE* match = base + matchIndexS;
+ const U32 repIndex = curr + 1 - offset_1;
+ const BYTE* repMatch = (repIndex < prefixLowestIndex) ?
+ dictBase + (repIndex - dictIndexDelta) :
+ base + repIndex;
+ hashLong[h2] = hashSmall[h] = curr; /* update hash tables */
+
+ /* check repcode */
+ if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
+ && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
+ const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
+ mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
+ ip++;
+ ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_REPCODE_1, mLength);
+ goto _match_stored;
+ }
+
+ if (matchIndexL > prefixLowestIndex) {
+ /* check prefix long match */
+ if (MEM_read64(matchLong) == MEM_read64(ip)) {
+ mLength = ZSTD_count(ip+8, matchLong+8, iend) + 8;
+ offset = (U32)(ip-matchLong);
+ while (((ip>anchor) & (matchLong>prefixLowest)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */
+ goto _match_found;
+ }
+ } else {
+ /* check dictMatchState long match */
+ U32 const dictMatchIndexL = dictHashLong[dictHL];
+ const BYTE* dictMatchL = dictBase + dictMatchIndexL;
+ assert(dictMatchL < dictEnd);
+
+ if (dictMatchL > dictStart && MEM_read64(dictMatchL) == MEM_read64(ip)) {
+ mLength = ZSTD_count_2segments(ip+8, dictMatchL+8, iend, dictEnd, prefixLowest) + 8;
+ offset = (U32)(curr - dictMatchIndexL - dictIndexDelta);
+ while (((ip>anchor) & (dictMatchL>dictStart)) && (ip[-1] == dictMatchL[-1])) { ip--; dictMatchL--; mLength++; } /* catch up */
+ goto _match_found;
+ } }
+
+ if (matchIndexS > prefixLowestIndex) {
+ /* check prefix short match */
+ if (MEM_read32(match) == MEM_read32(ip)) {
+ goto _search_next_long;
+ }
+ } else {
+ /* check dictMatchState short match */
+ U32 const dictMatchIndexS = dictHashSmall[dictHS];
+ match = dictBase + dictMatchIndexS;
+ matchIndexS = dictMatchIndexS + dictIndexDelta;
+
+ if (match > dictStart && MEM_read32(match) == MEM_read32(ip)) {
+ goto _search_next_long;
+ } }
+
+ ip += ((ip-anchor) >> kSearchStrength) + 1;
+#if defined(__aarch64__)
+ PREFETCH_L1(ip+256);
+#endif
+ continue;
+
+_search_next_long:
+
+ { size_t const hl3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
+ size_t const dictHLNext = ZSTD_hashPtr(ip+1, dictHBitsL, 8);
+ U32 const matchIndexL3 = hashLong[hl3];
+ const BYTE* matchL3 = base + matchIndexL3;
+ hashLong[hl3] = curr + 1;
+
+ /* check prefix long +1 match */
+ if (matchIndexL3 > prefixLowestIndex) {
+ if (MEM_read64(matchL3) == MEM_read64(ip+1)) {
+ mLength = ZSTD_count(ip+9, matchL3+8, iend) + 8;
+ ip++;
+ offset = (U32)(ip-matchL3);
+ while (((ip>anchor) & (matchL3>prefixLowest)) && (ip[-1] == matchL3[-1])) { ip--; matchL3--; mLength++; } /* catch up */
+ goto _match_found;
+ }
+ } else {
+ /* check dict long +1 match */
+ U32 const dictMatchIndexL3 = dictHashLong[dictHLNext];
+ const BYTE* dictMatchL3 = dictBase + dictMatchIndexL3;
+ assert(dictMatchL3 < dictEnd);
+ if (dictMatchL3 > dictStart && MEM_read64(dictMatchL3) == MEM_read64(ip+1)) {
+ mLength = ZSTD_count_2segments(ip+1+8, dictMatchL3+8, iend, dictEnd, prefixLowest) + 8;
+ ip++;
+ offset = (U32)(curr + 1 - dictMatchIndexL3 - dictIndexDelta);
+ while (((ip>anchor) & (dictMatchL3>dictStart)) && (ip[-1] == dictMatchL3[-1])) { ip--; dictMatchL3--; mLength++; } /* catch up */
+ goto _match_found;
+ } } }
+
+ /* if no long +1 match, explore the short match we found */
+ if (matchIndexS < prefixLowestIndex) {
+ mLength = ZSTD_count_2segments(ip+4, match+4, iend, dictEnd, prefixLowest) + 4;
+ offset = (U32)(curr - matchIndexS);
+ while (((ip>anchor) & (match>dictStart)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
+ } else {
+ mLength = ZSTD_count(ip+4, match+4, iend) + 4;
+ offset = (U32)(ip - match);
+ while (((ip>anchor) & (match>prefixLowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
+ }
+
+_match_found:
+ offset_2 = offset_1;
+ offset_1 = offset;
+
+ ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength);
+
+_match_stored:
+ /* match found */
+ ip += mLength;
+ anchor = ip;
+
+ if (ip <= ilimit) {
+ /* Complementary insertion */
+ /* done after iLimit test, as candidates could be > iend-8 */
+ { U32 const indexToInsert = curr+2;
+ hashLong[ZSTD_hashPtr(base+indexToInsert, hBitsL, 8)] = indexToInsert;
+ hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base);
+ hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert;
+ hashSmall[ZSTD_hashPtr(ip-1, hBitsS, mls)] = (U32)(ip-1-base);
+ }
+
+ /* check immediate repcode */
+ while (ip <= ilimit) {
+ U32 const current2 = (U32)(ip-base);
+ U32 const repIndex2 = current2 - offset_2;
+ const BYTE* repMatch2 = repIndex2 < prefixLowestIndex ?
+ dictBase + repIndex2 - dictIndexDelta :
+ base + repIndex2;
+            if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex2) >= 3 /* intentional underflow */)
+ && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
+ const BYTE* const repEnd2 = repIndex2 < prefixLowestIndex ? dictEnd : iend;
+ size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixLowest) + 4;
+ U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
+ ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, repLength2);
+ hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
+ hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
+ ip += repLength2;
+ anchor = ip;
+ continue;
+ }
+ break;
+ }
+ }
+ } /* while (ip < ilimit) */
+
+ /* save reps for next block */
+ rep[0] = offset_1 ? offset_1 : offsetSaved;
+ rep[1] = offset_2 ? offset_2 : offsetSaved;
+
+ /* Return the last literals size */
+ return (size_t)(iend - anchor);
+}
+
+#define ZSTD_GEN_DFAST_FN(dictMode, mls) \
+ static size_t ZSTD_compressBlock_doubleFast_##dictMode##_##mls( \
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], \
+ void const* src, size_t srcSize) \
+ { \
+ return ZSTD_compressBlock_doubleFast_##dictMode##_generic(ms, seqStore, rep, src, srcSize, mls); \
+ }
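+
+/* Example (illustrative): ZSTD_GEN_DFAST_FN(noDict, 4) expands to a wrapper
+ * named ZSTD_compressBlock_doubleFast_noDict_4() that calls the _generic
+ * template with mls=4, letting the compiler specialize the hot loop for each
+ * minimum match length. */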
+
+ZSTD_GEN_DFAST_FN(noDict, 4)
+ZSTD_GEN_DFAST_FN(noDict, 5)
+ZSTD_GEN_DFAST_FN(noDict, 6)
+ZSTD_GEN_DFAST_FN(noDict, 7)
+
+ZSTD_GEN_DFAST_FN(dictMatchState, 4)
+ZSTD_GEN_DFAST_FN(dictMatchState, 5)
+ZSTD_GEN_DFAST_FN(dictMatchState, 6)
+ZSTD_GEN_DFAST_FN(dictMatchState, 7)
+
+
+size_t ZSTD_compressBlock_doubleFast(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ const U32 mls = ms->cParams.minMatch;
+ switch(mls)
+ {
+ default: /* includes case 3 */
+ case 4 :
+ return ZSTD_compressBlock_doubleFast_noDict_4(ms, seqStore, rep, src, srcSize);
+ case 5 :
+ return ZSTD_compressBlock_doubleFast_noDict_5(ms, seqStore, rep, src, srcSize);
+ case 6 :
+ return ZSTD_compressBlock_doubleFast_noDict_6(ms, seqStore, rep, src, srcSize);
+ case 7 :
+ return ZSTD_compressBlock_doubleFast_noDict_7(ms, seqStore, rep, src, srcSize);
+ }
+}
+
+
+size_t ZSTD_compressBlock_doubleFast_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ const U32 mls = ms->cParams.minMatch;
+ switch(mls)
+ {
+ default: /* includes case 3 */
+ case 4 :
+ return ZSTD_compressBlock_doubleFast_dictMatchState_4(ms, seqStore, rep, src, srcSize);
+ case 5 :
+ return ZSTD_compressBlock_doubleFast_dictMatchState_5(ms, seqStore, rep, src, srcSize);
+ case 6 :
+ return ZSTD_compressBlock_doubleFast_dictMatchState_6(ms, seqStore, rep, src, srcSize);
+ case 7 :
+ return ZSTD_compressBlock_doubleFast_dictMatchState_7(ms, seqStore, rep, src, srcSize);
+ }
+}
+
+
+static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize,
+ U32 const mls /* template */)
+{
+ ZSTD_compressionParameters const* cParams = &ms->cParams;
+ U32* const hashLong = ms->hashTable;
+ U32 const hBitsL = cParams->hashLog;
+ U32* const hashSmall = ms->chainTable;
+ U32 const hBitsS = cParams->chainLog;
+ const BYTE* const istart = (const BYTE*)src;
+ const BYTE* ip = istart;
+ const BYTE* anchor = istart;
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* const ilimit = iend - 8;
+ const BYTE* const base = ms->window.base;
+ const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
+ const U32 lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog);
+ const U32 dictStartIndex = lowLimit;
+ const U32 dictLimit = ms->window.dictLimit;
+ const U32 prefixStartIndex = (dictLimit > lowLimit) ? dictLimit : lowLimit;
+ const BYTE* const prefixStart = base + prefixStartIndex;
+ const BYTE* const dictBase = ms->window.dictBase;
+ const BYTE* const dictStart = dictBase + dictStartIndex;
+ const BYTE* const dictEnd = dictBase + prefixStartIndex;
+ U32 offset_1=rep[0], offset_2=rep[1];
+
+ DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_extDict_generic (srcSize=%zu)", srcSize);
+
+ /* if extDict is invalidated due to maxDistance, switch to "regular" variant */
+ if (prefixStartIndex == dictStartIndex)
+ return ZSTD_compressBlock_doubleFast(ms, seqStore, rep, src, srcSize);
+
+ /* Search Loop */
+ while (ip < ilimit) { /* < instead of <=, because (ip+1) */
+ const size_t hSmall = ZSTD_hashPtr(ip, hBitsS, mls);
+ const U32 matchIndex = hashSmall[hSmall];
+ const BYTE* const matchBase = matchIndex < prefixStartIndex ? dictBase : base;
+ const BYTE* match = matchBase + matchIndex;
+
+ const size_t hLong = ZSTD_hashPtr(ip, hBitsL, 8);
+ const U32 matchLongIndex = hashLong[hLong];
+ const BYTE* const matchLongBase = matchLongIndex < prefixStartIndex ? dictBase : base;
+ const BYTE* matchLong = matchLongBase + matchLongIndex;
+
+ const U32 curr = (U32)(ip-base);
+ const U32 repIndex = curr + 1 - offset_1; /* offset_1 expected <= curr +1 */
+ const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base;
+ const BYTE* const repMatch = repBase + repIndex;
+ size_t mLength;
+ hashSmall[hSmall] = hashLong[hLong] = curr; /* update hash table */
+
+        if ((((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow: ensure repIndex doesn't overlap dict + prefix */
+ & (offset_1 <= curr+1 - dictStartIndex)) /* note: we are searching at curr+1 */
+ && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
+ const BYTE* repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
+ mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
+ ip++;
+ ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_REPCODE_1, mLength);
+ } else {
+ if ((matchLongIndex > dictStartIndex) && (MEM_read64(matchLong) == MEM_read64(ip))) {
+ const BYTE* const matchEnd = matchLongIndex < prefixStartIndex ? dictEnd : iend;
+ const BYTE* const lowMatchPtr = matchLongIndex < prefixStartIndex ? dictStart : prefixStart;
+ U32 offset;
+ mLength = ZSTD_count_2segments(ip+8, matchLong+8, iend, matchEnd, prefixStart) + 8;
+ offset = curr - matchLongIndex;
+ while (((ip>anchor) & (matchLong>lowMatchPtr)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */
+ offset_2 = offset_1;
+ offset_1 = offset;
+ ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength);
+
+ } else if ((matchIndex > dictStartIndex) && (MEM_read32(match) == MEM_read32(ip))) {
+ size_t const h3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
+ U32 const matchIndex3 = hashLong[h3];
+ const BYTE* const match3Base = matchIndex3 < prefixStartIndex ? dictBase : base;
+ const BYTE* match3 = match3Base + matchIndex3;
+ U32 offset;
+ hashLong[h3] = curr + 1;
+ if ( (matchIndex3 > dictStartIndex) && (MEM_read64(match3) == MEM_read64(ip+1)) ) {
+ const BYTE* const matchEnd = matchIndex3 < prefixStartIndex ? dictEnd : iend;
+ const BYTE* const lowMatchPtr = matchIndex3 < prefixStartIndex ? dictStart : prefixStart;
+ mLength = ZSTD_count_2segments(ip+9, match3+8, iend, matchEnd, prefixStart) + 8;
+ ip++;
+ offset = curr+1 - matchIndex3;
+ while (((ip>anchor) & (match3>lowMatchPtr)) && (ip[-1] == match3[-1])) { ip--; match3--; mLength++; } /* catch up */
+ } else {
+ const BYTE* const matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend;
+ const BYTE* const lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart;
+ mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4;
+ offset = curr - matchIndex;
+ while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
+ }
+ offset_2 = offset_1;
+ offset_1 = offset;
+ ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength);
+
+ } else {
+ ip += ((ip-anchor) >> kSearchStrength) + 1;
+ continue;
+ } }
+
+ /* move to next sequence start */
+ ip += mLength;
+ anchor = ip;
+
+ if (ip <= ilimit) {
+ /* Complementary insertion */
+ /* done after iLimit test, as candidates could be > iend-8 */
+ { U32 const indexToInsert = curr+2;
+ hashLong[ZSTD_hashPtr(base+indexToInsert, hBitsL, 8)] = indexToInsert;
+ hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base);
+ hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert;
+ hashSmall[ZSTD_hashPtr(ip-1, hBitsS, mls)] = (U32)(ip-1-base);
+ }
+
+ /* check immediate repcode */
+ while (ip <= ilimit) {
+ U32 const current2 = (U32)(ip-base);
+ U32 const repIndex2 = current2 - offset_2;
+ const BYTE* repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
+                if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) /* intentional underflow: ensure repIndex2 doesn't overlap dict + prefix */
+ & (offset_2 <= current2 - dictStartIndex))
+ && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
+ const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
+ size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
+ U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
+ ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, repLength2);
+ hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
+ hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
+ ip += repLength2;
+ anchor = ip;
+ continue;
+ }
+ break;
+ } } }
+
+ /* save reps for next block */
+ rep[0] = offset_1;
+ rep[1] = offset_2;
+
+ /* Return the last literals size */
+ return (size_t)(iend - anchor);
+}
+
+ZSTD_GEN_DFAST_FN(extDict, 4)
+ZSTD_GEN_DFAST_FN(extDict, 5)
+ZSTD_GEN_DFAST_FN(extDict, 6)
+ZSTD_GEN_DFAST_FN(extDict, 7)
+
+size_t ZSTD_compressBlock_doubleFast_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ U32 const mls = ms->cParams.minMatch;
+ switch(mls)
+ {
+ default: /* includes case 3 */
+ case 4 :
+ return ZSTD_compressBlock_doubleFast_extDict_4(ms, seqStore, rep, src, srcSize);
+ case 5 :
+ return ZSTD_compressBlock_doubleFast_extDict_5(ms, seqStore, rep, src, srcSize);
+ case 6 :
+ return ZSTD_compressBlock_doubleFast_extDict_6(ms, seqStore, rep, src, srcSize);
+ case 7 :
+ return ZSTD_compressBlock_doubleFast_extDict_7(ms, seqStore, rep, src, srcSize);
+ }
+}
diff --git a/lib/zstd/compress/zstd_double_fast.h b/lib/zstd/compress/zstd_double_fast.h
new file mode 100644
index 000000000..6822bde65
--- /dev/null
+++ b/lib/zstd/compress/zstd_double_fast.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_DOUBLE_FAST_H
+#define ZSTD_DOUBLE_FAST_H
+
+
+#include "../common/mem.h" /* U32 */
+#include "zstd_compress_internal.h" /* ZSTD_CCtx, size_t */
+
+void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
+ void const* end, ZSTD_dictTableLoadMethod_e dtlm);
+size_t ZSTD_compressBlock_doubleFast(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_doubleFast_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_doubleFast_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+
+
+
+#endif /* ZSTD_DOUBLE_FAST_H */
diff --git a/lib/zstd/compress/zstd_fast.c b/lib/zstd/compress/zstd_fast.c
new file mode 100644
index 000000000..a752e6bea
--- /dev/null
+++ b/lib/zstd/compress/zstd_fast.c
@@ -0,0 +1,675 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#include "zstd_compress_internal.h" /* ZSTD_hashPtr, ZSTD_count, ZSTD_storeSeq */
+#include "zstd_fast.h"
+
+
+void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
+ const void* const end,
+ ZSTD_dictTableLoadMethod_e dtlm)
+{
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ U32* const hashTable = ms->hashTable;
+ U32 const hBits = cParams->hashLog;
+ U32 const mls = cParams->minMatch;
+ const BYTE* const base = ms->window.base;
+ const BYTE* ip = base + ms->nextToUpdate;
+ const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
+ const U32 fastHashFillStep = 3;
+
+ /* Always insert every fastHashFillStep position into the hash table.
+ * Insert the other positions if their hash entry is empty.
+ */
+ for ( ; ip + fastHashFillStep < iend + 2; ip += fastHashFillStep) {
+ U32 const curr = (U32)(ip - base);
+ size_t const hash0 = ZSTD_hashPtr(ip, hBits, mls);
+ hashTable[hash0] = curr;
+ if (dtlm == ZSTD_dtlm_fast) continue;
+ /* Only load extra positions for ZSTD_dtlm_full */
+ { U32 p;
+ for (p = 1; p < fastHashFillStep; ++p) {
+ size_t const hash = ZSTD_hashPtr(ip + p, hBits, mls);
+ if (hashTable[hash] == 0) { /* not yet filled */
+ hashTable[hash] = curr + p;
+ } } } }
+}
+
+
+/*
+ * If you squint hard enough (and ignore repcodes), the search operation at any
+ * given position is broken into 4 stages:
+ *
+ * 1. Hash (map position to hash value via input read)
+ * 2. Lookup (map hash value to index via hashtable read)
+ * 3. Load (map index to value at that position via input read)
+ * 4. Compare
+ *
+ * Each of these steps involves a memory read at an address which is computed
+ * from the previous step. This means these steps must be sequenced and their
+ * latencies are cumulative.
+ *
+ * Rather than do 1->2->3->4 sequentially for a single position before moving
+ * onto the next, this implementation interleaves these operations across the
+ * next few positions:
+ *
+ * R = Repcode Read & Compare
+ * H = Hash
+ * T = Table Lookup
+ * M = Match Read & Compare
+ *
+ * Pos | Time -->
+ * ----+-------------------
+ * N | ... M
+ * N+1 | ... TM
+ * N+2 | R H T M
+ * N+3 | H TM
+ * N+4 | R H T M
+ * N+5 | H ...
+ * N+6 | R ...
+ *
+ * This is very much analogous to the pipelining of execution in a CPU. And just
+ * like a CPU, we have to dump the pipeline when we find a match (i.e., take a
+ * branch).
+ *
+ * When this happens, we throw away our current state, and do the following prep
+ * to re-enter the loop:
+ *
+ * Pos | Time -->
+ * ----+-------------------
+ * N | H T
+ * N+1 | H
+ *
+ * This is also the work we do at the beginning to enter the loop initially.
+ */
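+/*
+ * Illustrative mapping, not from upstream: in the loop below, "H" corresponds
+ * to the ZSTD_hashPtr() calls, "T" to the hashTable[] lookups, "M" to the
+ * MEM_read32() match loads and compares at ip0, and "R" to the repcode read
+ * and compare at ip2.
+ */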
+FORCE_INLINE_TEMPLATE size_t
+ZSTD_compressBlock_fast_noDict_generic(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize,
+ U32 const mls, U32 const hasStep)
+{
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ U32* const hashTable = ms->hashTable;
+ U32 const hlog = cParams->hashLog;
+ /* support stepSize of 0 */
+ size_t const stepSize = hasStep ? (cParams->targetLength + !(cParams->targetLength) + 1) : 2;
+ const BYTE* const base = ms->window.base;
+ const BYTE* const istart = (const BYTE*)src;
+ const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
+ const U32 prefixStartIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog);
+ const BYTE* const prefixStart = base + prefixStartIndex;
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* const ilimit = iend - HASH_READ_SIZE;
+
+ const BYTE* anchor = istart;
+ const BYTE* ip0 = istart;
+ const BYTE* ip1;
+ const BYTE* ip2;
+ const BYTE* ip3;
+ U32 current0;
+
+ U32 rep_offset1 = rep[0];
+ U32 rep_offset2 = rep[1];
+ U32 offsetSaved = 0;
+
+ size_t hash0; /* hash for ip0 */
+ size_t hash1; /* hash for ip1 */
+ U32 idx; /* match idx for ip0 */
+ U32 mval; /* src value at match idx */
+
+ U32 offcode;
+ const BYTE* match0;
+ size_t mLength;
+
+    /* ip0 and ip1 are always adjacent. The targetLength skipping and
+     * uncompressibility acceleration are applied to every other position,
+     * matching the behavior of #1562. step therefore represents the gap
+     * between pairs of positions, from ip0 to ip2 or ip1 to ip3. */
+ size_t step;
+ const BYTE* nextStep;
+ const size_t kStepIncr = (1 << (kSearchStrength - 1));
+
+ DEBUGLOG(5, "ZSTD_compressBlock_fast_generic");
+ ip0 += (ip0 == prefixStart);
+ { U32 const curr = (U32)(ip0 - base);
+ U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, cParams->windowLog);
+ U32 const maxRep = curr - windowLow;
+ if (rep_offset2 > maxRep) offsetSaved = rep_offset2, rep_offset2 = 0;
+ if (rep_offset1 > maxRep) offsetSaved = rep_offset1, rep_offset1 = 0;
+ }
+
+ /* start each op */
+_start: /* Requires: ip0 */
+
+ step = stepSize;
+ nextStep = ip0 + kStepIncr;
+
+ /* calculate positions, ip0 - anchor == 0, so we skip step calc */
+ ip1 = ip0 + 1;
+ ip2 = ip0 + step;
+ ip3 = ip2 + 1;
+
+ if (ip3 >= ilimit) {
+ goto _cleanup;
+ }
+
+ hash0 = ZSTD_hashPtr(ip0, hlog, mls);
+ hash1 = ZSTD_hashPtr(ip1, hlog, mls);
+
+ idx = hashTable[hash0];
+
+ do {
+ /* load repcode match for ip[2]*/
+ const U32 rval = MEM_read32(ip2 - rep_offset1);
+
+ /* write back hash table entry */
+ current0 = (U32)(ip0 - base);
+ hashTable[hash0] = current0;
+
+ /* check repcode at ip[2] */
+ if ((MEM_read32(ip2) == rval) & (rep_offset1 > 0)) {
+ ip0 = ip2;
+ match0 = ip0 - rep_offset1;
+ mLength = ip0[-1] == match0[-1];
+ ip0 -= mLength;
+ match0 -= mLength;
+ offcode = STORE_REPCODE_1;
+ mLength += 4;
+ goto _match;
+ }
+
+ /* load match for ip[0] */
+ if (idx >= prefixStartIndex) {
+ mval = MEM_read32(base + idx);
+ } else {
+ mval = MEM_read32(ip0) ^ 1; /* guaranteed to not match. */
+ }
+
+ /* check match at ip[0] */
+ if (MEM_read32(ip0) == mval) {
+ /* found a match! */
+ goto _offset;
+ }
+
+ /* lookup ip[1] */
+ idx = hashTable[hash1];
+
+ /* hash ip[2] */
+ hash0 = hash1;
+ hash1 = ZSTD_hashPtr(ip2, hlog, mls);
+
+ /* advance to next positions */
+ ip0 = ip1;
+ ip1 = ip2;
+ ip2 = ip3;
+
+ /* write back hash table entry */
+ current0 = (U32)(ip0 - base);
+ hashTable[hash0] = current0;
+
+ /* load match for ip[0] */
+ if (idx >= prefixStartIndex) {
+ mval = MEM_read32(base + idx);
+ } else {
+ mval = MEM_read32(ip0) ^ 1; /* guaranteed to not match. */
+ }
+
+ /* check match at ip[0] */
+ if (MEM_read32(ip0) == mval) {
+ /* found a match! */
+ goto _offset;
+ }
+
+ /* lookup ip[1] */
+ idx = hashTable[hash1];
+
+ /* hash ip[2] */
+ hash0 = hash1;
+ hash1 = ZSTD_hashPtr(ip2, hlog, mls);
+
+ /* advance to next positions */
+ ip0 = ip1;
+ ip1 = ip2;
+ ip2 = ip0 + step;
+ ip3 = ip1 + step;
+
+ /* calculate step */
+ if (ip2 >= nextStep) {
+ step++;
+ PREFETCH_L1(ip1 + 64);
+ PREFETCH_L1(ip1 + 128);
+ nextStep += kStepIncr;
+ }
+ } while (ip3 < ilimit);
+
+_cleanup:
+    /* Note that there are probably still a couple of positions we could search.
+     * However, it seems to be a meaningful performance hit to try to search
+     * them. So let's not. */
+
+ /* save reps for next block */
+ rep[0] = rep_offset1 ? rep_offset1 : offsetSaved;
+ rep[1] = rep_offset2 ? rep_offset2 : offsetSaved;
+
+ /* Return the last literals size */
+ return (size_t)(iend - anchor);
+
+_offset: /* Requires: ip0, idx */
+
+ /* Compute the offset code. */
+ match0 = base + idx;
+ rep_offset2 = rep_offset1;
+ rep_offset1 = (U32)(ip0-match0);
+ offcode = STORE_OFFSET(rep_offset1);
+ mLength = 4;
+
+ /* Count the backwards match length. */
+ while (((ip0>anchor) & (match0>prefixStart)) && (ip0[-1] == match0[-1])) {
+ ip0--;
+ match0--;
+ mLength++;
+ }
+
+_match: /* Requires: ip0, match0, offcode */
+
+ /* Count the forward length. */
+ mLength += ZSTD_count(ip0 + mLength, match0 + mLength, iend);
+
+ ZSTD_storeSeq(seqStore, (size_t)(ip0 - anchor), anchor, iend, offcode, mLength);
+
+ ip0 += mLength;
+ anchor = ip0;
+
+ /* write next hash table entry */
+ if (ip1 < ip0) {
+ hashTable[hash1] = (U32)(ip1 - base);
+ }
+
+ /* Fill table and check for immediate repcode. */
+ if (ip0 <= ilimit) {
+ /* Fill Table */
+ assert(base+current0+2 > istart); /* check base overflow */
+ hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2; /* here because current+2 could be > iend-8 */
+ hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base);
+
+ if (rep_offset2 > 0) { /* rep_offset2==0 means rep_offset2 is invalidated */
+ while ( (ip0 <= ilimit) && (MEM_read32(ip0) == MEM_read32(ip0 - rep_offset2)) ) {
+ /* store sequence */
+ size_t const rLength = ZSTD_count(ip0+4, ip0+4-rep_offset2, iend) + 4;
+ { U32 const tmpOff = rep_offset2; rep_offset2 = rep_offset1; rep_offset1 = tmpOff; } /* swap rep_offset2 <=> rep_offset1 */
+ hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base);
+ ip0 += rLength;
+ ZSTD_storeSeq(seqStore, 0 /*litLen*/, anchor, iend, STORE_REPCODE_1, rLength);
+ anchor = ip0;
+ continue; /* faster when present (confirmed on gcc-8) ... (?) */
+ } } }
+
+ goto _start;
+}
+
+#define ZSTD_GEN_FAST_FN(dictMode, mls, step) \
+ static size_t ZSTD_compressBlock_fast_##dictMode##_##mls##_##step( \
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], \
+ void const* src, size_t srcSize) \
+ { \
+ return ZSTD_compressBlock_fast_##dictMode##_generic(ms, seqStore, rep, src, srcSize, mls, step); \
+ }
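+
+/* For instance, ZSTD_GEN_FAST_FN(noDict, 4, 1) defines
+ * ZSTD_compressBlock_fast_noDict_4_1(), which simply forwards to
+ * ZSTD_compressBlock_fast_noDict_generic() with mls=4 and hasStep=1 hardwired,
+ * so the compiler can specialize the generic body on those constants. */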
+
+ZSTD_GEN_FAST_FN(noDict, 4, 1)
+ZSTD_GEN_FAST_FN(noDict, 5, 1)
+ZSTD_GEN_FAST_FN(noDict, 6, 1)
+ZSTD_GEN_FAST_FN(noDict, 7, 1)
+
+ZSTD_GEN_FAST_FN(noDict, 4, 0)
+ZSTD_GEN_FAST_FN(noDict, 5, 0)
+ZSTD_GEN_FAST_FN(noDict, 6, 0)
+ZSTD_GEN_FAST_FN(noDict, 7, 0)
+
+size_t ZSTD_compressBlock_fast(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ U32 const mls = ms->cParams.minMatch;
+ assert(ms->dictMatchState == NULL);
+ if (ms->cParams.targetLength > 1) {
+ switch(mls)
+ {
+ default: /* includes case 3 */
+ case 4 :
+ return ZSTD_compressBlock_fast_noDict_4_1(ms, seqStore, rep, src, srcSize);
+ case 5 :
+ return ZSTD_compressBlock_fast_noDict_5_1(ms, seqStore, rep, src, srcSize);
+ case 6 :
+ return ZSTD_compressBlock_fast_noDict_6_1(ms, seqStore, rep, src, srcSize);
+ case 7 :
+ return ZSTD_compressBlock_fast_noDict_7_1(ms, seqStore, rep, src, srcSize);
+ }
+ } else {
+ switch(mls)
+ {
+ default: /* includes case 3 */
+ case 4 :
+ return ZSTD_compressBlock_fast_noDict_4_0(ms, seqStore, rep, src, srcSize);
+ case 5 :
+ return ZSTD_compressBlock_fast_noDict_5_0(ms, seqStore, rep, src, srcSize);
+ case 6 :
+ return ZSTD_compressBlock_fast_noDict_6_0(ms, seqStore, rep, src, srcSize);
+ case 7 :
+ return ZSTD_compressBlock_fast_noDict_7_0(ms, seqStore, rep, src, srcSize);
+ }
+    }
+}
+
+FORCE_INLINE_TEMPLATE
+size_t ZSTD_compressBlock_fast_dictMatchState_generic(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize, U32 const mls, U32 const hasStep)
+{
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ U32* const hashTable = ms->hashTable;
+ U32 const hlog = cParams->hashLog;
+    /* support stepSize of 0 : targetLength + !targetLength maps 0 to 1 and leaves other values unchanged */
+ U32 const stepSize = cParams->targetLength + !(cParams->targetLength);
+ const BYTE* const base = ms->window.base;
+ const BYTE* const istart = (const BYTE*)src;
+ const BYTE* ip = istart;
+ const BYTE* anchor = istart;
+ const U32 prefixStartIndex = ms->window.dictLimit;
+ const BYTE* const prefixStart = base + prefixStartIndex;
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* const ilimit = iend - HASH_READ_SIZE;
+ U32 offset_1=rep[0], offset_2=rep[1];
+ U32 offsetSaved = 0;
+
+ const ZSTD_matchState_t* const dms = ms->dictMatchState;
+    const ZSTD_compressionParameters* const dictCParams = &dms->cParams;
+ const U32* const dictHashTable = dms->hashTable;
+ const U32 dictStartIndex = dms->window.dictLimit;
+ const BYTE* const dictBase = dms->window.base;
+ const BYTE* const dictStart = dictBase + dictStartIndex;
+ const BYTE* const dictEnd = dms->window.nextSrc;
+ const U32 dictIndexDelta = prefixStartIndex - (U32)(dictEnd - dictBase);
+ const U32 dictAndPrefixLength = (U32)(ip - prefixStart + dictEnd - dictStart);
+ const U32 dictHLog = dictCParams->hashLog;
+
+ /* if a dictionary is still attached, it necessarily means that
+ * it is within window size. So we just check it. */
+ const U32 maxDistance = 1U << cParams->windowLog;
+ const U32 endIndex = (U32)((size_t)(ip - base) + srcSize);
+ assert(endIndex - prefixStartIndex <= maxDistance);
+ (void)maxDistance; (void)endIndex; /* these variables are not used when assert() is disabled */
+
+ (void)hasStep; /* not currently specialized on whether it's accelerated */
+
+ /* ensure there will be no underflow
+ * when translating a dict index into a local index */
+ assert(prefixStartIndex >= (U32)(dictEnd - dictBase));
+
+ /* init */
+ DEBUGLOG(5, "ZSTD_compressBlock_fast_dictMatchState_generic");
+ ip += (dictAndPrefixLength == 0);
+    /* dictMatchState repCode checks don't currently handle the repCode == 0
+     * (repcodes disabled) case. */
+ assert(offset_1 <= dictAndPrefixLength);
+ assert(offset_2 <= dictAndPrefixLength);
+
+ /* Main Search Loop */
+ while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */
+ size_t mLength;
+ size_t const h = ZSTD_hashPtr(ip, hlog, mls);
+ U32 const curr = (U32)(ip-base);
+ U32 const matchIndex = hashTable[h];
+ const BYTE* match = base + matchIndex;
+ const U32 repIndex = curr + 1 - offset_1;
+ const BYTE* repMatch = (repIndex < prefixStartIndex) ?
+ dictBase + (repIndex - dictIndexDelta) :
+ base + repIndex;
+ hashTable[h] = curr; /* update hash table */
+
+ if ( ((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex isn't overlapping dict + prefix */
+ && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
+ const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
+ mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
+ ip++;
+ ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_REPCODE_1, mLength);
+ } else if ( (matchIndex <= prefixStartIndex) ) {
+ size_t const dictHash = ZSTD_hashPtr(ip, dictHLog, mls);
+ U32 const dictMatchIndex = dictHashTable[dictHash];
+ const BYTE* dictMatch = dictBase + dictMatchIndex;
+ if (dictMatchIndex <= dictStartIndex ||
+ MEM_read32(dictMatch) != MEM_read32(ip)) {
+ assert(stepSize >= 1);
+ ip += ((ip-anchor) >> kSearchStrength) + stepSize;
+ continue;
+ } else {
+ /* found a dict match */
+ U32 const offset = (U32)(curr-dictMatchIndex-dictIndexDelta);
+ mLength = ZSTD_count_2segments(ip+4, dictMatch+4, iend, dictEnd, prefixStart) + 4;
+ while (((ip>anchor) & (dictMatch>dictStart))
+ && (ip[-1] == dictMatch[-1])) {
+ ip--; dictMatch--; mLength++;
+ } /* catch up */
+ offset_2 = offset_1;
+ offset_1 = offset;
+ ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength);
+ }
+ } else if (MEM_read32(match) != MEM_read32(ip)) {
+ /* it's not a match, and we're not going to check the dictionary */
+ assert(stepSize >= 1);
+ ip += ((ip-anchor) >> kSearchStrength) + stepSize;
+ continue;
+ } else {
+ /* found a regular match */
+ U32 const offset = (U32)(ip-match);
+ mLength = ZSTD_count(ip+4, match+4, iend) + 4;
+ while (((ip>anchor) & (match>prefixStart))
+ && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
+ offset_2 = offset_1;
+ offset_1 = offset;
+ ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength);
+ }
+
+ /* match found */
+ ip += mLength;
+ anchor = ip;
+
+ if (ip <= ilimit) {
+ /* Fill Table */
+ assert(base+curr+2 > istart); /* check base overflow */
+ hashTable[ZSTD_hashPtr(base+curr+2, hlog, mls)] = curr+2; /* here because curr+2 could be > iend-8 */
+ hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);
+
+ /* check immediate repcode */
+ while (ip <= ilimit) {
+ U32 const current2 = (U32)(ip-base);
+ U32 const repIndex2 = current2 - offset_2;
+ const BYTE* repMatch2 = repIndex2 < prefixStartIndex ?
+ dictBase - dictIndexDelta + repIndex2 :
+ base + repIndex2;
+ if ( ((U32)((prefixStartIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */)
+ && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
+ const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
+ size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
+ U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
+ ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, repLength2);
+ hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
+ ip += repLength2;
+ anchor = ip;
+ continue;
+ }
+ break;
+ }
+ }
+ }
+
+ /* save reps for next block */
+ rep[0] = offset_1 ? offset_1 : offsetSaved;
+ rep[1] = offset_2 ? offset_2 : offsetSaved;
+
+ /* Return the last literals size */
+ return (size_t)(iend - anchor);
+}
+
+
+ZSTD_GEN_FAST_FN(dictMatchState, 4, 0)
+ZSTD_GEN_FAST_FN(dictMatchState, 5, 0)
+ZSTD_GEN_FAST_FN(dictMatchState, 6, 0)
+ZSTD_GEN_FAST_FN(dictMatchState, 7, 0)
+
+size_t ZSTD_compressBlock_fast_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ U32 const mls = ms->cParams.minMatch;
+ assert(ms->dictMatchState != NULL);
+ switch(mls)
+ {
+ default: /* includes case 3 */
+ case 4 :
+ return ZSTD_compressBlock_fast_dictMatchState_4_0(ms, seqStore, rep, src, srcSize);
+ case 5 :
+ return ZSTD_compressBlock_fast_dictMatchState_5_0(ms, seqStore, rep, src, srcSize);
+ case 6 :
+ return ZSTD_compressBlock_fast_dictMatchState_6_0(ms, seqStore, rep, src, srcSize);
+ case 7 :
+ return ZSTD_compressBlock_fast_dictMatchState_7_0(ms, seqStore, rep, src, srcSize);
+ }
+}
+
+
+static size_t ZSTD_compressBlock_fast_extDict_generic(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize, U32 const mls, U32 const hasStep)
+{
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ U32* const hashTable = ms->hashTable;
+ U32 const hlog = cParams->hashLog;
+    /* support stepSize of 0 : targetLength + !targetLength maps 0 to 1 and leaves other values unchanged */
+ U32 const stepSize = cParams->targetLength + !(cParams->targetLength);
+ const BYTE* const base = ms->window.base;
+ const BYTE* const dictBase = ms->window.dictBase;
+ const BYTE* const istart = (const BYTE*)src;
+ const BYTE* ip = istart;
+ const BYTE* anchor = istart;
+ const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
+ const U32 lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog);
+ const U32 dictStartIndex = lowLimit;
+ const BYTE* const dictStart = dictBase + dictStartIndex;
+ const U32 dictLimit = ms->window.dictLimit;
+ const U32 prefixStartIndex = dictLimit < lowLimit ? lowLimit : dictLimit;
+ const BYTE* const prefixStart = base + prefixStartIndex;
+ const BYTE* const dictEnd = dictBase + prefixStartIndex;
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* const ilimit = iend - 8;
+ U32 offset_1=rep[0], offset_2=rep[1];
+
+ (void)hasStep; /* not currently specialized on whether it's accelerated */
+
+ DEBUGLOG(5, "ZSTD_compressBlock_fast_extDict_generic (offset_1=%u)", offset_1);
+
+ /* switch to "regular" variant if extDict is invalidated due to maxDistance */
+ if (prefixStartIndex == dictStartIndex)
+ return ZSTD_compressBlock_fast(ms, seqStore, rep, src, srcSize);
+
+ /* Search Loop */
+    while (ip < ilimit) {  /* < instead of <=, because the repcode check reads at (ip+1) */
+ const size_t h = ZSTD_hashPtr(ip, hlog, mls);
+ const U32 matchIndex = hashTable[h];
+ const BYTE* const matchBase = matchIndex < prefixStartIndex ? dictBase : base;
+ const BYTE* match = matchBase + matchIndex;
+ const U32 curr = (U32)(ip-base);
+ const U32 repIndex = curr + 1 - offset_1;
+ const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base;
+ const BYTE* const repMatch = repBase + repIndex;
+ hashTable[h] = curr; /* update hash table */
+ DEBUGLOG(7, "offset_1 = %u , curr = %u", offset_1, curr);
+
+ if ( ( ((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow */
+ & (offset_1 <= curr+1 - dictStartIndex) ) /* note: we are searching at curr+1 */
+ && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
+ const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
+ size_t const rLength = ZSTD_count_2segments(ip+1 +4, repMatch +4, iend, repMatchEnd, prefixStart) + 4;
+ ip++;
+ ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_REPCODE_1, rLength);
+ ip += rLength;
+ anchor = ip;
+ } else {
+ if ( (matchIndex < dictStartIndex) ||
+ (MEM_read32(match) != MEM_read32(ip)) ) {
+ assert(stepSize >= 1);
+ ip += ((ip-anchor) >> kSearchStrength) + stepSize;
+ continue;
+ }
+ { const BYTE* const matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend;
+ const BYTE* const lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart;
+ U32 const offset = curr - matchIndex;
+ size_t mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4;
+ while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
+ offset_2 = offset_1; offset_1 = offset; /* update offset history */
+ ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength);
+ ip += mLength;
+ anchor = ip;
+ } }
+
+ if (ip <= ilimit) {
+ /* Fill Table */
+ hashTable[ZSTD_hashPtr(base+curr+2, hlog, mls)] = curr+2;
+ hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);
+ /* check immediate repcode */
+ while (ip <= ilimit) {
+ U32 const current2 = (U32)(ip-base);
+ U32 const repIndex2 = current2 - offset_2;
+ const BYTE* const repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
+ if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) & (offset_2 <= curr - dictStartIndex)) /* intentional overflow */
+ && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
+ const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
+ size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
+ { U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; } /* swap offset_2 <=> offset_1 */
+ ZSTD_storeSeq(seqStore, 0 /*litlen*/, anchor, iend, STORE_REPCODE_1, repLength2);
+ hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
+ ip += repLength2;
+ anchor = ip;
+ continue;
+ }
+ break;
+ } } }
+
+ /* save reps for next block */
+ rep[0] = offset_1;
+ rep[1] = offset_2;
+
+ /* Return the last literals size */
+ return (size_t)(iend - anchor);
+}
+
+ZSTD_GEN_FAST_FN(extDict, 4, 0)
+ZSTD_GEN_FAST_FN(extDict, 5, 0)
+ZSTD_GEN_FAST_FN(extDict, 6, 0)
+ZSTD_GEN_FAST_FN(extDict, 7, 0)
+
+size_t ZSTD_compressBlock_fast_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ U32 const mls = ms->cParams.minMatch;
+ switch(mls)
+ {
+ default: /* includes case 3 */
+ case 4 :
+ return ZSTD_compressBlock_fast_extDict_4_0(ms, seqStore, rep, src, srcSize);
+ case 5 :
+ return ZSTD_compressBlock_fast_extDict_5_0(ms, seqStore, rep, src, srcSize);
+ case 6 :
+ return ZSTD_compressBlock_fast_extDict_6_0(ms, seqStore, rep, src, srcSize);
+ case 7 :
+ return ZSTD_compressBlock_fast_extDict_7_0(ms, seqStore, rep, src, srcSize);
+ }
+}
diff --git a/lib/zstd/compress/zstd_fast.h b/lib/zstd/compress/zstd_fast.h
new file mode 100644
index 000000000..fddc2f532
--- /dev/null
+++ b/lib/zstd/compress/zstd_fast.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_FAST_H
+#define ZSTD_FAST_H
+
+
+#include "../common/mem.h" /* U32 */
+#include "zstd_compress_internal.h"
+
+void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
+ void const* end, ZSTD_dictTableLoadMethod_e dtlm);
+size_t ZSTD_compressBlock_fast(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_fast_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_fast_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+
+
+#endif /* ZSTD_FAST_H */
diff --git a/lib/zstd/compress/zstd_lazy.c b/lib/zstd/compress/zstd_lazy.c
new file mode 100644
index 000000000..0298a01a7
--- /dev/null
+++ b/lib/zstd/compress/zstd_lazy.c
@@ -0,0 +1,2102 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#include "zstd_compress_internal.h"
+#include "zstd_lazy.h"
+
+
+/*-*************************************
+* Binary Tree search
+***************************************/
+
+static void
+ZSTD_updateDUBT(ZSTD_matchState_t* ms,
+ const BYTE* ip, const BYTE* iend,
+ U32 mls)
+{
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ U32* const hashTable = ms->hashTable;
+ U32 const hashLog = cParams->hashLog;
+
+ U32* const bt = ms->chainTable;
+ U32 const btLog = cParams->chainLog - 1;
+ U32 const btMask = (1 << btLog) - 1;
+
+ const BYTE* const base = ms->window.base;
+ U32 const target = (U32)(ip - base);
+ U32 idx = ms->nextToUpdate;
+
+ if (idx != target)
+ DEBUGLOG(7, "ZSTD_updateDUBT, from %u to %u (dictLimit:%u)",
+ idx, target, ms->window.dictLimit);
+ assert(ip + 8 <= iend); /* condition for ZSTD_hashPtr */
+ (void)iend;
+
+ assert(idx >= ms->window.dictLimit); /* condition for valid base+idx */
+ for ( ; idx < target ; idx++) {
+ size_t const h = ZSTD_hashPtr(base + idx, hashLog, mls); /* assumption : ip + 8 <= iend */
+ U32 const matchIndex = hashTable[h];
+
+ U32* const nextCandidatePtr = bt + 2*(idx&btMask);
+ U32* const sortMarkPtr = nextCandidatePtr + 1;
+
+ DEBUGLOG(8, "ZSTD_updateDUBT: insert %u", idx);
+ hashTable[h] = idx; /* Update Hash Table */
+ *nextCandidatePtr = matchIndex; /* update BT like a chain */
+ *sortMarkPtr = ZSTD_DUBT_UNSORTED_MARK;
+ }
+ ms->nextToUpdate = target;
+}
+
+
+/* ZSTD_insertDUBT1() :
+ * sort one already inserted but unsorted position
+ * assumption : curr >= btlow == (curr - btmask)
+ * doesn't fail */
+static void
+ZSTD_insertDUBT1(const ZSTD_matchState_t* ms,
+ U32 curr, const BYTE* inputEnd,
+ U32 nbCompares, U32 btLow,
+ const ZSTD_dictMode_e dictMode)
+{
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ U32* const bt = ms->chainTable;
+ U32 const btLog = cParams->chainLog - 1;
+ U32 const btMask = (1 << btLog) - 1;
+ size_t commonLengthSmaller=0, commonLengthLarger=0;
+ const BYTE* const base = ms->window.base;
+ const BYTE* const dictBase = ms->window.dictBase;
+ const U32 dictLimit = ms->window.dictLimit;
+ const BYTE* const ip = (curr>=dictLimit) ? base + curr : dictBase + curr;
+ const BYTE* const iend = (curr>=dictLimit) ? inputEnd : dictBase + dictLimit;
+ const BYTE* const dictEnd = dictBase + dictLimit;
+ const BYTE* const prefixStart = base + dictLimit;
+ const BYTE* match;
+ U32* smallerPtr = bt + 2*(curr&btMask);
+ U32* largerPtr = smallerPtr + 1;
+ U32 matchIndex = *smallerPtr; /* this candidate is unsorted : next sorted candidate is reached through *smallerPtr, while *largerPtr contains previous unsorted candidate (which is already saved and can be overwritten) */
+ U32 dummy32; /* to be nullified at the end */
+ U32 const windowValid = ms->window.lowLimit;
+ U32 const maxDistance = 1U << cParams->windowLog;
+ U32 const windowLow = (curr - windowValid > maxDistance) ? curr - maxDistance : windowValid;
+
+
+ DEBUGLOG(8, "ZSTD_insertDUBT1(%u) (dictLimit=%u, lowLimit=%u)",
+ curr, dictLimit, windowLow);
+ assert(curr >= btLow);
+ assert(ip < iend); /* condition for ZSTD_count */
+
+ for (; nbCompares && (matchIndex > windowLow); --nbCompares) {
+ U32* const nextPtr = bt + 2*(matchIndex & btMask);
+ size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
+ assert(matchIndex < curr);
+ /* note : all candidates are now supposed sorted,
+ * but it's still possible to have nextPtr[1] == ZSTD_DUBT_UNSORTED_MARK
+ * when a real index has the same value as ZSTD_DUBT_UNSORTED_MARK */
+
+ if ( (dictMode != ZSTD_extDict)
+           || (matchIndex+matchLength >= dictLimit)  /* both in current segment */
+ || (curr < dictLimit) /* both in extDict */) {
+ const BYTE* const mBase = ( (dictMode != ZSTD_extDict)
+ || (matchIndex+matchLength >= dictLimit)) ?
+ base : dictBase;
+ assert( (matchIndex+matchLength >= dictLimit) /* might be wrong if extDict is incorrectly set to 0 */
+ || (curr < dictLimit) );
+ match = mBase + matchIndex;
+ matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
+ } else {
+ match = dictBase + matchIndex;
+ matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
+ if (matchIndex+matchLength >= dictLimit)
+ match = base + matchIndex; /* preparation for next read of match[matchLength] */
+ }
+
+ DEBUGLOG(8, "ZSTD_insertDUBT1: comparing %u with %u : found %u common bytes ",
+ curr, matchIndex, (U32)matchLength);
+
+ if (ip+matchLength == iend) { /* equal : no way to know if inf or sup */
+            break;   /* drop, to guarantee consistency; misses a bit of compression, but other solutions can corrupt the tree */
+ }
+
+ if (match[matchLength] < ip[matchLength]) { /* necessarily within buffer */
+ /* match is smaller than current */
+ *smallerPtr = matchIndex; /* update smaller idx */
+ commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
+ if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop searching */
+ DEBUGLOG(8, "ZSTD_insertDUBT1: %u (>btLow=%u) is smaller : next => %u",
+ matchIndex, btLow, nextPtr[1]);
+ smallerPtr = nextPtr+1; /* new "candidate" => larger than match, which was smaller than target */
+ matchIndex = nextPtr[1]; /* new matchIndex, larger than previous and closer to current */
+ } else {
+ /* match is larger than current */
+ *largerPtr = matchIndex;
+ commonLengthLarger = matchLength;
+ if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop searching */
+ DEBUGLOG(8, "ZSTD_insertDUBT1: %u (>btLow=%u) is larger => %u",
+ matchIndex, btLow, nextPtr[0]);
+ largerPtr = nextPtr;
+ matchIndex = nextPtr[0];
+ } }
+
+ *smallerPtr = *largerPtr = 0;
+}
+
+
+static size_t
+ZSTD_DUBT_findBetterDictMatch (
+ const ZSTD_matchState_t* ms,
+ const BYTE* const ip, const BYTE* const iend,
+ size_t* offsetPtr,
+ size_t bestLength,
+ U32 nbCompares,
+ U32 const mls,
+ const ZSTD_dictMode_e dictMode)
+{
+ const ZSTD_matchState_t * const dms = ms->dictMatchState;
+ const ZSTD_compressionParameters* const dmsCParams = &dms->cParams;
+ const U32 * const dictHashTable = dms->hashTable;
+ U32 const hashLog = dmsCParams->hashLog;
+ size_t const h = ZSTD_hashPtr(ip, hashLog, mls);
+ U32 dictMatchIndex = dictHashTable[h];
+
+ const BYTE* const base = ms->window.base;
+ const BYTE* const prefixStart = base + ms->window.dictLimit;
+ U32 const curr = (U32)(ip-base);
+ const BYTE* const dictBase = dms->window.base;
+ const BYTE* const dictEnd = dms->window.nextSrc;
+ U32 const dictHighLimit = (U32)(dms->window.nextSrc - dms->window.base);
+ U32 const dictLowLimit = dms->window.lowLimit;
+ U32 const dictIndexDelta = ms->window.lowLimit - dictHighLimit;
+
+ U32* const dictBt = dms->chainTable;
+ U32 const btLog = dmsCParams->chainLog - 1;
+ U32 const btMask = (1 << btLog) - 1;
+ U32 const btLow = (btMask >= dictHighLimit - dictLowLimit) ? dictLowLimit : dictHighLimit - btMask;
+
+ size_t commonLengthSmaller=0, commonLengthLarger=0;
+
+ (void)dictMode;
+ assert(dictMode == ZSTD_dictMatchState);
+
+ for (; nbCompares && (dictMatchIndex > dictLowLimit); --nbCompares) {
+ U32* const nextPtr = dictBt + 2*(dictMatchIndex & btMask);
+ size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
+ const BYTE* match = dictBase + dictMatchIndex;
+ matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
+ if (dictMatchIndex+matchLength >= dictHighLimit)
+ match = base + dictMatchIndex + dictIndexDelta; /* to prepare for next usage of match[matchLength] */
+
+ if (matchLength > bestLength) {
+ U32 matchIndex = dictMatchIndex + dictIndexDelta;
+ if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(curr-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) ) {
+ DEBUGLOG(9, "ZSTD_DUBT_findBetterDictMatch(%u) : found better match length %u -> %u and offsetCode %u -> %u (dictMatchIndex %u, matchIndex %u)",
+ curr, (U32)bestLength, (U32)matchLength, (U32)*offsetPtr, STORE_OFFSET(curr - matchIndex), dictMatchIndex, matchIndex);
+ bestLength = matchLength, *offsetPtr = STORE_OFFSET(curr - matchIndex);
+ }
+ if (ip+matchLength == iend) { /* reached end of input : ip[matchLength] is not valid, no way to know if it's larger or smaller than match */
+ break; /* drop, to guarantee consistency (miss a little bit of compression) */
+ }
+ }
+
+ if (match[matchLength] < ip[matchLength]) {
+ if (dictMatchIndex <= btLow) { break; } /* beyond tree size, stop the search */
+ commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
+ dictMatchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */
+ } else {
+ /* match is larger than current */
+ if (dictMatchIndex <= btLow) { break; } /* beyond tree size, stop the search */
+ commonLengthLarger = matchLength;
+ dictMatchIndex = nextPtr[0];
+ }
+ }
+
+ if (bestLength >= MINMATCH) {
+ U32 const mIndex = curr - (U32)STORED_OFFSET(*offsetPtr); (void)mIndex;
+ DEBUGLOG(8, "ZSTD_DUBT_findBetterDictMatch(%u) : found match of length %u and offsetCode %u (pos %u)",
+ curr, (U32)bestLength, (U32)*offsetPtr, mIndex);
+ }
+ return bestLength;
+}
+
+
+static size_t
+ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms,
+ const BYTE* const ip, const BYTE* const iend,
+ size_t* offsetPtr,
+ U32 const mls,
+ const ZSTD_dictMode_e dictMode)
+{
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ U32* const hashTable = ms->hashTable;
+ U32 const hashLog = cParams->hashLog;
+ size_t const h = ZSTD_hashPtr(ip, hashLog, mls);
+ U32 matchIndex = hashTable[h];
+
+ const BYTE* const base = ms->window.base;
+ U32 const curr = (U32)(ip-base);
+ U32 const windowLow = ZSTD_getLowestMatchIndex(ms, curr, cParams->windowLog);
+
+ U32* const bt = ms->chainTable;
+ U32 const btLog = cParams->chainLog - 1;
+ U32 const btMask = (1 << btLog) - 1;
+ U32 const btLow = (btMask >= curr) ? 0 : curr - btMask;
+ U32 const unsortLimit = MAX(btLow, windowLow);
+
+ U32* nextCandidate = bt + 2*(matchIndex&btMask);
+ U32* unsortedMark = bt + 2*(matchIndex&btMask) + 1;
+ U32 nbCompares = 1U << cParams->searchLog;
+ U32 nbCandidates = nbCompares;
+ U32 previousCandidate = 0;
+
+ DEBUGLOG(7, "ZSTD_DUBT_findBestMatch (%u) ", curr);
+ assert(ip <= iend-8); /* required for h calculation */
+ assert(dictMode != ZSTD_dedicatedDictSearch);
+
+ /* reach end of unsorted candidates list */
+ while ( (matchIndex > unsortLimit)
+ && (*unsortedMark == ZSTD_DUBT_UNSORTED_MARK)
+ && (nbCandidates > 1) ) {
+ DEBUGLOG(8, "ZSTD_DUBT_findBestMatch: candidate %u is unsorted",
+ matchIndex);
+        *unsortedMark = previousCandidate;  /* the unsortedMark becomes a reversed chain, used to walk back up to the original position */
+ previousCandidate = matchIndex;
+ matchIndex = *nextCandidate;
+ nextCandidate = bt + 2*(matchIndex&btMask);
+ unsortedMark = bt + 2*(matchIndex&btMask) + 1;
+        nbCandidates--;
+ }
+
+ /* nullify last candidate if it's still unsorted
+ * simplification, detrimental to compression ratio, beneficial for speed */
+ if ( (matchIndex > unsortLimit)
+ && (*unsortedMark==ZSTD_DUBT_UNSORTED_MARK) ) {
+ DEBUGLOG(7, "ZSTD_DUBT_findBestMatch: nullify last unsorted candidate %u",
+ matchIndex);
+ *nextCandidate = *unsortedMark = 0;
+ }
+
+ /* batch sort stacked candidates */
+ matchIndex = previousCandidate;
+ while (matchIndex) { /* will end on matchIndex == 0 */
+ U32* const nextCandidateIdxPtr = bt + 2*(matchIndex&btMask) + 1;
+ U32 const nextCandidateIdx = *nextCandidateIdxPtr;
+ ZSTD_insertDUBT1(ms, matchIndex, iend,
+ nbCandidates, unsortLimit, dictMode);
+ matchIndex = nextCandidateIdx;
+ nbCandidates++;
+ }
+
+ /* find longest match */
+ { size_t commonLengthSmaller = 0, commonLengthLarger = 0;
+ const BYTE* const dictBase = ms->window.dictBase;
+ const U32 dictLimit = ms->window.dictLimit;
+ const BYTE* const dictEnd = dictBase + dictLimit;
+ const BYTE* const prefixStart = base + dictLimit;
+ U32* smallerPtr = bt + 2*(curr&btMask);
+ U32* largerPtr = bt + 2*(curr&btMask) + 1;
+ U32 matchEndIdx = curr + 8 + 1;
+ U32 dummy32; /* to be nullified at the end */
+ size_t bestLength = 0;
+
+ matchIndex = hashTable[h];
+ hashTable[h] = curr; /* Update Hash Table */
+
+ for (; nbCompares && (matchIndex > windowLow); --nbCompares) {
+ U32* const nextPtr = bt + 2*(matchIndex & btMask);
+ size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
+ const BYTE* match;
+
+ if ((dictMode != ZSTD_extDict) || (matchIndex+matchLength >= dictLimit)) {
+ match = base + matchIndex;
+ matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
+ } else {
+ match = dictBase + matchIndex;
+ matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
+ if (matchIndex+matchLength >= dictLimit)
+ match = base + matchIndex; /* to prepare for next usage of match[matchLength] */
+ }
+
+ if (matchLength > bestLength) {
+ if (matchLength > matchEndIdx - matchIndex)
+ matchEndIdx = matchIndex + (U32)matchLength;
+ if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(curr-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) )
+ bestLength = matchLength, *offsetPtr = STORE_OFFSET(curr - matchIndex);
+ if (ip+matchLength == iend) { /* equal : no way to know if inf or sup */
+ if (dictMode == ZSTD_dictMatchState) {
+ nbCompares = 0; /* in addition to avoiding checking any
+ * further in this loop, make sure we
+ * skip checking in the dictionary. */
+ }
+ break; /* drop, to guarantee consistency (miss a little bit of compression) */
+ }
+ }
+
+ if (match[matchLength] < ip[matchLength]) {
+ /* match is smaller than current */
+ *smallerPtr = matchIndex; /* update smaller idx */
+ commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
+ if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */
+                smallerPtr = nextPtr+1;               /* new "smaller" candidate => larger than match, which was smaller than current */
+ matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */
+ } else {
+ /* match is larger than current */
+ *largerPtr = matchIndex;
+ commonLengthLarger = matchLength;
+ if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */
+ largerPtr = nextPtr;
+ matchIndex = nextPtr[0];
+ } }
+
+ *smallerPtr = *largerPtr = 0;
+
+ assert(nbCompares <= (1U << ZSTD_SEARCHLOG_MAX)); /* Check we haven't underflowed. */
+ if (dictMode == ZSTD_dictMatchState && nbCompares) {
+ bestLength = ZSTD_DUBT_findBetterDictMatch(
+ ms, ip, iend,
+ offsetPtr, bestLength, nbCompares,
+ mls, dictMode);
+ }
+
+ assert(matchEndIdx > curr+8); /* ensure nextToUpdate is increased */
+ ms->nextToUpdate = matchEndIdx - 8; /* skip repetitive patterns */
+ if (bestLength >= MINMATCH) {
+ U32 const mIndex = curr - (U32)STORED_OFFSET(*offsetPtr); (void)mIndex;
+ DEBUGLOG(8, "ZSTD_DUBT_findBestMatch(%u) : found match of length %u and offsetCode %u (pos %u)",
+ curr, (U32)bestLength, (U32)*offsetPtr, mIndex);
+ }
+ return bestLength;
+ }
+}
+
+
+/* ZSTD_BtFindBestMatch() : Tree updater, providing best match */
+FORCE_INLINE_TEMPLATE size_t
+ZSTD_BtFindBestMatch( ZSTD_matchState_t* ms,
+ const BYTE* const ip, const BYTE* const iLimit,
+ size_t* offsetPtr,
+ const U32 mls /* template */,
+ const ZSTD_dictMode_e dictMode)
+{
+ DEBUGLOG(7, "ZSTD_BtFindBestMatch");
+ if (ip < ms->window.base + ms->nextToUpdate) return 0; /* skipped area */
+ ZSTD_updateDUBT(ms, ip, iLimit, mls);
+ return ZSTD_DUBT_findBestMatch(ms, ip, iLimit, offsetPtr, mls, dictMode);
+}
+
+/* *********************************
+* Dedicated dict search
+***********************************/
+
+void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_matchState_t* ms, const BYTE* const ip)
+{
+ const BYTE* const base = ms->window.base;
+ U32 const target = (U32)(ip - base);
+ U32* const hashTable = ms->hashTable;
+ U32* const chainTable = ms->chainTable;
+ U32 const chainSize = 1 << ms->cParams.chainLog;
+ U32 idx = ms->nextToUpdate;
+ U32 const minChain = chainSize < target - idx ? target - chainSize : idx;
+ U32 const bucketSize = 1 << ZSTD_LAZY_DDSS_BUCKET_LOG;
+ U32 const cacheSize = bucketSize - 1;
+ U32 const chainAttempts = (1 << ms->cParams.searchLog) - cacheSize;
+ U32 const chainLimit = chainAttempts > 255 ? 255 : chainAttempts;
+
+ /* We know the hashtable is oversized by a factor of `bucketSize`.
+ * We are going to temporarily pretend `bucketSize == 1`, keeping only a
+ * single entry. We will use the rest of the space to construct a temporary
+ * chaintable.
+ */
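+    /* Layout sketch (illustrative; e.g. with ZSTD_LAZY_DDSS_BUCKET_LOG == 2,
+     * i.e. buckets of 4 U32s): the first slots of a bucket cache the most
+     * recent positions, and the last slot holds a packed chain pointer,
+     * (chainStart << 8) | chainLength, as written by the sorting pass below
+     * and unpacked again in ZSTD_dedicatedDictSearch_lazy_search(). */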
+ U32 const hashLog = ms->cParams.hashLog - ZSTD_LAZY_DDSS_BUCKET_LOG;
+ U32* const tmpHashTable = hashTable;
+ U32* const tmpChainTable = hashTable + ((size_t)1 << hashLog);
+ U32 const tmpChainSize = (U32)((1 << ZSTD_LAZY_DDSS_BUCKET_LOG) - 1) << hashLog;
+ U32 const tmpMinChain = tmpChainSize < target ? target - tmpChainSize : idx;
+ U32 hashIdx;
+
+ assert(ms->cParams.chainLog <= 24);
+ assert(ms->cParams.hashLog > ms->cParams.chainLog);
+ assert(idx != 0);
+ assert(tmpMinChain <= minChain);
+
+ /* fill conventional hash table and conventional chain table */
+ for ( ; idx < target; idx++) {
+ U32 const h = (U32)ZSTD_hashPtr(base + idx, hashLog, ms->cParams.minMatch);
+ if (idx >= tmpMinChain) {
+ tmpChainTable[idx - tmpMinChain] = hashTable[h];
+ }
+ tmpHashTable[h] = idx;
+ }
+
+ /* sort chains into ddss chain table */
+ {
+ U32 chainPos = 0;
+ for (hashIdx = 0; hashIdx < (1U << hashLog); hashIdx++) {
+ U32 count;
+ U32 countBeyondMinChain = 0;
+ U32 i = tmpHashTable[hashIdx];
+ for (count = 0; i >= tmpMinChain && count < cacheSize; count++) {
+ /* skip through the chain to the first position that won't be
+ * in the hash cache bucket */
+ if (i < minChain) {
+ countBeyondMinChain++;
+ }
+ i = tmpChainTable[i - tmpMinChain];
+ }
+ if (count == cacheSize) {
+ for (count = 0; count < chainLimit;) {
+ if (i < minChain) {
+ if (!i || ++countBeyondMinChain > cacheSize) {
+ /* only allow pulling `cacheSize` number of entries
+ * into the cache or chainTable beyond `minChain`,
+ * to replace the entries pulled out of the
+ * chainTable into the cache. This lets us reach
+ * back further without increasing the total number
+ * of entries in the chainTable, guaranteeing the
+ * DDSS chain table will fit into the space
+ * allocated for the regular one. */
+ break;
+ }
+ }
+ chainTable[chainPos++] = i;
+ count++;
+ if (i < tmpMinChain) {
+ break;
+ }
+ i = tmpChainTable[i - tmpMinChain];
+ }
+ } else {
+ count = 0;
+ }
+ if (count) {
+ tmpHashTable[hashIdx] = ((chainPos - count) << 8) + count;
+ } else {
+ tmpHashTable[hashIdx] = 0;
+ }
+ }
+ assert(chainPos <= chainSize); /* I believe this is guaranteed... */
+ }
+
+ /* move chain pointers into the last entry of each hash bucket */
+ for (hashIdx = (1 << hashLog); hashIdx; ) {
+ U32 const bucketIdx = --hashIdx << ZSTD_LAZY_DDSS_BUCKET_LOG;
+ U32 const chainPackedPointer = tmpHashTable[hashIdx];
+ U32 i;
+ for (i = 0; i < cacheSize; i++) {
+ hashTable[bucketIdx + i] = 0;
+ }
+ hashTable[bucketIdx + bucketSize - 1] = chainPackedPointer;
+ }
+
+ /* fill the buckets of the hash table */
+ for (idx = ms->nextToUpdate; idx < target; idx++) {
+ U32 const h = (U32)ZSTD_hashPtr(base + idx, hashLog, ms->cParams.minMatch)
+ << ZSTD_LAZY_DDSS_BUCKET_LOG;
+ U32 i;
+ /* Shift hash cache down 1. */
+ for (i = cacheSize - 1; i; i--)
+ hashTable[h + i] = hashTable[h + i - 1];
+ hashTable[h] = idx;
+ }
+
+ ms->nextToUpdate = target;
+}
+
+/* Returns the longest match length found in the dedicated dict search structure.
+ * If none are longer than the argument ml, then ml will be returned.
+ */
+FORCE_INLINE_TEMPLATE
+size_t ZSTD_dedicatedDictSearch_lazy_search(size_t* offsetPtr, size_t ml, U32 nbAttempts,
+ const ZSTD_matchState_t* const dms,
+ const BYTE* const ip, const BYTE* const iLimit,
+ const BYTE* const prefixStart, const U32 curr,
+ const U32 dictLimit, const size_t ddsIdx) {
+ const U32 ddsLowestIndex = dms->window.dictLimit;
+ const BYTE* const ddsBase = dms->window.base;
+ const BYTE* const ddsEnd = dms->window.nextSrc;
+ const U32 ddsSize = (U32)(ddsEnd - ddsBase);
+ const U32 ddsIndexDelta = dictLimit - ddsSize;
+ const U32 bucketSize = (1 << ZSTD_LAZY_DDSS_BUCKET_LOG);
+ const U32 bucketLimit = nbAttempts < bucketSize - 1 ? nbAttempts : bucketSize - 1;
+ U32 ddsAttempt;
+ U32 matchIndex;
+
+ for (ddsAttempt = 0; ddsAttempt < bucketSize - 1; ddsAttempt++) {
+ PREFETCH_L1(ddsBase + dms->hashTable[ddsIdx + ddsAttempt]);
+ }
+
+ {
+ U32 const chainPackedPointer = dms->hashTable[ddsIdx + bucketSize - 1];
+ U32 const chainIndex = chainPackedPointer >> 8;
+
+ PREFETCH_L1(&dms->chainTable[chainIndex]);
+ }
+
+ for (ddsAttempt = 0; ddsAttempt < bucketLimit; ddsAttempt++) {
+ size_t currentMl=0;
+ const BYTE* match;
+ matchIndex = dms->hashTable[ddsIdx + ddsAttempt];
+ match = ddsBase + matchIndex;
+
+ if (!matchIndex) {
+ return ml;
+ }
+
+ /* guaranteed by table construction */
+ (void)ddsLowestIndex;
+ assert(matchIndex >= ddsLowestIndex);
+ assert(match+4 <= ddsEnd);
+ if (MEM_read32(match) == MEM_read32(ip)) {
+ /* assumption : matchIndex <= dictLimit-4 (by table construction) */
+ currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, ddsEnd, prefixStart) + 4;
+ }
+
+ /* save best solution */
+ if (currentMl > ml) {
+ ml = currentMl;
+ *offsetPtr = STORE_OFFSET(curr - (matchIndex + ddsIndexDelta));
+ if (ip+currentMl == iLimit) {
+ /* best possible, avoids read overflow on next attempt */
+ return ml;
+ }
+ }
+ }
+
+ {
+ U32 const chainPackedPointer = dms->hashTable[ddsIdx + bucketSize - 1];
+ U32 chainIndex = chainPackedPointer >> 8;
+ U32 const chainLength = chainPackedPointer & 0xFF;
+ U32 const chainAttempts = nbAttempts - ddsAttempt;
+ U32 const chainLimit = chainAttempts > chainLength ? chainLength : chainAttempts;
+ U32 chainAttempt;
+
+ for (chainAttempt = 0 ; chainAttempt < chainLimit; chainAttempt++) {
+ PREFETCH_L1(ddsBase + dms->chainTable[chainIndex + chainAttempt]);
+ }
+
+ for (chainAttempt = 0 ; chainAttempt < chainLimit; chainAttempt++, chainIndex++) {
+ size_t currentMl=0;
+ const BYTE* match;
+ matchIndex = dms->chainTable[chainIndex];
+ match = ddsBase + matchIndex;
+
+ /* guaranteed by table construction */
+ assert(matchIndex >= ddsLowestIndex);
+ assert(match+4 <= ddsEnd);
+ if (MEM_read32(match) == MEM_read32(ip)) {
+ /* assumption : matchIndex <= dictLimit-4 (by table construction) */
+ currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, ddsEnd, prefixStart) + 4;
+ }
+
+ /* save best solution */
+ if (currentMl > ml) {
+ ml = currentMl;
+ *offsetPtr = STORE_OFFSET(curr - (matchIndex + ddsIndexDelta));
+ if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
+ }
+ }
+ }
+ return ml;
+}
+
+
+/* *********************************
+* Hash Chain
+***********************************/
+#define NEXT_IN_CHAIN(d, mask) chainTable[(d) & (mask)]
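+/* e.g. NEXT_IN_CHAIN(idx, chainMask) names the chainTable slot that stores the
+ * previous position hashing to the same bucket as idx, so repeatedly following
+ * it walks the candidates from newest to oldest. */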
+
+/* Update chains up to ip (excluded)
+ Assumption : always within prefix (i.e. not within extDict) */
+FORCE_INLINE_TEMPLATE U32 ZSTD_insertAndFindFirstIndex_internal(
+ ZSTD_matchState_t* ms,
+ const ZSTD_compressionParameters* const cParams,
+ const BYTE* ip, U32 const mls)
+{
+ U32* const hashTable = ms->hashTable;
+ const U32 hashLog = cParams->hashLog;
+ U32* const chainTable = ms->chainTable;
+ const U32 chainMask = (1 << cParams->chainLog) - 1;
+ const BYTE* const base = ms->window.base;
+ const U32 target = (U32)(ip - base);
+ U32 idx = ms->nextToUpdate;
+
+ while(idx < target) { /* catch up */
+ size_t const h = ZSTD_hashPtr(base+idx, hashLog, mls);
+ NEXT_IN_CHAIN(idx, chainMask) = hashTable[h];
+ hashTable[h] = idx;
+ idx++;
+ }
+
+ ms->nextToUpdate = target;
+ return hashTable[ZSTD_hashPtr(ip, hashLog, mls)];
+}
+
+U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip) {
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ return ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, ms->cParams.minMatch);
+}
+
+/* inlining is important to hardwire a hot branch (template emulation) */
+FORCE_INLINE_TEMPLATE
+size_t ZSTD_HcFindBestMatch(
+ ZSTD_matchState_t* ms,
+ const BYTE* const ip, const BYTE* const iLimit,
+ size_t* offsetPtr,
+ const U32 mls, const ZSTD_dictMode_e dictMode)
+{
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ U32* const chainTable = ms->chainTable;
+ const U32 chainSize = (1 << cParams->chainLog);
+ const U32 chainMask = chainSize-1;
+ const BYTE* const base = ms->window.base;
+ const BYTE* const dictBase = ms->window.dictBase;
+ const U32 dictLimit = ms->window.dictLimit;
+ const BYTE* const prefixStart = base + dictLimit;
+ const BYTE* const dictEnd = dictBase + dictLimit;
+ const U32 curr = (U32)(ip-base);
+ const U32 maxDistance = 1U << cParams->windowLog;
+ const U32 lowestValid = ms->window.lowLimit;
+ const U32 withinMaxDistance = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid;
+ const U32 isDictionary = (ms->loadedDictEnd != 0);
+ const U32 lowLimit = isDictionary ? lowestValid : withinMaxDistance;
+ const U32 minChain = curr > chainSize ? curr - chainSize : 0;
+ U32 nbAttempts = 1U << cParams->searchLog;
+    size_t ml=4-1;  /* start just below the 4-byte minimum, so any 4-byte match improves on it */
+
+ const ZSTD_matchState_t* const dms = ms->dictMatchState;
+ const U32 ddsHashLog = dictMode == ZSTD_dedicatedDictSearch
+ ? dms->cParams.hashLog - ZSTD_LAZY_DDSS_BUCKET_LOG : 0;
+ const size_t ddsIdx = dictMode == ZSTD_dedicatedDictSearch
+ ? ZSTD_hashPtr(ip, ddsHashLog, mls) << ZSTD_LAZY_DDSS_BUCKET_LOG : 0;
+
+ U32 matchIndex;
+
+ if (dictMode == ZSTD_dedicatedDictSearch) {
+ const U32* entry = &dms->hashTable[ddsIdx];
+ PREFETCH_L1(entry);
+ }
+
+ /* HC4 match finder */
+ matchIndex = ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, mls);
+
+ for ( ; (matchIndex>=lowLimit) & (nbAttempts>0) ; nbAttempts--) {
+ size_t currentMl=0;
+ if ((dictMode != ZSTD_extDict) || matchIndex >= dictLimit) {
+ const BYTE* const match = base + matchIndex;
+ assert(matchIndex >= dictLimit); /* ensures this is true if dictMode != ZSTD_extDict */
+ if (match[ml] == ip[ml]) /* potentially better */
+ currentMl = ZSTD_count(ip, match, iLimit);
+ } else {
+ const BYTE* const match = dictBase + matchIndex;
+ assert(match+4 <= dictEnd);
+ if (MEM_read32(match) == MEM_read32(ip)) /* assumption : matchIndex <= dictLimit-4 (by table construction) */
+ currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dictEnd, prefixStart) + 4;
+ }
+
+ /* save best solution */
+ if (currentMl > ml) {
+ ml = currentMl;
+ *offsetPtr = STORE_OFFSET(curr - matchIndex);
+ if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
+ }
+
+ if (matchIndex <= minChain) break;
+ matchIndex = NEXT_IN_CHAIN(matchIndex, chainMask);
+ }
+
+ assert(nbAttempts <= (1U << ZSTD_SEARCHLOG_MAX)); /* Check we haven't underflowed. */
+ if (dictMode == ZSTD_dedicatedDictSearch) {
+ ml = ZSTD_dedicatedDictSearch_lazy_search(offsetPtr, ml, nbAttempts, dms,
+ ip, iLimit, prefixStart, curr, dictLimit, ddsIdx);
+ } else if (dictMode == ZSTD_dictMatchState) {
+ const U32* const dmsChainTable = dms->chainTable;
+ const U32 dmsChainSize = (1 << dms->cParams.chainLog);
+ const U32 dmsChainMask = dmsChainSize - 1;
+ const U32 dmsLowestIndex = dms->window.dictLimit;
+ const BYTE* const dmsBase = dms->window.base;
+ const BYTE* const dmsEnd = dms->window.nextSrc;
+ const U32 dmsSize = (U32)(dmsEnd - dmsBase);
+ const U32 dmsIndexDelta = dictLimit - dmsSize;
+ const U32 dmsMinChain = dmsSize > dmsChainSize ? dmsSize - dmsChainSize : 0;
+
+ matchIndex = dms->hashTable[ZSTD_hashPtr(ip, dms->cParams.hashLog, mls)];
+
+ for ( ; (matchIndex>=dmsLowestIndex) & (nbAttempts>0) ; nbAttempts--) {
+ size_t currentMl=0;
+ const BYTE* const match = dmsBase + matchIndex;
+ assert(match+4 <= dmsEnd);
+ if (MEM_read32(match) == MEM_read32(ip)) /* assumption : matchIndex <= dictLimit-4 (by table construction) */
+ currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dmsEnd, prefixStart) + 4;
+
+ /* save best solution */
+ if (currentMl > ml) {
+ ml = currentMl;
+ assert(curr > matchIndex + dmsIndexDelta);
+ *offsetPtr = STORE_OFFSET(curr - (matchIndex + dmsIndexDelta));
+ if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
+ }
+
+ if (matchIndex <= dmsMinChain) break;
+
+ matchIndex = dmsChainTable[matchIndex & dmsChainMask];
+ }
+ }
+
+ return ml;
+}
+
+/* *********************************
+* (SIMD) Row-based matchfinder
+***********************************/
+/* Constants for row-based hash */
+#define ZSTD_ROW_HASH_TAG_OFFSET 16 /* byte offset of hashes in the match state's tagTable from the beginning of a row */
+#define ZSTD_ROW_HASH_TAG_BITS 8 /* nb bits to use for the tag */
+#define ZSTD_ROW_HASH_TAG_MASK ((1u << ZSTD_ROW_HASH_TAG_BITS) - 1)
+#define ZSTD_ROW_HASH_MAX_ENTRIES 64 /* absolute maximum number of entries per row, for all configurations */
+
+#define ZSTD_ROW_HASH_CACHE_MASK (ZSTD_ROW_HASH_CACHE_SIZE - 1)
+
+typedef U64 ZSTD_VecMask; /* Clarifies when we are interacting with a U64 representing a mask of matches */
+
+/* ZSTD_VecMask_next():
+ * Starting from the LSB, returns the index of the next set bit,
+ * i.e. the number of trailing zeroes.
+ */
+static U32 ZSTD_VecMask_next(ZSTD_VecMask val) {
+ assert(val != 0);
+# if (defined(__GNUC__) && ((__GNUC__ > 3) || ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))
+ if (sizeof(size_t) == 4) {
+ U32 mostSignificantWord = (U32)(val >> 32);
+ U32 leastSignificantWord = (U32)val;
+ if (leastSignificantWord == 0) {
+ return 32 + (U32)__builtin_ctz(mostSignificantWord);
+ } else {
+ return (U32)__builtin_ctz(leastSignificantWord);
+ }
+ } else {
+ return (U32)__builtin_ctzll(val);
+ }
+# else
+ /* Software ctz version: http://aggregate.org/MAGIC/#Trailing%20Zero%20Count
+ * and: https://stackoverflow.com/questions/2709430/count-number-of-bits-in-a-64-bit-long-big-integer
+ */
+ val = ~val & (val - 1ULL); /* Lowest set bit mask */
+ val = val - ((val >> 1) & 0x5555555555555555);
+ val = (val & 0x3333333333333333ULL) + ((val >> 2) & 0x3333333333333333ULL);
+ return (U32)((((val + (val >> 4)) & 0xF0F0F0F0F0F0F0FULL) * 0x101010101010101ULL) >> 56);
+# endif
+}
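+
+/* Usage sketch (illustrative): a consumer typically peels off one match bit at
+ * a time, e.g.
+ *     while (matches) {
+ *         U32 const bitIdx = ZSTD_VecMask_next(matches);
+ *         matches &= matches - 1;    (clear the lowest set bit)
+ *         ... examine the row entry indicated by bitIdx ...
+ *     }
+ */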
+
+/* ZSTD_rotateRight_*():
+ * Rotates a bitfield to the right by "count" bits.
+ * https://en.wikipedia.org/w/index.php?title=Circular_shift&oldid=991635599#Implementing_circular_shifts
+ */
+FORCE_INLINE_TEMPLATE
+U64 ZSTD_rotateRight_U64(U64 const value, U32 count) {
+ assert(count < 64);
+    count &= 0x3F; /* redundant after the assert, but helps fickle compiler rotate-idiom recognition */
+ return (value >> count) | (U64)(value << ((0U - count) & 0x3F));
+}
+
+FORCE_INLINE_TEMPLATE
+U32 ZSTD_rotateRight_U32(U32 const value, U32 count) {
+ assert(count < 32);
+    count &= 0x1F; /* redundant after the assert, but helps fickle compiler rotate-idiom recognition */
+ return (value >> count) | (U32)(value << ((0U - count) & 0x1F));
+}
+
+FORCE_INLINE_TEMPLATE
+U16 ZSTD_rotateRight_U16(U16 const value, U32 count) {
+ assert(count < 16);
+    count &= 0x0F; /* redundant after the assert, but helps fickle compiler rotate-idiom recognition */
+ return (value >> count) | (U16)(value << ((0U - count) & 0x0F));
+}
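+
+/* e.g. ZSTD_rotateRight_U16(0x1234, 4) == 0x4123 : the low 4 bits wrap around
+ * to the top. */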
+
+/* ZSTD_row_nextIndex():
+ * Returns the next index to insert at within a tagTable row, and updates the "head"
+ * value to reflect the update. The head essentially cycles backward through [0, entries-per-row).
+ */
+FORCE_INLINE_TEMPLATE U32 ZSTD_row_nextIndex(BYTE* const tagRow, U32 const rowMask) {
+ U32 const next = (*tagRow - 1) & rowMask;
+ *tagRow = (BYTE)next;
+ return next;
+}
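+
+/* e.g. with rowMask == 15 and *tagRow currently 5, this returns 4 and stores 4
+ * as the new head; successive calls yield 3, 2, 1, 0, 15, 14, ... */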
+
+/* ZSTD_isAligned():
+ * Checks that a pointer is aligned to "align" bytes which must be a power of 2.
+ */
+MEM_STATIC int ZSTD_isAligned(void const* ptr, size_t align) {
+ assert((align & (align - 1)) == 0);
+ return (((size_t)ptr) & (align - 1)) == 0;
+}
+
+/* ZSTD_row_prefetch():
+ * Performs prefetching for the hashTable and tagTable at a given row.
+ */
+FORCE_INLINE_TEMPLATE void ZSTD_row_prefetch(U32 const* hashTable, U16 const* tagTable, U32 const relRow, U32 const rowLog) {
+ PREFETCH_L1(hashTable + relRow);
+ if (rowLog >= 5) {
+ PREFETCH_L1(hashTable + relRow + 16);
+ /* Note: prefetching more of the hash table does not appear to be beneficial for 128-entry rows */
+ }
+ PREFETCH_L1(tagTable + relRow);
+ if (rowLog == 6) {
+ PREFETCH_L1(tagTable + relRow + 32);
+ }
+ assert(rowLog == 4 || rowLog == 5 || rowLog == 6);
+ assert(ZSTD_isAligned(hashTable + relRow, 64)); /* prefetched hash row always 64-byte aligned */
+ assert(ZSTD_isAligned(tagTable + relRow, (size_t)1 << rowLog)); /* prefetched tagRow sits on correct multiple of bytes (32,64,128) */
+}
+
+/* ZSTD_row_fillHashCache():
+ * Fill up the hash cache starting at idx, prefetching up to ZSTD_ROW_HASH_CACHE_SIZE entries,
+ * but not beyond iLimit.
+ */
+FORCE_INLINE_TEMPLATE void ZSTD_row_fillHashCache(ZSTD_matchState_t* ms, const BYTE* base,
+ U32 const rowLog, U32 const mls,
+ U32 idx, const BYTE* const iLimit)
+{
+ U32 const* const hashTable = ms->hashTable;
+ U16 const* const tagTable = ms->tagTable;
+ U32 const hashLog = ms->rowHashLog;
+ U32 const maxElemsToPrefetch = (base + idx) > iLimit ? 0 : (U32)(iLimit - (base + idx) + 1);
+ U32 const lim = idx + MIN(ZSTD_ROW_HASH_CACHE_SIZE, maxElemsToPrefetch);
+
+ for (; idx < lim; ++idx) {
+ U32 const hash = (U32)ZSTD_hashPtr(base + idx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls);
+ U32 const row = (hash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
+ ZSTD_row_prefetch(hashTable, tagTable, row, rowLog);
+ ms->hashCache[idx & ZSTD_ROW_HASH_CACHE_MASK] = hash;
+ }
+
+ DEBUGLOG(6, "ZSTD_row_fillHashCache(): [%u %u %u %u %u %u %u %u]", ms->hashCache[0], ms->hashCache[1],
+ ms->hashCache[2], ms->hashCache[3], ms->hashCache[4],
+ ms->hashCache[5], ms->hashCache[6], ms->hashCache[7]);
+}
+
+/* ZSTD_row_nextCachedHash():
+ * Returns the hash of base + idx, and replaces the hash in the hash cache with the byte at
+ * base + idx + ZSTD_ROW_HASH_CACHE_SIZE. Also prefetches the appropriate rows from hashTable and tagTable.
+ */
+FORCE_INLINE_TEMPLATE U32 ZSTD_row_nextCachedHash(U32* cache, U32 const* hashTable,
+ U16 const* tagTable, BYTE const* base,
+ U32 idx, U32 const hashLog,
+ U32 const rowLog, U32 const mls)
+{
+ U32 const newHash = (U32)ZSTD_hashPtr(base+idx+ZSTD_ROW_HASH_CACHE_SIZE, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls);
+ U32 const row = (newHash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
+ ZSTD_row_prefetch(hashTable, tagTable, row, rowLog);
+ { U32 const hash = cache[idx & ZSTD_ROW_HASH_CACHE_MASK];
+ cache[idx & ZSTD_ROW_HASH_CACHE_MASK] = newHash;
+ return hash;
+ }
+}
+
+/* ZSTD_row_update_internalImpl():
+ * Updates the hash table with positions starting from updateStartIdx until updateEndIdx.
+ */
+FORCE_INLINE_TEMPLATE void ZSTD_row_update_internalImpl(ZSTD_matchState_t* ms,
+ U32 updateStartIdx, U32 const updateEndIdx,
+ U32 const mls, U32 const rowLog,
+ U32 const rowMask, U32 const useCache)
+{
+ U32* const hashTable = ms->hashTable;
+ U16* const tagTable = ms->tagTable;
+ U32 const hashLog = ms->rowHashLog;
+ const BYTE* const base = ms->window.base;
+
+ DEBUGLOG(6, "ZSTD_row_update_internalImpl(): updateStartIdx=%u, updateEndIdx=%u", updateStartIdx, updateEndIdx);
+ for (; updateStartIdx < updateEndIdx; ++updateStartIdx) {
+ U32 const hash = useCache ? ZSTD_row_nextCachedHash(ms->hashCache, hashTable, tagTable, base, updateStartIdx, hashLog, rowLog, mls)
+ : (U32)ZSTD_hashPtr(base + updateStartIdx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls);
+ U32 const relRow = (hash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
+ U32* const row = hashTable + relRow;
+ BYTE* tagRow = (BYTE*)(tagTable + relRow); /* Though tagTable is laid out as a table of U16, each tag is only 1 byte.
+ Explicit cast allows us to get exact desired position within each row */
+ U32 const pos = ZSTD_row_nextIndex(tagRow, rowMask);
+
+ assert(hash == ZSTD_hashPtr(base + updateStartIdx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls));
+ ((BYTE*)tagRow)[pos + ZSTD_ROW_HASH_TAG_OFFSET] = hash & ZSTD_ROW_HASH_TAG_MASK;
+ row[pos] = updateStartIdx;
+ }
+}
+
+/* ZSTD_row_update_internal():
+ * Inserts the byte at ip into the appropriate position in the hash table, and updates ms->nextToUpdate.
+ * Skips over sections of long matches when necessary.
+ */
+FORCE_INLINE_TEMPLATE void ZSTD_row_update_internal(ZSTD_matchState_t* ms, const BYTE* ip,
+ U32 const mls, U32 const rowLog,
+ U32 const rowMask, U32 const useCache)
+{
+ U32 idx = ms->nextToUpdate;
+ const BYTE* const base = ms->window.base;
+ const U32 target = (U32)(ip - base);
+ const U32 kSkipThreshold = 384;
+ const U32 kMaxMatchStartPositionsToUpdate = 96;
+ const U32 kMaxMatchEndPositionsToUpdate = 32;
+
+ if (useCache) {
+ /* Only skip positions when using hash cache, i.e.
+ * if we are loading a dict, don't skip anything.
+ * If we decide to skip, then we only update a set number
+ * of positions at the beginning and end of the match.
+ */
+ if (UNLIKELY(target - idx > kSkipThreshold)) {
+ U32 const bound = idx + kMaxMatchStartPositionsToUpdate;
+ ZSTD_row_update_internalImpl(ms, idx, bound, mls, rowLog, rowMask, useCache);
+ idx = target - kMaxMatchEndPositionsToUpdate;
+ ZSTD_row_fillHashCache(ms, base, rowLog, mls, idx, ip+1);
+ }
+ }
+ assert(target >= idx);
+ ZSTD_row_update_internalImpl(ms, idx, target, mls, rowLog, rowMask, useCache);
+ ms->nextToUpdate = target;
+}
+
+/* ZSTD_row_update():
+ * External wrapper for ZSTD_row_update_internal(). Used for filling the hashtable during dictionary
+ * processing.
+ */
+void ZSTD_row_update(ZSTD_matchState_t* const ms, const BYTE* ip) {
+ const U32 rowLog = BOUNDED(4, ms->cParams.searchLog, 6);
+ const U32 rowMask = (1u << rowLog) - 1;
+ const U32 mls = MIN(ms->cParams.minMatch, 6 /* mls caps out at 6 */);
+
+ DEBUGLOG(5, "ZSTD_row_update(), rowLog=%u", rowLog);
+    ZSTD_row_update_internal(ms, ip, mls, rowLog, rowMask, 0 /* don't use cache */);
+}
+
+#if defined(ZSTD_ARCH_X86_SSE2)
+FORCE_INLINE_TEMPLATE ZSTD_VecMask
+ZSTD_row_getSSEMask(int nbChunks, const BYTE* const src, const BYTE tag, const U32 head)
+{
+ const __m128i comparisonMask = _mm_set1_epi8((char)tag);
+ int matches[4] = {0};
+ int i;
+ assert(nbChunks == 1 || nbChunks == 2 || nbChunks == 4);
+ for (i=0; i<nbChunks; i++) {
+ const __m128i chunk = _mm_loadu_si128((const __m128i*)(const void*)(src + 16*i));
+ const __m128i equalMask = _mm_cmpeq_epi8(chunk, comparisonMask);
+ matches[i] = _mm_movemask_epi8(equalMask);
+ }
+ if (nbChunks == 1) return ZSTD_rotateRight_U16((U16)matches[0], head);
+ if (nbChunks == 2) return ZSTD_rotateRight_U32((U32)matches[1] << 16 | (U32)matches[0], head);
+ assert(nbChunks == 4);
+ return ZSTD_rotateRight_U64((U64)matches[3] << 48 | (U64)matches[2] << 32 | (U64)matches[1] << 16 | (U64)matches[0], head);
+}
+#endif
+
+/* Returns a ZSTD_VecMask (U32) that has the nth bit set to 1 if the newly-computed "tag" matches
+ * the hash at the nth position in a row of the tagTable.
+ * Each row is a circular buffer beginning at the value of "head". So we must rotate the "matches" bitfield
+ * to match up with the actual layout of the entries within the hashTable */
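+/* e.g. after the rotation by "head", bit n of the returned mask corresponds to
+ * row position (head + n) & (rowEntries - 1), which is presumably how the
+ * search loop translates a ZSTD_VecMask_next() result back into a row index. */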
+FORCE_INLINE_TEMPLATE ZSTD_VecMask
+ZSTD_row_getMatchMask(const BYTE* const tagRow, const BYTE tag, const U32 head, const U32 rowEntries)
+{
+ const BYTE* const src = tagRow + ZSTD_ROW_HASH_TAG_OFFSET;
+ assert((rowEntries == 16) || (rowEntries == 32) || rowEntries == 64);
+ assert(rowEntries <= ZSTD_ROW_HASH_MAX_ENTRIES);
+    assert((rowEntries == 16) || (rowEntries == 32) || (rowEntries == 64));
+#if defined(ZSTD_ARCH_X86_SSE2)
+
+ return ZSTD_row_getSSEMask(rowEntries / 16, src, tag, head);
+
+#else /* SW or NEON-LE */
+
+# if defined(ZSTD_ARCH_ARM_NEON)
+ /* This NEON path only works for little endian - otherwise use SWAR below */
+ if (MEM_isLittleEndian()) {
+ if (rowEntries == 16) {
+ const uint8x16_t chunk = vld1q_u8(src);
+ const uint16x8_t equalMask = vreinterpretq_u16_u8(vceqq_u8(chunk, vdupq_n_u8(tag)));
+ const uint16x8_t t0 = vshlq_n_u16(equalMask, 7);
+ const uint32x4_t t1 = vreinterpretq_u32_u16(vsriq_n_u16(t0, t0, 14));
+ const uint64x2_t t2 = vreinterpretq_u64_u32(vshrq_n_u32(t1, 14));
+ const uint8x16_t t3 = vreinterpretq_u8_u64(vsraq_n_u64(t2, t2, 28));
+ const U16 hi = (U16)vgetq_lane_u8(t3, 8);
+ const U16 lo = (U16)vgetq_lane_u8(t3, 0);
+ return ZSTD_rotateRight_U16((hi << 8) | lo, head);
+ } else if (rowEntries == 32) {
+ const uint16x8x2_t chunk = vld2q_u16((const U16*)(const void*)src);
+ const uint8x16_t chunk0 = vreinterpretq_u8_u16(chunk.val[0]);
+ const uint8x16_t chunk1 = vreinterpretq_u8_u16(chunk.val[1]);
+ const uint8x16_t equalMask0 = vceqq_u8(chunk0, vdupq_n_u8(tag));
+ const uint8x16_t equalMask1 = vceqq_u8(chunk1, vdupq_n_u8(tag));
+ const int8x8_t pack0 = vqmovn_s16(vreinterpretq_s16_u8(equalMask0));
+ const int8x8_t pack1 = vqmovn_s16(vreinterpretq_s16_u8(equalMask1));
+ const uint8x8_t t0 = vreinterpret_u8_s8(pack0);
+ const uint8x8_t t1 = vreinterpret_u8_s8(pack1);
+ const uint8x8_t t2 = vsri_n_u8(t1, t0, 2);
+ const uint8x8x2_t t3 = vuzp_u8(t2, t0);
+ const uint8x8_t t4 = vsri_n_u8(t3.val[1], t3.val[0], 4);
+ const U32 matches = vget_lane_u32(vreinterpret_u32_u8(t4), 0);
+ return ZSTD_rotateRight_U32(matches, head);
+ } else { /* rowEntries == 64 */
+ const uint8x16x4_t chunk = vld4q_u8(src);
+ const uint8x16_t dup = vdupq_n_u8(tag);
+ const uint8x16_t cmp0 = vceqq_u8(chunk.val[0], dup);
+ const uint8x16_t cmp1 = vceqq_u8(chunk.val[1], dup);
+ const uint8x16_t cmp2 = vceqq_u8(chunk.val[2], dup);
+ const uint8x16_t cmp3 = vceqq_u8(chunk.val[3], dup);
+
+ const uint8x16_t t0 = vsriq_n_u8(cmp1, cmp0, 1);
+ const uint8x16_t t1 = vsriq_n_u8(cmp3, cmp2, 1);
+ const uint8x16_t t2 = vsriq_n_u8(t1, t0, 2);
+ const uint8x16_t t3 = vsriq_n_u8(t2, t2, 4);
+ const uint8x8_t t4 = vshrn_n_u16(vreinterpretq_u16_u8(t3), 4);
+ const U64 matches = vget_lane_u64(vreinterpret_u64_u8(t4), 0);
+ return ZSTD_rotateRight_U64(matches, head);
+ }
+ }
+# endif /* ZSTD_ARCH_ARM_NEON */
+ /* SWAR */
+ { const size_t chunkSize = sizeof(size_t);
+ const size_t shiftAmount = ((chunkSize * 8) - chunkSize);
+ const size_t xFF = ~((size_t)0);
+ const size_t x01 = xFF / 0xFF;
+ const size_t x80 = x01 << 7;
+ const size_t splatChar = tag * x01;
+ ZSTD_VecMask matches = 0;
+ int i = rowEntries - chunkSize;
+ assert((sizeof(size_t) == 4) || (sizeof(size_t) == 8));
+        if (MEM_isLittleEndian()) { /* endianness is a runtime property, so we keep two separate loops */
+ const size_t extractMagic = (xFF / 0x7F) >> chunkSize;
+ do {
+ size_t chunk = MEM_readST(&src[i]);
+ chunk ^= splatChar;
+ chunk = (((chunk | x80) - x01) | chunk) & x80;
+ matches <<= chunkSize;
+ matches |= (chunk * extractMagic) >> shiftAmount;
+ i -= chunkSize;
+ } while (i >= 0);
+ } else { /* big endian: reverse bits during extraction */
+ const size_t msb = xFF ^ (xFF >> 1);
+ const size_t extractMagic = (msb / 0x1FF) | msb;
+ do {
+ size_t chunk = MEM_readST(&src[i]);
+ chunk ^= splatChar;
+ chunk = (((chunk | x80) - x01) | chunk) & x80;
+ matches <<= chunkSize;
+ matches |= ((chunk >> 7) * extractMagic) >> shiftAmount;
+ i -= chunkSize;
+ } while (i >= 0);
+ }
+ matches = ~matches;
+ if (rowEntries == 16) {
+ return ZSTD_rotateRight_U16((U16)matches, head);
+ } else if (rowEntries == 32) {
+ return ZSTD_rotateRight_U32((U32)matches, head);
+ } else {
+ return ZSTD_rotateRight_U64((U64)matches, head);
+ }
+ }
+#endif
+}
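+
+/* Editorial note on the SWAR branch above: after `chunk ^= splatChar`, a byte
+ * lane is 0x00 exactly where it matched the tag. In
+ *   (((chunk | x80) - x01) | chunk) & x80
+ * the `| x80` guarantees the per-lane subtraction of x01 never borrows into
+ * the neighboring byte, so each lane ends up as 0x00 (match) or 0x80
+ * (mismatch). In the little-endian loop, the multiply by extractMagic gathers
+ * those per-byte high bits into the top bits of the word, `>> shiftAmount`
+ * moves them to the low bits, and the final `matches = ~matches` flips them
+ * so that a 1 bit means "tag matched". */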
+
+/* The high-level approach of the SIMD row-based match finder is as follows:
+ * - Figure out where to insert the new entry:
+ *   - Generate a hash for the position; its low bits double as a 1-byte "short hash", which is our "tag".
+ *   - The hashTable is effectively split into groups or "rows" of 16, 32, or 64 U32 entries, and the hash
+ *     determines which row to insert into.
+ *   - Determine the correct position within the row to insert the entry into. Each row can be
+ *     considered a circular buffer with a "head" index that resides in the tagTable.
+ *   - Also insert the "tag" into the equivalent row and position in the tagTable.
+ *     - Note: The tagTable has 17, 33, or 65 1-byte entries per row: the 16, 32, or 64 tags plus 1 "head" entry.
+ *       These rows are spaced out to occur every 32, 64, or 128 bytes, respectively,
+ *       for alignment/performance reasons, leaving some bytes unused.
+ * - Use SIMD to efficiently compare the tags in the tagTable to the 1-byte "short hash" and
+ *   generate a bitfield that we can cycle through to check the collisions in the hashTable.
+ * - Pick the longest match.
+ */
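+/* Layout sketch for one rowLog == 4 row (editorial illustration):
+ *   hashTable row : 16 x U32 match indices                  (64 bytes)
+ *   tagTable row  : 1 "head" byte at tagRow[0], then 16 tag bytes starting at
+ *                   tagRow[ZSTD_ROW_HASH_TAG_OFFSET]        (32 bytes reserved)
+ * "head" stores the row's circular-buffer insertion position; see how it is
+ * read and advanced in ZSTD_RowFindBestMatch() below.
+ */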
+FORCE_INLINE_TEMPLATE
+size_t ZSTD_RowFindBestMatch(
+ ZSTD_matchState_t* ms,
+ const BYTE* const ip, const BYTE* const iLimit,
+ size_t* offsetPtr,
+ const U32 mls, const ZSTD_dictMode_e dictMode,
+ const U32 rowLog)
+{
+ U32* const hashTable = ms->hashTable;
+ U16* const tagTable = ms->tagTable;
+ U32* const hashCache = ms->hashCache;
+ const U32 hashLog = ms->rowHashLog;
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ const BYTE* const base = ms->window.base;
+ const BYTE* const dictBase = ms->window.dictBase;
+ const U32 dictLimit = ms->window.dictLimit;
+ const BYTE* const prefixStart = base + dictLimit;
+ const BYTE* const dictEnd = dictBase + dictLimit;
+ const U32 curr = (U32)(ip-base);
+ const U32 maxDistance = 1U << cParams->windowLog;
+ const U32 lowestValid = ms->window.lowLimit;
+ const U32 withinMaxDistance = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid;
+ const U32 isDictionary = (ms->loadedDictEnd != 0);
+ const U32 lowLimit = isDictionary ? lowestValid : withinMaxDistance;
+ const U32 rowEntries = (1U << rowLog);
+ const U32 rowMask = rowEntries - 1;
+ const U32 cappedSearchLog = MIN(cParams->searchLog, rowLog); /* nb of searches is capped at nb entries per row */
+ U32 nbAttempts = 1U << cappedSearchLog;
+ size_t ml=4-1;
+
+    /* DMS/DDS variables that may be referenced later */
+ const ZSTD_matchState_t* const dms = ms->dictMatchState;
+
+ /* Initialize the following variables to satisfy static analyzer */
+ size_t ddsIdx = 0;
+ U32 ddsExtraAttempts = 0; /* cctx hash tables are limited in searches, but allow extra searches into DDS */
+ U32 dmsTag = 0;
+ U32* dmsRow = NULL;
+ BYTE* dmsTagRow = NULL;
+
+ if (dictMode == ZSTD_dedicatedDictSearch) {
+ const U32 ddsHashLog = dms->cParams.hashLog - ZSTD_LAZY_DDSS_BUCKET_LOG;
+ { /* Prefetch DDS hashtable entry */
+ ddsIdx = ZSTD_hashPtr(ip, ddsHashLog, mls) << ZSTD_LAZY_DDSS_BUCKET_LOG;
+ PREFETCH_L1(&dms->hashTable[ddsIdx]);
+ }
+ ddsExtraAttempts = cParams->searchLog > rowLog ? 1U << (cParams->searchLog - rowLog) : 0;
+ }
+
+ if (dictMode == ZSTD_dictMatchState) {
+ /* Prefetch DMS rows */
+ U32* const dmsHashTable = dms->hashTable;
+ U16* const dmsTagTable = dms->tagTable;
+ U32 const dmsHash = (U32)ZSTD_hashPtr(ip, dms->rowHashLog + ZSTD_ROW_HASH_TAG_BITS, mls);
+ U32 const dmsRelRow = (dmsHash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
+ dmsTag = dmsHash & ZSTD_ROW_HASH_TAG_MASK;
+ dmsTagRow = (BYTE*)(dmsTagTable + dmsRelRow);
+ dmsRow = dmsHashTable + dmsRelRow;
+ ZSTD_row_prefetch(dmsHashTable, dmsTagTable, dmsRelRow, rowLog);
+ }
+
+ /* Update the hashTable and tagTable up to (but not including) ip */
+ ZSTD_row_update_internal(ms, ip, mls, rowLog, rowMask, 1 /* useCache */);
+ { /* Get the hash for ip, compute the appropriate row */
+ U32 const hash = ZSTD_row_nextCachedHash(hashCache, hashTable, tagTable, base, curr, hashLog, rowLog, mls);
+ U32 const relRow = (hash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
+ U32 const tag = hash & ZSTD_ROW_HASH_TAG_MASK;
+ U32* const row = hashTable + relRow;
+ BYTE* tagRow = (BYTE*)(tagTable + relRow);
+ U32 const head = *tagRow & rowMask;
+ U32 matchBuffer[ZSTD_ROW_HASH_MAX_ENTRIES];
+ size_t numMatches = 0;
+ size_t currMatch = 0;
+ ZSTD_VecMask matches = ZSTD_row_getMatchMask(tagRow, (BYTE)tag, head, rowEntries);
+
+ /* Cycle through the matches and prefetch */
+ for (; (matches > 0) && (nbAttempts > 0); --nbAttempts, matches &= (matches - 1)) {
+ U32 const matchPos = (head + ZSTD_VecMask_next(matches)) & rowMask;
+ U32 const matchIndex = row[matchPos];
+ assert(numMatches < rowEntries);
+ if (matchIndex < lowLimit)
+ break;
+ if ((dictMode != ZSTD_extDict) || matchIndex >= dictLimit) {
+ PREFETCH_L1(base + matchIndex);
+ } else {
+ PREFETCH_L1(dictBase + matchIndex);
+ }
+ matchBuffer[numMatches++] = matchIndex;
+ }
+
+        /* Speed opt: also insert the current position into the hash table. This allows us to avoid
+           one iteration of the loop in ZSTD_row_update_internal() at the next search. */
+ {
+ U32 const pos = ZSTD_row_nextIndex(tagRow, rowMask);
+ tagRow[pos + ZSTD_ROW_HASH_TAG_OFFSET] = (BYTE)tag;
+ row[pos] = ms->nextToUpdate++;
+ }
+
+ /* Return the longest match */
+ for (; currMatch < numMatches; ++currMatch) {
+ U32 const matchIndex = matchBuffer[currMatch];
+ size_t currentMl=0;
+ assert(matchIndex < curr);
+ assert(matchIndex >= lowLimit);
+
+ if ((dictMode != ZSTD_extDict) || matchIndex >= dictLimit) {
+ const BYTE* const match = base + matchIndex;
+ assert(matchIndex >= dictLimit); /* ensures this is true if dictMode != ZSTD_extDict */
+ if (match[ml] == ip[ml]) /* potentially better */
+ currentMl = ZSTD_count(ip, match, iLimit);
+ } else {
+ const BYTE* const match = dictBase + matchIndex;
+ assert(match+4 <= dictEnd);
+ if (MEM_read32(match) == MEM_read32(ip)) /* assumption : matchIndex <= dictLimit-4 (by table construction) */
+ currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dictEnd, prefixStart) + 4;
+ }
+
+ /* Save best solution */
+ if (currentMl > ml) {
+ ml = currentMl;
+ *offsetPtr = STORE_OFFSET(curr - matchIndex);
+ if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
+ }
+ }
+ }
+
+ assert(nbAttempts <= (1U << ZSTD_SEARCHLOG_MAX)); /* Check we haven't underflowed. */
+ if (dictMode == ZSTD_dedicatedDictSearch) {
+ ml = ZSTD_dedicatedDictSearch_lazy_search(offsetPtr, ml, nbAttempts + ddsExtraAttempts, dms,
+ ip, iLimit, prefixStart, curr, dictLimit, ddsIdx);
+ } else if (dictMode == ZSTD_dictMatchState) {
+ /* TODO: Measure and potentially add prefetching to DMS */
+ const U32 dmsLowestIndex = dms->window.dictLimit;
+ const BYTE* const dmsBase = dms->window.base;
+ const BYTE* const dmsEnd = dms->window.nextSrc;
+ const U32 dmsSize = (U32)(dmsEnd - dmsBase);
+ const U32 dmsIndexDelta = dictLimit - dmsSize;
+
+ { U32 const head = *dmsTagRow & rowMask;
+ U32 matchBuffer[ZSTD_ROW_HASH_MAX_ENTRIES];
+ size_t numMatches = 0;
+ size_t currMatch = 0;
+ ZSTD_VecMask matches = ZSTD_row_getMatchMask(dmsTagRow, (BYTE)dmsTag, head, rowEntries);
+
+ for (; (matches > 0) && (nbAttempts > 0); --nbAttempts, matches &= (matches - 1)) {
+ U32 const matchPos = (head + ZSTD_VecMask_next(matches)) & rowMask;
+ U32 const matchIndex = dmsRow[matchPos];
+ if (matchIndex < dmsLowestIndex)
+ break;
+ PREFETCH_L1(dmsBase + matchIndex);
+ matchBuffer[numMatches++] = matchIndex;
+ }
+
+ /* Return the longest match */
+ for (; currMatch < numMatches; ++currMatch) {
+ U32 const matchIndex = matchBuffer[currMatch];
+ size_t currentMl=0;
+ assert(matchIndex >= dmsLowestIndex);
+ assert(matchIndex < curr);
+
+ { const BYTE* const match = dmsBase + matchIndex;
+ assert(match+4 <= dmsEnd);
+ if (MEM_read32(match) == MEM_read32(ip))
+ currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dmsEnd, prefixStart) + 4;
+ }
+
+ if (currentMl > ml) {
+ ml = currentMl;
+ assert(curr > matchIndex + dmsIndexDelta);
+ *offsetPtr = STORE_OFFSET(curr - (matchIndex + dmsIndexDelta));
+ if (ip+currentMl == iLimit) break;
+ }
+ }
+ }
+ }
+ return ml;
+}
+
+
+/*
+ * Generate search functions templated on (dictMode, mls, rowLog).
+ * These functions are outlined for code size & compilation time.
+ * ZSTD_searchMax() dispatches to the correct implementation function.
+ *
+ * TODO: The start of the search function involves loading and calculating a
+ * bunch of constants from the ZSTD_matchState_t. These computations could be
+ * done in an initialization function, and saved somewhere in the match state.
+ * Then we could pass a pointer to the saved state instead of the match state,
+ * and avoid duplicate computations.
+ *
+ * TODO: Move the match re-winding into searchMax. This improves compression
+ * ratio, and unlocks further simplifications with the next TODO.
+ *
+ * TODO: Try moving the repcode search into searchMax. After the re-winding
+ * and repcode search are in searchMax, there is no more logic in the match
+ * finder loop that requires knowledge about the dictMode. So we should be
+ * able to avoid force inlining it, and we can join the extDict loop with
+ * the single segment loop. It should go in searchMax instead of its own
+ * function to avoid having multiple virtual function calls per search.
+ */
+
+#define ZSTD_BT_SEARCH_FN(dictMode, mls) ZSTD_BtFindBestMatch_##dictMode##_##mls
+#define ZSTD_HC_SEARCH_FN(dictMode, mls) ZSTD_HcFindBestMatch_##dictMode##_##mls
+#define ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog) ZSTD_RowFindBestMatch_##dictMode##_##mls##_##rowLog
+
+#define ZSTD_SEARCH_FN_ATTRS FORCE_NOINLINE
+
+#define GEN_ZSTD_BT_SEARCH_FN(dictMode, mls) \
+ ZSTD_SEARCH_FN_ATTRS size_t ZSTD_BT_SEARCH_FN(dictMode, mls)( \
+ ZSTD_matchState_t* ms, \
+ const BYTE* ip, const BYTE* const iLimit, \
+ size_t* offBasePtr) \
+ { \
+ assert(MAX(4, MIN(6, ms->cParams.minMatch)) == mls); \
+ return ZSTD_BtFindBestMatch(ms, ip, iLimit, offBasePtr, mls, ZSTD_##dictMode); \
+ } \
+
+#define GEN_ZSTD_HC_SEARCH_FN(dictMode, mls) \
+ ZSTD_SEARCH_FN_ATTRS size_t ZSTD_HC_SEARCH_FN(dictMode, mls)( \
+ ZSTD_matchState_t* ms, \
+ const BYTE* ip, const BYTE* const iLimit, \
+ size_t* offsetPtr) \
+ { \
+ assert(MAX(4, MIN(6, ms->cParams.minMatch)) == mls); \
+ return ZSTD_HcFindBestMatch(ms, ip, iLimit, offsetPtr, mls, ZSTD_##dictMode); \
+ } \
+
+#define GEN_ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog) \
+ ZSTD_SEARCH_FN_ATTRS size_t ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog)( \
+ ZSTD_matchState_t* ms, \
+ const BYTE* ip, const BYTE* const iLimit, \
+ size_t* offsetPtr) \
+ { \
+ assert(MAX(4, MIN(6, ms->cParams.minMatch)) == mls); \
+ assert(MAX(4, MIN(6, ms->cParams.searchLog)) == rowLog); \
+ return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, mls, ZSTD_##dictMode, rowLog); \
+ } \
+
+#define ZSTD_FOR_EACH_ROWLOG(X, dictMode, mls) \
+ X(dictMode, mls, 4) \
+ X(dictMode, mls, 5) \
+ X(dictMode, mls, 6)
+
+#define ZSTD_FOR_EACH_MLS_ROWLOG(X, dictMode) \
+ ZSTD_FOR_EACH_ROWLOG(X, dictMode, 4) \
+ ZSTD_FOR_EACH_ROWLOG(X, dictMode, 5) \
+ ZSTD_FOR_EACH_ROWLOG(X, dictMode, 6)
+
+#define ZSTD_FOR_EACH_MLS(X, dictMode) \
+ X(dictMode, 4) \
+ X(dictMode, 5) \
+ X(dictMode, 6)
+
+#define ZSTD_FOR_EACH_DICT_MODE(X, ...) \
+ X(__VA_ARGS__, noDict) \
+ X(__VA_ARGS__, extDict) \
+ X(__VA_ARGS__, dictMatchState) \
+ X(__VA_ARGS__, dedicatedDictSearch)
+
+/* Generate row search fns for each combination of (dictMode, mls, rowLog) */
+ZSTD_FOR_EACH_DICT_MODE(ZSTD_FOR_EACH_MLS_ROWLOG, GEN_ZSTD_ROW_SEARCH_FN)
+/* Generate binary Tree search fns for each combination of (dictMode, mls) */
+ZSTD_FOR_EACH_DICT_MODE(ZSTD_FOR_EACH_MLS, GEN_ZSTD_BT_SEARCH_FN)
+/* Generate hash chain search fns for each combination of (dictMode, mls) */
+ZSTD_FOR_EACH_DICT_MODE(ZSTD_FOR_EACH_MLS, GEN_ZSTD_HC_SEARCH_FN)
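+
+/* Expansion sketch (editorial): for (dictMode == noDict, mls == 4, rowLog == 4)
+ * the row generator above emits, in effect,
+ *   FORCE_NOINLINE size_t ZSTD_RowFindBestMatch_noDict_4_4(
+ *           ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* const iLimit,
+ *           size_t* offsetPtr)
+ *   { return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_noDict, 4); }
+ * giving 36 row variants (4 dictModes x 3 mls x 3 rowLogs), plus 12 BT and
+ * 12 HC variants from the other two generators.
+ */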
+
+typedef enum { search_hashChain=0, search_binaryTree=1, search_rowHash=2 } searchMethod_e;
+
+#define GEN_ZSTD_CALL_BT_SEARCH_FN(dictMode, mls) \
+ case mls: \
+ return ZSTD_BT_SEARCH_FN(dictMode, mls)(ms, ip, iend, offsetPtr);
+#define GEN_ZSTD_CALL_HC_SEARCH_FN(dictMode, mls) \
+ case mls: \
+ return ZSTD_HC_SEARCH_FN(dictMode, mls)(ms, ip, iend, offsetPtr);
+#define GEN_ZSTD_CALL_ROW_SEARCH_FN(dictMode, mls, rowLog) \
+ case rowLog: \
+ return ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog)(ms, ip, iend, offsetPtr);
+
+#define ZSTD_SWITCH_MLS(X, dictMode) \
+ switch (mls) { \
+ ZSTD_FOR_EACH_MLS(X, dictMode) \
+ }
+
+#define ZSTD_SWITCH_ROWLOG(dictMode, mls) \
+ case mls: \
+ switch (rowLog) { \
+ ZSTD_FOR_EACH_ROWLOG(GEN_ZSTD_CALL_ROW_SEARCH_FN, dictMode, mls) \
+ } \
+ ZSTD_UNREACHABLE; \
+ break;
+
+#define ZSTD_SWITCH_SEARCH_METHOD(dictMode) \
+ switch (searchMethod) { \
+ case search_hashChain: \
+ ZSTD_SWITCH_MLS(GEN_ZSTD_CALL_HC_SEARCH_FN, dictMode) \
+ break; \
+ case search_binaryTree: \
+ ZSTD_SWITCH_MLS(GEN_ZSTD_CALL_BT_SEARCH_FN, dictMode) \
+ break; \
+ case search_rowHash: \
+ ZSTD_SWITCH_MLS(ZSTD_SWITCH_ROWLOG, dictMode) \
+ break; \
+ } \
+ ZSTD_UNREACHABLE;
+
+/*
+ * Searches for the longest match at @p ip.
+ * Dispatches to the correct implementation function based on the
+ * (searchMethod, dictMode, mls, rowLog). We use switch statements
+ * here instead of using an indirect function call through a function
+ * pointer because after Spectre and Meltdown mitigations, indirect
+ * function calls can be very costly, especially in the kernel.
+ *
+ * NOTE: dictMode and searchMethod should be templated, so those switch
+ * statements should be optimized out. Only the mls & rowLog switches
+ * should be left.
+ *
+ * @param ms The match state.
+ * @param ip The position to search at.
+ * @param iend The end of the input data.
+ * @param[out] offsetPtr Stores the match offset into this pointer.
+ * @param mls The minimum search length, in the range [4, 6].
+ * @param rowLog The row log (if applicable), in the range [4, 6].
+ * @param searchMethod The search method to use (templated).
+ * @param dictMode The dictMode (templated).
+ *
+ * @returns The length of the longest match found, or a value < 4 if no match was found.
+ * If a match is found its offset is stored in @p offsetPtr.
+ */
+FORCE_INLINE_TEMPLATE size_t ZSTD_searchMax(
+ ZSTD_matchState_t* ms,
+ const BYTE* ip,
+ const BYTE* iend,
+ size_t* offsetPtr,
+ U32 const mls,
+ U32 const rowLog,
+ searchMethod_e const searchMethod,
+ ZSTD_dictMode_e const dictMode)
+{
+ if (dictMode == ZSTD_noDict) {
+ ZSTD_SWITCH_SEARCH_METHOD(noDict)
+ } else if (dictMode == ZSTD_extDict) {
+ ZSTD_SWITCH_SEARCH_METHOD(extDict)
+ } else if (dictMode == ZSTD_dictMatchState) {
+ ZSTD_SWITCH_SEARCH_METHOD(dictMatchState)
+ } else if (dictMode == ZSTD_dedicatedDictSearch) {
+ ZSTD_SWITCH_SEARCH_METHOD(dedicatedDictSearch)
+ }
+ ZSTD_UNREACHABLE;
+ return 0;
+}
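+
+/* Dispatch sketch (editorial): searchMethod and dictMode are compile-time
+ * constants at every instantiation of ZSTD_searchMax(), so their branches fold
+ * away; only the runtime switch on mls (and on rowLog for search_rowHash)
+ * survives. E.g. (search_rowHash, ZSTD_noDict, mls == 4, rowLog == 5) resolves
+ * to a direct call to ZSTD_RowFindBestMatch_noDict_4_5(ms, ip, iend, offsetPtr).
+ */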
+
+/* *******************************
+* Common parser - lazy strategy
+*********************************/
+
+FORCE_INLINE_TEMPLATE size_t
+ZSTD_compressBlock_lazy_generic(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore,
+ U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize,
+ const searchMethod_e searchMethod, const U32 depth,
+ ZSTD_dictMode_e const dictMode)
+{
+ const BYTE* const istart = (const BYTE*)src;
+ const BYTE* ip = istart;
+ const BYTE* anchor = istart;
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* const ilimit = (searchMethod == search_rowHash) ? iend - 8 - ZSTD_ROW_HASH_CACHE_SIZE : iend - 8;
+ const BYTE* const base = ms->window.base;
+ const U32 prefixLowestIndex = ms->window.dictLimit;
+ const BYTE* const prefixLowest = base + prefixLowestIndex;
+ const U32 mls = BOUNDED(4, ms->cParams.minMatch, 6);
+ const U32 rowLog = BOUNDED(4, ms->cParams.searchLog, 6);
+
+ U32 offset_1 = rep[0], offset_2 = rep[1], savedOffset=0;
+
+ const int isDMS = dictMode == ZSTD_dictMatchState;
+ const int isDDS = dictMode == ZSTD_dedicatedDictSearch;
+ const int isDxS = isDMS || isDDS;
+ const ZSTD_matchState_t* const dms = ms->dictMatchState;
+ const U32 dictLowestIndex = isDxS ? dms->window.dictLimit : 0;
+ const BYTE* const dictBase = isDxS ? dms->window.base : NULL;
+ const BYTE* const dictLowest = isDxS ? dictBase + dictLowestIndex : NULL;
+ const BYTE* const dictEnd = isDxS ? dms->window.nextSrc : NULL;
+ const U32 dictIndexDelta = isDxS ?
+ prefixLowestIndex - (U32)(dictEnd - dictBase) :
+ 0;
+ const U32 dictAndPrefixLength = (U32)((ip - prefixLowest) + (dictEnd - dictLowest));
+
+ DEBUGLOG(5, "ZSTD_compressBlock_lazy_generic (dictMode=%u) (searchFunc=%u)", (U32)dictMode, (U32)searchMethod);
+ ip += (dictAndPrefixLength == 0);
+ if (dictMode == ZSTD_noDict) {
+ U32 const curr = (U32)(ip - base);
+ U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, ms->cParams.windowLog);
+ U32 const maxRep = curr - windowLow;
+ if (offset_2 > maxRep) savedOffset = offset_2, offset_2 = 0;
+ if (offset_1 > maxRep) savedOffset = offset_1, offset_1 = 0;
+ }
+ if (isDxS) {
+ /* dictMatchState repCode checks don't currently handle repCode == 0
+ * disabling. */
+ assert(offset_1 <= dictAndPrefixLength);
+ assert(offset_2 <= dictAndPrefixLength);
+ }
+
+ if (searchMethod == search_rowHash) {
+ ZSTD_row_fillHashCache(ms, base, rowLog,
+ MIN(ms->cParams.minMatch, 6 /* mls caps out at 6 */),
+ ms->nextToUpdate, ilimit);
+ }
+
+ /* Match Loop */
+#if defined(__x86_64__)
+    /* I've measured a random 5% speed loss on levels 5 & 6 (greedy) when the
+     * code alignment is perturbed. To fix the instability, align the loop on a 32-byte boundary.
+     */
+ __asm__(".p2align 5");
+#endif
+ while (ip < ilimit) {
+ size_t matchLength=0;
+ size_t offcode=STORE_REPCODE_1;
+ const BYTE* start=ip+1;
+ DEBUGLOG(7, "search baseline (depth 0)");
+
+ /* check repCode */
+ if (isDxS) {
+ const U32 repIndex = (U32)(ip - base) + 1 - offset_1;
+ const BYTE* repMatch = ((dictMode == ZSTD_dictMatchState || dictMode == ZSTD_dedicatedDictSearch)
+ && repIndex < prefixLowestIndex) ?
+ dictBase + (repIndex - dictIndexDelta) :
+ base + repIndex;
+ if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
+ && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
+ const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
+ matchLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
+ if (depth==0) goto _storeSequence;
+ }
+ }
+ if ( dictMode == ZSTD_noDict
+ && ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1)))) {
+ matchLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
+ if (depth==0) goto _storeSequence;
+ }
+
+ /* first search (depth 0) */
+ { size_t offsetFound = 999999999;
+ size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &offsetFound, mls, rowLog, searchMethod, dictMode);
+ if (ml2 > matchLength)
+ matchLength = ml2, start = ip, offcode=offsetFound;
+ }
+
+ if (matchLength < 4) {
+ ip += ((ip-anchor) >> kSearchStrength) + 1; /* jump faster over incompressible sections */
+ continue;
+ }
+
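+        /* Editorial note on the heuristics below: candidates are scored by an
+         * approximate gain of k*matchLength - log2(offBase), with k == 3 or 4
+         * depending on search depth; repcode candidates skip the offset
+         * penalty, and the small constants (+1, +4, +7) act as a hysteresis
+         * bonus that favors keeping the match already in hand. */
+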
+ /* let's try to find a better solution */
+ if (depth>=1)
+ while (ip<ilimit) {
+ DEBUGLOG(7, "search depth 1");
+ ip ++;
+ if ( (dictMode == ZSTD_noDict)
+ && (offcode) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
+ size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;
+ int const gain2 = (int)(mlRep * 3);
+ int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 1);
+ if ((mlRep >= 4) && (gain2 > gain1))
+ matchLength = mlRep, offcode = STORE_REPCODE_1, start = ip;
+ }
+ if (isDxS) {
+ const U32 repIndex = (U32)(ip - base) - offset_1;
+ const BYTE* repMatch = repIndex < prefixLowestIndex ?
+ dictBase + (repIndex - dictIndexDelta) :
+ base + repIndex;
+ if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
+ && (MEM_read32(repMatch) == MEM_read32(ip)) ) {
+ const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
+ size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
+ int const gain2 = (int)(mlRep * 3);
+ int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 1);
+ if ((mlRep >= 4) && (gain2 > gain1))
+ matchLength = mlRep, offcode = STORE_REPCODE_1, start = ip;
+ }
+ }
+ { size_t offset2=999999999;
+ size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &offset2, mls, rowLog, searchMethod, dictMode);
+ int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offset2))); /* raw approx */
+ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 4);
+ if ((ml2 >= 4) && (gain2 > gain1)) {
+ matchLength = ml2, offcode = offset2, start = ip;
+ continue; /* search a better one */
+ } }
+
+ /* let's find an even better one */
+ if ((depth==2) && (ip<ilimit)) {
+ DEBUGLOG(7, "search depth 2");
+ ip ++;
+ if ( (dictMode == ZSTD_noDict)
+ && (offcode) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
+ size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;
+ int const gain2 = (int)(mlRep * 4);
+ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 1);
+ if ((mlRep >= 4) && (gain2 > gain1))
+ matchLength = mlRep, offcode = STORE_REPCODE_1, start = ip;
+ }
+ if (isDxS) {
+ const U32 repIndex = (U32)(ip - base) - offset_1;
+ const BYTE* repMatch = repIndex < prefixLowestIndex ?
+ dictBase + (repIndex - dictIndexDelta) :
+ base + repIndex;
+ if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
+ && (MEM_read32(repMatch) == MEM_read32(ip)) ) {
+ const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
+ size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
+ int const gain2 = (int)(mlRep * 4);
+ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 1);
+ if ((mlRep >= 4) && (gain2 > gain1))
+ matchLength = mlRep, offcode = STORE_REPCODE_1, start = ip;
+ }
+ }
+ { size_t offset2=999999999;
+ size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &offset2, mls, rowLog, searchMethod, dictMode);
+ int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offset2))); /* raw approx */
+ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 7);
+ if ((ml2 >= 4) && (gain2 > gain1)) {
+ matchLength = ml2, offcode = offset2, start = ip;
+ continue;
+ } } }
+ break; /* nothing found : store previous solution */
+ }
+
+        /* NOTE:
+         * `start[-value]` can lead to undefined behavior: if `value` is unsigned,
+         * `-value` wraps around to a large positive offset.
+         */
+ /* catch up */
+ if (STORED_IS_OFFSET(offcode)) {
+ if (dictMode == ZSTD_noDict) {
+ while ( ((start > anchor) & (start - STORED_OFFSET(offcode) > prefixLowest))
+ && (start[-1] == (start-STORED_OFFSET(offcode))[-1]) ) /* only search for offset within prefix */
+ { start--; matchLength++; }
+ }
+ if (isDxS) {
+ U32 const matchIndex = (U32)((size_t)(start-base) - STORED_OFFSET(offcode));
+ const BYTE* match = (matchIndex < prefixLowestIndex) ? dictBase + matchIndex - dictIndexDelta : base + matchIndex;
+ const BYTE* const mStart = (matchIndex < prefixLowestIndex) ? dictLowest : prefixLowest;
+ while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; } /* catch up */
+ }
+ offset_2 = offset_1; offset_1 = (U32)STORED_OFFSET(offcode);
+ }
+ /* store sequence */
+_storeSequence:
+ { size_t const litLength = (size_t)(start - anchor);
+ ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offcode, matchLength);
+ anchor = ip = start + matchLength;
+ }
+
+ /* check immediate repcode */
+ if (isDxS) {
+ while (ip <= ilimit) {
+ U32 const current2 = (U32)(ip-base);
+ U32 const repIndex = current2 - offset_2;
+ const BYTE* repMatch = repIndex < prefixLowestIndex ?
+ dictBase - dictIndexDelta + repIndex :
+ base + repIndex;
+                if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex) >= 3 /* intentional underflow */)
+ && (MEM_read32(repMatch) == MEM_read32(ip)) ) {
+ const BYTE* const repEnd2 = repIndex < prefixLowestIndex ? dictEnd : iend;
+ matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd2, prefixLowest) + 4;
+ offcode = offset_2; offset_2 = offset_1; offset_1 = (U32)offcode; /* swap offset_2 <=> offset_1 */
+ ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, matchLength);
+ ip += matchLength;
+ anchor = ip;
+ continue;
+ }
+ break;
+ }
+ }
+
+ if (dictMode == ZSTD_noDict) {
+ while ( ((ip <= ilimit) & (offset_2>0))
+ && (MEM_read32(ip) == MEM_read32(ip - offset_2)) ) {
+ /* store sequence */
+ matchLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
+ offcode = offset_2; offset_2 = offset_1; offset_1 = (U32)offcode; /* swap repcodes */
+ ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, matchLength);
+ ip += matchLength;
+ anchor = ip;
+                continue;   /* loop again: another immediate repcode may follow (empirically faster) */
+ } } }
+
+ /* Save reps for next block */
+ rep[0] = offset_1 ? offset_1 : savedOffset;
+ rep[1] = offset_2 ? offset_2 : savedOffset;
+
+ /* Return the last literals size */
+ return (size_t)(iend - anchor);
+}
+
+
+size_t ZSTD_compressBlock_btlazy2(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_noDict);
+}
+
+size_t ZSTD_compressBlock_lazy2(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_noDict);
+}
+
+size_t ZSTD_compressBlock_lazy(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_noDict);
+}
+
+size_t ZSTD_compressBlock_greedy(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_noDict);
+}
+
+size_t ZSTD_compressBlock_btlazy2_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_dictMatchState);
+}
+
+size_t ZSTD_compressBlock_lazy2_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_dictMatchState);
+}
+
+size_t ZSTD_compressBlock_lazy_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_dictMatchState);
+}
+
+size_t ZSTD_compressBlock_greedy_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_dictMatchState);
+}
+
+
+size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_dedicatedDictSearch);
+}
+
+size_t ZSTD_compressBlock_lazy_dedicatedDictSearch(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_dedicatedDictSearch);
+}
+
+size_t ZSTD_compressBlock_greedy_dedicatedDictSearch(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_dedicatedDictSearch);
+}
+
+/* Row-based matchfinder */
+size_t ZSTD_compressBlock_lazy2_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2, ZSTD_noDict);
+}
+
+size_t ZSTD_compressBlock_lazy_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_noDict);
+}
+
+size_t ZSTD_compressBlock_greedy_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_noDict);
+}
+
+size_t ZSTD_compressBlock_lazy2_dictMatchState_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2, ZSTD_dictMatchState);
+}
+
+size_t ZSTD_compressBlock_lazy_dictMatchState_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_dictMatchState);
+}
+
+size_t ZSTD_compressBlock_greedy_dictMatchState_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_dictMatchState);
+}
+
+
+size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2, ZSTD_dedicatedDictSearch);
+}
+
+size_t ZSTD_compressBlock_lazy_dedicatedDictSearch_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_dedicatedDictSearch);
+}
+
+size_t ZSTD_compressBlock_greedy_dedicatedDictSearch_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_dedicatedDictSearch);
+}
+
+FORCE_INLINE_TEMPLATE
+size_t ZSTD_compressBlock_lazy_extDict_generic(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore,
+ U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize,
+ const searchMethod_e searchMethod, const U32 depth)
+{
+ const BYTE* const istart = (const BYTE*)src;
+ const BYTE* ip = istart;
+ const BYTE* anchor = istart;
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* const ilimit = searchMethod == search_rowHash ? iend - 8 - ZSTD_ROW_HASH_CACHE_SIZE : iend - 8;
+ const BYTE* const base = ms->window.base;
+ const U32 dictLimit = ms->window.dictLimit;
+ const BYTE* const prefixStart = base + dictLimit;
+ const BYTE* const dictBase = ms->window.dictBase;
+ const BYTE* const dictEnd = dictBase + dictLimit;
+ const BYTE* const dictStart = dictBase + ms->window.lowLimit;
+ const U32 windowLog = ms->cParams.windowLog;
+ const U32 mls = BOUNDED(4, ms->cParams.minMatch, 6);
+ const U32 rowLog = BOUNDED(4, ms->cParams.searchLog, 6);
+
+ U32 offset_1 = rep[0], offset_2 = rep[1];
+
+ DEBUGLOG(5, "ZSTD_compressBlock_lazy_extDict_generic (searchFunc=%u)", (U32)searchMethod);
+
+ /* init */
+ ip += (ip == prefixStart);
+ if (searchMethod == search_rowHash) {
+ ZSTD_row_fillHashCache(ms, base, rowLog,
+ MIN(ms->cParams.minMatch, 6 /* mls caps out at 6 */),
+ ms->nextToUpdate, ilimit);
+ }
+
+ /* Match Loop */
+#if defined(__x86_64__)
+    /* I've measured a random 5% speed loss on levels 5 & 6 (greedy) when the
+     * code alignment is perturbed. To fix the instability, align the loop on a 32-byte boundary.
+     */
+ __asm__(".p2align 5");
+#endif
+ while (ip < ilimit) {
+ size_t matchLength=0;
+ size_t offcode=STORE_REPCODE_1;
+ const BYTE* start=ip+1;
+ U32 curr = (U32)(ip-base);
+
+ /* check repCode */
+ { const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr+1, windowLog);
+ const U32 repIndex = (U32)(curr+1 - offset_1);
+ const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
+ const BYTE* const repMatch = repBase + repIndex;
+ if ( ((U32)((dictLimit-1) - repIndex) >= 3) /* intentional overflow */
+ & (offset_1 <= curr+1 - windowLow) ) /* note: we are searching at curr+1 */
+ if (MEM_read32(ip+1) == MEM_read32(repMatch)) {
+ /* repcode detected we should take it */
+ const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
+ matchLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repEnd, prefixStart) + 4;
+ if (depth==0) goto _storeSequence;
+ } }
+
+ /* first search (depth 0) */
+ { size_t offsetFound = 999999999;
+ size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &offsetFound, mls, rowLog, searchMethod, ZSTD_extDict);
+ if (ml2 > matchLength)
+ matchLength = ml2, start = ip, offcode=offsetFound;
+ }
+
+ if (matchLength < 4) {
+ ip += ((ip-anchor) >> kSearchStrength) + 1; /* jump faster over incompressible sections */
+ continue;
+ }
+
+ /* let's try to find a better solution */
+ if (depth>=1)
+ while (ip<ilimit) {
+ ip ++;
+ curr++;
+ /* check repCode */
+ if (offcode) {
+ const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr, windowLog);
+ const U32 repIndex = (U32)(curr - offset_1);
+ const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
+ const BYTE* const repMatch = repBase + repIndex;
+ if ( ((U32)((dictLimit-1) - repIndex) >= 3) /* intentional overflow : do not test positions overlapping 2 memory segments */
+ & (offset_1 <= curr - windowLow) ) /* equivalent to `curr > repIndex >= windowLow` */
+ if (MEM_read32(ip) == MEM_read32(repMatch)) {
+ /* repcode detected */
+ const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
+ size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
+ int const gain2 = (int)(repLength * 3);
+ int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 1);
+ if ((repLength >= 4) && (gain2 > gain1))
+ matchLength = repLength, offcode = STORE_REPCODE_1, start = ip;
+ } }
+
+ /* search match, depth 1 */
+ { size_t offset2=999999999;
+ size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &offset2, mls, rowLog, searchMethod, ZSTD_extDict);
+ int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offset2))); /* raw approx */
+ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 4);
+ if ((ml2 >= 4) && (gain2 > gain1)) {
+ matchLength = ml2, offcode = offset2, start = ip;
+ continue; /* search a better one */
+ } }
+
+ /* let's find an even better one */
+ if ((depth==2) && (ip<ilimit)) {
+ ip ++;
+ curr++;
+ /* check repCode */
+ if (offcode) {
+ const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr, windowLog);
+ const U32 repIndex = (U32)(curr - offset_1);
+ const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
+ const BYTE* const repMatch = repBase + repIndex;
+ if ( ((U32)((dictLimit-1) - repIndex) >= 3) /* intentional overflow : do not test positions overlapping 2 memory segments */
+ & (offset_1 <= curr - windowLow) ) /* equivalent to `curr > repIndex >= windowLow` */
+ if (MEM_read32(ip) == MEM_read32(repMatch)) {
+ /* repcode detected */
+ const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
+ size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
+ int const gain2 = (int)(repLength * 4);
+ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 1);
+ if ((repLength >= 4) && (gain2 > gain1))
+ matchLength = repLength, offcode = STORE_REPCODE_1, start = ip;
+ } }
+
+ /* search match, depth 2 */
+ { size_t offset2=999999999;
+ size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &offset2, mls, rowLog, searchMethod, ZSTD_extDict);
+ int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offset2))); /* raw approx */
+ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 7);
+ if ((ml2 >= 4) && (gain2 > gain1)) {
+ matchLength = ml2, offcode = offset2, start = ip;
+ continue;
+ } } }
+ break; /* nothing found : store previous solution */
+ }
+
+ /* catch up */
+ if (STORED_IS_OFFSET(offcode)) {
+ U32 const matchIndex = (U32)((size_t)(start-base) - STORED_OFFSET(offcode));
+ const BYTE* match = (matchIndex < dictLimit) ? dictBase + matchIndex : base + matchIndex;
+ const BYTE* const mStart = (matchIndex < dictLimit) ? dictStart : prefixStart;
+ while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; } /* catch up */
+ offset_2 = offset_1; offset_1 = (U32)STORED_OFFSET(offcode);
+ }
+
+ /* store sequence */
+_storeSequence:
+ { size_t const litLength = (size_t)(start - anchor);
+ ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offcode, matchLength);
+ anchor = ip = start + matchLength;
+ }
+
+ /* check immediate repcode */
+ while (ip <= ilimit) {
+ const U32 repCurrent = (U32)(ip-base);
+ const U32 windowLow = ZSTD_getLowestMatchIndex(ms, repCurrent, windowLog);
+ const U32 repIndex = repCurrent - offset_2;
+ const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
+ const BYTE* const repMatch = repBase + repIndex;
+ if ( ((U32)((dictLimit-1) - repIndex) >= 3) /* intentional overflow : do not test positions overlapping 2 memory segments */
+ & (offset_2 <= repCurrent - windowLow) ) /* equivalent to `curr > repIndex >= windowLow` */
+ if (MEM_read32(ip) == MEM_read32(repMatch)) {
+ /* repcode detected we should take it */
+ const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
+ matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
+ offcode = offset_2; offset_2 = offset_1; offset_1 = (U32)offcode; /* swap offset history */
+ ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, matchLength);
+ ip += matchLength;
+ anchor = ip;
+                    continue;   /* loop again: another immediate repcode may follow (empirically faster) */
+ }
+ break;
+ } }
+
+ /* Save reps for next block */
+ rep[0] = offset_1;
+ rep[1] = offset_2;
+
+ /* Return the last literals size */
+ return (size_t)(iend - anchor);
+}
+
+
+size_t ZSTD_compressBlock_greedy_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0);
+}
+
+size_t ZSTD_compressBlock_lazy_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1);
+}
+
+size_t ZSTD_compressBlock_lazy2_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2);
+}
+
+size_t ZSTD_compressBlock_btlazy2_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2);
+}
+
+size_t ZSTD_compressBlock_greedy_extDict_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0);
+}
+
+size_t ZSTD_compressBlock_lazy_extDict_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1);
+}
+
+size_t ZSTD_compressBlock_lazy2_extDict_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2);
+}
diff --git a/lib/zstd/compress/zstd_lazy.h b/lib/zstd/compress/zstd_lazy.h
new file mode 100644
index 000000000..e5bdf4df8
--- /dev/null
+++ b/lib/zstd/compress/zstd_lazy.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_LAZY_H
+#define ZSTD_LAZY_H
+
+
+#include "zstd_compress_internal.h"
+
+/*
+ * Dedicated Dictionary Search Structure bucket log. In the
+ * ZSTD_dedicatedDictSearch mode, the hashTable has
+ * 2 ** ZSTD_LAZY_DDSS_BUCKET_LOG entries in each bucket, rather than just
+ * one.
+ */
+#define ZSTD_LAZY_DDSS_BUCKET_LOG 2
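+
+/*
+ * Worked example (editorial, hashLog value chosen for illustration): with
+ * cParams.hashLog == 20, the DDS table is addressed with
+ * ddsHashLog == 20 - ZSTD_LAZY_DDSS_BUCKET_LOG == 18, i.e. 2^18 buckets of
+ * 2^2 == 4 entries each -- still 2^20 U32 slots in total.
+ */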
+
+U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip);
+void ZSTD_row_update(ZSTD_matchState_t* const ms, const BYTE* ip);
+
+void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_matchState_t* ms, const BYTE* const ip);
+
+void ZSTD_preserveUnsortedMark (U32* const table, U32 const size, U32 const reducerValue);  /*! used in ZSTD_reduceIndex(); pre-emptively increases the value of ZSTD_DUBT_UNSORTED_MARK */
+
+size_t ZSTD_compressBlock_btlazy2(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy2(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_greedy(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy2_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_greedy_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+
+size_t ZSTD_compressBlock_btlazy2_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy2_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_greedy_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy2_dictMatchState_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy_dictMatchState_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_greedy_dictMatchState_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+
+size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy_dedicatedDictSearch(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_greedy_dedicatedDictSearch(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy_dedicatedDictSearch_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_greedy_dedicatedDictSearch_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+
+size_t ZSTD_compressBlock_greedy_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy2_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_greedy_extDict_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy_extDict_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy2_extDict_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_btlazy2_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+
+
+
+#endif /* ZSTD_LAZY_H */
diff --git a/lib/zstd/compress/zstd_ldm.c b/lib/zstd/compress/zstd_ldm.c
new file mode 100644
index 000000000..dd86fc83e
--- /dev/null
+++ b/lib/zstd/compress/zstd_ldm.c
@@ -0,0 +1,724 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#include "zstd_ldm.h"
+
+#include "../common/debug.h"
+#include <linux/xxhash.h>
+#include "zstd_fast.h" /* ZSTD_fillHashTable() */
+#include "zstd_double_fast.h" /* ZSTD_fillDoubleHashTable() */
+#include "zstd_ldm_geartab.h"
+
+#define LDM_BUCKET_SIZE_LOG 3
+#define LDM_MIN_MATCH_LENGTH 64
+#define LDM_HASH_RLOG 7
+
+typedef struct {
+ U64 rolling;
+ U64 stopMask;
+} ldmRollingHashState_t;
+
+/* ZSTD_ldm_gear_init():
+ *
+ * Initializes the rolling hash state such that it will honor the
+ * settings in params. */
+static void ZSTD_ldm_gear_init(ldmRollingHashState_t* state, ldmParams_t const* params)
+{
+ unsigned maxBitsInMask = MIN(params->minMatchLength, 64);
+ unsigned hashRateLog = params->hashRateLog;
+
+ state->rolling = ~(U32)0;
+
+ /* The choice of the splitting criterion is subject to two conditions:
+ * 1. it has to trigger on average every 2^(hashRateLog) bytes;
+ * 2. ideally, it has to depend on a window of minMatchLength bytes.
+ *
+ * In the gear hash algorithm, bit n depends on the last n bytes;
+ * so in order to obtain a good quality splitting criterion it is
+ * preferable to use bits with high weight.
+ *
+ * To match condition 1 we use a mask with hashRateLog bits set
+ * and, because of the previous remark, we make sure these bits
+ * have the highest possible weight while still respecting
+ * condition 2.
+ */
+ if (hashRateLog > 0 && hashRateLog <= maxBitsInMask) {
+ state->stopMask = (((U64)1 << hashRateLog) - 1) << (maxBitsInMask - hashRateLog);
+ } else {
+ /* In this degenerate case we simply honor the hash rate. */
+ state->stopMask = ((U64)1 << hashRateLog) - 1;
+ }
+}
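+
+/* Worked example (editorial): with minMatchLength >= 64 and hashRateLog == 7,
+ * maxBitsInMask == 64 and
+ *   stopMask == (((U64)1 << 7) - 1) << (64 - 7) == 0xFE00000000000000ULL,
+ * i.e. the 7 highest-weight bits. (hash & stopMask) == 0 then triggers on
+ * average once every 2^7 == 128 bytes, while each tested bit still depends on
+ * up to 64 bytes of history. */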
+
+/* ZSTD_ldm_gear_reset()
+ * Feeds [data, data + minMatchLength) into the hash without registering any
+ * splits. This effectively resets the hash state. This is used when skipping
+ * over data, either at the beginning of a block, or skipping sections.
+ */
+static void ZSTD_ldm_gear_reset(ldmRollingHashState_t* state,
+ BYTE const* data, size_t minMatchLength)
+{
+ U64 hash = state->rolling;
+ size_t n = 0;
+
+#define GEAR_ITER_ONCE() do { \
+ hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; \
+ n += 1; \
+ } while (0)
+ while (n + 3 < minMatchLength) {
+ GEAR_ITER_ONCE();
+ GEAR_ITER_ONCE();
+ GEAR_ITER_ONCE();
+ GEAR_ITER_ONCE();
+ }
+ while (n < minMatchLength) {
+ GEAR_ITER_ONCE();
+ }
+#undef GEAR_ITER_ONCE
+}
+
+/* ZSTD_ldm_gear_feed():
+ *
+ * Registers in the splits array all the split points found in the first
+ * size bytes following the data pointer. This function terminates when
+ * either all the data has been processed or LDM_BATCH_SIZE splits are
+ * present in the splits array.
+ *
+ * Precondition: The splits array must not be full.
+ * Returns: The number of bytes processed. */
+static size_t ZSTD_ldm_gear_feed(ldmRollingHashState_t* state,
+ BYTE const* data, size_t size,
+ size_t* splits, unsigned* numSplits)
+{
+ size_t n;
+ U64 hash, mask;
+
+ hash = state->rolling;
+ mask = state->stopMask;
+ n = 0;
+
+#define GEAR_ITER_ONCE() do { \
+ hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; \
+ n += 1; \
+ if (UNLIKELY((hash & mask) == 0)) { \
+ splits[*numSplits] = n; \
+ *numSplits += 1; \
+ if (*numSplits == LDM_BATCH_SIZE) \
+ goto done; \
+ } \
+ } while (0)
+
+ while (n + 3 < size) {
+ GEAR_ITER_ONCE();
+ GEAR_ITER_ONCE();
+ GEAR_ITER_ONCE();
+ GEAR_ITER_ONCE();
+ }
+ while (n < size) {
+ GEAR_ITER_ONCE();
+ }
+
+#undef GEAR_ITER_ONCE
+
+done:
+ state->rolling = hash;
+ return n;
+}
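+
+/* Usage sketch (editorial): each value written to splits[] is the byte count n
+ * at which (hash & stopMask) == 0 fired, so data + splits[i] - minMatchLength
+ * is the start of the window that produced the split; this is exactly how
+ * ZSTD_ldm_fillHashTable() and ZSTD_ldm_generateSequences_internal() below
+ * consume the array. */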
+
+void ZSTD_ldm_adjustParameters(ldmParams_t* params,
+ ZSTD_compressionParameters const* cParams)
+{
+ params->windowLog = cParams->windowLog;
+ ZSTD_STATIC_ASSERT(LDM_BUCKET_SIZE_LOG <= ZSTD_LDM_BUCKETSIZELOG_MAX);
+ DEBUGLOG(4, "ZSTD_ldm_adjustParameters");
+ if (!params->bucketSizeLog) params->bucketSizeLog = LDM_BUCKET_SIZE_LOG;
+ if (!params->minMatchLength) params->minMatchLength = LDM_MIN_MATCH_LENGTH;
+ if (params->hashLog == 0) {
+ params->hashLog = MAX(ZSTD_HASHLOG_MIN, params->windowLog - LDM_HASH_RLOG);
+ assert(params->hashLog <= ZSTD_HASHLOG_MAX);
+ }
+ if (params->hashRateLog == 0) {
+ params->hashRateLog = params->windowLog < params->hashLog
+ ? 0
+ : params->windowLog - params->hashLog;
+ }
+ params->bucketSizeLog = MIN(params->bucketSizeLog, params->hashLog);
+}
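+
+/* Worked example (editorial, windowLog chosen for illustration): for
+ * windowLog == 27 with all other LDM fields left at 0, the defaults above give
+ * bucketSizeLog == 3, minMatchLength == 64,
+ * hashLog == MAX(ZSTD_HASHLOG_MIN, 27 - 7) == 20, and
+ * hashRateLog == 27 - 20 == 7. */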
+
+size_t ZSTD_ldm_getTableSize(ldmParams_t params)
+{
+ size_t const ldmHSize = ((size_t)1) << params.hashLog;
+ size_t const ldmBucketSizeLog = MIN(params.bucketSizeLog, params.hashLog);
+ size_t const ldmBucketSize = ((size_t)1) << (params.hashLog - ldmBucketSizeLog);
+ size_t const totalSize = ZSTD_cwksp_alloc_size(ldmBucketSize)
+ + ZSTD_cwksp_alloc_size(ldmHSize * sizeof(ldmEntry_t));
+ return params.enableLdm == ZSTD_ps_enable ? totalSize : 0;
+}
+
+size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize)
+{
+ return params.enableLdm == ZSTD_ps_enable ? (maxChunkSize / params.minMatchLength) : 0;
+}
+
+/* ZSTD_ldm_getBucket() :
+ * Returns a pointer to the start of the bucket associated with hash. */
+static ldmEntry_t* ZSTD_ldm_getBucket(
+ ldmState_t* ldmState, size_t hash, ldmParams_t const ldmParams)
+{
+ return ldmState->hashTable + (hash << ldmParams.bucketSizeLog);
+}
+
+/* ZSTD_ldm_insertEntry() :
+ * Insert the entry with corresponding hash into the hash table */
+static void ZSTD_ldm_insertEntry(ldmState_t* ldmState,
+ size_t const hash, const ldmEntry_t entry,
+ ldmParams_t const ldmParams)
+{
+ BYTE* const pOffset = ldmState->bucketOffsets + hash;
+ unsigned const offset = *pOffset;
+
+ *(ZSTD_ldm_getBucket(ldmState, hash, ldmParams) + offset) = entry;
+ *pOffset = (BYTE)((offset + 1) & ((1u << ldmParams.bucketSizeLog) - 1));
+
+}
+
+/* ZSTD_ldm_countBackwardsMatch() :
+ * Returns the number of bytes that match backwards before pIn and pMatch.
+ *
+ * We count only bytes where pMatch >= pBase and pIn >= pAnchor. */
+static size_t ZSTD_ldm_countBackwardsMatch(
+ const BYTE* pIn, const BYTE* pAnchor,
+ const BYTE* pMatch, const BYTE* pMatchBase)
+{
+ size_t matchLength = 0;
+ while (pIn > pAnchor && pMatch > pMatchBase && pIn[-1] == pMatch[-1]) {
+ pIn--;
+ pMatch--;
+ matchLength++;
+ }
+ return matchLength;
+}
+
+/* ZSTD_ldm_countBackwardsMatch_2segments() :
+ * Returns the number of bytes that match backwards from pMatch,
+ * even with the backwards match spanning 2 different segments.
+ *
+ * On reaching `pMatchBase`, continue counting backwards from `pExtDictEnd` */
+static size_t ZSTD_ldm_countBackwardsMatch_2segments(
+ const BYTE* pIn, const BYTE* pAnchor,
+ const BYTE* pMatch, const BYTE* pMatchBase,
+ const BYTE* pExtDictStart, const BYTE* pExtDictEnd)
+{
+ size_t matchLength = ZSTD_ldm_countBackwardsMatch(pIn, pAnchor, pMatch, pMatchBase);
+ if (pMatch - matchLength != pMatchBase || pMatchBase == pExtDictStart) {
+ /* If backwards match is entirely in the extDict or prefix, immediately return */
+ return matchLength;
+ }
+ DEBUGLOG(7, "ZSTD_ldm_countBackwardsMatch_2segments: found 2-parts backwards match (length in prefix==%zu)", matchLength);
+ matchLength += ZSTD_ldm_countBackwardsMatch(pIn - matchLength, pAnchor, pExtDictEnd, pExtDictStart);
+ DEBUGLOG(7, "final backwards match length = %zu", matchLength);
+ return matchLength;
+}
+
+/* ZSTD_ldm_fillFastTables() :
+ *
+ * Fills the relevant tables for the ZSTD_fast and ZSTD_dfast strategies.
+ * This is similar to ZSTD_loadDictionaryContent.
+ *
+ * The tables for the other strategies are filled within their
+ * block compressors. */
+static size_t ZSTD_ldm_fillFastTables(ZSTD_matchState_t* ms,
+ void const* end)
+{
+ const BYTE* const iend = (const BYTE*)end;
+
+ switch(ms->cParams.strategy)
+ {
+ case ZSTD_fast:
+ ZSTD_fillHashTable(ms, iend, ZSTD_dtlm_fast);
+ break;
+
+ case ZSTD_dfast:
+ ZSTD_fillDoubleHashTable(ms, iend, ZSTD_dtlm_fast);
+ break;
+
+ case ZSTD_greedy:
+ case ZSTD_lazy:
+ case ZSTD_lazy2:
+ case ZSTD_btlazy2:
+ case ZSTD_btopt:
+ case ZSTD_btultra:
+ case ZSTD_btultra2:
+ break;
+ default:
+ assert(0); /* not possible : not a valid strategy id */
+ }
+
+ return 0;
+}
+
+void ZSTD_ldm_fillHashTable(
+ ldmState_t* ldmState, const BYTE* ip,
+ const BYTE* iend, ldmParams_t const* params)
+{
+ U32 const minMatchLength = params->minMatchLength;
+ U32 const hBits = params->hashLog - params->bucketSizeLog;
+ BYTE const* const base = ldmState->window.base;
+ BYTE const* const istart = ip;
+ ldmRollingHashState_t hashState;
+ size_t* const splits = ldmState->splitIndices;
+ unsigned numSplits;
+
+ DEBUGLOG(5, "ZSTD_ldm_fillHashTable");
+
+ ZSTD_ldm_gear_init(&hashState, params);
+ while (ip < iend) {
+ size_t hashed;
+ unsigned n;
+
+ numSplits = 0;
+ hashed = ZSTD_ldm_gear_feed(&hashState, ip, iend - ip, splits, &numSplits);
+
+ for (n = 0; n < numSplits; n++) {
+ if (ip + splits[n] >= istart + minMatchLength) {
+ BYTE const* const split = ip + splits[n] - minMatchLength;
+ U64 const xxhash = xxh64(split, minMatchLength, 0);
+ U32 const hash = (U32)(xxhash & (((U32)1 << hBits) - 1));
+ ldmEntry_t entry;
+
+ entry.offset = (U32)(split - base);
+ entry.checksum = (U32)(xxhash >> 32);
+ ZSTD_ldm_insertEntry(ldmState, hash, entry, *params);
+ }
+ }
+
+ ip += hashed;
+ }
+}
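+
+/* Note on the hashing above: each 64-bit xxh64 value is split in two --
+ * the low hBits select the bucket, while the high 32 bits are stored as a
+ * checksum used to cheaply reject false bucket hits during search. */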
+
+
+/* ZSTD_ldm_limitTableUpdate() :
+ *
+ * Sets ms->nextToUpdate to a position closer to anchor if it is far away
+ * (after a long match, only update tables a limited amount). */
+static void ZSTD_ldm_limitTableUpdate(ZSTD_matchState_t* ms, const BYTE* anchor)
+{
+ U32 const curr = (U32)(anchor - ms->window.base);
+ if (curr > ms->nextToUpdate + 1024) {
+ ms->nextToUpdate =
+ curr - MIN(512, curr - ms->nextToUpdate - 1024);
+ }
+}
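+
+/* Worked example for ZSTD_ldm_limitTableUpdate() above: with
+ * nextToUpdate == 0 and anchor at index 2000, curr - nextToUpdate - 1024
+ * == 976, so nextToUpdate becomes 2000 - MIN(512, 976) == 1488, i.e. at
+ * most 512 positions remain to be indexed before the anchor. */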
+
+static size_t ZSTD_ldm_generateSequences_internal(
+ ldmState_t* ldmState, rawSeqStore_t* rawSeqStore,
+ ldmParams_t const* params, void const* src, size_t srcSize)
+{
+ /* LDM parameters */
+ int const extDict = ZSTD_window_hasExtDict(ldmState->window);
+ U32 const minMatchLength = params->minMatchLength;
+ U32 const entsPerBucket = 1U << params->bucketSizeLog;
+ U32 const hBits = params->hashLog - params->bucketSizeLog;
+ /* Prefix and extDict parameters */
+ U32 const dictLimit = ldmState->window.dictLimit;
+ U32 const lowestIndex = extDict ? ldmState->window.lowLimit : dictLimit;
+ BYTE const* const base = ldmState->window.base;
+ BYTE const* const dictBase = extDict ? ldmState->window.dictBase : NULL;
+ BYTE const* const dictStart = extDict ? dictBase + lowestIndex : NULL;
+ BYTE const* const dictEnd = extDict ? dictBase + dictLimit : NULL;
+ BYTE const* const lowPrefixPtr = base + dictLimit;
+ /* Input bounds */
+ BYTE const* const istart = (BYTE const*)src;
+ BYTE const* const iend = istart + srcSize;
+ BYTE const* const ilimit = iend - HASH_READ_SIZE;
+ /* Input positions */
+ BYTE const* anchor = istart;
+ BYTE const* ip = istart;
+ /* Rolling hash state */
+ ldmRollingHashState_t hashState;
+ /* Arrays for staged-processing */
+ size_t* const splits = ldmState->splitIndices;
+ ldmMatchCandidate_t* const candidates = ldmState->matchCandidates;
+ unsigned numSplits;
+
+ if (srcSize < minMatchLength)
+ return iend - anchor;
+
+ /* Initialize the rolling hash state with the first minMatchLength bytes */
+ ZSTD_ldm_gear_init(&hashState, params);
+ ZSTD_ldm_gear_reset(&hashState, ip, minMatchLength);
+ ip += minMatchLength;
+
+ while (ip < ilimit) {
+ size_t hashed;
+ unsigned n;
+
+ numSplits = 0;
+ hashed = ZSTD_ldm_gear_feed(&hashState, ip, ilimit - ip,
+ splits, &numSplits);
+
+ for (n = 0; n < numSplits; n++) {
+ BYTE const* const split = ip + splits[n] - minMatchLength;
+ U64 const xxhash = xxh64(split, minMatchLength, 0);
+ U32 const hash = (U32)(xxhash & (((U32)1 << hBits) - 1));
+
+ candidates[n].split = split;
+ candidates[n].hash = hash;
+ candidates[n].checksum = (U32)(xxhash >> 32);
+ candidates[n].bucket = ZSTD_ldm_getBucket(ldmState, hash, *params);
+ PREFETCH_L1(candidates[n].bucket);
+ }
+
+ for (n = 0; n < numSplits; n++) {
+ size_t forwardMatchLength = 0, backwardMatchLength = 0,
+ bestMatchLength = 0, mLength;
+ U32 offset;
+ BYTE const* const split = candidates[n].split;
+ U32 const checksum = candidates[n].checksum;
+ U32 const hash = candidates[n].hash;
+ ldmEntry_t* const bucket = candidates[n].bucket;
+ ldmEntry_t const* cur;
+ ldmEntry_t const* bestEntry = NULL;
+ ldmEntry_t newEntry;
+
+ newEntry.offset = (U32)(split - base);
+ newEntry.checksum = checksum;
+
+ /* If a split point would generate a sequence overlapping with
+ * the previous one, we merely register it in the hash table and
+ * move on */
+ if (split < anchor) {
+ ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params);
+ continue;
+ }
+
+ for (cur = bucket; cur < bucket + entsPerBucket; cur++) {
+ size_t curForwardMatchLength, curBackwardMatchLength,
+ curTotalMatchLength;
+ if (cur->checksum != checksum || cur->offset <= lowestIndex) {
+ continue;
+ }
+ if (extDict) {
+ BYTE const* const curMatchBase =
+ cur->offset < dictLimit ? dictBase : base;
+ BYTE const* const pMatch = curMatchBase + cur->offset;
+ BYTE const* const matchEnd =
+ cur->offset < dictLimit ? dictEnd : iend;
+ BYTE const* const lowMatchPtr =
+ cur->offset < dictLimit ? dictStart : lowPrefixPtr;
+ curForwardMatchLength =
+ ZSTD_count_2segments(split, pMatch, iend, matchEnd, lowPrefixPtr);
+ if (curForwardMatchLength < minMatchLength) {
+ continue;
+ }
+ curBackwardMatchLength = ZSTD_ldm_countBackwardsMatch_2segments(
+ split, anchor, pMatch, lowMatchPtr, dictStart, dictEnd);
+ } else { /* !extDict */
+ BYTE const* const pMatch = base + cur->offset;
+ curForwardMatchLength = ZSTD_count(split, pMatch, iend);
+ if (curForwardMatchLength < minMatchLength) {
+ continue;
+ }
+ curBackwardMatchLength =
+ ZSTD_ldm_countBackwardsMatch(split, anchor, pMatch, lowPrefixPtr);
+ }
+ curTotalMatchLength = curForwardMatchLength + curBackwardMatchLength;
+
+ if (curTotalMatchLength > bestMatchLength) {
+ bestMatchLength = curTotalMatchLength;
+ forwardMatchLength = curForwardMatchLength;
+ backwardMatchLength = curBackwardMatchLength;
+ bestEntry = cur;
+ }
+ }
+
+ /* No match found -- insert an entry into the hash table
+ * and process the next candidate match */
+ if (bestEntry == NULL) {
+ ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params);
+ continue;
+ }
+
+ /* Match found */
+ offset = (U32)(split - base) - bestEntry->offset;
+ mLength = forwardMatchLength + backwardMatchLength;
+ {
+ rawSeq* const seq = rawSeqStore->seq + rawSeqStore->size;
+
+ /* Out of sequence storage */
+ if (rawSeqStore->size == rawSeqStore->capacity)
+ return ERROR(dstSize_tooSmall);
+ seq->litLength = (U32)(split - backwardMatchLength - anchor);
+ seq->matchLength = (U32)mLength;
+ seq->offset = offset;
+ rawSeqStore->size++;
+ }
+
+ /* Insert the current entry into the hash table --- it must be
+ * done after the previous block to avoid clobbering bestEntry */
+ ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params);
+
+ anchor = split + forwardMatchLength;
+
+ /* If we find a match that ends after the data that we've hashed
+ * then we have a repeating, overlapping, pattern. E.g. all zeros.
+ * If one repetition of the pattern matches our `stopMask` then all
+ * repetitions will. We don't need to insert them all into our table,
+ * only the first one. So skip over overlapping matches.
+ * This is a major speed boost (20x) for compressing a single byte
+ * repeated, when that byte ends up in the table.
+ */
+ if (anchor > ip + hashed) {
+ ZSTD_ldm_gear_reset(&hashState, anchor - minMatchLength, minMatchLength);
+ /* Continue the outer loop at anchor (ip + hashed == anchor). */
+ ip = anchor - hashed;
+ break;
+ }
+ }
+
+ ip += hashed;
+ }
+
+ return iend - anchor;
+}
+
+/*! ZSTD_ldm_reduceTable() :
+ * reduce table indexes by `reducerValue` */
+static void ZSTD_ldm_reduceTable(ldmEntry_t* const table, U32 const size,
+ U32 const reducerValue)
+{
+ U32 u;
+ for (u = 0; u < size; u++) {
+ if (table[u].offset < reducerValue) table[u].offset = 0;
+ else table[u].offset -= reducerValue;
+ }
+}
+
+size_t ZSTD_ldm_generateSequences(
+ ldmState_t* ldmState, rawSeqStore_t* sequences,
+ ldmParams_t const* params, void const* src, size_t srcSize)
+{
+ U32 const maxDist = 1U << params->windowLog;
+ BYTE const* const istart = (BYTE const*)src;
+ BYTE const* const iend = istart + srcSize;
+ size_t const kMaxChunkSize = 1 << 20;
+ size_t const nbChunks = (srcSize / kMaxChunkSize) + ((srcSize % kMaxChunkSize) != 0);
+ size_t chunk;
+ size_t leftoverSize = 0;
+
+ assert(ZSTD_CHUNKSIZE_MAX >= kMaxChunkSize);
+ /* Check that ZSTD_window_update() has been called for this chunk prior
+ * to passing it to this function.
+ */
+ assert(ldmState->window.nextSrc >= (BYTE const*)src + srcSize);
+ /* The input could be very large (in zstdmt), so it must be broken up into
+ * chunks to enforce the maximum distance and handle overflow correction.
+ */
+ assert(sequences->pos <= sequences->size);
+ assert(sequences->size <= sequences->capacity);
+ for (chunk = 0; chunk < nbChunks && sequences->size < sequences->capacity; ++chunk) {
+ BYTE const* const chunkStart = istart + chunk * kMaxChunkSize;
+ size_t const remaining = (size_t)(iend - chunkStart);
+ BYTE const *const chunkEnd =
+ (remaining < kMaxChunkSize) ? iend : chunkStart + kMaxChunkSize;
+ size_t const chunkSize = chunkEnd - chunkStart;
+ size_t newLeftoverSize;
+ size_t const prevSize = sequences->size;
+
+ assert(chunkStart < iend);
+ /* 1. Perform overflow correction if necessary. */
+ if (ZSTD_window_needOverflowCorrection(ldmState->window, 0, maxDist, ldmState->loadedDictEnd, chunkStart, chunkEnd)) {
+ U32 const ldmHSize = 1U << params->hashLog;
+ U32 const correction = ZSTD_window_correctOverflow(
+ &ldmState->window, /* cycleLog */ 0, maxDist, chunkStart);
+ ZSTD_ldm_reduceTable(ldmState->hashTable, ldmHSize, correction);
+ /* invalidate dictionaries on overflow correction */
+ ldmState->loadedDictEnd = 0;
+ }
+ /* 2. We enforce the maximum offset allowed.
+ *
+ * kMaxChunkSize should be small enough that we don't lose too much of
+ * the window through early invalidation.
+ * TODO: * Test the chunk size.
+ * * Try invalidation after the sequence generation and test the
+ * offset against maxDist directly.
+ *
+ * NOTE: Because of dictionaries + sequence splitting we MUST make sure
+ * that any offset used is valid at the END of the sequence, since it may
+ * be split into two sequences. This condition holds when using
+ * ZSTD_window_enforceMaxDist(), but if we move to checking offsets
+ * against maxDist directly, we'll have to carefully handle that case.
+ */
+ ZSTD_window_enforceMaxDist(&ldmState->window, chunkEnd, maxDist, &ldmState->loadedDictEnd, NULL);
+ /* 3. Generate the sequences for the chunk, and get newLeftoverSize. */
+ newLeftoverSize = ZSTD_ldm_generateSequences_internal(
+ ldmState, sequences, params, chunkStart, chunkSize);
+ if (ZSTD_isError(newLeftoverSize))
+ return newLeftoverSize;
+ /* 4. We add the leftover literals from previous iterations to the first
+ * newly generated sequence, or add the `newLeftoverSize` if none are
+ * generated.
+ */
+ /* Prepend the leftover literals from the last call */
+ if (prevSize < sequences->size) {
+ sequences->seq[prevSize].litLength += (U32)leftoverSize;
+ leftoverSize = newLeftoverSize;
+ } else {
+ assert(newLeftoverSize == chunkSize);
+ leftoverSize += chunkSize;
+ }
+ }
+ return 0;
+}
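+
+/* Chunking example for ZSTD_ldm_generateSequences() above: a 2.5 MB input
+ * with kMaxChunkSize == 1 MB yields nbChunks == 3 (two full chunks plus a
+ * 0.5 MB remainder). */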
+
+void
+ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize, U32 const minMatch)
+{
+ while (srcSize > 0 && rawSeqStore->pos < rawSeqStore->size) {
+ rawSeq* seq = rawSeqStore->seq + rawSeqStore->pos;
+ if (srcSize <= seq->litLength) {
+ /* Skip past srcSize literals */
+ seq->litLength -= (U32)srcSize;
+ return;
+ }
+ srcSize -= seq->litLength;
+ seq->litLength = 0;
+ if (srcSize < seq->matchLength) {
+ /* Skip past the first srcSize of the match */
+ seq->matchLength -= (U32)srcSize;
+ if (seq->matchLength < minMatch) {
+ /* The match is too short, omit it */
+ if (rawSeqStore->pos + 1 < rawSeqStore->size) {
+ seq[1].litLength += seq[0].matchLength;
+ }
+ rawSeqStore->pos++;
+ }
+ return;
+ }
+ srcSize -= seq->matchLength;
+ seq->matchLength = 0;
+ rawSeqStore->pos++;
+ }
+}
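+
+/* Example for ZSTD_ldm_skipSequences() above: skipping 15 bytes past a
+ * sequence {litLength:10, matchLength:20} consumes the 10 literals plus 5
+ * match bytes, leaving {litLength:0, matchLength:15} (kept, assuming
+ * 15 >= minMatch). */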
+
+/*
+ * If the sequence length is longer than remaining then the sequence is split
+ * between this block and the next.
+ *
+ * Returns the current sequence to handle, or if the rest of the block should
+ * be literals, it returns a sequence with offset == 0.
+ */
+static rawSeq maybeSplitSequence(rawSeqStore_t* rawSeqStore,
+ U32 const remaining, U32 const minMatch)
+{
+ rawSeq sequence = rawSeqStore->seq[rawSeqStore->pos];
+ assert(sequence.offset > 0);
+ /* Likely: No partial sequence */
+ if (remaining >= sequence.litLength + sequence.matchLength) {
+ rawSeqStore->pos++;
+ return sequence;
+ }
+ /* Cut the sequence short (offset == 0 ==> rest is literals). */
+ if (remaining <= sequence.litLength) {
+ sequence.offset = 0;
+ } else if (remaining < sequence.litLength + sequence.matchLength) {
+ sequence.matchLength = remaining - sequence.litLength;
+ if (sequence.matchLength < minMatch) {
+ sequence.offset = 0;
+ }
+ }
+ /* Skip past `remaining` bytes for the future sequences. */
+ ZSTD_ldm_skipSequences(rawSeqStore, remaining, minMatch);
+ return sequence;
+}
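+
+/* Split example for maybeSplitSequence() above: with remaining == 25 and a
+ * next sequence of {litLength:10, matchLength:20}, the returned sequence is
+ * truncated to {litLength:10, matchLength:15}, and ZSTD_ldm_skipSequences()
+ * leaves the 5-byte tail of the match for the next block (or folds it into
+ * the following sequence's literals if 5 < minMatch). */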
+
+void ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes) {
+ U32 currPos = (U32)(rawSeqStore->posInSequence + nbBytes);
+ while (currPos && rawSeqStore->pos < rawSeqStore->size) {
+ rawSeq currSeq = rawSeqStore->seq[rawSeqStore->pos];
+ if (currPos >= currSeq.litLength + currSeq.matchLength) {
+ currPos -= currSeq.litLength + currSeq.matchLength;
+ rawSeqStore->pos++;
+ } else {
+ rawSeqStore->posInSequence = currPos;
+ break;
+ }
+ }
+ if (currPos == 0 || rawSeqStore->pos == rawSeqStore->size) {
+ rawSeqStore->posInSequence = 0;
+ }
+}
+
+size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_paramSwitch_e useRowMatchFinder,
+ void const* src, size_t srcSize)
+{
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ unsigned const minMatch = cParams->minMatch;
+ ZSTD_blockCompressor const blockCompressor =
+ ZSTD_selectBlockCompressor(cParams->strategy, useRowMatchFinder, ZSTD_matchState_dictMode(ms));
+ /* Input bounds */
+ BYTE const* const istart = (BYTE const*)src;
+ BYTE const* const iend = istart + srcSize;
+ /* Input positions */
+ BYTE const* ip = istart;
+
+ DEBUGLOG(5, "ZSTD_ldm_blockCompress: srcSize=%zu", srcSize);
+ /* If using opt parser, use LDMs only as candidates rather than always accepting them */
+ if (cParams->strategy >= ZSTD_btopt) {
+ size_t lastLLSize;
+ ms->ldmSeqStore = rawSeqStore;
+ lastLLSize = blockCompressor(ms, seqStore, rep, src, srcSize);
+ ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore, srcSize);
+ return lastLLSize;
+ }
+
+ assert(rawSeqStore->pos <= rawSeqStore->size);
+ assert(rawSeqStore->size <= rawSeqStore->capacity);
+ /* Loop through each sequence and apply the block compressor to the literals */
+ while (rawSeqStore->pos < rawSeqStore->size && ip < iend) {
+ /* maybeSplitSequence updates rawSeqStore->pos */
+ rawSeq const sequence = maybeSplitSequence(rawSeqStore,
+ (U32)(iend - ip), minMatch);
+ int i;
+ /* End signal */
+ if (sequence.offset == 0)
+ break;
+
+ assert(ip + sequence.litLength + sequence.matchLength <= iend);
+
+ /* Fill tables for block compressor */
+ ZSTD_ldm_limitTableUpdate(ms, ip);
+ ZSTD_ldm_fillFastTables(ms, ip);
+ /* Run the block compressor */
+ DEBUGLOG(5, "pos %u : calling block compressor on segment of size %u", (unsigned)(ip-istart), sequence.litLength);
+ {
+ size_t const newLitLength =
+ blockCompressor(ms, seqStore, rep, ip, sequence.litLength);
+ ip += sequence.litLength;
+ /* Update the repcodes */
+ for (i = ZSTD_REP_NUM - 1; i > 0; i--)
+ rep[i] = rep[i-1];
+ rep[0] = sequence.offset;
+ /* Store the sequence */
+ ZSTD_storeSeq(seqStore, newLitLength, ip - newLitLength, iend,
+ STORE_OFFSET(sequence.offset),
+ sequence.matchLength);
+ ip += sequence.matchLength;
+ }
+ }
+ /* Fill the tables for the block compressor */
+ ZSTD_ldm_limitTableUpdate(ms, ip);
+ ZSTD_ldm_fillFastTables(ms, ip);
+ /* Compress the last literals */
+ return blockCompressor(ms, seqStore, rep, ip, iend - ip);
+}
diff --git a/lib/zstd/compress/zstd_ldm.h b/lib/zstd/compress/zstd_ldm.h
new file mode 100644
index 000000000..fbc6a5e88
--- /dev/null
+++ b/lib/zstd/compress/zstd_ldm.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_LDM_H
+#define ZSTD_LDM_H
+
+
+#include "zstd_compress_internal.h" /* ldmParams_t, U32 */
+#include <linux/zstd.h> /* ZSTD_CCtx, size_t */
+
+/*-*************************************
+* Long distance matching
+***************************************/
+
+#define ZSTD_LDM_DEFAULT_WINDOW_LOG ZSTD_WINDOWLOG_LIMIT_DEFAULT
+
+void ZSTD_ldm_fillHashTable(
+ ldmState_t* state, const BYTE* ip,
+ const BYTE* iend, ldmParams_t const* params);
+
+/*
+ * ZSTD_ldm_generateSequences():
+ *
+ * Generates long range matching sequences in `sequences`, using the long
+ * distance match finder; the generated sequences parse a prefix of the
+ * source. `sequences` must be large enough to store every sequence,
+ * which can be checked with `ZSTD_ldm_getMaxNbSeq()`.
+ * @returns 0 or an error code.
+ *
+ * NOTE: The user must have called ZSTD_window_update() for all of the input
+ * they have, even if they pass it to ZSTD_ldm_generateSequences() in chunks.
+ * NOTE: This function returns an error if it runs out of space to store
+ * sequences.
+ */
+size_t ZSTD_ldm_generateSequences(
+ ldmState_t* ldms, rawSeqStore_t* sequences,
+ ldmParams_t const* params, void const* src, size_t srcSize);
+
+/*
+ * ZSTD_ldm_blockCompress():
+ *
+ * Compresses a block using the predefined sequences, along with a secondary
+ * block compressor. The literals section of every sequence is passed to the
+ * secondary block compressor, and those sequences are interspersed with the
+ * predefined sequences. Returns the length of the last literals.
+ * Updates `rawSeqStore.pos` to indicate how many sequences have been consumed.
+ * `rawSeqStore.seq` may also be updated to split the last sequence between two
+ * blocks.
+ * @return The length of the last literals.
+ *
+ * NOTE: The source must be at most the maximum block size, but the predefined
+ * sequences can be any size, and may be longer than the block. In the case that
+ * they are longer than the block, the last sequences may need to be split into
+ * two. We handle that case correctly, and update `rawSeqStore` appropriately.
+ * NOTE: This function does not return any errors.
+ */
+size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ ZSTD_paramSwitch_e useRowMatchFinder,
+ void const* src, size_t srcSize);
+
+/*
+ * ZSTD_ldm_skipSequences():
+ *
+ * Skip past `srcSize` bytes worth of sequences in `rawSeqStore`.
+ * Avoids emitting matches less than `minMatch` bytes.
+ * Must be called for data that is not passed to ZSTD_ldm_blockCompress().
+ */
+void ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize,
+ U32 const minMatch);
+
+/* ZSTD_ldm_skipRawSeqStoreBytes():
+ * Moves forward in rawSeqStore by nbBytes, updating fields 'pos' and 'posInSequence'.
+ * Not to be used in conjunction with ZSTD_ldm_skipSequences().
+ * Must be called for data that is not passed to ZSTD_ldm_blockCompress().
+ */
+void ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes);
+
+/* ZSTD_ldm_getTableSize() :
+ * Estimate the space needed for long distance matching tables, or 0 if LDM
+ * is disabled.
+ */
+size_t ZSTD_ldm_getTableSize(ldmParams_t params);
+
+/* ZSTD_ldm_getMaxNbSeq() :
+ * Return an upper bound on the number of sequences that can be produced by
+ * the long distance matcher, or 0 if LDM is disabled.
+ */
+size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize);
+
+/* ZSTD_ldm_adjustParameters() :
+ * If the params->hashRateLog is not set, set it to its default value based on
+ * windowLog and params->hashLog.
+ *
+ * Ensures that params->bucketSizeLog is <= params->hashLog (setting it to
+ * params->hashLog if it is not).
+ *
+ * Ensures that the minMatchLength >= targetLength during optimal parsing.
+ */
+void ZSTD_ldm_adjustParameters(ldmParams_t* params,
+ ZSTD_compressionParameters const* cParams);
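+
+/* Typical call sequence (illustrative sketch only; the real driver lives in
+ * zstd_compress.c and handles windows, dictionaries and error paths):
+ *
+ *   ZSTD_ldm_adjustParameters(&params, &cParams);
+ *   // reserve ZSTD_ldm_getTableSize(params) bytes of table space and a
+ *   // rawSeqStore_t with capacity ZSTD_ldm_getMaxNbSeq(params, srcSize)
+ *   ZSTD_ldm_generateSequences(&ldmState, &rawSeqStore, &params, src, srcSize);
+ *   ZSTD_ldm_blockCompress(&rawSeqStore, &ms, &seqStore, rep,
+ *                          useRowMatchFinder, src, blockSize);
+ */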
+
+
+#endif /* ZSTD_LDM_H */
diff --git a/lib/zstd/compress/zstd_ldm_geartab.h b/lib/zstd/compress/zstd_ldm_geartab.h
new file mode 100644
index 000000000..647f865be
--- /dev/null
+++ b/lib/zstd/compress/zstd_ldm_geartab.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_LDM_GEARTAB_H
+#define ZSTD_LDM_GEARTAB_H
+
+#include "../common/compiler.h" /* UNUSED_ATTR */
+#include "../common/mem.h" /* U64 */
+
+static UNUSED_ATTR const U64 ZSTD_ldm_gearTab[256] = {
+ 0xf5b8f72c5f77775c, 0x84935f266b7ac412, 0xb647ada9ca730ccc,
+ 0xb065bb4b114fb1de, 0x34584e7e8c3a9fd0, 0x4e97e17c6ae26b05,
+ 0x3a03d743bc99a604, 0xcecd042422c4044f, 0x76de76c58524259e,
+ 0x9c8528f65badeaca, 0x86563706e2097529, 0x2902475fa375d889,
+ 0xafb32a9739a5ebe6, 0xce2714da3883e639, 0x21eaf821722e69e,
+ 0x37b628620b628, 0x49a8d455d88caf5, 0x8556d711e6958140,
+ 0x4f7ae74fc605c1f, 0x829f0c3468bd3a20, 0x4ffdc885c625179e,
+ 0x8473de048a3daf1b, 0x51008822b05646b2, 0x69d75d12b2d1cc5f,
+ 0x8c9d4a19159154bc, 0xc3cc10f4abbd4003, 0xd06ddc1cecb97391,
+ 0xbe48e6e7ed80302e, 0x3481db31cee03547, 0xacc3f67cdaa1d210,
+ 0x65cb771d8c7f96cc, 0x8eb27177055723dd, 0xc789950d44cd94be,
+ 0x934feadc3700b12b, 0x5e485f11edbdf182, 0x1e2e2a46fd64767a,
+ 0x2969ca71d82efa7c, 0x9d46e9935ebbba2e, 0xe056b67e05e6822b,
+ 0x94d73f55739d03a0, 0xcd7010bdb69b5a03, 0x455ef9fcd79b82f4,
+ 0x869cb54a8749c161, 0x38d1a4fa6185d225, 0xb475166f94bbe9bb,
+ 0xa4143548720959f1, 0x7aed4780ba6b26ba, 0xd0ce264439e02312,
+ 0x84366d746078d508, 0xa8ce973c72ed17be, 0x21c323a29a430b01,
+ 0x9962d617e3af80ee, 0xab0ce91d9c8cf75b, 0x530e8ee6d19a4dbc,
+ 0x2ef68c0cf53f5d72, 0xc03a681640a85506, 0x496e4e9f9c310967,
+ 0x78580472b59b14a0, 0x273824c23b388577, 0x66bf923ad45cb553,
+ 0x47ae1a5a2492ba86, 0x35e304569e229659, 0x4765182a46870b6f,
+ 0x6cbab625e9099412, 0xddac9a2e598522c1, 0x7172086e666624f2,
+ 0xdf5003ca503b7837, 0x88c0c1db78563d09, 0x58d51865acfc289d,
+ 0x177671aec65224f1, 0xfb79d8a241e967d7, 0x2be1e101cad9a49a,
+ 0x6625682f6e29186b, 0x399553457ac06e50, 0x35dffb4c23abb74,
+ 0x429db2591f54aade, 0xc52802a8037d1009, 0x6acb27381f0b25f3,
+ 0xf45e2551ee4f823b, 0x8b0ea2d99580c2f7, 0x3bed519cbcb4e1e1,
+ 0xff452823dbb010a, 0x9d42ed614f3dd267, 0x5b9313c06257c57b,
+ 0xa114b8008b5e1442, 0xc1fe311c11c13d4b, 0x66e8763ea34c5568,
+ 0x8b982af1c262f05d, 0xee8876faaa75fbb7, 0x8a62a4d0d172bb2a,
+ 0xc13d94a3b7449a97, 0x6dbbba9dc15d037c, 0xc786101f1d92e0f1,
+ 0xd78681a907a0b79b, 0xf61aaf2962c9abb9, 0x2cfd16fcd3cb7ad9,
+ 0x868c5b6744624d21, 0x25e650899c74ddd7, 0xba042af4a7c37463,
+ 0x4eb1a539465a3eca, 0xbe09dbf03b05d5ca, 0x774e5a362b5472ba,
+ 0x47a1221229d183cd, 0x504b0ca18ef5a2df, 0xdffbdfbde2456eb9,
+ 0x46cd2b2fbee34634, 0xf2aef8fe819d98c3, 0x357f5276d4599d61,
+ 0x24a5483879c453e3, 0x88026889192b4b9, 0x28da96671782dbec,
+ 0x4ef37c40588e9aaa, 0x8837b90651bc9fb3, 0xc164f741d3f0e5d6,
+ 0xbc135a0a704b70ba, 0x69cd868f7622ada, 0xbc37ba89e0b9c0ab,
+ 0x47c14a01323552f6, 0x4f00794bacee98bb, 0x7107de7d637a69d5,
+ 0x88af793bb6f2255e, 0xf3c6466b8799b598, 0xc288c616aa7f3b59,
+ 0x81ca63cf42fca3fd, 0x88d85ace36a2674b, 0xd056bd3792389e7,
+ 0xe55c396c4e9dd32d, 0xbefb504571e6c0a6, 0x96ab32115e91e8cc,
+ 0xbf8acb18de8f38d1, 0x66dae58801672606, 0x833b6017872317fb,
+ 0xb87c16f2d1c92864, 0xdb766a74e58b669c, 0x89659f85c61417be,
+ 0xc8daad856011ea0c, 0x76a4b565b6fe7eae, 0xa469d085f6237312,
+ 0xaaf0365683a3e96c, 0x4dbb746f8424f7b8, 0x638755af4e4acc1,
+ 0x3d7807f5bde64486, 0x17be6d8f5bbb7639, 0x903f0cd44dc35dc,
+ 0x67b672eafdf1196c, 0xa676ff93ed4c82f1, 0x521d1004c5053d9d,
+ 0x37ba9ad09ccc9202, 0x84e54d297aacfb51, 0xa0b4b776a143445,
+ 0x820d471e20b348e, 0x1874383cb83d46dc, 0x97edeec7a1efe11c,
+ 0xb330e50b1bdc42aa, 0x1dd91955ce70e032, 0xa514cdb88f2939d5,
+ 0x2791233fd90db9d3, 0x7b670a4cc50f7a9b, 0x77c07d2a05c6dfa5,
+ 0xe3778b6646d0a6fa, 0xb39c8eda47b56749, 0x933ed448addbef28,
+ 0xaf846af6ab7d0bf4, 0xe5af208eb666e49, 0x5e6622f73534cd6a,
+ 0x297daeca42ef5b6e, 0x862daef3d35539a6, 0xe68722498f8e1ea9,
+ 0x981c53093dc0d572, 0xfa09b0bfbf86fbf5, 0x30b1e96166219f15,
+ 0x70e7d466bdc4fb83, 0x5a66736e35f2a8e9, 0xcddb59d2b7c1baef,
+ 0xd6c7d247d26d8996, 0xea4e39eac8de1ba3, 0x539c8bb19fa3aff2,
+ 0x9f90e4c5fd508d8, 0xa34e5956fbaf3385, 0x2e2f8e151d3ef375,
+ 0x173691e9b83faec1, 0xb85a8d56bf016379, 0x8382381267408ae3,
+ 0xb90f901bbdc0096d, 0x7c6ad32933bcec65, 0x76bb5e2f2c8ad595,
+ 0x390f851a6cf46d28, 0xc3e6064da1c2da72, 0xc52a0c101cfa5389,
+ 0xd78eaf84a3fbc530, 0x3781b9e2288b997e, 0x73c2f6dea83d05c4,
+ 0x4228e364c5b5ed7, 0x9d7a3edf0da43911, 0x8edcfeda24686756,
+ 0x5e7667a7b7a9b3a1, 0x4c4f389fa143791d, 0xb08bc1023da7cddc,
+ 0x7ab4be3ae529b1cc, 0x754e6132dbe74ff9, 0x71635442a839df45,
+ 0x2f6fb1643fbe52de, 0x961e0a42cf7a8177, 0xf3b45d83d89ef2ea,
+ 0xee3de4cf4a6e3e9b, 0xcd6848542c3295e7, 0xe4cee1664c78662f,
+ 0x9947548b474c68c4, 0x25d73777a5ed8b0b, 0xc915b1d636b7fc,
+ 0x21c2ba75d9b0d2da, 0x5f6b5dcf608a64a1, 0xdcf333255ff9570c,
+ 0x633b922418ced4ee, 0xc136dde0b004b34a, 0x58cc83b05d4b2f5a,
+ 0x5eb424dda28e42d2, 0x62df47369739cd98, 0xb4e0b42485e4ce17,
+ 0x16e1f0c1f9a8d1e7, 0x8ec3916707560ebf, 0x62ba6e2df2cc9db3,
+ 0xcbf9f4ff77d83a16, 0x78d9d7d07d2bbcc4, 0xef554ce1e02c41f4,
+ 0x8d7581127eccf94d, 0xa9b53336cb3c8a05, 0x38c42c0bf45c4f91,
+ 0x640893cdf4488863, 0x80ec34bc575ea568, 0x39f324f5b48eaa40,
+ 0xe9d9ed1f8eff527f, 0x9224fc058cc5a214, 0xbaba00b04cfe7741,
+ 0x309a9f120fcf52af, 0xa558f3ec65626212, 0x424bec8b7adabe2f,
+ 0x41622513a6aea433, 0xb88da2d5324ca798, 0xd287733b245528a4,
+ 0x9a44697e6d68aec3, 0x7b1093be2f49bb28, 0x50bbec632e3d8aad,
+ 0x6cd90723e1ea8283, 0x897b9e7431b02bf3, 0x219efdcb338a7047,
+ 0x3b0311f0a27c0656, 0xdb17bf91c0db96e7, 0x8cd4fd6b4e85a5b2,
+ 0xfab071054ba6409d, 0x40d6fe831fa9dfd9, 0xaf358debad7d791e,
+ 0xeb8d0e25a65e3e58, 0xbbcbd3df14e08580, 0xcf751f27ecdab2b,
+ 0x2b4da14f2613d8f4
+};
+
+#endif /* ZSTD_LDM_GEARTAB_H */
diff --git a/lib/zstd/compress/zstd_opt.c b/lib/zstd/compress/zstd_opt.c
new file mode 100644
index 000000000..fd82acfda
--- /dev/null
+++ b/lib/zstd/compress/zstd_opt.c
@@ -0,0 +1,1446 @@
+/*
+ * Copyright (c) Przemyslaw Skibinski, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#include "zstd_compress_internal.h"
+#include "hist.h"
+#include "zstd_opt.h"
+
+
+#define ZSTD_LITFREQ_ADD 2 /* scaling factor for litFreq, so that frequencies adapt faster to new stats */
+#define ZSTD_MAX_PRICE (1<<30)
+
+#define ZSTD_PREDEF_THRESHOLD 1024 /* if srcSize < ZSTD_PREDEF_THRESHOLD, symbols' cost is assumed static, directly determined by pre-defined distributions */
+
+
+/*-*************************************
+* Price functions for optimal parser
+***************************************/
+
+#if 0 /* approximation at bit level (for tests) */
+# define BITCOST_ACCURACY 0
+# define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
+# define WEIGHT(stat, opt) ((void)opt, ZSTD_bitWeight(stat))
+#elif 0 /* fractional bit accuracy (for tests) */
+# define BITCOST_ACCURACY 8
+# define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
+# define WEIGHT(stat,opt) ((void)opt, ZSTD_fracWeight(stat))
+#else /* opt==approx, ultra==accurate */
+# define BITCOST_ACCURACY 8
+# define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
+# define WEIGHT(stat,opt) (opt ? ZSTD_fracWeight(stat) : ZSTD_bitWeight(stat))
+#endif
+
+MEM_STATIC U32 ZSTD_bitWeight(U32 stat)
+{
+ return (ZSTD_highbit32(stat+1) * BITCOST_MULTIPLIER);
+}
+
+MEM_STATIC U32 ZSTD_fracWeight(U32 rawStat)
+{
+ U32 const stat = rawStat + 1;
+ U32 const hb = ZSTD_highbit32(stat);
+ U32 const BWeight = hb * BITCOST_MULTIPLIER;
+ U32 const FWeight = (stat << BITCOST_ACCURACY) >> hb;
+ U32 const weight = BWeight + FWeight;
+ assert(hb + BITCOST_ACCURACY < 31);
+ return weight;
+}
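+
+/* Worked example for ZSTD_fracWeight() above: rawStat == 5 gives stat == 6,
+ * hb == 2, so weight == 2*256 + ((6 << 8) >> 2) == 512 + 384 == 896.
+ * Only weight differences matter: prices below are computed as a base price
+ * minus WEIGHT(freq), so the constant offset cancels out. */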
+
+#if (DEBUGLEVEL>=2)
+/* debugging function,
+ * @return price in bytes as fractional value
+ * for debug messages only */
+MEM_STATIC double ZSTD_fCost(U32 price)
+{
+ return (double)price / (BITCOST_MULTIPLIER*8);
+}
+#endif
+
+static int ZSTD_compressedLiterals(optState_t const* const optPtr)
+{
+ return optPtr->literalCompressionMode != ZSTD_ps_disable;
+}
+
+static void ZSTD_setBasePrices(optState_t* optPtr, int optLevel)
+{
+ if (ZSTD_compressedLiterals(optPtr))
+ optPtr->litSumBasePrice = WEIGHT(optPtr->litSum, optLevel);
+ optPtr->litLengthSumBasePrice = WEIGHT(optPtr->litLengthSum, optLevel);
+ optPtr->matchLengthSumBasePrice = WEIGHT(optPtr->matchLengthSum, optLevel);
+ optPtr->offCodeSumBasePrice = WEIGHT(optPtr->offCodeSum, optLevel);
+}
+
+
+static U32 sum_u32(const unsigned table[], size_t nbElts)
+{
+ size_t n;
+ U32 total = 0;
+ for (n=0; n<nbElts; n++) {
+ total += table[n];
+ }
+ return total;
+}
+
+static U32 ZSTD_downscaleStats(unsigned* table, U32 lastEltIndex, U32 shift)
+{
+ U32 s, sum=0;
+ DEBUGLOG(5, "ZSTD_downscaleStats (nbElts=%u, shift=%u)", (unsigned)lastEltIndex+1, (unsigned)shift);
+ assert(shift < 30);
+ for (s=0; s<lastEltIndex+1; s++) {
+ table[s] = 1 + (table[s] >> shift);
+ sum += table[s];
+ }
+ return sum;
+}
+
+/* ZSTD_scaleStats() :
+ * reduce all elements in table if the sum is too large
+ * return the resulting sum of elements */
+static U32 ZSTD_scaleStats(unsigned* table, U32 lastEltIndex, U32 logTarget)
+{
+ U32 const prevsum = sum_u32(table, lastEltIndex+1);
+ U32 const factor = prevsum >> logTarget;
+ DEBUGLOG(5, "ZSTD_scaleStats (nbElts=%u, target=%u)", (unsigned)lastEltIndex+1, (unsigned)logTarget);
+ assert(logTarget < 30);
+ if (factor <= 1) return prevsum;
+ return ZSTD_downscaleStats(table, lastEltIndex, ZSTD_highbit32(factor));
+}
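+
+/* Example for ZSTD_scaleStats() above: with prevsum == 300000 and
+ * logTarget == 11, factor == 300000 >> 11 == 146 and
+ * ZSTD_highbit32(146) == 7, so every element is rescaled to
+ * 1 + (element >> 7), bringing the sum back near the 2^11 target. */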
+
+/* ZSTD_rescaleFreqs() :
+ * if first block (detected by optPtr->litLengthSum == 0) : init statistics
+ * take hints from dictionary if there is one
+ * or init from zero if there is none,
+ * using src for literals stats, and baseline stats for sequence symbols
+ * otherwise downscale existing stats, to be used as seed for next block.
+ */
+static void
+ZSTD_rescaleFreqs(optState_t* const optPtr,
+ const BYTE* const src, size_t const srcSize,
+ int const optLevel)
+{
+ int const compressedLiterals = ZSTD_compressedLiterals(optPtr);
+ DEBUGLOG(5, "ZSTD_rescaleFreqs (srcSize=%u)", (unsigned)srcSize);
+ optPtr->priceType = zop_dynamic;
+
+ if (optPtr->litLengthSum == 0) { /* first block : init */
+ if (srcSize <= ZSTD_PREDEF_THRESHOLD) { /* heuristic */
+ DEBUGLOG(5, "(srcSize <= ZSTD_PREDEF_THRESHOLD) => zop_predef");
+ optPtr->priceType = zop_predef;
+ }
+
+ assert(optPtr->symbolCosts != NULL);
+ if (optPtr->symbolCosts->huf.repeatMode == HUF_repeat_valid) {
+ /* huffman table presumed generated by dictionary */
+ optPtr->priceType = zop_dynamic;
+
+ if (compressedLiterals) {
+ unsigned lit;
+ assert(optPtr->litFreq != NULL);
+ optPtr->litSum = 0;
+ for (lit=0; lit<=MaxLit; lit++) {
+ U32 const scaleLog = 11; /* scale to 2K */
+ U32 const bitCost = HUF_getNbBitsFromCTable(optPtr->symbolCosts->huf.CTable, lit);
+ assert(bitCost <= scaleLog);
+ optPtr->litFreq[lit] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
+ optPtr->litSum += optPtr->litFreq[lit];
+ } }
+
+ { unsigned ll;
+ FSE_CState_t llstate;
+ FSE_initCState(&llstate, optPtr->symbolCosts->fse.litlengthCTable);
+ optPtr->litLengthSum = 0;
+ for (ll=0; ll<=MaxLL; ll++) {
+ U32 const scaleLog = 10; /* scale to 1K */
+ U32 const bitCost = FSE_getMaxNbBits(llstate.symbolTT, ll);
+ assert(bitCost < scaleLog);
+ optPtr->litLengthFreq[ll] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
+ optPtr->litLengthSum += optPtr->litLengthFreq[ll];
+ } }
+
+ { unsigned ml;
+ FSE_CState_t mlstate;
+ FSE_initCState(&mlstate, optPtr->symbolCosts->fse.matchlengthCTable);
+ optPtr->matchLengthSum = 0;
+ for (ml=0; ml<=MaxML; ml++) {
+ U32 const scaleLog = 10;
+ U32 const bitCost = FSE_getMaxNbBits(mlstate.symbolTT, ml);
+ assert(bitCost < scaleLog);
+ optPtr->matchLengthFreq[ml] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
+ optPtr->matchLengthSum += optPtr->matchLengthFreq[ml];
+ } }
+
+ { unsigned of;
+ FSE_CState_t ofstate;
+ FSE_initCState(&ofstate, optPtr->symbolCosts->fse.offcodeCTable);
+ optPtr->offCodeSum = 0;
+ for (of=0; of<=MaxOff; of++) {
+ U32 const scaleLog = 10;
+ U32 const bitCost = FSE_getMaxNbBits(ofstate.symbolTT, of);
+ assert(bitCost < scaleLog);
+ optPtr->offCodeFreq[of] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
+ optPtr->offCodeSum += optPtr->offCodeFreq[of];
+ } }
+
+ } else { /* not a dictionary */
+
+ assert(optPtr->litFreq != NULL);
+ if (compressedLiterals) {
+ unsigned lit = MaxLit;
+ HIST_count_simple(optPtr->litFreq, &lit, src, srcSize); /* use raw first block to init statistics */
+ optPtr->litSum = ZSTD_downscaleStats(optPtr->litFreq, MaxLit, 8);
+ }
+
+ { unsigned const baseLLfreqs[MaxLL+1] = {
+ 4, 2, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1
+ };
+ ZSTD_memcpy(optPtr->litLengthFreq, baseLLfreqs, sizeof(baseLLfreqs));
+ optPtr->litLengthSum = sum_u32(baseLLfreqs, MaxLL+1);
+ }
+
+ { unsigned ml;
+ for (ml=0; ml<=MaxML; ml++)
+ optPtr->matchLengthFreq[ml] = 1;
+ }
+ optPtr->matchLengthSum = MaxML+1;
+
+ { unsigned const baseOFCfreqs[MaxOff+1] = {
+ 6, 2, 1, 1, 2, 3, 4, 4,
+ 4, 3, 2, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1
+ };
+ ZSTD_memcpy(optPtr->offCodeFreq, baseOFCfreqs, sizeof(baseOFCfreqs));
+ optPtr->offCodeSum = sum_u32(baseOFCfreqs, MaxOff+1);
+ }
+
+
+ }
+
+ } else { /* new block : re-use previous statistics, scaled down */
+
+ if (compressedLiterals)
+ optPtr->litSum = ZSTD_scaleStats(optPtr->litFreq, MaxLit, 12);
+ optPtr->litLengthSum = ZSTD_scaleStats(optPtr->litLengthFreq, MaxLL, 11);
+ optPtr->matchLengthSum = ZSTD_scaleStats(optPtr->matchLengthFreq, MaxML, 11);
+ optPtr->offCodeSum = ZSTD_scaleStats(optPtr->offCodeFreq, MaxOff, 11);
+ }
+
+ ZSTD_setBasePrices(optPtr, optLevel);
+}
+
+/* ZSTD_rawLiteralsCost() :
+ * price of literals (only) in specified segment (whose length can be 0).
+ * does not include price of literalLength symbol */
+static U32 ZSTD_rawLiteralsCost(const BYTE* const literals, U32 const litLength,
+ const optState_t* const optPtr,
+ int optLevel)
+{
+ if (litLength == 0) return 0;
+
+ if (!ZSTD_compressedLiterals(optPtr))
+ return (litLength << 3) * BITCOST_MULTIPLIER; /* Uncompressed - 8 bytes per literal. */
+
+ if (optPtr->priceType == zop_predef)
+ return (litLength*6) * BITCOST_MULTIPLIER; /* 6 bit per literal - no statistic used */
+
+ /* dynamic statistics */
+ { U32 price = litLength * optPtr->litSumBasePrice;
+ U32 u;
+ for (u=0; u < litLength; u++) {
+ assert(WEIGHT(optPtr->litFreq[literals[u]], optLevel) <= optPtr->litSumBasePrice); /* literal cost should never be negative */
+ price -= WEIGHT(optPtr->litFreq[literals[u]], optLevel);
+ }
+ return price;
+ }
+}
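+
+/* In effect, the dynamic branch above prices the segment as
+ *   sum over literals of ( WEIGHT(litSum) - WEIGHT(litFreq[lit]) ),
+ * i.e. roughly log2(litSum / litFreq[lit]) bits per literal, expressed in
+ * BITCOST_MULTIPLIER units. */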
+
+/* ZSTD_litLengthPrice() :
+ * cost of literalLength symbol */
+static U32 ZSTD_litLengthPrice(U32 const litLength, const optState_t* const optPtr, int optLevel)
+{
+ assert(litLength <= ZSTD_BLOCKSIZE_MAX);
+ if (optPtr->priceType == zop_predef)
+ return WEIGHT(litLength, optLevel);
+ /* We can't compute the litLength price for sizes >= ZSTD_BLOCKSIZE_MAX
+ * because it isn't representable in the zstd format. So instead just
+ * call it 1 bit more than ZSTD_BLOCKSIZE_MAX - 1. In this case the block
+ * would be all literals.
+ */
+ if (litLength == ZSTD_BLOCKSIZE_MAX)
+ return BITCOST_MULTIPLIER + ZSTD_litLengthPrice(ZSTD_BLOCKSIZE_MAX - 1, optPtr, optLevel);
+
+ /* dynamic statistics */
+ { U32 const llCode = ZSTD_LLcode(litLength);
+ return (LL_bits[llCode] * BITCOST_MULTIPLIER)
+ + optPtr->litLengthSumBasePrice
+ - WEIGHT(optPtr->litLengthFreq[llCode], optLevel);
+ }
+}
+
+/* ZSTD_getMatchPrice() :
+ * Provides the cost of the match part (offset + matchLength) of a sequence
+ * Must be combined with ZSTD_fullLiteralsCost() to get the full cost of a sequence.
+ * @offcode : expects a scale where 0,1,2 are repcodes 1-3, and 3+ are real_offsets+2
+ * @optLevel: when <2, favors small offset for decompression speed (improved cache efficiency)
+ */
+FORCE_INLINE_TEMPLATE U32
+ZSTD_getMatchPrice(U32 const offcode,
+ U32 const matchLength,
+ const optState_t* const optPtr,
+ int const optLevel)
+{
+ U32 price;
+ U32 const offCode = ZSTD_highbit32(STORED_TO_OFFBASE(offcode));
+ U32 const mlBase = matchLength - MINMATCH;
+ assert(matchLength >= MINMATCH);
+
+ if (optPtr->priceType == zop_predef) /* fixed scheme, do not use statistics */
+ return WEIGHT(mlBase, optLevel) + ((16 + offCode) * BITCOST_MULTIPLIER);
+
+ /* dynamic statistics */
+ price = (offCode * BITCOST_MULTIPLIER) + (optPtr->offCodeSumBasePrice - WEIGHT(optPtr->offCodeFreq[offCode], optLevel));
+ if ((optLevel<2) /*static*/ && offCode >= 20)
+ price += (offCode-19)*2 * BITCOST_MULTIPLIER; /* handicap for long distance offsets, favor decompression speed */
+
+ /* match Length */
+ { U32 const mlCode = ZSTD_MLcode(mlBase);
+ price += (ML_bits[mlCode] * BITCOST_MULTIPLIER) + (optPtr->matchLengthSumBasePrice - WEIGHT(optPtr->matchLengthFreq[mlCode], optLevel));
+ }
+
+ price += BITCOST_MULTIPLIER / 5; /* heuristic : make matches a bit more costly, to favor fewer sequences -> faster decompression speed */
+
+ DEBUGLOG(8, "ZSTD_getMatchPrice(ml:%u) = %u", matchLength, price);
+ return price;
+}
+
+/* ZSTD_updateStats() :
+ * assumption : literals + litLength <= iend */
+static void ZSTD_updateStats(optState_t* const optPtr,
+ U32 litLength, const BYTE* literals,
+ U32 offsetCode, U32 matchLength)
+{
+ /* literals */
+ if (ZSTD_compressedLiterals(optPtr)) {
+ U32 u;
+ for (u=0; u < litLength; u++)
+ optPtr->litFreq[literals[u]] += ZSTD_LITFREQ_ADD;
+ optPtr->litSum += litLength*ZSTD_LITFREQ_ADD;
+ }
+
+ /* literal Length */
+ { U32 const llCode = ZSTD_LLcode(litLength);
+ optPtr->litLengthFreq[llCode]++;
+ optPtr->litLengthSum++;
+ }
+
+ /* offset code : expected to follow storeSeq() numeric representation */
+ { U32 const offCode = ZSTD_highbit32(STORED_TO_OFFBASE(offsetCode));
+ assert(offCode <= MaxOff);
+ optPtr->offCodeFreq[offCode]++;
+ optPtr->offCodeSum++;
+ }
+
+ /* match Length */
+ { U32 const mlBase = matchLength - MINMATCH;
+ U32 const mlCode = ZSTD_MLcode(mlBase);
+ optPtr->matchLengthFreq[mlCode]++;
+ optPtr->matchLengthSum++;
+ }
+}
+
+
+/* ZSTD_readMINMATCH() :
+ * function safe only for comparisons
+ * assumption : memPtr must be at least 4 bytes before end of buffer */
+MEM_STATIC U32 ZSTD_readMINMATCH(const void* memPtr, U32 length)
+{
+ switch (length)
+ {
+ default :
+ case 4 : return MEM_read32(memPtr);
+ case 3 : if (MEM_isLittleEndian())
+ return MEM_read32(memPtr)<<8;
+ else
+ return MEM_read32(memPtr)>>8;
+ }
+}
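+
+/* Example for ZSTD_readMINMATCH() above: with length == 3 on a
+ * little-endian machine the 4th byte is shifted out, so two 3-byte
+ * prefixes compare equal iff the returned values are equal; the result is
+ * only meaningful for such comparisons. */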
+
+
+/* Update hashTable3 up to ip (excluded)
+ Assumption : always within prefix (i.e. not within extDict) */
+static U32 ZSTD_insertAndFindFirstIndexHash3 (const ZSTD_matchState_t* ms,
+ U32* nextToUpdate3,
+ const BYTE* const ip)
+{
+ U32* const hashTable3 = ms->hashTable3;
+ U32 const hashLog3 = ms->hashLog3;
+ const BYTE* const base = ms->window.base;
+ U32 idx = *nextToUpdate3;
+ U32 const target = (U32)(ip - base);
+ size_t const hash3 = ZSTD_hash3Ptr(ip, hashLog3);
+ assert(hashLog3 > 0);
+
+ while(idx < target) {
+ hashTable3[ZSTD_hash3Ptr(base+idx, hashLog3)] = idx;
+ idx++;
+ }
+
+ *nextToUpdate3 = target;
+ return hashTable3[hash3];
+}
+
+
+/*-*************************************
+* Binary Tree search
+***************************************/
+/* ZSTD_insertBt1() : add one or multiple positions to tree.
+ * @param ip assumed <= iend-8 .
+ * @param target The target of ZSTD_updateTree_internal() - we are filling to this position
+ * @return : nb of positions added */
+static U32 ZSTD_insertBt1(
+ const ZSTD_matchState_t* ms,
+ const BYTE* const ip, const BYTE* const iend,
+ U32 const target,
+ U32 const mls, const int extDict)
+{
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ U32* const hashTable = ms->hashTable;
+ U32 const hashLog = cParams->hashLog;
+ size_t const h = ZSTD_hashPtr(ip, hashLog, mls);
+ U32* const bt = ms->chainTable;
+ U32 const btLog = cParams->chainLog - 1;
+ U32 const btMask = (1 << btLog) - 1;
+ U32 matchIndex = hashTable[h];
+ size_t commonLengthSmaller=0, commonLengthLarger=0;
+ const BYTE* const base = ms->window.base;
+ const BYTE* const dictBase = ms->window.dictBase;
+ const U32 dictLimit = ms->window.dictLimit;
+ const BYTE* const dictEnd = dictBase + dictLimit;
+ const BYTE* const prefixStart = base + dictLimit;
+ const BYTE* match;
+ const U32 curr = (U32)(ip-base);
+ const U32 btLow = btMask >= curr ? 0 : curr - btMask;
+ U32* smallerPtr = bt + 2*(curr&btMask);
+ U32* largerPtr = smallerPtr + 1;
+ U32 dummy32; /* to be nullified at the end */
+ /* windowLow is based on target because
+ * we only need positions that will be in the window at the end of the tree update.
+ */
+ U32 const windowLow = ZSTD_getLowestMatchIndex(ms, target, cParams->windowLog);
+ U32 matchEndIdx = curr+8+1;
+ size_t bestLength = 8;
+ U32 nbCompares = 1U << cParams->searchLog;
+#ifdef ZSTD_C_PREDICT
+ U32 predictedSmall = *(bt + 2*((curr-1)&btMask) + 0);
+ U32 predictedLarge = *(bt + 2*((curr-1)&btMask) + 1);
+ predictedSmall += (predictedSmall>0);
+ predictedLarge += (predictedLarge>0);
+#endif /* ZSTD_C_PREDICT */
+
+ DEBUGLOG(8, "ZSTD_insertBt1 (%u)", curr);
+
+ assert(curr <= target);
+ assert(ip <= iend-8); /* required for h calculation */
+ hashTable[h] = curr; /* Update Hash Table */
+
+ assert(windowLow > 0);
+ for (; nbCompares && (matchIndex >= windowLow); --nbCompares) {
+ U32* const nextPtr = bt + 2*(matchIndex & btMask);
+ size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
+ assert(matchIndex < curr);
+
+#ifdef ZSTD_C_PREDICT /* note : can create issues when hlog small <= 11 */
+ const U32* predictPtr = bt + 2*((matchIndex-1) & btMask); /* written this way, as bt is a roll buffer */
+ if (matchIndex == predictedSmall) {
+ /* no need to check length, result known */
+ *smallerPtr = matchIndex;
+ if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */
+ smallerPtr = nextPtr+1; /* new "smaller" => larger of match */
+ matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */
+ predictedSmall = predictPtr[1] + (predictPtr[1]>0);
+ continue;
+ }
+ if (matchIndex == predictedLarge) {
+ *largerPtr = matchIndex;
+ if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */
+ largerPtr = nextPtr;
+ matchIndex = nextPtr[0];
+ predictedLarge = predictPtr[0] + (predictPtr[0]>0);
+ continue;
+ }
+#endif
+
+ if (!extDict || (matchIndex+matchLength >= dictLimit)) {
+ assert(matchIndex+matchLength >= dictLimit); /* might be wrong if actually extDict */
+ match = base + matchIndex;
+ matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
+ } else {
+ match = dictBase + matchIndex;
+ matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
+ if (matchIndex+matchLength >= dictLimit)
+ match = base + matchIndex; /* to prepare for next usage of match[matchLength] */
+ }
+
+ if (matchLength > bestLength) {
+ bestLength = matchLength;
+ if (matchLength > matchEndIdx - matchIndex)
+ matchEndIdx = matchIndex + (U32)matchLength;
+ }
+
+ if (ip+matchLength == iend) { /* equal : no way to know if inf or sup */
+ break; /* drop, to guarantee consistency; misses a bit of compression, but other solutions can corrupt the tree */
+ }
+
+ if (match[matchLength] < ip[matchLength]) { /* necessarily within buffer */
+ /* match is smaller than current */
+ *smallerPtr = matchIndex; /* update smaller idx */
+ commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
+ if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop searching */
+ smallerPtr = nextPtr+1; /* new "candidate" => larger than match, which was smaller than target */
+ matchIndex = nextPtr[1]; /* new matchIndex, larger than previous and closer to current */
+ } else {
+ /* match is larger than current */
+ *largerPtr = matchIndex;
+ commonLengthLarger = matchLength;
+ if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop searching */
+ largerPtr = nextPtr;
+ matchIndex = nextPtr[0];
+ } }
+
+ *smallerPtr = *largerPtr = 0;
+ { U32 positions = 0;
+ if (bestLength > 384) positions = MIN(192, (U32)(bestLength - 384)); /* speed optimization */
+ assert(matchEndIdx > curr + 8);
+ return MAX(positions, matchEndIdx - (curr + 8));
+ }
+}
+
+FORCE_INLINE_TEMPLATE
+void ZSTD_updateTree_internal(
+ ZSTD_matchState_t* ms,
+ const BYTE* const ip, const BYTE* const iend,
+ const U32 mls, const ZSTD_dictMode_e dictMode)
+{
+ const BYTE* const base = ms->window.base;
+ U32 const target = (U32)(ip - base);
+ U32 idx = ms->nextToUpdate;
+ DEBUGLOG(6, "ZSTD_updateTree_internal, from %u to %u (dictMode:%u)",
+ idx, target, dictMode);
+
+ while(idx < target) {
+ U32 const forward = ZSTD_insertBt1(ms, base+idx, iend, target, mls, dictMode == ZSTD_extDict);
+ assert(idx < (U32)(idx + forward));
+ idx += forward;
+ }
+ assert((size_t)(ip - base) <= (size_t)(U32)(-1));
+ assert((size_t)(iend - base) <= (size_t)(U32)(-1));
+ ms->nextToUpdate = target;
+}
+
+void ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend) {
+ ZSTD_updateTree_internal(ms, ip, iend, ms->cParams.minMatch, ZSTD_noDict);
+}
+
+FORCE_INLINE_TEMPLATE
+U32 ZSTD_insertBtAndGetAllMatches (
+ ZSTD_match_t* matches, /* store result (found matches) in this table (presumed large enough) */
+ ZSTD_matchState_t* ms,
+ U32* nextToUpdate3,
+ const BYTE* const ip, const BYTE* const iLimit, const ZSTD_dictMode_e dictMode,
+ const U32 rep[ZSTD_REP_NUM],
+ U32 const ll0, /* tells if associated literal length is 0 or not. This value must be 0 or 1 */
+ const U32 lengthToBeat,
+ U32 const mls /* template */)
+{
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1);
+ const BYTE* const base = ms->window.base;
+ U32 const curr = (U32)(ip-base);
+ U32 const hashLog = cParams->hashLog;
+ U32 const minMatch = (mls==3) ? 3 : 4;
+ U32* const hashTable = ms->hashTable;
+ size_t const h = ZSTD_hashPtr(ip, hashLog, mls);
+ U32 matchIndex = hashTable[h];
+ U32* const bt = ms->chainTable;
+ U32 const btLog = cParams->chainLog - 1;
+ U32 const btMask= (1U << btLog) - 1;
+ size_t commonLengthSmaller=0, commonLengthLarger=0;
+ const BYTE* const dictBase = ms->window.dictBase;
+ U32 const dictLimit = ms->window.dictLimit;
+ const BYTE* const dictEnd = dictBase + dictLimit;
+ const BYTE* const prefixStart = base + dictLimit;
+ U32 const btLow = (btMask >= curr) ? 0 : curr - btMask;
+ U32 const windowLow = ZSTD_getLowestMatchIndex(ms, curr, cParams->windowLog);
+ U32 const matchLow = windowLow ? windowLow : 1;
+ U32* smallerPtr = bt + 2*(curr&btMask);
+ U32* largerPtr = bt + 2*(curr&btMask) + 1;
+ U32 matchEndIdx = curr+8+1; /* farthest referenced position of any match => detects repetitive patterns */
+ U32 dummy32; /* to be nullified at the end */
+ U32 mnum = 0;
+ U32 nbCompares = 1U << cParams->searchLog;
+
+ const ZSTD_matchState_t* dms = dictMode == ZSTD_dictMatchState ? ms->dictMatchState : NULL;
+ const ZSTD_compressionParameters* const dmsCParams =
+ dictMode == ZSTD_dictMatchState ? &dms->cParams : NULL;
+ const BYTE* const dmsBase = dictMode == ZSTD_dictMatchState ? dms->window.base : NULL;
+ const BYTE* const dmsEnd = dictMode == ZSTD_dictMatchState ? dms->window.nextSrc : NULL;
+ U32 const dmsHighLimit = dictMode == ZSTD_dictMatchState ? (U32)(dmsEnd - dmsBase) : 0;
+ U32 const dmsLowLimit = dictMode == ZSTD_dictMatchState ? dms->window.lowLimit : 0;
+ U32 const dmsIndexDelta = dictMode == ZSTD_dictMatchState ? windowLow - dmsHighLimit : 0;
+ U32 const dmsHashLog = dictMode == ZSTD_dictMatchState ? dmsCParams->hashLog : hashLog;
+ U32 const dmsBtLog = dictMode == ZSTD_dictMatchState ? dmsCParams->chainLog - 1 : btLog;
+ U32 const dmsBtMask = dictMode == ZSTD_dictMatchState ? (1U << dmsBtLog) - 1 : 0;
+ U32 const dmsBtLow = dictMode == ZSTD_dictMatchState && dmsBtMask < dmsHighLimit - dmsLowLimit ? dmsHighLimit - dmsBtMask : dmsLowLimit;
+
+ size_t bestLength = lengthToBeat-1;
+ DEBUGLOG(8, "ZSTD_insertBtAndGetAllMatches: current=%u", curr);
+
+ /* check repCode */
+ assert(ll0 <= 1); /* necessarily 1 or 0 */
+ { U32 const lastR = ZSTD_REP_NUM + ll0;
+ U32 repCode;
+ for (repCode = ll0; repCode < lastR; repCode++) {
+ U32 const repOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
+ U32 const repIndex = curr - repOffset;
+ U32 repLen = 0;
+ assert(curr >= dictLimit);
+ if (repOffset-1 /* intentional overflow, discards 0 and -1 */ < curr-dictLimit) { /* equivalent to `curr > repIndex >= dictLimit` */
+ /* We must validate the repcode offset because when we're using a dictionary the
+ * valid offset range shrinks when the dictionary goes out of bounds.
+ */
+ if ((repIndex >= windowLow) & (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(ip - repOffset, minMatch))) {
+ repLen = (U32)ZSTD_count(ip+minMatch, ip+minMatch-repOffset, iLimit) + minMatch;
+ }
+ } else { /* repIndex < dictLimit || repIndex >= curr */
+ const BYTE* const repMatch = dictMode == ZSTD_dictMatchState ?
+ dmsBase + repIndex - dmsIndexDelta :
+ dictBase + repIndex;
+ assert(curr >= windowLow);
+ if ( dictMode == ZSTD_extDict
+ && ( ((repOffset-1) /*intentional overflow*/ < curr - windowLow) /* equivalent to `curr > repIndex >= windowLow` */
+ & (((U32)((dictLimit-1) - repIndex) >= 3) ) /* intentional overflow : do not test positions overlapping 2 memory segments */)
+ && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) {
+ repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dictEnd, prefixStart) + minMatch;
+ }
+ if (dictMode == ZSTD_dictMatchState
+ && ( ((repOffset-1) /*intentional overflow*/ < curr - (dmsLowLimit + dmsIndexDelta)) /* equivalent to `curr > repIndex >= dmsLowLimit` */
+ & ((U32)((dictLimit-1) - repIndex) >= 3) ) /* intentional overflow : do not test positions overlapping 2 memory segments */
+ && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) {
+ repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dmsEnd, prefixStart) + minMatch;
+ } }
+ /* save longer solution */
+ if (repLen > bestLength) {
+ DEBUGLOG(8, "found repCode %u (ll0:%u, offset:%u) of length %u",
+ repCode, ll0, repOffset, repLen);
+ bestLength = repLen;
+ matches[mnum].off = STORE_REPCODE(repCode - ll0 + 1); /* expect value between 1 and 3 */
+ matches[mnum].len = (U32)repLen;
+ mnum++;
+ if ( (repLen > sufficient_len)
+ | (ip+repLen == iLimit) ) { /* best possible */
+ return mnum;
+ } } } }
+
+ /* HC3 match finder */
+ if ((mls == 3) /*static*/ && (bestLength < mls)) {
+ U32 const matchIndex3 = ZSTD_insertAndFindFirstIndexHash3(ms, nextToUpdate3, ip);
+ if ((matchIndex3 >= matchLow)
+ & (curr - matchIndex3 < (1<<18)) /*heuristic : longer distance likely too expensive*/ ) {
+ size_t mlen;
+ if ((dictMode == ZSTD_noDict) /*static*/ || (dictMode == ZSTD_dictMatchState) /*static*/ || (matchIndex3 >= dictLimit)) {
+ const BYTE* const match = base + matchIndex3;
+ mlen = ZSTD_count(ip, match, iLimit);
+ } else {
+ const BYTE* const match = dictBase + matchIndex3;
+ mlen = ZSTD_count_2segments(ip, match, iLimit, dictEnd, prefixStart);
+ }
+
+ /* save best solution */
+ if (mlen >= mls /* == 3 > bestLength */) {
+ DEBUGLOG(8, "found small match with hlog3, of length %u",
+ (U32)mlen);
+ bestLength = mlen;
+ assert(curr > matchIndex3);
+ assert(mnum==0); /* no prior solution */
+ matches[0].off = STORE_OFFSET(curr - matchIndex3);
+ matches[0].len = (U32)mlen;
+ mnum = 1;
+ if ( (mlen > sufficient_len) |
+ (ip+mlen == iLimit) ) { /* best possible length */
+ ms->nextToUpdate = curr+1; /* skip insertion */
+ return 1;
+ } } }
+ /* no dictMatchState lookup: dicts don't have a populated HC3 table */
+ } /* if (mls == 3) */
+
+ hashTable[h] = curr; /* Update Hash Table */
+
+ for (; nbCompares && (matchIndex >= matchLow); --nbCompares) {
+ U32* const nextPtr = bt + 2*(matchIndex & btMask);
+ const BYTE* match;
+ size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
+ assert(curr > matchIndex);
+
+ if ((dictMode == ZSTD_noDict) || (dictMode == ZSTD_dictMatchState) || (matchIndex+matchLength >= dictLimit)) {
+ assert(matchIndex+matchLength >= dictLimit); /* ensure the condition is correct when !extDict */
+ match = base + matchIndex;
+ if (matchIndex >= dictLimit) assert(memcmp(match, ip, matchLength) == 0); /* ensure early section of match is equal as expected */
+ matchLength += ZSTD_count(ip+matchLength, match+matchLength, iLimit);
+ } else {
+ match = dictBase + matchIndex;
+ assert(memcmp(match, ip, matchLength) == 0); /* ensure early section of match is equal as expected */
+ matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iLimit, dictEnd, prefixStart);
+ if (matchIndex+matchLength >= dictLimit)
+ match = base + matchIndex; /* prepare for match[matchLength] read */
+ }
+
+ if (matchLength > bestLength) {
+ DEBUGLOG(8, "found match of length %u at distance %u (offCode=%u)",
+ (U32)matchLength, curr - matchIndex, STORE_OFFSET(curr - matchIndex));
+ assert(matchEndIdx > matchIndex);
+ if (matchLength > matchEndIdx - matchIndex)
+ matchEndIdx = matchIndex + (U32)matchLength;
+ bestLength = matchLength;
+ matches[mnum].off = STORE_OFFSET(curr - matchIndex);
+ matches[mnum].len = (U32)matchLength;
+ mnum++;
+ if ( (matchLength > ZSTD_OPT_NUM)
+ | (ip+matchLength == iLimit) /* equal : no way to know if inf or sup */) {
+ if (dictMode == ZSTD_dictMatchState) nbCompares = 0; /* break should also skip searching dms */
+ break; /* drop, to preserve bt consistency (miss a little bit of compression) */
+ } }
+
+ if (match[matchLength] < ip[matchLength]) {
+ /* match smaller than current */
+ *smallerPtr = matchIndex; /* update smaller idx */
+ commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
+ if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */
+ smallerPtr = nextPtr+1; /* new candidate => larger than match, which was smaller than current */
+ matchIndex = nextPtr[1]; /* new matchIndex, larger than previous, closer to current */
+ } else {
+ *largerPtr = matchIndex;
+ commonLengthLarger = matchLength;
+ if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */
+ largerPtr = nextPtr;
+ matchIndex = nextPtr[0];
+ } }
+
+ *smallerPtr = *largerPtr = 0;
+
+ assert(nbCompares <= (1U << ZSTD_SEARCHLOG_MAX)); /* Check we haven't underflowed. */
+ if (dictMode == ZSTD_dictMatchState && nbCompares) {
+ size_t const dmsH = ZSTD_hashPtr(ip, dmsHashLog, mls);
+ U32 dictMatchIndex = dms->hashTable[dmsH];
+ const U32* const dmsBt = dms->chainTable;
+ commonLengthSmaller = commonLengthLarger = 0;
+ for (; nbCompares && (dictMatchIndex > dmsLowLimit); --nbCompares) {
+ const U32* const nextPtr = dmsBt + 2*(dictMatchIndex & dmsBtMask);
+ size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
+ const BYTE* match = dmsBase + dictMatchIndex;
+ matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iLimit, dmsEnd, prefixStart);
+ if (dictMatchIndex+matchLength >= dmsHighLimit)
+ match = base + dictMatchIndex + dmsIndexDelta; /* to prepare for next usage of match[matchLength] */
+
+ if (matchLength > bestLength) {
+ matchIndex = dictMatchIndex + dmsIndexDelta;
+ DEBUGLOG(8, "found dms match of length %u at distance %u (offCode=%u)",
+ (U32)matchLength, curr - matchIndex, STORE_OFFSET(curr - matchIndex));
+ if (matchLength > matchEndIdx - matchIndex)
+ matchEndIdx = matchIndex + (U32)matchLength;
+ bestLength = matchLength;
+ matches[mnum].off = STORE_OFFSET(curr - matchIndex);
+ matches[mnum].len = (U32)matchLength;
+ mnum++;
+ if ( (matchLength > ZSTD_OPT_NUM)
+ | (ip+matchLength == iLimit) /* equal : no way to know if inf or sup */) {
+ break; /* drop, to guarantee consistency (miss a little bit of compression) */
+ } }
+
+ if (dictMatchIndex <= dmsBtLow) { break; } /* beyond tree size, stop the search */
+ if (match[matchLength] < ip[matchLength]) {
+ commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
+ dictMatchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */
+ } else {
+ /* match is larger than current */
+ commonLengthLarger = matchLength;
+ dictMatchIndex = nextPtr[0];
+ } } } /* if (dictMode == ZSTD_dictMatchState) */
+
+ assert(matchEndIdx > curr+8);
+ ms->nextToUpdate = matchEndIdx - 8; /* skip repetitive patterns */
+ return mnum;
+}
+
+typedef U32 (*ZSTD_getAllMatchesFn)(
+ ZSTD_match_t*,
+ ZSTD_matchState_t*,
+ U32*,
+ const BYTE*,
+ const BYTE*,
+ const U32 rep[ZSTD_REP_NUM],
+ U32 const ll0,
+ U32 const lengthToBeat);
+
+FORCE_INLINE_TEMPLATE U32 ZSTD_btGetAllMatches_internal(
+ ZSTD_match_t* matches,
+ ZSTD_matchState_t* ms,
+ U32* nextToUpdate3,
+ const BYTE* ip,
+ const BYTE* const iHighLimit,
+ const U32 rep[ZSTD_REP_NUM],
+ U32 const ll0,
+ U32 const lengthToBeat,
+ const ZSTD_dictMode_e dictMode,
+ const U32 mls)
+{
+ assert(BOUNDED(3, ms->cParams.minMatch, 6) == mls);
+    DEBUGLOG(8, "ZSTD_btGetAllMatches_internal(dictMode=%d, mls=%u)", (int)dictMode, mls);
+ if (ip < ms->window.base + ms->nextToUpdate)
+ return 0; /* skipped area */
+ ZSTD_updateTree_internal(ms, ip, iHighLimit, mls, dictMode);
+ return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, mls);
+}
+
+#define ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, mls) ZSTD_btGetAllMatches_##dictMode##_##mls
+
+#define GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, mls) \
+ static U32 ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, mls)( \
+ ZSTD_match_t* matches, \
+ ZSTD_matchState_t* ms, \
+ U32* nextToUpdate3, \
+ const BYTE* ip, \
+ const BYTE* const iHighLimit, \
+ const U32 rep[ZSTD_REP_NUM], \
+ U32 const ll0, \
+ U32 const lengthToBeat) \
+ { \
+ return ZSTD_btGetAllMatches_internal( \
+ matches, ms, nextToUpdate3, ip, iHighLimit, \
+ rep, ll0, lengthToBeat, ZSTD_##dictMode, mls); \
+ }
+
+#define GEN_ZSTD_BT_GET_ALL_MATCHES(dictMode) \
+ GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, 3) \
+ GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, 4) \
+ GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, 5) \
+ GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, 6)
+
+GEN_ZSTD_BT_GET_ALL_MATCHES(noDict)
+GEN_ZSTD_BT_GET_ALL_MATCHES(extDict)
+GEN_ZSTD_BT_GET_ALL_MATCHES(dictMatchState)
+
+#define ZSTD_BT_GET_ALL_MATCHES_ARRAY(dictMode) \
+ { \
+ ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, 3), \
+ ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, 4), \
+ ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, 5), \
+ ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, 6) \
+ }
+
+static ZSTD_getAllMatchesFn
+ZSTD_selectBtGetAllMatches(ZSTD_matchState_t const* ms, ZSTD_dictMode_e const dictMode)
+{
+ ZSTD_getAllMatchesFn const getAllMatchesFns[3][4] = {
+ ZSTD_BT_GET_ALL_MATCHES_ARRAY(noDict),
+ ZSTD_BT_GET_ALL_MATCHES_ARRAY(extDict),
+ ZSTD_BT_GET_ALL_MATCHES_ARRAY(dictMatchState)
+ };
+ U32 const mls = BOUNDED(3, ms->cParams.minMatch, 6);
+ assert((U32)dictMode < 3);
+ assert(mls - 3 < 4);
+ return getAllMatchesFns[(int)dictMode][mls - 3];
+}
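+
+/* Dispatch note (editorial, illustrative): the GEN_ZSTD_BT_GET_ALL_MATCHES
+ * macros above instantiate one specialized function per (dictMode, mls)
+ * pair, and this selector collapses that template space into a single
+ * indirect call. For example, a noDict match state with minMatch=5 resolves
+ * to ZSTD_btGetAllMatches_noDict_5, which the parser then invokes as
+ * getAllMatches(matches, ms, &nextToUpdate3, ip, iend, rep, ll0, minMatch). */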
+
+/* ***********************
+* LDM helper functions *
+*************************/
+
+/* Struct containing info needed to make a decision about ldm inclusion */
+typedef struct {
+ rawSeqStore_t seqStore; /* External match candidates store for this block */
+ U32 startPosInBlock; /* Start position of the current match candidate */
+ U32 endPosInBlock; /* End position of the current match candidate */
+ U32 offset; /* Offset of the match candidate */
+} ZSTD_optLdm_t;
+
+/* ZSTD_optLdm_skipRawSeqStoreBytes():
+ * Moves forward in @rawSeqStore by @nbBytes,
+ * which will update the fields 'pos' and 'posInSequence'.
+ */
+static void ZSTD_optLdm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes)
+{
+ U32 currPos = (U32)(rawSeqStore->posInSequence + nbBytes);
+ while (currPos && rawSeqStore->pos < rawSeqStore->size) {
+ rawSeq currSeq = rawSeqStore->seq[rawSeqStore->pos];
+ if (currPos >= currSeq.litLength + currSeq.matchLength) {
+ currPos -= currSeq.litLength + currSeq.matchLength;
+ rawSeqStore->pos++;
+ } else {
+ rawSeqStore->posInSequence = currPos;
+ break;
+ }
+ }
+ if (currPos == 0 || rawSeqStore->pos == rawSeqStore->size) {
+ rawSeqStore->posInSequence = 0;
+ }
+}
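+
+/* Worked example (made-up numbers, editorial): with two stored sequences
+ * (litLength=5, matchLength=10) then (litLength=3, matchLength=20), pos=0
+ * and posInSequence=0, skipping nbBytes=18 consumes the whole first
+ * sequence (15 bytes) and leaves currPos=3, so pos advances to 1 and
+ * posInSequence becomes 3, i.e. inside the literals of the second
+ * sequence. */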
+
+/* ZSTD_opt_getNextMatchAndUpdateSeqStore():
+ * Calculates the beginning and end of the next match in the current block.
+ * Updates 'pos' and 'posInSequence' of the ldmSeqStore.
+ */
+static void
+ZSTD_opt_getNextMatchAndUpdateSeqStore(ZSTD_optLdm_t* optLdm, U32 currPosInBlock,
+ U32 blockBytesRemaining)
+{
+ rawSeq currSeq;
+ U32 currBlockEndPos;
+ U32 literalsBytesRemaining;
+ U32 matchBytesRemaining;
+
+ /* Setting match end position to MAX to ensure we never use an LDM during this block */
+ if (optLdm->seqStore.size == 0 || optLdm->seqStore.pos >= optLdm->seqStore.size) {
+ optLdm->startPosInBlock = UINT_MAX;
+ optLdm->endPosInBlock = UINT_MAX;
+ return;
+ }
+ /* Calculate appropriate bytes left in matchLength and litLength
+ * after adjusting based on ldmSeqStore->posInSequence */
+ currSeq = optLdm->seqStore.seq[optLdm->seqStore.pos];
+ assert(optLdm->seqStore.posInSequence <= currSeq.litLength + currSeq.matchLength);
+ currBlockEndPos = currPosInBlock + blockBytesRemaining;
+ literalsBytesRemaining = (optLdm->seqStore.posInSequence < currSeq.litLength) ?
+ currSeq.litLength - (U32)optLdm->seqStore.posInSequence :
+ 0;
+ matchBytesRemaining = (literalsBytesRemaining == 0) ?
+ currSeq.matchLength - ((U32)optLdm->seqStore.posInSequence - currSeq.litLength) :
+ currSeq.matchLength;
+
+ /* If there are more literal bytes than bytes remaining in block, no ldm is possible */
+ if (literalsBytesRemaining >= blockBytesRemaining) {
+ optLdm->startPosInBlock = UINT_MAX;
+ optLdm->endPosInBlock = UINT_MAX;
+ ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, blockBytesRemaining);
+ return;
+ }
+
+    /* Matches may become shorter than MINMATCH through this process. In that case, we reject
+       them when deciding whether or not to add the ldm */
+ optLdm->startPosInBlock = currPosInBlock + literalsBytesRemaining;
+ optLdm->endPosInBlock = optLdm->startPosInBlock + matchBytesRemaining;
+ optLdm->offset = currSeq.offset;
+
+ if (optLdm->endPosInBlock > currBlockEndPos) {
+ /* Match ends after the block ends, we can't use the whole match */
+ optLdm->endPosInBlock = currBlockEndPos;
+ ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, currBlockEndPos - currPosInBlock);
+ } else {
+ /* Consume nb of bytes equal to size of sequence left */
+ ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, literalsBytesRemaining + matchBytesRemaining);
+ }
+}
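+
+/* Illustrative numbers (editorial): with currPosInBlock=10,
+ * blockBytesRemaining=50 and a pending sequence of 8 literals + 60 match
+ * bytes, the candidate spans [18, 78) but the block ends at position 60,
+ * so endPosInBlock is clamped to 60 and only 50 bytes are skipped in the
+ * store; the unconsumed tail of the sequence carries over into the next
+ * block. */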
+
+/* ZSTD_optLdm_maybeAddMatch():
+ * Adds a match if it's long enough,
+ * based on its 'startPosInBlock' and 'endPosInBlock',
+ * into 'matches'. Maintains the correct ordering of 'matches'.
+ */
+static void ZSTD_optLdm_maybeAddMatch(ZSTD_match_t* matches, U32* nbMatches,
+ const ZSTD_optLdm_t* optLdm, U32 currPosInBlock)
+{
+ U32 const posDiff = currPosInBlock - optLdm->startPosInBlock;
+ /* Note: ZSTD_match_t actually contains offCode and matchLength (before subtracting MINMATCH) */
+ U32 const candidateMatchLength = optLdm->endPosInBlock - optLdm->startPosInBlock - posDiff;
+
+ /* Ensure that current block position is not outside of the match */
+ if (currPosInBlock < optLdm->startPosInBlock
+ || currPosInBlock >= optLdm->endPosInBlock
+ || candidateMatchLength < MINMATCH) {
+ return;
+ }
+
+ if (*nbMatches == 0 || ((candidateMatchLength > matches[*nbMatches-1].len) && *nbMatches < ZSTD_OPT_NUM)) {
+ U32 const candidateOffCode = STORE_OFFSET(optLdm->offset);
+ DEBUGLOG(6, "ZSTD_optLdm_maybeAddMatch(): Adding ldm candidate match (offCode: %u matchLength %u) at block position=%u",
+ candidateOffCode, candidateMatchLength, currPosInBlock);
+ matches[*nbMatches].len = candidateMatchLength;
+ matches[*nbMatches].off = candidateOffCode;
+ (*nbMatches)++;
+ }
+}
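+
+/* Ordering note (editorial): the ldm candidate is only appended when it
+ * beats matches[*nbMatches-1].len, the current longest; since
+ * ZSTD_insertBtAndGetAllMatches() emits matches sorted by increasing
+ * length, appending at the end preserves that invariant without a search. */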
+
+/* ZSTD_optLdm_processMatchCandidate():
+ * Wrapper function to update ldm seq store and call ldm functions as necessary.
+ */
+static void
+ZSTD_optLdm_processMatchCandidate(ZSTD_optLdm_t* optLdm,
+ ZSTD_match_t* matches, U32* nbMatches,
+ U32 currPosInBlock, U32 remainingBytes)
+{
+ if (optLdm->seqStore.size == 0 || optLdm->seqStore.pos >= optLdm->seqStore.size) {
+ return;
+ }
+
+ if (currPosInBlock >= optLdm->endPosInBlock) {
+ if (currPosInBlock > optLdm->endPosInBlock) {
+            /* The position at which ZSTD_optLdm_processMatchCandidate() is called is not necessarily
+             * at the end of a match from the ldm seq store, and will often land some bytes
+             * beyond endPosInBlock. As such, we need to correct for these "overshoots".
+ */
+ U32 const posOvershoot = currPosInBlock - optLdm->endPosInBlock;
+ ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, posOvershoot);
+ }
+ ZSTD_opt_getNextMatchAndUpdateSeqStore(optLdm, currPosInBlock, remainingBytes);
+ }
+ ZSTD_optLdm_maybeAddMatch(matches, nbMatches, optLdm, currPosInBlock);
+}
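+
+/* Hedged example (illustrative values, editorial): if the previous ldm
+ * candidate ended at endPosInBlock=100 but the parser resumes at
+ * currPosInBlock=103, the 3-byte overshoot is first skipped in the raw seq
+ * store, the next candidate is fetched, and only then is a match possibly
+ * added for the current position. */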
+
+
+/*-*******************************
+* Optimal parser
+*********************************/
+
+static U32 ZSTD_totalLen(ZSTD_optimal_t sol)
+{
+ return sol.litlen + sol.mlen;
+}
+
+#if 0 /* debug */
+
+static void
+listStats(const U32* table, int lastEltID)
+{
+ int const nbElts = lastEltID + 1;
+ int enb;
+ for (enb=0; enb < nbElts; enb++) {
+ (void)table;
+ /* RAWLOG(2, "%3i:%3i, ", enb, table[enb]); */
+ RAWLOG(2, "%4i,", table[enb]);
+ }
+ RAWLOG(2, " \n");
+}
+
+#endif
+
+FORCE_INLINE_TEMPLATE size_t
+ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
+ seqStore_t* seqStore,
+ U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize,
+ const int optLevel,
+ const ZSTD_dictMode_e dictMode)
+{
+ optState_t* const optStatePtr = &ms->opt;
+ const BYTE* const istart = (const BYTE*)src;
+ const BYTE* ip = istart;
+ const BYTE* anchor = istart;
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* const ilimit = iend - 8;
+ const BYTE* const base = ms->window.base;
+ const BYTE* const prefixStart = base + ms->window.dictLimit;
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+
+ ZSTD_getAllMatchesFn getAllMatches = ZSTD_selectBtGetAllMatches(ms, dictMode);
+
+ U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1);
+ U32 const minMatch = (cParams->minMatch == 3) ? 3 : 4;
+ U32 nextToUpdate3 = ms->nextToUpdate;
+
+ ZSTD_optimal_t* const opt = optStatePtr->priceTable;
+ ZSTD_match_t* const matches = optStatePtr->matchTable;
+ ZSTD_optimal_t lastSequence;
+ ZSTD_optLdm_t optLdm;
+
+ optLdm.seqStore = ms->ldmSeqStore ? *ms->ldmSeqStore : kNullRawSeqStore;
+ optLdm.endPosInBlock = optLdm.startPosInBlock = optLdm.offset = 0;
+ ZSTD_opt_getNextMatchAndUpdateSeqStore(&optLdm, (U32)(ip-istart), (U32)(iend-ip));
+
+ /* init */
+ DEBUGLOG(5, "ZSTD_compressBlock_opt_generic: current=%u, prefix=%u, nextToUpdate=%u",
+ (U32)(ip - base), ms->window.dictLimit, ms->nextToUpdate);
+ assert(optLevel <= 2);
+ ZSTD_rescaleFreqs(optStatePtr, (const BYTE*)src, srcSize, optLevel);
+ ip += (ip==prefixStart);
+
+ /* Match Loop */
+ while (ip < ilimit) {
+ U32 cur, last_pos = 0;
+
+ /* find first match */
+ { U32 const litlen = (U32)(ip - anchor);
+ U32 const ll0 = !litlen;
+ U32 nbMatches = getAllMatches(matches, ms, &nextToUpdate3, ip, iend, rep, ll0, minMatch);
+ ZSTD_optLdm_processMatchCandidate(&optLdm, matches, &nbMatches,
+ (U32)(ip-istart), (U32)(iend - ip));
+ if (!nbMatches) { ip++; continue; }
+
+ /* initialize opt[0] */
+ { U32 i ; for (i=0; i<ZSTD_REP_NUM; i++) opt[0].rep[i] = rep[i]; }
+ opt[0].mlen = 0; /* means is_a_literal */
+ opt[0].litlen = litlen;
+ /* We don't need to include the actual price of the literals because
+ * it is static for the duration of the forward pass, and is included
+ * in every price. We include the literal length to avoid negative
+ * prices when we subtract the previous literal length.
+ */
+ opt[0].price = (int)ZSTD_litLengthPrice(litlen, optStatePtr, optLevel);
+
+ /* large match -> immediate encoding */
+ { U32 const maxML = matches[nbMatches-1].len;
+ U32 const maxOffcode = matches[nbMatches-1].off;
+ DEBUGLOG(6, "found %u matches of maxLength=%u and maxOffCode=%u at cPos=%u => start new series",
+ nbMatches, maxML, maxOffcode, (U32)(ip-prefixStart));
+
+ if (maxML > sufficient_len) {
+ lastSequence.litlen = litlen;
+ lastSequence.mlen = maxML;
+ lastSequence.off = maxOffcode;
+ DEBUGLOG(6, "large match (%u>%u), immediate encoding",
+ maxML, sufficient_len);
+ cur = 0;
+ last_pos = ZSTD_totalLen(lastSequence);
+ goto _shortestPath;
+ } }
+
+ /* set prices for first matches starting position == 0 */
+ assert(opt[0].price >= 0);
+ { U32 const literalsPrice = (U32)opt[0].price + ZSTD_litLengthPrice(0, optStatePtr, optLevel);
+ U32 pos;
+ U32 matchNb;
+ for (pos = 1; pos < minMatch; pos++) {
+ opt[pos].price = ZSTD_MAX_PRICE; /* mlen, litlen and price will be fixed during forward scanning */
+ }
+ for (matchNb = 0; matchNb < nbMatches; matchNb++) {
+ U32 const offcode = matches[matchNb].off;
+ U32 const end = matches[matchNb].len;
+ for ( ; pos <= end ; pos++ ) {
+ U32 const matchPrice = ZSTD_getMatchPrice(offcode, pos, optStatePtr, optLevel);
+ U32 const sequencePrice = literalsPrice + matchPrice;
+ DEBUGLOG(7, "rPos:%u => set initial price : %.2f",
+ pos, ZSTD_fCost(sequencePrice));
+ opt[pos].mlen = pos;
+ opt[pos].off = offcode;
+ opt[pos].litlen = litlen;
+ opt[pos].price = (int)sequencePrice;
+ } }
+ last_pos = pos-1;
+ }
+ }
+
+ /* check further positions */
+ for (cur = 1; cur <= last_pos; cur++) {
+ const BYTE* const inr = ip + cur;
+ assert(cur < ZSTD_OPT_NUM);
+ DEBUGLOG(7, "cPos:%zi==rPos:%u", inr-istart, cur)
+
+ /* Fix current position with one literal if cheaper */
+ { U32 const litlen = (opt[cur-1].mlen == 0) ? opt[cur-1].litlen + 1 : 1;
+ int const price = opt[cur-1].price
+ + (int)ZSTD_rawLiteralsCost(ip+cur-1, 1, optStatePtr, optLevel)
+ + (int)ZSTD_litLengthPrice(litlen, optStatePtr, optLevel)
+ - (int)ZSTD_litLengthPrice(litlen-1, optStatePtr, optLevel);
+ assert(price < 1000000000); /* overflow check */
+ if (price <= opt[cur].price) {
+ DEBUGLOG(7, "cPos:%zi==rPos:%u : better price (%.2f<=%.2f) using literal (ll==%u) (hist:%u,%u,%u)",
+ inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price), litlen,
+ opt[cur-1].rep[0], opt[cur-1].rep[1], opt[cur-1].rep[2]);
+ opt[cur].mlen = 0;
+ opt[cur].off = 0;
+ opt[cur].litlen = litlen;
+ opt[cur].price = price;
+ } else {
+ DEBUGLOG(7, "cPos:%zi==rPos:%u : literal would cost more (%.2f>%.2f) (hist:%u,%u,%u)",
+ inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price),
+ opt[cur].rep[0], opt[cur].rep[1], opt[cur].rep[2]);
+ }
+ }
+
+ /* Set the repcodes of the current position. We must do it here
+ * because we rely on the repcodes of the 2nd to last sequence being
+ * correct to set the next chunks repcodes during the backward
+ * traversal.
+ */
+ ZSTD_STATIC_ASSERT(sizeof(opt[cur].rep) == sizeof(repcodes_t));
+ assert(cur >= opt[cur].mlen);
+ if (opt[cur].mlen != 0) {
+ U32 const prev = cur - opt[cur].mlen;
+ repcodes_t const newReps = ZSTD_newRep(opt[prev].rep, opt[cur].off, opt[cur].litlen==0);
+ ZSTD_memcpy(opt[cur].rep, &newReps, sizeof(repcodes_t));
+ } else {
+ ZSTD_memcpy(opt[cur].rep, opt[cur - 1].rep, sizeof(repcodes_t));
+ }
+
+ /* last match must start at a minimum distance of 8 from oend */
+ if (inr > ilimit) continue;
+
+ if (cur == last_pos) break;
+
+ if ( (optLevel==0) /*static_test*/
+ && (opt[cur+1].price <= opt[cur].price + (BITCOST_MULTIPLIER/2)) ) {
+ DEBUGLOG(7, "move to next rPos:%u : price is <=", cur+1);
+ continue; /* skip unpromising positions; about ~+6% speed, -0.01 ratio */
+ }
+
+ assert(opt[cur].price >= 0);
+ { U32 const ll0 = (opt[cur].mlen != 0);
+ U32 const litlen = (opt[cur].mlen == 0) ? opt[cur].litlen : 0;
+ U32 const previousPrice = (U32)opt[cur].price;
+ U32 const basePrice = previousPrice + ZSTD_litLengthPrice(0, optStatePtr, optLevel);
+ U32 nbMatches = getAllMatches(matches, ms, &nextToUpdate3, inr, iend, opt[cur].rep, ll0, minMatch);
+ U32 matchNb;
+
+ ZSTD_optLdm_processMatchCandidate(&optLdm, matches, &nbMatches,
+ (U32)(inr-istart), (U32)(iend-inr));
+
+ if (!nbMatches) {
+ DEBUGLOG(7, "rPos:%u : no match found", cur);
+ continue;
+ }
+
+ { U32 const maxML = matches[nbMatches-1].len;
+ DEBUGLOG(7, "cPos:%zi==rPos:%u, found %u matches, of maxLength=%u",
+ inr-istart, cur, nbMatches, maxML);
+
+ if ( (maxML > sufficient_len)
+ || (cur + maxML >= ZSTD_OPT_NUM) ) {
+ lastSequence.mlen = maxML;
+ lastSequence.off = matches[nbMatches-1].off;
+ lastSequence.litlen = litlen;
+ cur -= (opt[cur].mlen==0) ? opt[cur].litlen : 0; /* last sequence is actually only literals, fix cur to last match - note : may underflow, in which case, it's first sequence, and it's okay */
+ last_pos = cur + ZSTD_totalLen(lastSequence);
+ if (cur > ZSTD_OPT_NUM) cur = 0; /* underflow => first match */
+ goto _shortestPath;
+ } }
+
+ /* set prices using matches found at position == cur */
+ for (matchNb = 0; matchNb < nbMatches; matchNb++) {
+ U32 const offset = matches[matchNb].off;
+ U32 const lastML = matches[matchNb].len;
+ U32 const startML = (matchNb>0) ? matches[matchNb-1].len+1 : minMatch;
+ U32 mlen;
+
+ DEBUGLOG(7, "testing match %u => offCode=%4u, mlen=%2u, llen=%2u",
+ matchNb, matches[matchNb].off, lastML, litlen);
+
+ for (mlen = lastML; mlen >= startML; mlen--) { /* scan downward */
+ U32 const pos = cur + mlen;
+ int const price = (int)basePrice + (int)ZSTD_getMatchPrice(offset, mlen, optStatePtr, optLevel);
+
+ if ((pos > last_pos) || (price < opt[pos].price)) {
+ DEBUGLOG(7, "rPos:%u (ml=%2u) => new better price (%.2f<%.2f)",
+ pos, mlen, ZSTD_fCost(price), ZSTD_fCost(opt[pos].price));
+ while (last_pos < pos) { opt[last_pos+1].price = ZSTD_MAX_PRICE; last_pos++; } /* fill empty positions */
+ opt[pos].mlen = mlen;
+ opt[pos].off = offset;
+ opt[pos].litlen = litlen;
+ opt[pos].price = price;
+ } else {
+ DEBUGLOG(7, "rPos:%u (ml=%2u) => new price is worse (%.2f>=%.2f)",
+ pos, mlen, ZSTD_fCost(price), ZSTD_fCost(opt[pos].price));
+ if (optLevel==0) break; /* early update abort; gets ~+10% speed for about -0.01 ratio loss */
+ }
+ } } }
+ } /* for (cur = 1; cur <= last_pos; cur++) */
+
+ lastSequence = opt[last_pos];
+ cur = last_pos > ZSTD_totalLen(lastSequence) ? last_pos - ZSTD_totalLen(lastSequence) : 0; /* single sequence, and it starts before `ip` */
+        assert(cur < ZSTD_OPT_NUM);  /* guard against overflow */
+
+_shortestPath: /* cur, last_pos, best_mlen, best_off have to be set */
+ assert(opt[0].mlen == 0);
+
+ /* Set the next chunk's repcodes based on the repcodes of the beginning
+ * of the last match, and the last sequence. This avoids us having to
+ * update them while traversing the sequences.
+ */
+ if (lastSequence.mlen != 0) {
+ repcodes_t const reps = ZSTD_newRep(opt[cur].rep, lastSequence.off, lastSequence.litlen==0);
+ ZSTD_memcpy(rep, &reps, sizeof(reps));
+ } else {
+ ZSTD_memcpy(rep, opt[cur].rep, sizeof(repcodes_t));
+ }
+
+ { U32 const storeEnd = cur + 1;
+ U32 storeStart = storeEnd;
+ U32 seqPos = cur;
+
+ DEBUGLOG(6, "start reverse traversal (last_pos:%u, cur:%u)",
+ last_pos, cur); (void)last_pos;
+ assert(storeEnd < ZSTD_OPT_NUM);
+ DEBUGLOG(6, "last sequence copied into pos=%u (llen=%u,mlen=%u,ofc=%u)",
+ storeEnd, lastSequence.litlen, lastSequence.mlen, lastSequence.off);
+ opt[storeEnd] = lastSequence;
+ while (seqPos > 0) {
+ U32 const backDist = ZSTD_totalLen(opt[seqPos]);
+ storeStart--;
+ DEBUGLOG(6, "sequence from rPos=%u copied into pos=%u (llen=%u,mlen=%u,ofc=%u)",
+ seqPos, storeStart, opt[seqPos].litlen, opt[seqPos].mlen, opt[seqPos].off);
+ opt[storeStart] = opt[seqPos];
+ seqPos = (seqPos > backDist) ? seqPos - backDist : 0;
+ }
+
+ /* save sequences */
+ DEBUGLOG(6, "sending selected sequences into seqStore")
+ { U32 storePos;
+ for (storePos=storeStart; storePos <= storeEnd; storePos++) {
+ U32 const llen = opt[storePos].litlen;
+ U32 const mlen = opt[storePos].mlen;
+ U32 const offCode = opt[storePos].off;
+ U32 const advance = llen + mlen;
+ DEBUGLOG(6, "considering seq starting at %zi, llen=%u, mlen=%u",
+ anchor - istart, (unsigned)llen, (unsigned)mlen);
+
+ if (mlen==0) { /* only literals => must be last "sequence", actually starting a new stream of sequences */
+ assert(storePos == storeEnd); /* must be last sequence */
+ ip = anchor + llen; /* last "sequence" is a bunch of literals => don't progress anchor */
+ continue; /* will finish */
+ }
+
+ assert(anchor + llen <= iend);
+ ZSTD_updateStats(optStatePtr, llen, anchor, offCode, mlen);
+ ZSTD_storeSeq(seqStore, llen, anchor, iend, offCode, mlen);
+ anchor += advance;
+ ip = anchor;
+ } }
+ ZSTD_setBasePrices(optStatePtr, optLevel);
+ }
+ } /* while (ip < ilimit) */
+
+ /* Return the last literals size */
+ return (size_t)(iend - anchor);
+}
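+
+/* High-level sketch of the parser above (editorial summary): the forward
+ * pass keeps, for each relative position cur, the cheapest known way to
+ * reach it, either opt[cur-1] plus one literal or opt[cur-mlen] plus a
+ * match of length mlen; the backward pass then follows litlen+mlen
+ * back-links from the winning end position, compacts the chosen sequences
+ * into opt[storeStart..storeEnd], and emits them in order via
+ * ZSTD_storeSeq(). */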
+
+static size_t ZSTD_compressBlock_opt0(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize, const ZSTD_dictMode_e dictMode)
+{
+ return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /* optLevel */, dictMode);
+}
+
+static size_t ZSTD_compressBlock_opt2(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize, const ZSTD_dictMode_e dictMode)
+{
+ return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /* optLevel */, dictMode);
+}
+
+size_t ZSTD_compressBlock_btopt(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize)
+{
+ DEBUGLOG(5, "ZSTD_compressBlock_btopt");
+ return ZSTD_compressBlock_opt0(ms, seqStore, rep, src, srcSize, ZSTD_noDict);
+}
+
+
+
+
+/* ZSTD_initStats_ultra():
+ * make a first compression pass, just to seed stats with more accurate starting values.
+ * only works on first block, with no dictionary and no ldm.
+ * this function cannot error, hence its contract must be respected.
+ */
+static void
+ZSTD_initStats_ultra(ZSTD_matchState_t* ms,
+ seqStore_t* seqStore,
+ U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize)
+{
+ U32 tmpRep[ZSTD_REP_NUM]; /* updated rep codes will sink here */
+ ZSTD_memcpy(tmpRep, rep, sizeof(tmpRep));
+
+ DEBUGLOG(4, "ZSTD_initStats_ultra (srcSize=%zu)", srcSize);
+ assert(ms->opt.litLengthSum == 0); /* first block */
+ assert(seqStore->sequences == seqStore->sequencesStart); /* no ldm */
+ assert(ms->window.dictLimit == ms->window.lowLimit); /* no dictionary */
+    assert(ms->window.dictLimit - ms->nextToUpdate <= 1); /* no prefix (note: intentional overflow, defined as two's complement) */
+
+ ZSTD_compressBlock_opt2(ms, seqStore, tmpRep, src, srcSize, ZSTD_noDict); /* generate stats into ms->opt*/
+
+ /* invalidate first scan from history */
+ ZSTD_resetSeqStore(seqStore);
+ ms->window.base -= srcSize;
+ ms->window.dictLimit += (U32)srcSize;
+ ms->window.lowLimit = ms->window.dictLimit;
+ ms->nextToUpdate = ms->window.dictLimit;
+
+}
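+
+/* Note on the rewind above (editorial): shifting window.base back by
+ * srcSize while raising dictLimit/lowLimit/nextToUpdate by the same amount
+ * leaves every index inserted during the scouting pass below the new
+ * lowLimit, so the real pass sees a fresh window; only the statistics
+ * gathered into ms->opt survive. */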
+
+size_t ZSTD_compressBlock_btultra(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize)
+{
+ DEBUGLOG(5, "ZSTD_compressBlock_btultra (srcSize=%zu)", srcSize);
+ return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_noDict);
+}
+
+size_t ZSTD_compressBlock_btultra2(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize)
+{
+ U32 const curr = (U32)((const BYTE*)src - ms->window.base);
+ DEBUGLOG(5, "ZSTD_compressBlock_btultra2 (srcSize=%zu)", srcSize);
+
+ /* 2-pass strategy:
+ * this strategy makes a first pass over first block to collect statistics
+ * and seed next round's statistics with it.
+ * After 1st pass, function forgets everything, and starts a new block.
+ * Consequently, this can only work if no data has been previously loaded in tables,
+ * aka, no dictionary, no prefix, no ldm preprocessing.
+ * The compression ratio gain is generally small (~0.5% on first block),
+ * the cost is 2x cpu time on first block. */
+ assert(srcSize <= ZSTD_BLOCKSIZE_MAX);
+ if ( (ms->opt.litLengthSum==0) /* first block */
+ && (seqStore->sequences == seqStore->sequencesStart) /* no ldm */
+ && (ms->window.dictLimit == ms->window.lowLimit) /* no dictionary */
+ && (curr == ms->window.dictLimit) /* start of frame, nothing already loaded nor skipped */
+ && (srcSize > ZSTD_PREDEF_THRESHOLD)
+ ) {
+ ZSTD_initStats_ultra(ms, seqStore, rep, src, srcSize);
+ }
+
+ return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_noDict);
+}
+
+size_t ZSTD_compressBlock_btopt_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_opt0(ms, seqStore, rep, src, srcSize, ZSTD_dictMatchState);
+}
+
+size_t ZSTD_compressBlock_btultra_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_dictMatchState);
+}
+
+size_t ZSTD_compressBlock_btopt_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_opt0(ms, seqStore, rep, src, srcSize, ZSTD_extDict);
+}
+
+size_t ZSTD_compressBlock_btultra_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ const void* src, size_t srcSize)
+{
+ return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_extDict);
+}
+
+/* note : no btultra2 variant for extDict nor dictMatchState,
+ * because btultra2 is not meant to work with dictionaries
+ * and is specific to the first block (no prefix) */
diff --git a/lib/zstd/compress/zstd_opt.h b/lib/zstd/compress/zstd_opt.h
new file mode 100644
index 000000000..22b862858
--- /dev/null
+++ b/lib/zstd/compress/zstd_opt.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_OPT_H
+#define ZSTD_OPT_H
+
+
+#include "zstd_compress_internal.h"
+
+/* used in ZSTD_loadDictionaryContent() */
+void ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend);
+
+size_t ZSTD_compressBlock_btopt(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_btultra(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_btultra2(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+
+
+size_t ZSTD_compressBlock_btopt_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_btultra_dictMatchState(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+
+size_t ZSTD_compressBlock_btopt_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_btultra_extDict(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize);
+
+ /* note : no btultra2 variant for extDict nor dictMatchState,
+ * because btultra2 is not meant to work with dictionaries
+ * and is specific to the first block (no prefix) */
+
+
+#endif /* ZSTD_OPT_H */
diff --git a/lib/zstd/decompress/huf_decompress.c b/lib/zstd/decompress/huf_decompress.c
new file mode 100644
index 000000000..60958afeb
--- /dev/null
+++ b/lib/zstd/decompress/huf_decompress.c
@@ -0,0 +1,1740 @@
+/* ******************************************************************
+ * huff0 huffman decoder,
+ * part of Finite State Entropy library
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ *
+ * You can contact the author at :
+ * - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+****************************************************************** */
+
+/* **************************************************************
+* Dependencies
+****************************************************************/
+#include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memset */
+#include "../common/compiler.h"
+#include "../common/bitstream.h" /* BIT_* */
+#include "../common/fse.h" /* to compress headers */
+#define HUF_STATIC_LINKING_ONLY
+#include "../common/huf.h"
+#include "../common/error_private.h"
+#include "../common/zstd_internal.h"
+
+/* **************************************************************
+* Constants
+****************************************************************/
+
+#define HUF_DECODER_FAST_TABLELOG 11
+
+/* **************************************************************
+* Macros
+****************************************************************/
+
+/* These two optional macros force the use of one of the two Huffman
+ * decompression implementations. They cannot both be defined at the
+ * same time.
+ */
+#if defined(HUF_FORCE_DECOMPRESS_X1) && \
+ defined(HUF_FORCE_DECOMPRESS_X2)
+#error "Cannot force the use of the X1 and X2 decoders at the same time!"
+#endif
+
+#if ZSTD_ENABLE_ASM_X86_64_BMI2 && DYNAMIC_BMI2
+# define HUF_ASM_X86_64_BMI2_ATTRS BMI2_TARGET_ATTRIBUTE
+#else
+# define HUF_ASM_X86_64_BMI2_ATTRS
+#endif
+
+#define HUF_EXTERN_C
+#define HUF_ASM_DECL HUF_EXTERN_C
+
+#if DYNAMIC_BMI2 || (ZSTD_ENABLE_ASM_X86_64_BMI2 && defined(__BMI2__))
+# define HUF_NEED_BMI2_FUNCTION 1
+#else
+# define HUF_NEED_BMI2_FUNCTION 0
+#endif
+
+#if !(ZSTD_ENABLE_ASM_X86_64_BMI2 && defined(__BMI2__))
+# define HUF_NEED_DEFAULT_FUNCTION 1
+#else
+# define HUF_NEED_DEFAULT_FUNCTION 0
+#endif
+
+/* **************************************************************
+* Error Management
+****************************************************************/
+#define HUF_isError ERR_isError
+
+
+/* **************************************************************
+* Byte alignment for workSpace management
+****************************************************************/
+#define HUF_ALIGN(x, a) HUF_ALIGN_MASK((x), (a) - 1)
+#define HUF_ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))
+
+
+/* **************************************************************
+* BMI2 Variant Wrappers
+****************************************************************/
+#if DYNAMIC_BMI2
+
+#define HUF_DGEN(fn) \
+ \
+ static size_t fn##_default( \
+ void* dst, size_t dstSize, \
+ const void* cSrc, size_t cSrcSize, \
+ const HUF_DTable* DTable) \
+ { \
+ return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable); \
+ } \
+ \
+ static BMI2_TARGET_ATTRIBUTE size_t fn##_bmi2( \
+ void* dst, size_t dstSize, \
+ const void* cSrc, size_t cSrcSize, \
+ const HUF_DTable* DTable) \
+ { \
+ return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable); \
+ } \
+ \
+ static size_t fn(void* dst, size_t dstSize, void const* cSrc, \
+ size_t cSrcSize, HUF_DTable const* DTable, int bmi2) \
+ { \
+ if (bmi2) { \
+ return fn##_bmi2(dst, dstSize, cSrc, cSrcSize, DTable); \
+ } \
+ return fn##_default(dst, dstSize, cSrc, cSrcSize, DTable); \
+ }
+
+#else
+
+#define HUF_DGEN(fn) \
+ static size_t fn(void* dst, size_t dstSize, void const* cSrc, \
+ size_t cSrcSize, HUF_DTable const* DTable, int bmi2) \
+ { \
+ (void)bmi2; \
+ return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable); \
+ }
+
+#endif
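+
+/* What HUF_DGEN(fn) expands to, roughly (editorial): with DYNAMIC_BMI2 it
+ * emits fn##_default() and a BMI2_TARGET_ATTRIBUTE fn##_bmi2(), both thin
+ * wrappers around fn##_body(), plus a dispatcher fn(..., int bmi2) that
+ * selects one at run time; without DYNAMIC_BMI2, fn() ignores the flag and
+ * calls fn##_body() directly, relying on whatever ISA the build targets. */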
+
+
+/*-***************************/
+/* generic DTableDesc */
+/*-***************************/
+typedef struct { BYTE maxTableLog; BYTE tableType; BYTE tableLog; BYTE reserved; } DTableDesc;
+
+static DTableDesc HUF_getDTableDesc(const HUF_DTable* table)
+{
+ DTableDesc dtd;
+ ZSTD_memcpy(&dtd, table, sizeof(dtd));
+ return dtd;
+}
+
+#if ZSTD_ENABLE_ASM_X86_64_BMI2
+
+static size_t HUF_initDStream(BYTE const* ip) {
+ BYTE const lastByte = ip[7];
+ size_t const bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0;
+ size_t const value = MEM_readLEST(ip) | 1;
+ assert(bitsConsumed <= 8);
+ return value << bitsConsumed;
+}
+typedef struct {
+ BYTE const* ip[4];
+ BYTE* op[4];
+ U64 bits[4];
+ void const* dt;
+ BYTE const* ilimit;
+ BYTE* oend;
+ BYTE const* iend[4];
+} HUF_DecompressAsmArgs;
+
+/*
+ * Initializes args for the asm decoding loop.
+ * @returns 0 on success
+ * 1 if the fallback implementation should be used.
+ * Or an error code on failure.
+ */
+static size_t HUF_DecompressAsmArgs_init(HUF_DecompressAsmArgs* args, void* dst, size_t dstSize, void const* src, size_t srcSize, const HUF_DTable* DTable)
+{
+ void const* dt = DTable + 1;
+ U32 const dtLog = HUF_getDTableDesc(DTable).tableLog;
+
+ const BYTE* const ilimit = (const BYTE*)src + 6 + 8;
+
+ BYTE* const oend = (BYTE*)dst + dstSize;
+
+ /* The following condition is false on x32 platform,
+ * but HUF_asm is not compatible with this ABI */
+ if (!(MEM_isLittleEndian() && !MEM_32bits())) return 1;
+
+ /* strict minimum : jump table + 1 byte per stream */
+ if (srcSize < 10)
+ return ERROR(corruption_detected);
+
+ /* Must have at least 8 bytes per stream because we don't handle initializing smaller bit containers.
+ * If table log is not correct at this point, fallback to the old decoder.
+ * On small inputs we don't have enough data to trigger the fast loop, so use the old decoder.
+ */
+ if (dtLog != HUF_DECODER_FAST_TABLELOG)
+ return 1;
+
+ /* Read the jump table. */
+ {
+ const BYTE* const istart = (const BYTE*)src;
+ size_t const length1 = MEM_readLE16(istart);
+ size_t const length2 = MEM_readLE16(istart+2);
+ size_t const length3 = MEM_readLE16(istart+4);
+ size_t const length4 = srcSize - (length1 + length2 + length3 + 6);
+ args->iend[0] = istart + 6; /* jumpTable */
+ args->iend[1] = args->iend[0] + length1;
+ args->iend[2] = args->iend[1] + length2;
+ args->iend[3] = args->iend[2] + length3;
+
+        /* HUF_initDStream() requires this, and an input this small
+         * won't benefit from the ASM loop anyway.
+ * length1 must be >= 16 so that ip[0] >= ilimit before the loop
+ * starts.
+ */
+ if (length1 < 16 || length2 < 8 || length3 < 8 || length4 < 8)
+ return 1;
+ if (length4 > srcSize) return ERROR(corruption_detected); /* overflow */
+ }
+ /* ip[] contains the position that is currently loaded into bits[]. */
+ args->ip[0] = args->iend[1] - sizeof(U64);
+ args->ip[1] = args->iend[2] - sizeof(U64);
+ args->ip[2] = args->iend[3] - sizeof(U64);
+ args->ip[3] = (BYTE const*)src + srcSize - sizeof(U64);
+
+ /* op[] contains the output pointers. */
+ args->op[0] = (BYTE*)dst;
+ args->op[1] = args->op[0] + (dstSize+3)/4;
+ args->op[2] = args->op[1] + (dstSize+3)/4;
+ args->op[3] = args->op[2] + (dstSize+3)/4;
+
+ /* No point to call the ASM loop for tiny outputs. */
+ if (args->op[3] >= oend)
+ return 1;
+
+ /* bits[] is the bit container.
+ * It is read from the MSB down to the LSB.
+ * It is shifted left as it is read, and zeros are
+ * shifted in. After the lowest valid bit a 1 is
+ * set, so that CountTrailingZeros(bits[]) can be used
+ * to count how many bits we've consumed.
+ */
+ args->bits[0] = HUF_initDStream(args->ip[0]);
+ args->bits[1] = HUF_initDStream(args->ip[1]);
+ args->bits[2] = HUF_initDStream(args->ip[2]);
+ args->bits[3] = HUF_initDStream(args->ip[3]);
+
+ /* If ip[] >= ilimit, it is guaranteed to be safe to
+ * reload bits[]. It may be beyond its section, but is
+ * guaranteed to be valid (>= istart).
+ */
+ args->ilimit = ilimit;
+
+ args->oend = oend;
+ args->dt = dt;
+
+ return 0;
+}
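+
+/* Worked example of the sentinel trick (illustrative byte value,
+ * editorial): if a stream's last byte is 0x05 (0b00000101),
+ * HUF_initDStream() computes bitsConsumed = 8 - highbit32(0x05) = 6, i.e.
+ * the five padding zeros plus the end marker itself; the `| 1` plants a
+ * sentinel at bit 0, which the left shift moves up to bit 6, so
+ * CountTrailingZeros(bits[i]) always equals the number of bits consumed
+ * from the container. */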
+
+static size_t HUF_initRemainingDStream(BIT_DStream_t* bit, HUF_DecompressAsmArgs const* args, int stream, BYTE* segmentEnd)
+{
+ /* Validate that we haven't overwritten. */
+ if (args->op[stream] > segmentEnd)
+ return ERROR(corruption_detected);
+ /* Validate that we haven't read beyond iend[].
+ * Note that ip[] may be < iend[] because the MSB is
+ * the next bit to read, and we may have consumed 100%
+ * of the stream, so down to iend[i] - 8 is valid.
+ */
+ if (args->ip[stream] < args->iend[stream] - 8)
+ return ERROR(corruption_detected);
+
+ /* Construct the BIT_DStream_t. */
+ bit->bitContainer = MEM_readLE64(args->ip[stream]);
+ bit->bitsConsumed = ZSTD_countTrailingZeros((size_t)args->bits[stream]);
+ bit->start = (const char*)args->iend[0];
+ bit->limitPtr = bit->start + sizeof(size_t);
+ bit->ptr = (const char*)args->ip[stream];
+
+ return 0;
+}
+#endif
+
+
+#ifndef HUF_FORCE_DECOMPRESS_X2
+
+/*-***************************/
+/* single-symbol decoding */
+/*-***************************/
+typedef struct { BYTE nbBits; BYTE byte; } HUF_DEltX1; /* single-symbol decoding */
+
+/*
+ * Packs 4 HUF_DEltX1 structs into a U64. This is used to lay down 4 entries at
+ * a time.
+ */
+static U64 HUF_DEltX1_set4(BYTE symbol, BYTE nbBits) {
+ U64 D4;
+ if (MEM_isLittleEndian()) {
+ D4 = (symbol << 8) + nbBits;
+ } else {
+ D4 = symbol + (nbBits << 8);
+ }
+ D4 *= 0x0001000100010001ULL;
+ return D4;
+}
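+
+#if 0 /* editorial example, not compiled: shows the 4-way replication */
+/* On little-endian, symbol=0x41 with nbBits=3 gives D4 = 0x4103, and the
+ * 0x0001000100010001ULL multiply replicates it to 0x4103410341034103: the
+ * in-memory bytes 03 41 03 41 03 41 03 41 are four identical
+ * {nbBits=3, byte=0x41} HUF_DEltX1 entries laid down by one 64-bit store. */
+static void HUF_DEltX1_set4_example(void)
+{
+    U64 const D4 = HUF_DEltX1_set4(0x41, 3);
+    assert(!MEM_isLittleEndian() || D4 == 0x4103410341034103ULL);
+}
+#endif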
+
+/*
+ * Increase the tableLog to targetTableLog and rescales the stats.
+ * If tableLog > targetTableLog this is a no-op.
+ * @returns New tableLog
+ */
+static U32 HUF_rescaleStats(BYTE* huffWeight, U32* rankVal, U32 nbSymbols, U32 tableLog, U32 targetTableLog)
+{
+ if (tableLog > targetTableLog)
+ return tableLog;
+ if (tableLog < targetTableLog) {
+ U32 const scale = targetTableLog - tableLog;
+ U32 s;
+ /* Increase the weight for all non-zero probability symbols by scale. */
+ for (s = 0; s < nbSymbols; ++s) {
+ huffWeight[s] += (BYTE)((huffWeight[s] == 0) ? 0 : scale);
+ }
+ /* Update rankVal to reflect the new weights.
+ * All weights except 0 get moved to weight + scale.
+ * Weights [1, scale] are empty.
+ */
+ for (s = targetTableLog; s > scale; --s) {
+ rankVal[s] = rankVal[s - scale];
+ }
+ for (s = scale; s > 0; --s) {
+ rankVal[s] = 0;
+ }
+ }
+ return targetTableLog;
+}
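+
+/* Example of the rescale (made-up numbers, editorial): raising tableLog
+ * from 9 to the fast decoder's 11 adds scale=2 to every non-zero weight,
+ * so a weight-3 symbol becomes weight 5; rankVal[5] then inherits the old
+ * rankVal[3] count, while rankVal[1..2] are zeroed because no symbol can
+ * land there any more. */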
+
+typedef struct {
+ U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1];
+ U32 rankStart[HUF_TABLELOG_ABSOLUTEMAX + 1];
+ U32 statsWksp[HUF_READ_STATS_WORKSPACE_SIZE_U32];
+ BYTE symbols[HUF_SYMBOLVALUE_MAX + 1];
+ BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1];
+} HUF_ReadDTableX1_Workspace;
+
+
+size_t HUF_readDTableX1_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize)
+{
+ return HUF_readDTableX1_wksp_bmi2(DTable, src, srcSize, workSpace, wkspSize, /* bmi2 */ 0);
+}
+
+size_t HUF_readDTableX1_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int bmi2)
+{
+ U32 tableLog = 0;
+ U32 nbSymbols = 0;
+ size_t iSize;
+ void* const dtPtr = DTable + 1;
+ HUF_DEltX1* const dt = (HUF_DEltX1*)dtPtr;
+ HUF_ReadDTableX1_Workspace* wksp = (HUF_ReadDTableX1_Workspace*)workSpace;
+
+ DEBUG_STATIC_ASSERT(HUF_DECOMPRESS_WORKSPACE_SIZE >= sizeof(*wksp));
+ if (sizeof(*wksp) > wkspSize) return ERROR(tableLog_tooLarge);
+
+ DEBUG_STATIC_ASSERT(sizeof(DTableDesc) == sizeof(HUF_DTable));
+    /* ZSTD_memset(huffWeight, 0, sizeof(huffWeight)); */   /* is not necessary, even though some analyzers complain ... */
+
+ iSize = HUF_readStats_wksp(wksp->huffWeight, HUF_SYMBOLVALUE_MAX + 1, wksp->rankVal, &nbSymbols, &tableLog, src, srcSize, wksp->statsWksp, sizeof(wksp->statsWksp), bmi2);
+ if (HUF_isError(iSize)) return iSize;
+
+
+ /* Table header */
+ { DTableDesc dtd = HUF_getDTableDesc(DTable);
+ U32 const maxTableLog = dtd.maxTableLog + 1;
+ U32 const targetTableLog = MIN(maxTableLog, HUF_DECODER_FAST_TABLELOG);
+ tableLog = HUF_rescaleStats(wksp->huffWeight, wksp->rankVal, nbSymbols, tableLog, targetTableLog);
+ if (tableLog > (U32)(dtd.maxTableLog+1)) return ERROR(tableLog_tooLarge); /* DTable too small, Huffman tree cannot fit in */
+ dtd.tableType = 0;
+ dtd.tableLog = (BYTE)tableLog;
+ ZSTD_memcpy(DTable, &dtd, sizeof(dtd));
+ }
+
+ /* Compute symbols and rankStart given rankVal:
+ *
+ * rankVal already contains the number of values of each weight.
+ *
+ * symbols contains the symbols ordered by weight. First are the rankVal[0]
+ * weight 0 symbols, followed by the rankVal[1] weight 1 symbols, and so on.
+ * symbols[0] is filled (but unused) to avoid a branch.
+ *
+ * rankStart contains the offset where each rank belongs in the DTable.
+ * rankStart[0] is not filled because there are no entries in the table for
+ * weight 0.
+ */
+ {
+ int n;
+ int nextRankStart = 0;
+ int const unroll = 4;
+ int const nLimit = (int)nbSymbols - unroll + 1;
+ for (n=0; n<(int)tableLog+1; n++) {
+ U32 const curr = nextRankStart;
+ nextRankStart += wksp->rankVal[n];
+ wksp->rankStart[n] = curr;
+ }
+ for (n=0; n < nLimit; n += unroll) {
+ int u;
+ for (u=0; u < unroll; ++u) {
+ size_t const w = wksp->huffWeight[n+u];
+ wksp->symbols[wksp->rankStart[w]++] = (BYTE)(n+u);
+ }
+ }
+ for (; n < (int)nbSymbols; ++n) {
+ size_t const w = wksp->huffWeight[n];
+ wksp->symbols[wksp->rankStart[w]++] = (BYTE)n;
+ }
+ }
+
+ /* fill DTable
+ * We fill all entries of each weight in order.
+ * That way length is a constant for each iteration of the outer loop.
+ * We can switch based on the length to a different inner loop which is
+ * optimized for that particular case.
+ */
+ {
+ U32 w;
+ int symbol=wksp->rankVal[0];
+ int rankStart=0;
+ for (w=1; w<tableLog+1; ++w) {
+ int const symbolCount = wksp->rankVal[w];
+ int const length = (1 << w) >> 1;
+ int uStart = rankStart;
+ BYTE const nbBits = (BYTE)(tableLog + 1 - w);
+ int s;
+ int u;
+ switch (length) {
+ case 1:
+ for (s=0; s<symbolCount; ++s) {
+ HUF_DEltX1 D;
+ D.byte = wksp->symbols[symbol + s];
+ D.nbBits = nbBits;
+ dt[uStart] = D;
+ uStart += 1;
+ }
+ break;
+ case 2:
+ for (s=0; s<symbolCount; ++s) {
+ HUF_DEltX1 D;
+ D.byte = wksp->symbols[symbol + s];
+ D.nbBits = nbBits;
+ dt[uStart+0] = D;
+ dt[uStart+1] = D;
+ uStart += 2;
+ }
+ break;
+ case 4:
+ for (s=0; s<symbolCount; ++s) {
+ U64 const D4 = HUF_DEltX1_set4(wksp->symbols[symbol + s], nbBits);
+ MEM_write64(dt + uStart, D4);
+ uStart += 4;
+ }
+ break;
+ case 8:
+ for (s=0; s<symbolCount; ++s) {
+ U64 const D4 = HUF_DEltX1_set4(wksp->symbols[symbol + s], nbBits);
+ MEM_write64(dt + uStart, D4);
+ MEM_write64(dt + uStart + 4, D4);
+ uStart += 8;
+ }
+ break;
+ default:
+ for (s=0; s<symbolCount; ++s) {
+ U64 const D4 = HUF_DEltX1_set4(wksp->symbols[symbol + s], nbBits);
+ for (u=0; u < length; u += 16) {
+ MEM_write64(dt + uStart + u + 0, D4);
+ MEM_write64(dt + uStart + u + 4, D4);
+ MEM_write64(dt + uStart + u + 8, D4);
+ MEM_write64(dt + uStart + u + 12, D4);
+ }
+ assert(u == length);
+ uStart += length;
+ }
+ break;
+ }
+ symbol += symbolCount;
+ rankStart += symbolCount * length;
+ }
+ }
+ return iSize;
+}
+
+FORCE_INLINE_TEMPLATE BYTE
+HUF_decodeSymbolX1(BIT_DStream_t* Dstream, const HUF_DEltX1* dt, const U32 dtLog)
+{
+ size_t const val = BIT_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */
+ BYTE const c = dt[val].byte;
+ BIT_skipBits(Dstream, dt[val].nbBits);
+ return c;
+}
+
+#define HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr) \
+ *ptr++ = HUF_decodeSymbolX1(DStreamPtr, dt, dtLog)
+
+#define HUF_DECODE_SYMBOLX1_1(ptr, DStreamPtr) \
+ if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \
+ HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr)
+
+#define HUF_DECODE_SYMBOLX1_2(ptr, DStreamPtr) \
+ if (MEM_64bits()) \
+ HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr)
+
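+/* Why three macro flavors (editorial): _0 always decodes one symbol; _2
+ * fires only on 64-bit targets; _1 fires on 64-bit targets or when
+ * HUF_TABLELOG_MAX <= 12. Each unrolled group therefore decodes 4 symbols
+ * between reloads on 64-bit hosts but only 1-2 on 32-bit ones, matching
+ * how many worst-case codes fit in the bit container per reload. */
+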
+HINT_INLINE size_t
+HUF_decodeStreamX1(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX1* const dt, const U32 dtLog)
+{
+ BYTE* const pStart = p;
+
+ /* up to 4 symbols at a time */
+ if ((pEnd - p) > 3) {
+ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-3)) {
+ HUF_DECODE_SYMBOLX1_2(p, bitDPtr);
+ HUF_DECODE_SYMBOLX1_1(p, bitDPtr);
+ HUF_DECODE_SYMBOLX1_2(p, bitDPtr);
+ HUF_DECODE_SYMBOLX1_0(p, bitDPtr);
+ }
+ } else {
+ BIT_reloadDStream(bitDPtr);
+ }
+
+ /* [0-3] symbols remaining */
+ if (MEM_32bits())
+ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd))
+ HUF_DECODE_SYMBOLX1_0(p, bitDPtr);
+
+ /* no more data to retrieve from bitstream, no need to reload */
+ while (p < pEnd)
+ HUF_DECODE_SYMBOLX1_0(p, bitDPtr);
+
+ return pEnd-pStart;
+}
+
+FORCE_INLINE_TEMPLATE size_t
+HUF_decompress1X1_usingDTable_internal_body(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const HUF_DTable* DTable)
+{
+ BYTE* op = (BYTE*)dst;
+ BYTE* const oend = op + dstSize;
+ const void* dtPtr = DTable + 1;
+ const HUF_DEltX1* const dt = (const HUF_DEltX1*)dtPtr;
+ BIT_DStream_t bitD;
+ DTableDesc const dtd = HUF_getDTableDesc(DTable);
+ U32 const dtLog = dtd.tableLog;
+
+ CHECK_F( BIT_initDStream(&bitD, cSrc, cSrcSize) );
+
+ HUF_decodeStreamX1(op, &bitD, oend, dt, dtLog);
+
+ if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected);
+
+ return dstSize;
+}
+
+FORCE_INLINE_TEMPLATE size_t
+HUF_decompress4X1_usingDTable_internal_body(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const HUF_DTable* DTable)
+{
+ /* Check */
+ if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
+
+ { const BYTE* const istart = (const BYTE*) cSrc;
+ BYTE* const ostart = (BYTE*) dst;
+ BYTE* const oend = ostart + dstSize;
+ BYTE* const olimit = oend - 3;
+ const void* const dtPtr = DTable + 1;
+ const HUF_DEltX1* const dt = (const HUF_DEltX1*)dtPtr;
+
+ /* Init */
+ BIT_DStream_t bitD1;
+ BIT_DStream_t bitD2;
+ BIT_DStream_t bitD3;
+ BIT_DStream_t bitD4;
+ size_t const length1 = MEM_readLE16(istart);
+ size_t const length2 = MEM_readLE16(istart+2);
+ size_t const length3 = MEM_readLE16(istart+4);
+ size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);
+ const BYTE* const istart1 = istart + 6; /* jumpTable */
+ const BYTE* const istart2 = istart1 + length1;
+ const BYTE* const istart3 = istart2 + length2;
+ const BYTE* const istart4 = istart3 + length3;
+ const size_t segmentSize = (dstSize+3) / 4;
+ BYTE* const opStart2 = ostart + segmentSize;
+ BYTE* const opStart3 = opStart2 + segmentSize;
+ BYTE* const opStart4 = opStart3 + segmentSize;
+ BYTE* op1 = ostart;
+ BYTE* op2 = opStart2;
+ BYTE* op3 = opStart3;
+ BYTE* op4 = opStart4;
+ DTableDesc const dtd = HUF_getDTableDesc(DTable);
+ U32 const dtLog = dtd.tableLog;
+ U32 endSignal = 1;
+
+ if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */
+ if (opStart4 > oend) return ERROR(corruption_detected); /* overflow */
+ CHECK_F( BIT_initDStream(&bitD1, istart1, length1) );
+ CHECK_F( BIT_initDStream(&bitD2, istart2, length2) );
+ CHECK_F( BIT_initDStream(&bitD3, istart3, length3) );
+ CHECK_F( BIT_initDStream(&bitD4, istart4, length4) );
+
+ /* up to 16 symbols per loop (4 symbols per stream) in 64-bit mode */
+ if ((size_t)(oend - op4) >= sizeof(size_t)) {
+ for ( ; (endSignal) & (op4 < olimit) ; ) {
+ HUF_DECODE_SYMBOLX1_2(op1, &bitD1);
+ HUF_DECODE_SYMBOLX1_2(op2, &bitD2);
+ HUF_DECODE_SYMBOLX1_2(op3, &bitD3);
+ HUF_DECODE_SYMBOLX1_2(op4, &bitD4);
+ HUF_DECODE_SYMBOLX1_1(op1, &bitD1);
+ HUF_DECODE_SYMBOLX1_1(op2, &bitD2);
+ HUF_DECODE_SYMBOLX1_1(op3, &bitD3);
+ HUF_DECODE_SYMBOLX1_1(op4, &bitD4);
+ HUF_DECODE_SYMBOLX1_2(op1, &bitD1);
+ HUF_DECODE_SYMBOLX1_2(op2, &bitD2);
+ HUF_DECODE_SYMBOLX1_2(op3, &bitD3);
+ HUF_DECODE_SYMBOLX1_2(op4, &bitD4);
+ HUF_DECODE_SYMBOLX1_0(op1, &bitD1);
+ HUF_DECODE_SYMBOLX1_0(op2, &bitD2);
+ HUF_DECODE_SYMBOLX1_0(op3, &bitD3);
+ HUF_DECODE_SYMBOLX1_0(op4, &bitD4);
+ endSignal &= BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished;
+ endSignal &= BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished;
+ endSignal &= BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished;
+ endSignal &= BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished;
+ }
+ }
+
+ /* check corruption */
+ /* note : should not be necessary : op# advance in lock step, and we control op4.
+ * but curiously, binary generated by gcc 7.2 & 7.3 with -mbmi2 runs faster when >=1 test is present */
+ if (op1 > opStart2) return ERROR(corruption_detected);
+ if (op2 > opStart3) return ERROR(corruption_detected);
+ if (op3 > opStart4) return ERROR(corruption_detected);
+ /* note : op4 supposed already verified within main loop */
+
+ /* finish bitStreams one by one */
+ HUF_decodeStreamX1(op1, &bitD1, opStart2, dt, dtLog);
+ HUF_decodeStreamX1(op2, &bitD2, opStart3, dt, dtLog);
+ HUF_decodeStreamX1(op3, &bitD3, opStart4, dt, dtLog);
+ HUF_decodeStreamX1(op4, &bitD4, oend, dt, dtLog);
+
+ /* check */
+ { U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
+ if (!endCheck) return ERROR(corruption_detected); }
+
+ /* decoded size */
+ return dstSize;
+ }
+}
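+
+/* Layout recap (editorial): a 4-stream block begins with a 6-byte jump
+ * table holding three little-endian 16-bit lengths; the fourth stream's
+ * length is implied by the remaining input. Output is split into four
+ * (dstSize+3)/4 segments, and the interleaved loop above keeps the four
+ * bitstreams in lock step so one endSignal test covers all of them. */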
+
+#if HUF_NEED_BMI2_FUNCTION
+static BMI2_TARGET_ATTRIBUTE
+size_t HUF_decompress4X1_usingDTable_internal_bmi2(void* dst, size_t dstSize, void const* cSrc,
+ size_t cSrcSize, HUF_DTable const* DTable) {
+ return HUF_decompress4X1_usingDTable_internal_body(dst, dstSize, cSrc, cSrcSize, DTable);
+}
+#endif
+
+#if HUF_NEED_DEFAULT_FUNCTION
+static
+size_t HUF_decompress4X1_usingDTable_internal_default(void* dst, size_t dstSize, void const* cSrc,
+ size_t cSrcSize, HUF_DTable const* DTable) {
+ return HUF_decompress4X1_usingDTable_internal_body(dst, dstSize, cSrc, cSrcSize, DTable);
+}
+#endif
+
+#if ZSTD_ENABLE_ASM_X86_64_BMI2
+
+HUF_ASM_DECL void HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop(HUF_DecompressAsmArgs* args) ZSTDLIB_HIDDEN;
+
+static HUF_ASM_X86_64_BMI2_ATTRS
+size_t
+HUF_decompress4X1_usingDTable_internal_bmi2_asm(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const HUF_DTable* DTable)
+{
+ void const* dt = DTable + 1;
+ const BYTE* const iend = (const BYTE*)cSrc + 6;
+ BYTE* const oend = (BYTE*)dst + dstSize;
+ HUF_DecompressAsmArgs args;
+ {
+ size_t const ret = HUF_DecompressAsmArgs_init(&args, dst, dstSize, cSrc, cSrcSize, DTable);
+ FORWARD_IF_ERROR(ret, "Failed to init asm args");
+ if (ret != 0)
+ return HUF_decompress4X1_usingDTable_internal_bmi2(dst, dstSize, cSrc, cSrcSize, DTable);
+ }
+
+ assert(args.ip[0] >= args.ilimit);
+ HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop(&args);
+
+ /* Our loop guarantees that ip[] >= ilimit and that we haven't
+ * overwritten any op[].
+ */
+ assert(args.ip[0] >= iend);
+ assert(args.ip[1] >= iend);
+ assert(args.ip[2] >= iend);
+ assert(args.ip[3] >= iend);
+ assert(args.op[3] <= oend);
+ (void)iend;
+
+ /* finish bit streams one by one. */
+ {
+ size_t const segmentSize = (dstSize+3) / 4;
+ BYTE* segmentEnd = (BYTE*)dst;
+ int i;
+ for (i = 0; i < 4; ++i) {
+ BIT_DStream_t bit;
+ if (segmentSize <= (size_t)(oend - segmentEnd))
+ segmentEnd += segmentSize;
+ else
+ segmentEnd = oend;
+ FORWARD_IF_ERROR(HUF_initRemainingDStream(&bit, &args, i, segmentEnd), "corruption");
+ /* Decompress and validate that we've produced exactly the expected length. */
+ args.op[i] += HUF_decodeStreamX1(args.op[i], &bit, segmentEnd, (HUF_DEltX1 const*)dt, HUF_DECODER_FAST_TABLELOG);
+ if (args.op[i] != segmentEnd) return ERROR(corruption_detected);
+ }
+ }
+
+ /* decoded size */
+ return dstSize;
+}
+#endif /* ZSTD_ENABLE_ASM_X86_64_BMI2 */
+
+typedef size_t (*HUF_decompress_usingDTable_t)(void *dst, size_t dstSize,
+ const void *cSrc,
+ size_t cSrcSize,
+ const HUF_DTable *DTable);
+
+HUF_DGEN(HUF_decompress1X1_usingDTable_internal)
+
+static size_t HUF_decompress4X1_usingDTable_internal(void* dst, size_t dstSize, void const* cSrc,
+ size_t cSrcSize, HUF_DTable const* DTable, int bmi2)
+{
+#if DYNAMIC_BMI2
+ if (bmi2) {
+# if ZSTD_ENABLE_ASM_X86_64_BMI2
+ return HUF_decompress4X1_usingDTable_internal_bmi2_asm(dst, dstSize, cSrc, cSrcSize, DTable);
+# else
+ return HUF_decompress4X1_usingDTable_internal_bmi2(dst, dstSize, cSrc, cSrcSize, DTable);
+# endif
+ }
+#else
+ (void)bmi2;
+#endif
+
+#if ZSTD_ENABLE_ASM_X86_64_BMI2 && defined(__BMI2__)
+ return HUF_decompress4X1_usingDTable_internal_bmi2_asm(dst, dstSize, cSrc, cSrcSize, DTable);
+#else
+ return HUF_decompress4X1_usingDTable_internal_default(dst, dstSize, cSrc, cSrcSize, DTable);
+#endif
+}
+
+
+size_t HUF_decompress1X1_usingDTable(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const HUF_DTable* DTable)
+{
+ DTableDesc dtd = HUF_getDTableDesc(DTable);
+ if (dtd.tableType != 0) return ERROR(GENERIC);
+ return HUF_decompress1X1_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+}
+
+size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ void* workSpace, size_t wkspSize)
+{
+ const BYTE* ip = (const BYTE*) cSrc;
+
+ size_t const hSize = HUF_readDTableX1_wksp(DCtx, cSrc, cSrcSize, workSpace, wkspSize);
+ if (HUF_isError(hSize)) return hSize;
+ if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
+ ip += hSize; cSrcSize -= hSize;
+
+ return HUF_decompress1X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, /* bmi2 */ 0);
+}
+
+
+size_t HUF_decompress4X1_usingDTable(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const HUF_DTable* DTable)
+{
+ DTableDesc dtd = HUF_getDTableDesc(DTable);
+ if (dtd.tableType != 0) return ERROR(GENERIC);
+ return HUF_decompress4X1_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+}
+
+static size_t HUF_decompress4X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ void* workSpace, size_t wkspSize, int bmi2)
+{
+ const BYTE* ip = (const BYTE*) cSrc;
+
+ size_t const hSize = HUF_readDTableX1_wksp_bmi2(dctx, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
+ if (HUF_isError(hSize)) return hSize;
+ if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
+ ip += hSize; cSrcSize -= hSize;
+
+ return HUF_decompress4X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);
+}
+
+size_t HUF_decompress4X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ void* workSpace, size_t wkspSize)
+{
+ return HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, 0);
+}
+
+
+#endif /* HUF_FORCE_DECOMPRESS_X2 */
+
+
+#ifndef HUF_FORCE_DECOMPRESS_X1
+
+/* *************************/
+/* double-symbols decoding */
+/* *************************/
+
+typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX2; /* double-symbols decoding */
+typedef struct { BYTE symbol; } sortedSymbol_t;
+typedef U32 rankValCol_t[HUF_TABLELOG_MAX + 1];
+typedef rankValCol_t rankVal_t[HUF_TABLELOG_MAX];
+
+/*
+ * Constructs a HUF_DEltX2 in a U32.
+ */
+static U32 HUF_buildDEltX2U32(U32 symbol, U32 nbBits, U32 baseSeq, int level)
+{
+ U32 seq;
+ DEBUG_STATIC_ASSERT(offsetof(HUF_DEltX2, sequence) == 0);
+ DEBUG_STATIC_ASSERT(offsetof(HUF_DEltX2, nbBits) == 2);
+ DEBUG_STATIC_ASSERT(offsetof(HUF_DEltX2, length) == 3);
+ DEBUG_STATIC_ASSERT(sizeof(HUF_DEltX2) == sizeof(U32));
+ if (MEM_isLittleEndian()) {
+ seq = level == 1 ? symbol : (baseSeq + (symbol << 8));
+ return seq + (nbBits << 16) + ((U32)level << 24);
+ } else {
+ seq = level == 1 ? (symbol << 8) : ((baseSeq << 8) + symbol);
+ return (seq << 16) + (nbBits << 8) + (U32)level;
+ }
+}
+
+/*
+ * Constructs a HUF_DEltX2.
+ */
+static HUF_DEltX2 HUF_buildDEltX2(U32 symbol, U32 nbBits, U32 baseSeq, int level)
+{
+ HUF_DEltX2 DElt;
+ U32 const val = HUF_buildDEltX2U32(symbol, nbBits, baseSeq, level);
+ DEBUG_STATIC_ASSERT(sizeof(DElt) == sizeof(val));
+ ZSTD_memcpy(&DElt, &val, sizeof(val));
+ return DElt;
+}
+
+/*
+ * Constructs 2 HUF_DEltX2s and packs them into a U64.
+ */
+static U64 HUF_buildDEltX2U64(U32 symbol, U32 nbBits, U16 baseSeq, int level)
+{
+ U32 DElt = HUF_buildDEltX2U32(symbol, nbBits, baseSeq, level);
+ return (U64)DElt + ((U64)DElt << 32);
+}
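+
+/* Note (editorial): duplicating the 32-bit entry into both halves of a U64
+ * lets the fill loops below write two identical DElts per 8-byte
+ * ZSTD_memcpy, mirroring the replication trick of HUF_DEltX1_set4(). */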
+
+/*
+ * Fills the DTable rank with all the symbols from [begin, end) that are each
+ * nbBits long.
+ *
+ * @param DTableRank The start of the rank in the DTable.
+ * @param begin The first symbol to fill (inclusive).
+ * @param end The last symbol to fill (exclusive).
+ * @param nbBits Each symbol is nbBits long.
+ * @param tableLog The table log.
+ * @param baseSeq 0 if level == 1; otherwise the first-level symbol.
+ * @param level The level in the table. Must be 1 or 2.
+ */
+static void HUF_fillDTableX2ForWeight(
+ HUF_DEltX2* DTableRank,
+ sortedSymbol_t const* begin, sortedSymbol_t const* end,
+ U32 nbBits, U32 tableLog,
+ U16 baseSeq, int const level)
+{
+ U32 const length = 1U << ((tableLog - nbBits) & 0x1F /* quiet static-analyzer */);
+ const sortedSymbol_t* ptr;
+ assert(level >= 1 && level <= 2);
+ switch (length) {
+ case 1:
+ for (ptr = begin; ptr != end; ++ptr) {
+ HUF_DEltX2 const DElt = HUF_buildDEltX2(ptr->symbol, nbBits, baseSeq, level);
+ *DTableRank++ = DElt;
+ }
+ break;
+ case 2:
+ for (ptr = begin; ptr != end; ++ptr) {
+ HUF_DEltX2 const DElt = HUF_buildDEltX2(ptr->symbol, nbBits, baseSeq, level);
+ DTableRank[0] = DElt;
+ DTableRank[1] = DElt;
+ DTableRank += 2;
+ }
+ break;
+ case 4:
+ for (ptr = begin; ptr != end; ++ptr) {
+ U64 const DEltX2 = HUF_buildDEltX2U64(ptr->symbol, nbBits, baseSeq, level);
+ ZSTD_memcpy(DTableRank + 0, &DEltX2, sizeof(DEltX2));
+ ZSTD_memcpy(DTableRank + 2, &DEltX2, sizeof(DEltX2));
+ DTableRank += 4;
+ }
+ break;
+ case 8:
+ for (ptr = begin; ptr != end; ++ptr) {
+ U64 const DEltX2 = HUF_buildDEltX2U64(ptr->symbol, nbBits, baseSeq, level);
+ ZSTD_memcpy(DTableRank + 0, &DEltX2, sizeof(DEltX2));
+ ZSTD_memcpy(DTableRank + 2, &DEltX2, sizeof(DEltX2));
+ ZSTD_memcpy(DTableRank + 4, &DEltX2, sizeof(DEltX2));
+ ZSTD_memcpy(DTableRank + 6, &DEltX2, sizeof(DEltX2));
+ DTableRank += 8;
+ }
+ break;
+ default:
+ for (ptr = begin; ptr != end; ++ptr) {
+ U64 const DEltX2 = HUF_buildDEltX2U64(ptr->symbol, nbBits, baseSeq, level);
+ HUF_DEltX2* const DTableRankEnd = DTableRank + length;
+ for (; DTableRank != DTableRankEnd; DTableRank += 8) {
+ ZSTD_memcpy(DTableRank + 0, &DEltX2, sizeof(DEltX2));
+ ZSTD_memcpy(DTableRank + 2, &DEltX2, sizeof(DEltX2));
+ ZSTD_memcpy(DTableRank + 4, &DEltX2, sizeof(DEltX2));
+ ZSTD_memcpy(DTableRank + 6, &DEltX2, sizeof(DEltX2));
+ }
+ }
+ break;
+ }
+}
+
+/* HUF_fillDTableX2Level2() :
+ * `rankVal` must be a table of at least (HUF_TABLELOG_MAX + 1) U32 */
+static void HUF_fillDTableX2Level2(HUF_DEltX2* DTable, U32 targetLog, const U32 consumedBits,
+ const U32* rankVal, const int minWeight, const int maxWeight1,
+ const sortedSymbol_t* sortedSymbols, U32 const* rankStart,
+ U32 nbBitsBaseline, U16 baseSeq)
+{
+ /* Fill skipped values (all positions up to rankVal[minWeight]).
+ * These positions get only a single symbol because the combined weight
+ * would be too large.
+ */
+ if (minWeight>1) {
+ U32 const length = 1U << ((targetLog - consumedBits) & 0x1F /* quiet static-analyzer */);
+ U64 const DEltX2 = HUF_buildDEltX2U64(baseSeq, consumedBits, /* baseSeq */ 0, /* level */ 1);
+ int const skipSize = rankVal[minWeight];
+ assert(length > 1);
+ assert((U32)skipSize < length);
+ switch (length) {
+ case 2:
+ assert(skipSize == 1);
+ ZSTD_memcpy(DTable, &DEltX2, sizeof(DEltX2));
+ break;
+ case 4:
+ assert(skipSize <= 4);
+ ZSTD_memcpy(DTable + 0, &DEltX2, sizeof(DEltX2));
+ ZSTD_memcpy(DTable + 2, &DEltX2, sizeof(DEltX2));
+ break;
+ default:
+ {
+ int i;
+ for (i = 0; i < skipSize; i += 8) {
+ ZSTD_memcpy(DTable + i + 0, &DEltX2, sizeof(DEltX2));
+ ZSTD_memcpy(DTable + i + 2, &DEltX2, sizeof(DEltX2));
+ ZSTD_memcpy(DTable + i + 4, &DEltX2, sizeof(DEltX2));
+ ZSTD_memcpy(DTable + i + 6, &DEltX2, sizeof(DEltX2));
+ }
+ }
+ }
+ }
+
+ /* Fill each of the second level symbols by weight. */
+ {
+ int w;
+ for (w = minWeight; w < maxWeight1; ++w) {
+ int const begin = rankStart[w];
+ int const end = rankStart[w+1];
+ U32 const nbBits = nbBitsBaseline - w;
+ U32 const totalBits = nbBits + consumedBits;
+ HUF_fillDTableX2ForWeight(
+ DTable + rankVal[w],
+ sortedSymbols + begin, sortedSymbols + end,
+ totalBits, targetLog,
+ baseSeq, /* level */ 2);
+ }
+ }
+}
+
+static void HUF_fillDTableX2(HUF_DEltX2* DTable, const U32 targetLog,
+ const sortedSymbol_t* sortedList,
+ const U32* rankStart, rankValCol_t *rankValOrigin, const U32 maxWeight,
+ const U32 nbBitsBaseline)
+{
+ U32* const rankVal = rankValOrigin[0];
+ const int scaleLog = nbBitsBaseline - targetLog; /* note : targetLog >= srcLog, hence scaleLog <= 1 */
+ const U32 minBits = nbBitsBaseline - maxWeight;
+ int w;
+ int const wEnd = (int)maxWeight + 1;
+
+ /* Fill DTable in order of weight. */
+ for (w = 1; w < wEnd; ++w) {
+ int const begin = (int)rankStart[w];
+ int const end = (int)rankStart[w+1];
+ U32 const nbBits = nbBitsBaseline - w;
+
+ if (targetLog-nbBits >= minBits) {
+ /* Enough room for a second symbol. */
+ int start = rankVal[w];
+ U32 const length = 1U << ((targetLog - nbBits) & 0x1F /* quiet static-analyzer */);
+ int minWeight = nbBits + scaleLog;
+ int s;
+ if (minWeight < 1) minWeight = 1;
+ /* Fill the DTable for every symbol of weight w.
+ * These symbols get at least 1 second symbol.
+ */
+ for (s = begin; s != end; ++s) {
+ HUF_fillDTableX2Level2(
+ DTable + start, targetLog, nbBits,
+ rankValOrigin[nbBits], minWeight, wEnd,
+ sortedList, rankStart,
+ nbBitsBaseline, sortedList[s].symbol);
+ start += length;
+ }
+ } else {
+ /* Only a single symbol. */
+ HUF_fillDTableX2ForWeight(
+ DTable + rankVal[w],
+ sortedList + begin, sortedList + end,
+ nbBits, targetLog,
+ /* baseSeq */ 0, /* level */ 1);
+ }
+ }
+}
+
+typedef struct {
+ rankValCol_t rankVal[HUF_TABLELOG_MAX];
+ U32 rankStats[HUF_TABLELOG_MAX + 1];
+ U32 rankStart0[HUF_TABLELOG_MAX + 3];
+ sortedSymbol_t sortedSymbol[HUF_SYMBOLVALUE_MAX + 1];
+ BYTE weightList[HUF_SYMBOLVALUE_MAX + 1];
+ U32 calleeWksp[HUF_READ_STATS_WORKSPACE_SIZE_U32];
+} HUF_ReadDTableX2_Workspace;
+
+size_t HUF_readDTableX2_wksp(HUF_DTable* DTable,
+ const void* src, size_t srcSize,
+ void* workSpace, size_t wkspSize)
+{
+ return HUF_readDTableX2_wksp_bmi2(DTable, src, srcSize, workSpace, wkspSize, /* bmi2 */ 0);
+}
+
+size_t HUF_readDTableX2_wksp_bmi2(HUF_DTable* DTable,
+ const void* src, size_t srcSize,
+ void* workSpace, size_t wkspSize, int bmi2)
+{
+ U32 tableLog, maxW, nbSymbols;
+ DTableDesc dtd = HUF_getDTableDesc(DTable);
+ U32 maxTableLog = dtd.maxTableLog;
+ size_t iSize;
+ void* dtPtr = DTable+1; /* force compiler to avoid strict-aliasing */
+ HUF_DEltX2* const dt = (HUF_DEltX2*)dtPtr;
+ U32 *rankStart;
+
+ HUF_ReadDTableX2_Workspace* const wksp = (HUF_ReadDTableX2_Workspace*)workSpace;
+
+ if (sizeof(*wksp) > wkspSize) return ERROR(GENERIC);
+
+ rankStart = wksp->rankStart0 + 1;
+ ZSTD_memset(wksp->rankStats, 0, sizeof(wksp->rankStats));
+ ZSTD_memset(wksp->rankStart0, 0, sizeof(wksp->rankStart0));
+
+ DEBUG_STATIC_ASSERT(sizeof(HUF_DEltX2) == sizeof(HUF_DTable)); /* if compiler fails here, assertion is wrong */
+ if (maxTableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
+ /* ZSTD_memset(weightList, 0, sizeof(weightList)); */ /* is not necessary, even though some analyzers complain ... */
+
+ iSize = HUF_readStats_wksp(wksp->weightList, HUF_SYMBOLVALUE_MAX + 1, wksp->rankStats, &nbSymbols, &tableLog, src, srcSize, wksp->calleeWksp, sizeof(wksp->calleeWksp), bmi2);
+ if (HUF_isError(iSize)) return iSize;
+
+ /* check result */
+ if (tableLog > maxTableLog) return ERROR(tableLog_tooLarge); /* DTable can't fit code depth */
+ if (tableLog <= HUF_DECODER_FAST_TABLELOG && maxTableLog > HUF_DECODER_FAST_TABLELOG) maxTableLog = HUF_DECODER_FAST_TABLELOG;
+
+ /* find maxWeight */
+ for (maxW = tableLog; wksp->rankStats[maxW]==0; maxW--) {} /* necessarily finds a solution before 0 */
+
+ /* Get start index of each weight */
+ { U32 w, nextRankStart = 0;
+ for (w=1; w<maxW+1; w++) {
+ U32 curr = nextRankStart;
+ nextRankStart += wksp->rankStats[w];
+ rankStart[w] = curr;
+ }
+ rankStart[0] = nextRankStart; /* put all 0w symbols at the end of sorted list*/
+ rankStart[maxW+1] = nextRankStart;
+ }
+
+ /* sort symbols by weight */
+ { U32 s;
+ for (s=0; s<nbSymbols; s++) {
+ U32 const w = wksp->weightList[s];
+ U32 const r = rankStart[w]++;
+ wksp->sortedSymbol[r].symbol = (BYTE)s;
+ }
+ rankStart[0] = 0; /* forget 0w symbols; this is beginning of weight(1) */
+ }
+
+ /* Build rankVal */
+ { U32* const rankVal0 = wksp->rankVal[0];
+ { int const rescale = (maxTableLog-tableLog) - 1; /* tableLog <= maxTableLog */
+ U32 nextRankVal = 0;
+ U32 w;
+ for (w=1; w<maxW+1; w++) {
+ U32 curr = nextRankVal;
+ nextRankVal += wksp->rankStats[w] << (w+rescale);
+ rankVal0[w] = curr;
+ } }
+ { U32 const minBits = tableLog+1 - maxW;
+ U32 consumed;
+ for (consumed = minBits; consumed < maxTableLog - minBits + 1; consumed++) {
+ U32* const rankValPtr = wksp->rankVal[consumed];
+ U32 w;
+ for (w = 1; w < maxW+1; w++) {
+ rankValPtr[w] = rankVal0[w] >> consumed;
+ } } } }
+
+ HUF_fillDTableX2(dt, maxTableLog,
+ wksp->sortedSymbol,
+ wksp->rankStart0, wksp->rankVal, maxW,
+ tableLog+1);
+
+ dtd.tableLog = (BYTE)maxTableLog;
+ dtd.tableType = 1;
+ ZSTD_memcpy(DTable, &dtd, sizeof(dtd));
+ return iSize;
+}
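+
+#if 0 /* Usage sketch, illustrative only (not compiled) : builds an X2 DTable
+ * from a serialized Huffman table header. HUF_CREATE_STATIC_DTABLEX2() is
+ * the huf.h helper for declaring a DTable of the requested size. */
+static size_t example_buildX2Table(const void* hufHeader, size_t hufHeaderSize)
+{
+ HUF_CREATE_STATIC_DTABLEX2(dtable, HUF_TABLELOG_MAX);
+ HUF_ReadDTableX2_Workspace wksp;
+ /* returns the number of header bytes consumed, or an error code */
+ return HUF_readDTableX2_wksp(dtable, hufHeader, hufHeaderSize, &wksp, sizeof(wksp));
+}
+#endif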
+
+
+FORCE_INLINE_TEMPLATE U32
+HUF_decodeSymbolX2(void* op, BIT_DStream_t* DStream, const HUF_DEltX2* dt, const U32 dtLog)
+{
+ size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
+ ZSTD_memcpy(op, &dt[val].sequence, 2);
+ BIT_skipBits(DStream, dt[val].nbBits);
+ return dt[val].length;
+}
+
+FORCE_INLINE_TEMPLATE U32
+HUF_decodeLastSymbolX2(void* op, BIT_DStream_t* DStream, const HUF_DEltX2* dt, const U32 dtLog)
+{
+ size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
+ ZSTD_memcpy(op, &dt[val].sequence, 1);
+ if (dt[val].length==1) {
+ BIT_skipBits(DStream, dt[val].nbBits);
+ } else {
+ if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) {
+ BIT_skipBits(DStream, dt[val].nbBits);
+ if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))
+ /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
+ DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8);
+ }
+ }
+ return 1;
+}
+
+#define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \
+ ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog)
+
+#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \
+ if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \
+ ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog)
+
+#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \
+ if (MEM_64bits()) \
+ ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog)
+
+HINT_INLINE size_t
+HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd,
+ const HUF_DEltX2* const dt, const U32 dtLog)
+{
+ BYTE* const pStart = p;
+
+ /* up to 8 symbols at a time */
+ if ((size_t)(pEnd - p) >= sizeof(bitDPtr->bitContainer)) {
+ if (dtLog <= 11 && MEM_64bits()) {
+ /* up to 10 symbols at a time */
+ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-9)) {
+ HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
+ HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
+ HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
+ HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
+ HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
+ }
+ } else {
+ /* up to 8 symbols at a time */
+ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-(sizeof(bitDPtr->bitContainer)-1))) {
+ HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
+ HUF_DECODE_SYMBOLX2_1(p, bitDPtr);
+ HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
+ HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
+ }
+ }
+ } else {
+ BIT_reloadDStream(bitDPtr);
+ }
+
+ /* closer to end : up to 2 symbols at a time */
+ if ((size_t)(pEnd - p) >= 2) {
+ while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p <= pEnd-2))
+ HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
+
+ while (p <= pEnd-2)
+ HUF_DECODE_SYMBOLX2_0(p, bitDPtr); /* no need to reload : reached the end of DStream */
+ }
+
+ if (p < pEnd)
+ p += HUF_decodeLastSymbolX2(p, bitDPtr, dt, dtLog);
+
+ return p-pStart;
+}
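+
+/* Sizing note (illustrative reasoning, not upstream text) : in the fast path
+ * above, 5 decodes per reload consume at most 5 * 11 = 55 bits, which fits the
+ * 64-bit container's refill budget; since each X2 entry emits up to 2 symbols,
+ * one iteration advances the output by up to 10 bytes, hence `p < pEnd-9`. */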
+
+FORCE_INLINE_TEMPLATE size_t
+HUF_decompress1X2_usingDTable_internal_body(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const HUF_DTable* DTable)
+{
+ BIT_DStream_t bitD;
+
+ /* Init */
+ CHECK_F( BIT_initDStream(&bitD, cSrc, cSrcSize) );
+
+ /* decode */
+ { BYTE* const ostart = (BYTE*) dst;
+ BYTE* const oend = ostart + dstSize;
+ const void* const dtPtr = DTable+1; /* force compiler to not use strict-aliasing */
+ const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr;
+ DTableDesc const dtd = HUF_getDTableDesc(DTable);
+ HUF_decodeStreamX2(ostart, &bitD, oend, dt, dtd.tableLog);
+ }
+
+ /* check */
+ if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected);
+
+ /* decoded size */
+ return dstSize;
+}
+FORCE_INLINE_TEMPLATE size_t
+HUF_decompress4X2_usingDTable_internal_body(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const HUF_DTable* DTable)
+{
+ if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
+
+ { const BYTE* const istart = (const BYTE*) cSrc;
+ BYTE* const ostart = (BYTE*) dst;
+ BYTE* const oend = ostart + dstSize;
+ BYTE* const olimit = oend - (sizeof(size_t)-1);
+ const void* const dtPtr = DTable+1;
+ const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr;
+
+ /* Init */
+ BIT_DStream_t bitD1;
+ BIT_DStream_t bitD2;
+ BIT_DStream_t bitD3;
+ BIT_DStream_t bitD4;
+ size_t const length1 = MEM_readLE16(istart);
+ size_t const length2 = MEM_readLE16(istart+2);
+ size_t const length3 = MEM_readLE16(istart+4);
+ size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);
+ const BYTE* const istart1 = istart + 6; /* jumpTable */
+ const BYTE* const istart2 = istart1 + length1;
+ const BYTE* const istart3 = istart2 + length2;
+ const BYTE* const istart4 = istart3 + length3;
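+ /* Input layout implied by the reads above (illustrative diagram, not
+ * upstream documentation) :
+ * [len1:LE16][len2:LE16][len3:LE16] <- 6-byte jump table
+ * [stream1: len1 bytes][stream2: len2 bytes][stream3: len3 bytes][stream4: rest]
+ * length4 is implicit : cSrcSize - 6 - (length1 + length2 + length3). */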
+ size_t const segmentSize = (dstSize+3) / 4;
+ BYTE* const opStart2 = ostart + segmentSize;
+ BYTE* const opStart3 = opStart2 + segmentSize;
+ BYTE* const opStart4 = opStart3 + segmentSize;
+ BYTE* op1 = ostart;
+ BYTE* op2 = opStart2;
+ BYTE* op3 = opStart3;
+ BYTE* op4 = opStart4;
+ U32 endSignal = 1;
+ DTableDesc const dtd = HUF_getDTableDesc(DTable);
+ U32 const dtLog = dtd.tableLog;
+
+ if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */
+ if (opStart4 > oend) return ERROR(corruption_detected); /* overflow */
+ CHECK_F( BIT_initDStream(&bitD1, istart1, length1) );
+ CHECK_F( BIT_initDStream(&bitD2, istart2, length2) );
+ CHECK_F( BIT_initDStream(&bitD3, istart3, length3) );
+ CHECK_F( BIT_initDStream(&bitD4, istart4, length4) );
+
+ /* 16-32 symbols per loop (4-8 symbols per stream) */
+ if ((size_t)(oend - op4) >= sizeof(size_t)) {
+ for ( ; (endSignal) & (op4 < olimit); ) {
+#if defined(__clang__) && (defined(__x86_64__) || defined(__i386__))
+ HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
+ HUF_DECODE_SYMBOLX2_1(op1, &bitD1);
+ HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
+ HUF_DECODE_SYMBOLX2_0(op1, &bitD1);
+ HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
+ HUF_DECODE_SYMBOLX2_1(op2, &bitD2);
+ HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
+ HUF_DECODE_SYMBOLX2_0(op2, &bitD2);
+ endSignal &= BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished;
+ endSignal &= BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished;
+ HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
+ HUF_DECODE_SYMBOLX2_1(op3, &bitD3);
+ HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
+ HUF_DECODE_SYMBOLX2_0(op3, &bitD3);
+ HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
+ HUF_DECODE_SYMBOLX2_1(op4, &bitD4);
+ HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
+ HUF_DECODE_SYMBOLX2_0(op4, &bitD4);
+ endSignal &= BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished;
+ endSignal &= BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished;
+#else
+ HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
+ HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
+ HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
+ HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
+ HUF_DECODE_SYMBOLX2_1(op1, &bitD1);
+ HUF_DECODE_SYMBOLX2_1(op2, &bitD2);
+ HUF_DECODE_SYMBOLX2_1(op3, &bitD3);
+ HUF_DECODE_SYMBOLX2_1(op4, &bitD4);
+ HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
+ HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
+ HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
+ HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
+ HUF_DECODE_SYMBOLX2_0(op1, &bitD1);
+ HUF_DECODE_SYMBOLX2_0(op2, &bitD2);
+ HUF_DECODE_SYMBOLX2_0(op3, &bitD3);
+ HUF_DECODE_SYMBOLX2_0(op4, &bitD4);
+ endSignal = (U32)LIKELY((U32)
+ (BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished)
+ & (BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished)
+ & (BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished)
+ & (BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished));
+#endif
+ }
+ }
+
+ /* check corruption */
+ if (op1 > opStart2) return ERROR(corruption_detected);
+ if (op2 > opStart3) return ERROR(corruption_detected);
+ if (op3 > opStart4) return ERROR(corruption_detected);
+ /* note : op4 already verified within main loop */
+
+ /* finish bitStreams one by one */
+ HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);
+ HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);
+ HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);
+ HUF_decodeStreamX2(op4, &bitD4, oend, dt, dtLog);
+
+ /* check */
+ { U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
+ if (!endCheck) return ERROR(corruption_detected); }
+
+ /* decoded size */
+ return dstSize;
+ }
+}
+
+#if HUF_NEED_BMI2_FUNCTION
+static BMI2_TARGET_ATTRIBUTE
+size_t HUF_decompress4X2_usingDTable_internal_bmi2(void* dst, size_t dstSize, void const* cSrc,
+ size_t cSrcSize, HUF_DTable const* DTable) {
+ return HUF_decompress4X2_usingDTable_internal_body(dst, dstSize, cSrc, cSrcSize, DTable);
+}
+#endif
+
+#if HUF_NEED_DEFAULT_FUNCTION
+static
+size_t HUF_decompress4X2_usingDTable_internal_default(void* dst, size_t dstSize, void const* cSrc,
+ size_t cSrcSize, HUF_DTable const* DTable) {
+ return HUF_decompress4X2_usingDTable_internal_body(dst, dstSize, cSrc, cSrcSize, DTable);
+}
+#endif
+
+#if ZSTD_ENABLE_ASM_X86_64_BMI2
+
+HUF_ASM_DECL void HUF_decompress4X2_usingDTable_internal_bmi2_asm_loop(HUF_DecompressAsmArgs* args) ZSTDLIB_HIDDEN;
+
+static HUF_ASM_X86_64_BMI2_ATTRS size_t
+HUF_decompress4X2_usingDTable_internal_bmi2_asm(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const HUF_DTable* DTable) {
+ void const* dt = DTable + 1;
+ const BYTE* const iend = (const BYTE*)cSrc + 6;
+ BYTE* const oend = (BYTE*)dst + dstSize;
+ HUF_DecompressAsmArgs args;
+ {
+ size_t const ret = HUF_DecompressAsmArgs_init(&args, dst, dstSize, cSrc, cSrcSize, DTable);
+ FORWARD_IF_ERROR(ret, "Failed to init asm args");
+ if (ret != 0)
+ return HUF_decompress4X2_usingDTable_internal_bmi2(dst, dstSize, cSrc, cSrcSize, DTable);
+ }
+
+ assert(args.ip[0] >= args.ilimit);
+ HUF_decompress4X2_usingDTable_internal_bmi2_asm_loop(&args);
+
+ /* note : op4 already verified within main loop */
+ assert(args.ip[0] >= iend);
+ assert(args.ip[1] >= iend);
+ assert(args.ip[2] >= iend);
+ assert(args.ip[3] >= iend);
+ assert(args.op[3] <= oend);
+ (void)iend;
+
+ /* finish bitStreams one by one */
+ {
+ size_t const segmentSize = (dstSize+3) / 4;
+ BYTE* segmentEnd = (BYTE*)dst;
+ int i;
+ for (i = 0; i < 4; ++i) {
+ BIT_DStream_t bit;
+ if (segmentSize <= (size_t)(oend - segmentEnd))
+ segmentEnd += segmentSize;
+ else
+ segmentEnd = oend;
+ FORWARD_IF_ERROR(HUF_initRemainingDStream(&bit, &args, i, segmentEnd), "corruption");
+ args.op[i] += HUF_decodeStreamX2(args.op[i], &bit, segmentEnd, (HUF_DEltX2 const*)dt, HUF_DECODER_FAST_TABLELOG);
+ if (args.op[i] != segmentEnd)
+ return ERROR(corruption_detected);
+ }
+ }
+
+ /* decoded size */
+ return dstSize;
+}
+#endif /* ZSTD_ENABLE_ASM_X86_64_BMI2 */
+
+static size_t HUF_decompress4X2_usingDTable_internal(void* dst, size_t dstSize, void const* cSrc,
+ size_t cSrcSize, HUF_DTable const* DTable, int bmi2)
+{
+#if DYNAMIC_BMI2
+ if (bmi2) {
+# if ZSTD_ENABLE_ASM_X86_64_BMI2
+ return HUF_decompress4X2_usingDTable_internal_bmi2_asm(dst, dstSize, cSrc, cSrcSize, DTable);
+# else
+ return HUF_decompress4X2_usingDTable_internal_bmi2(dst, dstSize, cSrc, cSrcSize, DTable);
+# endif
+ }
+#else
+ (void)bmi2;
+#endif
+
+#if ZSTD_ENABLE_ASM_X86_64_BMI2 && defined(__BMI2__)
+ return HUF_decompress4X2_usingDTable_internal_bmi2_asm(dst, dstSize, cSrc, cSrcSize, DTable);
+#else
+ return HUF_decompress4X2_usingDTable_internal_default(dst, dstSize, cSrc, cSrcSize, DTable);
+#endif
+}
+
+HUF_DGEN(HUF_decompress1X2_usingDTable_internal)
+
+size_t HUF_decompress1X2_usingDTable(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const HUF_DTable* DTable)
+{
+ DTableDesc dtd = HUF_getDTableDesc(DTable);
+ if (dtd.tableType != 1) return ERROR(GENERIC);
+ return HUF_decompress1X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+}
+
+size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ void* workSpace, size_t wkspSize)
+{
+ const BYTE* ip = (const BYTE*) cSrc;
+
+ size_t const hSize = HUF_readDTableX2_wksp(DCtx, cSrc, cSrcSize,
+ workSpace, wkspSize);
+ if (HUF_isError(hSize)) return hSize;
+ if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
+ ip += hSize; cSrcSize -= hSize;
+
+ return HUF_decompress1X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, /* bmi2 */ 0);
+}
+
+
+size_t HUF_decompress4X2_usingDTable(
+ void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ const HUF_DTable* DTable)
+{
+ DTableDesc dtd = HUF_getDTableDesc(DTable);
+ if (dtd.tableType != 1) return ERROR(GENERIC);
+ return HUF_decompress4X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+}
+
+static size_t HUF_decompress4X2_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ void* workSpace, size_t wkspSize, int bmi2)
+{
+ const BYTE* ip = (const BYTE*) cSrc;
+
+ size_t hSize = HUF_readDTableX2_wksp(dctx, cSrc, cSrcSize,
+ workSpace, wkspSize);
+ if (HUF_isError(hSize)) return hSize;
+ if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
+ ip += hSize; cSrcSize -= hSize;
+
+ return HUF_decompress4X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);
+}
+
+size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ void* workSpace, size_t wkspSize)
+{
+ return HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, /* bmi2 */ 0);
+}
+
+
+#endif /* HUF_FORCE_DECOMPRESS_X1 */
+
+
+/* ***********************************/
+/* Universal decompression selectors */
+/* ***********************************/
+
+size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize,
+ const void* cSrc, size_t cSrcSize,
+ const HUF_DTable* DTable)
+{
+ DTableDesc const dtd = HUF_getDTableDesc(DTable);
+#if defined(HUF_FORCE_DECOMPRESS_X1)
+ (void)dtd;
+ assert(dtd.tableType == 0);
+ return HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+#elif defined(HUF_FORCE_DECOMPRESS_X2)
+ (void)dtd;
+ assert(dtd.tableType == 1);
+ return HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+#else
+ return dtd.tableType ? HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0) :
+ HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+#endif
+}
+
+size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize,
+ const void* cSrc, size_t cSrcSize,
+ const HUF_DTable* DTable)
+{
+ DTableDesc const dtd = HUF_getDTableDesc(DTable);
+#if defined(HUF_FORCE_DECOMPRESS_X1)
+ (void)dtd;
+ assert(dtd.tableType == 0);
+ return HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+#elif defined(HUF_FORCE_DECOMPRESS_X2)
+ (void)dtd;
+ assert(dtd.tableType == 1);
+ return HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+#else
+ return dtd.tableType ? HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0) :
+ HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+#endif
+}
+
+
+#if !defined(HUF_FORCE_DECOMPRESS_X1) && !defined(HUF_FORCE_DECOMPRESS_X2)
+typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t;
+static const algo_time_t algoTime[16 /* Quantization */][2 /* single, double */] =
+{
+ /* single, double */
+ {{0,0}, {1,1}}, /* Q==0 : impossible */
+ {{0,0}, {1,1}}, /* Q==1 : impossible */
+ {{ 150,216}, { 381,119}}, /* Q == 2 : 12-18% */
+ {{ 170,205}, { 514,112}}, /* Q == 3 : 18-25% */
+ {{ 177,199}, { 539,110}}, /* Q == 4 : 25-32% */
+ {{ 197,194}, { 644,107}}, /* Q == 5 : 32-38% */
+ {{ 221,192}, { 735,107}}, /* Q == 6 : 38-44% */
+ {{ 256,189}, { 881,106}}, /* Q == 7 : 44-50% */
+ {{ 359,188}, {1167,109}}, /* Q == 8 : 50-56% */
+ {{ 582,187}, {1570,114}}, /* Q == 9 : 56-62% */
+ {{ 688,187}, {1712,122}}, /* Q ==10 : 62-69% */
+ {{ 825,186}, {1965,136}}, /* Q ==11 : 69-75% */
+ {{ 976,185}, {2131,150}}, /* Q ==12 : 75-81% */
+ {{1180,186}, {2070,175}}, /* Q ==13 : 81-87% */
+ {{1377,185}, {1731,202}}, /* Q ==14 : 87-93% */
+ {{1412,185}, {1695,202}}, /* Q ==15 : 93-99% */
+};
+#endif
+
+/* HUF_selectDecoder() :
+ * Tells which decoder is likely to decode faster,
+ * based on a set of pre-computed metrics.
+ * @return : 0==HUF_decompress4X1, 1==HUF_decompress4X2 .
+ * Assumption : 0 < dstSize <= 128 KB */
+U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize)
+{
+ assert(dstSize > 0);
+ assert(dstSize <= 128*1024);
+#if defined(HUF_FORCE_DECOMPRESS_X1)
+ (void)dstSize;
+ (void)cSrcSize;
+ return 0;
+#elif defined(HUF_FORCE_DECOMPRESS_X2)
+ (void)dstSize;
+ (void)cSrcSize;
+ return 1;
+#else
+ /* decoder timing evaluation */
+ { U32 const Q = (cSrcSize >= dstSize) ? 15 : (U32)(cSrcSize * 16 / dstSize); /* Q < 16 */
+ U32 const D256 = (U32)(dstSize >> 8);
+ U32 const DTime0 = algoTime[Q][0].tableTime + (algoTime[Q][0].decode256Time * D256);
+ U32 DTime1 = algoTime[Q][1].tableTime + (algoTime[Q][1].decode256Time * D256);
+ DTime1 += DTime1 >> 5; /* small advantage to algorithm using less memory, to reduce cache eviction */
+ return DTime1 < DTime0;
+ }
+#endif
+}
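+
+/* Worked example (illustrative) : dstSize = 64 KB, cSrcSize = 32 KB.
+ * Q = 32768 * 16 / 65536 = 8 ; D256 = 65536 >> 8 = 256
+ * DTime0 = 359 + 188 * 256 = 48487 (single-symbol X1)
+ * DTime1 = 1167 + 109 * 256 = 29071 ; + (29071 >> 5) = 29979 (double-symbol X2)
+ * DTime1 < DTime0, so HUF_selectDecoder() returns 1 (X2). */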
+
+
+size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst,
+ size_t dstSize, const void* cSrc,
+ size_t cSrcSize, void* workSpace,
+ size_t wkspSize)
+{
+ /* validation checks */
+ if (dstSize == 0) return ERROR(dstSize_tooSmall);
+ if (cSrcSize == 0) return ERROR(corruption_detected);
+
+ { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
+#if defined(HUF_FORCE_DECOMPRESS_X1)
+ (void)algoNb;
+ assert(algoNb == 0);
+ return HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize);
+#elif defined(HUF_FORCE_DECOMPRESS_X2)
+ (void)algoNb;
+ assert(algoNb == 1);
+ return HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize);
+#else
+ return algoNb ? HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc,
+ cSrcSize, workSpace, wkspSize):
+ HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize);
+#endif
+ }
+}
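+
+#if 0 /* Usage sketch, illustrative only (not compiled) : one-shot Huffman-only
+ * decompression with caller-provided storage. Assumes the
+ * HUF_DECOMPRESS_WORKSPACE_SIZE_U32 constant from huf.h. */
+static size_t example_hufOnly(void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize)
+{
+ HUF_CREATE_STATIC_DTABLEX2(dtable, HUF_TABLELOG_MAX); /* large enough for X1 or X2 */
+ U32 wksp[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
+ return HUF_decompress4X_hufOnly_wksp(dtable, dst, dstSize,
+ cSrc, cSrcSize, wksp, sizeof(wksp));
+}
+#endif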
+
+size_t HUF_decompress1X_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
+ const void* cSrc, size_t cSrcSize,
+ void* workSpace, size_t wkspSize)
+{
+ /* validation checks */
+ if (dstSize == 0) return ERROR(dstSize_tooSmall);
+ if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */
+ if (cSrcSize == dstSize) { ZSTD_memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */
+ if (cSrcSize == 1) { ZSTD_memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */
+
+ { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
+#if defined(HUF_FORCE_DECOMPRESS_X1)
+ (void)algoNb;
+ assert(algoNb == 0);
+ return HUF_decompress1X1_DCtx_wksp(dctx, dst, dstSize, cSrc,
+ cSrcSize, workSpace, wkspSize);
+#elif defined(HUF_FORCE_DECOMPRESS_X2)
+ (void)algoNb;
+ assert(algoNb == 1);
+ return HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc,
+ cSrcSize, workSpace, wkspSize);
+#else
+ return algoNb ? HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc,
+ cSrcSize, workSpace, wkspSize):
+ HUF_decompress1X1_DCtx_wksp(dctx, dst, dstSize, cSrc,
+ cSrcSize, workSpace, wkspSize);
+#endif
+ }
+}
+
+
+size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2)
+{
+ DTableDesc const dtd = HUF_getDTableDesc(DTable);
+#if defined(HUF_FORCE_DECOMPRESS_X1)
+ (void)dtd;
+ assert(dtd.tableType == 0);
+ return HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
+#elif defined(HUF_FORCE_DECOMPRESS_X2)
+ (void)dtd;
+ assert(dtd.tableType == 1);
+ return HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
+#else
+ return dtd.tableType ? HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) :
+ HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
+#endif
+}
+
+#ifndef HUF_FORCE_DECOMPRESS_X2
+size_t HUF_decompress1X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2)
+{
+ const BYTE* ip = (const BYTE*) cSrc;
+
+ size_t const hSize = HUF_readDTableX1_wksp_bmi2(dctx, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
+ if (HUF_isError(hSize)) return hSize;
+ if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
+ ip += hSize; cSrcSize -= hSize;
+
+ return HUF_decompress1X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);
+}
+#endif
+
+size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2)
+{
+ DTableDesc const dtd = HUF_getDTableDesc(DTable);
+#if defined(HUF_FORCE_DECOMPRESS_X1)
+ (void)dtd;
+ assert(dtd.tableType == 0);
+ return HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
+#elif defined(HUF_FORCE_DECOMPRESS_X2)
+ (void)dtd;
+ assert(dtd.tableType == 1);
+ return HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
+#else
+ return dtd.tableType ? HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) :
+ HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
+#endif
+}
+
+size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2)
+{
+ /* validation checks */
+ if (dstSize == 0) return ERROR(dstSize_tooSmall);
+ if (cSrcSize == 0) return ERROR(corruption_detected);
+
+ { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
+#if defined(HUF_FORCE_DECOMPRESS_X1)
+ (void)algoNb;
+ assert(algoNb == 0);
+ return HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
+#elif defined(HUF_FORCE_DECOMPRESS_X2)
+ (void)algoNb;
+ assert(algoNb == 1);
+ return HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
+#else
+ return algoNb ? HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2) :
+ HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
+#endif
+ }
+}
+
diff --git a/lib/zstd/decompress/zstd_ddict.c b/lib/zstd/decompress/zstd_ddict.c
new file mode 100644
index 000000000..dbbc7919d
--- /dev/null
+++ b/lib/zstd/decompress/zstd_ddict.c
@@ -0,0 +1,241 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+/* zstd_ddict.c :
+ * concentrates all logic that needs to know the internals of the ZSTD_DDict object */
+
+/*-*******************************************************
+* Dependencies
+*********************************************************/
+#include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memmove, ZSTD_memset */
+#include "../common/cpu.h" /* bmi2 */
+#include "../common/mem.h" /* low level memory routines */
+#define FSE_STATIC_LINKING_ONLY
+#include "../common/fse.h"
+#define HUF_STATIC_LINKING_ONLY
+#include "../common/huf.h"
+#include "zstd_decompress_internal.h"
+#include "zstd_ddict.h"
+
+
+
+
+/*-*******************************************************
+* Types
+*********************************************************/
+struct ZSTD_DDict_s {
+ void* dictBuffer;
+ const void* dictContent;
+ size_t dictSize;
+ ZSTD_entropyDTables_t entropy;
+ U32 dictID;
+ U32 entropyPresent;
+ ZSTD_customMem cMem;
+}; /* typedef'd to ZSTD_DDict within "zstd.h" */
+
+const void* ZSTD_DDict_dictContent(const ZSTD_DDict* ddict)
+{
+ assert(ddict != NULL);
+ return ddict->dictContent;
+}
+
+size_t ZSTD_DDict_dictSize(const ZSTD_DDict* ddict)
+{
+ assert(ddict != NULL);
+ return ddict->dictSize;
+}
+
+void ZSTD_copyDDictParameters(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
+{
+ DEBUGLOG(4, "ZSTD_copyDDictParameters");
+ assert(dctx != NULL);
+ assert(ddict != NULL);
+ dctx->dictID = ddict->dictID;
+ dctx->prefixStart = ddict->dictContent;
+ dctx->virtualStart = ddict->dictContent;
+ dctx->dictEnd = (const BYTE*)ddict->dictContent + ddict->dictSize;
+ dctx->previousDstEnd = dctx->dictEnd;
+#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+ dctx->dictContentBeginForFuzzing = dctx->prefixStart;
+ dctx->dictContentEndForFuzzing = dctx->previousDstEnd;
+#endif
+ if (ddict->entropyPresent) {
+ dctx->litEntropy = 1;
+ dctx->fseEntropy = 1;
+ dctx->LLTptr = ddict->entropy.LLTable;
+ dctx->MLTptr = ddict->entropy.MLTable;
+ dctx->OFTptr = ddict->entropy.OFTable;
+ dctx->HUFptr = ddict->entropy.hufTable;
+ dctx->entropy.rep[0] = ddict->entropy.rep[0];
+ dctx->entropy.rep[1] = ddict->entropy.rep[1];
+ dctx->entropy.rep[2] = ddict->entropy.rep[2];
+ } else {
+ dctx->litEntropy = 0;
+ dctx->fseEntropy = 0;
+ }
+}
+
+
+static size_t
+ZSTD_loadEntropy_intoDDict(ZSTD_DDict* ddict,
+ ZSTD_dictContentType_e dictContentType)
+{
+ ddict->dictID = 0;
+ ddict->entropyPresent = 0;
+ if (dictContentType == ZSTD_dct_rawContent) return 0;
+
+ if (ddict->dictSize < 8) {
+ if (dictContentType == ZSTD_dct_fullDict)
+ return ERROR(dictionary_corrupted); /* only accept specified dictionaries */
+ return 0; /* pure content mode */
+ }
+ { U32 const magic = MEM_readLE32(ddict->dictContent);
+ if (magic != ZSTD_MAGIC_DICTIONARY) {
+ if (dictContentType == ZSTD_dct_fullDict)
+ return ERROR(dictionary_corrupted); /* only accept specified dictionaries */
+ return 0; /* pure content mode */
+ }
+ }
+ ddict->dictID = MEM_readLE32((const char*)ddict->dictContent + ZSTD_FRAMEIDSIZE);
+
+ /* load entropy tables */
+ RETURN_ERROR_IF(ZSTD_isError(ZSTD_loadDEntropy(
+ &ddict->entropy, ddict->dictContent, ddict->dictSize)),
+ dictionary_corrupted, "");
+ ddict->entropyPresent = 1;
+ return 0;
+}
+
+
+static size_t ZSTD_initDDict_internal(ZSTD_DDict* ddict,
+ const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType)
+{
+ if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dict) || (!dictSize)) {
+ ddict->dictBuffer = NULL;
+ ddict->dictContent = dict;
+ if (!dict) dictSize = 0;
+ } else {
+ void* const internalBuffer = ZSTD_customMalloc(dictSize, ddict->cMem);
+ ddict->dictBuffer = internalBuffer;
+ ddict->dictContent = internalBuffer;
+ if (!internalBuffer) return ERROR(memory_allocation);
+ ZSTD_memcpy(internalBuffer, dict, dictSize);
+ }
+ ddict->dictSize = dictSize;
+ ddict->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */
+
+ /* parse dictionary content */
+ FORWARD_IF_ERROR( ZSTD_loadEntropy_intoDDict(ddict, dictContentType) , "");
+
+ return 0;
+}
+
+ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType,
+ ZSTD_customMem customMem)
+{
+ if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
+
+ { ZSTD_DDict* const ddict = (ZSTD_DDict*) ZSTD_customMalloc(sizeof(ZSTD_DDict), customMem);
+ if (ddict == NULL) return NULL;
+ ddict->cMem = customMem;
+ { size_t const initResult = ZSTD_initDDict_internal(ddict,
+ dict, dictSize,
+ dictLoadMethod, dictContentType);
+ if (ZSTD_isError(initResult)) {
+ ZSTD_freeDDict(ddict);
+ return NULL;
+ } }
+ return ddict;
+ }
+}
+
+/*! ZSTD_createDDict() :
+* Create a digested dictionary, to start decompression without startup delay.
+* `dict` content is copied inside DDict.
+* Consequently, `dict` can be released after `ZSTD_DDict` creation */
+ZSTD_DDict* ZSTD_createDDict(const void* dict, size_t dictSize)
+{
+ ZSTD_customMem const allocator = { NULL, NULL, NULL };
+ return ZSTD_createDDict_advanced(dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto, allocator);
+}
+
+/*! ZSTD_createDDict_byReference() :
+ * Create a digested dictionary, to start decompression without startup delay.
+ * Dictionary content is simply referenced, it will be accessed during decompression.
+ * Warning : dictBuffer must outlive DDict (DDict must be freed before dictBuffer) */
+ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize)
+{
+ ZSTD_customMem const allocator = { NULL, NULL, NULL };
+ return ZSTD_createDDict_advanced(dictBuffer, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto, allocator);
+}
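+
+#if 0 /* Usage sketch, illustrative only (not compiled) : typical DDict
+ * lifecycle for decompressing many frames with the same dictionary.
+ * ZSTD_decompress_usingDDict() is the public zstd entry point. */
+static void example_ddict_lifecycle(const void* dictBuf, size_t dictSize,
+ void* dst, size_t dstCap,
+ const void* frame, size_t frameSize)
+{
+ ZSTD_DCtx* const dctx = ZSTD_createDCtx();
+ ZSTD_DDict* const ddict = ZSTD_createDDict(dictBuf, dictSize); /* copies dictBuf */
+ if (dctx && ddict)
+ (void)ZSTD_decompress_usingDDict(dctx, dst, dstCap, frame, frameSize, ddict);
+ ZSTD_freeDDict(ddict);
+ ZSTD_freeDCtx(dctx);
+}
+#endif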
+
+
+const ZSTD_DDict* ZSTD_initStaticDDict(
+ void* sBuffer, size_t sBufferSize,
+ const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType)
+{
+ size_t const neededSpace = sizeof(ZSTD_DDict)
+ + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize);
+ ZSTD_DDict* const ddict = (ZSTD_DDict*)sBuffer;
+ assert(sBuffer != NULL);
+ assert(dict != NULL);
+ if ((size_t)sBuffer & 7) return NULL; /* 8-aligned */
+ if (sBufferSize < neededSpace) return NULL;
+ if (dictLoadMethod == ZSTD_dlm_byCopy) {
+ ZSTD_memcpy(ddict+1, dict, dictSize); /* local copy */
+ dict = ddict+1;
+ }
+ if (ZSTD_isError( ZSTD_initDDict_internal(ddict,
+ dict, dictSize,
+ ZSTD_dlm_byRef, dictContentType) ))
+ return NULL;
+ return ddict;
+}
+
+
+size_t ZSTD_freeDDict(ZSTD_DDict* ddict)
+{
+ if (ddict==NULL) return 0; /* support free on NULL */
+ { ZSTD_customMem const cMem = ddict->cMem;
+ ZSTD_customFree(ddict->dictBuffer, cMem);
+ ZSTD_customFree(ddict, cMem);
+ return 0;
+ }
+}
+
+/*! ZSTD_estimateDDictSize() :
+ * Estimate amount of memory that will be needed to create a dictionary for decompression.
+ * Note : dictionaries created by reference (ZSTD_dlm_byRef) are smaller */
+size_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod)
+{
+ return sizeof(ZSTD_DDict) + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize);
+}
+
+size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict)
+{
+ if (ddict==NULL) return 0; /* support sizeof on NULL */
+ return sizeof(*ddict) + (ddict->dictBuffer ? ddict->dictSize : 0) ;
+}
+
+/*! ZSTD_getDictID_fromDDict() :
+ * Provides the dictID of the dictionary loaded into `ddict`.
+ * If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
+ * Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
+unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict)
+{
+ if (ddict==NULL) return 0;
+ return ZSTD_getDictID_fromDict(ddict->dictContent, ddict->dictSize);
+}
diff --git a/lib/zstd/decompress/zstd_ddict.h b/lib/zstd/decompress/zstd_ddict.h
new file mode 100644
index 000000000..8c1a79d66
--- /dev/null
+++ b/lib/zstd/decompress/zstd_ddict.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+
+#ifndef ZSTD_DDICT_H
+#define ZSTD_DDICT_H
+
+/*-*******************************************************
+ * Dependencies
+ *********************************************************/
+#include "../common/zstd_deps.h" /* size_t */
+#include <linux/zstd.h> /* ZSTD_DDict, and several public functions */
+
+
+/*-*******************************************************
+ * Interface
+ *********************************************************/
+
+/* note: several prototypes are already published in `zstd.h` :
+ * ZSTD_createDDict()
+ * ZSTD_createDDict_byReference()
+ * ZSTD_createDDict_advanced()
+ * ZSTD_freeDDict()
+ * ZSTD_initStaticDDict()
+ * ZSTD_sizeof_DDict()
+ * ZSTD_estimateDDictSize()
+ * ZSTD_getDictID_fromDict()
+ */
+
+const void* ZSTD_DDict_dictContent(const ZSTD_DDict* ddict);
+size_t ZSTD_DDict_dictSize(const ZSTD_DDict* ddict);
+
+void ZSTD_copyDDictParameters(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict);
+
+
+
+#endif /* ZSTD_DDICT_H */
diff --git a/lib/zstd/decompress/zstd_decompress.c b/lib/zstd/decompress/zstd_decompress.c
new file mode 100644
index 000000000..6b3177c94
--- /dev/null
+++ b/lib/zstd/decompress/zstd_decompress.c
@@ -0,0 +1,2150 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+
+/* ***************************************************************
+* Tuning parameters
+*****************************************************************/
+/*!
+ * HEAPMODE :
+ * Select how the default decompression function ZSTD_decompress() allocates its context:
+ * on the stack (0), or on the heap (1, default; requires malloc()).
+ * Note that functions with explicit context such as ZSTD_decompressDCtx() are unaffected.
+ */
+#ifndef ZSTD_HEAPMODE
+# define ZSTD_HEAPMODE 1
+#endif
+
+/*!
+* LEGACY_SUPPORT :
+* if set to 1+, ZSTD_decompress() can decode older formats (v0.1+)
+*/
+
+/*!
+ * MAXWINDOWSIZE_DEFAULT :
+ * maximum window size accepted by DStream __by default__.
+ * Frames requiring more memory will be rejected.
+ * It's possible to set a different limit using ZSTD_DCtx_setMaxWindowSize().
+ */
+#ifndef ZSTD_MAXWINDOWSIZE_DEFAULT
+# define ZSTD_MAXWINDOWSIZE_DEFAULT (((U32)1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT) + 1)
+#endif
+
+/*!
+ * NO_FORWARD_PROGRESS_MAX :
+ * maximum allowed nb of calls to ZSTD_decompressStream()
+ * without any forward progress
+ * (defined as: no byte read from input, and no byte flushed to output)
+ * before triggering an error.
+ */
+#ifndef ZSTD_NO_FORWARD_PROGRESS_MAX
+# define ZSTD_NO_FORWARD_PROGRESS_MAX 16
+#endif
+
+
+/*-*******************************************************
+* Dependencies
+*********************************************************/
+#include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memmove, ZSTD_memset */
+#include "../common/mem.h" /* low level memory routines */
+#define FSE_STATIC_LINKING_ONLY
+#include "../common/fse.h"
+#define HUF_STATIC_LINKING_ONLY
+#include "../common/huf.h"
+#include <linux/xxhash.h> /* xxh64_reset, xxh64_update, xxh64_digest, XXH64 */
+#include "../common/zstd_internal.h" /* blockProperties_t */
+#include "zstd_decompress_internal.h" /* ZSTD_DCtx */
+#include "zstd_ddict.h" /* ZSTD_DDictDictContent */
+#include "zstd_decompress_block.h" /* ZSTD_decompressBlock_internal */
+
+
+
+
+/* ***********************************
+ * Multiple DDicts Hashset internals *
+ *************************************/
+
+#define DDICT_HASHSET_MAX_LOAD_FACTOR_COUNT_MULT 4
+#define DDICT_HASHSET_MAX_LOAD_FACTOR_SIZE_MULT 3 /* These two constants represent SIZE_MULT/COUNT_MULT load factor without using a float.
+ * Currently, that means a 0.75 load factor.
+ * So, once count * COUNT_MULT >= size * SIZE_MULT, we have exceeded
+ * the load factor of the ddict hash set and the table must be expanded.
+ */
+
+#define DDICT_HASHSET_TABLE_BASE_SIZE 64
+#define DDICT_HASHSET_RESIZE_FACTOR 2
+
+/* Hash function to determine the starting position of a dict insertion within the table.
+ * Returns an index in [0, hashSet->ddictPtrTableSize)
+ */
+static size_t ZSTD_DDictHashSet_getIndex(const ZSTD_DDictHashSet* hashSet, U32 dictID) {
+ const U64 hash = xxh64(&dictID, sizeof(U32), 0);
+ /* DDict ptr table size is a power of 2, so size - 1 works as a mask to get an index within [0, hashSet->ddictPtrTableSize) */
+ return hash & (hashSet->ddictPtrTableSize - 1);
+}
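+
+/* Example (illustrative) : with the base table size of 64, the mask is 0x3F,
+ * so the 64-bit xxhash of the dictID maps to one of slots 0..63. */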
+
+/* Adds DDict to a hashset without resizing it.
+ * If inserting a DDict with a dictID that already exists in the set, replaces the one in the set.
+ * Returns 0 if successful, or a zstd error code if something went wrong.
+ */
+static size_t ZSTD_DDictHashSet_emplaceDDict(ZSTD_DDictHashSet* hashSet, const ZSTD_DDict* ddict) {
+ const U32 dictID = ZSTD_getDictID_fromDDict(ddict);
+ size_t idx = ZSTD_DDictHashSet_getIndex(hashSet, dictID);
+ const size_t idxRangeMask = hashSet->ddictPtrTableSize - 1;
+ RETURN_ERROR_IF(hashSet->ddictPtrCount == hashSet->ddictPtrTableSize, GENERIC, "Hash set is full!");
+ DEBUGLOG(4, "Hashed index: for dictID: %u is %zu", dictID, idx);
+ while (hashSet->ddictPtrTable[idx] != NULL) {
+ /* Replace existing ddict if inserting ddict with same dictID */
+ if (ZSTD_getDictID_fromDDict(hashSet->ddictPtrTable[idx]) == dictID) {
+ DEBUGLOG(4, "DictID already exists, replacing rather than adding");
+ hashSet->ddictPtrTable[idx] = ddict;
+ return 0;
+ }
+ idx = (idx + 1) & idxRangeMask; /* wrap to the start of the table, without reading past its end */
+ }
+ DEBUGLOG(4, "Final idx after probing for dictID %u is: %zu", dictID, idx);
+ hashSet->ddictPtrTable[idx] = ddict;
+ hashSet->ddictPtrCount++;
+ return 0;
+}
+
+/* Expands hash table by factor of DDICT_HASHSET_RESIZE_FACTOR and
+ * rehashes all values, allocates new table, frees old table.
+ * Returns 0 on success, otherwise a zstd error code.
+ */
+static size_t ZSTD_DDictHashSet_expand(ZSTD_DDictHashSet* hashSet, ZSTD_customMem customMem) {
+ size_t newTableSize = hashSet->ddictPtrTableSize * DDICT_HASHSET_RESIZE_FACTOR;
+ const ZSTD_DDict** newTable = (const ZSTD_DDict**)ZSTD_customCalloc(sizeof(ZSTD_DDict*) * newTableSize, customMem);
+ const ZSTD_DDict** oldTable = hashSet->ddictPtrTable;
+ size_t oldTableSize = hashSet->ddictPtrTableSize;
+ size_t i;
+
+ DEBUGLOG(4, "Expanding DDict hash table! Old size: %zu new size: %zu", oldTableSize, newTableSize);
+ RETURN_ERROR_IF(!newTable, memory_allocation, "Expanded hashset allocation failed!");
+ hashSet->ddictPtrTable = newTable;
+ hashSet->ddictPtrTableSize = newTableSize;
+ hashSet->ddictPtrCount = 0;
+ for (i = 0; i < oldTableSize; ++i) {
+ if (oldTable[i] != NULL) {
+ FORWARD_IF_ERROR(ZSTD_DDictHashSet_emplaceDDict(hashSet, oldTable[i]), "");
+ }
+ }
+ ZSTD_customFree((void*)oldTable, customMem);
+ DEBUGLOG(4, "Finished re-hash");
+ return 0;
+}
+
+/* Fetches a DDict with the given dictID
+ * Returns the ZSTD_DDict* with the requested dictID. If it doesn't exist, then returns NULL.
+ */
+static const ZSTD_DDict* ZSTD_DDictHashSet_getDDict(ZSTD_DDictHashSet* hashSet, U32 dictID) {
+ size_t idx = ZSTD_DDictHashSet_getIndex(hashSet, dictID);
+ const size_t idxRangeMask = hashSet->ddictPtrTableSize - 1;
+ DEBUGLOG(4, "Hashed index: for dictID: %u is %zu", dictID, idx);
+ for (;;) {
+ size_t currDictID = ZSTD_getDictID_fromDDict(hashSet->ddictPtrTable[idx]);
+ if (currDictID == dictID || currDictID == 0) {
+ /* currDictID == 0 implies a NULL ddict entry */
+ break;
+ } else {
+ idx = (idx + 1) & idxRangeMask; /* Goes to start of table when we reach the end */
+ }
+ }
+ DEBUGLOG(4, "Final idx after probing for dictID %u is: %zu", dictID, idx);
+ return hashSet->ddictPtrTable[idx];
+}
+
+/* Allocates space for and returns a ddict hash set
+ * The hash set's ZSTD_DDict* table has all values automatically set to NULL to begin with.
+ * Returns NULL if allocation failed.
+ */
+static ZSTD_DDictHashSet* ZSTD_createDDictHashSet(ZSTD_customMem customMem) {
+ ZSTD_DDictHashSet* ret = (ZSTD_DDictHashSet*)ZSTD_customMalloc(sizeof(ZSTD_DDictHashSet), customMem);
+ DEBUGLOG(4, "Allocating new hash set");
+ if (!ret)
+ return NULL;
+ ret->ddictPtrTable = (const ZSTD_DDict**)ZSTD_customCalloc(DDICT_HASHSET_TABLE_BASE_SIZE * sizeof(ZSTD_DDict*), customMem);
+ if (!ret->ddictPtrTable) {
+ ZSTD_customFree(ret, customMem);
+ return NULL;
+ }
+ ret->ddictPtrTableSize = DDICT_HASHSET_TABLE_BASE_SIZE;
+ ret->ddictPtrCount = 0;
+ return ret;
+}
+
+/* Frees the table of ZSTD_DDict* within a hashset, then frees the hashset itself.
+ * Note: The ZSTD_DDict* within the table are NOT freed.
+ */
+static void ZSTD_freeDDictHashSet(ZSTD_DDictHashSet* hashSet, ZSTD_customMem customMem) {
+ DEBUGLOG(4, "Freeing ddict hash set");
+ if (hashSet && hashSet->ddictPtrTable) {
+ ZSTD_customFree((void*)hashSet->ddictPtrTable, customMem);
+ }
+ if (hashSet) {
+ ZSTD_customFree(hashSet, customMem);
+ }
+}
+
+/* Public function: Adds a DDict into the ZSTD_DDictHashSet, possibly triggering a resize of the hash set.
+ * Returns 0 on success, or a ZSTD error.
+ */
+static size_t ZSTD_DDictHashSet_addDDict(ZSTD_DDictHashSet* hashSet, const ZSTD_DDict* ddict, ZSTD_customMem customMem) {
+ DEBUGLOG(4, "Adding dict ID: %u to hashset with - Count: %zu Tablesize: %zu", ZSTD_getDictID_fromDDict(ddict), hashSet->ddictPtrCount, hashSet->ddictPtrTableSize);
+ if (hashSet->ddictPtrCount * DDICT_HASHSET_MAX_LOAD_FACTOR_COUNT_MULT >= hashSet->ddictPtrTableSize * DDICT_HASHSET_MAX_LOAD_FACTOR_SIZE_MULT) {
+ FORWARD_IF_ERROR(ZSTD_DDictHashSet_expand(hashSet, customMem), "");
+ }
+ FORWARD_IF_ERROR(ZSTD_DDictHashSet_emplaceDDict(hashSet, ddict), "");
+ return 0;
+}
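+
+/* Example (illustrative, assuming the 0.75 load-factor check above) : with
+ * ddictPtrTableSize = 64, expansion triggers once ddictPtrCount reaches 48
+ * (48 * 4 >= 64 * 3), doubling the table to 128 slots before the insertion. */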
+
+/*-*************************************************************
+* Context management
+***************************************************************/
+size_t ZSTD_sizeof_DCtx (const ZSTD_DCtx* dctx)
+{
+ if (dctx==NULL) return 0; /* support sizeof NULL */
+ return sizeof(*dctx)
+ + ZSTD_sizeof_DDict(dctx->ddictLocal)
+ + dctx->inBuffSize + dctx->outBuffSize;
+}
+
+size_t ZSTD_estimateDCtxSize(void) { return sizeof(ZSTD_DCtx); }
+
+
+static size_t ZSTD_startingInputLength(ZSTD_format_e format)
+{
+ size_t const startingInputLength = ZSTD_FRAMEHEADERSIZE_PREFIX(format);
+ /* only supports formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless */
+ assert( (format == ZSTD_f_zstd1) || (format == ZSTD_f_zstd1_magicless) );
+ return startingInputLength;
+}
+
+static void ZSTD_DCtx_resetParameters(ZSTD_DCtx* dctx)
+{
+ assert(dctx->streamStage == zdss_init);
+ dctx->format = ZSTD_f_zstd1;
+ dctx->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT;
+ dctx->outBufferMode = ZSTD_bm_buffered;
+ dctx->forceIgnoreChecksum = ZSTD_d_validateChecksum;
+ dctx->refMultipleDDicts = ZSTD_rmd_refSingleDDict;
+}
+
+static void ZSTD_initDCtx_internal(ZSTD_DCtx* dctx)
+{
+ dctx->staticSize = 0;
+ dctx->ddict = NULL;
+ dctx->ddictLocal = NULL;
+ dctx->dictEnd = NULL;
+ dctx->ddictIsCold = 0;
+ dctx->dictUses = ZSTD_dont_use;
+ dctx->inBuff = NULL;
+ dctx->inBuffSize = 0;
+ dctx->outBuffSize = 0;
+ dctx->streamStage = zdss_init;
+ dctx->noForwardProgress = 0;
+ dctx->oversizedDuration = 0;
+#if DYNAMIC_BMI2
+ dctx->bmi2 = ZSTD_cpuSupportsBmi2();
+#endif
+ dctx->ddictSet = NULL;
+ ZSTD_DCtx_resetParameters(dctx);
+#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+ dctx->dictContentEndForFuzzing = NULL;
+#endif
+}
+
+ZSTD_DCtx* ZSTD_initStaticDCtx(void *workspace, size_t workspaceSize)
+{
+ ZSTD_DCtx* const dctx = (ZSTD_DCtx*) workspace;
+
+ if ((size_t)workspace & 7) return NULL; /* 8-aligned */
+ if (workspaceSize < sizeof(ZSTD_DCtx)) return NULL; /* minimum size */
+
+ ZSTD_initDCtx_internal(dctx);
+ dctx->staticSize = workspaceSize;
+ dctx->inBuff = (char*)(dctx+1);
+ return dctx;
+}
+
+static ZSTD_DCtx* ZSTD_createDCtx_internal(ZSTD_customMem customMem) {
+ if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
+
+ { ZSTD_DCtx* const dctx = (ZSTD_DCtx*)ZSTD_customMalloc(sizeof(*dctx), customMem);
+ if (!dctx) return NULL;
+ dctx->customMem = customMem;
+ ZSTD_initDCtx_internal(dctx);
+ return dctx;
+ }
+}
+
+ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem)
+{
+ return ZSTD_createDCtx_internal(customMem);
+}
+
+ZSTD_DCtx* ZSTD_createDCtx(void)
+{
+ DEBUGLOG(3, "ZSTD_createDCtx");
+ return ZSTD_createDCtx_internal(ZSTD_defaultCMem);
+}
+
+static void ZSTD_clearDict(ZSTD_DCtx* dctx)
+{
+ ZSTD_freeDDict(dctx->ddictLocal);
+ dctx->ddictLocal = NULL;
+ dctx->ddict = NULL;
+ dctx->dictUses = ZSTD_dont_use;
+}
+
+size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx)
+{
+ if (dctx==NULL) return 0; /* support free on NULL */
+ RETURN_ERROR_IF(dctx->staticSize, memory_allocation, "not compatible with static DCtx");
+ { ZSTD_customMem const cMem = dctx->customMem;
+ ZSTD_clearDict(dctx);
+ ZSTD_customFree(dctx->inBuff, cMem);
+ dctx->inBuff = NULL;
+ if (dctx->ddictSet) {
+ ZSTD_freeDDictHashSet(dctx->ddictSet, cMem);
+ dctx->ddictSet = NULL;
+ }
+ ZSTD_customFree(dctx, cMem);
+ return 0;
+ }
+}
+
+/* no longer useful */
+void ZSTD_copyDCtx(ZSTD_DCtx* dstDCtx, const ZSTD_DCtx* srcDCtx)
+{
+ size_t const toCopy = (size_t)((char*)(&dstDCtx->inBuff) - (char*)dstDCtx);
+ ZSTD_memcpy(dstDCtx, srcDCtx, toCopy); /* no need to copy workspace */
+}
+
+/* Given a dctx with digested frame params, re-selects the correct ZSTD_DDict based on
+ * the requested dict ID from the frame. If there exists a reference to the correct ZSTD_DDict, then
+ * accordingly sets the ddict to be used to decompress the frame.
+ *
+ * If no DDict is found, then no action is taken, and the ZSTD_DCtx::ddict remains as-is.
+ *
+ * ZSTD_d_refMultipleDDicts must be enabled for this function to be called.
+ */
+static void ZSTD_DCtx_selectFrameDDict(ZSTD_DCtx* dctx) {
+ assert(dctx->refMultipleDDicts && dctx->ddictSet);
+ DEBUGLOG(4, "Adjusting DDict based on requested dict ID from frame");
+ if (dctx->ddict) {
+ const ZSTD_DDict* frameDDict = ZSTD_DDictHashSet_getDDict(dctx->ddictSet, dctx->fParams.dictID);
+ if (frameDDict) {
+ DEBUGLOG(4, "DDict found!");
+ ZSTD_clearDict(dctx);
+ dctx->dictID = dctx->fParams.dictID;
+ dctx->ddict = frameDDict;
+ dctx->dictUses = ZSTD_use_indefinitely;
+ }
+ }
+}
+
+
+/*-*************************************************************
+ * Frame header decoding
+ ***************************************************************/
+
+/*! ZSTD_isFrame() :
+ * Tells if the content of `buffer` starts with a valid Frame Identifier.
+ * Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0.
+ * Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled.
+ * Note 3 : Skippable Frame Identifiers are considered valid. */
+unsigned ZSTD_isFrame(const void* buffer, size_t size)
+{
+ if (size < ZSTD_FRAMEIDSIZE) return 0;
+ { U32 const magic = MEM_readLE32(buffer);
+ if (magic == ZSTD_MAGICNUMBER) return 1;
+ if ((magic & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) return 1;
+ }
+ return 0;
+}
+
+/*! ZSTD_isSkippableFrame() :
+ * Tells if the content of `buffer` starts with a valid Frame Identifier for a skippable frame.
+ * Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0.
+ */
+unsigned ZSTD_isSkippableFrame(const void* buffer, size_t size)
+{
+ if (size < ZSTD_FRAMEIDSIZE) return 0;
+ { U32 const magic = MEM_readLE32(buffer);
+ if ((magic & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) return 1;
+ }
+ return 0;
+}
+
+/* ZSTD_frameHeaderSize_internal() :
+ * srcSize must be large enough to reach header size fields.
+ * note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless.
+ * @return : size of the Frame Header
+ * or an error code, which can be tested with ZSTD_isError() */
+static size_t ZSTD_frameHeaderSize_internal(const void* src, size_t srcSize, ZSTD_format_e format)
+{
+ size_t const minInputSize = ZSTD_startingInputLength(format);
+ RETURN_ERROR_IF(srcSize < minInputSize, srcSize_wrong, "");
+
+ { BYTE const fhd = ((const BYTE*)src)[minInputSize-1];
+ U32 const dictID= fhd & 3;
+ U32 const singleSegment = (fhd >> 5) & 1;
+ U32 const fcsId = fhd >> 6;
+ return minInputSize + !singleSegment
+ + ZSTD_did_fieldSize[dictID] + ZSTD_fcs_fieldSize[fcsId]
+ + (singleSegment && !fcsId);
+ }
+}
+
+/* ZSTD_frameHeaderSize() :
+ * srcSize must be >= ZSTD_frameHeaderSize_prefix.
+ * @return : size of the Frame Header,
+ * or an error code (if srcSize is too small) */
+size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize)
+{
+ return ZSTD_frameHeaderSize_internal(src, srcSize, ZSTD_f_zstd1);
+}
+
+
+/* ZSTD_getFrameHeader_advanced() :
+ * decode Frame Header, or require larger `srcSize`.
+ * note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless
+ * @return : 0, `zfhPtr` is correctly filled,
+ * >0, `srcSize` is too small, value is wanted `srcSize` amount,
+ * or an error code, which can be tested using ZSTD_isError() */
+size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format)
+{
+ const BYTE* ip = (const BYTE*)src;
+ size_t const minInputSize = ZSTD_startingInputLength(format);
+
+    ZSTD_memset(zfhPtr, 0, sizeof(*zfhPtr));   /* not strictly necessary, but static analyzers do not understand that zfhPtr will only be read if the return value is zero, since those are 2 different signals */
+ if (srcSize < minInputSize) return minInputSize;
+ RETURN_ERROR_IF(src==NULL, GENERIC, "invalid parameter");
+
+ if ( (format != ZSTD_f_zstd1_magicless)
+ && (MEM_readLE32(src) != ZSTD_MAGICNUMBER) ) {
+ if ((MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
+ /* skippable frame */
+ if (srcSize < ZSTD_SKIPPABLEHEADERSIZE)
+ return ZSTD_SKIPPABLEHEADERSIZE; /* magic number + frame length */
+ ZSTD_memset(zfhPtr, 0, sizeof(*zfhPtr));
+ zfhPtr->frameContentSize = MEM_readLE32((const char *)src + ZSTD_FRAMEIDSIZE);
+ zfhPtr->frameType = ZSTD_skippableFrame;
+ return 0;
+ }
+ RETURN_ERROR(prefix_unknown, "");
+ }
+
+ /* ensure there is enough `srcSize` to fully read/decode frame header */
+ { size_t const fhsize = ZSTD_frameHeaderSize_internal(src, srcSize, format);
+ if (srcSize < fhsize) return fhsize;
+ zfhPtr->headerSize = (U32)fhsize;
+ }
+
+ { BYTE const fhdByte = ip[minInputSize-1];
+ size_t pos = minInputSize;
+ U32 const dictIDSizeCode = fhdByte&3;
+ U32 const checksumFlag = (fhdByte>>2)&1;
+ U32 const singleSegment = (fhdByte>>5)&1;
+ U32 const fcsID = fhdByte>>6;
+ U64 windowSize = 0;
+ U32 dictID = 0;
+ U64 frameContentSize = ZSTD_CONTENTSIZE_UNKNOWN;
+ RETURN_ERROR_IF((fhdByte & 0x08) != 0, frameParameter_unsupported,
+ "reserved bits, must be zero");
+
+ if (!singleSegment) {
+ BYTE const wlByte = ip[pos++];
+ U32 const windowLog = (wlByte >> 3) + ZSTD_WINDOWLOG_ABSOLUTEMIN;
+ RETURN_ERROR_IF(windowLog > ZSTD_WINDOWLOG_MAX, frameParameter_windowTooLarge, "");
+ windowSize = (1ULL << windowLog);
+ windowSize += (windowSize >> 3) * (wlByte&7);
+ }
+ switch(dictIDSizeCode)
+ {
+ default:
+ assert(0); /* impossible */
+ ZSTD_FALLTHROUGH;
+ case 0 : break;
+ case 1 : dictID = ip[pos]; pos++; break;
+ case 2 : dictID = MEM_readLE16(ip+pos); pos+=2; break;
+ case 3 : dictID = MEM_readLE32(ip+pos); pos+=4; break;
+ }
+ switch(fcsID)
+ {
+ default:
+ assert(0); /* impossible */
+ ZSTD_FALLTHROUGH;
+ case 0 : if (singleSegment) frameContentSize = ip[pos]; break;
+ case 1 : frameContentSize = MEM_readLE16(ip+pos)+256; break;
+ case 2 : frameContentSize = MEM_readLE32(ip+pos); break;
+ case 3 : frameContentSize = MEM_readLE64(ip+pos); break;
+ }
+ if (singleSegment) windowSize = frameContentSize;
+
+ zfhPtr->frameType = ZSTD_frame;
+ zfhPtr->frameContentSize = frameContentSize;
+ zfhPtr->windowSize = windowSize;
+ zfhPtr->blockSizeMax = (unsigned) MIN(windowSize, ZSTD_BLOCKSIZE_MAX);
+ zfhPtr->dictID = dictID;
+ zfhPtr->checksumFlag = checksumFlag;
+ }
+ return 0;
+}
+
+/* ZSTD_getFrameHeader() :
+ * decode Frame Header, or require larger `srcSize`.
+ * note : this function does not consume input, it only reads it.
+ * @return : 0, `zfhPtr` is correctly filled,
+ * >0, `srcSize` is too small, value is wanted `srcSize` amount,
+ * or an error code, which can be tested using ZSTD_isError() */
+size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize)
+{
+ return ZSTD_getFrameHeader_advanced(zfhPtr, src, srcSize, ZSTD_f_zstd1);
+}
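+
+/* Example (illustrative sketch, in a comment only) : the tri-state return of
+ * ZSTD_getFrameHeader(). Assumes `src`/`srcSize` point at the beginning of a
+ * frame.
+ *
+ *   ZSTD_frameHeader zfh;
+ *   size_t const r = ZSTD_getFrameHeader(&zfh, src, srcSize);
+ *   if (ZSTD_isError(r)) {
+ *       // invalid header (for example, unknown magic number)
+ *   } else if (r > 0) {
+ *       // header incomplete : supply at least `r` bytes total and retry
+ *   } else {
+ *       // success : zfh.windowSize, zfh.frameContentSize, etc. are valid
+ *   }
+ */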
+
+/* ZSTD_getFrameContentSize() :
+ * compatible with legacy mode
+ * @return : decompressed size of the single frame pointed to by `src` if known, otherwise
+ * - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
+ * - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small) */
+unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize)
+{
+ { ZSTD_frameHeader zfh;
+ if (ZSTD_getFrameHeader(&zfh, src, srcSize) != 0)
+ return ZSTD_CONTENTSIZE_ERROR;
+ if (zfh.frameType == ZSTD_skippableFrame) {
+ return 0;
+ } else {
+ return zfh.frameContentSize;
+ } }
+}
+
+static size_t readSkippableFrameSize(void const* src, size_t srcSize)
+{
+ size_t const skippableHeaderSize = ZSTD_SKIPPABLEHEADERSIZE;
+ U32 sizeU32;
+
+ RETURN_ERROR_IF(srcSize < ZSTD_SKIPPABLEHEADERSIZE, srcSize_wrong, "");
+
+ sizeU32 = MEM_readLE32((BYTE const*)src + ZSTD_FRAMEIDSIZE);
+ RETURN_ERROR_IF((U32)(sizeU32 + ZSTD_SKIPPABLEHEADERSIZE) < sizeU32,
+ frameParameter_unsupported, "");
+ {
+ size_t const skippableSize = skippableHeaderSize + sizeU32;
+ RETURN_ERROR_IF(skippableSize > srcSize, srcSize_wrong, "");
+ return skippableSize;
+ }
+}
+
+/*! ZSTD_readSkippableFrame() :
+ * Retrieves the content of the zstd skippable frame starting at src, and writes it to the dst buffer.
+ *
+ * The parameter magicVariant will receive the magicVariant that was supplied when the frame was written,
+ * i.e. magicNumber - ZSTD_MAGIC_SKIPPABLE_START. This can be NULL if the caller is not interested
+ * in the magicVariant.
+ *
+ * Returns an error if destination buffer is not large enough, or if the frame is not skippable.
+ *
+ * @return : number of bytes written or a ZSTD error.
+ */
+ZSTDLIB_API size_t ZSTD_readSkippableFrame(void* dst, size_t dstCapacity, unsigned* magicVariant,
+ const void* src, size_t srcSize)
+{
+ U32 const magicNumber = MEM_readLE32(src);
+ size_t skippableFrameSize = readSkippableFrameSize(src, srcSize);
+ size_t skippableContentSize = skippableFrameSize - ZSTD_SKIPPABLEHEADERSIZE;
+
+ /* check input validity */
+ RETURN_ERROR_IF(!ZSTD_isSkippableFrame(src, srcSize), frameParameter_unsupported, "");
+ RETURN_ERROR_IF(skippableFrameSize < ZSTD_SKIPPABLEHEADERSIZE || skippableFrameSize > srcSize, srcSize_wrong, "");
+ RETURN_ERROR_IF(skippableContentSize > dstCapacity, dstSize_tooSmall, "");
+
+ /* deliver payload */
+ if (skippableContentSize > 0 && dst != NULL)
+ ZSTD_memcpy(dst, (const BYTE *)src + ZSTD_SKIPPABLEHEADERSIZE, skippableContentSize);
+ if (magicVariant != NULL)
+ *magicVariant = magicNumber - ZSTD_MAGIC_SKIPPABLE_START;
+ return skippableContentSize;
+}
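+
+/* Example (illustrative sketch, in a comment only) : reading the payload of a
+ * skippable frame. Assumes `src`/`srcSize` start at a skippable frame and
+ * `payload` has `payloadCapacity` writable bytes.
+ *
+ *   unsigned magicVariant;
+ *   size_t const n = ZSTD_readSkippableFrame(payload, payloadCapacity,
+ *                                            &magicVariant, src, srcSize);
+ *   if (!ZSTD_isError(n)) {
+ *       // `payload` now holds `n` bytes; `magicVariant` identifies the
+ *       // magic number variant chosen by the frame's producer
+ *   }
+ */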
+
+/* ZSTD_findDecompressedSize() :
+ * compatible with legacy mode
+ * `srcSize` must be the exact length of some number of ZSTD compressed and/or
+ * skippable frames
+ * @return : decompressed size of the frames contained */
+unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize)
+{
+ unsigned long long totalDstSize = 0;
+
+ while (srcSize >= ZSTD_startingInputLength(ZSTD_f_zstd1)) {
+ U32 const magicNumber = MEM_readLE32(src);
+
+ if ((magicNumber & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
+ size_t const skippableSize = readSkippableFrameSize(src, srcSize);
+ if (ZSTD_isError(skippableSize)) {
+ return ZSTD_CONTENTSIZE_ERROR;
+ }
+ assert(skippableSize <= srcSize);
+
+ src = (const BYTE *)src + skippableSize;
+ srcSize -= skippableSize;
+ continue;
+ }
+
+ { unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize);
+ if (ret >= ZSTD_CONTENTSIZE_ERROR) return ret;
+
+ /* check for overflow */
+ if (totalDstSize + ret < totalDstSize) return ZSTD_CONTENTSIZE_ERROR;
+ totalDstSize += ret;
+ }
+ { size_t const frameSrcSize = ZSTD_findFrameCompressedSize(src, srcSize);
+ if (ZSTD_isError(frameSrcSize)) {
+ return ZSTD_CONTENTSIZE_ERROR;
+ }
+
+ src = (const BYTE *)src + frameSrcSize;
+ srcSize -= frameSrcSize;
+ }
+ } /* while (srcSize >= ZSTD_frameHeaderSize_prefix) */
+
+ if (srcSize) return ZSTD_CONTENTSIZE_ERROR;
+
+ return totalDstSize;
+}
+
+/* ZSTD_getDecompressedSize() :
+ * compatible with legacy mode
+ * @return : decompressed size if known, 0 otherwise
+ note : 0 can mean any of the following :
+ - frame content is empty
+ - decompressed size field is not present in frame header
+ - frame header unknown / not supported
+ - frame header not complete (`srcSize` too small) */
+unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize)
+{
+ unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize);
+ ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_ERROR < ZSTD_CONTENTSIZE_UNKNOWN);
+ return (ret >= ZSTD_CONTENTSIZE_ERROR) ? 0 : ret;
+}
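+
+/* Example (illustrative sketch, in a comment only) : why
+ * ZSTD_getFrameContentSize() is preferred over the legacy
+ * ZSTD_getDecompressedSize(). The former distinguishes the "unknown" and
+ * "error" cases that the latter both collapses to 0.
+ *
+ *   unsigned long long const fcs = ZSTD_getFrameContentSize(src, srcSize);
+ *   if (fcs == ZSTD_CONTENTSIZE_ERROR) {
+ *       // not a valid frame, or srcSize too small
+ *   } else if (fcs == ZSTD_CONTENTSIZE_UNKNOWN) {
+ *       // valid frame, but size not recorded : use streaming decompression
+ *   } else {
+ *       // fcs is the exact decompressed size of this frame
+ *   }
+ */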
+
+
+/* ZSTD_decodeFrameHeader() :
+ * `headerSize` must be the size provided by ZSTD_frameHeaderSize().
+ * If multiple DDict references are enabled, also will choose the correct DDict to use.
+ * @return : 0 if success, or an error code, which can be tested using ZSTD_isError() */
+static size_t ZSTD_decodeFrameHeader(ZSTD_DCtx* dctx, const void* src, size_t headerSize)
+{
+ size_t const result = ZSTD_getFrameHeader_advanced(&(dctx->fParams), src, headerSize, dctx->format);
+ if (ZSTD_isError(result)) return result; /* invalid header */
+ RETURN_ERROR_IF(result>0, srcSize_wrong, "headerSize too small");
+
+ /* Reference DDict requested by frame if dctx references multiple ddicts */
+ if (dctx->refMultipleDDicts == ZSTD_rmd_refMultipleDDicts && dctx->ddictSet) {
+ ZSTD_DCtx_selectFrameDDict(dctx);
+ }
+
+#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+ /* Skip the dictID check in fuzzing mode, because it makes the search
+ * harder.
+ */
+ RETURN_ERROR_IF(dctx->fParams.dictID && (dctx->dictID != dctx->fParams.dictID),
+ dictionary_wrong, "");
+#endif
+ dctx->validateChecksum = (dctx->fParams.checksumFlag && !dctx->forceIgnoreChecksum) ? 1 : 0;
+ if (dctx->validateChecksum) xxh64_reset(&dctx->xxhState, 0);
+ dctx->processedCSize += headerSize;
+ return 0;
+}
+
+static ZSTD_frameSizeInfo ZSTD_errorFrameSizeInfo(size_t ret)
+{
+ ZSTD_frameSizeInfo frameSizeInfo;
+ frameSizeInfo.compressedSize = ret;
+ frameSizeInfo.decompressedBound = ZSTD_CONTENTSIZE_ERROR;
+ return frameSizeInfo;
+}
+
+static ZSTD_frameSizeInfo ZSTD_findFrameSizeInfo(const void* src, size_t srcSize)
+{
+ ZSTD_frameSizeInfo frameSizeInfo;
+ ZSTD_memset(&frameSizeInfo, 0, sizeof(ZSTD_frameSizeInfo));
+
+ if ((srcSize >= ZSTD_SKIPPABLEHEADERSIZE)
+ && (MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
+ frameSizeInfo.compressedSize = readSkippableFrameSize(src, srcSize);
+ assert(ZSTD_isError(frameSizeInfo.compressedSize) ||
+ frameSizeInfo.compressedSize <= srcSize);
+ return frameSizeInfo;
+ } else {
+ const BYTE* ip = (const BYTE*)src;
+ const BYTE* const ipstart = ip;
+ size_t remainingSize = srcSize;
+ size_t nbBlocks = 0;
+ ZSTD_frameHeader zfh;
+
+ /* Extract Frame Header */
+ { size_t const ret = ZSTD_getFrameHeader(&zfh, src, srcSize);
+ if (ZSTD_isError(ret))
+ return ZSTD_errorFrameSizeInfo(ret);
+ if (ret > 0)
+ return ZSTD_errorFrameSizeInfo(ERROR(srcSize_wrong));
+ }
+
+ ip += zfh.headerSize;
+ remainingSize -= zfh.headerSize;
+
+ /* Iterate over each block */
+ while (1) {
+ blockProperties_t blockProperties;
+ size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
+ if (ZSTD_isError(cBlockSize))
+ return ZSTD_errorFrameSizeInfo(cBlockSize);
+
+ if (ZSTD_blockHeaderSize + cBlockSize > remainingSize)
+ return ZSTD_errorFrameSizeInfo(ERROR(srcSize_wrong));
+
+ ip += ZSTD_blockHeaderSize + cBlockSize;
+ remainingSize -= ZSTD_blockHeaderSize + cBlockSize;
+ nbBlocks++;
+
+ if (blockProperties.lastBlock) break;
+ }
+
+ /* Final frame content checksum */
+ if (zfh.checksumFlag) {
+ if (remainingSize < 4)
+ return ZSTD_errorFrameSizeInfo(ERROR(srcSize_wrong));
+ ip += 4;
+ }
+
+ frameSizeInfo.compressedSize = (size_t)(ip - ipstart);
+ frameSizeInfo.decompressedBound = (zfh.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN)
+ ? zfh.frameContentSize
+ : nbBlocks * zfh.blockSizeMax;
+ return frameSizeInfo;
+ }
+}
+
+/* ZSTD_findFrameCompressedSize() :
+ * compatible with legacy mode
+ * `src` must point to the start of a ZSTD frame, ZSTD legacy frame, or skippable frame
+ * `srcSize` must be at least as large as the frame contained
+ * @return : the compressed size of the frame starting at `src` */
+size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize)
+{
+ ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize);
+ return frameSizeInfo.compressedSize;
+}
+
+/* ZSTD_decompressBound() :
+ * compatible with legacy mode
+ * `src` must point to the start of a ZSTD frame or a skippable frame
+ * `srcSize` must be at least as large as the frame contained
+ * @return : the maximum decompressed size of the compressed source
+ */
+unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize)
+{
+ unsigned long long bound = 0;
+ /* Iterate over each frame */
+ while (srcSize > 0) {
+ ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize);
+ size_t const compressedSize = frameSizeInfo.compressedSize;
+ unsigned long long const decompressedBound = frameSizeInfo.decompressedBound;
+ if (ZSTD_isError(compressedSize) || decompressedBound == ZSTD_CONTENTSIZE_ERROR)
+ return ZSTD_CONTENTSIZE_ERROR;
+ assert(srcSize >= compressedSize);
+ src = (const BYTE*)src + compressedSize;
+ srcSize -= compressedSize;
+ bound += decompressedBound;
+ }
+ return bound;
+}
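+
+/* Example (illustrative sketch, in a comment only) : sizing a destination
+ * buffer from the bound. Note the bound can be much larger than the real
+ * content size when frames do not record their content size.
+ *
+ *   unsigned long long const bound = ZSTD_decompressBound(src, srcSize);
+ *   if (bound != ZSTD_CONTENTSIZE_ERROR) {
+ *       // a buffer of `bound` bytes is guaranteed large enough for
+ *       // ZSTD_decompress(dst, (size_t)bound, src, srcSize)
+ *   }
+ */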
+
+
+/*-*************************************************************
+ * Frame decoding
+ ***************************************************************/
+
+/* ZSTD_insertBlock() :
+ * insert `src` block into `dctx` history. Useful to track uncompressed blocks. */
+size_t ZSTD_insertBlock(ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize)
+{
+ DEBUGLOG(5, "ZSTD_insertBlock: %u bytes", (unsigned)blockSize);
+ ZSTD_checkContinuity(dctx, blockStart, blockSize);
+ dctx->previousDstEnd = (const char*)blockStart + blockSize;
+ return blockSize;
+}
+
+
+static size_t ZSTD_copyRawBlock(void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize)
+{
+ DEBUGLOG(5, "ZSTD_copyRawBlock");
+ RETURN_ERROR_IF(srcSize > dstCapacity, dstSize_tooSmall, "");
+ if (dst == NULL) {
+ if (srcSize == 0) return 0;
+ RETURN_ERROR(dstBuffer_null, "");
+ }
+ ZSTD_memmove(dst, src, srcSize);
+ return srcSize;
+}
+
+static size_t ZSTD_setRleBlock(void* dst, size_t dstCapacity,
+ BYTE b,
+ size_t regenSize)
+{
+ RETURN_ERROR_IF(regenSize > dstCapacity, dstSize_tooSmall, "");
+ if (dst == NULL) {
+ if (regenSize == 0) return 0;
+ RETURN_ERROR(dstBuffer_null, "");
+ }
+ ZSTD_memset(dst, b, regenSize);
+ return regenSize;
+}
+
+static void ZSTD_DCtx_trace_end(ZSTD_DCtx const* dctx, U64 uncompressedSize, U64 compressedSize, unsigned streaming)
+{
+ (void)dctx;
+ (void)uncompressedSize;
+ (void)compressedSize;
+ (void)streaming;
+}
+
+
+/*! ZSTD_decompressFrame() :
+ * @dctx must be properly initialized
+ * will update *srcPtr and *srcSizePtr,
+ * to make *srcPtr progress by one frame. */
+static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void** srcPtr, size_t *srcSizePtr)
+{
+ const BYTE* const istart = (const BYTE*)(*srcPtr);
+ const BYTE* ip = istart;
+ BYTE* const ostart = (BYTE*)dst;
+ BYTE* const oend = dstCapacity != 0 ? ostart + dstCapacity : ostart;
+ BYTE* op = ostart;
+ size_t remainingSrcSize = *srcSizePtr;
+
+ DEBUGLOG(4, "ZSTD_decompressFrame (srcSize:%i)", (int)*srcSizePtr);
+
+ /* check */
+ RETURN_ERROR_IF(
+ remainingSrcSize < ZSTD_FRAMEHEADERSIZE_MIN(dctx->format)+ZSTD_blockHeaderSize,
+ srcSize_wrong, "");
+
+ /* Frame Header */
+ { size_t const frameHeaderSize = ZSTD_frameHeaderSize_internal(
+ ip, ZSTD_FRAMEHEADERSIZE_PREFIX(dctx->format), dctx->format);
+ if (ZSTD_isError(frameHeaderSize)) return frameHeaderSize;
+ RETURN_ERROR_IF(remainingSrcSize < frameHeaderSize+ZSTD_blockHeaderSize,
+ srcSize_wrong, "");
+ FORWARD_IF_ERROR( ZSTD_decodeFrameHeader(dctx, ip, frameHeaderSize) , "");
+ ip += frameHeaderSize; remainingSrcSize -= frameHeaderSize;
+ }
+
+ /* Loop on each block */
+ while (1) {
+ BYTE* oBlockEnd = oend;
+ size_t decodedSize;
+ blockProperties_t blockProperties;
+ size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSrcSize, &blockProperties);
+ if (ZSTD_isError(cBlockSize)) return cBlockSize;
+
+ ip += ZSTD_blockHeaderSize;
+ remainingSrcSize -= ZSTD_blockHeaderSize;
+ RETURN_ERROR_IF(cBlockSize > remainingSrcSize, srcSize_wrong, "");
+
+ if (ip >= op && ip < oBlockEnd) {
+ /* We are decompressing in-place. Limit the output pointer so that we
+ * don't overwrite the block that we are currently reading. This will
+ * fail decompression if the input & output pointers aren't spaced
+ * far enough apart.
+ *
+ * This is important to set, even when the pointers are far enough
+ * apart, because ZSTD_decompressBlock_internal() can decide to store
+ * literals in the output buffer, after the block it is decompressing.
+ * Since we don't want anything to overwrite our input, we have to tell
+ * ZSTD_decompressBlock_internal to never write past ip.
+ *
+ * See ZSTD_allocateLiteralsBuffer() for reference.
+ */
+ oBlockEnd = op + (ip - op);
+ }
+
+ switch(blockProperties.blockType)
+ {
+ case bt_compressed:
+ decodedSize = ZSTD_decompressBlock_internal(dctx, op, (size_t)(oBlockEnd-op), ip, cBlockSize, /* frame */ 1, not_streaming);
+ break;
+ case bt_raw :
+ /* Use oend instead of oBlockEnd because this function is safe to overlap. It uses memmove. */
+ decodedSize = ZSTD_copyRawBlock(op, (size_t)(oend-op), ip, cBlockSize);
+ break;
+ case bt_rle :
+ decodedSize = ZSTD_setRleBlock(op, (size_t)(oBlockEnd-op), *ip, blockProperties.origSize);
+ break;
+ case bt_reserved :
+ default:
+ RETURN_ERROR(corruption_detected, "invalid block type");
+ }
+
+ if (ZSTD_isError(decodedSize)) return decodedSize;
+ if (dctx->validateChecksum)
+ xxh64_update(&dctx->xxhState, op, decodedSize);
+ if (decodedSize != 0)
+ op += decodedSize;
+ assert(ip != NULL);
+ ip += cBlockSize;
+ remainingSrcSize -= cBlockSize;
+ if (blockProperties.lastBlock) break;
+ }
+
+ if (dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN) {
+ RETURN_ERROR_IF((U64)(op-ostart) != dctx->fParams.frameContentSize,
+ corruption_detected, "");
+ }
+ if (dctx->fParams.checksumFlag) { /* Frame content checksum verification */
+ RETURN_ERROR_IF(remainingSrcSize<4, checksum_wrong, "");
+ if (!dctx->forceIgnoreChecksum) {
+ U32 const checkCalc = (U32)xxh64_digest(&dctx->xxhState);
+ U32 checkRead;
+ checkRead = MEM_readLE32(ip);
+ RETURN_ERROR_IF(checkRead != checkCalc, checksum_wrong, "");
+ }
+ ip += 4;
+ remainingSrcSize -= 4;
+ }
+ ZSTD_DCtx_trace_end(dctx, (U64)(op-ostart), (U64)(ip-istart), /* streaming */ 0);
+ /* Allow caller to get size read */
+ *srcPtr = ip;
+ *srcSizePtr = remainingSrcSize;
+ return (size_t)(op-ostart);
+}
+
+static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const void* dict, size_t dictSize,
+ const ZSTD_DDict* ddict)
+{
+ void* const dststart = dst;
+ int moreThan1Frame = 0;
+
+ DEBUGLOG(5, "ZSTD_decompressMultiFrame");
+ assert(dict==NULL || ddict==NULL); /* either dict or ddict set, not both */
+
+ if (ddict) {
+ dict = ZSTD_DDict_dictContent(ddict);
+ dictSize = ZSTD_DDict_dictSize(ddict);
+ }
+
+ while (srcSize >= ZSTD_startingInputLength(dctx->format)) {
+
+ { U32 const magicNumber = MEM_readLE32(src);
+ DEBUGLOG(4, "reading magic number %08X (expecting %08X)",
+ (unsigned)magicNumber, ZSTD_MAGICNUMBER);
+ if ((magicNumber & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
+ size_t const skippableSize = readSkippableFrameSize(src, srcSize);
+ FORWARD_IF_ERROR(skippableSize, "readSkippableFrameSize failed");
+ assert(skippableSize <= srcSize);
+
+ src = (const BYTE *)src + skippableSize;
+ srcSize -= skippableSize;
+ continue;
+ } }
+
+ if (ddict) {
+ /* we were called from ZSTD_decompress_usingDDict */
+ FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDDict(dctx, ddict), "");
+ } else {
+ /* this will initialize correctly with no dict if dict == NULL, so
+ * use this in all cases but ddict */
+ FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDict(dctx, dict, dictSize), "");
+ }
+ ZSTD_checkContinuity(dctx, dst, dstCapacity);
+
+ { const size_t res = ZSTD_decompressFrame(dctx, dst, dstCapacity,
+ &src, &srcSize);
+ RETURN_ERROR_IF(
+ (ZSTD_getErrorCode(res) == ZSTD_error_prefix_unknown)
+ && (moreThan1Frame==1),
+ srcSize_wrong,
+ "At least one frame successfully completed, "
+ "but following bytes are garbage: "
+ "it's more likely to be a srcSize error, "
+ "specifying more input bytes than size of frame(s). "
+ "Note: one could be unlucky, it might be a corruption error instead, "
+ "happening right at the place where we expect zstd magic bytes. "
+ "But this is _much_ less likely than a srcSize field error.");
+ if (ZSTD_isError(res)) return res;
+ assert(res <= dstCapacity);
+ if (res != 0)
+ dst = (BYTE*)dst + res;
+ dstCapacity -= res;
+ }
+ moreThan1Frame = 1;
+ } /* while (srcSize >= ZSTD_frameHeaderSize_prefix) */
+
+ RETURN_ERROR_IF(srcSize, srcSize_wrong, "input not entirely consumed");
+
+ return (size_t)((BYTE*)dst - (BYTE*)dststart);
+}
+
+size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const void* dict, size_t dictSize)
+{
+ return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize, dict, dictSize, NULL);
+}
+
+
+static ZSTD_DDict const* ZSTD_getDDict(ZSTD_DCtx* dctx)
+{
+ switch (dctx->dictUses) {
+ default:
+ assert(0 /* Impossible */);
+ ZSTD_FALLTHROUGH;
+ case ZSTD_dont_use:
+ ZSTD_clearDict(dctx);
+ return NULL;
+ case ZSTD_use_indefinitely:
+ return dctx->ddict;
+ case ZSTD_use_once:
+ dctx->dictUses = ZSTD_dont_use;
+ return dctx->ddict;
+ }
+}
+
+size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+ return ZSTD_decompress_usingDDict(dctx, dst, dstCapacity, src, srcSize, ZSTD_getDDict(dctx));
+}
+
+
+size_t ZSTD_decompress(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+#if defined(ZSTD_HEAPMODE) && (ZSTD_HEAPMODE>=1)
+ size_t regenSize;
+ ZSTD_DCtx* const dctx = ZSTD_createDCtx_internal(ZSTD_defaultCMem);
+ RETURN_ERROR_IF(dctx==NULL, memory_allocation, "NULL pointer!");
+ regenSize = ZSTD_decompressDCtx(dctx, dst, dstCapacity, src, srcSize);
+ ZSTD_freeDCtx(dctx);
+ return regenSize;
+#else /* stack mode */
+ ZSTD_DCtx dctx;
+ ZSTD_initDCtx_internal(&dctx);
+ return ZSTD_decompressDCtx(&dctx, dst, dstCapacity, src, srcSize);
+#endif
+}
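+
+/* Example (illustrative sketch, in a comment only) : simplest one-shot
+ * decompression. Assumes the frame recorded its content size, so
+ * `dstCapacity` can be derived with ZSTD_getFrameContentSize() beforehand.
+ *
+ *   size_t const dSize = ZSTD_decompress(dst, dstCapacity, src, srcSize);
+ *   if (ZSTD_isError(dSize)) {
+ *       // corruption, wrong sizes, or allocation failure in heap mode
+ *   } else {
+ *       // `dst` holds `dSize` regenerated bytes
+ *   }
+ */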
+
+
+/*-**************************************
+* Advanced Streaming Decompression API
+* Bufferless and synchronous
+****************************************/
+size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx) { return dctx->expected; }
+
+/*
+ * Similar to ZSTD_nextSrcSizeToDecompress(), but when a block input can be streamed,
+ * we allow taking a partial block as the input. Currently only raw uncompressed blocks can
+ * be streamed.
+ *
+ * For blocks that can be streamed, this allows us to reduce the latency until we produce
+ * output, and avoid copying the input.
+ *
+ * @param inputSize - The total amount of input that the caller currently has.
+ */
+static size_t ZSTD_nextSrcSizeToDecompressWithInputSize(ZSTD_DCtx* dctx, size_t inputSize) {
+ if (!(dctx->stage == ZSTDds_decompressBlock || dctx->stage == ZSTDds_decompressLastBlock))
+ return dctx->expected;
+ if (dctx->bType != bt_raw)
+ return dctx->expected;
+ return BOUNDED(1, inputSize, dctx->expected);
+}
+
+ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx) {
+ switch(dctx->stage)
+ {
+ default: /* should not happen */
+ assert(0);
+ ZSTD_FALLTHROUGH;
+ case ZSTDds_getFrameHeaderSize:
+ ZSTD_FALLTHROUGH;
+ case ZSTDds_decodeFrameHeader:
+ return ZSTDnit_frameHeader;
+ case ZSTDds_decodeBlockHeader:
+ return ZSTDnit_blockHeader;
+ case ZSTDds_decompressBlock:
+ return ZSTDnit_block;
+ case ZSTDds_decompressLastBlock:
+ return ZSTDnit_lastBlock;
+ case ZSTDds_checkChecksum:
+ return ZSTDnit_checksum;
+ case ZSTDds_decodeSkippableHeader:
+ ZSTD_FALLTHROUGH;
+ case ZSTDds_skipFrame:
+ return ZSTDnit_skippableFrame;
+ }
+}
+
+static int ZSTD_isSkipFrame(ZSTD_DCtx* dctx) { return dctx->stage == ZSTDds_skipFrame; }
+
+/* ZSTD_decompressContinue() :
+ * srcSize : must be the exact nb of bytes expected (see ZSTD_nextSrcSizeToDecompress())
+ * @return : nb of bytes generated into `dst` (necessarily <= `dstCapacity`)
+ * or an error code, which can be tested using ZSTD_isError() */
+size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+ DEBUGLOG(5, "ZSTD_decompressContinue (srcSize:%u)", (unsigned)srcSize);
+ /* Sanity check */
+ RETURN_ERROR_IF(srcSize != ZSTD_nextSrcSizeToDecompressWithInputSize(dctx, srcSize), srcSize_wrong, "not allowed");
+ ZSTD_checkContinuity(dctx, dst, dstCapacity);
+
+ dctx->processedCSize += srcSize;
+
+ switch (dctx->stage)
+ {
+ case ZSTDds_getFrameHeaderSize :
+ assert(src != NULL);
+ if (dctx->format == ZSTD_f_zstd1) { /* allows header */
+ assert(srcSize >= ZSTD_FRAMEIDSIZE); /* to read skippable magic number */
+ if ((MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */
+ ZSTD_memcpy(dctx->headerBuffer, src, srcSize);
+ dctx->expected = ZSTD_SKIPPABLEHEADERSIZE - srcSize; /* remaining to load to get full skippable frame header */
+ dctx->stage = ZSTDds_decodeSkippableHeader;
+ return 0;
+ } }
+ dctx->headerSize = ZSTD_frameHeaderSize_internal(src, srcSize, dctx->format);
+ if (ZSTD_isError(dctx->headerSize)) return dctx->headerSize;
+ ZSTD_memcpy(dctx->headerBuffer, src, srcSize);
+ dctx->expected = dctx->headerSize - srcSize;
+ dctx->stage = ZSTDds_decodeFrameHeader;
+ return 0;
+
+ case ZSTDds_decodeFrameHeader:
+ assert(src != NULL);
+ ZSTD_memcpy(dctx->headerBuffer + (dctx->headerSize - srcSize), src, srcSize);
+ FORWARD_IF_ERROR(ZSTD_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize), "");
+ dctx->expected = ZSTD_blockHeaderSize;
+ dctx->stage = ZSTDds_decodeBlockHeader;
+ return 0;
+
+ case ZSTDds_decodeBlockHeader:
+ { blockProperties_t bp;
+ size_t const cBlockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp);
+ if (ZSTD_isError(cBlockSize)) return cBlockSize;
+ RETURN_ERROR_IF(cBlockSize > dctx->fParams.blockSizeMax, corruption_detected, "Block Size Exceeds Maximum");
+ dctx->expected = cBlockSize;
+ dctx->bType = bp.blockType;
+ dctx->rleSize = bp.origSize;
+ if (cBlockSize) {
+ dctx->stage = bp.lastBlock ? ZSTDds_decompressLastBlock : ZSTDds_decompressBlock;
+ return 0;
+ }
+ /* empty block */
+ if (bp.lastBlock) {
+ if (dctx->fParams.checksumFlag) {
+ dctx->expected = 4;
+ dctx->stage = ZSTDds_checkChecksum;
+ } else {
+ dctx->expected = 0; /* end of frame */
+ dctx->stage = ZSTDds_getFrameHeaderSize;
+ }
+ } else {
+ dctx->expected = ZSTD_blockHeaderSize; /* jump to next header */
+ dctx->stage = ZSTDds_decodeBlockHeader;
+ }
+ return 0;
+ }
+
+ case ZSTDds_decompressLastBlock:
+ case ZSTDds_decompressBlock:
+ DEBUGLOG(5, "ZSTD_decompressContinue: case ZSTDds_decompressBlock");
+ { size_t rSize;
+ switch(dctx->bType)
+ {
+ case bt_compressed:
+ DEBUGLOG(5, "ZSTD_decompressContinue: case bt_compressed");
+ rSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 1, is_streaming);
+ dctx->expected = 0; /* Streaming not supported */
+ break;
+ case bt_raw :
+ assert(srcSize <= dctx->expected);
+ rSize = ZSTD_copyRawBlock(dst, dstCapacity, src, srcSize);
+ FORWARD_IF_ERROR(rSize, "ZSTD_copyRawBlock failed");
+ assert(rSize == srcSize);
+ dctx->expected -= rSize;
+ break;
+ case bt_rle :
+ rSize = ZSTD_setRleBlock(dst, dstCapacity, *(const BYTE*)src, dctx->rleSize);
+ dctx->expected = 0; /* Streaming not supported */
+ break;
+ case bt_reserved : /* should never happen */
+ default:
+ RETURN_ERROR(corruption_detected, "invalid block type");
+ }
+ FORWARD_IF_ERROR(rSize, "");
+ RETURN_ERROR_IF(rSize > dctx->fParams.blockSizeMax, corruption_detected, "Decompressed Block Size Exceeds Maximum");
+ DEBUGLOG(5, "ZSTD_decompressContinue: decoded size from block : %u", (unsigned)rSize);
+ dctx->decodedSize += rSize;
+ if (dctx->validateChecksum) xxh64_update(&dctx->xxhState, dst, rSize);
+ dctx->previousDstEnd = (char*)dst + rSize;
+
+ /* Stay on the same stage until we are finished streaming the block. */
+ if (dctx->expected > 0) {
+ return rSize;
+ }
+
+ if (dctx->stage == ZSTDds_decompressLastBlock) { /* end of frame */
+ DEBUGLOG(4, "ZSTD_decompressContinue: decoded size from frame : %u", (unsigned)dctx->decodedSize);
+ RETURN_ERROR_IF(
+ dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN
+ && dctx->decodedSize != dctx->fParams.frameContentSize,
+ corruption_detected, "");
+ if (dctx->fParams.checksumFlag) { /* another round for frame checksum */
+ dctx->expected = 4;
+ dctx->stage = ZSTDds_checkChecksum;
+ } else {
+ ZSTD_DCtx_trace_end(dctx, dctx->decodedSize, dctx->processedCSize, /* streaming */ 1);
+ dctx->expected = 0; /* ends here */
+ dctx->stage = ZSTDds_getFrameHeaderSize;
+ }
+ } else {
+ dctx->stage = ZSTDds_decodeBlockHeader;
+ dctx->expected = ZSTD_blockHeaderSize;
+ }
+ return rSize;
+ }
+
+ case ZSTDds_checkChecksum:
+ assert(srcSize == 4); /* guaranteed by dctx->expected */
+ {
+ if (dctx->validateChecksum) {
+ U32 const h32 = (U32)xxh64_digest(&dctx->xxhState);
+ U32 const check32 = MEM_readLE32(src);
+ DEBUGLOG(4, "ZSTD_decompressContinue: checksum : calculated %08X :: %08X read", (unsigned)h32, (unsigned)check32);
+ RETURN_ERROR_IF(check32 != h32, checksum_wrong, "");
+ }
+ ZSTD_DCtx_trace_end(dctx, dctx->decodedSize, dctx->processedCSize, /* streaming */ 1);
+ dctx->expected = 0;
+ dctx->stage = ZSTDds_getFrameHeaderSize;
+ return 0;
+ }
+
+ case ZSTDds_decodeSkippableHeader:
+ assert(src != NULL);
+ assert(srcSize <= ZSTD_SKIPPABLEHEADERSIZE);
+ ZSTD_memcpy(dctx->headerBuffer + (ZSTD_SKIPPABLEHEADERSIZE - srcSize), src, srcSize); /* complete skippable header */
+ dctx->expected = MEM_readLE32(dctx->headerBuffer + ZSTD_FRAMEIDSIZE); /* note : dctx->expected can grow seriously large, beyond local buffer size */
+ dctx->stage = ZSTDds_skipFrame;
+ return 0;
+
+ case ZSTDds_skipFrame:
+ dctx->expected = 0;
+ dctx->stage = ZSTDds_getFrameHeaderSize;
+ return 0;
+
+ default:
+ assert(0); /* impossible */
+ RETURN_ERROR(GENERIC, "impossible to reach"); /* some compiler require default to do something */
+ }
+}
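+
+/* Example (illustrative sketch, in a comment only) : the bufferless protocol
+ * that drives ZSTD_decompressContinue(). `fill_input()` is a hypothetical
+ * helper that delivers exactly `toRead` bytes into `inBuff`; `op`/`oend` are
+ * assumed to delimit the destination buffer.
+ *
+ *   {   size_t const initErr = ZSTD_decompressBegin(dctx);
+ *       if (ZSTD_isError(initErr)) return initErr;
+ *   }
+ *   for (;;) {
+ *       size_t const toRead = ZSTD_nextSrcSizeToDecompress(dctx);
+ *       if (toRead == 0) break;            // end of frame
+ *       fill_input(inBuff, toRead);
+ *       {   size_t const produced = ZSTD_decompressContinue(dctx,
+ *                           op, (size_t)(oend - op), inBuff, toRead);
+ *           if (ZSTD_isError(produced)) return produced;
+ *           op += produced;                // may be 0 for header stages
+ *   }   }
+ */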
+
+
+static size_t ZSTD_refDictContent(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
+{
+ dctx->dictEnd = dctx->previousDstEnd;
+ dctx->virtualStart = (const char*)dict - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->prefixStart));
+ dctx->prefixStart = dict;
+ dctx->previousDstEnd = (const char*)dict + dictSize;
+#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+ dctx->dictContentBeginForFuzzing = dctx->prefixStart;
+ dctx->dictContentEndForFuzzing = dctx->previousDstEnd;
+#endif
+ return 0;
+}
+
+/*! ZSTD_loadDEntropy() :
+ * dict : must point at beginning of a valid zstd dictionary.
+ * @return : size of entropy tables read */
+size_t
+ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy,
+ const void* const dict, size_t const dictSize)
+{
+ const BYTE* dictPtr = (const BYTE*)dict;
+ const BYTE* const dictEnd = dictPtr + dictSize;
+
+ RETURN_ERROR_IF(dictSize <= 8, dictionary_corrupted, "dict is too small");
+ assert(MEM_readLE32(dict) == ZSTD_MAGIC_DICTIONARY); /* dict must be valid */
+ dictPtr += 8; /* skip header = magic + dictID */
+
+ ZSTD_STATIC_ASSERT(offsetof(ZSTD_entropyDTables_t, OFTable) == offsetof(ZSTD_entropyDTables_t, LLTable) + sizeof(entropy->LLTable));
+ ZSTD_STATIC_ASSERT(offsetof(ZSTD_entropyDTables_t, MLTable) == offsetof(ZSTD_entropyDTables_t, OFTable) + sizeof(entropy->OFTable));
+ ZSTD_STATIC_ASSERT(sizeof(entropy->LLTable) + sizeof(entropy->OFTable) + sizeof(entropy->MLTable) >= HUF_DECOMPRESS_WORKSPACE_SIZE);
+ { void* const workspace = &entropy->LLTable; /* use fse tables as temporary workspace; implies fse tables are grouped together */
+ size_t const workspaceSize = sizeof(entropy->LLTable) + sizeof(entropy->OFTable) + sizeof(entropy->MLTable);
+#ifdef HUF_FORCE_DECOMPRESS_X1
+ /* in minimal huffman, we always use X1 variants */
+ size_t const hSize = HUF_readDTableX1_wksp(entropy->hufTable,
+ dictPtr, dictEnd - dictPtr,
+ workspace, workspaceSize);
+#else
+ size_t const hSize = HUF_readDTableX2_wksp(entropy->hufTable,
+ dictPtr, (size_t)(dictEnd - dictPtr),
+ workspace, workspaceSize);
+#endif
+ RETURN_ERROR_IF(HUF_isError(hSize), dictionary_corrupted, "");
+ dictPtr += hSize;
+ }
+
+ { short offcodeNCount[MaxOff+1];
+ unsigned offcodeMaxValue = MaxOff, offcodeLog;
+ size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, (size_t)(dictEnd-dictPtr));
+ RETURN_ERROR_IF(FSE_isError(offcodeHeaderSize), dictionary_corrupted, "");
+ RETURN_ERROR_IF(offcodeMaxValue > MaxOff, dictionary_corrupted, "");
+ RETURN_ERROR_IF(offcodeLog > OffFSELog, dictionary_corrupted, "");
+ ZSTD_buildFSETable( entropy->OFTable,
+ offcodeNCount, offcodeMaxValue,
+ OF_base, OF_bits,
+ offcodeLog,
+ entropy->workspace, sizeof(entropy->workspace),
+ /* bmi2 */0);
+ dictPtr += offcodeHeaderSize;
+ }
+
+ { short matchlengthNCount[MaxML+1];
+ unsigned matchlengthMaxValue = MaxML, matchlengthLog;
+ size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, (size_t)(dictEnd-dictPtr));
+ RETURN_ERROR_IF(FSE_isError(matchlengthHeaderSize), dictionary_corrupted, "");
+ RETURN_ERROR_IF(matchlengthMaxValue > MaxML, dictionary_corrupted, "");
+ RETURN_ERROR_IF(matchlengthLog > MLFSELog, dictionary_corrupted, "");
+ ZSTD_buildFSETable( entropy->MLTable,
+ matchlengthNCount, matchlengthMaxValue,
+ ML_base, ML_bits,
+ matchlengthLog,
+ entropy->workspace, sizeof(entropy->workspace),
+ /* bmi2 */ 0);
+ dictPtr += matchlengthHeaderSize;
+ }
+
+ { short litlengthNCount[MaxLL+1];
+ unsigned litlengthMaxValue = MaxLL, litlengthLog;
+ size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, (size_t)(dictEnd-dictPtr));
+ RETURN_ERROR_IF(FSE_isError(litlengthHeaderSize), dictionary_corrupted, "");
+ RETURN_ERROR_IF(litlengthMaxValue > MaxLL, dictionary_corrupted, "");
+ RETURN_ERROR_IF(litlengthLog > LLFSELog, dictionary_corrupted, "");
+ ZSTD_buildFSETable( entropy->LLTable,
+ litlengthNCount, litlengthMaxValue,
+ LL_base, LL_bits,
+ litlengthLog,
+ entropy->workspace, sizeof(entropy->workspace),
+ /* bmi2 */ 0);
+ dictPtr += litlengthHeaderSize;
+ }
+
+ RETURN_ERROR_IF(dictPtr+12 > dictEnd, dictionary_corrupted, "");
+ { int i;
+ size_t const dictContentSize = (size_t)(dictEnd - (dictPtr+12));
+ for (i=0; i<3; i++) {
+ U32 const rep = MEM_readLE32(dictPtr); dictPtr += 4;
+ RETURN_ERROR_IF(rep==0 || rep > dictContentSize,
+ dictionary_corrupted, "");
+ entropy->rep[i] = rep;
+ } }
+
+ return (size_t)(dictPtr - (const BYTE*)dict);
+}
+
+static size_t ZSTD_decompress_insertDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
+{
+ if (dictSize < 8) return ZSTD_refDictContent(dctx, dict, dictSize);
+ { U32 const magic = MEM_readLE32(dict);
+ if (magic != ZSTD_MAGIC_DICTIONARY) {
+ return ZSTD_refDictContent(dctx, dict, dictSize); /* pure content mode */
+ } }
+ dctx->dictID = MEM_readLE32((const char*)dict + ZSTD_FRAMEIDSIZE);
+
+ /* load entropy tables */
+ { size_t const eSize = ZSTD_loadDEntropy(&dctx->entropy, dict, dictSize);
+ RETURN_ERROR_IF(ZSTD_isError(eSize), dictionary_corrupted, "");
+ dict = (const char*)dict + eSize;
+ dictSize -= eSize;
+ }
+ dctx->litEntropy = dctx->fseEntropy = 1;
+
+ /* reference dictionary content */
+ return ZSTD_refDictContent(dctx, dict, dictSize);
+}
+
+size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx)
+{
+ assert(dctx != NULL);
+ dctx->expected = ZSTD_startingInputLength(dctx->format); /* dctx->format must be properly set */
+ dctx->stage = ZSTDds_getFrameHeaderSize;
+ dctx->processedCSize = 0;
+ dctx->decodedSize = 0;
+ dctx->previousDstEnd = NULL;
+ dctx->prefixStart = NULL;
+ dctx->virtualStart = NULL;
+ dctx->dictEnd = NULL;
+ dctx->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */
+ dctx->litEntropy = dctx->fseEntropy = 0;
+ dctx->dictID = 0;
+ dctx->bType = bt_reserved;
+ ZSTD_STATIC_ASSERT(sizeof(dctx->entropy.rep) == sizeof(repStartValue));
+ ZSTD_memcpy(dctx->entropy.rep, repStartValue, sizeof(repStartValue)); /* initial repcodes */
+ dctx->LLTptr = dctx->entropy.LLTable;
+ dctx->MLTptr = dctx->entropy.MLTable;
+ dctx->OFTptr = dctx->entropy.OFTable;
+ dctx->HUFptr = dctx->entropy.hufTable;
+ return 0;
+}
+
+size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
+{
+ FORWARD_IF_ERROR( ZSTD_decompressBegin(dctx) , "");
+ if (dict && dictSize)
+ RETURN_ERROR_IF(
+ ZSTD_isError(ZSTD_decompress_insertDictionary(dctx, dict, dictSize)),
+ dictionary_corrupted, "");
+ return 0;
+}
+
+
+/* ====== ZSTD_DDict ====== */
+
+size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
+{
+ DEBUGLOG(4, "ZSTD_decompressBegin_usingDDict");
+ assert(dctx != NULL);
+ if (ddict) {
+ const char* const dictStart = (const char*)ZSTD_DDict_dictContent(ddict);
+ size_t const dictSize = ZSTD_DDict_dictSize(ddict);
+ const void* const dictEnd = dictStart + dictSize;
+ dctx->ddictIsCold = (dctx->dictEnd != dictEnd);
+ DEBUGLOG(4, "DDict is %s",
+ dctx->ddictIsCold ? "~cold~" : "hot!");
+ }
+ FORWARD_IF_ERROR( ZSTD_decompressBegin(dctx) , "");
+ if (ddict) { /* NULL ddict is equivalent to no dictionary */
+ ZSTD_copyDDictParameters(dctx, ddict);
+ }
+ return 0;
+}
+
+/*! ZSTD_getDictID_fromDict() :
+ * Provides the dictID stored within dictionary.
+ * if @return == 0, the dictionary is not conformant with Zstandard specification.
+ * It can still be loaded, but as a content-only dictionary. */
+unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize)
+{
+ if (dictSize < 8) return 0;
+ if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) return 0;
+ return MEM_readLE32((const char*)dict + ZSTD_FRAMEIDSIZE);
+}
+
+/*! ZSTD_getDictID_fromFrame() :
+ * Provides the dictID required to decompress frame stored within `src`.
+ * If @return == 0, the dictID could not be decoded.
+ *  This could be for one of the following reasons :
+ * - The frame does not require a dictionary (most common case).
+ * - The frame was built with dictID intentionally removed.
+ *           The needed dictionary is hidden information.
+ * Note : this use case also happens when using a non-conformant dictionary.
+ * - `srcSize` is too small, and as a result, frame header could not be decoded.
+ * Note : possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`.
+ * - This is not a Zstandard frame.
+ * When identifying the exact failure cause, it's possible to use
+ * ZSTD_getFrameHeader(), which will provide a more precise error code. */
+unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize)
+{
+ ZSTD_frameHeader zfp = { 0, 0, 0, ZSTD_frame, 0, 0, 0 };
+ size_t const hError = ZSTD_getFrameHeader(&zfp, src, srcSize);
+ if (ZSTD_isError(hError)) return 0;
+ return zfp.dictID;
+}
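+
+/* Example (illustrative sketch, in a comment only) : pairing a frame with the
+ * right dictionary by comparing dict IDs. Assumes `dict`/`dictSize` hold a
+ * candidate zstd dictionary and `src`/`srcSize` the frame to decode.
+ *
+ *   unsigned const frameID = ZSTD_getDictID_fromFrame(src, srcSize);
+ *   unsigned const dictID  = ZSTD_getDictID_fromDict(dict, dictSize);
+ *   if (frameID != 0 && frameID == dictID) {
+ *       // this dictionary matches : decompression is expected to succeed
+ *   }
+ *   // frameID == 0 is inconclusive : the frame may simply omit its dictID
+ */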
+
+
+/*! ZSTD_decompress_usingDDict() :
+* Decompression using a pre-digested Dictionary
+* Use dictionary without significant overhead. */
+size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const ZSTD_DDict* ddict)
+{
+ /* pass content and size in case legacy frames are encountered */
+ return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize,
+ NULL, 0,
+ ddict);
+}
+
+
+/*=====================================
+* Streaming decompression
+*====================================*/
+
+ZSTD_DStream* ZSTD_createDStream(void)
+{
+ DEBUGLOG(3, "ZSTD_createDStream");
+ return ZSTD_createDCtx_internal(ZSTD_defaultCMem);
+}
+
+ZSTD_DStream* ZSTD_initStaticDStream(void *workspace, size_t workspaceSize)
+{
+ return ZSTD_initStaticDCtx(workspace, workspaceSize);
+}
+
+ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem)
+{
+ return ZSTD_createDCtx_internal(customMem);
+}
+
+size_t ZSTD_freeDStream(ZSTD_DStream* zds)
+{
+ return ZSTD_freeDCtx(zds);
+}
+
+
+/* *** Initialization *** */
+
+size_t ZSTD_DStreamInSize(void) { return ZSTD_BLOCKSIZE_MAX + ZSTD_blockHeaderSize; }
+size_t ZSTD_DStreamOutSize(void) { return ZSTD_BLOCKSIZE_MAX; }
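+
+/* Example (illustrative sketch, in a comment only) : a canonical streaming
+ * decompression loop sized with the two hints above. `src_fill()` is a
+ * hypothetical caller-provided helper returning how many bytes it placed into
+ * `inBuf`; `zds`, `inBuf` and `outBuf` are assumed already initialized.
+ *
+ *   ZSTD_inBuffer  input  = { inBuf,  0, 0 };
+ *   ZSTD_outBuffer output = { outBuf, ZSTD_DStreamOutSize(), 0 };
+ *   for (;;) {
+ *       if (input.pos == input.size) {
+ *           input.size = src_fill(inBuf, ZSTD_DStreamInSize());
+ *           input.pos = 0;
+ *           if (input.size == 0) break;    // no more input available
+ *       }
+ *       output.pos = 0;
+ *       {   size_t const hint = ZSTD_decompressStream(zds, &output, &input);
+ *           if (ZSTD_isError(hint)) return hint;
+ *           // output.pos bytes are now ready in outBuf : flush them here
+ *           if (hint == 0) break;          // frame fully decoded and flushed
+ *   }   }
+ */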
+
+size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx,
+ const void* dict, size_t dictSize,
+ ZSTD_dictLoadMethod_e dictLoadMethod,
+ ZSTD_dictContentType_e dictContentType)
+{
+ RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, "");
+ ZSTD_clearDict(dctx);
+ if (dict && dictSize != 0) {
+ dctx->ddictLocal = ZSTD_createDDict_advanced(dict, dictSize, dictLoadMethod, dictContentType, dctx->customMem);
+ RETURN_ERROR_IF(dctx->ddictLocal == NULL, memory_allocation, "NULL pointer!");
+ dctx->ddict = dctx->ddictLocal;
+ dctx->dictUses = ZSTD_use_indefinitely;
+ }
+ return 0;
+}
+
+size_t ZSTD_DCtx_loadDictionary_byReference(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
+{
+ return ZSTD_DCtx_loadDictionary_advanced(dctx, dict, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto);
+}
+
+size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
+{
+ return ZSTD_DCtx_loadDictionary_advanced(dctx, dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto);
+}
+
+size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType)
+{
+ FORWARD_IF_ERROR(ZSTD_DCtx_loadDictionary_advanced(dctx, prefix, prefixSize, ZSTD_dlm_byRef, dictContentType), "");
+ dctx->dictUses = ZSTD_use_once;
+ return 0;
+}
+
+size_t ZSTD_DCtx_refPrefix(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize)
+{
+ return ZSTD_DCtx_refPrefix_advanced(dctx, prefix, prefixSize, ZSTD_dct_rawContent);
+}
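+
+/* Example (illustrative sketch, in a comment only) : using a prefix as a
+ * single-use, zero-copy dictionary. Assumes `prefix` stays valid and
+ * unmodified for the whole decompression session.
+ *
+ *   size_t const err = ZSTD_DCtx_refPrefix(dctx, prefix, prefixSize);
+ *   if (ZSTD_isError(err)) return err;
+ *   // decompress one frame that was compressed with the same prefix;
+ *   // afterwards the reference is dropped (ZSTD_use_once), so it must be
+ *   // re-applied before the next frame if still needed
+ */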
+
+
+/* ZSTD_initDStream_usingDict() :
+ * return : expected size, aka ZSTD_startingInputLength().
+ * this function cannot fail */
+size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize)
+{
+ DEBUGLOG(4, "ZSTD_initDStream_usingDict");
+ FORWARD_IF_ERROR( ZSTD_DCtx_reset(zds, ZSTD_reset_session_only) , "");
+ FORWARD_IF_ERROR( ZSTD_DCtx_loadDictionary(zds, dict, dictSize) , "");
+ return ZSTD_startingInputLength(zds->format);
+}
+
+/* note : this variant can't fail */
+size_t ZSTD_initDStream(ZSTD_DStream* zds)
+{
+ DEBUGLOG(4, "ZSTD_initDStream");
+ return ZSTD_initDStream_usingDDict(zds, NULL);
+}
+
+/* ZSTD_initDStream_usingDDict() :
+ * ddict will just be referenced, and must outlive decompression session
+ * this function cannot fail */
+size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* dctx, const ZSTD_DDict* ddict)
+{
+ FORWARD_IF_ERROR( ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only) , "");
+ FORWARD_IF_ERROR( ZSTD_DCtx_refDDict(dctx, ddict) , "");
+ return ZSTD_startingInputLength(dctx->format);
+}
+
+/* ZSTD_resetDStream() :
+ * return : expected size, aka ZSTD_startingInputLength().
+ * this function cannot fail */
+size_t ZSTD_resetDStream(ZSTD_DStream* dctx)
+{
+ FORWARD_IF_ERROR(ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only), "");
+ return ZSTD_startingInputLength(dctx->format);
+}
+
+
+size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
+{
+ RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, "");
+ ZSTD_clearDict(dctx);
+ if (ddict) {
+ dctx->ddict = ddict;
+ dctx->dictUses = ZSTD_use_indefinitely;
+ if (dctx->refMultipleDDicts == ZSTD_rmd_refMultipleDDicts) {
+ if (dctx->ddictSet == NULL) {
+ dctx->ddictSet = ZSTD_createDDictHashSet(dctx->customMem);
+ if (!dctx->ddictSet) {
+ RETURN_ERROR(memory_allocation, "Failed to allocate memory for hash set!");
+ }
+ }
+ assert(!dctx->staticSize); /* Impossible: ddictSet cannot have been allocated if static dctx */
+ FORWARD_IF_ERROR(ZSTD_DDictHashSet_addDDict(dctx->ddictSet, ddict, dctx->customMem), "");
+ }
+ }
+ return 0;
+}
+
+/* ZSTD_DCtx_setMaxWindowSize() :
+ * note : no direct equivalence in ZSTD_DCtx_setParameter,
+ * since this version sets windowSize, and the other sets windowLog */
+size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize)
+{
+ ZSTD_bounds const bounds = ZSTD_dParam_getBounds(ZSTD_d_windowLogMax);
+ size_t const min = (size_t)1 << bounds.lowerBound;
+ size_t const max = (size_t)1 << bounds.upperBound;
+ RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, "");
+ RETURN_ERROR_IF(maxWindowSize < min, parameter_outOfBound, "");
+ RETURN_ERROR_IF(maxWindowSize > max, parameter_outOfBound, "");
+ dctx->maxWindowSize = maxWindowSize;
+ return 0;
+}
+
+size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format)
+{
+ return ZSTD_DCtx_setParameter(dctx, ZSTD_d_format, (int)format);
+}
+
+ZSTD_bounds ZSTD_dParam_getBounds(ZSTD_dParameter dParam)
+{
+ ZSTD_bounds bounds = { 0, 0, 0 };
+ switch(dParam) {
+ case ZSTD_d_windowLogMax:
+ bounds.lowerBound = ZSTD_WINDOWLOG_ABSOLUTEMIN;
+ bounds.upperBound = ZSTD_WINDOWLOG_MAX;
+ return bounds;
+ case ZSTD_d_format:
+ bounds.lowerBound = (int)ZSTD_f_zstd1;
+ bounds.upperBound = (int)ZSTD_f_zstd1_magicless;
+ ZSTD_STATIC_ASSERT(ZSTD_f_zstd1 < ZSTD_f_zstd1_magicless);
+ return bounds;
+ case ZSTD_d_stableOutBuffer:
+ bounds.lowerBound = (int)ZSTD_bm_buffered;
+ bounds.upperBound = (int)ZSTD_bm_stable;
+ return bounds;
+ case ZSTD_d_forceIgnoreChecksum:
+ bounds.lowerBound = (int)ZSTD_d_validateChecksum;
+ bounds.upperBound = (int)ZSTD_d_ignoreChecksum;
+ return bounds;
+ case ZSTD_d_refMultipleDDicts:
+ bounds.lowerBound = (int)ZSTD_rmd_refSingleDDict;
+ bounds.upperBound = (int)ZSTD_rmd_refMultipleDDicts;
+ return bounds;
+ default:;
+ }
+ bounds.error = ERROR(parameter_unsupported);
+ return bounds;
+}
+
+/* ZSTD_dParam_withinBounds:
+ * @return 1 if value is within dParam bounds,
+ * 0 otherwise */
+static int ZSTD_dParam_withinBounds(ZSTD_dParameter dParam, int value)
+{
+ ZSTD_bounds const bounds = ZSTD_dParam_getBounds(dParam);
+ if (ZSTD_isError(bounds.error)) return 0;
+ if (value < bounds.lowerBound) return 0;
+ if (value > bounds.upperBound) return 0;
+ return 1;
+}
+
+#define CHECK_DBOUNDS(p,v) { \
+ RETURN_ERROR_IF(!ZSTD_dParam_withinBounds(p, v), parameter_outOfBound, ""); \
+}
+
+size_t ZSTD_DCtx_getParameter(ZSTD_DCtx* dctx, ZSTD_dParameter param, int* value)
+{
+ switch (param) {
+ case ZSTD_d_windowLogMax:
+ *value = (int)ZSTD_highbit32((U32)dctx->maxWindowSize);
+ return 0;
+ case ZSTD_d_format:
+ *value = (int)dctx->format;
+ return 0;
+ case ZSTD_d_stableOutBuffer:
+ *value = (int)dctx->outBufferMode;
+ return 0;
+ case ZSTD_d_forceIgnoreChecksum:
+ *value = (int)dctx->forceIgnoreChecksum;
+ return 0;
+ case ZSTD_d_refMultipleDDicts:
+ *value = (int)dctx->refMultipleDDicts;
+ return 0;
+ default:;
+ }
+ RETURN_ERROR(parameter_unsupported, "");
+}
+
+size_t ZSTD_DCtx_setParameter(ZSTD_DCtx* dctx, ZSTD_dParameter dParam, int value)
+{
+ RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, "");
+ switch(dParam) {
+ case ZSTD_d_windowLogMax:
+ if (value == 0) value = ZSTD_WINDOWLOG_LIMIT_DEFAULT;
+ CHECK_DBOUNDS(ZSTD_d_windowLogMax, value);
+ dctx->maxWindowSize = ((size_t)1) << value;
+ return 0;
+ case ZSTD_d_format:
+ CHECK_DBOUNDS(ZSTD_d_format, value);
+ dctx->format = (ZSTD_format_e)value;
+ return 0;
+ case ZSTD_d_stableOutBuffer:
+ CHECK_DBOUNDS(ZSTD_d_stableOutBuffer, value);
+ dctx->outBufferMode = (ZSTD_bufferMode_e)value;
+ return 0;
+ case ZSTD_d_forceIgnoreChecksum:
+ CHECK_DBOUNDS(ZSTD_d_forceIgnoreChecksum, value);
+ dctx->forceIgnoreChecksum = (ZSTD_forceIgnoreChecksum_e)value;
+ return 0;
+ case ZSTD_d_refMultipleDDicts:
+ CHECK_DBOUNDS(ZSTD_d_refMultipleDDicts, value);
+ if (dctx->staticSize != 0) {
+ RETURN_ERROR(parameter_unsupported, "Static dctx does not support multiple DDicts!");
+ }
+ dctx->refMultipleDDicts = (ZSTD_refMultipleDDicts_e)value;
+ return 0;
+ default:;
+ }
+ RETURN_ERROR(parameter_unsupported, "");
+}
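+
+/* Example (illustrative sketch, in a comment only) : capping memory usage
+ * before streaming untrusted input, where a window limit protects against
+ * excessive buffer allocation. 27 (a 128 MB window) is only a sample value.
+ *
+ *   size_t const err = ZSTD_DCtx_setParameter(dctx, ZSTD_d_windowLogMax, 27);
+ *   if (ZSTD_isError(err)) {
+ *       // out-of-bounds value, or the stream was already started
+ *   }
+ */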
+
+size_t ZSTD_DCtx_reset(ZSTD_DCtx* dctx, ZSTD_ResetDirective reset)
+{
+ if ( (reset == ZSTD_reset_session_only)
+ || (reset == ZSTD_reset_session_and_parameters) ) {
+ dctx->streamStage = zdss_init;
+ dctx->noForwardProgress = 0;
+ }
+ if ( (reset == ZSTD_reset_parameters)
+ || (reset == ZSTD_reset_session_and_parameters) ) {
+ RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, "");
+ ZSTD_clearDict(dctx);
+ ZSTD_DCtx_resetParameters(dctx);
+ }
+ return 0;
+}
+
+
+size_t ZSTD_sizeof_DStream(const ZSTD_DStream* dctx)
+{
+ return ZSTD_sizeof_DCtx(dctx);
+}
+
+size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize)
+{
+ size_t const blockSize = (size_t) MIN(windowSize, ZSTD_BLOCKSIZE_MAX);
+    /* space is needed to store the litbuffer after the output of a given block without stomping the extDict of a previous run, as well as to cover both windows against wildcopy */
+ unsigned long long const neededRBSize = windowSize + blockSize + ZSTD_BLOCKSIZE_MAX + (WILDCOPY_OVERLENGTH * 2);
+ unsigned long long const neededSize = MIN(frameContentSize, neededRBSize);
+ size_t const minRBSize = (size_t) neededSize;
+ RETURN_ERROR_IF((unsigned long long)minRBSize != neededSize,
+ frameParameter_windowTooLarge, "");
+ return minRBSize;
+}
+
+size_t ZSTD_estimateDStreamSize(size_t windowSize)
+{
+ size_t const blockSize = MIN(windowSize, ZSTD_BLOCKSIZE_MAX);
+ size_t const inBuffSize = blockSize; /* no block can be larger */
+ size_t const outBuffSize = ZSTD_decodingBufferSize_min(windowSize, ZSTD_CONTENTSIZE_UNKNOWN);
+ return ZSTD_estimateDCtxSize() + inBuffSize + outBuffSize;
+}
+
+size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize)
+{
+ U32 const windowSizeMax = 1U << ZSTD_WINDOWLOG_MAX; /* note : should be user-selectable, but requires an additional parameter (or a dctx) */
+ ZSTD_frameHeader zfh;
+ size_t const err = ZSTD_getFrameHeader(&zfh, src, srcSize);
+ if (ZSTD_isError(err)) return err;
+ RETURN_ERROR_IF(err>0, srcSize_wrong, "");
+ RETURN_ERROR_IF(zfh.windowSize > windowSizeMax,
+ frameParameter_windowTooLarge, "");
+ return ZSTD_estimateDStreamSize((size_t)zfh.windowSize);
+}
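+
+/* Example (illustrative sketch, in a comment only) : carving a DStream out of
+ * a caller-provided workspace, sized from the frame header. `big_alloc()` is
+ * a hypothetical allocator; `src` is assumed to hold at least the header.
+ *
+ *   size_t const need = ZSTD_estimateDStreamSize_fromFrame(src, srcSize);
+ *   if (!ZSTD_isError(need)) {
+ *       void* const workspace = big_alloc(need);
+ *       ZSTD_DStream* const zds = ZSTD_initStaticDStream(workspace, need);
+ *       // zds == NULL means the workspace was too small or misaligned
+ *   }
+ */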
+
+
+/* ***** Decompression ***** */
+
+static int ZSTD_DCtx_isOverflow(ZSTD_DStream* zds, size_t const neededInBuffSize, size_t const neededOutBuffSize)
+{
+ return (zds->inBuffSize + zds->outBuffSize) >= (neededInBuffSize + neededOutBuffSize) * ZSTD_WORKSPACETOOLARGE_FACTOR;
+}
+
+static void ZSTD_DCtx_updateOversizedDuration(ZSTD_DStream* zds, size_t const neededInBuffSize, size_t const neededOutBuffSize)
+{
+ if (ZSTD_DCtx_isOverflow(zds, neededInBuffSize, neededOutBuffSize))
+ zds->oversizedDuration++;
+ else
+ zds->oversizedDuration = 0;
+}
+
+static int ZSTD_DCtx_isOversizedTooLong(ZSTD_DStream* zds)
+{
+ return zds->oversizedDuration >= ZSTD_WORKSPACETOOLARGE_MAXDURATION;
+}
+
+/* Checks that the output buffer hasn't changed if ZSTD_bm_stable is used. */
+static size_t ZSTD_checkOutBuffer(ZSTD_DStream const* zds, ZSTD_outBuffer const* output)
+{
+ ZSTD_outBuffer const expect = zds->expectedOutBuffer;
+ /* No requirement when ZSTD_obm_stable is not enabled. */
+ if (zds->outBufferMode != ZSTD_bm_stable)
+ return 0;
+ /* Any buffer is allowed in zdss_init, this must be the same for every other call until
+ * the context is reset.
+ */
+ if (zds->streamStage == zdss_init)
+ return 0;
+ /* The buffer must match our expectation exactly. */
+ if (expect.dst == output->dst && expect.pos == output->pos && expect.size == output->size)
+ return 0;
+ RETURN_ERROR(dstBuffer_wrong, "ZSTD_d_stableOutBuffer enabled but output differs!");
+}
+
+/* Calls ZSTD_decompressContinue() with the right parameters for ZSTD_decompressStream()
+ * and updates the stage and the output buffer state. This call is extracted so it can be
+ * used both when reading directly from the ZSTD_inBuffer, and in buffered input mode.
+ * NOTE: You must break after calling this function since the streamStage is modified.
+ */
+static size_t ZSTD_decompressContinueStream(
+ ZSTD_DStream* zds, char** op, char* oend,
+ void const* src, size_t srcSize) {
+ int const isSkipFrame = ZSTD_isSkipFrame(zds);
+ if (zds->outBufferMode == ZSTD_bm_buffered) {
+ size_t const dstSize = isSkipFrame ? 0 : zds->outBuffSize - zds->outStart;
+ size_t const decodedSize = ZSTD_decompressContinue(zds,
+ zds->outBuff + zds->outStart, dstSize, src, srcSize);
+ FORWARD_IF_ERROR(decodedSize, "");
+ if (!decodedSize && !isSkipFrame) {
+ zds->streamStage = zdss_read;
+ } else {
+ zds->outEnd = zds->outStart + decodedSize;
+ zds->streamStage = zdss_flush;
+ }
+ } else {
+ /* Write directly into the output buffer */
+ size_t const dstSize = isSkipFrame ? 0 : (size_t)(oend - *op);
+ size_t const decodedSize = ZSTD_decompressContinue(zds, *op, dstSize, src, srcSize);
+ FORWARD_IF_ERROR(decodedSize, "");
+ *op += decodedSize;
+ /* Flushing is not needed. */
+ zds->streamStage = zdss_read;
+ assert(*op <= oend);
+ assert(zds->outBufferMode == ZSTD_bm_stable);
+ }
+ return 0;
+}
+
+size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
+{
+ const char* const src = (const char*)input->src;
+ const char* const istart = input->pos != 0 ? src + input->pos : src;
+ const char* const iend = input->size != 0 ? src + input->size : src;
+ const char* ip = istart;
+ char* const dst = (char*)output->dst;
+ char* const ostart = output->pos != 0 ? dst + output->pos : dst;
+ char* const oend = output->size != 0 ? dst + output->size : dst;
+ char* op = ostart;
+ U32 someMoreWork = 1;
+
+ DEBUGLOG(5, "ZSTD_decompressStream");
+ RETURN_ERROR_IF(
+ input->pos > input->size,
+ srcSize_wrong,
+ "forbidden. in: pos: %u vs size: %u",
+ (U32)input->pos, (U32)input->size);
+ RETURN_ERROR_IF(
+ output->pos > output->size,
+ dstSize_tooSmall,
+ "forbidden. out: pos: %u vs size: %u",
+ (U32)output->pos, (U32)output->size);
+ DEBUGLOG(5, "input size : %u", (U32)(input->size - input->pos));
+ FORWARD_IF_ERROR(ZSTD_checkOutBuffer(zds, output), "");
+
+ while (someMoreWork) {
+ switch(zds->streamStage)
+ {
+ case zdss_init :
+ DEBUGLOG(5, "stage zdss_init => transparent reset ");
+ zds->streamStage = zdss_loadHeader;
+ zds->lhSize = zds->inPos = zds->outStart = zds->outEnd = 0;
+ zds->hostageByte = 0;
+ zds->expectedOutBuffer = *output;
+ ZSTD_FALLTHROUGH;
+
+ case zdss_loadHeader :
+ DEBUGLOG(5, "stage zdss_loadHeader (srcSize : %u)", (U32)(iend - ip));
+ { size_t const hSize = ZSTD_getFrameHeader_advanced(&zds->fParams, zds->headerBuffer, zds->lhSize, zds->format);
+ if (zds->refMultipleDDicts && zds->ddictSet) {
+ ZSTD_DCtx_selectFrameDDict(zds);
+ }
+ DEBUGLOG(5, "header size : %u", (U32)hSize);
+ if (ZSTD_isError(hSize)) {
+ return hSize; /* error */
+ }
+ if (hSize != 0) { /* need more input */
+ size_t const toLoad = hSize - zds->lhSize; /* if hSize!=0, hSize > zds->lhSize */
+ size_t const remainingInput = (size_t)(iend-ip);
+ assert(iend >= ip);
+ if (toLoad > remainingInput) { /* not enough input to load full header */
+ if (remainingInput > 0) {
+ ZSTD_memcpy(zds->headerBuffer + zds->lhSize, ip, remainingInput);
+ zds->lhSize += remainingInput;
+ }
+ input->pos = input->size;
+ return (MAX((size_t)ZSTD_FRAMEHEADERSIZE_MIN(zds->format), hSize) - zds->lhSize) + ZSTD_blockHeaderSize; /* remaining header bytes + next block header */
+ }
+ assert(ip != NULL);
+ ZSTD_memcpy(zds->headerBuffer + zds->lhSize, ip, toLoad); zds->lhSize = hSize; ip += toLoad;
+ break;
+ } }
+
+ /* check for single-pass mode opportunity */
+ if (zds->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN
+ && zds->fParams.frameType != ZSTD_skippableFrame
+ && (U64)(size_t)(oend-op) >= zds->fParams.frameContentSize) {
+ size_t const cSize = ZSTD_findFrameCompressedSize(istart, (size_t)(iend-istart));
+ if (cSize <= (size_t)(iend-istart)) {
+ /* shortcut : using single-pass mode */
+ size_t const decompressedSize = ZSTD_decompress_usingDDict(zds, op, (size_t)(oend-op), istart, cSize, ZSTD_getDDict(zds));
+ if (ZSTD_isError(decompressedSize)) return decompressedSize;
+                DEBUGLOG(4, "shortcut to single-pass ZSTD_decompress_usingDDict()");
+ ip = istart + cSize;
+ op += decompressedSize;
+ zds->expected = 0;
+ zds->streamStage = zdss_init;
+ someMoreWork = 0;
+ break;
+ } }
+
+            /* Check output buffer is large enough for ZSTD_bm_stable. */
+ if (zds->outBufferMode == ZSTD_bm_stable
+ && zds->fParams.frameType != ZSTD_skippableFrame
+ && zds->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN
+ && (U64)(size_t)(oend-op) < zds->fParams.frameContentSize) {
+                RETURN_ERROR(dstSize_tooSmall, "ZSTD_bm_stable passed but ZSTD_outBuffer is too small");
+ }
+
+ /* Consume header (see ZSTDds_decodeFrameHeader) */
+ DEBUGLOG(4, "Consume header");
+ FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDDict(zds, ZSTD_getDDict(zds)), "");
+
+ if ((MEM_readLE32(zds->headerBuffer) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */
+ zds->expected = MEM_readLE32(zds->headerBuffer + ZSTD_FRAMEIDSIZE);
+ zds->stage = ZSTDds_skipFrame;
+ } else {
+ FORWARD_IF_ERROR(ZSTD_decodeFrameHeader(zds, zds->headerBuffer, zds->lhSize), "");
+ zds->expected = ZSTD_blockHeaderSize;
+ zds->stage = ZSTDds_decodeBlockHeader;
+ }
+
+ /* control buffer memory usage */
+ DEBUGLOG(4, "Control max memory usage (%u KB <= max %u KB)",
+ (U32)(zds->fParams.windowSize >>10),
+ (U32)(zds->maxWindowSize >> 10) );
+ zds->fParams.windowSize = MAX(zds->fParams.windowSize, 1U << ZSTD_WINDOWLOG_ABSOLUTEMIN);
+ RETURN_ERROR_IF(zds->fParams.windowSize > zds->maxWindowSize,
+ frameParameter_windowTooLarge, "");
+
+ /* Adapt buffer sizes to frame header instructions */
+ { size_t const neededInBuffSize = MAX(zds->fParams.blockSizeMax, 4 /* frame checksum */);
+ size_t const neededOutBuffSize = zds->outBufferMode == ZSTD_bm_buffered
+ ? ZSTD_decodingBufferSize_min(zds->fParams.windowSize, zds->fParams.frameContentSize)
+ : 0;
+
+ ZSTD_DCtx_updateOversizedDuration(zds, neededInBuffSize, neededOutBuffSize);
+
+ { int const tooSmall = (zds->inBuffSize < neededInBuffSize) || (zds->outBuffSize < neededOutBuffSize);
+ int const tooLarge = ZSTD_DCtx_isOversizedTooLong(zds);
+
+ if (tooSmall || tooLarge) {
+ size_t const bufferSize = neededInBuffSize + neededOutBuffSize;
+ DEBUGLOG(4, "inBuff : from %u to %u",
+ (U32)zds->inBuffSize, (U32)neededInBuffSize);
+ DEBUGLOG(4, "outBuff : from %u to %u",
+ (U32)zds->outBuffSize, (U32)neededOutBuffSize);
+ if (zds->staticSize) { /* static DCtx */
+ DEBUGLOG(4, "staticSize : %u", (U32)zds->staticSize);
+ assert(zds->staticSize >= sizeof(ZSTD_DCtx)); /* controlled at init */
+ RETURN_ERROR_IF(
+ bufferSize > zds->staticSize - sizeof(ZSTD_DCtx),
+ memory_allocation, "");
+ } else {
+ ZSTD_customFree(zds->inBuff, zds->customMem);
+ zds->inBuffSize = 0;
+ zds->outBuffSize = 0;
+ zds->inBuff = (char*)ZSTD_customMalloc(bufferSize, zds->customMem);
+ RETURN_ERROR_IF(zds->inBuff == NULL, memory_allocation, "");
+ }
+ zds->inBuffSize = neededInBuffSize;
+ zds->outBuff = zds->inBuff + zds->inBuffSize;
+ zds->outBuffSize = neededOutBuffSize;
+ } } }
+ zds->streamStage = zdss_read;
+ ZSTD_FALLTHROUGH;
+
+ case zdss_read:
+ DEBUGLOG(5, "stage zdss_read");
+ { size_t const neededInSize = ZSTD_nextSrcSizeToDecompressWithInputSize(zds, (size_t)(iend - ip));
+ DEBUGLOG(5, "neededInSize = %u", (U32)neededInSize);
+ if (neededInSize==0) { /* end of frame */
+ zds->streamStage = zdss_init;
+ someMoreWork = 0;
+ break;
+ }
+ if ((size_t)(iend-ip) >= neededInSize) { /* decode directly from src */
+ FORWARD_IF_ERROR(ZSTD_decompressContinueStream(zds, &op, oend, ip, neededInSize), "");
+ ip += neededInSize;
+ /* Function modifies the stage so we must break */
+ break;
+ } }
+ if (ip==iend) { someMoreWork = 0; break; } /* no more input */
+ zds->streamStage = zdss_load;
+ ZSTD_FALLTHROUGH;
+
+ case zdss_load:
+ { size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds);
+ size_t const toLoad = neededInSize - zds->inPos;
+ int const isSkipFrame = ZSTD_isSkipFrame(zds);
+ size_t loadedSize;
+ /* At this point we shouldn't be decompressing a block that we can stream. */
+ assert(neededInSize == ZSTD_nextSrcSizeToDecompressWithInputSize(zds, iend - ip));
+ if (isSkipFrame) {
+ loadedSize = MIN(toLoad, (size_t)(iend-ip));
+ } else {
+ RETURN_ERROR_IF(toLoad > zds->inBuffSize - zds->inPos,
+ corruption_detected,
+ "should never happen");
+ loadedSize = ZSTD_limitCopy(zds->inBuff + zds->inPos, toLoad, ip, (size_t)(iend-ip));
+ }
+ ip += loadedSize;
+ zds->inPos += loadedSize;
+ if (loadedSize < toLoad) { someMoreWork = 0; break; } /* not enough input, wait for more */
+
+ /* decode loaded input */
+ zds->inPos = 0; /* input is consumed */
+ FORWARD_IF_ERROR(ZSTD_decompressContinueStream(zds, &op, oend, zds->inBuff, neededInSize), "");
+ /* Function modifies the stage so we must break */
+ break;
+ }
+ case zdss_flush:
+ { size_t const toFlushSize = zds->outEnd - zds->outStart;
+ size_t const flushedSize = ZSTD_limitCopy(op, (size_t)(oend-op), zds->outBuff + zds->outStart, toFlushSize);
+ op += flushedSize;
+ zds->outStart += flushedSize;
+ if (flushedSize == toFlushSize) { /* flush completed */
+ zds->streamStage = zdss_read;
+ if ( (zds->outBuffSize < zds->fParams.frameContentSize)
+ && (zds->outStart + zds->fParams.blockSizeMax > zds->outBuffSize) ) {
+ DEBUGLOG(5, "restart filling outBuff from beginning (left:%i, needed:%u)",
+ (int)(zds->outBuffSize - zds->outStart),
+ (U32)zds->fParams.blockSizeMax);
+ zds->outStart = zds->outEnd = 0;
+ }
+ break;
+ } }
+ /* cannot complete flush */
+ someMoreWork = 0;
+ break;
+
+ default:
+ assert(0); /* impossible */
+        RETURN_ERROR(GENERIC, "impossible to reach"); /* some compilers require default to do something */
+ } }
+
+ /* result */
+ input->pos = (size_t)(ip - (const char*)(input->src));
+ output->pos = (size_t)(op - (char*)(output->dst));
+
+    /* Update the expected output buffer for ZSTD_bm_stable. */
+ zds->expectedOutBuffer = *output;
+
+ if ((ip==istart) && (op==ostart)) { /* no forward progress */
+        zds->noForwardProgress++;
+ if (zds->noForwardProgress >= ZSTD_NO_FORWARD_PROGRESS_MAX) {
+ RETURN_ERROR_IF(op==oend, dstSize_tooSmall, "");
+ RETURN_ERROR_IF(ip==iend, srcSize_wrong, "");
+ assert(0);
+ }
+ } else {
+ zds->noForwardProgress = 0;
+ }
+ { size_t nextSrcSizeHint = ZSTD_nextSrcSizeToDecompress(zds);
+ if (!nextSrcSizeHint) { /* frame fully decoded */
+ if (zds->outEnd == zds->outStart) { /* output fully flushed */
+ if (zds->hostageByte) {
+ if (input->pos >= input->size) {
+ /* can't release hostage (not present) */
+ zds->streamStage = zdss_read;
+ return 1;
+ }
+ input->pos++; /* release hostage */
+ } /* zds->hostageByte */
+ return 0;
+ } /* zds->outEnd == zds->outStart */
+ if (!zds->hostageByte) { /* output not fully flushed; keep last byte as hostage; will be released when all output is flushed */
+                input->pos--; /* note : pos > 0; otherwise it would be impossible to finish reading the last block */
+ zds->hostageByte=1;
+ }
+ return 1;
+ } /* nextSrcSizeHint==0 */
+ nextSrcSizeHint += ZSTD_blockHeaderSize * (ZSTD_nextInputType(zds) == ZSTDnit_block); /* preload header of next block */
+ assert(zds->inPos <= nextSrcSizeHint);
+ nextSrcSizeHint -= zds->inPos; /* part already loaded*/
+ return nextSrcSizeHint;
+ }
+}
+
+size_t ZSTD_decompressStream_simpleArgs (
+ ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity, size_t* dstPos,
+ const void* src, size_t srcSize, size_t* srcPos)
+{
+ ZSTD_outBuffer output = { dst, dstCapacity, *dstPos };
+ ZSTD_inBuffer input = { src, srcSize, *srcPos };
+    /* ZSTD_decompressStream() will check validity of dstPos and srcPos */
+ size_t const cErr = ZSTD_decompressStream(dctx, &output, &input);
+ *dstPos = output.pos;
+ *srcPos = input.pos;
+ return cErr;
+}
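+
+/* Minimal caller-side sketch (illustrative only; assumes `dctx`, `src` and
+ * `dst` are set up by the caller and that dst can hold the whole frame):
+ *
+ *   ZSTD_inBuffer  in  = { src, srcSize, 0 };
+ *   ZSTD_outBuffer out = { dst, dstCapacity, 0 };
+ *   while (in.pos < in.size) {
+ *       size_t const hint = ZSTD_decompressStream(dctx, &out, &in);
+ *       if (ZSTD_isError(hint)) return hint;  // corruption, sizing, ...
+ *       if (hint == 0) break;                 // frame decoded and flushed
+ *   }
+ */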
diff --git a/lib/zstd/decompress/zstd_decompress_block.c b/lib/zstd/decompress/zstd_decompress_block.c
new file mode 100644
index 000000000..c1913b8e7
--- /dev/null
+++ b/lib/zstd/decompress/zstd_decompress_block.c
@@ -0,0 +1,2072 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+/* zstd_decompress_block :
+ * this module takes care of decompressing _compressed_ block */
+
+/*-*******************************************************
+* Dependencies
+*********************************************************/
+#include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memmove, ZSTD_memset */
+#include "../common/compiler.h" /* prefetch */
+#include "../common/cpu.h" /* bmi2 */
+#include "../common/mem.h" /* low level memory routines */
+#define FSE_STATIC_LINKING_ONLY
+#include "../common/fse.h"
+#define HUF_STATIC_LINKING_ONLY
+#include "../common/huf.h"
+#include "../common/zstd_internal.h"
+#include "zstd_decompress_internal.h" /* ZSTD_DCtx */
+#include "zstd_ddict.h" /* ZSTD_DDictDictContent */
+#include "zstd_decompress_block.h"
+
+/*_*******************************************************
+* Macros
+**********************************************************/
+
+/* These two optional macros force the use one way or another of the two
+ * ZSTD_decompressSequences implementations. You can't force in both directions
+ * at the same time.
+ */
+#if defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
+ defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
+#error "Cannot force the use of the short and the long ZSTD_decompressSequences variants!"
+#endif
+
+
+/*_*******************************************************
+* Memory operations
+**********************************************************/
+static void ZSTD_copy4(void* dst, const void* src) { ZSTD_memcpy(dst, src, 4); }
+
+
+/*-*************************************************************
+ * Block decoding
+ ***************************************************************/
+
+/*! ZSTD_getcBlockSize() :
+ * Provides the size of compressed block from block header `src` */
+size_t ZSTD_getcBlockSize(const void* src, size_t srcSize,
+ blockProperties_t* bpPtr)
+{
+ RETURN_ERROR_IF(srcSize < ZSTD_blockHeaderSize, srcSize_wrong, "");
+
+ { U32 const cBlockHeader = MEM_readLE24(src);
+ U32 const cSize = cBlockHeader >> 3;
+ bpPtr->lastBlock = cBlockHeader & 1;
+ bpPtr->blockType = (blockType_e)((cBlockHeader >> 1) & 3);
+ bpPtr->origSize = cSize; /* only useful for RLE */
+ if (bpPtr->blockType == bt_rle) return 1;
+ RETURN_ERROR_IF(bpPtr->blockType == bt_reserved, corruption_detected, "");
+ return cSize;
+ }
+}
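+
+/* Worked example (values chosen for illustration): a block header of bytes
+ * { 0x0D, 0x00, 0x00 } reads as cBlockHeader = 0x00000D, hence
+ * lastBlock = 1, blockType = (0x0D >> 1) & 3 = 2 (bt_compressed), and
+ * cSize = 0x0D >> 3 = 1 byte of compressed payload. */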
+
+/* Allocate buffer for literals, either overlapping current dst, or split between dst and litExtraBuffer, or stored entirely within litExtraBuffer */
+static void ZSTD_allocateLiteralsBuffer(ZSTD_DCtx* dctx, void* const dst, const size_t dstCapacity, const size_t litSize,
+ const streaming_operation streaming, const size_t expectedWriteSize, const unsigned splitImmediately)
+{
+ if (streaming == not_streaming && dstCapacity > ZSTD_BLOCKSIZE_MAX + WILDCOPY_OVERLENGTH + litSize + WILDCOPY_OVERLENGTH)
+ {
+ /* room for litbuffer to fit without read faulting */
+ dctx->litBuffer = (BYTE*)dst + ZSTD_BLOCKSIZE_MAX + WILDCOPY_OVERLENGTH;
+ dctx->litBufferEnd = dctx->litBuffer + litSize;
+ dctx->litBufferLocation = ZSTD_in_dst;
+ }
+ else if (litSize > ZSTD_LITBUFFEREXTRASIZE)
+ {
+ /* won't fit in litExtraBuffer, so it will be split between end of dst and extra buffer */
+ if (splitImmediately) {
+ dctx->litBuffer = (BYTE*)dst + expectedWriteSize - litSize + ZSTD_LITBUFFEREXTRASIZE - WILDCOPY_OVERLENGTH;
+ dctx->litBufferEnd = dctx->litBuffer + litSize - ZSTD_LITBUFFEREXTRASIZE;
+ }
+ else {
+            /* initially this will be stored entirely in dst during Huffman decoding; it will be partially shifted to litExtraBuffer afterwards */
+ dctx->litBuffer = (BYTE*)dst + expectedWriteSize - litSize;
+ dctx->litBufferEnd = (BYTE*)dst + expectedWriteSize;
+ }
+ dctx->litBufferLocation = ZSTD_split;
+ }
+ else
+ {
+ /* fits entirely within litExtraBuffer, so no split is necessary */
+ dctx->litBuffer = dctx->litExtraBuffer;
+ dctx->litBufferEnd = dctx->litBuffer + litSize;
+ dctx->litBufferLocation = ZSTD_not_in_dst;
+ }
+}
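+
+/* Placement recap (illustrative summary of the three outcomes above):
+ *   - ZSTD_in_dst     : non-streaming only; the literals buffer lives in dst,
+ *                       past the block's write region, when dstCapacity
+ *                       leaves enough slack;
+ *   - ZSTD_split      : the last ZSTD_LITBUFFEREXTRASIZE bytes end up in
+ *                       litExtraBuffer (immediately, or after Huffman
+ *                       decoding), the rest at the end of dst;
+ *   - ZSTD_not_in_dst : litSize fits entirely within litExtraBuffer. */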
+
+/* Hidden declaration for fullbench */
+size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
+ const void* src, size_t srcSize,
+ void* dst, size_t dstCapacity, const streaming_operation streaming);
+/*! ZSTD_decodeLiteralsBlock() :
+ * Where it is possible to do so without being stomped by the output during decompression, the literals block will be stored
+ * in the dstBuffer. If there is room to do so, it will be stored in full in the excess dst space after where the current
+ * block will be output. Otherwise it will be stored at the end of the current dst blockspace, with a small portion being
+ * stored in dctx->litExtraBuffer to help keep it "ahead" of the current output write.
+ *
+ * @return : nb of bytes read from src (< srcSize)
+ * note : symbol not declared but exposed for fullbench */
+size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
+ const void* src, size_t srcSize, /* note : srcSize < BLOCKSIZE */
+ void* dst, size_t dstCapacity, const streaming_operation streaming)
+{
+ DEBUGLOG(5, "ZSTD_decodeLiteralsBlock");
+ RETURN_ERROR_IF(srcSize < MIN_CBLOCK_SIZE, corruption_detected, "");
+
+ { const BYTE* const istart = (const BYTE*) src;
+ symbolEncodingType_e const litEncType = (symbolEncodingType_e)(istart[0] & 3);
+
+ switch(litEncType)
+ {
+ case set_repeat:
+ DEBUGLOG(5, "set_repeat flag : re-using stats from previous compressed literals block");
+ RETURN_ERROR_IF(dctx->litEntropy==0, dictionary_corrupted, "");
+ ZSTD_FALLTHROUGH;
+
+ case set_compressed:
+ RETURN_ERROR_IF(srcSize < 5, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3");
+ { size_t lhSize, litSize, litCSize;
+ U32 singleStream=0;
+ U32 const lhlCode = (istart[0] >> 2) & 3;
+ U32 const lhc = MEM_readLE32(istart);
+ size_t hufSuccess;
+ size_t expectedWriteSize = MIN(ZSTD_BLOCKSIZE_MAX, dstCapacity);
+ switch(lhlCode)
+ {
+            case 0: case 1: default:   /* note : default is impossible, since lhlCode is in [0..3] */
+ /* 2 - 2 - 10 - 10 */
+ singleStream = !lhlCode;
+ lhSize = 3;
+ litSize = (lhc >> 4) & 0x3FF;
+ litCSize = (lhc >> 14) & 0x3FF;
+ break;
+ case 2:
+ /* 2 - 2 - 14 - 14 */
+ lhSize = 4;
+ litSize = (lhc >> 4) & 0x3FFF;
+ litCSize = lhc >> 18;
+ break;
+ case 3:
+ /* 2 - 2 - 18 - 18 */
+ lhSize = 5;
+ litSize = (lhc >> 4) & 0x3FFFF;
+ litCSize = (lhc >> 22) + ((size_t)istart[4] << 10);
+ break;
+ }
+ RETURN_ERROR_IF(litSize > 0 && dst == NULL, dstSize_tooSmall, "NULL not handled");
+ RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected, "");
+ RETURN_ERROR_IF(litCSize + lhSize > srcSize, corruption_detected, "");
+ RETURN_ERROR_IF(expectedWriteSize < litSize , dstSize_tooSmall, "");
+ ZSTD_allocateLiteralsBuffer(dctx, dst, dstCapacity, litSize, streaming, expectedWriteSize, 0);
+
+ /* prefetch huffman table if cold */
+ if (dctx->ddictIsCold && (litSize > 768 /* heuristic */)) {
+ PREFETCH_AREA(dctx->HUFptr, sizeof(dctx->entropy.hufTable));
+ }
+
+ if (litEncType==set_repeat) {
+ if (singleStream) {
+ hufSuccess = HUF_decompress1X_usingDTable_bmi2(
+ dctx->litBuffer, litSize, istart+lhSize, litCSize,
+ dctx->HUFptr, ZSTD_DCtx_get_bmi2(dctx));
+ } else {
+ hufSuccess = HUF_decompress4X_usingDTable_bmi2(
+ dctx->litBuffer, litSize, istart+lhSize, litCSize,
+ dctx->HUFptr, ZSTD_DCtx_get_bmi2(dctx));
+ }
+ } else {
+ if (singleStream) {
+#if defined(HUF_FORCE_DECOMPRESS_X2)
+ hufSuccess = HUF_decompress1X_DCtx_wksp(
+ dctx->entropy.hufTable, dctx->litBuffer, litSize,
+ istart+lhSize, litCSize, dctx->workspace,
+ sizeof(dctx->workspace));
+#else
+ hufSuccess = HUF_decompress1X1_DCtx_wksp_bmi2(
+ dctx->entropy.hufTable, dctx->litBuffer, litSize,
+ istart+lhSize, litCSize, dctx->workspace,
+ sizeof(dctx->workspace), ZSTD_DCtx_get_bmi2(dctx));
+#endif
+ } else {
+ hufSuccess = HUF_decompress4X_hufOnly_wksp_bmi2(
+ dctx->entropy.hufTable, dctx->litBuffer, litSize,
+ istart+lhSize, litCSize, dctx->workspace,
+ sizeof(dctx->workspace), ZSTD_DCtx_get_bmi2(dctx));
+ }
+ }
+ if (dctx->litBufferLocation == ZSTD_split)
+ {
+ ZSTD_memcpy(dctx->litExtraBuffer, dctx->litBufferEnd - ZSTD_LITBUFFEREXTRASIZE, ZSTD_LITBUFFEREXTRASIZE);
+ ZSTD_memmove(dctx->litBuffer + ZSTD_LITBUFFEREXTRASIZE - WILDCOPY_OVERLENGTH, dctx->litBuffer, litSize - ZSTD_LITBUFFEREXTRASIZE);
+ dctx->litBuffer += ZSTD_LITBUFFEREXTRASIZE - WILDCOPY_OVERLENGTH;
+ dctx->litBufferEnd -= WILDCOPY_OVERLENGTH;
+ }
+
+ RETURN_ERROR_IF(HUF_isError(hufSuccess), corruption_detected, "");
+
+ dctx->litPtr = dctx->litBuffer;
+ dctx->litSize = litSize;
+ dctx->litEntropy = 1;
+ if (litEncType==set_compressed) dctx->HUFptr = dctx->entropy.hufTable;
+ return litCSize + lhSize;
+ }
+
+ case set_basic:
+ { size_t litSize, lhSize;
+ U32 const lhlCode = ((istart[0]) >> 2) & 3;
+ size_t expectedWriteSize = MIN(ZSTD_BLOCKSIZE_MAX, dstCapacity);
+ switch(lhlCode)
+ {
+            case 0: case 2: default:   /* note : default is impossible, since lhlCode is in [0..3] */
+ lhSize = 1;
+ litSize = istart[0] >> 3;
+ break;
+ case 1:
+ lhSize = 2;
+ litSize = MEM_readLE16(istart) >> 4;
+ break;
+ case 3:
+ lhSize = 3;
+ litSize = MEM_readLE24(istart) >> 4;
+ break;
+ }
+
+ RETURN_ERROR_IF(litSize > 0 && dst == NULL, dstSize_tooSmall, "NULL not handled");
+ RETURN_ERROR_IF(expectedWriteSize < litSize, dstSize_tooSmall, "");
+ ZSTD_allocateLiteralsBuffer(dctx, dst, dstCapacity, litSize, streaming, expectedWriteSize, 1);
+ if (lhSize+litSize+WILDCOPY_OVERLENGTH > srcSize) { /* risk reading beyond src buffer with wildcopy */
+ RETURN_ERROR_IF(litSize+lhSize > srcSize, corruption_detected, "");
+ if (dctx->litBufferLocation == ZSTD_split)
+ {
+ ZSTD_memcpy(dctx->litBuffer, istart + lhSize, litSize - ZSTD_LITBUFFEREXTRASIZE);
+ ZSTD_memcpy(dctx->litExtraBuffer, istart + lhSize + litSize - ZSTD_LITBUFFEREXTRASIZE, ZSTD_LITBUFFEREXTRASIZE);
+ }
+ else
+ {
+ ZSTD_memcpy(dctx->litBuffer, istart + lhSize, litSize);
+ }
+ dctx->litPtr = dctx->litBuffer;
+ dctx->litSize = litSize;
+ return lhSize+litSize;
+ }
+ /* direct reference into compressed stream */
+ dctx->litPtr = istart+lhSize;
+ dctx->litSize = litSize;
+ dctx->litBufferEnd = dctx->litPtr + litSize;
+ dctx->litBufferLocation = ZSTD_not_in_dst;
+ return lhSize+litSize;
+ }
+
+ case set_rle:
+ { U32 const lhlCode = ((istart[0]) >> 2) & 3;
+ size_t litSize, lhSize;
+ size_t expectedWriteSize = MIN(ZSTD_BLOCKSIZE_MAX, dstCapacity);
+ switch(lhlCode)
+ {
+            case 0: case 2: default:   /* note : default is impossible, since lhlCode is in [0..3] */
+ lhSize = 1;
+ litSize = istart[0] >> 3;
+ break;
+ case 1:
+ lhSize = 2;
+ litSize = MEM_readLE16(istart) >> 4;
+ break;
+ case 3:
+ lhSize = 3;
+ litSize = MEM_readLE24(istart) >> 4;
+ RETURN_ERROR_IF(srcSize<4, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4");
+ break;
+ }
+ RETURN_ERROR_IF(litSize > 0 && dst == NULL, dstSize_tooSmall, "NULL not handled");
+ RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected, "");
+ RETURN_ERROR_IF(expectedWriteSize < litSize, dstSize_tooSmall, "");
+ ZSTD_allocateLiteralsBuffer(dctx, dst, dstCapacity, litSize, streaming, expectedWriteSize, 1);
+ if (dctx->litBufferLocation == ZSTD_split)
+ {
+ ZSTD_memset(dctx->litBuffer, istart[lhSize], litSize - ZSTD_LITBUFFEREXTRASIZE);
+ ZSTD_memset(dctx->litExtraBuffer, istart[lhSize], ZSTD_LITBUFFEREXTRASIZE);
+ }
+ else
+ {
+ ZSTD_memset(dctx->litBuffer, istart[lhSize], litSize);
+ }
+ dctx->litPtr = dctx->litBuffer;
+ dctx->litSize = litSize;
+ return lhSize+1;
+ }
+ default:
+ RETURN_ERROR(corruption_detected, "impossible");
+ }
+ }
+}
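+
+/* Worked example (illustrative): a first byte of 0x20 decodes as
+ * litEncType = 0x20 & 3 = 0 (set_basic) and lhlCode = (0x20 >> 2) & 3 = 0,
+ * so lhSize = 1 and litSize = 0x20 >> 3 = 4 : one header byte followed by
+ * four raw literal bytes. */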
+
+/* Default FSE distribution tables.
+ * These are pre-calculated FSE decoding tables using default distributions as defined in the specification:
+ * https://github.com/facebook/zstd/blob/release/doc/zstd_compression_format.md#default-distributions
+ * They were generated programmatically with the following method:
+ * - start from the default distributions, present in /lib/common/zstd_internal.h
+ * - generate tables normally, using ZSTD_buildFSETable()
+ * - print out the content of the tables
+ * - prettify the output, reported below, and test with a fuzzer to ensure correctness */
+
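+/* Regeneration sketch (assuming the default distributions and helpers
+ * declared in zstd_internal.h / zstd_decompress_internal.h):
+ *
+ *   ZSTD_seqSymbol dt[(1 << LL_DEFAULTNORMLOG) + 1];
+ *   U32 wksp[ZSTD_BUILD_FSE_TABLE_WKSP_SIZE_U32];
+ *   ZSTD_buildFSETable(dt, LL_defaultNorm, MaxLL, LL_base, LL_bits,
+ *                      LL_DEFAULTNORMLOG, wksp, sizeof(wksp), 0);
+ *   // printing dt[] then yields initializers like the ones below
+ */
+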
+/* Default FSE distribution table for Literal Lengths */
+static const ZSTD_seqSymbol LL_defaultDTable[(1<<LL_DEFAULTNORMLOG)+1] = {
+ { 1, 1, 1, LL_DEFAULTNORMLOG}, /* header : fastMode, tableLog */
+ /* nextState, nbAddBits, nbBits, baseVal */
+ { 0, 0, 4, 0}, { 16, 0, 4, 0},
+ { 32, 0, 5, 1}, { 0, 0, 5, 3},
+ { 0, 0, 5, 4}, { 0, 0, 5, 6},
+ { 0, 0, 5, 7}, { 0, 0, 5, 9},
+ { 0, 0, 5, 10}, { 0, 0, 5, 12},
+ { 0, 0, 6, 14}, { 0, 1, 5, 16},
+ { 0, 1, 5, 20}, { 0, 1, 5, 22},
+ { 0, 2, 5, 28}, { 0, 3, 5, 32},
+ { 0, 4, 5, 48}, { 32, 6, 5, 64},
+ { 0, 7, 5, 128}, { 0, 8, 6, 256},
+ { 0, 10, 6, 1024}, { 0, 12, 6, 4096},
+ { 32, 0, 4, 0}, { 0, 0, 4, 1},
+ { 0, 0, 5, 2}, { 32, 0, 5, 4},
+ { 0, 0, 5, 5}, { 32, 0, 5, 7},
+ { 0, 0, 5, 8}, { 32, 0, 5, 10},
+ { 0, 0, 5, 11}, { 0, 0, 6, 13},
+ { 32, 1, 5, 16}, { 0, 1, 5, 18},
+ { 32, 1, 5, 22}, { 0, 2, 5, 24},
+ { 32, 3, 5, 32}, { 0, 3, 5, 40},
+ { 0, 6, 4, 64}, { 16, 6, 4, 64},
+ { 32, 7, 5, 128}, { 0, 9, 6, 512},
+ { 0, 11, 6, 2048}, { 48, 0, 4, 0},
+ { 16, 0, 4, 1}, { 32, 0, 5, 2},
+ { 32, 0, 5, 3}, { 32, 0, 5, 5},
+ { 32, 0, 5, 6}, { 32, 0, 5, 8},
+ { 32, 0, 5, 9}, { 32, 0, 5, 11},
+ { 32, 0, 5, 12}, { 0, 0, 6, 15},
+ { 32, 1, 5, 18}, { 32, 1, 5, 20},
+ { 32, 2, 5, 24}, { 32, 2, 5, 28},
+ { 32, 3, 5, 40}, { 32, 4, 5, 48},
+ { 0, 16, 6,65536}, { 0, 15, 6,32768},
+ { 0, 14, 6,16384}, { 0, 13, 6, 8192},
+}; /* LL_defaultDTable */
+
+/* Default FSE distribution table for Offset Codes */
+static const ZSTD_seqSymbol OF_defaultDTable[(1<<OF_DEFAULTNORMLOG)+1] = {
+ { 1, 1, 1, OF_DEFAULTNORMLOG}, /* header : fastMode, tableLog */
+ /* nextState, nbAddBits, nbBits, baseVal */
+ { 0, 0, 5, 0}, { 0, 6, 4, 61},
+ { 0, 9, 5, 509}, { 0, 15, 5,32765},
+ { 0, 21, 5,2097149}, { 0, 3, 5, 5},
+ { 0, 7, 4, 125}, { 0, 12, 5, 4093},
+ { 0, 18, 5,262141}, { 0, 23, 5,8388605},
+ { 0, 5, 5, 29}, { 0, 8, 4, 253},
+ { 0, 14, 5,16381}, { 0, 20, 5,1048573},
+ { 0, 2, 5, 1}, { 16, 7, 4, 125},
+ { 0, 11, 5, 2045}, { 0, 17, 5,131069},
+ { 0, 22, 5,4194301}, { 0, 4, 5, 13},
+ { 16, 8, 4, 253}, { 0, 13, 5, 8189},
+ { 0, 19, 5,524285}, { 0, 1, 5, 1},
+ { 16, 6, 4, 61}, { 0, 10, 5, 1021},
+ { 0, 16, 5,65533}, { 0, 28, 5,268435453},
+ { 0, 27, 5,134217725}, { 0, 26, 5,67108861},
+ { 0, 25, 5,33554429}, { 0, 24, 5,16777213},
+}; /* OF_defaultDTable */
+
+
+/* Default FSE distribution table for Match Lengths */
+static const ZSTD_seqSymbol ML_defaultDTable[(1<<ML_DEFAULTNORMLOG)+1] = {
+ { 1, 1, 1, ML_DEFAULTNORMLOG}, /* header : fastMode, tableLog */
+ /* nextState, nbAddBits, nbBits, baseVal */
+ { 0, 0, 6, 3}, { 0, 0, 4, 4},
+ { 32, 0, 5, 5}, { 0, 0, 5, 6},
+ { 0, 0, 5, 8}, { 0, 0, 5, 9},
+ { 0, 0, 5, 11}, { 0, 0, 6, 13},
+ { 0, 0, 6, 16}, { 0, 0, 6, 19},
+ { 0, 0, 6, 22}, { 0, 0, 6, 25},
+ { 0, 0, 6, 28}, { 0, 0, 6, 31},
+ { 0, 0, 6, 34}, { 0, 1, 6, 37},
+ { 0, 1, 6, 41}, { 0, 2, 6, 47},
+ { 0, 3, 6, 59}, { 0, 4, 6, 83},
+ { 0, 7, 6, 131}, { 0, 9, 6, 515},
+ { 16, 0, 4, 4}, { 0, 0, 4, 5},
+ { 32, 0, 5, 6}, { 0, 0, 5, 7},
+ { 32, 0, 5, 9}, { 0, 0, 5, 10},
+ { 0, 0, 6, 12}, { 0, 0, 6, 15},
+ { 0, 0, 6, 18}, { 0, 0, 6, 21},
+ { 0, 0, 6, 24}, { 0, 0, 6, 27},
+ { 0, 0, 6, 30}, { 0, 0, 6, 33},
+ { 0, 1, 6, 35}, { 0, 1, 6, 39},
+ { 0, 2, 6, 43}, { 0, 3, 6, 51},
+ { 0, 4, 6, 67}, { 0, 5, 6, 99},
+ { 0, 8, 6, 259}, { 32, 0, 4, 4},
+ { 48, 0, 4, 4}, { 16, 0, 4, 5},
+ { 32, 0, 5, 7}, { 32, 0, 5, 8},
+ { 32, 0, 5, 10}, { 32, 0, 5, 11},
+ { 0, 0, 6, 14}, { 0, 0, 6, 17},
+ { 0, 0, 6, 20}, { 0, 0, 6, 23},
+ { 0, 0, 6, 26}, { 0, 0, 6, 29},
+ { 0, 0, 6, 32}, { 0, 16, 6,65539},
+ { 0, 15, 6,32771}, { 0, 14, 6,16387},
+ { 0, 13, 6, 8195}, { 0, 12, 6, 4099},
+ { 0, 11, 6, 2051}, { 0, 10, 6, 1027},
+}; /* ML_defaultDTable */
+
+
+static void ZSTD_buildSeqTable_rle(ZSTD_seqSymbol* dt, U32 baseValue, U8 nbAddBits)
+{
+ void* ptr = dt;
+ ZSTD_seqSymbol_header* const DTableH = (ZSTD_seqSymbol_header*)ptr;
+ ZSTD_seqSymbol* const cell = dt + 1;
+
+ DTableH->tableLog = 0;
+ DTableH->fastMode = 0;
+
+ cell->nbBits = 0;
+ cell->nextState = 0;
+ assert(nbAddBits < 255);
+ cell->nbAdditionalBits = nbAddBits;
+ cell->baseValue = baseValue;
+}
+
+
+/* ZSTD_buildFSETable() :
+ * generate FSE decoding table for one symbol (ll, ml or off)
+ * cannot fail if input is valid;
+ * all inputs are presumed validated at this stage */
+FORCE_INLINE_TEMPLATE
+void ZSTD_buildFSETable_body(ZSTD_seqSymbol* dt,
+ const short* normalizedCounter, unsigned maxSymbolValue,
+ const U32* baseValue, const U8* nbAdditionalBits,
+ unsigned tableLog, void* wksp, size_t wkspSize)
+{
+ ZSTD_seqSymbol* const tableDecode = dt+1;
+ U32 const maxSV1 = maxSymbolValue + 1;
+ U32 const tableSize = 1 << tableLog;
+
+ U16* symbolNext = (U16*)wksp;
+ BYTE* spread = (BYTE*)(symbolNext + MaxSeq + 1);
+ U32 highThreshold = tableSize - 1;
+
+
+ /* Sanity Checks */
+ assert(maxSymbolValue <= MaxSeq);
+ assert(tableLog <= MaxFSELog);
+ assert(wkspSize >= ZSTD_BUILD_FSE_TABLE_WKSP_SIZE);
+ (void)wkspSize;
+ /* Init, lay down lowprob symbols */
+ { ZSTD_seqSymbol_header DTableH;
+ DTableH.tableLog = tableLog;
+ DTableH.fastMode = 1;
+ { S16 const largeLimit= (S16)(1 << (tableLog-1));
+ U32 s;
+ for (s=0; s<maxSV1; s++) {
+ if (normalizedCounter[s]==-1) {
+ tableDecode[highThreshold--].baseValue = s;
+ symbolNext[s] = 1;
+ } else {
+ if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0;
+ assert(normalizedCounter[s]>=0);
+ symbolNext[s] = (U16)normalizedCounter[s];
+ } } }
+ ZSTD_memcpy(dt, &DTableH, sizeof(DTableH));
+ }
+
+ /* Spread symbols */
+ assert(tableSize <= 512);
+    /* Specialized symbol spreading for the case when there are
+     * no low probability (-1 count) symbols. When compressing
+     * small blocks, the compressor avoids low probability symbols
+     * so that decoding hits this case, since header decoding speed
+     * matters more.
+     */
+ if (highThreshold == tableSize - 1) {
+ size_t const tableMask = tableSize-1;
+ size_t const step = FSE_TABLESTEP(tableSize);
+ /* First lay down the symbols in order.
+ * We use a uint64_t to lay down 8 bytes at a time. This reduces branch
+ * misses since small blocks generally have small table logs, so nearly
+ * all symbols have counts <= 8. We ensure we have 8 bytes at the end of
+ * our buffer to handle the over-write.
+ */
+ {
+ U64 const add = 0x0101010101010101ull;
+ size_t pos = 0;
+ U64 sv = 0;
+ U32 s;
+ for (s=0; s<maxSV1; ++s, sv += add) {
+ int i;
+ int const n = normalizedCounter[s];
+ MEM_write64(spread + pos, sv);
+ for (i = 8; i < n; i += 8) {
+ MEM_write64(spread + pos + i, sv);
+ }
+ pos += n;
+ }
+ }
+ /* Now we spread those positions across the table.
+         * The benefit of doing it in two stages is that we avoid the
+         * variable-size inner loop, which caused lots of branch misses.
+         * Now we can run through all the positions without any branch misses.
+         * We unroll the loop twice, since that is what empirically worked best.
+ */
+ {
+ size_t position = 0;
+ size_t s;
+ size_t const unroll = 2;
+ assert(tableSize % unroll == 0); /* FSE_MIN_TABLELOG is 5 */
+ for (s = 0; s < (size_t)tableSize; s += unroll) {
+ size_t u;
+ for (u = 0; u < unroll; ++u) {
+ size_t const uPosition = (position + (u * step)) & tableMask;
+ tableDecode[uPosition].baseValue = spread[s + u];
+ }
+ position = (position + (unroll * step)) & tableMask;
+ }
+ assert(position == 0);
+ }
+ } else {
+ U32 const tableMask = tableSize-1;
+ U32 const step = FSE_TABLESTEP(tableSize);
+ U32 s, position = 0;
+ for (s=0; s<maxSV1; s++) {
+ int i;
+ int const n = normalizedCounter[s];
+ for (i=0; i<n; i++) {
+ tableDecode[position].baseValue = s;
+ position = (position + step) & tableMask;
+ while (position > highThreshold) position = (position + step) & tableMask; /* lowprob area */
+ } }
+ assert(position == 0); /* position must reach all cells once, otherwise normalizedCounter is incorrect */
+ }
+
+ /* Build Decoding table */
+ {
+ U32 u;
+ for (u=0; u<tableSize; u++) {
+ U32 const symbol = tableDecode[u].baseValue;
+ U32 const nextState = symbolNext[symbol]++;
+ tableDecode[u].nbBits = (BYTE) (tableLog - BIT_highbit32(nextState) );
+ tableDecode[u].nextState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize);
+ assert(nbAdditionalBits[symbol] < 255);
+ tableDecode[u].nbAdditionalBits = nbAdditionalBits[symbol];
+ tableDecode[u].baseValue = baseValue[symbol];
+ }
+ }
+}
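+
+/* Spreading-step arithmetic (illustrative): FSE_TABLESTEP(tableSize) is
+ * (tableSize >> 1) + (tableSize >> 3) + 3 as defined in fse.h, so a
+ * tableLog of 6 (tableSize = 64) gives step = 32 + 8 + 3 = 43. Since 43 is
+ * coprime with 64, position = (position + step) & tableMask visits every
+ * cell exactly once before returning to 0, which the asserts above check. */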
+
+/* Avoids the FORCE_INLINE of the _body() function. */
+static void ZSTD_buildFSETable_body_default(ZSTD_seqSymbol* dt,
+ const short* normalizedCounter, unsigned maxSymbolValue,
+ const U32* baseValue, const U8* nbAdditionalBits,
+ unsigned tableLog, void* wksp, size_t wkspSize)
+{
+ ZSTD_buildFSETable_body(dt, normalizedCounter, maxSymbolValue,
+ baseValue, nbAdditionalBits, tableLog, wksp, wkspSize);
+}
+
+#if DYNAMIC_BMI2
+BMI2_TARGET_ATTRIBUTE static void ZSTD_buildFSETable_body_bmi2(ZSTD_seqSymbol* dt,
+ const short* normalizedCounter, unsigned maxSymbolValue,
+ const U32* baseValue, const U8* nbAdditionalBits,
+ unsigned tableLog, void* wksp, size_t wkspSize)
+{
+ ZSTD_buildFSETable_body(dt, normalizedCounter, maxSymbolValue,
+ baseValue, nbAdditionalBits, tableLog, wksp, wkspSize);
+}
+#endif
+
+void ZSTD_buildFSETable(ZSTD_seqSymbol* dt,
+ const short* normalizedCounter, unsigned maxSymbolValue,
+ const U32* baseValue, const U8* nbAdditionalBits,
+ unsigned tableLog, void* wksp, size_t wkspSize, int bmi2)
+{
+#if DYNAMIC_BMI2
+ if (bmi2) {
+ ZSTD_buildFSETable_body_bmi2(dt, normalizedCounter, maxSymbolValue,
+ baseValue, nbAdditionalBits, tableLog, wksp, wkspSize);
+ return;
+ }
+#endif
+ (void)bmi2;
+ ZSTD_buildFSETable_body_default(dt, normalizedCounter, maxSymbolValue,
+ baseValue, nbAdditionalBits, tableLog, wksp, wkspSize);
+}
+
+
+/*! ZSTD_buildSeqTable() :
+ * @return : nb bytes read from src,
+ * or an error code if it fails */
+static size_t ZSTD_buildSeqTable(ZSTD_seqSymbol* DTableSpace, const ZSTD_seqSymbol** DTablePtr,
+ symbolEncodingType_e type, unsigned max, U32 maxLog,
+ const void* src, size_t srcSize,
+ const U32* baseValue, const U8* nbAdditionalBits,
+ const ZSTD_seqSymbol* defaultTable, U32 flagRepeatTable,
+ int ddictIsCold, int nbSeq, U32* wksp, size_t wkspSize,
+ int bmi2)
+{
+ switch(type)
+ {
+ case set_rle :
+ RETURN_ERROR_IF(!srcSize, srcSize_wrong, "");
+ RETURN_ERROR_IF((*(const BYTE*)src) > max, corruption_detected, "");
+ { U32 const symbol = *(const BYTE*)src;
+ U32 const baseline = baseValue[symbol];
+ U8 const nbBits = nbAdditionalBits[symbol];
+ ZSTD_buildSeqTable_rle(DTableSpace, baseline, nbBits);
+ }
+ *DTablePtr = DTableSpace;
+ return 1;
+ case set_basic :
+ *DTablePtr = defaultTable;
+ return 0;
+ case set_repeat:
+ RETURN_ERROR_IF(!flagRepeatTable, corruption_detected, "");
+ /* prefetch FSE table if used */
+ if (ddictIsCold && (nbSeq > 24 /* heuristic */)) {
+ const void* const pStart = *DTablePtr;
+ size_t const pSize = sizeof(ZSTD_seqSymbol) * (SEQSYMBOL_TABLE_SIZE(maxLog));
+ PREFETCH_AREA(pStart, pSize);
+ }
+ return 0;
+ case set_compressed :
+ { unsigned tableLog;
+ S16 norm[MaxSeq+1];
+ size_t const headerSize = FSE_readNCount(norm, &max, &tableLog, src, srcSize);
+ RETURN_ERROR_IF(FSE_isError(headerSize), corruption_detected, "");
+ RETURN_ERROR_IF(tableLog > maxLog, corruption_detected, "");
+ ZSTD_buildFSETable(DTableSpace, norm, max, baseValue, nbAdditionalBits, tableLog, wksp, wkspSize, bmi2);
+ *DTablePtr = DTableSpace;
+ return headerSize;
+ }
+ default :
+ assert(0);
+ RETURN_ERROR(GENERIC, "impossible");
+ }
+}
+
+size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
+ const void* src, size_t srcSize)
+{
+ const BYTE* const istart = (const BYTE*)src;
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* ip = istart;
+ int nbSeq;
+ DEBUGLOG(5, "ZSTD_decodeSeqHeaders");
+
+ /* check */
+ RETURN_ERROR_IF(srcSize < MIN_SEQUENCES_SIZE, srcSize_wrong, "");
+
+ /* SeqHead */
+ nbSeq = *ip++;
+ if (!nbSeq) {
+ *nbSeqPtr=0;
+ RETURN_ERROR_IF(srcSize != 1, srcSize_wrong, "");
+ return 1;
+ }
+ if (nbSeq > 0x7F) {
+ if (nbSeq == 0xFF) {
+ RETURN_ERROR_IF(ip+2 > iend, srcSize_wrong, "");
+ nbSeq = MEM_readLE16(ip) + LONGNBSEQ;
+ ip+=2;
+ } else {
+ RETURN_ERROR_IF(ip >= iend, srcSize_wrong, "");
+ nbSeq = ((nbSeq-0x80)<<8) + *ip++;
+ }
+ }
+ *nbSeqPtr = nbSeq;
+
+ /* FSE table descriptors */
+ RETURN_ERROR_IF(ip+1 > iend, srcSize_wrong, ""); /* minimum possible size: 1 byte for symbol encoding types */
+ { symbolEncodingType_e const LLtype = (symbolEncodingType_e)(*ip >> 6);
+ symbolEncodingType_e const OFtype = (symbolEncodingType_e)((*ip >> 4) & 3);
+ symbolEncodingType_e const MLtype = (symbolEncodingType_e)((*ip >> 2) & 3);
+ ip++;
+
+ /* Build DTables */
+ { size_t const llhSize = ZSTD_buildSeqTable(dctx->entropy.LLTable, &dctx->LLTptr,
+ LLtype, MaxLL, LLFSELog,
+ ip, iend-ip,
+ LL_base, LL_bits,
+ LL_defaultDTable, dctx->fseEntropy,
+ dctx->ddictIsCold, nbSeq,
+ dctx->workspace, sizeof(dctx->workspace),
+ ZSTD_DCtx_get_bmi2(dctx));
+ RETURN_ERROR_IF(ZSTD_isError(llhSize), corruption_detected, "ZSTD_buildSeqTable failed");
+ ip += llhSize;
+ }
+
+ { size_t const ofhSize = ZSTD_buildSeqTable(dctx->entropy.OFTable, &dctx->OFTptr,
+ OFtype, MaxOff, OffFSELog,
+ ip, iend-ip,
+ OF_base, OF_bits,
+ OF_defaultDTable, dctx->fseEntropy,
+ dctx->ddictIsCold, nbSeq,
+ dctx->workspace, sizeof(dctx->workspace),
+ ZSTD_DCtx_get_bmi2(dctx));
+ RETURN_ERROR_IF(ZSTD_isError(ofhSize), corruption_detected, "ZSTD_buildSeqTable failed");
+ ip += ofhSize;
+ }
+
+ { size_t const mlhSize = ZSTD_buildSeqTable(dctx->entropy.MLTable, &dctx->MLTptr,
+ MLtype, MaxML, MLFSELog,
+ ip, iend-ip,
+ ML_base, ML_bits,
+ ML_defaultDTable, dctx->fseEntropy,
+ dctx->ddictIsCold, nbSeq,
+ dctx->workspace, sizeof(dctx->workspace),
+ ZSTD_DCtx_get_bmi2(dctx));
+ RETURN_ERROR_IF(ZSTD_isError(mlhSize), corruption_detected, "ZSTD_buildSeqTable failed");
+ ip += mlhSize;
+ }
+ }
+
+ return ip-istart;
+}
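+
+/* Descriptor-byte example (illustrative): a value of 0x94 = 0b10010100
+ * splits into LLtype = 0b10 (set_compressed), OFtype = 0b01 (set_rle) and
+ * MLtype = 0b01 (set_rle); the two lowest bits are reserved and must be 0. */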
+
+
+typedef struct {
+ size_t litLength;
+ size_t matchLength;
+ size_t offset;
+} seq_t;
+
+typedef struct {
+ size_t state;
+ const ZSTD_seqSymbol* table;
+} ZSTD_fseState;
+
+typedef struct {
+ BIT_DStream_t DStream;
+ ZSTD_fseState stateLL;
+ ZSTD_fseState stateOffb;
+ ZSTD_fseState stateML;
+ size_t prevOffset[ZSTD_REP_NUM];
+} seqState_t;
+
+/*! ZSTD_overlapCopy8() :
+ * Copies 8 bytes from ip to op and updates op and ip where ip <= op.
+ * If the offset is < 8 then the offset is spread to at least 8 bytes.
+ *
+ * Precondition: *ip <= *op
+ * Postcondition: *op - *ip >= 8
+ */
+HINT_INLINE void ZSTD_overlapCopy8(BYTE** op, BYTE const** ip, size_t offset) {
+ assert(*ip <= *op);
+ if (offset < 8) {
+ /* close range match, overlap */
+ static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; /* added */
+ static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 }; /* subtracted */
+ int const sub2 = dec64table[offset];
+ (*op)[0] = (*ip)[0];
+ (*op)[1] = (*ip)[1];
+ (*op)[2] = (*ip)[2];
+ (*op)[3] = (*ip)[3];
+ *ip += dec32table[offset];
+ ZSTD_copy4(*op+4, *ip);
+ *ip -= sub2;
+ } else {
+ ZSTD_copy8(*op, *ip);
+ }
+ *ip += 8;
+ *op += 8;
+ assert(*op - *ip >= 8);
+}
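+
+/* Overlap example (illustrative): with offset == 1, the four byte-wise
+ * copies each replicate the byte just before *op, dec32table[1] == 1 moves
+ * *ip onto those fresh copies for the ZSTD_copy4(), and dec64table[1] == 8
+ * rewinds *ip so that afterwards *op - *ip == 8: an 8-byte RLE run whose
+ * effective offset has been widened to 8 for any follow-up wildcopy. */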
+
+/*! ZSTD_safecopy() :
+ * Specialized version of memcpy() that is allowed to READ up to WILDCOPY_OVERLENGTH past the input buffer
+ * and write up to 16 bytes past oend_w (op >= oend_w is allowed).
+ * This function is only called in the uncommon case where the sequence is near the end of the block. It
+ * should be fast for a single long sequence, but can be slow for several short sequences.
+ *
+ * @param ovtype controls the overlap detection
+ * - ZSTD_no_overlap: The source and destination are guaranteed to be at least WILDCOPY_VECLEN bytes apart.
+ * - ZSTD_overlap_src_before_dst: The src and dst may overlap and may be any distance apart.
+ * The src buffer must be before the dst buffer.
+ */
+static void ZSTD_safecopy(BYTE* op, const BYTE* const oend_w, BYTE const* ip, ptrdiff_t length, ZSTD_overlap_e ovtype) {
+ ptrdiff_t const diff = op - ip;
+ BYTE* const oend = op + length;
+
+ assert((ovtype == ZSTD_no_overlap && (diff <= -8 || diff >= 8 || op >= oend_w)) ||
+ (ovtype == ZSTD_overlap_src_before_dst && diff >= 0));
+
+ if (length < 8) {
+ /* Handle short lengths. */
+ while (op < oend) *op++ = *ip++;
+ return;
+ }
+ if (ovtype == ZSTD_overlap_src_before_dst) {
+ /* Copy 8 bytes and ensure the offset >= 8 when there can be overlap. */
+ assert(length >= 8);
+ ZSTD_overlapCopy8(&op, &ip, diff);
+ length -= 8;
+ assert(op - ip >= 8);
+ assert(op <= oend);
+ }
+
+ if (oend <= oend_w) {
+ /* No risk of overwrite. */
+ ZSTD_wildcopy(op, ip, length, ovtype);
+ return;
+ }
+ if (op <= oend_w) {
+ /* Wildcopy until we get close to the end. */
+ assert(oend > oend_w);
+ ZSTD_wildcopy(op, ip, oend_w - op, ovtype);
+ ip += oend_w - op;
+ op += oend_w - op;
+ }
+ /* Handle the leftovers. */
+ while (op < oend) *op++ = *ip++;
+}
+
+/* ZSTD_safecopyDstBeforeSrc():
+ * This version allows overlap with dst before src, and also handles the non-overlap case with dst after src.
+ * Kept separate from the more common ZSTD_safecopy case to avoid a performance impact on that common case. */
+static void ZSTD_safecopyDstBeforeSrc(BYTE* op, BYTE const* ip, ptrdiff_t length) {
+ ptrdiff_t const diff = op - ip;
+ BYTE* const oend = op + length;
+
+ if (length < 8 || diff > -8) {
+ /* Handle short lengths, close overlaps, and dst not before src. */
+ while (op < oend) *op++ = *ip++;
+ return;
+ }
+
+ if (op <= oend - WILDCOPY_OVERLENGTH && diff < -WILDCOPY_VECLEN) {
+ ZSTD_wildcopy(op, ip, oend - WILDCOPY_OVERLENGTH - op, ZSTD_no_overlap);
+ ip += oend - WILDCOPY_OVERLENGTH - op;
+ op += oend - WILDCOPY_OVERLENGTH - op;
+ }
+
+ /* Handle the leftovers. */
+ while (op < oend) *op++ = *ip++;
+}
+
+/* ZSTD_execSequenceEnd():
+ * This version handles cases that are near the end of the output buffer. It requires
+ * more careful checks to make sure there is no overflow. By separating out these hard
+ * and unlikely cases, we can speed up the common cases.
+ *
+ * NOTE: This function needs to be fast for a single long sequence, but doesn't need
+ * to be optimized for many small sequences, since those fall into ZSTD_execSequence().
+ */
+FORCE_NOINLINE
+size_t ZSTD_execSequenceEnd(BYTE* op,
+ BYTE* const oend, seq_t sequence,
+ const BYTE** litPtr, const BYTE* const litLimit,
+ const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd)
+{
+ BYTE* const oLitEnd = op + sequence.litLength;
+ size_t const sequenceLength = sequence.litLength + sequence.matchLength;
+ const BYTE* const iLitEnd = *litPtr + sequence.litLength;
+ const BYTE* match = oLitEnd - sequence.offset;
+ BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH;
+
+ /* bounds checks : careful of address space overflow in 32-bit mode */
+ RETURN_ERROR_IF(sequenceLength > (size_t)(oend - op), dstSize_tooSmall, "last match must fit within dstBuffer");
+ RETURN_ERROR_IF(sequence.litLength > (size_t)(litLimit - *litPtr), corruption_detected, "try to read beyond literal buffer");
+ assert(op < op + sequenceLength);
+ assert(oLitEnd < op + sequenceLength);
+
+ /* copy literals */
+ ZSTD_safecopy(op, oend_w, *litPtr, sequence.litLength, ZSTD_no_overlap);
+ op = oLitEnd;
+ *litPtr = iLitEnd;
+
+ /* copy Match */
+ if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
+ /* offset beyond prefix */
+ RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - virtualStart), corruption_detected, "");
+ match = dictEnd - (prefixStart - match);
+ if (match + sequence.matchLength <= dictEnd) {
+ ZSTD_memmove(oLitEnd, match, sequence.matchLength);
+ return sequenceLength;
+ }
+ /* span extDict & currentPrefixSegment */
+ { size_t const length1 = dictEnd - match;
+ ZSTD_memmove(oLitEnd, match, length1);
+ op = oLitEnd + length1;
+ sequence.matchLength -= length1;
+ match = prefixStart;
+ }
+ }
+ ZSTD_safecopy(op, oend_w, match, sequence.matchLength, ZSTD_overlap_src_before_dst);
+ return sequenceLength;
+}
+
+/* ZSTD_execSequenceEndSplitLitBuffer():
+ * This version is intended for cases where the litBuffer is still split. It is kept separate to avoid a performance impact on the common case.
+ */
+FORCE_NOINLINE
+size_t ZSTD_execSequenceEndSplitLitBuffer(BYTE* op,
+ BYTE* const oend, const BYTE* const oend_w, seq_t sequence,
+ const BYTE** litPtr, const BYTE* const litLimit,
+ const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd)
+{
+ BYTE* const oLitEnd = op + sequence.litLength;
+ size_t const sequenceLength = sequence.litLength + sequence.matchLength;
+ const BYTE* const iLitEnd = *litPtr + sequence.litLength;
+ const BYTE* match = oLitEnd - sequence.offset;
+
+
+ /* bounds checks : careful of address space overflow in 32-bit mode */
+ RETURN_ERROR_IF(sequenceLength > (size_t)(oend - op), dstSize_tooSmall, "last match must fit within dstBuffer");
+ RETURN_ERROR_IF(sequence.litLength > (size_t)(litLimit - *litPtr), corruption_detected, "try to read beyond literal buffer");
+ assert(op < op + sequenceLength);
+ assert(oLitEnd < op + sequenceLength);
+
+ /* copy literals */
+ RETURN_ERROR_IF(op > *litPtr && op < *litPtr + sequence.litLength, dstSize_tooSmall, "output should not catch up to and overwrite literal buffer");
+ ZSTD_safecopyDstBeforeSrc(op, *litPtr, sequence.litLength);
+ op = oLitEnd;
+ *litPtr = iLitEnd;
+
+ /* copy Match */
+ if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
+ /* offset beyond prefix */
+ RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - virtualStart), corruption_detected, "");
+ match = dictEnd - (prefixStart - match);
+ if (match + sequence.matchLength <= dictEnd) {
+ ZSTD_memmove(oLitEnd, match, sequence.matchLength);
+ return sequenceLength;
+ }
+ /* span extDict & currentPrefixSegment */
+ { size_t const length1 = dictEnd - match;
+ ZSTD_memmove(oLitEnd, match, length1);
+ op = oLitEnd + length1;
+ sequence.matchLength -= length1;
+ match = prefixStart;
+ }
+ }
+ ZSTD_safecopy(op, oend_w, match, sequence.matchLength, ZSTD_overlap_src_before_dst);
+ return sequenceLength;
+}
+
+HINT_INLINE
+size_t ZSTD_execSequence(BYTE* op,
+ BYTE* const oend, seq_t sequence,
+ const BYTE** litPtr, const BYTE* const litLimit,
+ const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd)
+{
+ BYTE* const oLitEnd = op + sequence.litLength;
+ size_t const sequenceLength = sequence.litLength + sequence.matchLength;
+ BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */
+ BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH; /* risk : address space underflow on oend=NULL */
+ const BYTE* const iLitEnd = *litPtr + sequence.litLength;
+ const BYTE* match = oLitEnd - sequence.offset;
+
+ assert(op != NULL /* Precondition */);
+ assert(oend_w < oend /* No underflow */);
+ /* Handle edge cases in a slow path:
+ * - Read beyond end of literals
+     * - Match end is within WILDCOPY_OVERLENGTH of oend
+ * - 32-bit mode and the match length overflows
+ */
+ if (UNLIKELY(
+ iLitEnd > litLimit ||
+ oMatchEnd > oend_w ||
+ (MEM_32bits() && (size_t)(oend - op) < sequenceLength + WILDCOPY_OVERLENGTH)))
+ return ZSTD_execSequenceEnd(op, oend, sequence, litPtr, litLimit, prefixStart, virtualStart, dictEnd);
+
+ /* Assumptions (everything else goes into ZSTD_execSequenceEnd()) */
+ assert(op <= oLitEnd /* No overflow */);
+ assert(oLitEnd < oMatchEnd /* Non-zero match & no overflow */);
+ assert(oMatchEnd <= oend /* No underflow */);
+ assert(iLitEnd <= litLimit /* Literal length is in bounds */);
+ assert(oLitEnd <= oend_w /* Can wildcopy literals */);
+ assert(oMatchEnd <= oend_w /* Can wildcopy matches */);
+
+ /* Copy Literals:
+ * Split out litLength <= 16 since it is nearly always true. +1.6% on gcc-9.
+ * We likely don't need the full 32-byte wildcopy.
+ */
+ assert(WILDCOPY_OVERLENGTH >= 16);
+ ZSTD_copy16(op, (*litPtr));
+ if (UNLIKELY(sequence.litLength > 16)) {
+ ZSTD_wildcopy(op + 16, (*litPtr) + 16, sequence.litLength - 16, ZSTD_no_overlap);
+ }
+ op = oLitEnd;
+ *litPtr = iLitEnd; /* update for next sequence */
+
+ /* Copy Match */
+ if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
+ /* offset beyond prefix -> go into extDict */
+ RETURN_ERROR_IF(UNLIKELY(sequence.offset > (size_t)(oLitEnd - virtualStart)), corruption_detected, "");
+ match = dictEnd + (match - prefixStart);
+ if (match + sequence.matchLength <= dictEnd) {
+ ZSTD_memmove(oLitEnd, match, sequence.matchLength);
+ return sequenceLength;
+ }
+ /* span extDict & currentPrefixSegment */
+ { size_t const length1 = dictEnd - match;
+ ZSTD_memmove(oLitEnd, match, length1);
+ op = oLitEnd + length1;
+ sequence.matchLength -= length1;
+ match = prefixStart;
+ }
+ }
+ /* Match within prefix of 1 or more bytes */
+ assert(op <= oMatchEnd);
+ assert(oMatchEnd <= oend_w);
+ assert(match >= prefixStart);
+ assert(sequence.matchLength >= 1);
+
+ /* Nearly all offsets are >= WILDCOPY_VECLEN bytes, which means we can use wildcopy
+ * without overlap checking.
+ */
+ if (LIKELY(sequence.offset >= WILDCOPY_VECLEN)) {
+ /* We bet on a full wildcopy for matches, since we expect matches to be
+ * longer than literals (in general). In silesia, ~10% of matches are longer
+ * than 16 bytes.
+ */
+ ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength, ZSTD_no_overlap);
+ return sequenceLength;
+ }
+ assert(sequence.offset < WILDCOPY_VECLEN);
+
+ /* Copy 8 bytes and spread the offset to be >= 8. */
+ ZSTD_overlapCopy8(&op, &match, sequence.offset);
+
+ /* If the match length is > 8 bytes, then continue with the wildcopy. */
+ if (sequence.matchLength > 8) {
+ assert(op < oMatchEnd);
+ ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength - 8, ZSTD_overlap_src_before_dst);
+ }
+ return sequenceLength;
+}
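+
+/* Sequence example (illustrative): for seq = { litLength = 3,
+ * matchLength = 16, offset = 2 } on the fast path, the ZSTD_copy16()
+ * covers the 3 literals, then, since offset < WILDCOPY_VECLEN,
+ * ZSTD_overlapCopy8() spreads the 2-byte pattern over 8 bytes and the
+ * remaining 8 match bytes go through the overlap-aware wildcopy. */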
+
+HINT_INLINE
+size_t ZSTD_execSequenceSplitLitBuffer(BYTE* op,
+ BYTE* const oend, const BYTE* const oend_w, seq_t sequence,
+ const BYTE** litPtr, const BYTE* const litLimit,
+ const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd)
+{
+ BYTE* const oLitEnd = op + sequence.litLength;
+ size_t const sequenceLength = sequence.litLength + sequence.matchLength;
+ BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */
+ const BYTE* const iLitEnd = *litPtr + sequence.litLength;
+ const BYTE* match = oLitEnd - sequence.offset;
+
+ assert(op != NULL /* Precondition */);
+ assert(oend_w < oend /* No underflow */);
+ /* Handle edge cases in a slow path:
+ * - Read beyond end of literals
+     * - Match end is within WILDCOPY_OVERLENGTH of oend
+ * - 32-bit mode and the match length overflows
+ */
+ if (UNLIKELY(
+ iLitEnd > litLimit ||
+ oMatchEnd > oend_w ||
+ (MEM_32bits() && (size_t)(oend - op) < sequenceLength + WILDCOPY_OVERLENGTH)))
+ return ZSTD_execSequenceEndSplitLitBuffer(op, oend, oend_w, sequence, litPtr, litLimit, prefixStart, virtualStart, dictEnd);
+
+    /* Assumptions (everything else goes into ZSTD_execSequenceEndSplitLitBuffer()) */
+ assert(op <= oLitEnd /* No overflow */);
+ assert(oLitEnd < oMatchEnd /* Non-zero match & no overflow */);
+ assert(oMatchEnd <= oend /* No underflow */);
+ assert(iLitEnd <= litLimit /* Literal length is in bounds */);
+ assert(oLitEnd <= oend_w /* Can wildcopy literals */);
+ assert(oMatchEnd <= oend_w /* Can wildcopy matches */);
+
+ /* Copy Literals:
+ * Split out litLength <= 16 since it is nearly always true. +1.6% on gcc-9.
+ * We likely don't need the full 32-byte wildcopy.
+ */
+ assert(WILDCOPY_OVERLENGTH >= 16);
+ ZSTD_copy16(op, (*litPtr));
+ if (UNLIKELY(sequence.litLength > 16)) {
+ ZSTD_wildcopy(op+16, (*litPtr)+16, sequence.litLength-16, ZSTD_no_overlap);
+ }
+ op = oLitEnd;
+ *litPtr = iLitEnd; /* update for next sequence */
+
+ /* Copy Match */
+ if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
+ /* offset beyond prefix -> go into extDict */
+ RETURN_ERROR_IF(UNLIKELY(sequence.offset > (size_t)(oLitEnd - virtualStart)), corruption_detected, "");
+ match = dictEnd + (match - prefixStart);
+ if (match + sequence.matchLength <= dictEnd) {
+ ZSTD_memmove(oLitEnd, match, sequence.matchLength);
+ return sequenceLength;
+ }
+ /* span extDict & currentPrefixSegment */
+ { size_t const length1 = dictEnd - match;
+ ZSTD_memmove(oLitEnd, match, length1);
+ op = oLitEnd + length1;
+ sequence.matchLength -= length1;
+ match = prefixStart;
+ } }
+ /* Match within prefix of 1 or more bytes */
+ assert(op <= oMatchEnd);
+ assert(oMatchEnd <= oend_w);
+ assert(match >= prefixStart);
+ assert(sequence.matchLength >= 1);
+
+ /* Nearly all offsets are >= WILDCOPY_VECLEN bytes, which means we can use wildcopy
+ * without overlap checking.
+ */
+ if (LIKELY(sequence.offset >= WILDCOPY_VECLEN)) {
+ /* We bet on a full wildcopy for matches, since we expect matches to be
+ * longer than literals (in general). In silesia, ~10% of matches are longer
+ * than 16 bytes.
+ */
+ ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength, ZSTD_no_overlap);
+ return sequenceLength;
+ }
+ assert(sequence.offset < WILDCOPY_VECLEN);
+
+ /* Copy 8 bytes and spread the offset to be >= 8. */
+ ZSTD_overlapCopy8(&op, &match, sequence.offset);
+
+ /* If the match length is > 8 bytes, then continue with the wildcopy. */
+ if (sequence.matchLength > 8) {
+ assert(op < oMatchEnd);
+ ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8, ZSTD_overlap_src_before_dst);
+ }
+ return sequenceLength;
+}
+
+
+static void
+ZSTD_initFseState(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, const ZSTD_seqSymbol* dt)
+{
+ const void* ptr = dt;
+ const ZSTD_seqSymbol_header* const DTableH = (const ZSTD_seqSymbol_header*)ptr;
+ DStatePtr->state = BIT_readBits(bitD, DTableH->tableLog);
+ DEBUGLOG(6, "ZSTD_initFseState : val=%u using %u bits",
+ (U32)DStatePtr->state, DTableH->tableLog);
+ BIT_reloadDStream(bitD);
+ DStatePtr->table = dt + 1;
+}
+
+FORCE_INLINE_TEMPLATE void
+ZSTD_updateFseStateWithDInfo(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, U16 nextState, U32 nbBits)
+{
+ size_t const lowBits = BIT_readBits(bitD, nbBits);
+ DStatePtr->state = nextState + lowBits;
+}
+
+/* We need to add at most (ZSTD_WINDOWLOG_MAX_32 - 1) bits to read the maximum
+ * offset bits. But we can only read at most (STREAM_ACCUMULATOR_MIN_32 - 1)
+ * bits before reloading. This value is the maximum number of bits we read
+ * after reloading when we are decoding long offsets.
+ */
+#define LONG_OFFSETS_MAX_EXTRA_BITS_32 \
+ (ZSTD_WINDOWLOG_MAX_32 > STREAM_ACCUMULATOR_MIN_32 \
+ ? ZSTD_WINDOWLOG_MAX_32 - STREAM_ACCUMULATOR_MIN_32 \
+ : 0)
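+
+/* For instance, with the usual ZSTD_WINDOWLOG_MAX_32 == 30 and
+ * STREAM_ACCUMULATOR_MIN_32 == 25, this evaluates to 5; a static assert in
+ * ZSTD_decodeSequence() below pins exactly that value. */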
+
+typedef enum { ZSTD_lo_isRegularOffset, ZSTD_lo_isLongOffset=1 } ZSTD_longOffset_e;
+
+FORCE_INLINE_TEMPLATE seq_t
+ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets)
+{
+ seq_t seq;
+ const ZSTD_seqSymbol* const llDInfo = seqState->stateLL.table + seqState->stateLL.state;
+ const ZSTD_seqSymbol* const mlDInfo = seqState->stateML.table + seqState->stateML.state;
+ const ZSTD_seqSymbol* const ofDInfo = seqState->stateOffb.table + seqState->stateOffb.state;
+ seq.matchLength = mlDInfo->baseValue;
+ seq.litLength = llDInfo->baseValue;
+ { U32 const ofBase = ofDInfo->baseValue;
+ BYTE const llBits = llDInfo->nbAdditionalBits;
+ BYTE const mlBits = mlDInfo->nbAdditionalBits;
+ BYTE const ofBits = ofDInfo->nbAdditionalBits;
+ BYTE const totalBits = llBits+mlBits+ofBits;
+
+ U16 const llNext = llDInfo->nextState;
+ U16 const mlNext = mlDInfo->nextState;
+ U16 const ofNext = ofDInfo->nextState;
+ U32 const llnbBits = llDInfo->nbBits;
+ U32 const mlnbBits = mlDInfo->nbBits;
+ U32 const ofnbBits = ofDInfo->nbBits;
+ /*
+         * As gcc has better branch and block analyzers, it is sometimes
+         * only valuable to mark likeliness for clang, where it gives
+         * around 3-4% of performance.
+ */
+
+ /* sequence */
+ { size_t offset;
+ #if defined(__clang__)
+ if (LIKELY(ofBits > 1)) {
+ #else
+ if (ofBits > 1) {
+ #endif
+ ZSTD_STATIC_ASSERT(ZSTD_lo_isLongOffset == 1);
+ ZSTD_STATIC_ASSERT(LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5);
+ assert(ofBits <= MaxOff);
+ if (MEM_32bits() && longOffsets && (ofBits >= STREAM_ACCUMULATOR_MIN_32)) {
+ U32 const extraBits = ofBits - MIN(ofBits, 32 - seqState->DStream.bitsConsumed);
+ offset = ofBase + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits);
+ BIT_reloadDStream(&seqState->DStream);
+ if (extraBits) offset += BIT_readBitsFast(&seqState->DStream, extraBits);
+ assert(extraBits <= LONG_OFFSETS_MAX_EXTRA_BITS_32); /* to avoid another reload */
+ } else {
+ offset = ofBase + BIT_readBitsFast(&seqState->DStream, ofBits/*>0*/); /* <= (ZSTD_WINDOWLOG_MAX-1) bits */
+ if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);
+ }
+ seqState->prevOffset[2] = seqState->prevOffset[1];
+ seqState->prevOffset[1] = seqState->prevOffset[0];
+ seqState->prevOffset[0] = offset;
+ } else {
+ U32 const ll0 = (llDInfo->baseValue == 0);
+ if (LIKELY((ofBits == 0))) {
+ offset = seqState->prevOffset[ll0];
+ seqState->prevOffset[1] = seqState->prevOffset[!ll0];
+ seqState->prevOffset[0] = offset;
+ } else {
+ offset = ofBase + ll0 + BIT_readBitsFast(&seqState->DStream, 1);
+ { size_t temp = (offset==3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset];
+ temp += !temp; /* 0 is not valid; input is corrupted; force offset to 1 */
+ if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1];
+ seqState->prevOffset[1] = seqState->prevOffset[0];
+ seqState->prevOffset[0] = offset = temp;
+ } } }
+ seq.offset = offset;
+ }
+
+ #if defined(__clang__)
+ if (UNLIKELY(mlBits > 0))
+ #else
+ if (mlBits > 0)
+ #endif
+ seq.matchLength += BIT_readBitsFast(&seqState->DStream, mlBits/*>0*/);
+
+ if (MEM_32bits() && (mlBits+llBits >= STREAM_ACCUMULATOR_MIN_32-LONG_OFFSETS_MAX_EXTRA_BITS_32))
+ BIT_reloadDStream(&seqState->DStream);
+ if (MEM_64bits() && UNLIKELY(totalBits >= STREAM_ACCUMULATOR_MIN_64-(LLFSELog+MLFSELog+OffFSELog)))
+ BIT_reloadDStream(&seqState->DStream);
+ /* Ensure there are enough bits to read the rest of data in 64-bit mode. */
+ ZSTD_STATIC_ASSERT(16+LLFSELog+MLFSELog+OffFSELog < STREAM_ACCUMULATOR_MIN_64);
+
+ #if defined(__clang__)
+ if (UNLIKELY(llBits > 0))
+ #else
+ if (llBits > 0)
+ #endif
+ seq.litLength += BIT_readBitsFast(&seqState->DStream, llBits/*>0*/);
+
+ if (MEM_32bits())
+ BIT_reloadDStream(&seqState->DStream);
+
+ DEBUGLOG(6, "seq: litL=%u, matchL=%u, offset=%u",
+ (U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset);
+
+ ZSTD_updateFseStateWithDInfo(&seqState->stateLL, &seqState->DStream, llNext, llnbBits); /* <= 9 bits */
+ ZSTD_updateFseStateWithDInfo(&seqState->stateML, &seqState->DStream, mlNext, mlnbBits); /* <= 9 bits */
+ if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */
+ ZSTD_updateFseStateWithDInfo(&seqState->stateOffb, &seqState->DStream, ofNext, ofnbBits); /* <= 8 bits */
+ }
+
+ return seq;
+}
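+
+/* Repeat-offset example (illustrative): with prevOffset = { 8, 16, 24 },
+ * ofBits == 0 and ll0 == 0 select prevOffset[0] = 8 and leave the history
+ * untouched, while a decoded index of 3 takes the special case
+ * prevOffset[0] - 1 = 7 and rotates the history to { 7, 8, 16 }. */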
+
+#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+MEM_STATIC int ZSTD_dictionaryIsActive(ZSTD_DCtx const* dctx, BYTE const* prefixStart, BYTE const* oLitEnd)
+{
+ size_t const windowSize = dctx->fParams.windowSize;
+ /* No dictionary used. */
+ if (dctx->dictContentEndForFuzzing == NULL) return 0;
+ /* Dictionary is our prefix. */
+ if (prefixStart == dctx->dictContentBeginForFuzzing) return 1;
+ /* Dictionary is not our ext-dict. */
+ if (dctx->dictEnd != dctx->dictContentEndForFuzzing) return 0;
+ /* Dictionary is not within our window size. */
+ if ((size_t)(oLitEnd - prefixStart) >= windowSize) return 0;
+ /* Dictionary is active. */
+ return 1;
+}
+
+MEM_STATIC void ZSTD_assertValidSequence(
+ ZSTD_DCtx const* dctx,
+ BYTE const* op, BYTE const* oend,
+ seq_t const seq,
+ BYTE const* prefixStart, BYTE const* virtualStart)
+{
+#if DEBUGLEVEL >= 1
+ size_t const windowSize = dctx->fParams.windowSize;
+ size_t const sequenceSize = seq.litLength + seq.matchLength;
+ BYTE const* const oLitEnd = op + seq.litLength;
+ DEBUGLOG(6, "Checking sequence: litL=%u matchL=%u offset=%u",
+ (U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset);
+ assert(op <= oend);
+ assert((size_t)(oend - op) >= sequenceSize);
+ assert(sequenceSize <= ZSTD_BLOCKSIZE_MAX);
+ if (ZSTD_dictionaryIsActive(dctx, prefixStart, oLitEnd)) {
+ size_t const dictSize = (size_t)((char const*)dctx->dictContentEndForFuzzing - (char const*)dctx->dictContentBeginForFuzzing);
+ /* Offset must be within the dictionary. */
+ assert(seq.offset <= (size_t)(oLitEnd - virtualStart));
+ assert(seq.offset <= windowSize + dictSize);
+ } else {
+ /* Offset must be within our window. */
+ assert(seq.offset <= windowSize);
+ }
+#else
+ (void)dctx, (void)op, (void)oend, (void)seq, (void)prefixStart, (void)virtualStart;
+#endif
+}
+#endif
+
+#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG
+
+
+FORCE_INLINE_TEMPLATE size_t
+DONT_VECTORIZE
+ZSTD_decompressSequences_bodySplitLitBuffer( ZSTD_DCtx* dctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+ const ZSTD_longOffset_e isLongOffset,
+ const int frame)
+{
+ const BYTE* ip = (const BYTE*)seqStart;
+ const BYTE* const iend = ip + seqSize;
+ BYTE* const ostart = (BYTE*)dst;
+ BYTE* const oend = ostart + maxDstSize;
+ BYTE* op = ostart;
+ const BYTE* litPtr = dctx->litPtr;
+ const BYTE* litBufferEnd = dctx->litBufferEnd;
+ const BYTE* const prefixStart = (const BYTE*) (dctx->prefixStart);
+ const BYTE* const vBase = (const BYTE*) (dctx->virtualStart);
+ const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
+ DEBUGLOG(5, "ZSTD_decompressSequences_bodySplitLitBuffer");
+ (void)frame;
+
+ /* Regen sequences */
+ if (nbSeq) {
+ seqState_t seqState;
+ dctx->fseEntropy = 1;
+ { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; }
+ RETURN_ERROR_IF(
+ ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend-ip)),
+ corruption_detected, "");
+ ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
+ ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
+ ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
+ assert(dst != NULL);
+
+ ZSTD_STATIC_ASSERT(
+ BIT_DStream_unfinished < BIT_DStream_completed &&
+ BIT_DStream_endOfBuffer < BIT_DStream_completed &&
+ BIT_DStream_completed < BIT_DStream_overflow);
+
+        /* start of section : decode and execute sequences without overrunning litPtr */
+ {
+ seq_t sequence = ZSTD_decodeSequence(&seqState, isLongOffset);
+ /* Align the decompression loop to 32 + 16 bytes.
+ *
+ * zstd compiled with gcc-9 on an Intel i9-9900k shows 10% decompression
+ * speed swings based on the alignment of the decompression loop. This
+ * performance swing is caused by parts of the decompression loop falling
+ * out of the DSB. The entire decompression loop should fit in the DSB,
+ * when it can't we get much worse performance. You can measure if you've
+ * hit the good case or the bad case with this perf command for some
+ * compressed file test.zst:
+ *
+ * perf stat -e cycles -e instructions -e idq.all_dsb_cycles_any_uops \
+ * -e idq.all_mite_cycles_any_uops -- ./zstd -tq test.zst
+ *
+ * If you see most cycles served out of the MITE you've hit the bad case.
+ * If you see most cycles served out of the DSB you've hit the good case.
+ * If it is pretty even then you may be in an okay case.
+ *
+ * This issue has been reproduced on the following CPUs:
+ * - Kabylake: Macbook Pro (15-inch, 2019) 2.4 GHz Intel Core i9
+ * Use Instruments->Counters to get DSB/MITE cycles.
+ * I never got performance swings, but I was able to
+ * go from the good case of mostly DSB to half of the
+ * cycles served from MITE.
+ * - Coffeelake: Intel i9-9900k
+ * - Coffeelake: Intel i7-9700k
+ *
+ * I haven't been able to reproduce the instability or DSB misses on any
+ * of the following CPUS:
+ * - Haswell
+         * - Broadwell: Intel(R) Xeon(R) CPU E5-2680 v4 @ 2.40GHz
+ * - Skylake
+ *
+ * Alignment is done for each of the three major decompression loops:
+ * - ZSTD_decompressSequences_bodySplitLitBuffer - presplit section of the literal buffer
+ * - ZSTD_decompressSequences_bodySplitLitBuffer - postsplit section of the literal buffer
+ * - ZSTD_decompressSequences_body
+ * Alignment choices are made to minimize large swings on bad cases and influence on performance
+ * from changes external to this code, rather than to overoptimize on the current commit.
+ *
+         * If you are seeing performance instability, this script can help test.
+ * It tests on 4 commits in zstd where I saw performance change.
+ *
+ * https://gist.github.com/terrelln/9889fc06a423fd5ca6e99351564473f4
+ */
+#if defined(__x86_64__)
+ __asm__(".p2align 6");
+# if __GNUC__ >= 7
+ /* good for gcc-7, gcc-9, and gcc-11 */
+ __asm__("nop");
+ __asm__(".p2align 5");
+ __asm__("nop");
+ __asm__(".p2align 4");
+# if __GNUC__ == 8 || __GNUC__ == 10
+ /* good for gcc-8 and gcc-10 */
+ __asm__("nop");
+ __asm__(".p2align 3");
+# endif
+# endif
+#endif
+
+ /* Handle the initial state where litBuffer is currently split between dst and litExtraBuffer */
+ for (; litPtr + sequence.litLength <= dctx->litBufferEnd; ) {
+ size_t const oneSeqSize = ZSTD_execSequenceSplitLitBuffer(op, oend, litPtr + sequence.litLength - WILDCOPY_OVERLENGTH, sequence, &litPtr, litBufferEnd, prefixStart, vBase, dictEnd);
+#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
+ assert(!ZSTD_isError(oneSeqSize));
+ if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase);
+#endif
+ if (UNLIKELY(ZSTD_isError(oneSeqSize)))
+ return oneSeqSize;
+ DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize);
+ op += oneSeqSize;
+ if (UNLIKELY(!--nbSeq))
+ break;
+ BIT_reloadDStream(&(seqState.DStream));
+ sequence = ZSTD_decodeSequence(&seqState, isLongOffset);
+ }
+
+ /* If there are more sequences, they will need to read literals from litExtraBuffer; copy over the remainder from dst and update litPtr and litEnd */
+ if (nbSeq > 0) {
+ const size_t leftoverLit = dctx->litBufferEnd - litPtr;
+ if (leftoverLit)
+ {
+ RETURN_ERROR_IF(leftoverLit > (size_t)(oend - op), dstSize_tooSmall, "remaining lit must fit within dstBuffer");
+ ZSTD_safecopyDstBeforeSrc(op, litPtr, leftoverLit);
+ sequence.litLength -= leftoverLit;
+ op += leftoverLit;
+ }
+ litPtr = dctx->litExtraBuffer;
+ litBufferEnd = dctx->litExtraBuffer + ZSTD_LITBUFFEREXTRASIZE;
+ dctx->litBufferLocation = ZSTD_not_in_dst;
+ {
+ size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litBufferEnd, prefixStart, vBase, dictEnd);
+#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
+ assert(!ZSTD_isError(oneSeqSize));
+ if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase);
+#endif
+ if (UNLIKELY(ZSTD_isError(oneSeqSize)))
+ return oneSeqSize;
+ DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize);
+ op += oneSeqSize;
+ if (--nbSeq)
+ BIT_reloadDStream(&(seqState.DStream));
+ }
+ }
+ }
+
+    if (nbSeq > 0) /* remaining sequences read their literals from litExtraBuffer */
+ {
+
+#if defined(__x86_64__)
+ __asm__(".p2align 6");
+ __asm__("nop");
+# if __GNUC__ != 7
+    /* worse for gcc-7; better for gcc-8, gcc-9, gcc-10, and clang */
+ __asm__(".p2align 4");
+ __asm__("nop");
+ __asm__(".p2align 3");
+# elif __GNUC__ >= 11
+ __asm__(".p2align 3");
+# else
+ __asm__(".p2align 5");
+ __asm__("nop");
+ __asm__(".p2align 3");
+# endif
+#endif
+
+ for (; ; ) {
+ seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset);
+ size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litBufferEnd, prefixStart, vBase, dictEnd);
+#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
+ assert(!ZSTD_isError(oneSeqSize));
+ if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase);
+#endif
+ if (UNLIKELY(ZSTD_isError(oneSeqSize)))
+ return oneSeqSize;
+ DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize);
+ op += oneSeqSize;
+ if (UNLIKELY(!--nbSeq))
+ break;
+ BIT_reloadDStream(&(seqState.DStream));
+ }
+ }
+
+ /* check if reached exact end */
+ DEBUGLOG(5, "ZSTD_decompressSequences_bodySplitLitBuffer: after decode loop, remaining nbSeq : %i", nbSeq);
+ RETURN_ERROR_IF(nbSeq, corruption_detected, "");
+ RETURN_ERROR_IF(BIT_reloadDStream(&seqState.DStream) < BIT_DStream_completed, corruption_detected, "");
+ /* save reps for next block */
+ { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); }
+ }
+
+ /* last literal segment */
+ if (dctx->litBufferLocation == ZSTD_split) /* split hasn't been reached yet, first get dst then copy litExtraBuffer */
+ {
+ size_t const lastLLSize = litBufferEnd - litPtr;
+ RETURN_ERROR_IF(lastLLSize > (size_t)(oend - op), dstSize_tooSmall, "");
+ if (op != NULL) {
+ ZSTD_memmove(op, litPtr, lastLLSize);
+ op += lastLLSize;
+ }
+ litPtr = dctx->litExtraBuffer;
+ litBufferEnd = dctx->litExtraBuffer + ZSTD_LITBUFFEREXTRASIZE;
+ dctx->litBufferLocation = ZSTD_not_in_dst;
+ }
+ { size_t const lastLLSize = litBufferEnd - litPtr;
+ RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall, "");
+ if (op != NULL) {
+ ZSTD_memcpy(op, litPtr, lastLLSize);
+ op += lastLLSize;
+ }
+ }
+
+ return op-ostart;
+}
+
+FORCE_INLINE_TEMPLATE size_t
+DONT_VECTORIZE
+ZSTD_decompressSequences_body(ZSTD_DCtx* dctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+ const ZSTD_longOffset_e isLongOffset,
+ const int frame)
+{
+ const BYTE* ip = (const BYTE*)seqStart;
+ const BYTE* const iend = ip + seqSize;
+ BYTE* const ostart = (BYTE*)dst;
+ BYTE* const oend = dctx->litBufferLocation == ZSTD_not_in_dst ? ostart + maxDstSize : dctx->litBuffer;
+ BYTE* op = ostart;
+ const BYTE* litPtr = dctx->litPtr;
+ const BYTE* const litEnd = litPtr + dctx->litSize;
+ const BYTE* const prefixStart = (const BYTE*)(dctx->prefixStart);
+ const BYTE* const vBase = (const BYTE*)(dctx->virtualStart);
+ const BYTE* const dictEnd = (const BYTE*)(dctx->dictEnd);
+ DEBUGLOG(5, "ZSTD_decompressSequences_body");
+ (void)frame;
+
+ /* Regen sequences */
+ if (nbSeq) {
+ seqState_t seqState;
+ dctx->fseEntropy = 1;
+ { U32 i; for (i = 0; i < ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; }
+ RETURN_ERROR_IF(
+ ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend - ip)),
+ corruption_detected, "");
+ ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
+ ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
+ ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
+ assert(dst != NULL);
+
+ ZSTD_STATIC_ASSERT(
+ BIT_DStream_unfinished < BIT_DStream_completed &&
+ BIT_DStream_endOfBuffer < BIT_DStream_completed &&
+ BIT_DStream_completed < BIT_DStream_overflow);
+
+#if defined(__x86_64__)
+ __asm__(".p2align 6");
+ __asm__("nop");
+# if __GNUC__ >= 7
+ __asm__(".p2align 5");
+ __asm__("nop");
+ __asm__(".p2align 3");
+# else
+ __asm__(".p2align 4");
+ __asm__("nop");
+ __asm__(".p2align 3");
+# endif
+#endif
+
+ for ( ; ; ) {
+ seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset);
+ size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, prefixStart, vBase, dictEnd);
+#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
+ assert(!ZSTD_isError(oneSeqSize));
+ if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase);
+#endif
+ if (UNLIKELY(ZSTD_isError(oneSeqSize)))
+ return oneSeqSize;
+ DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize);
+ op += oneSeqSize;
+ if (UNLIKELY(!--nbSeq))
+ break;
+ BIT_reloadDStream(&(seqState.DStream));
+ }
+
+ /* check if reached exact end */
+ DEBUGLOG(5, "ZSTD_decompressSequences_body: after decode loop, remaining nbSeq : %i", nbSeq);
+ RETURN_ERROR_IF(nbSeq, corruption_detected, "");
+ RETURN_ERROR_IF(BIT_reloadDStream(&seqState.DStream) < BIT_DStream_completed, corruption_detected, "");
+ /* save reps for next block */
+ { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); }
+ }
+
+ /* last literal segment */
+ { size_t const lastLLSize = litEnd - litPtr;
+ RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall, "");
+ if (op != NULL) {
+ ZSTD_memcpy(op, litPtr, lastLLSize);
+ op += lastLLSize;
+ }
+ }
+
+ return op-ostart;
+}
+
+static size_t
+ZSTD_decompressSequences_default(ZSTD_DCtx* dctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+ const ZSTD_longOffset_e isLongOffset,
+ const int frame)
+{
+ return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
+}
+
+static size_t
+ZSTD_decompressSequencesSplitLitBuffer_default(ZSTD_DCtx* dctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+ const ZSTD_longOffset_e isLongOffset,
+ const int frame)
+{
+ return ZSTD_decompressSequences_bodySplitLitBuffer(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
+}
+#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */
+
+#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT
+
+FORCE_INLINE_TEMPLATE size_t
+ZSTD_prefetchMatch(size_t prefetchPos, seq_t const sequence,
+ const BYTE* const prefixStart, const BYTE* const dictEnd)
+{
+ prefetchPos += sequence.litLength;
+ { const BYTE* const matchBase = (sequence.offset > prefetchPos) ? dictEnd : prefixStart;
+      const BYTE* const match = matchBase + prefetchPos - sequence.offset; /* note : this operation can overflow when seq.offset is really too large, which can only happen when the input is corrupted.
+                                                                            * No consequence though : the memory address is only used for prefetching, not for dereferencing */
+ PREFETCH_L1(match); PREFETCH_L1(match+CACHELINE_SIZE); /* note : it's safe to invoke PREFETCH() on any memory address, including invalid ones */
+ }
+ return prefetchPos + sequence.matchLength;
+}
+
+/* This decoding function employs prefetching
+ * to reduce the latency impact of cache misses.
+ * It's generally employed when a block contains a significant portion of long-distance matches,
+ * or when coupled with a "cold" dictionary */
+FORCE_INLINE_TEMPLATE size_t
+ZSTD_decompressSequencesLong_body(
+ ZSTD_DCtx* dctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+ const ZSTD_longOffset_e isLongOffset,
+ const int frame)
+{
+ const BYTE* ip = (const BYTE*)seqStart;
+ const BYTE* const iend = ip + seqSize;
+ BYTE* const ostart = (BYTE*)dst;
+ BYTE* const oend = dctx->litBufferLocation == ZSTD_in_dst ? dctx->litBuffer : ostart + maxDstSize;
+ BYTE* op = ostart;
+ const BYTE* litPtr = dctx->litPtr;
+ const BYTE* litBufferEnd = dctx->litBufferEnd;
+ const BYTE* const prefixStart = (const BYTE*) (dctx->prefixStart);
+ const BYTE* const dictStart = (const BYTE*) (dctx->virtualStart);
+ const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
+ (void)frame;
+
+ /* Regen sequences */
+ if (nbSeq) {
+#define STORED_SEQS 8
+#define STORED_SEQS_MASK (STORED_SEQS-1)
+#define ADVANCED_SEQS STORED_SEQS
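+        /*
+         * Pipeline sketch: the ring buffer below keeps STORED_SEQS == 8 decoded
+         * sequences in flight. While sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK]
+         * is being executed, the freshly decoded sequence is stored at
+         * sequences[seqNb & STORED_SEQS_MASK] and its match is prefetched,
+         * hiding main-memory latency behind useful work.
+         */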
+ seq_t sequences[STORED_SEQS];
+ int const seqAdvance = MIN(nbSeq, ADVANCED_SEQS);
+ seqState_t seqState;
+ int seqNb;
+ size_t prefetchPos = (size_t)(op-prefixStart); /* track position relative to prefixStart */
+
+ dctx->fseEntropy = 1;
+ { int i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; }
+ assert(dst != NULL);
+ assert(iend >= ip);
+ RETURN_ERROR_IF(
+ ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend-ip)),
+ corruption_detected, "");
+ ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
+ ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
+ ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
+
+ /* prepare in advance */
+ for (seqNb=0; (BIT_reloadDStream(&seqState.DStream) <= BIT_DStream_completed) && (seqNb<seqAdvance); seqNb++) {
+ seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset);
+ prefetchPos = ZSTD_prefetchMatch(prefetchPos, sequence, prefixStart, dictEnd);
+ sequences[seqNb] = sequence;
+ }
+ RETURN_ERROR_IF(seqNb<seqAdvance, corruption_detected, "");
+
+ /* decompress without stomping litBuffer */
+ for (; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && (seqNb < nbSeq); seqNb++) {
+ seq_t sequence = ZSTD_decodeSequence(&seqState, isLongOffset);
+ size_t oneSeqSize;
+
+ if (dctx->litBufferLocation == ZSTD_split && litPtr + sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK].litLength > dctx->litBufferEnd)
+ {
+ /* lit buffer is reaching split point, empty out the first buffer and transition to litExtraBuffer */
+ const size_t leftoverLit = dctx->litBufferEnd - litPtr;
+ if (leftoverLit)
+ {
+ RETURN_ERROR_IF(leftoverLit > (size_t)(oend - op), dstSize_tooSmall, "remaining lit must fit within dstBuffer");
+ ZSTD_safecopyDstBeforeSrc(op, litPtr, leftoverLit);
+ sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK].litLength -= leftoverLit;
+ op += leftoverLit;
+ }
+ litPtr = dctx->litExtraBuffer;
+ litBufferEnd = dctx->litExtraBuffer + ZSTD_LITBUFFEREXTRASIZE;
+ dctx->litBufferLocation = ZSTD_not_in_dst;
+ oneSeqSize = ZSTD_execSequence(op, oend, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd);
+#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
+ assert(!ZSTD_isError(oneSeqSize));
+ if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], prefixStart, dictStart);
+#endif
+ if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
+
+ prefetchPos = ZSTD_prefetchMatch(prefetchPos, sequence, prefixStart, dictEnd);
+ sequences[seqNb & STORED_SEQS_MASK] = sequence;
+ op += oneSeqSize;
+ }
+ else
+ {
+                /* lit buffer is wholly contained in either the first or second split, or not split at all */
+ oneSeqSize = dctx->litBufferLocation == ZSTD_split ?
+ ZSTD_execSequenceSplitLitBuffer(op, oend, litPtr + sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK].litLength - WILDCOPY_OVERLENGTH, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd) :
+ ZSTD_execSequence(op, oend, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd);
+#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
+ assert(!ZSTD_isError(oneSeqSize));
+ if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], prefixStart, dictStart);
+#endif
+ if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
+
+ prefetchPos = ZSTD_prefetchMatch(prefetchPos, sequence, prefixStart, dictEnd);
+ sequences[seqNb & STORED_SEQS_MASK] = sequence;
+ op += oneSeqSize;
+ }
+ }
+ RETURN_ERROR_IF(seqNb<nbSeq, corruption_detected, "");
+
+ /* finish queue */
+ seqNb -= seqAdvance;
+ for ( ; seqNb<nbSeq ; seqNb++) {
+ seq_t *sequence = &(sequences[seqNb&STORED_SEQS_MASK]);
+ if (dctx->litBufferLocation == ZSTD_split && litPtr + sequence->litLength > dctx->litBufferEnd)
+ {
+ const size_t leftoverLit = dctx->litBufferEnd - litPtr;
+ if (leftoverLit)
+ {
+ RETURN_ERROR_IF(leftoverLit > (size_t)(oend - op), dstSize_tooSmall, "remaining lit must fit within dstBuffer");
+ ZSTD_safecopyDstBeforeSrc(op, litPtr, leftoverLit);
+ sequence->litLength -= leftoverLit;
+ op += leftoverLit;
+ }
+ litPtr = dctx->litExtraBuffer;
+ litBufferEnd = dctx->litExtraBuffer + ZSTD_LITBUFFEREXTRASIZE;
+ dctx->litBufferLocation = ZSTD_not_in_dst;
+ {
+ size_t const oneSeqSize = ZSTD_execSequence(op, oend, *sequence, &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd);
+#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
+ assert(!ZSTD_isError(oneSeqSize));
+ if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequences[seqNb&STORED_SEQS_MASK], prefixStart, dictStart);
+#endif
+ if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
+ op += oneSeqSize;
+ }
+ }
+ else
+ {
+ size_t const oneSeqSize = dctx->litBufferLocation == ZSTD_split ?
+ ZSTD_execSequenceSplitLitBuffer(op, oend, litPtr + sequence->litLength - WILDCOPY_OVERLENGTH, *sequence, &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd) :
+ ZSTD_execSequence(op, oend, *sequence, &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd);
+#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
+ assert(!ZSTD_isError(oneSeqSize));
+ if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequences[seqNb&STORED_SEQS_MASK], prefixStart, dictStart);
+#endif
+ if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
+ op += oneSeqSize;
+ }
+ }
+
+ /* save reps for next block */
+ { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); }
+ }
+
+ /* last literal segment */
+ if (dctx->litBufferLocation == ZSTD_split) /* first deplete literal buffer in dst, then copy litExtraBuffer */
+ {
+ size_t const lastLLSize = litBufferEnd - litPtr;
+ RETURN_ERROR_IF(lastLLSize > (size_t)(oend - op), dstSize_tooSmall, "");
+ if (op != NULL) {
+ ZSTD_memmove(op, litPtr, lastLLSize);
+ op += lastLLSize;
+ }
+ litPtr = dctx->litExtraBuffer;
+ litBufferEnd = dctx->litExtraBuffer + ZSTD_LITBUFFEREXTRASIZE;
+ }
+ { size_t const lastLLSize = litBufferEnd - litPtr;
+ RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall, "");
+ if (op != NULL) {
+ ZSTD_memmove(op, litPtr, lastLLSize);
+ op += lastLLSize;
+ }
+ }
+
+ return op-ostart;
+}
+
+static size_t
+ZSTD_decompressSequencesLong_default(ZSTD_DCtx* dctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+ const ZSTD_longOffset_e isLongOffset,
+ const int frame)
+{
+ return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
+}
+#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */
+
+
+
+#if DYNAMIC_BMI2
+
+#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG
+static BMI2_TARGET_ATTRIBUTE size_t
+DONT_VECTORIZE
+ZSTD_decompressSequences_bmi2(ZSTD_DCtx* dctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+ const ZSTD_longOffset_e isLongOffset,
+ const int frame)
+{
+ return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
+}
+static BMI2_TARGET_ATTRIBUTE size_t
+DONT_VECTORIZE
+ZSTD_decompressSequencesSplitLitBuffer_bmi2(ZSTD_DCtx* dctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+ const ZSTD_longOffset_e isLongOffset,
+ const int frame)
+{
+ return ZSTD_decompressSequences_bodySplitLitBuffer(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
+}
+#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */
+
+#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT
+static BMI2_TARGET_ATTRIBUTE size_t
+ZSTD_decompressSequencesLong_bmi2(ZSTD_DCtx* dctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+ const ZSTD_longOffset_e isLongOffset,
+ const int frame)
+{
+ return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
+}
+#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */
+
+#endif /* DYNAMIC_BMI2 */
+
+typedef size_t (*ZSTD_decompressSequences_t)(
+ ZSTD_DCtx* dctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+ const ZSTD_longOffset_e isLongOffset,
+ const int frame);
+
+#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG
+static size_t
+ZSTD_decompressSequences(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+ const ZSTD_longOffset_e isLongOffset,
+ const int frame)
+{
+ DEBUGLOG(5, "ZSTD_decompressSequences");
+#if DYNAMIC_BMI2
+ if (ZSTD_DCtx_get_bmi2(dctx)) {
+ return ZSTD_decompressSequences_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
+ }
+#endif
+ return ZSTD_decompressSequences_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
+}
+static size_t
+ZSTD_decompressSequencesSplitLitBuffer(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+ const ZSTD_longOffset_e isLongOffset,
+ const int frame)
+{
+ DEBUGLOG(5, "ZSTD_decompressSequencesSplitLitBuffer");
+#if DYNAMIC_BMI2
+ if (ZSTD_DCtx_get_bmi2(dctx)) {
+ return ZSTD_decompressSequencesSplitLitBuffer_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
+ }
+#endif
+ return ZSTD_decompressSequencesSplitLitBuffer_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
+}
+#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */
+
+
+#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT
+/* ZSTD_decompressSequencesLong() :
+ * decompression function triggered when a minimum share of offsets is considered "long",
+ * i.e. likely out of cache.
+ * note : the definition of "long" is overloaded here, sometimes meaning "wider than the bitstream register" and sometimes "farther than the memory cache distance".
+ * This function tries to mitigate main-memory latency through the use of prefetching. */
+static size_t
+ZSTD_decompressSequencesLong(ZSTD_DCtx* dctx,
+ void* dst, size_t maxDstSize,
+ const void* seqStart, size_t seqSize, int nbSeq,
+ const ZSTD_longOffset_e isLongOffset,
+ const int frame)
+{
+ DEBUGLOG(5, "ZSTD_decompressSequencesLong");
+#if DYNAMIC_BMI2
+ if (ZSTD_DCtx_get_bmi2(dctx)) {
+ return ZSTD_decompressSequencesLong_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
+ }
+#endif
+ return ZSTD_decompressSequencesLong_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
+}
+#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */
+
+
+
+#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
+ !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
+/* ZSTD_getLongOffsetsShare() :
+ * condition : offTable must be valid
+ * @return : "share" of long offsets (arbitrarily defined as > (1<<23))
+ * compared to maximum possible of (1<<OffFSELog) */
+static unsigned
+ZSTD_getLongOffsetsShare(const ZSTD_seqSymbol* offTable)
+{
+ const void* ptr = offTable;
+ U32 const tableLog = ((const ZSTD_seqSymbol_header*)ptr)[0].tableLog;
+ const ZSTD_seqSymbol* table = offTable + 1;
+ U32 const max = 1 << tableLog;
+ U32 u, total = 0;
+ DEBUGLOG(5, "ZSTD_getLongOffsetsShare: (tableLog=%u)", tableLog);
+
+ assert(max <= (1 << OffFSELog)); /* max not too large */
+ for (u=0; u<max; u++) {
+ if (table[u].nbAdditionalBits > 22) total += 1;
+ }
+
+ assert(tableLog <= OffFSELog);
+ total <<= (OffFSELog - tableLog); /* scale to OffFSELog */
+
+ return total;
+}
+#endif
+
+size_t
+ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize, const int frame, const streaming_operation streaming)
+{ /* blockType == blockCompressed */
+ const BYTE* ip = (const BYTE*)src;
+ /* isLongOffset must be true if there are long offsets.
+ * Offsets are long if they are larger than 2^STREAM_ACCUMULATOR_MIN.
+ * We don't expect that to be the case in 64-bit mode.
+ * In block mode, window size is not known, so we have to be conservative.
+ * (note: but it could be evaluated from current-lowLimit)
+ */
+ ZSTD_longOffset_e const isLongOffset = (ZSTD_longOffset_e)(MEM_32bits() && (!frame || (dctx->fParams.windowSize > (1ULL << STREAM_ACCUMULATOR_MIN))));
+ DEBUGLOG(5, "ZSTD_decompressBlock_internal (size : %u)", (U32)srcSize);
+
+ RETURN_ERROR_IF(srcSize >= ZSTD_BLOCKSIZE_MAX, srcSize_wrong, "");
+
+ /* Decode literals section */
+ { size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize, dst, dstCapacity, streaming);
+ DEBUGLOG(5, "ZSTD_decodeLiteralsBlock : %u", (U32)litCSize);
+ if (ZSTD_isError(litCSize)) return litCSize;
+ ip += litCSize;
+ srcSize -= litCSize;
+ }
+
+ /* Build Decoding Tables */
+ {
+ /* These macros control at build-time which decompressor implementation
+ * we use. If neither is defined, we do some inspection and dispatch at
+ * runtime.
+ */
+#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
+ !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
+ int usePrefetchDecoder = dctx->ddictIsCold;
+#endif
+ int nbSeq;
+ size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, srcSize);
+ if (ZSTD_isError(seqHSize)) return seqHSize;
+ ip += seqHSize;
+ srcSize -= seqHSize;
+
+ RETURN_ERROR_IF(dst == NULL && nbSeq > 0, dstSize_tooSmall, "NULL not handled");
+
+#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
+ !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
+ if ( !usePrefetchDecoder
+ && (!frame || (dctx->fParams.windowSize > (1<<24)))
+ && (nbSeq>ADVANCED_SEQS) ) { /* could probably use a larger nbSeq limit */
+ U32 const shareLongOffsets = ZSTD_getLongOffsetsShare(dctx->OFTptr);
+ U32 const minShare = MEM_64bits() ? 7 : 20; /* heuristic values, correspond to 2.73% and 7.81% */
+ usePrefetchDecoder = (shareLongOffsets >= minShare);
+ }
+#endif
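+        /*
+         * Worked numbers for the heuristic above: the share returned by
+         * ZSTD_getLongOffsetsShare() is scaled to a maximum of
+         * 1 << OffFSELog == 256, so minShare == 7 corresponds to
+         * 7/256 ~= 2.73% long offsets on 64-bit and 20/256 ~= 7.81% on 32-bit,
+         * matching the percentages quoted above.
+         */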
+
+ dctx->ddictIsCold = 0;
+
+#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
+ !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
+ if (usePrefetchDecoder)
+#endif
+#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT
+ return ZSTD_decompressSequencesLong(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset, frame);
+#endif
+
+#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG
+ /* else */
+ if (dctx->litBufferLocation == ZSTD_split)
+ return ZSTD_decompressSequencesSplitLitBuffer(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset, frame);
+ else
+ return ZSTD_decompressSequences(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset, frame);
+#endif
+ }
+}
+
+
+void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst, size_t dstSize)
+{
+ if (dst != dctx->previousDstEnd && dstSize > 0) { /* not contiguous */
+ dctx->dictEnd = dctx->previousDstEnd;
+ dctx->virtualStart = (const char*)dst - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->prefixStart));
+ dctx->prefixStart = dst;
+ dctx->previousDstEnd = dst;
+ }
+}
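+
+/*
+ * Continuity example: if the previous call ended at previousDstEnd and the
+ * next dst differs, the bytes already produced become the "external
+ * dictionary" (dictEnd), while virtualStart is shifted back so that the
+ * distance (dst - virtualStart) still spans the full decoded history that
+ * offsets may reference.
+ */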
+
+
+size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize)
+{
+ size_t dSize;
+ ZSTD_checkContinuity(dctx, dst, dstCapacity);
+ dSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 0, not_streaming);
+ dctx->previousDstEnd = (char*)dst + dSize;
+ return dSize;
+}
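+
+/*
+ * Minimal usage sketch for ZSTD_decompressBlock() (illustrative only; the
+ * dctx and both buffers are assumed to have been prepared by the caller,
+ * e.g. a dctx from ZSTD_createDCtx() followed by ZSTD_decompressBegin(),
+ * and a dst buffer large enough for the regenerated block):
+ *
+ *   size_t const dSize = ZSTD_decompressBlock(dctx, dst, dstCapacity,
+ *                                             src, srcSize);
+ *   if (ZSTD_isError(dSize))
+ *       return dSize;
+ */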
diff --git a/lib/zstd/decompress/zstd_decompress_block.h b/lib/zstd/decompress/zstd_decompress_block.h
new file mode 100644
index 000000000..3d2d57a5d
--- /dev/null
+++ b/lib/zstd/decompress/zstd_decompress_block.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+
+#ifndef ZSTD_DEC_BLOCK_H
+#define ZSTD_DEC_BLOCK_H
+
+/*-*******************************************************
+ * Dependencies
+ *********************************************************/
+#include "../common/zstd_deps.h" /* size_t */
+#include <linux/zstd.h> /* DCtx, and some public functions */
+#include "../common/zstd_internal.h" /* blockProperties_t, and some public functions */
+#include "zstd_decompress_internal.h" /* ZSTD_seqSymbol */
+
+
+/* === Prototypes === */
+
+/* note: prototypes already published within `zstd.h` :
+ * ZSTD_decompressBlock()
+ */
+
+/* note: prototypes already published within `zstd_internal.h` :
+ * ZSTD_getcBlockSize()
+ * ZSTD_decodeSeqHeaders()
+ */
+
+
+/* Streaming state is used to inform allocation of the literal buffer */
+typedef enum {
+ not_streaming = 0,
+ is_streaming = 1
+} streaming_operation;
+
+/* ZSTD_decompressBlock_internal() :
+ * decompress block, starting at `src`,
+ * into destination buffer `dst`.
+ * @return : decompressed block size,
+ * or an error code (which can be tested using ZSTD_isError())
+ */
+size_t ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize, const int frame, const streaming_operation streaming);
+
+/* ZSTD_buildFSETable() :
+ * generate FSE decoding table for one symbol (ll, ml or off)
+ * this function must be called with valid parameters only
+ * (dt is large enough, normalizedCounter distribution total is a power of 2, max is within range, etc.)
+ * in which case it cannot fail.
+ * The workspace must be 4-byte aligned and at least ZSTD_BUILD_FSE_TABLE_WKSP_SIZE bytes, which is
+ * defined in zstd_decompress_internal.h.
+ * Internal use only.
+ */
+void ZSTD_buildFSETable(ZSTD_seqSymbol* dt,
+ const short* normalizedCounter, unsigned maxSymbolValue,
+ const U32* baseValue, const U8* nbAdditionalBits,
+ unsigned tableLog, void* wksp, size_t wkspSize,
+ int bmi2);
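+
+/* Call sketch (illustrative; normalizedCounter, tableLog and the LL_bits
+ * table are assumed to come from the caller's FSE header decode):
+ *
+ *   U32 wksp[ZSTD_BUILD_FSE_TABLE_WKSP_SIZE_U32];
+ *   ZSTD_buildFSETable(entropy->LLTable, normalizedCounter, MaxLL,
+ *                      LL_base, LL_bits, tableLog,
+ *                      wksp, sizeof(wksp), 0);
+ */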
+
+
+#endif /* ZSTD_DEC_BLOCK_H */
diff --git a/lib/zstd/decompress/zstd_decompress_internal.h b/lib/zstd/decompress/zstd_decompress_internal.h
new file mode 100644
index 000000000..98102edb6
--- /dev/null
+++ b/lib/zstd/decompress/zstd_decompress_internal.h
@@ -0,0 +1,228 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+
+/* zstd_decompress_internal:
+ * objects and definitions shared within lib/decompress modules */
+
+#ifndef ZSTD_DECOMPRESS_INTERNAL_H
+#define ZSTD_DECOMPRESS_INTERNAL_H
+
+
+/*-*******************************************************
+ * Dependencies
+ *********************************************************/
+#include "../common/mem.h" /* BYTE, U16, U32 */
+#include "../common/zstd_internal.h" /* constants : MaxLL, MaxML, MaxOff, LLFSELog, etc. */
+
+
+
+/*-*******************************************************
+ * Constants
+ *********************************************************/
+static UNUSED_ATTR const U32 LL_base[MaxLL+1] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 18, 20, 22, 24, 28, 32, 40,
+ 48, 64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000,
+ 0x2000, 0x4000, 0x8000, 0x10000 };
+
+static UNUSED_ATTR const U32 OF_base[MaxOff+1] = {
+ 0, 1, 1, 5, 0xD, 0x1D, 0x3D, 0x7D,
+ 0xFD, 0x1FD, 0x3FD, 0x7FD, 0xFFD, 0x1FFD, 0x3FFD, 0x7FFD,
+ 0xFFFD, 0x1FFFD, 0x3FFFD, 0x7FFFD, 0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD,
+ 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD, 0x1FFFFFFD, 0x3FFFFFFD, 0x7FFFFFFD };
+
+static UNUSED_ATTR const U8 OF_bits[MaxOff+1] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31 };
+
+static UNUSED_ATTR const U32 ML_base[MaxML+1] = {
+ 3, 4, 5, 6, 7, 8, 9, 10,
+ 11, 12, 13, 14, 15, 16, 17, 18,
+ 19, 20, 21, 22, 23, 24, 25, 26,
+ 27, 28, 29, 30, 31, 32, 33, 34,
+ 35, 37, 39, 41, 43, 47, 51, 59,
+ 67, 83, 99, 0x83, 0x103, 0x203, 0x403, 0x803,
+ 0x1003, 0x2003, 0x4003, 0x8003, 0x10003 };
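+
+/*
+ * Decoding example for the tables above: an offset code of 5 has
+ * OF_base[5] == 0x1D (29) and OF_bits[5] == 5, so the decoder reads 5
+ * extra bits b (0..31) and reconstructs offset == 29 + b, covering the
+ * range 29..60; code 6 then continues at OF_base[6] == 0x3D (61).
+ */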
+
+
+/*-*******************************************************
+ * Decompression types
+ *********************************************************/
+typedef struct {
+    U32 fastMode;
+    U32 tableLog;
+} ZSTD_seqSymbol_header;
+
+typedef struct {
+    U16  nextState;
+    BYTE nbAdditionalBits;
+    BYTE nbBits;
+    U32  baseValue;
+} ZSTD_seqSymbol;
+
+#define SEQSYMBOL_TABLE_SIZE(log)   (1 + (1 << (log)))
+
+#define ZSTD_BUILD_FSE_TABLE_WKSP_SIZE (sizeof(S16) * (MaxSeq + 1) + (1u << MaxFSELog) + sizeof(U64))
+#define ZSTD_BUILD_FSE_TABLE_WKSP_SIZE_U32 ((ZSTD_BUILD_FSE_TABLE_WKSP_SIZE + sizeof(U32) - 1) / sizeof(U32))
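+/*
+ * Worked size, assuming the usual constants from zstd_internal.h
+ * (MaxSeq == 52, MaxFSELog == 9): sizeof(S16) * 53 + (1 << 9) + 8
+ * == 106 + 512 + 8 == 626 bytes, i.e. 157 U32 words after rounding up.
+ */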
+
+typedef struct {
+ ZSTD_seqSymbol LLTable[SEQSYMBOL_TABLE_SIZE(LLFSELog)]; /* Note : Space reserved for FSE Tables */
+ ZSTD_seqSymbol OFTable[SEQSYMBOL_TABLE_SIZE(OffFSELog)]; /* is also used as temporary workspace while building hufTable during DDict creation */
+ ZSTD_seqSymbol MLTable[SEQSYMBOL_TABLE_SIZE(MLFSELog)]; /* and therefore must be at least HUF_DECOMPRESS_WORKSPACE_SIZE large */
+ HUF_DTable hufTable[HUF_DTABLE_SIZE(HufLog)]; /* can accommodate HUF_decompress4X */
+ U32 rep[ZSTD_REP_NUM];
+ U32 workspace[ZSTD_BUILD_FSE_TABLE_WKSP_SIZE_U32];
+} ZSTD_entropyDTables_t;
+
+typedef enum { ZSTDds_getFrameHeaderSize, ZSTDds_decodeFrameHeader,
+ ZSTDds_decodeBlockHeader, ZSTDds_decompressBlock,
+ ZSTDds_decompressLastBlock, ZSTDds_checkChecksum,
+ ZSTDds_decodeSkippableHeader, ZSTDds_skipFrame } ZSTD_dStage;
+
+typedef enum { zdss_init=0, zdss_loadHeader,
+ zdss_read, zdss_load, zdss_flush } ZSTD_dStreamStage;
+
+typedef enum {
+ ZSTD_use_indefinitely = -1, /* Use the dictionary indefinitely */
+ ZSTD_dont_use = 0, /* Do not use the dictionary (if one exists free it) */
+ ZSTD_use_once = 1 /* Use the dictionary once and set to ZSTD_dont_use */
+} ZSTD_dictUses_e;
+
+/* Hashset for storing references to multiple ZSTD_DDict within ZSTD_DCtx */
+typedef struct {
+ const ZSTD_DDict** ddictPtrTable;
+ size_t ddictPtrTableSize;
+ size_t ddictPtrCount;
+} ZSTD_DDictHashSet;
+
+#ifndef ZSTD_DECODER_INTERNAL_BUFFER
+# define ZSTD_DECODER_INTERNAL_BUFFER (1 << 16)
+#endif
+
+#define ZSTD_LBMIN 64
+#define ZSTD_LBMAX (128 << 10)
+
+/* extra buffer, compensates when dst is not large enough to store litBuffer */
+#define ZSTD_LITBUFFEREXTRASIZE BOUNDED(ZSTD_LBMIN, ZSTD_DECODER_INTERNAL_BUFFER, ZSTD_LBMAX)
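+/*
+ * With the default ZSTD_DECODER_INTERNAL_BUFFER of 1 << 16, the bound
+ * above resolves to 64 KiB, since ZSTD_LBMIN <= 65536 <= ZSTD_LBMAX
+ * (64 <= 65536 <= 131072).
+ */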
+
+typedef enum {
+ ZSTD_not_in_dst = 0, /* Stored entirely within litExtraBuffer */
+ ZSTD_in_dst = 1, /* Stored entirely within dst (in memory after current output write) */
+ ZSTD_split = 2 /* Split between litExtraBuffer and dst */
+} ZSTD_litLocation_e;
+
+struct ZSTD_DCtx_s
+{
+ const ZSTD_seqSymbol* LLTptr;
+ const ZSTD_seqSymbol* MLTptr;
+ const ZSTD_seqSymbol* OFTptr;
+ const HUF_DTable* HUFptr;
+ ZSTD_entropyDTables_t entropy;
+ U32 workspace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; /* space needed when building huffman tables */
+ const void* previousDstEnd; /* detect continuity */
+ const void* prefixStart; /* start of current segment */
+ const void* virtualStart; /* virtual start of previous segment if it was just before current one */
+ const void* dictEnd; /* end of previous segment */
+ size_t expected;
+ ZSTD_frameHeader fParams;
+ U64 processedCSize;
+ U64 decodedSize;
+ blockType_e bType; /* used in ZSTD_decompressContinue(), store blockType between block header decoding and block decompression stages */
+ ZSTD_dStage stage;
+ U32 litEntropy;
+ U32 fseEntropy;
+ struct xxh64_state xxhState;
+ size_t headerSize;
+ ZSTD_format_e format;
+ ZSTD_forceIgnoreChecksum_e forceIgnoreChecksum; /* User specified: if == 1, will ignore checksums in compressed frame. Default == 0 */
+ U32 validateChecksum; /* if == 1, will validate checksum. Is == 1 if (fParams.checksumFlag == 1) and (forceIgnoreChecksum == 0). */
+ const BYTE* litPtr;
+ ZSTD_customMem customMem;
+ size_t litSize;
+ size_t rleSize;
+ size_t staticSize;
+#if DYNAMIC_BMI2 != 0
+ int bmi2; /* == 1 if the CPU supports BMI2 and 0 otherwise. CPU support is determined dynamically once per context lifetime. */
+#endif
+
+ /* dictionary */
+ ZSTD_DDict* ddictLocal;
+ const ZSTD_DDict* ddict; /* set by ZSTD_initDStream_usingDDict(), or ZSTD_DCtx_refDDict() */
+ U32 dictID;
+ int ddictIsCold; /* if == 1 : dictionary is "new" for working context, and presumed "cold" (not in cpu cache) */
+ ZSTD_dictUses_e dictUses;
+ ZSTD_DDictHashSet* ddictSet; /* Hash set for multiple ddicts */
+ ZSTD_refMultipleDDicts_e refMultipleDDicts; /* User specified: if == 1, will allow references to multiple DDicts. Default == 0 (disabled) */
+
+ /* streaming */
+ ZSTD_dStreamStage streamStage;
+ char* inBuff;
+ size_t inBuffSize;
+ size_t inPos;
+ size_t maxWindowSize;
+ char* outBuff;
+ size_t outBuffSize;
+ size_t outStart;
+ size_t outEnd;
+ size_t lhSize;
+ U32 hostageByte;
+ int noForwardProgress;
+ ZSTD_bufferMode_e outBufferMode;
+ ZSTD_outBuffer expectedOutBuffer;
+
+ /* workspace */
+ BYTE* litBuffer;
+ const BYTE* litBufferEnd;
+ ZSTD_litLocation_e litBufferLocation;
+ BYTE litExtraBuffer[ZSTD_LITBUFFEREXTRASIZE + WILDCOPY_OVERLENGTH]; /* literal buffer can be split between storage within dst and within this scratch buffer */
+ BYTE headerBuffer[ZSTD_FRAMEHEADERSIZE_MAX];
+
+ size_t oversizedDuration;
+
+#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+ void const* dictContentBeginForFuzzing;
+ void const* dictContentEndForFuzzing;
+#endif
+
+ /* Tracing */
+}; /* typedef'd to ZSTD_DCtx within "zstd.h" */
+
+MEM_STATIC int ZSTD_DCtx_get_bmi2(const struct ZSTD_DCtx_s *dctx) {
+#if DYNAMIC_BMI2 != 0
+ return dctx->bmi2;
+#else
+ (void)dctx;
+ return 0;
+#endif
+}
+
+/*-*******************************************************
+ * Shared internal functions
+ *********************************************************/
+
+/*! ZSTD_loadDEntropy() :
+ * dict : must point at beginning of a valid zstd dictionary.
+ * @return : size of dictionary header (size of magic number + dict ID + entropy tables) */
+size_t ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy,
+ const void* const dict, size_t const dictSize);
+
+/*! ZSTD_checkContinuity() :
+ * check if next `dst` follows previous position, where decompression ended.
+ * If yes, do nothing (continue on current segment).
+ * If not, classify previous segment as "external dictionary", and start a new segment.
+ * This function cannot fail. */
+void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst, size_t dstSize);
+
+
+#endif /* ZSTD_DECOMPRESS_INTERNAL_H */
diff --git a/lib/zstd/decompress_sources.h b/lib/zstd/decompress_sources.h
new file mode 100644
index 000000000..a06ca187a
--- /dev/null
+++ b/lib/zstd/decompress_sources.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
+/*
+ * Copyright (c) Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+/*
+ * This file includes every .c file needed for decompression.
+ * It is used by lib/decompress_unzstd.c to include the decompression
+ * source into the translation unit, so it can be used for kernel
+ * decompression.
+ */
+
+/*
+ * Disable the ASM Huffman implementation because we need to
+ * include all the sources.
+ */
+#define ZSTD_DISABLE_ASM 1
+
+#include "common/debug.c"
+#include "common/entropy_common.c"
+#include "common/error_private.c"
+#include "common/fse_decompress.c"
+#include "common/zstd_common.c"
+#include "decompress/huf_decompress.c"
+#include "decompress/zstd_ddict.c"
+#include "decompress/zstd_decompress.c"
+#include "decompress/zstd_decompress_block.c"
+#include "zstd_decompress_module.c"
diff --git a/lib/zstd/zstd_common_module.c b/lib/zstd/zstd_common_module.c
new file mode 100644
index 000000000..22686e367
--- /dev/null
+++ b/lib/zstd/zstd_common_module.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
+/*
+ * Copyright (c) Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#include <linux/module.h>
+
+#include "common/huf.h"
+#include "common/fse.h"
+#include "common/zstd_internal.h"
+
+// Export symbols shared by compress and decompress into a common module
+
+#undef ZSTD_isError /* defined within zstd_internal.h */
+EXPORT_SYMBOL_GPL(FSE_readNCount);
+EXPORT_SYMBOL_GPL(HUF_readStats);
+EXPORT_SYMBOL_GPL(HUF_readStats_wksp);
+EXPORT_SYMBOL_GPL(ZSTD_isError);
+EXPORT_SYMBOL_GPL(ZSTD_getErrorName);
+EXPORT_SYMBOL_GPL(ZSTD_getErrorCode);
+EXPORT_SYMBOL_GPL(ZSTD_customMalloc);
+EXPORT_SYMBOL_GPL(ZSTD_customCalloc);
+EXPORT_SYMBOL_GPL(ZSTD_customFree);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Zstd Common");
diff --git a/lib/zstd/zstd_compress_module.c b/lib/zstd/zstd_compress_module.c
new file mode 100644
index 000000000..04e1b5c01
--- /dev/null
+++ b/lib/zstd/zstd_compress_module.c
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
+/*
+ * Copyright (c) Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/zstd.h>
+
+#include "common/zstd_deps.h"
+#include "common/zstd_internal.h"
+
+#define ZSTD_FORWARD_IF_ERR(ret) \
+ do { \
+ size_t const __ret = (ret); \
+ if (ZSTD_isError(__ret)) \
+ return __ret; \
+ } while (0)
+
+static size_t zstd_cctx_init(zstd_cctx *cctx, const zstd_parameters *parameters,
+ unsigned long long pledged_src_size)
+{
+ ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_reset(
+ cctx, ZSTD_reset_session_and_parameters));
+ ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setPledgedSrcSize(
+ cctx, pledged_src_size));
+ ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter(
+ cctx, ZSTD_c_windowLog, parameters->cParams.windowLog));
+ ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter(
+ cctx, ZSTD_c_hashLog, parameters->cParams.hashLog));
+ ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter(
+ cctx, ZSTD_c_chainLog, parameters->cParams.chainLog));
+ ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter(
+ cctx, ZSTD_c_searchLog, parameters->cParams.searchLog));
+ ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter(
+ cctx, ZSTD_c_minMatch, parameters->cParams.minMatch));
+ ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter(
+ cctx, ZSTD_c_targetLength, parameters->cParams.targetLength));
+ ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter(
+ cctx, ZSTD_c_strategy, parameters->cParams.strategy));
+ ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter(
+ cctx, ZSTD_c_contentSizeFlag, parameters->fParams.contentSizeFlag));
+ ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter(
+ cctx, ZSTD_c_checksumFlag, parameters->fParams.checksumFlag));
+ ZSTD_FORWARD_IF_ERR(ZSTD_CCtx_setParameter(
+ cctx, ZSTD_c_dictIDFlag, !parameters->fParams.noDictIDFlag));
+ return 0;
+}
+
+int zstd_min_clevel(void)
+{
+ return ZSTD_minCLevel();
+}
+EXPORT_SYMBOL(zstd_min_clevel);
+
+int zstd_max_clevel(void)
+{
+ return ZSTD_maxCLevel();
+}
+EXPORT_SYMBOL(zstd_max_clevel);
+
+size_t zstd_compress_bound(size_t src_size)
+{
+ return ZSTD_compressBound(src_size);
+}
+EXPORT_SYMBOL(zstd_compress_bound);
+
+zstd_parameters zstd_get_params(int level,
+ unsigned long long estimated_src_size)
+{
+ return ZSTD_getParams(level, estimated_src_size, 0);
+}
+EXPORT_SYMBOL(zstd_get_params);
+
+size_t zstd_cctx_workspace_bound(const zstd_compression_parameters *cparams)
+{
+ return ZSTD_estimateCCtxSize_usingCParams(*cparams);
+}
+EXPORT_SYMBOL(zstd_cctx_workspace_bound);
+
+zstd_cctx *zstd_init_cctx(void *workspace, size_t workspace_size)
+{
+ if (workspace == NULL)
+ return NULL;
+ return ZSTD_initStaticCCtx(workspace, workspace_size);
+}
+EXPORT_SYMBOL(zstd_init_cctx);
+
+size_t zstd_compress_cctx(zstd_cctx *cctx, void *dst, size_t dst_capacity,
+ const void *src, size_t src_size, const zstd_parameters *parameters)
+{
+ ZSTD_FORWARD_IF_ERR(zstd_cctx_init(cctx, parameters, src_size));
+ return ZSTD_compress2(cctx, dst, dst_capacity, src, src_size);
+}
+EXPORT_SYMBOL(zstd_compress_cctx);
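+
+/*
+ * One-shot usage sketch (illustrative; error handling and the choice of
+ * allocator are the caller's responsibility, vmalloc() is only an example):
+ *
+ *   zstd_parameters params = zstd_get_params(3, src_size);
+ *   size_t wksp_size = zstd_cctx_workspace_bound(&params.cParams);
+ *   void *wksp = vmalloc(wksp_size);
+ *   zstd_cctx *cctx = zstd_init_cctx(wksp, wksp_size);
+ *   size_t c_size = zstd_compress_cctx(cctx, dst, dst_capacity,
+ *                                      src, src_size, &params);
+ */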
+
+size_t zstd_cstream_workspace_bound(const zstd_compression_parameters *cparams)
+{
+ return ZSTD_estimateCStreamSize_usingCParams(*cparams);
+}
+EXPORT_SYMBOL(zstd_cstream_workspace_bound);
+
+zstd_cstream *zstd_init_cstream(const zstd_parameters *parameters,
+ unsigned long long pledged_src_size, void *workspace, size_t workspace_size)
+{
+ zstd_cstream *cstream;
+
+ if (workspace == NULL)
+ return NULL;
+
+ cstream = ZSTD_initStaticCStream(workspace, workspace_size);
+ if (cstream == NULL)
+ return NULL;
+
+	/* 0 means "unknown" in the linux zstd API, but means an actual zero in the new zstd API */
+ if (pledged_src_size == 0)
+ pledged_src_size = ZSTD_CONTENTSIZE_UNKNOWN;
+
+ if (ZSTD_isError(zstd_cctx_init(cstream, parameters, pledged_src_size)))
+ return NULL;
+
+ return cstream;
+}
+EXPORT_SYMBOL(zstd_init_cstream);
+
+size_t zstd_reset_cstream(zstd_cstream *cstream,
+ unsigned long long pledged_src_size)
+{
+ if (pledged_src_size == 0)
+ pledged_src_size = ZSTD_CONTENTSIZE_UNKNOWN;
+ ZSTD_FORWARD_IF_ERR( ZSTD_CCtx_reset(cstream, ZSTD_reset_session_only) );
+ ZSTD_FORWARD_IF_ERR( ZSTD_CCtx_setPledgedSrcSize(cstream, pledged_src_size) );
+ return 0;
+}
+EXPORT_SYMBOL(zstd_reset_cstream);
+
+size_t zstd_compress_stream(zstd_cstream *cstream, zstd_out_buffer *output,
+ zstd_in_buffer *input)
+{
+ return ZSTD_compressStream(cstream, output, input);
+}
+EXPORT_SYMBOL(zstd_compress_stream);
+
+size_t zstd_flush_stream(zstd_cstream *cstream, zstd_out_buffer *output)
+{
+ return ZSTD_flushStream(cstream, output);
+}
+EXPORT_SYMBOL(zstd_flush_stream);
+
+size_t zstd_end_stream(zstd_cstream *cstream, zstd_out_buffer *output)
+{
+ return ZSTD_endStream(cstream, output);
+}
+EXPORT_SYMBOL(zstd_end_stream);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Zstd Compressor");
diff --git a/lib/zstd/zstd_decompress_module.c b/lib/zstd/zstd_decompress_module.c
new file mode 100644
index 000000000..f4ed952ed
--- /dev/null
+++ b/lib/zstd/zstd_decompress_module.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
+/*
+ * Copyright (c) Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/zstd.h>
+
+#include "common/zstd_deps.h"
+
+/* Common symbols. zstd_compress must depend on zstd_decompress. */
+
+unsigned int zstd_is_error(size_t code)
+{
+ return ZSTD_isError(code);
+}
+EXPORT_SYMBOL(zstd_is_error);
+
+zstd_error_code zstd_get_error_code(size_t code)
+{
+ return ZSTD_getErrorCode(code);
+}
+EXPORT_SYMBOL(zstd_get_error_code);
+
+const char *zstd_get_error_name(size_t code)
+{
+ return ZSTD_getErrorName(code);
+}
+EXPORT_SYMBOL(zstd_get_error_name);
+
+/* Decompression symbols. */
+
+size_t zstd_dctx_workspace_bound(void)
+{
+ return ZSTD_estimateDCtxSize();
+}
+EXPORT_SYMBOL(zstd_dctx_workspace_bound);
+
+zstd_dctx *zstd_init_dctx(void *workspace, size_t workspace_size)
+{
+ if (workspace == NULL)
+ return NULL;
+ return ZSTD_initStaticDCtx(workspace, workspace_size);
+}
+EXPORT_SYMBOL(zstd_init_dctx);
+
+size_t zstd_decompress_dctx(zstd_dctx *dctx, void *dst, size_t dst_capacity,
+ const void *src, size_t src_size)
+{
+ return ZSTD_decompressDCtx(dctx, dst, dst_capacity, src, src_size);
+}
+EXPORT_SYMBOL(zstd_decompress_dctx);
+
+size_t zstd_dstream_workspace_bound(size_t max_window_size)
+{
+ return ZSTD_estimateDStreamSize(max_window_size);
+}
+EXPORT_SYMBOL(zstd_dstream_workspace_bound);
+
+zstd_dstream *zstd_init_dstream(size_t max_window_size, void *workspace,
+ size_t workspace_size)
+{
+ if (workspace == NULL)
+ return NULL;
+ (void)max_window_size;
+ return ZSTD_initStaticDStream(workspace, workspace_size);
+}
+EXPORT_SYMBOL(zstd_init_dstream);
+
+size_t zstd_reset_dstream(zstd_dstream *dstream)
+{
+ return ZSTD_resetDStream(dstream);
+}
+EXPORT_SYMBOL(zstd_reset_dstream);
+
+size_t zstd_decompress_stream(zstd_dstream *dstream, zstd_out_buffer *output,
+ zstd_in_buffer *input)
+{
+ return ZSTD_decompressStream(dstream, output, input);
+}
+EXPORT_SYMBOL(zstd_decompress_stream);
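+
+/*
+ * Streaming usage sketch (illustrative; the workspace, buffers and refill
+ * policy are the caller's choice):
+ *
+ *   zstd_dstream *ds = zstd_init_dstream(max_window_size, wksp, wksp_size);
+ *   zstd_in_buffer in = { src, src_size, 0 };
+ *   zstd_out_buffer out = { dst, dst_capacity, 0 };
+ *   while (in.pos < in.size) {
+ *           size_t ret = zstd_decompress_stream(ds, &out, &in);
+ *           if (zstd_is_error(ret))
+ *                   break;
+ *   }
+ */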
+
+size_t zstd_find_frame_compressed_size(const void *src, size_t src_size)
+{
+ return ZSTD_findFrameCompressedSize(src, src_size);
+}
+EXPORT_SYMBOL(zstd_find_frame_compressed_size);
+
+size_t zstd_get_frame_header(zstd_frame_header *header, const void *src,
+ size_t src_size)
+{
+ return ZSTD_getFrameHeader(header, src, src_size);
+}
+EXPORT_SYMBOL(zstd_get_frame_header);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Zstd Decompressor");